Dataset schema (one row per retrieval example):

  query            stringlengths   (9 to 9.05k chars)
  document         stringlengths   (10 to 222k chars)
  metadata         dict
  negatives        sequencelengths (30 items)
  negative_scores  sequencelengths (30 items)
  document_score   stringlengths   (4 to 10 chars)
  document_rank    stringclasses   (2 values)
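A minimal loading sketch for the columns above, assuming the rows are available as a local JSON-lines export (the actual dataset path or Hub ID is not given in this dump, so "data.jsonl" is a placeholder):

from datasets import load_dataset

# "data.jsonl" is a placeholder path; point it at the real export of this dataset.
ds = load_dataset("json", data_files="data.jsonl", split="train")

row = ds[0]
print(row["query"])                  # natural-language query
print(row["document"][:200])         # positive code document, truncated for display
print(len(row["negatives"]))         # 30 hard-negative documents
print(len(row["negative_scores"]))   # 30 matching similarity scores
print(row["document_score"], row["document_rank"])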
Update the balance, sell account, buy account, based on return rate
def _balance_update(self):
    return_rate = self.df.loc[self.currentStep, "return_Close"]
    self.buy_amount += return_rate * self.buy_amount
    self.sell_amount -= return_rate * self.sell_amount
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_earning(self, data=None):\n result = Result()\n if data is None:\n data = self.security\n self.calcDecision()\n first_purchase_method = self.check_first_purchase_method()\n for i in np.arange(len(data['Close'])):\n if data['FinalDecision'].iloc[i] is None:\n pass\n elif data['FinalDecision'].iloc[i] == TransactionType.BUY:\n if data['FinalDecision'].iloc[i-1] == TransactionType.BUY:\n pass\n else:\n if (self.buys_made + self.sells_made) == 0:\n if first_purchase_method == FirstTransactionType.INIT_CAPITAL:\n self.shares_own = int((self.init_capital/data['Close'].iloc[i]))\n self.buys_made += 1\n elif first_purchase_method == FirstTransactionType.STOCK_QUANTITY:\n self.shares_own = self.stock_quantity\n self.buys_made += 1\n else:\n self.shares_own = int(self.final_capital / data['Close'].iloc[i])\n self.final_capital = self.final_capital % data['Close'].iloc[i]\n #print(self.shares_own)\n\n elif data['FinalDecision'].iloc[i] == TransactionType.SELL:\n if data['FinalDecision'].iloc[i-1] == TransactionType.SELL:\n pass\n else:\n if (self.buys_made + self.sells_made) == 0:\n pass\n else:\n self.final_capital += self.shares_own * data['Close'].iloc[i]\n self.shares_own = 0\n self.sells_made +=1\n #Checar si es el momento mas alto o bajo de ganancias\n if self.shares_own == 0:\n if (self.highest_point is None\n or self.highest_point < self.final_capital):\n self.highest_point = self.final_capital\n if (self.lowest_point is None\n or self.lowest_point > self.final_capital\n or self.lowest_point == 0):\n self.lowest_point = self.final_capital\n else:\n if (self.highest_point is None\n or self.highest_point < (self.shares_own * data['Close'].iloc[i])):\n self.highest_point = self.final_capital\n if (self.lowest_point is None\n or self.lowest_point > (self.shares_own * data['Close'].iloc[i])\n or self.lowest_point == 0):\n self.lowest_point = self.final_capital\n self.calcRealFinalCapital()\n self.calcDiferencePercentage()", "def calculate_profit(self):", "def test_open_ru_ballance(self, ):\n if self.report_type == 'open.ru':\n (mid, aid) = self.make_money_and_account() #@UnusedVariable\n self.load_data_into_account(aid)\n deals = self.get_deals()\n repo_deals = self.get_repo_deals()\n \n if self.open_ru_report_type == 'stock':\n comm = self.open_ru_get_micex_commission(deals, repo_deals)\n elif self.open_ru_report_type == 'future':\n atl = self.get_account_totally_line()\n comm = self.open_ru_get_forts_comm(atl)\n ballance = sum([float(d.getAttribute('deal_sign')) *\n float(d.getAttribute('price')) *\n float(d.getAttribute('quantity'))\n for d in deals])\n ballance += sum([float(d.getAttribute('deal_sign')) *\n float(d.getAttribute('deal_price')) *\n float(d.getAttribute('quantity'))\n for d in repo_deals])\n ballance += 10000 - comm # 10000 is the initial account amount\n accs = self.model.list_view_accounts().fetchall()\n self.assertEqual(1, len(accs))\n self.assertAlmostEqual(ballance, accs[0]['current_money'])", "def calc_b_a(self, data):\n # Calculate our expected bid / ask\n mkt_bid = data['Bid'].values[-1]\n mkt_ask = data['Ask'].values[-1]\n last_trade = data['Trade Price'].values[-1]\n shares = self.calc_shares(data=data, last_trade=last_trade)\n\n k = (mkt_bid - mkt_ask) / (mkt_bid + mkt_ask) * -100\n\n our_bid = np.average(data['Bid'], weights=self.weights) - k\n our_ask = np.average(data['Ask'], weights=self.weights) + k\n\n self.portfoolio['Unrealized_PnL'] = self.portfoolio['Shares'] * last_trade - self.portfoolio['Avg_Cost'] * self.portfoolio['Shares']\n\n if shares == 
0:\n # Skip 0 share orders\n pass\n elif abs(self.portfoolio['Cost']) >= self.max_pos * .75:\n # If position size at or above 95% of max, reduce position\n self.risk_control(bid=mkt_bid, ask=mkt_ask, last_trade=last_trade)\n\n elif our_bid >= mkt_bid:\n # Buy at bid\n self.trade(shares=shares, price=mkt_bid, last_trade=last_trade)\n elif our_ask <= mkt_ask:\n # Sell at ask\n self.trade(shares=-shares, price=mkt_ask, last_trade=last_trade)\n else:\n print('No order placed')", "def buy(self,\n currency_pair,\n rate,\n amount):\n pass", "def __balance__(self) -> float:\n\n with dataset.connect(database.get_db()) as db:\n # Find last bank transaction.\n statement = statement = f\"\"\"\n SELECT opening_balance, transaction_amount\n FROM bank\n WHERE author_id = {self.user.id}\n ORDER BY id DESC\n LIMIT 1\n \"\"\"\n result = db.query(statement)\n\n for row in result:\n balance = row[\"opening_balance\"] + row[\"transaction_amount\"]\n break\n else:\n # If there was no result for the user, default balance is given.\n balance = 500\n\n return float(balance)", "def account_balance():\n return float(pareto.rvs(1.161))", "def sum_up(self):\n sum_base = 0\n sum_quote = 0\n for i in range(len(self.book[Trade.WAY_BUY])):\n offer = self.book[Trade.WAY_BUY][i]\n sum_base = sum_base + offer.get_base_amount()\n sum_quote = sum_quote + offer.get_quote_amount()\n offer.set_sum_base(sum_base)\n offer.set_sum_quote(sum_quote)\n\n sum_base = 0\n sum_quote = 0\n for i in range(len(self.book[Trade.WAY_SELL])):\n offer = self.book[Trade.WAY_SELL][i]\n sum_base = sum_base + offer.get_base_amount()\n sum_quote = sum_quote + offer.get_quote_amount()\n offer.set_sum_base(sum_base)\n offer.set_sum_quote(sum_quote)", "def withdraw(amt) :\r\n global bal \r\n bal_in = bal\r\n\t#PREMISES FOR NEXT LINE: \r\n\t# (amt >= 0)\r\n\t# (bal >= 0)\r\n\t# (bal == bal_in)\r\n\t\"\"\"{1.OK amt >= 0\tpremise\r\n\t\t2.OK bal >= 0\tpremise\r\n\t\t3.OK bal == bal_in\tpremise\r\n\t}\"\"\"\r\n\t#PREMISES FOR NEXT LINE: \r\n\t# (bal == bal_in)\r\n\tif amt <= bal:\r\n\t\t#PREMISES FOR THEN-ARM: \r\n\t\t# (amt <= bal)\r\n\t\t# (bal == bal_in)\r\n\t\t\"\"\"{1.OK amt <= bal\tpremise\r\n\t\t\t4.OK bal == bal_in\tpremise\r\n\t\t}\"\"\"\r\n\t\t#PREMISES FOR NEXT LINE: \r\n\t\t# (bal == bal_in)\r\n\t\tbal = bal - amt\r\n\t\t#PREMISES FOR ATTACHED PROOF, IF ANY: \r\n\t\t# (bal == (bal_old - amt))\r\n\t\t# (bal_old == bal_in)\r\n\t\t\"\"\"{1.OK bal == bal_old - amt\tpremise\r\n\t\t\t2.OK amt <= bal_old\talgebra 1\r\n\t\t\t3.OK amt >= 0\talgebra 1\r\n\t\t\t4.OK bal_old >= 0\talgebra 1\r\n\t\t\t5.OK bal_old == bal_in\tpremise\r\n\t\t\t6.OK amt == bal_in - bal\talgebra 1 5\r\n\t\t}\"\"\"\r\n\t\t#PREMISES FOR NEXT LINE: \r\n\t\t# (amt == (bal_in - bal))\r\n\t\tcash = amt\r\n\t\t#PREMISES FOR ATTACHED PROOF, IF ANY: \r\n\t\t# (cash == amt)\r\n\t\t# (amt == (bal_in - bal))\r\n\t\t\"\"\"{1.OK amt == bal_in - bal\tpremise\r\n\t\t\t2.OK cash == amt\tpremise\r\n\t\t\t3.OK cash == bal_in - bal\t\tsubst 2 1\r\n\t\t\t4.OK bal >= 0\talgebra 1\r\n\t\t\t5.OK bal_in == bal + cash\talgebra 3\r\n\t\t}\"\"\"\r\n\t\t#PREMISES FOR NEXT LINE: \r\n\t\t# (bal_in == (bal + cash))\r\n\telse :\r\n\t\t#PREMISES FOR ELSE-ARM: \r\n\t\t# not (amt <= bal)\r\n\t\t# (bal == bal_in)\r\n\t\t\"\"\"{1.OK not(amt <= bal)\tpremise\r\n\t\t\t4.OK bal == bal_in\tpremise\r\n\t\t}\"\"\"\r\n\t\t#PREMISES FOR NEXT LINE: \r\n\t\t# (bal == bal_in)\r\n\t\tcash = 0\r\n\t\t#PREMISES FOR ATTACHED PROOF, IF ANY: \r\n\t\t# (cash == 0)\r\n\t\t# (bal == bal_in)\r\n\t\tassert not (amt <= bal) # UNABLE TO 
VERIFY\r\n\t\t\"\"\"{1.OK cash == 0\tpremise\r\n\t\t\t2.OK bal == bal_in\tpremise\r\n\t\t\t3.?? not(amt <= bal)\tpremise\r\n\t\t\t4.OK bal >= 0\talgebra 3\r\n\t\t}\"\"\"\r\n\t\t#PREMISES FOR NEXT LINE: \r\n\t\t# (bal >= 0)\r\n# ERROR: uneven indentation of commands\r\n # prove here that bal >= 0 and bal + cash == bal_in\r\n return cash\r\n #PREMISES FOR NEXT LINE: \r\n # (bal >= 0)\r\n # ((bal + cash) == bal_in)\r\n\t#PREMISES FOR NEXT LINE: \r\n\t# ((bal_in == (bal + cash)) or ((bal >= 0) and ((bal + cash) == bal_in)))\r\n\tassert (bal >= 0) # UNABLE TO VERIFY\r", "def deposit(amt) :\r\n\tglobal bal\r\n\tbal_in = bal\r\n\t#PREMISES FOR NEXT LINE: \r\n\t# (amt >= 0)\r\n\t# (bal >= 0)\r\n\t# (bal == bal_in)\r\n\tbal = bal + amt\r\n\t#PREMISES FOR ATTACHED PROOF, IF ANY: \r\n\t# (bal == (bal_old + amt))\r\n\t# (amt >= 0)\r\n\t# (bal_old >= 0)\r\n\t# (bal_old == bal_in)\r\n\t#PREMISES FOR NEXT LINE: \r\n\t# (amt >= 0)\r", "def p_base_rate_trader(params, substep, state_history, state):\n APY = float(((1 + Decimal(state['target_rate'])) ** (60*60*24*365) - 1) * 100)\n\n share = (state['external_BASE_APY'] - APY)/params['base_rate_trader_max_APY_diff']\n if share > 1:\n share = 1\n elif share < -1:\n share = -1\n\n #move all to external market if target rate is negative and external is positive\n if APY <= 0:\n if state['external_BASE_APY'] > 0:\n share = 1\n \n update = moneyMarketStateChange(params, state, state['base_rate_trader_state'], share, state['base_rate_trader_max_balance'])\n update['base_rate_trader_state'] = update['state']\n return {**update}", "def calculate(self):\r\n if self.__calculation_type == self.__DIFFERENTIATED_PAY:\r\n for month in range(1, self.__principal_term+1):\r\n self.__differentiated_pay.append(\r\n ceil(\r\n (self.__credit_principal/self.__principal_term)\r\n + self.__credit_interest*(self.__credit_principal\r\n - (self.__credit_principal\r\n * (month-1))\r\n / self.__principal_term)\r\n )\r\n )\r\n self.__overpayment = sum(self.__differentiated_pay) - self.__credit_principal\r\n\r\n for i, dp in enumerate(self.__differentiated_pay, 1):\r\n print(f'Month {i}: paid out {dp}')\r\n print()\r\n print(f'Overpayment = {self.__overpayment}')\r\n\r\n elif self.__calculation_type == self.__ANNUITY:\r\n if self.__user_choice == self.__SEEK_ANNUITY_MONTHLY:\r\n self.__annuity_monthly = ceil(\r\n self.__credit_principal * ((self.__credit_interest\r\n * pow(1+self.__credit_interest\r\n , self.__principal_term)\r\n )\r\n / (pow(1+self.__credit_interest\r\n , self.__principal_term)\r\n - 1)\r\n )\r\n )\r\n self.__overpayment = (self.__annuity_monthly * self.__principal_term\r\n - self.__credit_principal\r\n )\r\n print(f'Your annuity payment = {self.__annuity_monthly}!')\r\n\r\n elif self.__user_choice == self.__SEEK_TERM:\r\n self.__principal_term = ceil(\r\n log(self.__annuity_monthly / (self.__annuity_monthly\r\n - (self.__credit_interest\r\n * self.__credit_principal))\r\n , 1+self.__credit_interest)\r\n )\r\n self.__overpayment = ceil(self.__annuity_monthly\r\n * self.__principal_term\r\n - self.__credit_principal\r\n )\r\n years = self.__principal_term // 12\r\n months = self.__principal_term % 12\r\n\r\n print(f'You need {years} year{\"s\" if self.__principal_term > 1 else \"\"}'\r\n f'{\" and \" + str(months) + \" months\" if months > 0 else \"\"}'\r\n f' to repay this credit!')\r\n\r\n elif self.__user_choice == self.__SEEK_CREDIT_PRINCIPAL:\r\n self.__credit_principal = ceil(\r\n self.__annuity_monthly\r\n / ((self.__credit_interest\r\n * 
pow(1+self.__credit_interest, self.__principal_term)\r\n )\r\n / (pow(1+self.__credit_interest, self.__principal_term)\r\n - 1)\r\n )\r\n )\r\n self.__overpayment = ceil(self.__annuity_monthly\r\n * self.__principal_term\r\n - self.__credit_principal)\r\n\r\n print(f'Your credit principal = {self.__credit_principal}!')\r\n print(f'Overpayment = {self.__overpayment}')\r\n\r\n else:\r\n print('Incorrect parameters')\r\n self.usage()", "def do_balance(self,args):\n \"\"\"Can show total, available(available for trading), or reserved(reserved in open orders)\"\"\"\n \"\"\"usage: balance [available/reserved](optional)\"\"\"\n args = stripoffensive(args)\n if 'available' in args:\n btc,usd = available() \n elif 'reserved' in args:\n btc,usd = reserved()\n else:\n btc,usd = bal()\n word = args if args else \"total\"\n print 'Your %s balance is %.8f BTC and $%.2f USD ' % (word,btc,usd)\n if word == \"total\":\n last = D(bitstamp.ticker()['last'])\n print 'Account Value: $%.2f @ Last BTC Price of $%.2f' % (btc*last+usd,last)", "def rate(self):\n return self.brate / FAC", "async def update_account_balance():\n\n try:\n balance = App.client.get_asset_balance(asset=App.config[\"base_asset\"])\n except Exception as e:\n log.error(f\"Binance exception in 'get_asset_balance' {e}\")\n return\n\n App.base_quantity = Decimal(balance.get(\"free\", \"0.00000000\")) # BTC\n\n try:\n balance = App.client.get_asset_balance(asset=App.config[\"quote_asset\"])\n except Exception as e:\n log.error(f\"Binance exception in 'get_asset_balance' {e}\")\n return\n\n App.quote_quantity = Decimal(balance.get(\"free\", \"0.00000000\")) # USD\n\n pass", "def pay_off_fully(balance, annualInterestRate):\n\n #variable assignment\n currentBalance = balance\n monthlyInterestRate = annualInterestRate/12", "def balance(self):\n return self._rbal - self._lbal", "def calculate_bonuses (the_sum_of_current_purchase):\n the_sum_of_previous_purchases = 0\n blue_card_percent = 0.05\n silver_card_percent = 0.07\n gold_card_percent = 0.1\n the_sum_of_previous_purchases = the_sum_of_previous_purchases + the_sum_of_current_purchase\n\n if the_sum_of_previous_purchases <1000:\n bonus_for_purchase = 0\n if 1000 <= the_sum_of_previous_purchases <= 15_000:\n bonus_for_purchase = the_sum_of_current_purchase * blue_card_percent\n\n if 15001 <= the_sum_of_previous_purchases < 150_000:\n bonus_for_purchase = the_sum_of_current_purchase * silver_card_percent\n\n if the_sum_of_previous_purchases >= 150_000:\n bonus_for_purchase = the_sum_of_current_purchase * gold_card_percent\n\n return bonus_for_purchase", "def my_rebalance(context, data):\n freq_month = 3\n context.counter += 1\n if context.counter == freq_month:\n for stock, weight in context.weights.iteritems():\n context.counter = 0\n if data.can_trade(stock):\n order_target_percent(stock, weight)", "def sell(self,\n currency_pair,\n rate,\n amount):\n pass", "def calcul_buy_nb_action(self):\n nb_action = self.max_loss / (self.buy_price - self.stop_loss)\n invest = self.max_loss / (self.buy_price - self.stop_loss) * self.buy_price\n\n if invest > self.capital:\n return round(self.capital / self.buy_price, 9)\n else:\n return round(nb_action, 9)", "def sell_cost(self, sell_price, count):\n\n g_cost = math.floor(self.g_fee * sell_price * 1000 * count)\n handling_cost = math.ceil(self.handling_fee * self.fee_count * sell_price * 1000 * count)\n new_fee = g_cost + handling_cost\n print(sell_price, self.g_fee, self.handling_fee, self.fee_count, new_fee)\n return int(sell_price*1000*count-new_fee)", "def 
update_last_purchase_rate(self, obj, is_submit):\n\n\t\timport frappe.utils\n\t\tthis_purchase_date = frappe.utils.getdate(obj.get('posting_date') or obj.get('transaction_date'))\n\n\t\tfor d in obj.get(\"items\"):\n\t\t\t# get last purchase details\n\t\t\tlast_purchase_details = get_last_purchase_details(d.item_code, obj.name)\n\n\t\t\t# compare last purchase date and this transaction's date\n\t\t\tlast_purchase_rate = None\n\t\t\tif last_purchase_details and \\\n\t\t\t\t\t(last_purchase_details.purchase_date > this_purchase_date):\n\t\t\t\tlast_purchase_rate = last_purchase_details['base_rate']\n\t\t\telif is_submit == 1:\n\t\t\t\t# even if this transaction is the latest one, it should be submitted\n\t\t\t\t# for it to be considered for latest purchase rate\n\t\t\t\tif flt(d.conversion_factor):\n\t\t\t\t\tlast_purchase_rate = flt(d.base_rate) / flt(d.conversion_factor)\n\t\t\t\telse:\n\t\t\t\t\tfrappe.throw(_(\"UOM Conversion factor is required in row {0}\").format(d.idx))\n\n\t\t\t# update last purchsae rate\n\t\t\tif last_purchase_rate:\n\t\t\t\tfrappe.db.sql(\"\"\"update `tabItem` set last_purchase_rate = %s where name = %s\"\"\",\n\t\t\t\t\t(flt(last_purchase_rate), d.item_code))", "def balance(p, r, t):\n return p*(1 + r)**t", "def __call__(self, auctioneer):\n curr_bid = auctioneer.current_bid\n bid_price = curr_bid * self._bid_increase_perc\n if bid_price <= self._budget and self.get_bid_probability() > 0.3:\n self._highest_bid = bid_price\n return bid_price\n return 0", "def balance(self):\n return sum(self.operations.select())\n 11", "def trade(self, action=None):\n #print(\"Trading {}\".format(action))\n # Buy\n if action > 0.2 : self.posture = 1\n # Hold\n if action < 0.2 and action > -0.2: self.posture = 0\n # Sell\n if action < -0.2: self.posture = -1\n \n # Evaluate posture and calculare actual cost of trade\n #print(\"Posture: {}\".format(self.posture))\n if self.posture == 1:\n _amt = self.amt_buy\n _base = (_amt * self.state['price'] \\\n + (_amt * self.commission)) * -1\n \n elif self.posture == -1:\n _amt = self.amt_sell\n _base = _amt * self.state['price'] \\\n + (_amt * self.commission) \\\n + (_amt * self.gap)\n _amt = _amt * -1 \n\n # Set posture to 0 if no balance available\n if (self.posture == 1 and self.balance < abs(_base)) \\\n or (self.posture == -1 and self.bag < abs(_amt)):\n print(\"NOT enough amount!!\")\n self.stop=True\n self.posture = 0\n\n if self.posture == 0:\n _amt = 0\n _base = 0\n\n # Modify balances\n self.transaction = _base\n self.amt = _amt\n self.balance = self.balance + _base\n self.bag = self.bag + _amt\n self.value = self.calculate_value()\n #print(\"Posture : {} // Transaction: {}\".format(self.posture, self.transaction))\n\n return self", "def ramp_up(self) -> None:\n self.cash_balance: float = self.initial_cash_balance()\n for stock in self.stocks:\n initial_date_idx = 0\n self.cash_balance = stock.buy(initial_date_idx, self.cash_balance, self.buy_budget)", "def test_underpayment(self):\n debit_jobs([(self.job, A(500), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(480), A(0), A(0))], D(480))\n diff = A(500) - A(480)\n self.assert_balances(\n bank=A(480, 0, 0),\n invoiced=A(500),\n paid=A(-480),\n partial=A(480).net_amount,\n tax=A(480).tax_amount,\n balance=diff,\n promised=diff,\n ) # <- negative balances because of overpayment", "def test_update_balance(self):\n current_year_tuple = (0.1, 0.1, 0.8)\n iteration_balance = 90\n contribution = 10\n expected_result = 110\n test_balance = 
investment_growth.update_balance(iteration_balance, contribution, current_year_tuple)\n self.assertEqual(test_balance, expected_result)" ]
[ "0.6785312", "0.6220181", "0.6175861", "0.6114552", "0.6088199", "0.60283774", "0.6012482", "0.59921443", "0.5976123", "0.5962097", "0.59508103", "0.5949215", "0.5946042", "0.5930534", "0.5910977", "0.59003735", "0.5884636", "0.58771354", "0.5839308", "0.58320624", "0.5797567", "0.5795176", "0.5793032", "0.5792811", "0.5792245", "0.57862926", "0.5784341", "0.5781169", "0.5777878", "0.5766952" ]
0.72528595
0
Prepare the dict of values to create the new invoice for a sale order. This method may be overridden to implement custom invoice generation (making sure to call super() to establish a clean extension chain).
def _prepare_invoice(self, cr, uid, order, lines, context=None):
    invoice_vals = super(my_sale_order, self)._prepare_invoice(cr, uid, order, lines, context)
    invoice_vals.update({
        'partner_shipping_id': order.partner_shipping_id.id,
    })
    # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1
    invoice_vals.update(self._inv_get(cr, uid, order, context=context))
    return invoice_vals
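The query's point about a clean extension chain is that several modules can stack overrides of _prepare_invoice, each calling super(). A hypothetical second-level override illustrating the pattern (class and field choices are illustrative only; the osv import path varies with the OpenERP/Odoo version):

from openerp.osv import osv  # in older releases this was `from osv import osv`

class another_sale_order(osv.osv):
    _inherit = 'sale.order'

    def _prepare_invoice(self, cr, uid, order, lines, context=None):
        # super() walks the chain: my_sale_order's values (and the base ones) are kept.
        invoice_vals = super(another_sale_order, self)._prepare_invoice(cr, uid, order, lines, context)
        invoice_vals.update({'comment': order.note or ''})
        return invoice_vals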
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _prepare_invoice(self):\n # get current logged in user's timezone\n local = pytz.timezone(self.env['res.users'].browse(self._uid).tz) or pytz.utc\n\n self.ensure_one()\n journal_id = self.env['account.journal'].search([('type', '=', 'purchase')], limit=1).id\n if not journal_id:\n raise UserError(_('Please define an accounting purchase journal for this company.'))\n invoice_vals = {\n 'name': self.partner_ref or '',\n 'origin': self.name,\n 'type': 'in_invoice',\n 'account_id': self.partner_id.property_account_payable_id.id,\n 'partner_id': self.partner_id.id,\n 'journal_id': journal_id,\n 'currency_id': self.currency_id.id,\n 'comment': self.notes,\n 'payment_term_id': self.payment_term_id.id,\n 'fiscal_position_id': self.fiscal_position_id.id or self.partner_id.property_account_position_id.id,\n 'company_id': self.company_id.id,\n 'purchase_id': self.id,\n 'date_invoice':pytz.utc.localize(datetime.datetime.now()).astimezone(local).strftime('%Y-%m-%d'),\n }\n return invoice_vals", "def prepare_invoice(self):\n journal_id = self.env['account.invoice'].default_get(['journal_id'])['journal_id']\n if not journal_id:\n raise UserError(_('Please define sales journal for this company: \"%s\" (id:%d).') % (self.company_id.name, self.company_id.id))\n invoice_vals = {\n 'order_id': self.id,\n 'name': self.order_no,\n 'origin': self.order_no,\n 'type': 'out_invoice',\n 'reference': self.patient_id.name + ':' + self.name,\n 'account_id': self.patient_id.partner_id.property_account_receivable_id.id,\n 'partner_id': self.patient_id.partner_id.id,\n 'journal_id': journal_id,\n 'comment': self.note,\n 'doctor_id': self.doctor_id.id,\n 'payment_term': False,\n 'user_id': False,\n }\n return invoice_vals", "def _prepare_invoice(self):\n self.ensure_one()\n # journal_id = self.env['account.invoice'].with_context(force_company=self.env.user.company_id.id).default_get(['journal_id'])['journal_id']\n journal_id = self.company_id.journal_id.id\n if not journal_id:\n raise UserError(_('Please define an accounting sales journal for this company.'))\n invoice_vals = {\n 'name': self.client_order_ref or '',\n 'origin': self.name,\n 'type': 'out_invoice',\n 'account_id': self.partner_invoice_id.property_account_receivable_id.id,\n 'partner_id': self.partner_invoice_id.id,\n 'partner_shipping_id': self.partner_shipping_id.id,\n 'journal_id': journal_id,\n 'currency_id': self.pricelist_id.currency_id.id,\n 'comment': self.note,\n 'payment_term_id': self.payment_term_id.id,\n 'fiscal_position_id': self.fiscal_position_id.id or self.partner_invoice_id.property_account_position_id.id,\n 'company_id': self.company_id.id,\n 'user_id': self.user_id and self.user_id.id,\n 'team_id': self.team_id.id\n }\n return invoice_vals", "def _prepare_invoice(self):\n self.ensure_one()\n journal_id = self.env['account.invoice'].default_get(['journal_id'])['journal_id']\n if not journal_id:\n raise UserError(_('Please define an accounting sales journal for this company.'))\n invoice_vals = {\n 'name': self.client_order_ref or '',\n 'origin': self.name,\n 'type': 'out_invoice',\n 'account_id': self.partner_invoice_id.property_account_receivable_id.id,\n 'partner_id': self.partner_invoice_id.id,\n 'partner_shipping_id': self.partner_shipping_id.id,\n 'journal_id': journal_id,\n 'currency_id': self.pricelist_id.currency_id.id,\n 'comment': self.note,\n 'payment_term_id': self.payment_term_id.id,\n 'fiscal_position_id': self.fiscal_position_id.id or self.partner_invoice_id.property_account_position_id.id,\n 'company_id': 
self.company_id.id,\n 'user_id': self.user_id and self.user_id.id,\n 'team_id': self.team_id.id,\n 'x_studio_field_rgEdd': self.x_studio_field_icWOZ.id,\n 'x_studio_car_type_1': self.vehicle.id,\n 'x_studio_job_card_1': self.x_studio_agency_job_card,\n 'x_studio_car_type_name': self.vehicle.model_id.name,\n 'x_studio_plate_num': self.vehicle.license_plate,\n 'x_studio_claim_num': self.claim_no,\n\n 'x_studio_is_insured':self.is_insured,\n 'x_studio_service_provider': self.service_advisor.id,\n 'date_invoice': fields.Date.today(),\n 'transaction_ids': [(6, 0, self.transaction_ids.ids)],\n }\n return invoice_vals\n\n # 'x_studio_field_rgEdd':order.x_studio_field_icWOZ.id,", "def _prepare_invoice(self, cr, uid, order, lines, context=None):\n if context is None:\n context = {}\n journal_ids = self.pool.get('account.journal').search(cr, uid,\n [('type', '=', 'sale'), ('company_id', '=', order.company_id.id)],\n limit=1)\n if not journal_ids:\n raise osv.except_osv(_('Error !'),\n _('There is no sales journal defined for this company: \"%s\" (id:%d)') % (order.company_id.name, order.company_id.id))\n\n invoice_vals = {\n 'name': order.client_order_ref or '',\n 'origin': order.name,\n 'type': 'out_invoice',\n 'reference': order.client_order_ref or order.name,\n 'account_id': order.partner_id.property_account_receivable.id,\n 'journal_id': order.partner_id.property_default_sale_invoice_journal.id,\n 'partner_id': order.partner_id.id,\n 'address_invoice_id': order.partner_invoice_id.id,\n #'address_contact_id': order.partner_order_id.id,\n 'invoice_line': [(6, 0, lines)],\n 'currency_id': order.pricelist_id.currency_id.id,\n 'comment': order.note,\n 'payment_term': order.payment_term and order.payment_term.id or False,\n 'fiscal_position': order.fiscal_position.id or order.partner_id.property_account_position.id,\n 'date_invoice': context.get('date_invoice', False),\n 'company_id': order.company_id.id,\n 'user_id': order.user_id and order.user_id.id or False\n }\n\n # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1\n invoice_vals.update(self._inv_get(cr, uid, order, context=context))\n\n return invoice_vals", "def _prepare_invoice(self, cr, uid, order, lines, context=None):\n if context is None:\n context = {}\n journal_id = self.pool['account.invoice'].default_get(cr, uid, ['journal_id'], context=context)['journal_id']\n if not journal_id:\n raise osv.except_osv(_('Error!'),\n _('Please define sales journal for this company: \"%s\" (id:%d).') % (order.company_id.name, order.company_id.id))\n invoice_vals = {\n 'name': order.client_order_ref or '',\n 'origin': order.name,\n 'type': 'out_invoice',\n 'reference': order.client_order_ref or order.name,\n 'account_id': order.partner_invoice_id.property_account_receivable.id,\n 'partner_id': order.partner_invoice_id.id,\n 'journal_id': journal_id,\n 'invoice_line': [(6, 0, lines)],\n 'currency_id': order.pricelist_id.currency_id.id,\n 'comment': order.note,\n 'payment_term': order.payment_term and order.payment_term.id or False,\n 'fiscal_position': order.fiscal_position.id or order.partner_invoice_id.property_account_position.id,\n 'date_invoice': context.get('date_invoice', False),\n 'company_id': order.company_id.id,\n 'user_id': order.user_id and order.user_id.id or False,\n 'section_id' : order.section_id.id,\n 'test_1' :order.test\n }\n\n # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1\n invoice_vals.update(self._inv_get(cr, uid, order, context=context))\n return invoice_vals", "def 
_prepare_invoice(self):\n self.ensure_one()\n result = super(SaleOrder, self)._prepare_invoice()\n result.update({\n 'cost_center_id': self.cost_center_id and self.cost_center_id.id or False\n })\n return result", "def _prepare_invoice(self, invoice_type):\n return {\n 'partner_id': self.picking_id.partner_id.id,\n 'company_id': self.picking_id.company_id.id,\n 'type': invoice_type,\n 'name': _('Exchange Inv for %s') % self.picking_id.name,\n 'currency_id': self.env.user.company_id.currency_id.id,\n }", "def _prepare_invoice_line(self, qty):\n self.ensure_one()\n res = {\n 'name': self.name,\n 'sequence': self.sequence,\n 'origin': self.order_id.name,\n 'account_id': self.product_id.product_tmpl_id._get_product_accounts()['stock_input'].id,\n 'price_unit': self.price_unit,\n 'quantity': qty,\n 'uom_id': self.product_uom.id,\n 'product_id': self.product_id.id or False,\n 'invoice_line_tax_ids': [(6, 0, self.taxes_id.ids)],\n 'account_analytic_id': self.account_analytic_id.id,\n 'analytic_tag_ids': [(6, 0, self.analytic_tag_ids.ids)],\n }\n return res", "def _prepare_invoice_line(self, inv_id):\n res = {}\n account_id = self.product_id.property_account_income_id.id or self.product_id.categ_id.property_account_income_categ_id.id\n if not account_id:\n raise UserError(_('Please define income account for this product: \"%s\" (id:%d).') % \\\n (self.product_id.name, self.product_id.id,))\n price_unit = self.product_id.lst_price\n res = {\n 'invoice_id': inv_id.id,\n 'name': self.name,\n 'origin': self.order_id.name,\n 'account_id': account_id,\n 'uom_id': self.product_uom_id.id,\n 'quantity': self.product_uom_qty,\n 'price_unit': price_unit,\n 'product_id': self.product_id.id,\n 'invoice_line_tax_id': False,\n 'order_line_id': self.id\n }\n return res", "def _prepare_invoice_lines(self, exchange_line, order_line):\n invoice_type = {\n 'sale.order.line': {\n 'higher': 'out_invoice', 'lower': 'out_refund',\n 'type': 'sale', 'field': 'exchange_sale_line_id'\n },\n 'purchase.order.line': {\n 'higher': 'in_invoice', 'lower': 'in_refund',\n 'type': 'purchase', 'field': 'exchange_purchase_line_id'\n },\n }\n product = exchange_line.exchange_product_id or exchange_line.product_id\n data = {\n 'invoice_type': False,\n 'values': {\n 'product_id': product.id,\n 'quantity': exchange_line.quantity,\n 'name': 'Exchange for [%s]' % exchange_line.product_id.display_name,\n }\n }\n if exchange_line.exchange_product_id or \\\n exchange_line.price_subtotal > order_line.price_subtotal:\n data['invoice_type'] = invoice_type[order_line._name]['higher']\n elif exchange_line.price_subtotal < order_line.price_subtotal:\n data['invoice_type'] = invoice_type[order_line._name]['lower']\n else:\n return {}\n data[invoice_type[order_line._name]['type']] = order_line.order_id\n data['values'][invoice_type[order_line._name]['field']] = order_line.id\n data['values']['price_unit'] = exchange_line.price_unit\n # TODO i think we should take the different between prices NOT the all price\n # abs(exchange_line.price_unit - order_line.price_unit)\n return data", "def _prepare_add_missing_fields(self, values):\n res = {}\n onchange_fields = ['name', 'price_unit', 'product_uom', 'tax_id']\n if values.get('order_id') and values.get('product_id') and any(f not in values for f in onchange_fields):\n line = self.new(values)\n line.product_id_change()\n for field in onchange_fields:\n if field not in values:\n res[field] = line._fields[field].convert_to_write(line[field], line)\n res['init_qty'] = values.get('product_uom_qty')\n 
_logger.debug(\"********************* dropship_portal\\sale_order res **********************: %r\", res)\n return res", "def _prepare_invoice_line(self, qty):\n self.ensure_one()\n res = super(SaleOrderLine, self)._prepare_invoice_line(qty)\n\n res.update({\n 'cost_center_id': self.cost_center_id and self.cost_center_id.id or False\n })\n return res", "def create_invoice(self):\n sales_tax = 0.06\n item_sum = 0\n inv = f'Invoice#: {self.invoice_id}\\n'\n for key, value in self.items_with_price.items():\n item_sum += value\n inv += f'{key}.....${value:.2f}\\n'\n\n tax = item_sum * sales_tax\n inv += f'Tax.....${tax:.2f}\\n'\n inv += f'Total.....${tax + item_sum:.2f}'\n # print(inv)\n # returning for unit testing purposes\n return inv", "def _prepare_invoice_line(self, cr, uid, group, picking, move_line, invoice_id,\n invoice_vals, context=None):\n result = super(stock_picking, self)._prepare_invoice_line(cr, uid, group, picking, move_line, invoice_id,\n invoice_vals, context=None)\n \n result['discount'] = self._get_discount_invoice(cr, uid, move_line)\n result['discount2'] = self._get_discount2_invoice(cr, uid, move_line)\n result['price_unit'] = self._get_price_unit_invoice(cr, uid, move_line, invoice_vals['type'], context=None)\n return result", "def create_sale_order_line_vals_amazon(self,order_line,qty_price_dict,tax_id,amazon_product=False,odoo_product=False,amazon_order=False,instance=False,title=False):\n sale_order_line = self.env['sale.order.line']\n# new_record=self.env['sale.order.line'].new({'order_id':amazon_order.id,\n# 'company_id':amazon_order.company_id.id,\n# 'product_id':amazon_product and amazon_product.product_id.id or odoo_product and odoo_product.id or False,\n# 'product_uom':amazon_product and amazon_product.product_tmpl_id.uom_id or odoo_product and odoo_product.product_tmpl_id.uom_id,\n# 'name':title\n# })\n# new_record.product_id_change()\n# order_vals=new_record._convert_to_write({name: new_record[name] for name in new_record._cache}) \n# \n# order_qty=qty_price_dict.get('order_qty')\n# order_vals.update({\n# 'product_uom_qty' : order_qty,\n# 'amazon_order_qty':order_line.get('QuantityOrdered',{}).get('value',0.0),\n# 'price_unit' : qty_price_dict.get('amount_per_unit'),\n# 'customer_lead' :amazon_product and amazon_product.sale_delay or False,\n# 'invoice_status' : False,\n# 'state' : 'draft',\n# 'amazon_order_item_id':order_line.get('OrderItemId',{}).get('value'),\n# 'discount':0.0,\n# 'amazon_product_id':amazon_product and amazon_product.id or False,\n# 'product_uom':new_record.product_uom.id,\n# 'producturl':\"%s%s\"%(instance.producturl_prefix or '',order_line.getvalue(\"ASIN\", \"value\"))\n# }) \n\n vals = ({\n 'order_id':amazon_order.id,\n 'product_id':amazon_product and amazon_product.product_id.id or odoo_product and odoo_product.id or False,\n 'company_id':amazon_order.company_id.id,\n 'description':title,\n 'order_qty':qty_price_dict.get('order_qty'),\n 'price_unit':qty_price_dict.get('amount_per_unit'),\n 'discount':0.0,\n 'product_uom':amazon_product and amazon_product.product_tmpl_id.uom_id or odoo_product and odoo_product.product_tmpl_id.uom_id\n }) \n order_vals = sale_order_line.create_sale_order_line_ept(vals)\n \n order_vals.update({\n 'amazon_order_qty':order_line.get('QuantityOrdered',{}).get('value',0.0),\n 'customer_lead' :amazon_product and amazon_product.sale_delay or False,\n 'invoice_status' : False,\n 'amazon_order_item_id':order_line.get('OrderItemId',{}).get('value'),\n 'amazon_product_id':amazon_product and amazon_product.id or 
False,\n 'producturl':\"%s%s\"%(instance.producturl_prefix or '',order_line.getvalue(\"ASIN\", \"value\"))\n })\n return order_vals", "def _create_invoice(self):\n self.ensure_one()\n partner = self.member_id.partner_id\n invoice = self.env['account.invoice'].create({\n 'partner_id': partner.id,\n 'account_id': partner.property_account_receivable_id.id,\n 'fiscal_position_id': partner.property_account_position_id.id\n })\n for line in self.line_ids:\n product = line.activity_id.product_id\n # Handling of invoice lines : needs cache record for onchange, then\n # real writing...\n invoice_line = self.env['account.invoice.line'].new({\n 'product_id': product.id,\n 'invoice_id': invoice.id\n })\n invoice_line._onchange_product_id()\n line_values = dict(invoice_line._cache)\n line_values['price_unit'] = line.price\n invoice_line = self.env['account.invoice.line'].create(line_values)\n invoice.compute_taxes()\n line.registration_id.invoice_line_id = invoice_line.id\n return invoice", "def _prepare_order_line_invoice_line(self, cr, uid, line, account_id=False, context=None):\n res = super(sale_order_line, self)._prepare_order_line_invoice_line(cr, uid, line, account_id=account_id, context=context)\n \n res.update({'part_number': line.part_number, 'internal_part_number' : line.internal_part_number})\n return res", "def create(self, vals):\n res = super(SaleOrder, self).create(vals)\n if not vals.get('fiscal_position'):\n fiscal_position = self._get_fiscal_position(\n res.partner_shipping_id)\n if fiscal_position:\n res.fiscal_position = fiscal_position\n return res", "def action_create_invoices(self, data):\n invoice_obj = self.env['account.invoice']\n values = {}\n for val in data:\n values.setdefault(val['invoice_type'], {\n 'order': val.get('sale', val.get('purchase')),\n 'values': []\n })\n values[val['invoice_type']]['values'].append((0, 0, val['values']))\n\n for inv_type, inv_data in values.items():\n invoice = invoice_obj.new(self._prepare_invoice(inv_type))\n invoice._onchange_partner_id()\n inv = invoice._convert_to_write({\n name: invoice[name] for name in invoice._cache\n })\n for _, _, line in inv_data['values']:\n line['account_id'] = inv['account_id']\n inv['invoice_line_ids'] = inv_data['values']\n new_invoice = invoice_obj.sudo().create(inv)\n new_invoice.action_invoice_open()\n inv_data['order'].write({\n 'exchange_invoice_ids': [(4, new_invoice.id)]\n })", "def create_invoices(self, cr, uid, ids, context=None):\n invoice_list = []\n po_obj = self.pool.get('purchase.order')\n inv_line_obj = self.pool.get('account.invoice.line')\n inv_obj = self.pool.get('account.invoice')\n addr_obj = self.pool.get('res.partner')\n journal_obj = self.pool.get('account.journal')\n if context is None:\n context = {}\n\n for purchase_adv_obj in self.browse(cr, uid, ids, context=context):\n for purchase_order in po_obj.browse(cr, uid, context.get('active_ids', []), context=context):\n inv_line_ids = []\n invoice_ids = []\n val = inv_line_obj.product_id_change(cr, uid, [], purchase_adv_obj.product_id.id,\n uom_id=False, partner_id=purchase_order.partner_id.id, fposition_id=purchase_order.fiscal_position.id)\n line_id = inv_line_obj.create(cr, uid, {\n 'name': val['value']['name'],\n 'account_id': val['value']['account_id'],\n 'price_unit': purchase_adv_obj.amount,\n 'quantity': purchase_adv_obj.qtty,\n 'discount': False,\n 'uos_id': val['value']['uos_id'],\n 'product_id': purchase_adv_obj.product_id.id,\n 'invoice_line_tax_id': [(6, 0, val['value']['invoice_line_tax_id'])],\n })\n 
inv_line_ids.append(line_id)\n addr = addr_obj.address_get(cr, uid, [purchase_order.partner_id.id], ['invoice'])\n journal_ids = journal_obj.search(cr, uid, [('type', '=', 'purchase')])\n context.update({'type':'in_invoice','journal_type':'purchase'})\n inv_vals = {\n 'name': purchase_order.partner_ref or purchase_order.name,\n 'origin': purchase_order.name,\n 'type': 'in_invoice',\n 'reference': False,\n 'account_id': purchase_order.partner_id.property_account_payable.id,\n 'journal_id':journal_ids and journal_ids[0] or False,\n 'partner_id': purchase_order.partner_id.id,\n 'address_invoice_id': addr['invoice'],\n 'invoice_line': [(6, 0, inv_line_ids)],\n 'currency_id': purchase_order.pricelist_id.currency_id.id,\n 'comment': '',\n 'payment_term': purchase_order.payment_term_id and purchase_order.payment_term_id.id or False,\n 'fiscal_position': purchase_order.fiscal_position.id or purchase_order.partner_id.property_account_position.id,\n 'prepaid': True\n }\n\n inv_id = inv_obj.create(cr, uid, inv_vals, context=context)\n inv_obj.button_reset_taxes(cr, uid, [inv_id], context=context)\n for invoice in purchase_order.invoice_ids:\n invoice_ids.append(invoice.id)\n invoice_ids.append(inv_id)\n po_obj.write(cr, uid, purchase_order.id, {'invoice_ids': [(6, 0, invoice_ids)]})\n invoice_list.append(inv_id)\n\n if purchase_order.invoice_method in ('picking','order'):\n self.pool.get('purchase.order.line').create(cr, uid, {\n 'order_id': purchase_order.id,\n 'name': val['value']['name'],\n 'date_planned':purchase_order.date_order,\n 'price_unit': -purchase_adv_obj.amount,\n 'product_uom_qty': purchase_adv_obj.qtty,\n 'product_uos': val['value']['uos_id'],\n 'product_uom': val['value']['uos_id'],\n 'product_id': purchase_adv_obj.product_id.id,\n 'adavance_product':True,\n 'discount': False,\n 'taxes_id': [(6, 0, val['value']['invoice_line_tax_id'])],\n }, context=context)\n\n\n context.update({'invoice_id':invoice_list})\n return {\n 'name': 'Open Invoice',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'purchase.open.invoice',\n 'type': 'ir.actions.act_window',\n 'target': 'new',\n 'context': context\n }", "def _generate_valuation_lines_data(self, partner_id, qty, debit_value, credit_value, debit_account_id, credit_account_id, description):\n self.ensure_one()\n\n rslt = super(StockMove, self)._generate_valuation_lines_data(partner_id, qty, debit_value, credit_value, debit_account_id, credit_account_id, description)\n if self.purchase_line_id:\n purchase_currency = self.purchase_line_id.currency_id\n if purchase_currency != self.company_id.currency_id:\n # Do not use price_unit since we want the price tax excluded. 
And by the way, qty\n # is in the UOM of the product, not the UOM of the PO line.\n purchase_price_unit = (\n self.purchase_line_id.price_subtotal / self.purchase_line_id.product_uom_qty\n if self.purchase_line_id.product_uom_qty\n else self.purchase_line_id.price_unit\n )\n currency_move_valuation = purchase_currency.round(purchase_price_unit * abs(qty))\n rslt['credit_line_vals']['amount_currency'] = rslt['credit_line_vals']['credit'] and -currency_move_valuation or currency_move_valuation\n rslt['credit_line_vals']['currency_id'] = purchase_currency.id\n rslt['debit_line_vals']['amount_currency'] = rslt['debit_line_vals']['credit'] and -currency_move_valuation or currency_move_valuation\n rslt['debit_line_vals']['currency_id'] = purchase_currency.id\n return rslt", "def _prepare_refund(self, invoice, date_invoice=None, date=None, description=None, journal_id=None):\n values = super(AccountInvoice, self)._prepare_refund(invoice, date_invoice, date, description, journal_id)\n if invoice.payment_term_id:\n values['payment_term_id'] = invoice.payment_term_id.id\n elif invoice.partner_id.property_payment_term_id:\n values['payment_term_id'] = invoice.partner_id.property_payment_term_id.id\n return values", "def _prepare_picking_values(self):\r\n return {\r\n 'origin': self.doc_num,\r\n 'company_id': self.company_id.id,\r\n 'move_type': 'direct',\r\n 'partner_id': self.partner_id.id,\r\n 'picking_type_id': self.picking_type_id.id,\r\n 'location_id': self.location_id.id,\r\n 'location_dest_id': self.location_dest_id.id,\r\n 'picking_type_code': self.request_type_code\r\n }", "def _set_additional_po_order_fields(self, invoice):\n\t\tpass", "def action_invoice_create(self, grouped=False, final=False):\n inv_obj = self.env['account.invoice']\n precision = self.env['decimal.precision'].sudo().precision_get('Product Unit of Measure')\n invoices = {}\n references = {}\n for order in self:\n group_key = order.id if grouped else (order.partner_invoice_id.id, order.currency_id.id)\n for line in order.order_line.sorted(key=lambda l: l.qty_to_invoice < 0):\n if float_is_zero(line.qty_to_invoice, precision_digits=precision):\n continue\n if group_key not in invoices:\n inv_data = order._prepare_invoice()\n invoice = inv_obj.sudo().create(inv_data)\n references[invoice] = order\n invoices[group_key] = invoice\n invoice['sale_order_id'] = order.id\n elif group_key in invoices:\n vals = {}\n if order.name not in invoices[group_key].origin.split(', '):\n vals['origin'] = invoices[group_key].origin + ', ' + order.name\n if order.client_order_ref and order.client_order_ref not in invoices[group_key].name.split(\n ', ') and order.client_order_ref != invoices[group_key].name:\n vals['name'] = invoices[group_key].name + ', ' + order.client_order_ref\n invoices[group_key].sudo().write(vals)\n if line.qty_to_invoice > 0:\n line.invoice_line_create(invoices[group_key].id, line.qty_to_invoice)\n elif line.qty_to_invoice < 0 and final:\n line.invoice_line_create(invoices[group_key].id, line.qty_to_invoice)\n\n if references.get(invoices.get(group_key)):\n if order not in references[invoices[group_key]]:\n references[invoices[group_key]] |= order\n if not invoices:\n raise UserError(_('There is no invoiceable line.'))\n for invoice in invoices.values():\n if not invoice.invoice_line_ids:\n raise UserError(_('There is no invoiceable line.'))\n # If invoice is negative, do a refund invoice instead\n if invoice.amount_untaxed < 0:\n invoice.type = 'out_refund'\n for line in invoice.invoice_line_ids:\n line.quantity = 
-line.quantity\n # Use additional field helper function (for account extensions)\n for line in invoice.invoice_line_ids:\n line._set_additional_fields(invoice)\n # Necessary to force computation of taxes. In account_invoice, they are triggered\n # by onchanges, which are not triggered when doing a create.\n invoice.compute_taxes()\n invoice.message_post_with_view('mail.message_origin_link',\n values={'self': invoice, 'origin': references[invoice]},\n subtype_id=self.env.ref('mail.mt_note').id)\n return [inv.id for inv in invoices.values()]", "def onchange_invoice(self):\n self.product_id = False\n self.date = self.invoice.date_invoice\n self.name = (self.invoice and self.invoice.reference) or ''\n self.analytic_account_id = False\n self.unit_amount = self.invoice.residual\n self.quantity = 1\n self.total_amount = self.unit_amount", "def _generate_valuation_lines_data(self, partner_id, qty, debit_value, credit_value, debit_account_id, credit_account_id):\n self.ensure_one()\n\n rslt = super(StockMove, self)._generate_valuation_lines_data(partner_id, qty, debit_value, credit_value, debit_account_id, credit_account_id)\n if self.subcontract_line_id:\n subcontract_currency = self.subcontract_line_id.currency_id\n if subcontract_currency != self.company_id.currency_id:\n subcontract_price_unit = self.subcontract_line_id.price_unit\n currency_move_valuation = subcontract_currency.round(subcontract_price_unit * abs(qty))\n rslt['credit_line_vals']['amount_currency'] = rslt['credit_line_vals']['credit'] and -currency_move_valuation or currency_move_valuation\n rslt['credit_line_vals']['currency_id'] = subcontract_currency.id\n rslt['debit_line_vals']['amount_currency'] = rslt['debit_line_vals']['credit'] and -currency_move_valuation or currency_move_valuation\n rslt['debit_line_vals']['currency_id'] = subcontract_currency.id\n return rslt", "def _prepare_inv_line(self, cr, uid, account_id, order_line, context=None):\n result = super(purchase_order, self)._prepare_inv_line(cr, uid, account_id, order_line, context=None)\n result['discount2'] = order_line.discount2 or 0.0\n return result", "def create(self, values):\n if values.get('name', _('New')) == _('New'):\n values['name'] = self.env['ir.sequence'].next_by_code('order.reference',\n None) or _('New')\n values['marks'] = values['name']\n customer_code = ''\n if values.get('customer_id'):\n customer = self.env['res.partner'].browse(values.get('customer_id'))\n customer_code = customer.customer_code\n if values.get('marks'):\n marks_field = values.get('marks')\n else:\n marks_field = ' '\n\n values['marks'] = '%s %s %s' % (customer_code, values['name'], marks_field)\n return super(PurchaseOrder, self).create(values)" ]
[ "0.74100953", "0.73465693", "0.73403376", "0.7328195", "0.7002523", "0.69652605", "0.68501127", "0.67724967", "0.6712132", "0.665417", "0.6562301", "0.6495313", "0.6490809", "0.6431741", "0.6104436", "0.6072918", "0.59833795", "0.59129834", "0.5863059", "0.5860928", "0.5786957", "0.5784383", "0.57769054", "0.573299", "0.57278657", "0.57132304", "0.5708501", "0.5666309", "0.5660624", "0.56384367" ]
0.76020634
0
The function prepares the images for our model based on a given video
def prepare_video(path_to_video: str, number_of_images=87) -> None:
    temp_video = path.join(path_to_video, 'temp_outpy.mp4')
    video = path.join(path_to_video, 'outpy.h264')
    # create mp4 video for metadata and compute video duration
    subprocess.run(['ffmpeg', '-i', video, '-c', 'copy', temp_video])
    result = subprocess.run(["ffprobe", "-v", "error", "-show_entries", "format=duration",
                             "-of", "default=noprint_wrappers=1:nokey=1", temp_video],
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    video_duration = float(result.stdout)
    # create images folder
    path_to_images = path.join(path_to_video, 'images')
    if path.exists(path_to_images) and path.isdir(path_to_images):
        shutil.rmtree(path_to_images)
    makedirs(path_to_images)
    # split the given video into images
    subprocess.run(['ffmpeg', '-i', temp_video, '-r', str(number_of_images / video_duration),
                    '-f', 'image2', path.join(path_to_images, 'image%d.jpg')])
    # remove extra files
    remove_extra_images(path_to_images, number_of_images)
    remove(temp_video)
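For context, the imports the snippet relies on (inferred from its calls) and a hypothetical invocation; remove_extra_images is a project helper that is not part of this record and is assumed to trim the frame directory down to number_of_images files:

import shutil
import subprocess
from os import path, makedirs, remove

# Expects <dir>/outpy.h264 to exist; leaves ~87 frames in <dir>/images/.
prepare_video("/data/videos/sample_01", number_of_images=87)  # hypothetical path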
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_train_video(opt, frame_path, Total_frames):\n clip = []\n i = 0\n loop = 0\n\n # choosing a random frame\n if Total_frames <= opt.sample_duration: \n loop = 1\n start_frame = 0\n else:\n start_frame = np.random.randint(0, Total_frames - opt.sample_duration)\n \n if opt.modality == 'RGB': \n while len(clip) < opt.sample_duration:\n try:\n im = Image.open(os.path.join(frame_path, '%05d.jpg'%(start_frame+i+1)))\n clip.append(im.copy())\n im.close()\n except:\n print('ERROR no such image {}'.format(os.path.join(frame_path, '%05d.jpg'%(i+1))))\n i += 1\n \n if loop==1 and i == Total_frames:\n i = 0\n\n elif opt.modality == 'Flow': \n while len(clip) < 2*opt.sample_duration:\n try:\n im_x = Image.open(os.path.join(frame_path, 'TVL1jpg_x_%05d.jpg'%(start_frame+i+1)))\n im_y = Image.open(os.path.join(frame_path, 'TVL1jpg_y_%05d.jpg'%(start_frame+i+1)))\n clip.append(im_x.copy())\n clip.append(im_y.copy())\n im_x.close()\n im_y.close()\n except:\n pass\n i += 1\n \n if loop==1 and i == Total_frames:\n i = 0\n \n elif opt.modality == 'RGB_Flow':\n while len(clip) < 3*opt.sample_duration:\n try:\n im = Image.open(os.path.join(frame_path, '%05d.jpg'%(start_frame+i+1)))\n im_x = Image.open(os.path.join(frame_path, 'TVL1jpg_x_%05d.jpg'%(start_frame+i+1)))\n im_y = Image.open(os.path.join(frame_path, 'TVL1jpg_y_%05d.jpg'%(start_frame+i+1)))\n clip.append(im.copy())\n clip.append(im_x.copy())\n clip.append(im_y.copy())\n im.close()\n im_x.close()\n im_y.close()\n except:\n pass\n i += 1\n \n if loop==1 and i == Total_frames:\n i = 0\n return clip", "def _convert_video2img(self, filename): # Added filename variable\n video = GetFrames(self.root + f\"/Videos/{filename}\", self.root + \"/Images\")\n video.get_frame_names()\n frames = video.frame_names()\n\n\n with concurrent.futures.ThreadPoolExecutor() as executor:\n executor.map(video.save_frames, frames)\n video.subfolders()\n os.chdir(self.cwd)\n print(\"Video 2 Image conversion --> DONE\")", "def make_images_from_video(video_name, video_dir, out_dir, limit=None):\n video_path = f\"{video_dir}/{video_name}\"\n video_name = os.path.basename(video_path)\n vidcap = cv2.VideoCapture(video_path)\n print(video_path)\n frame = 0\n while True:\n it_worked, img = vidcap.read()\n if not it_worked:\n break\n frame += 1\n # print(frame)\n image_path = f\"{out_dir}/{video_name}\".replace(\".mp4\", f\"_{frame}.png\")\n success = cv2.imwrite(image_path, img)\n if not success:\n raise ValueError(\"couldn't write image successfully\")\n if limit and frame > limit:\n print(f\"Made maximum: {limit} frames\")\n break", "def process_video(self):\n if os.path.isfile(self.source):\n self.cap = cv2.VideoCapture(self.source)\n else:\n try:\n file_name = \"input.mp4\"\n self.source = self.source.replace('open', 'uc')\n print( \"\\nDownloading video file from drive link to %s\\n\"%file_name)\n gdown.download(self.source, file_name, quiet=False)\n print( \"%s downloaded!\\n\"%file_name )\n self.cap = cv2.VideoCapture(file_name)\n except Exception:\n raise RuntimeError(\"Invalid source input, please specify a Google drive link or a downloaded local file as input \\n\")\n\n\n assert self.cap.isOpened(), \"Failed to open %s\" % self.source\n\n self.w = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n self.h = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n self.fps = self.cap.get(cv2.CAP_PROP_FPS) \n self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))\n return", "def tagVideo(modelpath, videopath, outputPath=None): \n model = get_model_instance_segmentation(3)\n device = 
torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n # model.load_state_dict(torch.load(modelpath, map_location=device), strict=False)\n model.load_state_dict(torch.load(modelpath, map_location=device))\n model = model.to(device)\n model.eval()\n\n \n data_transform = transforms.Compose([\n ToPILImage(),\n transforms.ToTensor(), \n ])\n\n\n if outputPath:\n writer = FFmpegWriter(str(outputPath))\n \n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.namedWindow('main', cv2.WINDOW_NORMAL)\n labels = ['No mask', 'Mask']\n labelColor = [(10, 0, 255), (10, 255, 0)]\n img_count = 0\n outputDir = os.path.dirname(os.path.realpath(outputPath))\n frame_count = 0\n boundingBoxes = []\n for frame in vreader(str(videopath)):\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n print('Frame:', frame_count)\n\n if frame_count%30==0:\n frameTensor = data_transform(frame)\n frameTensor = torch.unsqueeze(frameTensor, 0).to(device)\n output = model(frameTensor)\n boundingBoxes = plot_image_new(frame, frameTensor[0], output[0]) \n \n if len(boundingBoxes)>0:\n for bb in boundingBoxes:\n cv2.rectangle(frame,\n (bb[0], bb[1]),\n (bb[2], bb[3]),\n (54, 66, 227),\n thickness=2)\n\n cv2.imshow('main', frame)\n if outputPath:\n writer.writeFrame(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n frame_count += 1\n if outputPath:\n writer.close()\n cv2.destroyAllWindows()", "def video2img(video, csv, output_path, match):\n with open(csv, 'r') as file:\n lines = file.readlines()[1:]\n\n csv_content = []\n for line in lines:\n frame, vis, x, y = line.strip().split(',')\n csv_content.append((int(frame), int(vis), float(x), float(y)))\n\n name_split = os.path.split(video)\n name = \"match%d\"%(match) + '_' + name_split[-1][:-4]\n\n count = 0\n num_data = len(csv_content)\n cap = cv2.VideoCapture(video)\n success, image = cap.read()\n ratio = image.shape[0]/HEIGHT\n while success:\n if count >= num_data:\n break\n label = csv_content[count]\n if label[1] == 0:\n heat_map = genHeatMap(WIDTH, HEIGHT, -1, -1, sigma, mag)\n else:\n heat_map = genHeatMap(WIDTH, HEIGHT, int(label[2]/ratio), int(label[3]/ratio), sigma, mag)\n \n image = cv2.resize(image, (WIDTH, HEIGHT))\n heat_map = (heat_map*255).astype('uint8')\n cv2.imwrite(os.sep.join([output_path, 'x_data', name+'_%d.jpg' %(count)]), image)\n cv2.imwrite(os.sep.join([output_path, 'y_data', name+'_%d.jpg' %(count)]), heat_map)\n success, image = cap.read()\n count += 1", "def generate_images(video_path, index_first, index_second):\n cap = cv2.VideoCapture(video_path)\n cap.set(cv2.CAP_PROP_POS_FRAMES, index_first)\n success, img = cap.read()\n cv2.imwrite(os.path.join(data_folder, 'demo_single_first.png'), img)\n cap.set(cv2.CAP_PROP_POS_FRAMES, index_second)\n success, img = cap.read()\n cv2.imwrite(os.path.join(data_folder, 'demo_single_second.png'), img)", "def run(self):\n\n for file_cnt, file_path in enumerate(self.files_found):\n video_timer = SimbaTimer()\n video_timer.start_timer()\n _, self.video_name, _ = get_fn_ext(file_path)\n self.video_info, self.px_per_mm, self.fps = self.read_video_info(\n video_name=self.video_name\n )\n self.width, self.height = int(\n self.video_info[\"Resolution_width\"].values[0]\n ), int(self.video_info[\"Resolution_height\"].values[0])\n if self.video_setting:\n self.fourcc = cv2.VideoWriter_fourcc(*Formats.MP4_CODEC.value)\n self.video_save_path = os.path.join(\n self.heatmap_clf_location_dir, self.video_name + \".mp4\"\n )\n self.writer = cv2.VideoWriter(\n self.video_save_path,\n 
self.fourcc,\n self.fps,\n (self.width, self.height),\n )\n if self.frame_setting:\n self.save_video_folder = os.path.join(\n self.heatmap_clf_location_dir, self.video_name\n )\n if not os.path.exists(self.save_video_folder):\n os.makedirs(self.save_video_folder)\n self.data_df = read_df(file_path=file_path, file_type=self.file_type)\n clf_array, aspect_ratio = self.__calculate_bin_attr(\n data_df=self.data_df,\n clf_name=self.clf_name,\n bp_lst=self.bp_lst,\n px_per_mm=self.px_per_mm,\n img_width=self.width,\n img_height=self.height,\n bin_size=self.bin_size,\n fps=self.fps,\n )\n\n if self.max_scale == \"auto\":\n self.max_scale = self.__calculate_max_scale(clf_array=clf_array)\n if self.max_scale == 0:\n self.max_scale = 1\n\n if self.final_img_setting:\n self.make_clf_heatmap_plot(\n frm_data=clf_array[-1, :, :],\n max_scale=self.max_scale,\n palette=self.palette,\n aspect_ratio=aspect_ratio,\n file_name=os.path.join(\n self.heatmap_clf_location_dir,\n self.video_name + \"_final_frm.png\",\n ),\n shading=self.shading,\n clf_name=self.clf_name,\n img_size=(self.width, self.height),\n final_img=True,\n )\n\n if self.video_setting or self.frame_setting:\n for frm_cnt, cumulative_frm_idx in enumerate(range(clf_array.shape[0])):\n frm_data = clf_array[cumulative_frm_idx, :, :]\n cum_df = pd.DataFrame(frm_data).reset_index()\n cum_df = cum_df.melt(\n id_vars=\"index\",\n value_vars=None,\n var_name=None,\n value_name=\"seconds\",\n col_level=None,\n ).rename(\n columns={\"index\": \"vertical_idx\", \"variable\": \"horizontal_idx\"}\n )\n cum_df[\"color\"] = (\n (cum_df[\"seconds\"].astype(float) / float(self.max_scale))\n .round(2)\n .clip(upper=100)\n )\n color_array = np.zeros(\n (\n len(cum_df[\"vertical_idx\"].unique()),\n len(cum_df[\"horizontal_idx\"].unique()),\n )\n )\n for i in range(color_array.shape[0]):\n for j in range(color_array.shape[1]):\n value = cum_df[\"color\"][\n (cum_df[\"horizontal_idx\"] == j)\n & (cum_df[\"vertical_idx\"] == i)\n ].values[0]\n color_array[i, j] = value\n\n fig = plt.figure()\n im_ratio = color_array.shape[0] / color_array.shape[1]\n plt.pcolormesh(\n color_array,\n shading=self.shading,\n cmap=self.palette,\n rasterized=True,\n alpha=1,\n vmin=0.0,\n vmax=float(self.max_scale),\n )\n plt.gca().invert_yaxis()\n plt.xticks([])\n plt.yticks([])\n plt.axis(\"off\")\n plt.tick_params(axis=\"both\", which=\"both\", length=0)\n cb = plt.colorbar(pad=0.0, fraction=0.023 * im_ratio)\n cb.ax.tick_params(size=0)\n cb.outline.set_visible(False)\n cb.set_label(\n \"{} (seconds)\".format(self.clf_name), rotation=270, labelpad=10\n )\n plt.tight_layout()\n plt.gca().set_aspect(aspect_ratio)\n canvas = FigureCanvas(fig)\n canvas.draw()\n mat = np.array(canvas.renderer._renderer)\n image = cv2.cvtColor(mat, cv2.COLOR_RGB2BGR)\n image = cv2.resize(image, (self.width, self.height))\n image = np.uint8(image)\n plt.close()\n\n if self.video_setting:\n self.writer.write(image)\n if self.frame_setting:\n frame_save_path = os.path.join(\n self.save_video_folder, str(frm_cnt) + \".png\"\n )\n cv2.imwrite(frame_save_path, image)\n print(\n \"Created heatmap frame: {} / {}. Video: {} ({}/{})\".format(\n str(frm_cnt + 1),\n str(len(self.data_df)),\n self.video_name,\n str(file_cnt + 1),\n len(self.files_found),\n )\n )\n\n if self.video_setting:\n self.writer.release()\n\n video_timer.stop_timer()\n print(\n \"Heatmap plot for video {} saved (elapsed time: {}s) ... 
\".format(\n self.video_name, video_timer.elapsed_time_str\n )\n )\n\n self.timer.stop_timer()\n stdout_success(\n msg=\"All heatmap visualizations created in project_folder/frames/output/heatmaps_classifier_locations directory\",\n elapsed_time=\"self.timer.elapsed_time_str\",\n )", "def run_video(self, video_path):\n file, ext = os.path.splitext(video_path)\n video_name = file.split('/')[-1]\n out_filename = video_name + '_out' + '.avi'\n\n cap = cv2.VideoCapture(video_path)\n wi = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n he = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n print(wi, he)\n\n vwriter = cv2.VideoWriter(out_filename, cv2.VideoWriter_fourcc(*'MJPG'), 10, (wi, he))\n counter = 0\n fac = 2\n start = time.time()\n while True:\n ret, image = cap.read()\n\n if ret:\n counter += 1\n\n ## resize image\n\n height, width, channels = image.shape\n resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height)\n target_size = (int(resize_ratio * width), int(resize_ratio * height))\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n resized_image = cv2.resize(image, target_size, interpolation=cv2.INTER_AREA)\n output = resized_image.copy()\n\n ## get segmentation map\n batch_seg_map = self.sess.run(\n self.OUTPUT_TENSOR_NAME,\n feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(resized_image)]})\n seg_map = batch_seg_map[0]\n\n ## visualize\n seg_image = label_to_color_image(seg_map).astype(np.uint8)\n\n ## overlay on image\n alpha = 0.7\n cv2.addWeighted(seg_image, alpha, output, 1 - alpha, 0, output)\n\n output = cv2.resize(output, (wi, he), interpolation=cv2.INTER_AREA)\n # outimg = 'image_' + str(counter) + '.jpg'\n # cv2.imwrite(os.path.join(os.getcwd(), 'test_out', outimg),output)\n vwriter.write(output)\n else:\n break\n\n end = time.time()\n print(\"Frames and Time Taken: \", counter, end - start)\n cap.release()\n vwriter.release()", "def let_camera_update_parameters(path_to_images, name_image, video_source=\"/dev/video0\"):\n subprocess_cmd(\"ffmpeg -f video4linux2 -s 1280x720 -i {} -ss 00:00:02 -frames 1 ./{}/{} -loglevel error -nostats\".format(video_source, path_to_images, name_image))", "def imageMode(modename,movielist=[1]):\n\n moviedeq = []\n i2 = 0 \n for filenumber in movielist:\n cap = cv2.VideoCapture(glob.glob(\"*_\" + str(filenumber) + \".avi\")[0])\n ret,frame = cap.read()\n storedFrame = grayBlur(frame)\n totalFrames = 0\n while(cap.isOpened()):\n ret,frame = cap.read()\n if ret == False:\n break\n currentFrame = grayBlur(frame)\n if totalFrames < 50:\n if totalFrames % 3 == 0:\n moviedeq.append(currentFrame)\n totalFrames += 1\n storedFrame = currentFrame \n i2 += 1\n testing = calc_mode(moviedeq, np.zeros([ydim,xdim]))\n cv2.imwrite(\"mode_\" + modename + \".png\", testing)\n cap.release()\n cv2.destroyAllWindows()", "def setVidFrame(self, ori_images):\n self.cleanThread()\n if ori_images == 0:\n logging.critical(\"Video Image number 0\")\n else:\n self.buildRunDictMain(ori_images)", "def __init__(self, data_path, batch_size, video_size, mode=\"first80\"):\n self._batch_size = batch_size\n self._video_size = video_size\n\n\n # KTH video splits \n splits = [[11, 12, 13, 14, 15, 16, 17, 18], # train\n [19, 20, 21, 23, 24, 25, 1, 4], # validation\n [22, 2, 3, 5, 6, 7, 8, 9, 10]] # test\n \n label_mapping = {\"boxing\":0,\n \"handclapping\":1, \n \"handwaving\":2,\n \"jogging\":3,\n \"running\":4,\n \"walking\":5}\n self._num_classes = len(label_mapping)\n\n # file containing KTH video frame clip intervals\n sequence_list = os.path.join(data_path, \"00sequences.txt\")\n sequences 
= self._read_sequence_list(sequence_list)\n \n \n # clip and labels for each split, will be converted into [np.arrays()] format\n self._clips = [[] for _ in range(3)] # resized videos\n self._labels = [[] for _ in range(3)] # labels\n self._fns = [[] for _ in range(3)] # file names\n # read video into np array and create label according to splits \n for video_file in glob.glob(os.path.join(data_path, \"*.avi\")):\n fn = os.path.basename(video_file)\n fn = fn[0:len(fn) - 4]\n \n video = load_video(video_file, self._video_size)\n person_index = int(fn.split(\"_\")[0][-2:len(fn.split(\"_\")[0])])\n split = [i for i, j in enumerate(splits) if person_index in j][0]\n label = label_mapping[fn.split(\"_\")[1]]\n\n # obtain clips from video\n video_key_in_sequences = \"_\".join(fn.split(\"_\")[0:len(fn.split(\"_\")) - 1])\n print video_key_in_sequences\n\n if mode == \"episodes\":\n for clip_index, clip_range in enumerate(sequences[video_key_in_sequences]):\n self._labels[split].append(np.eye(len(label_mapping))[label]) \n self._clips[split].append(video[clip_range[0] - 1:clip_range[1] - 1, :, :, :])\n self._fns[split].append(fn + \"_\" + str(clip_index))\n elif mode == \"first80\":\n self._labels[split].append(np.eye(len(label_mapping))[label]) \n self._clips[split].append(video[0:80, :, :, :])\n self._fns[split].append(fn) \n else:\n raise NotImplementedError(\"Unknown preprocess mode.\")\n\n # maximum length for all clips, limit for padding\n self._clip_length = np.array(\\\n reduce(lambda a, b: a + [elem.shape[0] for elem in b], \n self._clips, [])).max() \n\n for split in range(3):\n for clip_index, (clip, label) in \\\n enumerate(zip(self._clips[split], self._labels[split])):\n self._clips[split][clip_index] = np.pad(clip, \\\n ((0, self._clip_length - clip.shape[0]), (0, 0), (0, 0), (0, 0)),\\\n mode=\"constant\", constant_values=0)\n # shuffling\n shuffle_index = range(len(self._clips[split]))\n random.shuffle(shuffle_index)\n self._clips[split] = [self._clips[split][i] for i in shuffle_index]\n self._labels[split] = [self._labels[split][i] for i in shuffle_index]\n self._fns[split] = [self._fns[split][i] for i in shuffle_index]\n \n self._clips[split] = np.concatenate(\\\n [np.expand_dims(i, axis=0) for i in self._clips[split]]) \n self._labels[split] = np.concatenate(\\\n [np.expand_dims(i, axis=0) for i in self._labels[split]])\n\n print self._clips[0].shape\n print self._labels[0].shape\n self._batch_index = [0 for _ in range(3)]", "def process_video(video_dir, save_dir):\n for sig_vid in tqdm(find_files(video_dir, '*.{}'.format(VID_FORMAT))):\n \n vc = cv2.VideoCapture(sig_vid) \n width = int(vc.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(vc.get(cv2.CAP_PROP_FRAME_HEIGHT))\n rig_bot_height, rig_bot_width = height // 2, width // 2\n\n if rig_bot_height == 540 and rig_bot_width == 960:\n # right bottom, r_h, l_w, r_w\n iou = [390, 90, 890]\n\n elif rig_bot_height == 720 and rig_bot_width == 1280:\n log.info('high resolution video, please confirm iou param')\n\n else:\n assert 'please confirm video resolution'\n\n count = 0\n cout_save = 0\n\n while vc: \n rval, frame = vc.read() \n\n if rval == True:\n count += 1\n # fisheye extract front preview\n ext_region = frame[rig_bot_height:, rig_bot_width:]\n cv2.imshow('ori frame', ext_region)\n\n key = cv2.waitKey(0) & 0xFF\n if key == ord('q'):\n break\n\n elif key == ord('s'): \n # Interval 20 frame save \n if cout_save % 20 == 0 or cout_save > 20: \n file_name = create_files(save_dir, sig_vid)\n img_res = process_frame(ext_region, iou)\n 
cv2.imwrite(os.path.join(save_dir, file_name)+\"/\"+ file_name+\"_{}.jpg\".format(count),img_res)\n cout_save = 0\n log.info('successful save current frame {}'.format(count))\n\n else:\n cout_save += 1\n continue\n cout_save += 1\n\n else:\n # skip current frame and cout pre save frame interval\n if cout_save > 0:\n cout_save += 1\n continue\n\n else:\n break\n \n vc.release()\n cv2.destroyAllWindows()", "def main():\n # Parameters\n opt = get_args()\n\n assert os.path.exists(opt.path_video), \"Video file does not exist\"\n try:\n os.makedirs(opt.path_images)\n except Exception:\n print(\"Folder already exists. Overwriting it\")\n pass\n\n assert opt.size is None or opt.size is not None and len(opt.size) <= 2, \"Make sure the size indicated contains at maximum two numbers [none, max_dimension or width and height]\"\n\n # Get base path\n base_path = os.path.join(opt.path_images, opt.basename)\n\n # Load video from file\n try:\n cap = cv2.VideoCapture(opt.path_video)\n except Exception as e:\n print('Video failed to be loaded:', e)\n sys.exit(0)\n\n # Parse video\n parse_video(cap, base_path, opt.step, opt.size)\n\n # Release capture\n cap.release()\n cv2.destroyAllWindows()\n \n return 0", "def classify_objects(video_dict, params, paths, vid_time_length=10, make_videos=True):\n yolo_dict = {}\n\n for video_num, (name, video) in enumerate(video_dict.items()):\n\n print('Classifying video {}/{}.'.format(video_num, len(video_dict)))\n yolo_dict[name] = {}\n\n # loop over frames of video and store in lists\n obj_bounds = []\n obj_labels = []\n obj_label_confidences = []\n cap_cvlib = []\n\n for i in range(video.shape[0]):\n frame = video[i, :, :, :]\n\n # apply object detection\n bbox, label, conf = cv.detect_common_objects(frame, confidence=params['detection_confidence_threshold'],\n model=params['detection_model'])\n obj_bounds.append(bbox)\n obj_labels.append([l.replace('motorcycle', 'motorbike') for l in label])\n obj_label_confidences.append(conf)\n\n # draw bounding box over detected objects\n if make_videos:\n img_cvlib = draw_bbox(frame, bbox, label, conf)\n cap_cvlib.append(img_cvlib)\n\n # write video to local file\n if make_videos:\n cap_cvlib_npy = np.asarray(cap_cvlib)\n local_mp4_path_out = paths['processed_video'] + name\n imageio.mimwrite(local_mp4_path_out, cap_cvlib_npy, fps=int(video.shape[0] / vid_time_length))\n\n yolo_dict[name]['bounds'] = obj_bounds\n yolo_dict[name]['labels'] = obj_labels\n yolo_dict[name]['confidences'] = obj_label_confidences\n\n frame_level_df = yolo_output_df(yolo_dict)\n\n return frame_level_df", "def main():\n\t# create output file\n\tif not os.path.exists(OUTPUT_PATH):\n\t\tos.makedirs(OUTPUT_PATH)\n\n\t# init model with pre-trained weights\n\tmodel = create_model()\n\n\tmodel.load_state_dict(torch.load(PATH_PYTORCH_WEIGHTS)['state_dict'])\n\tmodel.eval()\n\n\n\t# if GPU is enabled\n\tif USE_GPU:\n\t\tmodel.cuda()\n\tvideos = os.listdir(INPUT_PATH)\n\t# load and preprocess images in folder\n\tfor y in videos[numTraining:(numTraining+numValidation)]:\n\t\tif not os.path.exists(os.path.join(OUTPUT_PATH,y)):\n\t\t\tos.makedirs(os.path.join(OUTPUT_PATH,y))\n\t\t\tfor i, name in enumerate(os.listdir(os.path.join(INPUT_PATH,y))):\n\t\t\t\tfilename = os.path.join(INPUT_PATH,y,'{:04d}.jpg'.format(i+1))\n\t\t\t\timage_tensor, image_size = load_image(filename)\n\n\t\t\t\tif USE_GPU:\n\t\t\t\t\timage_tensor = image_tensor.cuda()\n\n\t\t\t\t# run model inference\n\t\t\t\tprediction = model.forward(image_tensor[None, ...]) # add extra batch 
dimension\n\n\t\t\t\t# get result to cpu and squeeze dimensions\n\t\t\t\tif USE_GPU:\n\t\t\t\t\tprediction = prediction.squeeze().data.cpu().numpy()\n\t\t\t\telse:\n\t\t\t\t\tprediction = prediction.squeeze().data.numpy()\n\n\t\t\t\t# postprocess\n\t\t\t\t# first normalize [0,1]\n\t\t\t\tprediction = normalize_map(prediction)\n\t\t\t\tsaliency = postprocess_prediction(prediction, image_size)\n\t\t\t\tsaliency = normalize_map(saliency)\n\t\t\t\tsaliency *= 255\n\t\t\t\tsaliency = saliency.astype(np.uint8)\n\t\t\t\t# save saliency\n\n\t\t\t\tcv2.imwrite(os.path.join(OUTPUT_PATH,str(y),name), saliency)\n\t\t\t\tprint(\"Processed image {} from video {}\".format(i+1,y), end=\"\\r\")\n\t\t\t\tsys.stdout.flush()", "def process_video(weights_path,video_path,output_path,margins=40,facenet_threshold=.985,euclidean_distance_threshold = 120.0):\n with torch.no_grad():\n mtcnn = MTCNN(image_size= 256, margin = 0)\n model = Model.VGGFace_Extractor().to(device)\n model.load_state_dict(torch.load(weights_path))\n model.eval()\n cap = cv2.VideoCapture(video_path)\n rotateCode = check_rotation(video_path)\n fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')\n out = cv2.VideoWriter(output_path, fourcc, 20.0, (int(cap.get(3)), int(cap.get(4))))\n ret, frame1 = cap.read()\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n i = 0\n while (cap.isOpened()):\n i += 1\n ret, frame2 = cap.read()\n if not (ret): break\n if rotateCode is not None:\n frame2 = correct_rotation(frame2, rotateCode)\n\n boxes, probs = mtcnn.detect(frame2)\n img_draw = frame2.copy()\n img_draw = Image.fromarray(img_draw)\n draw = ImageDraw.Draw(img_draw)\n if boxes is not None:\n names = []\n distances_difference = []\n for (box, point) in zip(boxes, probs):\n \"\"\" Loop from the extract_face method from facenet_pytorch\"\"\"\n\n if point < facenet_threshold: continue\n margin = margins\n image_size = 256\n margin = [\n margin * (box[2] - box[0]) / (image_size - margin),\n margin * (box[3] - box[1]) / (image_size - margin),\n ]\n raw_image_size = get_size(img_draw)\n box = [\n int(max(box[0] - margin[0] / 2, 0)),\n int(max(box[1] - margin[1] / 2, 0)),\n int(min(box[2] + margin[0] / 2, raw_image_size[0])),\n int(min(box[3] + margin[1] / 2, raw_image_size[1])),\n ]\n\n face = img_draw.crop(box).copy().resize((image_size, image_size), Image.BILINEAR).convert(\"RGB\")\n features_1 = model(utils.preprocess(face,device).reshape(-1, 3, 224, 224))\n images_path = \"individuals_extracted/\"\n data_path = os.path.join(images_path, '*pt')\n files = glob.glob(data_path)\n name = \"Unknown\"\n best_distance = euclidean_distance_threshold + 5\n for k,f1 in enumerate(files):\n features = torch.load(f1)\n distance = utils.euclidean_distance(features,features_1)\n if distance < euclidean_distance_threshold and distance < best_distance:\n best_distance = distance\n name = re.sub('_[1-9]*[.]*[a-zA-Z]*', '', f1.replace(images_path,\"\"))\n\n names.append(name)\n distances_difference.append(best_distance)\n\n for (box, point,name,distances) in zip(boxes, probs,names,distances_difference):\n if point < facenet_threshold or name == \"Unknown\": continue\n draw.rectangle(box.tolist(), width=4)\n draw.text(box.tolist(), name, font=ImageFont.truetype(\"Keyboard.ttf\",40))\n\n k = cv2.waitKey(3) & 0xff\n if k == 27:\n break\n out.write(np.asarray(img_draw))\n\n out.release()\n cap.release()\n cv2.destroyAllWindows()", "def detect_from_video(config: Dict):\n video = config['inference']['video_input']['video_input_path']\n vp = VideoProcessing(video=video)\n 
vp.generate_frames(export_path=config['inference']['video_input']['video_to_frames_export_path'])\n if config['inference']['video_input']['video_to_frames_export_path'] == config['inference']['predicted_frames_export_path']:\n print(\"[Warning]... You have given Video to frame path same as prediction output path /nPredicted output will overwrite video to frame\")\n img_height = config['inference']['img_height']\n img_width = config['inference']['img_width']\n model = ssd_300(image_size=(img_height, img_width, 3),\n n_classes=config['inference']['n_classes'],\n mode='inference',\n l2_regularization=0.0005,\n scales=[0.1, 0.2, 0.37, 0.54, 0.71, 0.88, 1.05], # The scales for MS COCO are [0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05]\n aspect_ratios_per_layer=[[1.0, 2.0, 0.5],\n [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n [1.0, 2.0, 0.5],\n [1.0, 2.0, 0.5]],\n two_boxes_for_ar1=True,\n steps=[8, 16, 32, 64, 100, 300],\n offsets=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5],\n clip_boxes=False,\n variances=[0.1, 0.1, 0.2, 0.2],\n normalize_coords=True,\n subtract_mean=[123, 117, 104],\n swap_channels=[2, 1, 0],\n confidence_thresh=0.5,\n iou_threshold=0.45,\n top_k=200,\n nms_max_output_size=400)\n\n # Load the trained weights into the model.\n weights_path = config['inference']['weights_path']\n\n model.load_weights(weights_path, by_name=True)\n \n # Working with image\n all_images = glob.glob(f\"{config['inference']['video_input']['video_to_frames_export_path']}/*/*\")\n \n # Setting Up Prediction Threshold\n confidence_threshold = config['inference']['confidence_threshold']\n \n # Setting Up Classes (Note Should be in same order as in training)\n classes = config['inference']['classes']\n \n vp.existsFolder(f\"{config['inference']['predicted_frames_export_path']}/{video.split('.')[0]}\")\n # Working with image\n for current_img in tqdm(all_images):\n current_img_name = current_img.split('/')[-1]\n orig_image = cv2.imread(current_img)\n input_images = [] # Store resized versions of the images here\n img = image.load_img(current_img, target_size=(img_height, img_width))\n img = image.img_to_array(img) \n input_images.append(img)\n input_images = np.array(input_images)\n \n # Prediction\n y_pred = model.predict(input_images)\n\n # Using threshold\n y_pred_thresh = [y_pred[k][y_pred[k,:,1] > confidence_threshold] for k in range(y_pred.shape[0])]\n \n # Drawing Boxes\n for box in y_pred_thresh[0]:\n xmin = box[2] * orig_image.shape[1] / img_width\n ymin = box[3] * orig_image.shape[0] / img_height\n xmax = box[4] * orig_image.shape[1] / img_width\n ymax = box[5] * orig_image.shape[0] / img_height\n \n label = f\"{classes[int(box[0])]}: {box[1]:.2f}\"\n cv2.rectangle(orig_image, (int(xmin), int(ymin)), (int(xmax),int(ymax)), (255, 0, 0), 2)\n cv2.putText(orig_image, label, (int(xmin), int(ymin)), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2, cv2.LINE_AA)\n cv2.imwrite(f\"{config['inference']['predicted_frames_export_path']}/{video.split('.')[0]}/{current_img_name}\", orig_image)\n \n # Creating video\n vp.generate_video(import_path=config['inference']['predicted_frames_export_path'],\n export_path=config['inference']['video_input']['video_output_path'])", "def __init__(self, video_w, video_h, video_fps, tracking_result):\n self.font = cv2.FONT_HERSHEY_SIMPLEX\n self.fontScale = 1\n self.thickness = 2\n self.thicknessUpdate = 3\n self.color = (238, 221, 192) # A surfrider color\n self.icons = get_icons()\n self.classes_to_icons = {'bottles':self.icons[0], 
'fragments':self.icons[1], 'others':self.icons[2]}\n self.video_w = video_w\n self.video_h = video_h\n self.video_fps = video_fps\n self.tracking_result = tracking_result\n self.detection_image_size = (1024, 768)\n self.frames_to_boxes_dict = None\n self.frames_to_update_hud = None", "def makeVideo():\n os.system(\"cd video && ffmpeg -r 10 -i img%05d.jpg -vcodec mpeg4 -y caronthehill_clip.mp4\")", "def make_video(pattern, plotdir, moviedir, movienametag):\n images_list = glob('%s/%s'%(plotdir, pattern))\n images_list.sort()\n # save all required files into tmp_moviedir, with simple filenames: %.4d.png\n tmp_moviedir = '%s/tmp_movie_%s'%(plotdir, movienametag)\n os.system('mkdir -p %s'%tmp_moviedir)\n for i in range(len(images_list)):\n fname = images_list[i].split('%s/'%plotdir)[-1].split('.png')[0]\n os.system('cp %s/%s.png %s/%.4d.png'%(plotdir, fname, tmp_moviedir, i))\n\n os.system('avconv -i %s'%tmp_moviedir +'/%04d.png ' \\\n +' -y -c:v libx264 -pix_fmt yuv420p %s/%s.mp4'%(moviedir, movienametag))", "def preprocess_sample(file, params):\n\n videoFile = file + \".mp4\"\n audioFile = file + \".wav\"\n roiFile = file + \".png\"\n visualFeaturesFile = file + \".npy\"\n\n roiSize = params[\"roiSize\"]\n normMean = params[\"normMean\"]\n normStd = params[\"normStd\"]\n vf = params[\"vf\"]\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\n #Extract the audio from the video file using the FFmpeg utility and save it to a wav file.\n v2aCommand = \"ffmpeg -y -v quiet -i \" + videoFile + \" -ac 1 -ar 16000 -vn \" + audioFile\n os.system(v2aCommand)\n\n\n #for each frame, resize to 224x224 and crop the central 112x112 region\n captureObj = cv.VideoCapture(videoFile)\n roiSequence = list()\n while (captureObj.isOpened()):\n ret, frame = captureObj.read()\n if ret == True:\n grayed = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\n grayed = grayed/255\n grayed = cv.resize(grayed, (224,224))\n roi = grayed[int(112-(roiSize/2)):int(112+(roiSize/2)), int(112-(roiSize/2)):int(112+(roiSize/2))]\n roiSequence.append(roi)\n else:\n break\n captureObj.release()\n cv.imwrite(roiFile, np.floor(255*np.concatenate(roiSequence, axis=1)).astype(np.int))\n\n\n #normalise the frames and extract features for each frame using the visual frontend\n #save the visual features to a .npy file\n inp = np.stack(roiSequence, axis=0)\n inp = np.expand_dims(inp, axis=[1,2])\n inp = (inp - normMean)/normStd\n inputBatch = torch.from_numpy(inp)\n inputBatch = (inputBatch.float()).to(device)\n vf.eval()\n with torch.no_grad():\n outputBatch = vf(inputBatch)\n out = torch.squeeze(outputBatch, dim=1)\n out = out.cpu().numpy()\n np.save(visualFeaturesFile, out)\n return", "def gen_test_output_video(sess, logits, keep_prob, image_pl, video_file, image_shape):\n cap = cv2.VideoCapture(video_file)\n counter=0\n while True:\n ret, frame = cap.read()\n if frame is None:\n break\n image = scipy.misc.imresize(frame, image_shape)\n\n im_softmax = sess.run(\n [tf.nn.softmax(logits)],\n {keep_prob: 1.0, image_pl: [image]})\n im_softmax = im_softmax[0][:, 1].reshape(image_shape[0], image_shape[1])\n segmentation = (im_softmax > 0.5).reshape(image_shape[0], image_shape[1], 1)\n mask = np.dot(segmentation, np.array([[0, 255, 0, 127]]))\n mask_full = scipy.misc.imresize(mask, frame.shape)\n mask_full = scipy.misc.toimage(mask_full, mode=\"RGBA\")\n mask = scipy.misc.toimage(mask, mode=\"RGBA\")\n\n\n street_im = scipy.misc.toimage(image)\n street_im.paste(mask, box=None, mask=mask)\n\n street_im_full = 
scipy.misc.toimage(frame)\n street_im_full.paste(mask_full, box=None, mask=mask_full)\n\n cv2.imwrite(\"4k-result/4k_image%08d.jpg\"%counter,np.array(street_im_full))\n counter=counter+1\n\n # When everything done, release the capture\n cap.release()\n cv2.destroyAllWindows()", "def video_process(threshold=THRESHOLD, inputpath=INPUTPATH, file=FILE):\n #create video capture object\n cap = cv2.VideoCapture(f'{inputpath}{file}')\n name = file.split('/')[-1].split('.')[0]\n frame_sqrs_list = []\n if (cap.isOpened()==False):\n logging.error('Error opening video stream or file')\n model = load_model()\n frame_n = 1\n print('model loaded')\n while(cap.isOpened()):\n #capture frame-by-frame\n ret, frame = cap.read()\n if ret == True:\n squares_list = img_preprocess(frame)\n frame_n = frame_n+1\n print(f'enter video file, frame{frame_n}')\n x_list = []\n y_list = []\n for sq in squares_list:\n predict = predict_hot_pxl(sq.sq, model)\n if predict > threshold:\n pred = 1\n print('ERROR')\n x_list.append(sq.y)\n y_list.append(sq.x)\n # draw square around error in frame:\n # FIXME: save a square to a list of squares\n continue\n else:\n pred = 0\n print('no error')\n # FIXME: draw_sqr(name, frame, frame_n, !!! PASS LIST INSTEAD !!! and rewrite the draw func to draw several squares sq.y, sq.x) \n sq = sq._replace(pred_float = predict)\n sq = sq._replace(pred_int = pred)\n # dict element sq is now obsolete, remove it\n sq = sq._replace(sq = None)\n # save single frame with squares marking errors as png to disc:\n draw_sqr(name, frame, frame_n, x_list, y_list)\n frame_sqrs_list.append(sq)\n # Break the loop\n else:\n break\n return name, frame_sqrs_list", "def process_video(filename, args, cfg, net):\n # Split video into frames\n images = split_video(filename)\n # Set output dir\n output_dir = args.output\n # Add brackets and extension to filename\n output_path = create_video_output_path(output_dir, cfg)\n # Get height and width of 1st image\n height, width, _ = check_img_size(images[0]).shape\n # Create VideoWriter object\n video = cv2.VideoWriter(output_path, \n cv2.VideoWriter_fourcc(*'FMP4'), \n cfg['video']['fps'], \n (width, height))\n for image in images:\n # Process frames\n img_steps = process_image(image, cfg, net)\n # Check for --show-detections flag\n output_img = check_if_adding_bboxes(args, img_steps) \n # Write to video\n video.write(output_img) \n # Release video writer object\n video.release()", "def generate_video(image_folder, video_name, video_frames_path):\n \n try:\n os.stat(video_frames_path)\n except:\n os.makedirs(video_frames_path)\n \n images = [img for img in os.listdir(image_folder)\n if img.endswith(\".jpg\") or\n img.endswith(\".jpeg\") or\n img.endswith(\"png\") or\n img.endswith(\"tif\")]\n\n images.sort()\n\n print(images)\n\n frame = cv2.imread(os.path.join(image_folder, images[0]))\n\n height, width, layers = frame.shape\n\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n video = cv2.VideoWriter(video_frames_path + '/' + video_name, fourcc, 1, (width, height))\n\n # Appending the images to the video one by one\n video_frame = np.zeros((height, width, 3), np.uint8)\n for image in images:\n img = cv2.imread(os.path.join(image_folder, image), cv2.IMREAD_UNCHANGED)\n video_frame = overlay_transparent(video_frame, img)\n cv2.imwrite(os.path.join(video_frames_path, image), video_frame)\n video.write(video_frame)\n\n # Deallocating memories taken for window creation\n cv2.destroyAllWindows()\n video.release() # releasing the video generated", "def main():\n\n parser = 
argparse.ArgumentParser()\n\n parser.add_argument(\"-hgt\", \"--imgHeight\", help=\"The height of the images, default=720.\",\n type=int, default=720)\n\n parser.add_argument(\"-wd\", \"--imgWidth\", help=\"The width of the images, default=1280.\",\n type=int, default=1280)\n\n parser.add_argument(\"-r\", \"--chessboardRows\", help=\"The rows of the chessboard calibration images, default=6.\",\n type=int, default=6)\n\n parser.add_argument(\"-c\", \"--chessboardCols\", help=\"The cols of the chessboard calibration images, default=9.\",\n type=int, default=9)\n\n parser.add_argument(\"-cp\", \"--calibrationPath\", help=\"The height of the images, default=720.\",\n type=str, default='')\n\n parser.add_argument(\"-in\", \"--inputVideoPath\", help=\"The path to the input video to be processed.\",\n type=str, default='')\n\n parser.add_argument(\"-out\", \"--outputVideoPath\", help=\"The path to the where to store output video.\",\n type=str, default='')\n\n args = parser.parse_args()\n\n print(args)\n\n assert args.calibrationPath != '', \"The path to calibration images can't be empty\"\n assert args.inputVideoPath != '', \"The path to input video can't be empty\"\n assert args.outputVideoPath != '', \"The path to output video can't be empty\"\n\n camera_mtx, dist_coeff = CameraCalibration((args.imgHeight, args.imgWidth),\n (args.chessboardRows, args.chessboardCols),\n args.calibrationPath).calibrate()\n print(\"Camera Mtx\", camera_mtx)\n print(\"Distortion Coefficient\", dist_coeff)\n # img = cv2.imread('test_images/test5.jpg')\n # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n AdvancedLaneDetection(args.inputVideoPath, camera_mtx, dist_coeff).process_video(args.outputVideoPath)\n\n # cv2.imwrite(\"output.jpg\", result)", "def video2image(video, dest_folder, imgs_per_sec, start_frame=0, no_images=None):\n #test if video exists\n if not os.path.isfile(video):\n debug(1, 'No valid file ', video)\n return\n #get file name\n file_name,ending = ntpath.basename(video).split('.')\n\n #open video\n cap = cv2.VideoCapture(video)\n\n fps = int(cap.get(cv2.cv.CV_CAP_PROP_FPS))\n duration = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))\n\n step = int(ceil(float(fps)/float(imgs_per_sec)))\n if no_images == None:\n end= duration\n else:\n end = min(duration, start_frame+step*no_images)\n no_img_proc = 0\n\n names = []\n for t in range(start_frame,end,step):\n cap.set(cv2.cv.CV_CAP_PROP_POS_FRAMES,t)\n ret, frame = cap.read()\n name = dest_folder+file_name+'_{:08d}.jpg'.format(no_img_proc)#dest_folder+file_name+'-img_per_sec_'+str(imgs_per_sec)+'-start_frame_'+str(start_frame)+'-no_images_'+str(no_images)+'-img_num_'+str(no_img_proc)+'.jpg'\n names.append(name)\n cv2.imwrite(name, frame)\n\n no_img_proc = no_img_proc+1\n\n debug(0, no_img_proc, ' images have been written to ', dest_folder)\n return names", "def release_video(title):\r\n image_folder = '.'\r\n video_name = title\r\n\r\n images = [img for img in os.listdir(image_folder)\r\n if img.endswith(\".jpg\") or\r\n img.endswith(\".jpeg\") or\r\n img.endswith(\".JPEG\") or\r\n img.endswith(\".PNG\") or\r\n img.endswith(\"png\")]\r\n\r\n images = sorted(images, key=sort_by_title)\r\n frame = cv2.imread(os.path.join(image_folder, images[0]))\r\n height, width, layers = frame.shape\r\n video = cv2.VideoWriter(video_name, 0, 1, (width, height))\r\n\r\n for image in images:\r\n video.write(cv2.imread(os.path.join(image_folder, image)))\r\n cv2.destroyAllWindows()\r\n video.release() # releasing the video generated\r" ]
[ "0.6378548", "0.6341571", "0.6268886", "0.6267411", "0.62665486", "0.62576735", "0.61747694", "0.6135613", "0.61193234", "0.6107251", "0.607215", "0.6004102", "0.59735906", "0.5962933", "0.5939074", "0.59383684", "0.59096986", "0.5904158", "0.58933836", "0.5886941", "0.5883523", "0.58644396", "0.5845487", "0.58357006", "0.58166134", "0.5811612", "0.5808816", "0.57658994", "0.57622117", "0.57552975" ]
0.6345232
1
The function converts Euler angles to a quaternion object
def euler_to_quaternion(euler: tuple) -> object: (yaw, pitch, roll) = (euler[0], euler[1], euler[2]) qy = np.sin(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) - np.cos(roll / 2) * np.sin(pitch / 2) * np.sin(yaw / 2) qx = np.cos(roll / 2) * np.sin(pitch / 2) * np.cos(yaw / 2) + np.sin(roll / 2) * np.cos(pitch / 2) * np.sin(yaw / 2) qw = np.cos(roll / 2) * np.cos(pitch / 2) * np.sin(yaw / 2) - np.sin(roll / 2) * np.sin(pitch / 2) * np.cos(yaw / 2) qz = np.cos(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) + np.sin(roll / 2) * np.sin(pitch / 2) * np.sin(yaw / 2) return qx, qy, qz, qw
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def euler_to_quaternion(psi, theta, phi):\n # Abbreviations for the various angular functions\n cy = np.cos(psi * 0.5)\n sy = np.sin(psi * 0.5)\n cp = np.cos(theta * 0.5)\n sp = np.sin(theta * 0.5)\n cr = np.cos(phi * 0.5)\n sr = np.sin(phi * 0.5)\n\n q = np.zeros(4)\n q[0] = cy * cp * cr + sy * sp * sr\n q[1] = cy * cp * sr - sy * sp * cr\n q[2] = sy * cp * sr + cy * sp * cr\n q[3] = sy * cp * cr - cy * sp * sr\n return q", "def euler_to_quaternion(yaw, pitch, roll):\r\n cy = math.cos(yaw * 0.5)\r\n sy = math.sin(yaw * 0.5)\r\n cp = math.cos(pitch * 0.5)\r\n sp = math.sin(pitch * 0.5)\r\n cr = math.cos(roll * 0.5)\r\n sr = math.sin(roll * 0.5)\r\n w = cy * cp * cr + sy * sp * sr\r\n x = cy * cp * sr - sy * sp * cr\r\n y = sy * cp * sr + cy * sp * cr\r\n z = sy * cp * cr - cy * sp * sr\r\n return w, x, y, z", "def convert_euler_to_quaternion(roll, yaw, pitch):\n\n # roll (z), yaw (y), pitch (x)\n\n cy = math.cos(math.radians(roll) * 0.5)\n sy = math.sin(math.radians(roll) * 0.5)\n\n cp = math.cos(math.radians(yaw) * 0.5)\n sp = math.sin(math.radians(yaw) * 0.5)\n\n cr = math.cos(math.radians(pitch) * 0.5)\n sr = math.sin(math.radians(pitch) * 0.5)\n\n w = cy * cp * cr + sy * sp * sr\n x = cy * cp * sr - sy * sp * cr\n y = sy * cp * sr + cy * sp * cr\n z = sy * cp * cr - cy * sp * sr\n\n quat = np.array([w, x, y, z])\n quat = quat / np.linalg.norm(quat)\n return quat", "def euler_to_quat(self, yaw):\n quat_array = t.quaternion_from_euler(0.0, 0.0, yaw)\n return Quaternion(quat_array[0], quat_array[1], quat_array[2], quat_array[3])", "def euler_to_quat(roll, pitch, yaw):\n pose = Pose()\n quaternion = tf.transformations.quaternion_from_euler(roll, pitch, yaw)\n pose.orientation.x = quaternion[0]\n pose.orientation.y = quaternion[1]\n pose.orientation.z = quaternion[2]\n pose.orientation.w = quaternion[3]\n return pose.orientation", "def euler2quaternion(psi, theta, phi):\n if abs(psi) == 0 and abs(theta) == 0 and abs(phi) == 0:\n quaternion = np.array([1., 0., 0., 0.])\n else:\n R = euler2rot3D(psi, theta, phi)\n W = np.array([R[1, 2]-R[2, 1], R[2, 0]-R[0, 2], R[0, 1]-R[1, 0]])\n if W[0] >= 0:\n W /= np.linalg.norm(W)\n else:\n W /= np.linalg.norm(W) * -1\n theta = np.arccos(0.5 * (np.trace(R) - 1))\n CCisTheta = corrCoeff(R, angleAxis2rot3D(W, theta))\n CCisNegTheta = corrCoeff(R, angleAxis2rot3D(W, -theta))\n if CCisNegTheta > CCisTheta:\n theta = -theta\n quaternion = np.array([np.cos(theta/2.), np.sin(theta/2.)*W[0], np.sin(theta/2.)*W[1], np.sin(theta/2.)*W[2]])\n if quaternion[0] < 0:\n quaternion *= -1\n return quaternion", "def quaternion_from_euler(ai, aj, ak, axes='sxyz'):\n try:\n firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]\n except (AttributeError, KeyError):\n _ = _TUPLE2AXES[axes]\n firstaxis, parity, repetition, frame = axes\n\n i = firstaxis\n j = _NEXT_AXIS[i+parity]\n k = _NEXT_AXIS[i-parity+1]\n\n if frame:\n ai, ak = ak, ai\n if parity:\n aj = -aj\n\n ai /= 2.0\n aj /= 2.0\n # print(\"ak : {}\".format(type(ak)))\n ak /= 2.0\n ci = math.cos(ai)\n si = math.sin(ai)\n cj = math.cos(aj)\n sj = math.sin(aj)\n ck = math.cos(ak)\n sk = math.sin(ak)\n cc = ci*ck\n cs = ci*sk\n sc = si*ck\n ss = si*sk\n\n quaternion = np.empty((4, ), dtype=np.float64)\n if repetition:\n quaternion[i] = cj*(cs + sc)\n quaternion[j] = sj*(cc + ss)\n quaternion[k] = sj*(cs - sc)\n quaternion[3] = cj*(cc - ss)\n else:\n quaternion[i] = cj*sc - sj*cs\n quaternion[j] = cj*ss + sj*cc\n quaternion[k] = cj*cs - sj*sc\n quaternion[3] = cj*cc + sj*ss\n if parity:\n quaternion[j] *= 
-1\n\n return quaternion", "def euler_from_quaternion(self, quaternion):\n x = quaternion.x\n y = quaternion.y\n z = quaternion.z\n w = quaternion.w\n\n sinr_cosp = 2 * (w * x + y * z)\n cosr_cosp = 1 - 2 * (x * x + y * y)\n roll = np.arctan2(sinr_cosp, cosr_cosp)\n\n sinp = 2 * (w * y - z * x)\n pitch = np.arcsin(sinp)\n\n siny_cosp = 2 * (w * z + x * y)\n cosy_cosp = 1 - 2 * (y * y + z * z)\n yaw = np.arctan2(siny_cosp, cosy_cosp)\n\n return roll, pitch, yaw", "def quaternion_to_euler(q):\r\n W = q[0]\r\n X = q[1]\r\n Y = q[2]\r\n Z = q[3]\r\n\r\n # roll(x - axis rotation)\r\n sinr_cosp = +2.0 * (W * X + Y * Z)\r\n cosr_cosp = +1.0 - 2.0 * (X * X + Y * Y)\r\n roll = math.atan2(sinr_cosp, cosr_cosp)\r\n\r\n # pitch(y - axis rotation)\r\n sinp = +2.0 * (W * Y - Z * X)\r\n if abs(sinp) >= 1:\r\n pitch = np.copysign(math.pi / 2, sinp) # use 90 degrees if out of range\r\n else:\r\n pitch = math.asin(sinp)\r\n\r\n # yaw(z - axis rotation)\r\n siny_cosp = +2.0 * (W * Z + X * Y)\r\n cosy_cosp = +1.0 - 2.0 * (Y * Y + Z * Z)\r\n yaw = math.atan2(siny_cosp, cosy_cosp)\r\n\r\n return roll, pitch, yaw", "def euler_from_quaternion(x, y, z, w):\n t0 = +2.0 * (w * x + y * z)\n t1 = +1.0 - 2.0 * (x * x + y * y)\n roll_x = math.atan2(t0, t1)\n \n t2 = +2.0 * (w * y - z * x)\n pitch_y = math.asin(t2)\n \n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw_z = math.atan2(t3, t4)\n \n return roll_x, pitch_y, yaw_z # in radians", "def euler_from_quaternion(x, y, z, w):\n t0 = +2.0 * (w * x + y * z)\n t1 = +1.0 - 2.0 * (x * x + y * y)\n roll_x = math.atan2(t0, t1)\n \n t2 = +2.0 * (w * y - z * x)\n t2 = +1.0 if t2 > +1.0 else t2\n t2 = -1.0 if t2 < -1.0 else t2\n pitch_y = math.asin(t2)\n \n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw_z = math.atan2(t3, t4)\n \n return roll_x, pitch_y, yaw_z # in radians", "def angle_to_quaternion(angle):\n\treturn Quaternion(*tf.transformations.quaternion_from_euler(0, 0, angle))", "def euler_from_quaternion(x, y, z, w):\n t0 = +2.0 * (w * x + y * z)\n t1 = +1.0 - 2.0 * (x * x + y * y)\n roll_x = math.atan2(t0, t1)\n\n t2 = +2.0 * (w * y - z * x)\n t2 = +1.0 if t2 > +1.0 else t2\n t2 = -1.0 if t2 < -1.0 else t2\n pitch_y = math.asin(t2)\n\n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw_z = math.atan2(t3, t4)\n\n return roll_x, pitch_y, yaw_z # in radians", "def quat_to_euler(orientation):\n quaternion = (\n orientation.x,\n orientation.y,\n orientation.z,\n orientation.w\n )\n euler = tf.transformations.euler_from_quaternion(quaternion)\n roll = euler[0]\n pitch = euler[1]\n yaw = euler[2]\n return (roll,pitch,yaw)", "def euler_from_quaternion(quaternion, axes='sxyz'):\r\n return euler_from_matrix(quaternion_matrix(quaternion), axes)", "def angle_to_quaternion(angle):\n return Quaternion(*tf.transformations.quaternion_from_euler(0, 0, angle))", "def quaternion_to_angle(self, q):\n\tx, y, z, w = q.x, q.y, q.z, q.w\n\troll, pitch, yaw = tf.transformations.euler_from_quaternion((x, y, z, w))\n\treturn yaw", "def euler_from_quaternion(x, y, z, w):\r\n\tt0 = +2.0 * (w * x + y * z)\r\n\tt1 = +1.0 - 2.0 * (x * x + y * y)\r\n\troll_x = math.atan2(t0, t1)\r\n\r\n\tt2 = +2.0 * (w * y - z * x)\r\n\tt2 = +1.0 if t2 > +1.0 else t2\r\n\tt2 = -1.0 if t2 < -1.0 else t2\r\n\tpitch_y = math.asin(t2)\r\n\r\n\tt3 = +2.0 * (w * z + x * y)\r\n\tt4 = +1.0 - 2.0 * (y * y + z * z)\r\n\tyaw_z = math.atan2(t3, t4)\r\n\r\n\treturn roll_x, pitch_y, yaw_z # in radians\r", "def euler_from_quaternion(self, x, y, z, w):\n t0 = +2.0 * (w * x 
+ y * z)\n t1 = +1.0 - 2.0 * (x * x + y * y)\n roll_x = math.atan2(t0, t1)\n\n t2 = +2.0 * (w * y - z * x)\n t2 = +1.0 if t2 > +1.0 else t2\n t2 = -1.0 if t2 < -1.0 else t2\n pitch_y = math.asin(t2)\n\n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw_z = math.atan2(t3, t4)\n\n return roll_x, pitch_y, yaw_z # in radians", "def quaternion_to_angle(q):\n\tx, y, z, w = q.x, q.y, q.z, q.w\n\troll, pitch, yaw = tf.transformations.euler_from_quaternion((x, y, z, w))\n\treturn yaw", "def quaternion_to_angle(q):\n x, y, z, w = q.x, q.y, q.z, q.w\n roll, pitch, yaw = tf.transformations.euler_from_quaternion((x, y, z, w))\n return yaw", "def _rotate_quaternion(self, q):\n self._normalise()\n return self * q * self.conjugate", "def quaternion_from_euler(ai, aj, ak, axes='sxyz'):\r\n try:\r\n firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]\r\n except (AttributeError, KeyError):\r\n _TUPLE2AXES[axes] # noqa: validation\r\n firstaxis, parity, repetition, frame = axes\r\n\r\n i = firstaxis + 1\r\n j = _NEXT_AXIS[i+parity-1] + 1\r\n k = _NEXT_AXIS[i-parity] + 1\r\n\r\n if frame:\r\n ai, ak = ak, ai\r\n if parity:\r\n aj = -aj\r\n\r\n ai /= 2.0\r\n aj /= 2.0\r\n ak /= 2.0\r\n ci = math.cos(ai)\r\n si = math.sin(ai)\r\n cj = math.cos(aj)\r\n sj = math.sin(aj)\r\n ck = math.cos(ak)\r\n sk = math.sin(ak)\r\n cc = ci*ck\r\n cs = ci*sk\r\n sc = si*ck\r\n ss = si*sk\r\n\r\n q = numpy.empty((4, ))\r\n if repetition:\r\n q[0] = cj*(cc - ss)\r\n q[i] = cj*(cs + sc)\r\n q[j] = sj*(cc + ss)\r\n q[k] = sj*(cs - sc)\r\n else:\r\n q[0] = cj*cc + sj*ss\r\n q[i] = cj*sc - sj*cs\r\n q[j] = cj*ss + sj*cc\r\n q[k] = cj*cs - sj*sc\r\n if parity:\r\n q[j] *= -1.0\r\n\r\n return q", "def axang2quat(ax_ang):\n\n if ax_ang.ndim == 1:\n if np.size(ax_ang) == 5:\n ax_ang = np.reshape(ax_ang, (5, 1))\n msz = 1\n elif np.size(ax_ang) == 4:\n ax_ang = np.reshape(np.hstack((ax_ang, np.array([1]))), (5, 1))\n msz = 1\n else:\n raise Exception('Wrong Input Type')\n elif ax_ang.ndim == 2:\n if np.shape(ax_ang)[0] == 5:\n msz = np.shape(ax_ang)[1]\n elif np.shape(ax_ang)[1] == 5:\n ax_ang = ax_ang.transpose()\n msz = np.shape(ax_ang)[1]\n else:\n raise Exception('Wrong Input Type')\n else:\n raise Exception('Wrong Input Type')\n\n direction = ax_ang[0:3, :]\n angle = ax_ang[3, :]\n\n d = np.array(direction, dtype=np.float64)\n d /= np.linalg.norm(d, axis=0)\n x = d[0, :]\n y = d[1, :]\n z = d[2, :]\n q0 = np.cos(angle/2)\n s = np.sin(angle/2)\n\n q1 = x*s\n q2 = y*s\n q3 = z*s\n\n qtype = 0*q3;\n inds1 = np.where(ax_ang[4, :] == -1); qtype[inds1] = -1;\n inds2 = np.where(ax_ang[4, :] == 1); qtype[inds2] = 1;\n\n return quat.Quaternion(q0, q1, q2, q3, qtype)", "def _create_quaternion(direction, up) -> Tuple[float, float, float, float]:\n direction = direction / spy.vnorm(direction)\n up = up / spy.vnorm(up)\n\n x = spy.vcrss(up, direction)\n x = x / spy.vnorm(x)\n y = spy.vcrss(direction, x)\n y = y / spy.vnorm(y)\n z = direction\n\n r = sqrt(1.0 + x[0] + y[1] + z[2]) * 0.5\n i = (y[2] - z[1]) / (4 * r)\n j = (z[0] - x[2]) / (4 * r)\n k = (x[1] - y[0]) / (4 * r)\n\n return r, i, j, k", "def quaternion_from_axis_angle(x, y, z, theta):\n if x == y == z == 0:\n return np.array([1, 0, 0, 0])\n axis = np.array([x, y, z])\n axis /= np.linalg.norm(axis)\n return rowan.from_axis_angle(axis, theta)", "def invert_quaternion(quaternion):\n norm = np.linalg.norm(quaternion)\n quaternion[1:] = -1.0 * quaternion[1:]\n return quaternion / norm", "def euler2quat(angles, rot_seq='zyx'):\n cangle = np.cos(0.5*angles)\n 
sangle = np.sin(0.5*angles)\n rot_seq = rot_seq.lower()\n if rot_seq == 'zyx':\n return np.array([cangle[0]*cangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] - sangle[0]*sangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*cangle[1]*sangle[2],\n sangle[0]*cangle[1]*cangle[2] - cangle[0]*sangle[1]*sangle[2]])\n elif rot_seq == 'zyz':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*sangle[1]*sangle[2] - sangle[0]*sangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n sangle[0]*cangle[1]*cangle[2] + cangle[0]*cangle[1]*sangle[2]])\n elif rot_seq == 'zxy':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*sangle[1]*sangle[2],\n cangle[0]*sangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*sangle[1]*cangle[2],\n cangle[0]*sangle[1]*sangle[2] + sangle[0]*cangle[1]*cangle[2]])\n elif rot_seq == 'zxz':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n sangle[0]*sangle[1]*cangle[2] - cangle[0]*sangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*cangle[1]*cangle[2]])\n elif rot_seq == 'yxz':\n return np.array([cangle[0]*cangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*cangle[1]*sangle[2],\n sangle[0]*cangle[1]*cangle[2] - cangle[0]*sangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] - sangle[0]*sangle[1]*cangle[2]])\n elif rot_seq == 'yxy':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n sangle[0]*cangle[1]*cangle[2] + cangle[0]*cangle[1]*sangle[2],\n cangle[0]*sangle[1]*sangle[2] - sangle[0]*sangle[1]*cangle[2]])\n elif rot_seq == 'yzx':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*sangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*sangle[1]*cangle[2],\n cangle[0]*sangle[1]*sangle[2] + sangle[0]*cangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2]])\n elif rot_seq == 'yzy':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n sangle[0]*sangle[1]*cangle[2] - cangle[0]*sangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*cangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2]])\n elif rot_seq == 'xyz':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*sangle[1]*sangle[2],\n cangle[0]*sangle[1]*sangle[2] + sangle[0]*cangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*sangle[1]*cangle[2]])\n elif rot_seq == 'xyx':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*cangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n sangle[0]*sangle[1]*cangle[2] - cangle[0]*sangle[1]*sangle[2]])\n elif rot_seq == 'xzy':\n return np.array([cangle[0]*cangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n sangle[0]*cangle[1]*cangle[2] - cangle[0]*sangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] - sangle[0]*sangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*cangle[1]*sangle[2]])\n elif rot_seq == 'xzx':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + 
sangle[0]*cangle[1]*cangle[2],\n cangle[0]*sangle[1]*sangle[2] - sangle[0]*sangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2]])\n else:\n return False", "def euler_angle_to_rotation(ea, convention='zyx'):\n axis_names_to_vectors = dict([('x', (1, 0, 0)), ('y', (0, 1, 0)), ('z', (0, 0, 1))])\n axis0, axis1, axis2 = convention\n R0 = so3.rotation(axis_names_to_vectors[axis0], ea[0])\n R1 = so3.rotation(axis_names_to_vectors[axis1], ea[1])\n R2 = so3.rotation(axis_names_to_vectors[axis2], ea[2])\n return so3.mul(R0, so3.mul(R1, R2))", "def quaternion_inv(quaternion):\r\n q = numpy.array(quaternion, dtype=numpy.float64, copy=True)\r\n numpy.negative(q[1:], q[1:])\r\n return q / numpy.dot(q, q)" ]
[ "0.7751907", "0.7692476", "0.7672758", "0.7591342", "0.7487414", "0.74774384", "0.7452368", "0.742516", "0.74124753", "0.7385367", "0.7384482", "0.73697144", "0.7299849", "0.72180927", "0.7195682", "0.71946454", "0.7185801", "0.71458083", "0.7089422", "0.70486486", "0.7017161", "0.6996515", "0.69569194", "0.69358623", "0.6924761", "0.6918279", "0.68400574", "0.6747262", "0.67450386", "0.6712952" ]
0.805849
0
The function writes the recovered camera poses according to the COLMAP documentation
def write_camera_pose_to_file(camera_pose_abs_dict: dict, pose_dir_path: str) -> None: image_dst = path.join(pose_dir_path, 'images.txt') with open(image_dst, 'w+') as file: file.write('# Image list with two lines of data per image:\n') file.write('# IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME\n') file.write('# POINTS2D[] as (X, Y, POINT3D_ID)\n') file.write(f'# Number of images: {len(camera_pose_abs_dict.keys())}\n') # write each camera pose to file for image in camera_pose_abs_dict.keys(): image_pose_data = [] t_vector = camera_pose_abs_dict[image][1] qx, qy, qz, qw = rotation_matrix_to_quaternion(camera_pose_abs_dict[image][0]) image_pose_data.append(str(image)) # image_pose_data.append(f'{qw} {qx} {qy} {qz}') image_pose_data.append(f'{qz} {qy} {qx} {qw}') image_pose_data.append(' '.join(map(str, t_vector))) image_pose_data.append('1') image_pose_data.append(f'image{image}.jpg') file.write(' '.join(image_pose_data) + '\n\n')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_poses():\n get_marshmallow_pose(should_remember=True)\n get_mouth_pose(should_remember=True)\n rospy.sleep(1)\n print \"Finished saving poses\"", "def write(self, pathname='wind.png'):\r\n cv2.imwrite(pathname, self.matrix * 255)", "def writeCameraSettings(self):\n pass", "def save_camera_info(outbag_base_path, topic, msg):\n\n info_base = topic[1:].rsplit('/', 1)[0]\n image_dirpath = os.path.join(outbag_base_path, info_base)\n if not os.path.exists(image_dirpath):\n os.makedirs(image_dirpath)\n\n camera_info_savepath = os.path.join(image_dirpath, \"camera_info.json\")\n # camera_info_savepath = os.path.join(image_dirpath, \"camera_info.yaml\")\n\n frame_id = msg.header.frame_id\n height = msg.height\n width = msg.width\n distortion_model = msg.distortion_model\n # For \"plumb_bob\", the 5 parameters are: (k1, k2, t1, t2, k3).\n distortion_coeff = list(msg.D)\n intrinsic_mat = list(msg.K)\n cam_info_dict = {\n 'frame_id': frame_id,\n 'height': height,\n 'width': width,\n 'K': intrinsic_mat,\n 'distortion_model': distortion_model,\n 'D': distortion_coeff,\n }\n\n # json\n with open(camera_info_savepath, 'w') as cam_info_file:\n json.dump(\n cam_info_dict,\n cam_info_file,\n indent=4,\n sort_keys=False,\n )\n\n # yaml\n # with open(camera_info_savepath, 'w', encoding='utf-8') as f:\n # yaml.dump(cam_info_dict, default_flow_style=None)\n\n print(\"Save CameraInfo from {} topic!\".format(topic))", "def save_spi3d(self):\n for filename, colormap in colors.colormaps.items():\n if self.test:\n self.print_colormap(filename, colormap)\n lut = self.generate_spi3d_from_colormap(colormap)\n file_path = os.path.join(self.output, filename)\n file_io.save_file(lut, file_path)\n\n for filename, ev_colormap in colors.ev_colormaps.items():\n if self.test:\n self.print_colormap(filename, ev_colormap)\n lut = self.generate_spi3d_from_evs(ev_colormap)\n file_path = os.path.join(self.output, filename)\n file_io.save_file(lut, file_path)", "def save(self, filename):\n print(\"Saving...\", end=\"\\r\")\n canvas = self.canvas[self.N:self.S,self.W:self.E]\n cv2.imwrite(\"./Output/\"+filename, canvas)\n print(\"Saved:\",filename)", "def main():\n cam = Realsense()\n # cam.access_intr_and_extr()\n profile = cam.pipeline.start(cam.config)\n depth_sensor = profile.get_device().first_depth_sensor()\n depth_scale = depth_sensor.get_depth_scale()\n align_to = rs.stream.color\n align = rs.align(align_to)\n\n objp = np.zeros((3*4,3), np.float32)\n objp[:,:2] = np.mgrid[0:4,0:3].T.reshape(-1,2)\n axis = np.float32([[1,0,0], [0,1,0], [0,0,-1]]).reshape(-1,3)\n # print(objp)\n\n try:\n while (True):\n # detect ArUco markers in RGB images\n frames = cam.pipeline.wait_for_frames()\n aligned_frames = align.process(frames)\n color_frame = aligned_frames.get_color_frame()\n color_image = np.asanyarray(color_frame.get_data()) \n frame = color_image\n font = cv2.FONT_HERSHEY_SIMPLEX\n corners, ids, rvecs, tvecs = cam.detect_markers_realsense(frame)\n \n if np.all(ids != None): # if markers are detected\n for i in range(0, ids.size):\n aruco.drawAxis(frame, cam.newcameramtx, cam.dist, rvecs[i],\n tvecs[i], 0.1) # Draw axis\n aruco.drawDetectedMarkers(frame, corners) # draw square around markers\n\n ###### DRAW ID #####\n strg = ''\n for i in range(0, ids.size):\n strg += str(ids[i][0])+', '\n\n cv2.putText(frame, \"Id: \" + strg, (0,25), font, 1, (0,255,0), 2,\n cv2.LINE_AA)\n\n\t ###### Output marker positions in camera frame ######\n \t # output tvec\n y0 = 60\n dy = 40\n for i in range(0, ids.size):\n y = y0 + i*dy\n 
cv2.putText(frame, str(tvecs[i][0]), (0, y), font, 1, (0,255,0),\n 2, cv2.LINE_AA)\n\n else:\n ##### DRAW \"NO IDS\" #####\n cv2.putText(frame, \"No Ids\", (0,64), font, 1, (0,255,0), 2,\n cv2.LINE_AA)\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n ret, corners = cv2.findChessboardCorners(gray, (4,3), None)\n if ret == True:\n corners2 = cv2.cornerSubPix(gray, corners,(11,11), (-1,-1),\n cam.criteria)\n corners2 = corners2[::-1]\n # print(corners2)\n # print(objp)\n frame = cv2.drawChessboardCorners(frame, (4,3), corners2, ret)\n # Find the rotation and translation vectors.\n _, rvecs, tvecs = cv2.solvePnP(objp, corners2, cam.newcameramtx,\n cam.dist)\n rot, _ = cv2.Rodrigues(rvecs)\n # print(rot)\n # project 3D points to image plane\n imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs,\n cam.newcameramtx, cam.dist)\n frame = draw(frame, corners2, imgpts)\n\n # Display the resulting frame\n cv2.imshow('frame',frame)\n cv2.waitKey(5)\n\n # When everything done, release the capture\n cv2.destroyAllWindows()\n\n finally:\n cam.pipeline.stop()", "def video(perspective_matrix_path, source=\"cam\", save=False, save_path=None, file_name=\"out\", cam_cal=None):\n if not os.path.isfile(perspective_matrix_path):\n raise FileNotFoundError(\"Path to perspective matrix file not exist!\")\n\n with open(perspective_matrix_path, \"rb\") as p:\n perspective_matrix = pickle.load(p)\n M = perspective_matrix[\"M\"]\n Minv = perspective_matrix[\"Minv\"]\n\n if source == \"cam\":\n cap = cv2.VideoCapture(0)\n else:\n if not os.path.isfile(source):\n raise FileNotFoundError(source, \" not Exist!\")\n cap = cv2.VideoCapture(source)\n\n # camera calibration parameters [ mtx , dist]\n mtx = None\n dist = None\n\n out = None\n if save:\n if not os.path.isdir(save_path):\n raise FileNotFoundError(save_path, \" Not Exist!\")\n file_name += \".mp4\"\n out = cv2.VideoWriter(save_path + file_name, -1, 20, (int(cap.get(3)), int(cap.get(4))))\n\n if cam_cal:\n if not os.path.isfile(cam_cal):\n raise FileNotFoundError(cam_cal, \" Not Exist!\")\n\n with open(cam_cal, \"rb\") as p:\n calibration = pickle.load(p)\n mtx = calibration[\"mtx\"]\n dist = calibration[\"dist\"]\n\n left_line = Line(5)\n right_line = Line(5)\n\n while True:\n # Capture frame-by-frame\n ret, frame = cap.read()\n if not ret:\n print(\"Finished..\")\n sys.exit(0)\n\n # cv2 read frame as BGR, convert it to RGB\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n\n # camera calibration\n if not (mtx is None or dist is None):\n frame = cv2.undistort(frame, mtx, dist, None, mtx)\n\n # get edges in image\n edges = apply_edge_detection(frame)\n\n # transform image to bird view\n warped = warped_img(edges, M)\n\n # init out image which will draw lane line on it then weight it with original frame\n out_img = np.zeros_like(warped)\n if len(warped.shape) == 3 and warped.shape[2] == 3:\n pass\n else:\n out_img = np.dstack((out_img, out_img, out_img))\n\n # if line not detected, apply sliding window\n if not left_line.detected or not right_line.detected:\n leftx, lefty, rightx, righty = sliding_window(warped, 9, 200)\n\n # if already detected apply search around detected line\n else:\n leftx, lefty = search_around_poly(left_line, warped)\n rightx, righty = search_around_poly(right_line, warped)\n\n # will used for plotting line, find x fitted\n ploty = np.linspace(warped.shape[0] // 4, warped.shape[0] - 1, warped.shape[0])\n\n # check if at least 100 pixels detected as line\n if len(leftx) > 100 and len(rightx) > 100:\n\n # make detected flag true\n 
left_line.detected = True\n right_line.detected = True\n\n left_line.current_x = leftx\n left_line.current_y = lefty\n\n right_line.current_x = rightx\n right_line.current_y = righty\n\n left_line.fit_polynomial(ploty)\n right_line.fit_polynomial(ploty)\n\n else:\n print(\"Line not detected in this frame \")\n # we just draw line form previous frame\n\n # make detected flag true\n left_line.detected = False\n right_line.detected = False\n\n # update Lane line radius\n left_line.radius()\n right_line.radius()\n\n # avg radius of to lines, and plot it\n radius = (left_line.radius_of_curvature + right_line.radius_of_curvature) // 2\n frame = write_text(frame, \"Radius of Curvature = \" + str(radius) + \" M\", pos=(20, 50))\n\n # calculate Alignment ( how much car away from center between Lane lines\n dir = \"Left\" # car far from left or right\n\n left_line.car_offset(frame.shape) # distance from left line\n right_line.car_offset(frame.shape) # distance from right line\n\n distance = round(right_line.line_base_pos - left_line.line_base_pos, 2)\n\n if distance < 0: # car far away from left line not right line\n distance = -distance\n dir = \"Right\"\n frame = write_text(frame, \"Vehicle is {}m {} of center\".format(distance, dir), pos=(20, 80))\n\n # ** plot lane lines on image **\n # left_line.draw_line(out_img, ploty)\n # right_line.draw_line(out_img, ploty)\n\n # color pixel which belong to lane lines\n left_line.color_pixel(out_img, (255, 0, 0))\n right_line.color_pixel(out_img, (255, 100, 0))\n\n # fit green triangle in area between lane lines\n pts_left = np.array([np.transpose(np.vstack([left_line.bestx, ploty]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([right_line.bestx, ploty])))])\n pts = np.hstack((pts_left, pts_right))\n\n # Draw the lane onto the warped blank image\n cv2.fillPoly(out_img, np.int_([pts]), (0, 255, 0))\n\n # return image to normal view from bird view\n out_img_undit = warped_img(out_img, Minv)\n\n # weight out_image_undit with original frame\n frame = cv2.addWeighted(out_img_undit, 0.5, frame, 1, 0)\n frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)\n\n cv2.imshow(\"frame\", frame)\n\n # write video\n if save:\n out.write(frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n # When everything done, release the capture\n cap.release()\n cv2.destroyAllWindows()", "def rdmb_povray_save(out_file,\n vs,\n ucs, vcs,\n width=800, height=600,\n rotx=0, roty=0, rotz=0,\n angle=14):\n\n ucmax = 6.0\n ucs = ucs / ucmax\n ucs[ucs > 1.0] = 1.0\n # ucs = ucs / np.max(ucs)\n\n rot1 = [rotx, 0, 0]\n rot2 = [0, roty, 0]\n rot3 = [0, 0, rotz]\n\n camera = Camera('location', [0, 0, -25],\n 'look_at', [0, 0, 0],\n 'angle', angle,\n 'right x*image_width/image_height')\n\n light = LightSource([-3, 2, -6], 'color', [1.0, 1.0, 1.0], 'parallel')\n light2 = LightSource([2, -2, -6], 'color', [0.6, 0.6, 0.6], 'parallel')\n background = Background('color', [1, 1, 1, 1])\n\n spheres = [Sphere(v, 0.02,\n Finish('ambient', 0.2, 'diffuse', 0.8, 'phong', 1.0),\n Texture(Pigment('color',\n [0.3+uc*0.7, 0.2+uc*0.8, 0.2+uc*0.8])),\n 'rotate', rot1,\n 'rotate', rot2,\n 'rotate', rot3) for v, uc in zip(vs, ucs)]\n\n objects = [light, light2, background] + spheres\n\n scene = Scene(camera, objects=objects)\n scene.render(out_file, width=width, height=height,\n output_alpha=True, antialiasing=0.001,\n tempfile=out_file+\"__temp__.pov\")", "def savepos(self):\n self.out.write(self.csi + \"s\")", "def cam_calibration():\n # read all calibration images in a folder with similar names\n 
images = glob.glob('./camera_cal/calibration*.jpg')\n\n # calibrate camera and read object-points (3D), image points (2D) and image shape\n objpoints, imgpoints, img_shape = calibrate_camera(images)\n print(\"DONE: Camera calibration\")\n # save calibration parameters' pickle file\n save_calib_params(objpoints, imgpoints, img_shape)\n print(\"Calibration parameters pickle file saved \")", "def saveCalibrationPoints(self):\n\n if self.kinectCalibrated == True:\n with open('cali_points.csv', 'wb') as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=',')\n for row in range(5):\n csvwriter.writerow(self.rgb_click_points[row])\n for row in range(5): \n csvwriter.writerow(self.depth_click_points[row])\n pass", "def save_calib_data(self):\r\n \r\n #get data to save\r\n x0 = self.ui.x0.value()\r\n x1 = self.ui.x1.value()\r\n y0 = self.ui.y0.value()\r\n y1 = self.ui.y1.value()\r\n \r\n directory, fileName = os.path.split(self.ui.imagePath.text())\r\n nofpixels = (max(x0,x1)-min(x0,x1))*(max(y0,y1)-min(y0,y1))\r\n\r\n #create a list with everything to be saved \r\n #(fast way in Python to build a string)\r\n strList = [self.ui.filmNumber.text(),#indentifier\r\n fileName, #file name\r\n \"{:d}\".format(x0),#coordinates\r\n \"{:d}\".format(y0),\r\n \"{:d}\".format(x1),\r\n \"{:d}\".format(y1),\r\n \"{:d}\".format(nofpixels)] \r\n \r\n \r\n #save the channel data\r\n for channel in [0,1,2]:\r\n avg = np.average(self.npImg[y0:y1,x0:x1,channel])\r\n std = np.std(self.npImg[y0:y1,x0:x1,channel])\r\n strList.append(\"{:.3f}\".format(avg))\r\n strList.append(\"{:.3f}\".format(std))\r\n\r\n #concatenate the list, using tab as a seperator\r\n saveStr = '\\t'.join(strList)+\"\\n\"\r\n \r\n self.saveTablePath = self.check_save_table_path(self.ui.saveTablePath.text())\r\n \r\n if self.saveTablePath == \"\":\r\n logging.error(\"no valid file selected, nothing written\")\r\n else:\r\n with open(self.saveTablePath,\"a\") as saveTable:\r\n saveTable.write(saveStr)\r\n logging.info((\"info for \"+self.ui.filmNumber.text()+\" written to file\"))", "def outputPulses(self,filename):\n np.save(filename,self.getData())\n return", "def writeEcMaps( self ):\n\n self.logger.info( 'writeEcMaps: START' )\n\n self.logger.info( 'writeEcMaps: insert file will be ecMapsInsert.psql' )\n\n ecMapsFile = self.openInsertFile( 'ecMapsInsert.psql' )\n\n self.logger.info( 'writeEcMaps: keggreader.getEcMaps(): START' )\n\n ecMaps = self.reader.getEcMaps()\n\n self.logger.info( 'writeEcMaps: keggreader.getEcMaps(): START' )\n\n for ec,mapNumbers in ecMaps.iteritems():\n ecId = self.importerEc.ecsInserted[ ec ]\n \n for mapNumber in mapNumbers:\n\n if mapNumber in self.importerPathway.pathwayMapsInserted:\n\n mapId = self.importerPathway.pathwayMapsInserted[ mapNumber ]\n\n #self.writeEcMapsFile( ecMapsFile, ecId, mapId )\n self.writeFile( ecMapsFile, 'ec_maps', [ str(ecId), str(mapId) ] )\n\n self.logger.info( 'writeEcMaps: DONE' )", "def save_pose(msg, t, text):\n text.write(\"%i.%09i\\t%f\\t%f\\t%f\\t%f\\t%f\\t%f\\t%f\\n\" \n %( t.secs, t.nsecs,\n msg.pose.position.x, msg.pose.position.y, msg.pose.position.z,\n msg.pose.orientation.x, msg.pose.orientation.y, msg.pose.orientation.z, msg.pose.orientation.w))", "def render_save(scene, cam, globalIdx, trajDir, camDir, NI=1280, NJ=720):\n #render image/convert to bimg\n expimg = scene.render(cam, NI, NJ);\n bimg = convert_image(expimg); \n exp_fname = trajDir + \"/exp_%(#)06d.png\"%{\"#\":globalIdx};\n save_image(bimg, exp_fname); \n\n #save cam\n cam_name = camDir + 
\"/cam_%(#)06d.txt\"%{\"#\":globalIdx}\n save_perspective_camera(cam, cam_name)\n remove_from_db([cam, expimg, bimg])", "def save_coefficients(R_op, R_po, T_op, T_po, RMSE, path):\n cv_file = cv2.FileStorage(path, cv2.FILE_STORAGE_WRITE)\n \n # Rotation matrix \n print(\"R_op: \" + str(R_op))\n cv_file.write(\"R_op\", R_op )\n\n print(\"R_po: \" + str(R_po))\n cv_file.write(\"R_po\", R_po)\n\n # Tranlastion vector\n cv_file.write(\"T_op\", T_op)\n print(\"T_op: \" + str(T_op))\n\n print(\"T_po: \" + str(T_po))\n cv_file.write(\"T_po\", T_po)\n\n print(\"RMSE: \", RMSE, \" (mm)\")\n cv_file.write(\"RMSE\", RMSE)\n\n # note you *release* you don't close() a FileStorage object\n cv_file.release()", "def export_camera(file, scene, global_matrix, render, tab_write):\n camera = scene.camera\n\n # DH disabled for now, this isn't the correct context\n active_object = None # bpy.context.active_object # does not always work MR\n matrix = global_matrix @ camera.matrix_world\n focal_point = camera.data.dof.focus_distance\n\n # compute resolution\n q_size = render.resolution_x / render.resolution_y\n tab_write(file, \"#declare camLocation = <%.6f, %.6f, %.6f>;\\n\" % matrix.translation[:])\n tab_write(\n file,\n (\n \"#declare camLookAt = <%.6f, %.6f, %.6f>;\\n\"\n % tuple(degrees(e) for e in matrix.to_3x3().to_euler())\n ),\n )\n\n tab_write(file, \"camera {\\n\")\n if scene.pov.baking_enable and active_object and active_object.type == \"MESH\":\n tab_write(file, \"mesh_camera{ 1 3\\n\") # distribution 3 is what we want here\n tab_write(file, \"mesh{%s}\\n\" % active_object.name)\n tab_write(file, \"}\\n\")\n tab_write(file, \"location <0,0,.01>\")\n tab_write(file, \"direction <0,0,-1>\")\n\n else:\n if camera.data.type == \"ORTHO\":\n # XXX todo: track when SensorHeightRatio was added to see if needed (not used)\n sensor_height_ratio = (\n render.resolution_x * camera.data.ortho_scale / render.resolution_y\n )\n tab_write(file, \"orthographic\\n\")\n # Blender angle is radian so should be converted to degrees:\n # % (camera.data.angle * (180.0 / pi) )\n # but actually argument is not compulsory after angle in pov ortho mode\n tab_write(file, \"angle\\n\")\n tab_write(file, \"right <%6f, 0, 0>\\n\" % -camera.data.ortho_scale)\n tab_write(file, \"location <0, 0, 0>\\n\")\n tab_write(file, \"look_at <0, 0, -1>\\n\")\n tab_write(file, \"up <0, %6f, 0>\\n\" % (camera.data.ortho_scale / q_size))\n\n elif camera.data.type == \"PANO\":\n tab_write(file, \"panoramic\\n\")\n tab_write(file, \"location <0, 0, 0>\\n\")\n tab_write(file, \"look_at <0, 0, -1>\\n\")\n tab_write(file, \"right <%s, 0, 0>\\n\" % -q_size)\n tab_write(file, \"up <0, 1, 0>\\n\")\n tab_write(file, \"angle %f\\n\" % (360.0 * atan(16.0 / camera.data.lens) / pi))\n elif camera.data.type == \"PERSP\":\n # Standard camera otherwise would be default in pov\n tab_write(file, \"location <0, 0, 0>\\n\")\n tab_write(file, \"look_at <0, 0, -1>\\n\")\n tab_write(file, \"right <%s, 0, 0>\\n\" % -q_size)\n tab_write(file, \"up <0, 1, 0>\\n\")\n tab_write(\n file,\n \"angle %f\\n\"\n % (2 * atan(camera.data.sensor_width / 2 / camera.data.lens) * 180.0 / pi),\n )\n\n tab_write(\n file,\n \"rotate <%.6f, %.6f, %.6f>\\n\" % tuple(degrees(e) for e in matrix.to_3x3().to_euler()),\n )\n\n tab_write(file, \"translate <%.6f, %.6f, %.6f>\\n\" % matrix.translation[:])\n if camera.data.dof.use_dof and (focal_point != 0 or camera.data.dof.focus_object):\n tab_write(\n file, \"aperture %.3g\\n\" % (1 / (camera.data.dof.aperture_fstop * 10000) * 1000)\n )\n 
tab_write(\n file,\n \"blur_samples %d %d\\n\"\n % (camera.data.pov.dof_samples_min, camera.data.pov.dof_samples_max),\n )\n tab_write(file, \"variance 1/%d\\n\" % camera.data.pov.dof_variance)\n tab_write(file, \"confidence %.3g\\n\" % camera.data.pov.dof_confidence)\n if camera.data.dof.focus_object:\n focal_ob = scene.objects[camera.data.dof.focus_object.name]\n matrix_blur = global_matrix @ focal_ob.matrix_world\n tab_write(file, \"focal_point <%.4f,%.4f,%.4f>\\n\" % matrix_blur.translation[:])\n else:\n tab_write(file, \"focal_point <0, 0, %f>\\n\" % focal_point)\n if camera.data.pov.normal_enable:\n tab_write(\n file,\n \"normal {%s %.4f turbulence %.4f scale %.4f}\\n\"\n % (\n camera.data.pov.normal_patterns,\n camera.data.pov.cam_normal,\n camera.data.pov.turbulence,\n camera.data.pov.scale,\n ),\n )\n tab_write(file, \"}\\n\")", "def save_pca(self, filepath):\n mean_beam, principal_components, variances = self.pca()\n image_shape = np.array(self.image_shape)\n with open(filepath, 'wb') as f:\n np.save(f, image_shape)\n np.save(f, mean_beam)\n np.save(f, principal_components)\n np.save(f, variances)\n np.save(f, self.mask)", "def saveCLIPPED_DR4(fileOUT, pathOUT, time, flux, xPOS, yPOS, temperature, exposureTIME, numberSTACKS, PSFC1, PSFC2, RTSC, **kwargs):\n # Checking if the last character of pathOUT is an '/'\n if not(pathOUT[-1] == '/'):\n pathOUT += '/'\n # Checking if the suffix of the file is given\n if not fileOUT[-4:] in ['.txt', '.dat']:\n fileOUT += '.dat' \n \n # Preparing the header of the output file\n headerSTRING = 'BRITE photometry, which was clipped for outliers on ' + strftime(\"%Y-%m-%d %H:%M:%s\") + '.'\n headerSTRING +='\\n----------------------------------------'\n headerSTRING +='\\nColumn1: time measurements [d]'\n headerSTRING +='\\nColumn2: flux [adu]'\n headerSTRING +='\\nColumn3: CCD centroid x-position [pixel]'\n headerSTRING +='\\nColumn4: CCD centroid y-position [pixel]'\n headerSTRING +='\\nColumn5: CCD temperature [deg]'\n headerSTRING +='\\nColumn6: exposure time of the observations [s]'\n headerSTRING +='\\nColumn7: number of stacked observations corresponding to one datapoint []'\n headerSTRING +='\\nColumn8: PSF blurring coeffient 1 []'\n headerSTRING +='\\nColumn9: PSF blurring coeffient 2 []'\n headerSTRING +='\\nColumn10: RTS estimate coeffient []'\n headerSTRING +='\\n----------------------------------------'\n \n # Constructing the matrix\n dtOUT = np.dtype([('time', np.float), ('flux', np.float), ('xPOS', np.float), ('yPOS', np.float), ('temperature', np.float), ('exposureTIME', np.float), ('numberSTACKS', np.float), ('PSFC1', np.float), ('PSFC2', np.float), ('RTSC', np.float)])\n matrixOUT = np.zeros(len(time), dtype=dtOUT)\n matrixOUT['time'] = time; matrixOUT['flux'] = flux; matrixOUT['xPOS'] = xPOS; matrixOUT['yPOS'] = yPOS; matrixOUT['temperature'] = temperature; matrixOUT['exposureTIME'] = exposureTIME; matrixOUT['numberSTACKS'] = numberSTACKS; matrixOUT['PSFC1'] = PSFC1; matrixOUT['PSFC2'] = PSFC2; matrixOUT['RTSC'] = RTSC\n \n # The actual saving using a numpy.savetxt \n np.savetxt(pathOUT + fileOUT, matrixOUT, fmt=('%.12e %.7f %.4f %.4f %.4f %.2f %i %.6f %.6f %.2f'), delimiter=' ', header=headerSTRING, comments='#')", "def apply_calibration(self, poses):\n new_poses = []\n for pose in poses:\n T = pose.copy()\n T[:3, :3] = np.eye(3)\n new_poses.append(T)\n return new_poses", "def rdmb_povray_save_q(out_file,\n vs,\n ucs, vcs,\n width=800, height=600,\n rotx=0, roty=0, rotz=0,\n angle=14):\n\n ucmax = 6.0\n ucs = ucs / ucmax\n 
ucs[ucs > 1.0] = 1.0\n # ucs = ucs / np.max(ucs)\n\n rot1 = [rotx, 0, 0]\n rot2 = [0, roty, 0]\n rot3 = [0, 0, rotz]\n\n camera = Camera('location', [0, 0, -25],\n 'look_at', [0, 0, 0],\n 'angle', angle,\n 'right x*image_width/image_height')\n\n light = LightSource([0, 0, -10],\n 'color', [1.0, 1.0, 1.0], 'parallel', 'shadowless')\n light1 = LightSource([-10, 0, 0],\n 'color', [0.5, 0.5, 0.5], 'parallel', 'shadowless')\n light2 = LightSource([10, 0, 0],\n 'color', [0.5, 0.5, 0.5], 'parallel', 'shadowless')\n light3 = LightSource([0, -10, 0],\n 'color', [0.5, 0.5, 0.5], 'parallel', 'shadowless')\n light4 = LightSource([0, 10, 0],\n 'color', [0.5, 0.5, 0.5], 'parallel', 'shadowless')\n\n background = Background('color', [1, 1, 1, 1])\n\n spheres = [Sphere(v, 0.02,\n Finish('ambient', 1.0),\n Texture(Pigment('color',\n [0.3+uc*0.7, 0.2+uc*0.8, 0.2+uc*0.8])),\n 'rotate', rot1,\n 'rotate', rot2,\n 'rotate', rot3) for v, uc in zip(vs, ucs)]\n\n objects = [light, light1, light2, light3, light4, background] + spheres\n\n scene = Scene(camera, objects=objects)\n scene.render(out_file, width=width, height=height,\n output_alpha=True, antialiasing=0.001,\n tempfile=out_file+\"__temp__.pov\")", "def mapBackToSurface(array,filename):\n #### Map back to surface\n if array.shape[0]==360:\n out_array = np.zeros((glasser2.shape[0],3))\n\n roicount = 0\n for roi in range(360):\n for col in range(array.shape[1]):\n vertex_ind = np.where(glasser2==roi+1)[0]\n out_array[vertex_ind,col] = array[roicount,col]\n\n roicount += 1\n\n else:\n out_array = array\n\n #### \n # Write file to csv and run wb_command\n np.savetxt(filename + '.csv', out_array,fmt='%s')\n wb_file = filename + '.dscalar.nii'\n wb_command = 'wb_command -cifti-convert -from-text ' + filename + '.csv ' + glasserfile2 + ' ' + wb_file + ' -reset-scalars'\n os.system(wb_command)\n os.remove(filename + '.csv')", "def pov_render(self, camera_position = (0,0,-10), camera_target = (0,0,0)):\n\n \"\"\"\n f=pov.File(\"demo.pov\",\"colors.inc\",\"stones.inc\")\n \n cam = pov.Camera(location=camera_position, sky=(1,0,1),look_at=camera_target)\n light = pov.LightSource( camera_position, color=\"White\")\n \n povObjs = [cam, light]\n for obj in self.objects[1:]:\n # test coordinate transfroms\n # print M\n # vectors = np.array([[0,0,0,1], #origin\n # [1,0,0,1], # x\n # [0,1,0,1], # y\n # [0,0,1,1]]).transpose() # z\n # origin,x,y,z = (T*vectors).transpose()\n povObjs.append(povObj(obj))\n \n #print tuple(povObjs)\n f.write(*tuple(povObjs))\n f.close()\n #sphere1 = pov.Sphere( (1,1,2), 2, pov.Texture(pov.Pigment(color=\"Yellow\")))\n #sphere2 = pov.Sphere( (0,1,2), 2, pov.Texture(pov.Pigment(color=\"Yellow\")))\n # composite2 = None#pov.Difference(sphere1, sphere2)\n # \n \n \n \n \n \n # f.write( cam, composite2, light )\n # f.close()\n subprocess.call(\"povray +H2400 +W3200 demo.pov\", shell=True)\n os.system(\"open demo.png\")\n \"\"\"", "def get_calibration_info():\n mjpeg_info_dict = redis_tools.get_dict(db,'mjpeg_info_dict')\n calibration_info = mct_introspection.get_homography_calibration_info()\n for camera in mjpeg_info_dict:\n if not camera in calibration_info:\n calibration_info[camera] = {'modified': ''}\n return calibration_info", "def save_file(camera, frame):\n save = input(\"Would you like to save your drawing? Enter yes or no \")\n if save == \"yes\" or save == \"y\" or save == \"ye\" or save == \"yes \": # accounting for typos\n name = input(\"What would you like to name your masterpiece? 
\")\n filename = 'images/' + name + '.png'\n cv2.imwrite(filename, cv2.flip(frame,1)) # saves the image as the last frame\n camera.release()\n pygame.quit()\n\n # reopen saved picture to display for user\n img = cv2.imread(filename, 1)\n b,g,r = cv2.split(img) # get b,g,r\n rgb_img = cv2.merge([r,g,b]) # convert from bgr colorspace to rgb\n crop_img = rgb_img[36:450, 0:600] # crop out the colorbar\n cv2.imshow(filename, crop_img)\n cv2.imwrite(filename, crop_img)\n cv2.waitKey(10000)\n cv2.destroyAllWindows()\n camera.release()\n pygame.quit() # cleanup the camera and close any open windows\n else:\n print(\"Thank you for trying CVPaint!\")\n pygame.quit()\n camera.release()\n cv2.destroyAllWindows()", "def write_map(self, file_name):\n\n if self.pixel == \"HEALPIX\":\n hp.fitsfunc.write_map(file_name, self.data, overwrite=True)\n if self.pixel == \"CAR\":\n enmap.write_map(file_name, self.data)", "def save(self):\n fname = self.dir_saving+str(self.folder)+'/colours.txt'\n if not os.path.isfile(fname):\n self.file_com = open(fname, 'w')\n else:\n print 'warning this person has an objects file in its dir, I will rewrite it.'\n self.file_com = open(fname, 'w')\n\n self.file_com.write(self.all_objects['upper']+','+self.all_objects['lower'])\n # self.all_objects = {}\n self.first_click = 1\n self.file_com.close()\n self.NextVideo()\n # count = 1\n # for im_name in self.onlyfiles:\n # img = cv2.imread(self.dir2+im_name)\n # cv2.rectangle(img,(0,0),(250,50),(255,255,255),-1)\n # cv2.putText(img,'frame : '+str(count),(10,30), cv2.FONT_HERSHEY_SIMPLEX, 1,(0,0,0),2)\n # img = self.add_objects(img)\n # cv2.imwrite(self.dir_saving+str(self.folder)+'/obj_images/'+im_name,img)\n # count+=1\n self.clear", "def _write_overlay_info(self):\n cv2.putText(\n img=self.output,\n text=f'X: {float(self.estimated_distance[0]):6.2f} m',\n org=(25, 25),\n fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n color=(0, 0, 255),\n fontScale=0.5\n )\n cv2.putText(\n img=self.output,\n text=f'Y: {float(self.estimated_distance[1]):6.2f} m',\n org=(25, 50),\n fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n color=(0, 0, 255),\n fontScale=0.5\n )\n cv2.putText(\n img=self.output,\n text=f'Z: {float(self.estimated_distance[2]):6.2f} m',\n org=(25, 75),\n fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n color=(0, 0, 255),\n fontScale=0.5\n )\n cv2.putText(\n img=self.output,\n text=f'Rotation: {float(self.estimated_rotation):6.2f} rad',\n org=(25, 100),\n fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n color=(0, 0, 255),\n fontScale=0.5\n )" ]
[ "0.6014014", "0.5977247", "0.5700586", "0.56356347", "0.56256527", "0.5615104", "0.560124", "0.55990064", "0.55756223", "0.5424815", "0.54085475", "0.5403678", "0.5390949", "0.5362901", "0.53255045", "0.53230655", "0.5303748", "0.5303022", "0.5295456", "0.52894944", "0.5287872", "0.528489", "0.5254615", "0.5249531", "0.5244113", "0.5238727", "0.52250737", "0.5200657", "0.519667", "0.518278" ]
0.6023144
0
Draw an n x n grid with edges / nodes from X in red
def draw_grid(n,X):
    G = nx.grid_2d_graph(n+1,n+1)
    set_node_colors(G,G.nodes(),'k')
    set_edge_colors(G,G.edges(),'k')
    set_edge_weights(G,G.edges(),0.5)
    set_node_colors(G,edge_subgraph_nodes(X),'r')
    set_edge_colors(G,X,'r')
    set_edge_weights(G,X,1)
    nc = [G.node[n]['color'] for n in G.nodes()]
    ec = [G[i][j]['color'] for i,j in G.edges()]
    w = [G[i][j]['weight'] for i,j in G.edges()]
    nx.draw(G,grid_positions(G,2),node_size=0.5,width=w,node_color=nc,edge_color=ec)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_grid(self):\n plt.imshow(py.array(\n map(lambda x: map(lambda y: mplc.colorConverter.to_rgb(colord[y]), x), self.create_grid(self.graph))),\n interpolation='nearest')\n plt.show()", "def draw_grid(self):\n for i in range(N * N + 1):\n color = \"blue\" if i % N == 0 else \"gray\"\n x0 = MARGIN + i * SIDE\n y0 = MARGIN\n x1 = MARGIN + i * SIDE\n y1 = HEIGHT - MARGIN\n self.canvas.create_line(x0, y0, x1, y1, fill=color)\n\n x0 = MARGIN\n y0 = MARGIN + i * SIDE\n x1 = WIDTH - MARGIN\n y1 = MARGIN + i * SIDE\n self.canvas.create_line(x0, y0, x1, y1, fill=color)", "def draw(self):\n nx.draw_networkx(self.rc)", "def draw(self):\n for x in range(self.numRows):\n print self.grid[x]", "def draw_grid(self):\n for x in range(0, WIDTH, TILESIZE):\n pg.draw.line(self.screen, LIGHTGREY, (x, 0), (x, HEIGHT))\n \n for y in range(0, HEIGHT, TILESIZE):\n pg.draw.line(self.screen, LIGHTGREY, (0, y), (WIDTH, y))", "def visualize(G, color=None, figsize=(5, 5)):\n plt.figure(figsize=figsize)\n plt.xticks([])\n plt.yticks([])\n nx.draw_networkx(G,\n pos=nx.spring_layout(G, seed=42),\n with_labels=True,\n node_color=color,\n cmap=\"Set2\")\n plt.show();", "def draw_occupied_cells(self):\n reds = [cell for cell in self.game.get_cells() if cell.player == 1]\n blacks = [cell for cell in self.game.get_cells() if cell.player == 2]\n nx.draw_networkx_nodes(self.G, pos=self.positions, nodelist=reds,\n edgecolors='black', node_color='red', linewidths=2)\n nx.draw_networkx_nodes(self.G, pos=self.positions, nodelist=blacks,\n edgecolors='black', node_color='black', linewidths=2)", "def draw_coloring(G, coloring, colors):\n fig = plt.figure()\n n_colors = len(colors)\n\n pos = nx.spring_layout(G)\n for i in range(n_colors):\n nx.draw_networkx_nodes(G, pos, [x for x in G.nodes() if coloring[x] == i], node_color=colors[i])\n nx.draw_networkx_labels(G, pos)\n nx.draw_networkx_edges(G, pos, width=1.0, alpha=0.5)\n\n plt.axis('off')\n plt.show()\n return fig", "def draw_edges(self):\n nx.draw_networkx_edges(self.G, pos=self.positions)", "def draw_nodes(self):\n pass", "def drawGrid(self):\n for div in range(NBCELL):\n sec = SSIZE*div\n self.can.create_line(0, sec, GSIZE, sec, width=3, fill=GFILL)\n self.can.create_line(sec, 0, sec, GSIZE, width=3, fill=GFILL)", "def grid_edges(num_node):\n m = math.sqrt(num_node)\n top = []\n bottom = []\n left = []\n right = []\n for node_id in range(1, num_node + 1):\n if node_id % m == 1:\n left.append(node_id)\n elif node_id % m == 0:\n right.append(node_id)\n elif node_id <= m:\n top.append(node_id)\n elif node_id >= num_node - m + 1:\n bottom.append(node_id)\n else:\n pass\n return (top, bottom, left, right)", "def draw_edges(self):\n pass", "def generate_grid():\n y_offset = -10\n for a in range(20):\n # Line 1\n # Adds offset to the x position of the squares\n x_offset = 10\n for b in range(1):\n # Adds offset to the y position of the squares\n y_offset += 20\n for c in range(20):\n # Prints a row of squares(5 squares along the x)\n square(x_offset, y_offset, 20, 20, dark_green)\n for d in range(1):\n # Adds x offset for the next line of squares on the y axis\n x_offset += 40\n # Line 2 (needs 2 lines because the offset of each line)\n # Adds offset to the x position of the squares\n x_offset = 30\n for e in range(1):\n # Adds offset to the y position of the squares\n y_offset += 20\n for f in range(20):\n # Prints a row of squares(5 squares along the x)\n square(x_offset, y_offset, 20, 20, dark_green)\n for g in range(1):\n # Adds x offset for the next line of squares on the 
y axis\n x_offset += 40", "def shade_neighbours(x: int, y: int) -> None:\r\n if x > 0:\r\n safeboard[x-1, y] = 0\r\n if x < shape-1:\r\n safeboard[x+1, y] = 0\r\n if y > 0:\r\n safeboard[x, y-1] = 0\r\n if y < shape-1:\r\n safeboard[x, y+1] = 0\r\n safeboard[x, y] = 0", "def draw_grid(self):\n\n screen.fill(GREY)\n\n for row in self.grid:\n for cell in row:\n if cell.root:\n color = GREEN\n elif cell.goal:\n color = RED\n elif cell.value:\n color = DARK_BLUE\n elif cell.visited:\n color = LIGHT_BLUE\n elif cell.f:\n color = LIGHT_GREEN\n elif cell.wall:\n color = GRAY\n else:\n color = WHITE\n\n pygame.draw.rect(screen, color, cell.rect)\n\n x, y = cell.rect.x, cell.rect.y\n\n if cell.g:\n self.draw_score(x + 2, y + 2, cell.g)\n if cell.h:\n self.draw_score(x + 18, y + 2, cell.h)\n if cell.f:\n self.draw_score(x + 2, y + self.cell_size - 10, cell.f)", "def draw_grid(grid):\n rows = grid.shape[0]\n cols = grid.shape[1]\n for row in range(rows):\n for col in range(cols):\n if grid[row, col] == 0: # empty\n sys.stdout.write(\" . \")\n elif grid[row, col] == 1: # path\n sys.stdout.write(\" X \")\n elif grid[row, col] == 2:\n sys.stdout.write(\" O \")\n else:\n sys.stdout.write(\" @ \")\n\n if col % cols == cols - 1:\n sys.stdout.write(\"\\n\")", "def draw_grid(self):\n buf = self.__hbar\n for rInd in range(self.row):\n line = '\\t|'\n for cInd in range(self.col):\n this = ((rInd * self.col) + cInd)\n cell = self.get_cell(this)\n if not cell:\n line += '%s|' % ' '.center(5)\n else:\n if this == self.new_cell:\n tmp = green(str(cell).center(5))\n else:\n tmp = str(cell).center(5)\n line += '%s|' % tmp\n buf += line + '\\n' + self.__hbar\n print(buf)", "def _build_grid(self):\n n = self.params['n']\n\n x_min, x_max = min(self.node[:, 0]), max(self.node[:, 0])\n y_min, y_max = min(self.node[:, 1]), max(self.node[:, 1])\n xv = np.linspace(x_min, x_max, num=n, endpoint=True)\n yv = np.linspace(y_min, y_max, num=n, endpoint=True)\n xg, yg = np.meshgrid(xv, yv, sparse=False, indexing='xy')\n\n return xg, yg", "def visualize(grid, board_size=16):\n visual_grid = []\n for i in range(board_size):\n row = []\n for j in range(board_size):\n row.append(grid[(j, i)])\n visual_grid.append(row)\n print(visual_grid)", "def _5x5_grid_clusters():\n return [mn(mean=np.array([i, j]), cov=np.array([[1.0, 0.0],\n [0.0, 1.0]]))\n for i in range(5)\n for j in range(5)]", "def draw_gray_grid(self):\n gray = \"#D3D3D3\"\n # Draw the vertical lines\n for x in range(0, self.width, self.scale):\n self.canvas.create_line(x, 0, x, self.height, fill=gray)\n\n # Draw the horizontal lines\n for y in range(0, self.height, self.scale):\n self.canvas.create_line(0, y, self.width, y, fill=gray)", "def drawCheckerBoard(N=5, white=GLfloat_3(1, 1, 1), black=GLfloat_3(0, 0, 0)):\r\n glDisable(GL_LIGHTING)\r\n try:\r\n for x in range(-N, N):\r\n for y in range(-N, N):\r\n if (x + y) % 2 == 0:\r\n glColor3fv(white)\r\n else:\r\n glColor3fv(black)\r\n glRectf(x, y, x + 1, y + 1)\r\n finally:\r\n glEnable(GL_LIGHTING)", "def draw_grid(self) -> None:\n grid = self.life.curr_generation\n for row in range(self.cell_height):\n for column in range(self.cell_width):\n if grid[row][column] == 1:\n color = \"green\"\n else:\n color = \"white\"\n pygame.draw.rect(\n self.screen,\n pygame.Color(color),\n (column * self.cell_size, row * self.cell_size, self.cell_size, self.cell_size),\n )", "def cell_edges(self):", "def print_grid(x):\n row = int(x/2)\n if x % 2 == 0:\n col = x\n else:\n col = x - 1\n for i in range(2):\n prow(row)\n for i in 
range(row):\n pcolumn(col)\n prow(row)", "def draw(self):\n\t\tnx_graph = self.parse_graph()\n\t\tpos = nx.spring_layout(nx_graph, k=0.15, iterations=20) # to spread out the nodes\n\n\t\tnx.draw(nx_graph, pos, edge_color=\"black\", width=1, linewidths=1, node_size=500, node_color=\"pink\", alpha=0.9, with_labels=True)\n\n\t\tedge_labels = {(edge[0], edge[1]):edge[2] for edge in self.edges}\n\t\tnx.draw_networkx_edge_labels(nx_graph, pos, edge_labels=edge_labels, font_color='red')\n\n\t\tplt.show()", "def node_colors(self, nodes):\n zmin, zmax = nodes[:, 2].min(), nodes[:, 2].max()\n start_color = np.array(self.background) + 5\n end_color = np.array(self.nodeColor)\n z = (nodes[:, 2] - zmin) / (zmax - zmin)\n # indexing [:, None] is used to explicitly state second axis\n c = (1 - z)[:, None] @ start_color[:, None].T + z[:, None] @ end_color[:, None].T\n self.wireframe_col = c\n # return c", "def draw_grid(self) -> None:\n for x in range(0, WIDTH, TILE_SIZE):\n pg.draw.line(self.screen, LIGHT_GREY, (x, INFO_HEIGHT), (x, HEIGHT))\n for y in range(INFO_HEIGHT, INFO_HEIGHT + HEIGHT, TILE_SIZE):\n pg.draw.line(self.screen, LIGHT_GREY, (0, y), (WIDTH, y))", "def create_grid(grid):\r\n for i in range(4):\r\n grid.append([0,0,0,0])" ]
[ "0.6630089", "0.647417", "0.6226133", "0.6224112", "0.6188585", "0.6169128", "0.61569834", "0.6127831", "0.61215925", "0.6074973", "0.6070254", "0.606452", "0.60638255", "0.6051185", "0.60291207", "0.6009064", "0.600086", "0.5957386", "0.59433025", "0.59421194", "0.59329593", "0.59322345", "0.5924985", "0.59137124", "0.589027", "0.58834094", "0.58810824", "0.5873422", "0.58482844", "0.5838252" ]
0.7901583
0
Only show the debug toolbar to users with the superuser flag.
def _custom_show_toolbar(request: 'HttpRequest') -> bool:
    return DEBUG and request.user.is_superuser
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _custom_show_toolbar(request):\n return DEBUG and request.user.is_superuser", "def show_toolbar(request: HttpRequest) -> bool:\n conditions = (\n settings.DEBUG\n and request.META.get('REMOTE_ADDR', None) in settings.INTERNAL_IPS,\n request.user.is_superuser,\n )\n disqualifiers = (settings.TESTING,)\n if any(conditions) and not any(disqualifiers):\n return True\n return False", "def test_debugtoolbar_visible(self) -> None:\n if os.getenv(\"TEST_WITH_DEBUGTOOLBAR\", \"off\") != \"on\":\n self.skipTest(\"debug-toolbar is disabled, no need to test.\")\n\n response = self.client.get(\"/admin/\")\n self.assertIn(\n 'id=\"djDebug\"',\n str(response.content),\n '`id=\"djDebug\"` tag not found in HTML',\n )", "def admin_only():\n return 'Super-seekrit admin page.'", "def test_should_render_with_superuser_in_read_only(self) -> None:\n with override_feature_check(unified_banner_feature.feature_id, False):\n super().test_should_render_with_superuser_in_read_only()", "def test_should_render_with_superuser_in_read_only(self) -> None:\n with override_feature_check(unified_banner_feature.feature_id, False):\n super().test_should_render_with_superuser_in_read_only()", "def is_user_admin(request):\n return request.user.is_superuser", "def test_superuser_permission_with_super_user(self):\n with self.settings(MAINTENANCE_MODE_PERMISSION_PROCESSORS=(\n 'maintenancemode.permission_processors.is_superuser',\n )):\n self.client.login(username='super_user', password='maintenance_pw')\n response = self.client.get('/')\n self.assertNormalMode(response)", "def super_admin(self, super_admin):\n\n self._super_admin = super_admin", "def is_superuser(self):\n return self.is_admin", "def is_superuser(connection, window_info, kwargs):\n return window_info and window_info.is_superuser", "def showZoomToolbar():\n\treturn False", "def has_access_to_admin_console(self):\n return self.is_superuser or self.has_perm('user.can_view_admin_console')", "def is_superuser():\n if sys.version > \"2.7\":\n for uid in os.getresuid():\n if uid == 0:\n return True\n else:\n if os.getuid() == 0 or os.getegid() == 0:\n return True\n return False", "def should_show_toolbar(self, request):\n if ADMIN_BASE and request.path.startswith(ADMIN_BASE):\n return False\n # check session\n if request.session.get('cms_edit', False):\n return True\n # check GET\n if 'edit' in request.GET:\n request.session['cms_edit'] = True\n return True\n return False", "def has_super_access():\n current_user = frappe.get_doc('User', frappe.session.user)\n roles = set([role.role for role in current_user.roles])\n return bool(roles & {'Administrator', 'Instructor', 'Education Manager', 'System Manager', 'Academic User'})", "def show_staff_ui(self):\n return self.is_course_staff and not self.in_studio_preview", "def isSuper(self):\n user = self.getSession()\n return self.pipe.auth.isSuper(user)", "def enable_debugger(app):\n import aiohttp_debugtoolbar\n\n # dev mode only\n # this will be served at API_SERVER_URL/_debugtoolbar\n aiohttp_debugtoolbar.setup(app)", "def enable_debugger(app):\n import aiohttp_debugtoolbar\n\n # dev mode only\n # this will be served at API_SERVER_URL/_debugtoolbar\n aiohttp_debugtoolbar.setup(app)", "def _should_profile_development_default():\n return True", "def displayUser(self, user):\n root = (user == 'root')\n self.grouplist.setEnabled(not root)\n self.delete.set_sensitive((not root) and (user != runninguser))", "def show_user_info():\n \n vprint( 'Effective User :', os.geteuid())\n vprint( 'Effective Group :', os.getegid())\n 
vprint( 'Actual User :', os.getuid(), 'Login user:', os.getlogin())\n vprint( 'Actual Group :', os.getgid())\n vprint( 'Actual Groups :', os.getgroups())\n return", "def test_user_is_superuser(self):\n super_user = self.create_superuser()\n regular_user = self.create_user()\n thread = self.create_thread(status='deleted')\n message = thread.first_message\n self.assertTrue(message.visible_to_user(super_user))\n self.assertFalse(message.visible_to_user(regular_user))", "def testSuperUserPermission(self):\n self.login_user(self.superuser)\n response = self.client.get(self.url, self.args)\n self.assertEqual(response.status_code, 200)", "def user_is_admin_or_superadmin(userobj):\n if userobj.is_superuser:\n return True\n else:\n return user_is_admin(userobj)", "def debug():\n assert current_app.debug == False, \"Don't panic! You're here by request of debug()\"", "def superuser_required(view_func):\n return user_passes_test(lambda u: u.is_superuser, login_url='/', redirect_field_name=None)(view_func)", "def debug_allowed(request_object):\n \n return utilities.debug_allowed(request_object)", "def is_developer(self):\n return int(self.developer_status) == 2" ]
[ "0.817702", "0.67734593", "0.61041766", "0.6046557", "0.5890961", "0.5890961", "0.5883483", "0.5869873", "0.58142656", "0.58089703", "0.5765674", "0.57116914", "0.57055", "0.56569725", "0.5652845", "0.5607011", "0.5505614", "0.5483127", "0.5456996", "0.5456996", "0.5435421", "0.53692216", "0.5362425", "0.53458285", "0.53446025", "0.53254884", "0.5295861", "0.52470106", "0.5246212", "0.52383685" ]
0.8088223
1
Formats the location values separating keys, values and k/v pairs
>>> l = Location(42.1, 23.5, "test")
def format_geocommit(self, keyval_separator, entry_separator):
    end = entry_separator
    sep = keyval_separator

    msg = "lat" + sep + str(self.lat) + end
    msg += "long" + sep + str(self.long) + end

    for attr in self.optional_keys:
        if hasattr(self, attr):
            val = getattr(self, attr)
            if not val is None:
                msg += attr + sep + str(val) + end

    # no value separator after last value
    msg += "src" + sep + str(self.src)

    return msg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def format_pair(self, k, v):\n if isinstance(v, int):\n data_width = len(str(v)) + 1\n header_width = len(str(k))\n w = max(data_width, header_width)\n h = ('%% %us'%w)%k\n return ' '*len(h), h, '%%%ud'%w\n elif k=='dt':\n fmt = '%6.3f'\n return 6*' ', '%6s'%k, fmt\n elif isinstance(v, float):\n fmt = '% .3e'\n data_width = len(fmt%1)\n header_width = len(str(k))\n w = max(data_width, header_width)\n spaces = ' '*(w-data_width)\n h = ('%%%us'%w)%k\n return ' '*len(h), h, spaces+fmt\n elif isinstance(v, dict):\n results = [self.format_pair(k_, v_) for k_, v_ in v.items()]\n keys = ' '.join([str(r[-2]) for r in results])\n fmts = ' '.join([str(r[-1]) for r in results])\n h1 = ('%%.%us'%(len(keys))) % k\n pl = (len(keys)-len(h1)) // 2\n pr = (len(keys)-len(h1)) - pl\n h1 = '.' * pl + h1 + '.' * pr\n return h1, keys, fmts\n elif isinstance(v, h5py.ExternalLink):\n data_width = len('hdf5-link')\n header_width = len(str(k))\n w = max(data_width, header_width)\n h = ('%%%us'%w)%k\n return ' '*len(h), h, '%%%us'%w\n elif isinstance(v, h5py.VirtualLayout):\n data_width = len('hdf5-vds')\n header_width = len(str(k))\n w = max(data_width, header_width)\n h = ('%%%us'%w)%k\n return ' '*len(h), h, '%%%us'%w\n else:\n fmt = '%%%u.%us' % (self.min_str_len, self.max_str_len)\n w = len(fmt%v)\n h = ('%%%us'%w)%k\n return ' '*len(h), h, fmt", "def format_dict(kv_list):\n return '\\n'.join(['{} - {}'.format(key, value) for\n key, value in kv_list])", "def coords_dict_to_coords_string(coords):\n longitude, latitude = None, None\n for k,v in coords.items():\n if \"at\" in k:\n latitude = v\n if \"ong\" in k:\n longitude = v\n if not longitude and latitude:\n print(\"Unable to identify longitude and latitude keys\")\n return \"\"\n coords_string = \"{:.2f}_{:.2f}\".format(longitude, latitude)\n return coords_string", "def __str__(self):\n s = ''\n for i, (k, v) in enumerate(self.meters.iteritems()):\n if i > 0:\n s += ' '\n s += k + ' ' + str(v)\n return s", "def printMap(values, klab, vlab, precision, offset=16):\n\tprint(klab.ljust(offset, \" \") + vlab)\n\tfor k in values.keys():\n\t\tv = values[k]\n\t\tks = toStr(k, precision).ljust(offset, \" \")\n\t\tvs = toStr(v, precision)\n\t\tprint(ks + vs)", "def _format_entries(self):\n\n def format_item(key, value):\n if value is None:\n return str(key)\n else:\n return '%s -> %x' % (key, value,)\n\n items = self._entries.items()\n items.sort()\n return '{%s}' % (', '.join([format_item(*item) for item in items]),)", "def format_dict(d: Dict[str, float], format: str=\".4f\") -> Dict[str, float]:\n return dict((k, float(f'{v:{format}}')) for k,v in d.items())", "def __str__(self):\n return str(self._key) + \", \" + str(self._value[0]) + \", \" + str(self._value[1])", "def __str__(self):\r\n s = ''\r\n for i, (k, v) in enumerate(self.meters.items()):\r\n if i > 0:\r\n s += ' '\r\n s += k + ' ' + str(v)\r\n return s", "def __str__(self):\n s = ''\n for i, (k, v) in enumerate(self.meters.items()):\n if i > 0:\n s += ' '\n s += k + ' ' + str(v)\n return s", "def metadata_item_format(value):\n try:\n data_name, data_value = value.split('=')\n except ValueError:\n message = (\"Incorrectly formatted metadata. 
\"\n \"Argmuent values should be in the format a=b c=d\")\n raise ValueError(message)\n else:\n return {'name': data_name, 'value': data_value}", "def format_value(self, value: float) -> str:\r\n ...", "def format_value(self, value: float) -> str:\r\n ...", "def format_to_store(key, value):\n key = key.upper()\n if not isparam(key):\n raise ValueError(\"invalid key %r\" % key)\n\n if key in ['RA_ERR', 'RAJ_ERR']:\n if isinstance(value, basestring):\n floatvalue = hms_rad(0., 0., value)\n elif isinstance(value, float):\n floatvalue = value\n else:\n raise TypeError('invalid %s value type %r' % (key, value))\n elif key in ['DEC_ERR', 'DECJ_ERR']:\n if isinstance(value, basestring):\n floatvalue = dms_rad(0., 0., value)\n elif isinstance(value, float):\n floatvalue = value\n else:\n raise TypeError('invalid %s value type %r' % (key, value))\n else:\n try:\n floatvalue = float(value)\n except ValueError:\n if key in ['RA', 'RAJ']:\n floatvalue = hms_rad(value)\n elif key in ['DEC', 'DECJ']:\n floatvalue = dms_rad(value)\n elif isinstance(value, basestring):\n floatvalue = value\n else:\n raise ValueError('invalid PAR %s value %r.' % (key, value))\n return floatvalue", "def comma(self, key):\n\n if \"~\" in key or key == \"title\": v = self(key, u\"\")\n else: v = self.get(key, u\"\")\n if isinstance(v, int): return v\n elif isinstance(v, float): return v\n else: return v.replace(\"\\n\", \", \")", "def to_dump(self):\n s = []\n for k in self.keys():\n if isinstance(self[k], int) or isinstance(self[k], long):\n s.append(\"%s=%d\" % (k, self[k]))\n elif isinstance(self[k], float):\n s.append(\"%s=%f\" % (k, self[k]))\n else:\n for v2 in self.list(k):\n if isinstance(v2, str):\n s.append(\"%s=%s\" % (k, v2))\n else:\n s.append(\"%s=%s\" % (k, util.encode(v2)))\n s.append(\"~format=%s\" % self.format)\n s.append(\"\")\n return \"\\n\".join(s)", "def format_to_print(key, value):\n if value < 0:\n sign = \"-\"\n else:\n sign = \"\"\n\n key = key.upper()\n if key.replace(\"_ERR\", \"\") not in (FLOAT_PARAMS + STR_PARAMS):\n raise ValueError(\"invalid key %r\" % key)\n\n if key in FLOAT_PARAMS or isinstance(value, basestring):\n return str(value)\n elif key in ['RA', 'RAJ', 'RA_ERR', 'RAJ_ERR']:\n sec = value / SIDFREQ\n if '_ERR' in key:\n return str(sec)\n m, s = divmod(sec, 60)\n h, m = divmod(m, 60)\n sign = \"\"\n if s >= 9.9995:\n return \"%s%.2d:%.2d:%.5f\" % (sign, h, m, s)\n else:\n return \"%s%.2d:%.2d:0%.5f\" % (sign, h, m, s)\n elif key in ['DEC', 'DECJ', 'DEC_ERR', 'DECJ_ERR']:\n # taken from: lscsoft/src/lalsuite/lalapps/src/pulsar\n # /HeterodyneSearch/pulsarpputils.py\n arc = np.degrees(np.fmod(np.fabs(value), np.pi))\n d = int(arc)\n arc = (arc - d) * 60.0\n m = int(arc)\n s = (arc - m) * 60.0\n if '_ERR' in key:\n return str(s)\n if s >= 9.9995:\n return \"%s%.2d:%.2d:%.5f\" % (sign, d, m, s)\n else:\n return \"%s%.2d:%.2d:0%.5f\" % (sign, d, m, s)\n else:\n raise TypeError(\"cannot format argument %s with value %r\"\n % (key, value))", "def format(self, valDict):\n return self._formatStr % valDict", "def _format_and_kws(fmt):\n fmt, kws = fmt, {}\n if fmt and \":\" in fmt:\n fmt, kwrepr = fmt.split(\":\", 1)\n for kw in kwrepr.split(\",\"):\n if \"=\" in kw:\n k, v = kw.split(\"=\")\n kws[k] = v\n elif kw.startswith(\"-\"):\n kws[kw[1:]] = False\n elif kw.startswith(\"+\"):\n kws[kw[1:]] = True\n else: # same as \"+\"\n kws[kw] = True\n return fmt, kws", "def _format_value(self, v):\n return \"%.4g\" % v", "def prettyVal( value, key=None):\n\n frmt = '%.4f %c'\n if value <= 1E-9: # 
Pico\n s = frmt % (value * 1E12, 'p')\n elif value < 1E-6: # Nano\n s = frmt % (value * 1E9, 'n')\n elif value < 1E-3: # Micro\n s = frmt % (value * 1E6, 'u')\n elif value < 1: # Milli\n s = frmt % (value * 1E3, 'm')\n else: # (whole units)\n s = frmt % (value, ' ')\n\n if key is not None: # Add unit suffix if given unit\n if key[0] == 'C':\n s += 'F' # Capacitors have units Farads\n if value < 1e-12 or value > 1e-3:\n s += ' BAD!' # outside of reasonable capacitors\n elif key[0] == 'L':\n s += 'H' # Inductors have units Henries\n if value < 1e-12 or value > 1e-3:\n s += ' BAD!' # outside of reasonable henries\n\n return s", "def get_values_from_dict(self, values):\n location = self._get_location_dest_from_dict(values)\n return {\n \"location\": location,\n }", "def format(self):\r\n output = {}\r\n\r\n for k, v in self:\r\n if type(v) == collections.InstrumentedList: # pragma: no cover\r\n output[k] = [item.to_dict() for item in v]\r\n elif isinstance(v, (date, datetime, time)): # pragma: no cover\r\n output[k] = v.isoformat()\r\n elif isinstance(v, (float, Decimal)): # pragma: no cover\r\n output[k] = str(v)\r\n else:\r\n output[k] = v\r\n\r\n return output", "def print_formatted_values(**kwargs):\n string = ', '.join([f'{k}: {format_number(kwargs[k])}' for k in kwargs])\n print(string)", "def __str__(self):\n return '({0},{1})'.format(self.key, self.value)", "def format_location(location):\n local = location.split()\n if len(local) > 1:\n if len(local) == 2 and len(local[1]) == 2:\n location = f\"{local[0].title()} {local[1].upper()}\"\n elif len(local) == 3 and len(local[2]) == 2:\n location = f\"{local[0].title()} {local[1].title()} {local[2].upper()}\"\n else:\n location = location.title()\n else:\n location = local[0].title()\n return location", "def __str__(self):\n return \"Location(%s, %s)\" % (self.latitude, self.longitude)", "def format_list(self, key, val, spacer):\n vals = list(map(str, val))\n\n if self.is_paired_list(key):\n # join the values together so each line has a pair\n vals = zip(vals[::2], vals[1::2])\n vals = [\"%s %s\" % (v[0], v[1]) for v in vals]\n \n s = self.newlinechar.join([spacer + v for v in vals])\n\n return s", "def _format_values_in_map(self, dict_values: dict) -> dict:\n\n for key in dict_values.keys():\n if str(key).lower() == \"exception\":\n dict_values[key] = self._format_exception(str(dict_values[key]))\n break\n\n new_map = {}\n for key, value in dict_values.items():\n value = str(value)\n if ' ' in value:\n value = f'\"{value}\"'\n\n new_map[key] = f\"{key.replace(' ', '')}={value}\"\n return new_map", "def _fmt_map(self, string_list):\n return self._fmt_csv(string_list, list_braces=\"{}\")" ]
[ "0.59635484", "0.5520702", "0.5457944", "0.54207", "0.5350413", "0.5347729", "0.5347187", "0.5327935", "0.53164715", "0.53077406", "0.5262094", "0.5207229", "0.5207229", "0.51810014", "0.5175114", "0.51656723", "0.5141616", "0.51369876", "0.51341087", "0.51255035", "0.51153076", "0.5109819", "0.5100063", "0.50920975", "0.50879765", "0.50865203", "0.50863916", "0.5081243", "0.5016365", "0.50103474" ]
0.568076
1
Formats the location using the long geocommit format
>>> l = Location(42.1, 23.5, "test")
>>> l.format_long_geocommit()
def format_long_geocommit(self):
    geocommit = "geocommit (1.0)\n"
    geocommit += self.format_geocommit(": ", "\n")
    geocommit += "\n\n"

    return geocommit
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def format_short_geocommit(self):\r\n geocommit = \"geocommit(1.0): \"\r\n geocommit += self.format_geocommit(\" \", \", \")\r\n geocommit += \";\"\r\n\r\n return geocommit", "def format_latlon(lat: float, lon: float) -> str:\n if lat < 0:\n latdir = \"S\"\n else:\n latdir = \"N\"\n if lon < 0:\n londir = \"W\"\n else:\n londir = \"E\"\n return \"{:1.6f}&deg;{}, {:1.6f}&deg;{}\".format(abs(lat), latdir, abs(lon), londir)", "def format_location(location):\n local = location.split()\n if len(local) > 1:\n if len(local) == 2 and len(local[1]) == 2:\n location = f\"{local[0].title()} {local[1].upper()}\"\n elif len(local) == 3 and len(local[2]) == 2:\n location = f\"{local[0].title()} {local[1].title()} {local[2].upper()}\"\n else:\n location = location.title()\n else:\n location = local[0].title()\n return location", "def format_geocommit(self, keyval_separator, entry_separator):\r\n end = entry_separator\r\n sep = keyval_separator\r\n\r\n msg = \"lat\" + sep + str(self.lat) + end\r\n msg += \"long\" + sep + str(self.long) + end\r\n\r\n for attr in self.optional_keys:\r\n if hasattr(self, attr):\r\n val = getattr(self, attr)\r\n if not val is None:\r\n msg += attr + sep + str(val) + end\r\n\r\n # no value separator after last value\r\n msg += \"src\" + sep + str(self.src)\r\n\r\n return msg", "def test_get_formatted_location(self):\n\t\tformatted_location = get_formatted_location('seoul', 'south korea')\n\t\tself.assertEqual(formatted_location, 'Seoul, South Korea')", "def short_def(self):\r\n return f\"{self.lat}, {self.lon}\"", "def minor_def(self):\r\n if self.pronunciation:\r\n return f\"{self.name}, {self.lat}, {self.lon}, {self.pronunciation}\".rstrip(\", \")\r\n return f\"{self.name}, {self.lat}, {self.lon}\"", "def GetLatLongString(ddvalue,lltype='latitude'):\n deg=int(abs(ddvalue))\n min=int((abs(ddvalue)-deg)*60)\n sec=int((abs(ddvalue)-deg-(float(min)/60.0))*3600.0)\n if lltype == 'latitude':\n if numpy.sign(ddvalue) == -1:\n ch='S'\n else:\n ch='N'\n else:\n if numpy.sign(ddvalue) == -1:\n ch='W'\n else:\n ch='E'\n\n nstr=\"%dd%d'%.1f''%s\" % (deg,min,sec,ch)\n return nstr", "def _decode(geohash):\n lat_val, lng_val, lat_err, lng_err = _decode_val_err(geohash)\r\n precision = _get_precision(lng_err)\n lat_val = \"%.*f\" % (precision, lat_val)\r\n lng_val = \"%.*f\" % (precision, lng_val)\r\n return lat_val, lng_val", "def from_short_format(data):\r\n m = re.search(\"geocommit\\(1\\.0\\): ((?:[a-zA-Z0-9_-]+ [^,;]+, )*)([a-zA-Z0-9_-]+ [^,;]+);\", data)\r\n\r\n if m is None:\r\n return None\r\n\r\n values = m.group(1) + m.group(2)\r\n\r\n data = dict()\r\n\r\n for keyval in re.split(\",\\s+\", values):\r\n key, val = re.split(\"\\s+\", keyval, 1)\r\n data[key] = val\r\n\r\n if not data.has_key(\"lat\") or not data.has_key(\"long\") or not data.has_key(\"src\"):\r\n\r\n return None\r\n\r\n l = Location(data[\"lat\"], data[\"long\"], data[\"src\"])\r\n\r\n for key in l.optional_keys:\r\n if data.has_key(key):\r\n setattr(l, key, data[key])\r\n\r\n return l", "def _reformat_to_latlong(latlong, use_list=False):\n if _is_null_latlong(latlong):\n return np.nan\n\n if isinstance(latlong, str):\n try:\n # Serialized latlong columns from csv or parquet will be strings, so null values will be\n # read as the string 'nan' in pandas and Dask and 'NaN' in Koalas\n # neither of which which is interpretable as a null value\n if \"nan\" in latlong:\n latlong = latlong.replace(\"nan\", \"None\")\n if \"NaN\" in latlong:\n latlong = latlong.replace(\"NaN\", \"None\")\n latlong = 
ast.literal_eval(latlong)\n except ValueError:\n pass\n\n if isinstance(latlong, (tuple, list)):\n if len(latlong) != 2:\n raise ValueError(\n f\"LatLong values must have exactly two values. {latlong} does not have two values.\"\n )\n\n latitude, longitude = map(_to_latlong_float, latlong)\n\n # (np.nan, np.nan) should be counted as a single null value\n if pd.isnull(latitude) and pd.isnull(longitude):\n return np.nan\n\n if use_list:\n return [latitude, longitude]\n return (latitude, longitude)\n\n raise ValueError(\n f\"LatLongs must either be a tuple, a list, or a string representation of a tuple. {latlong} does not fit the criteria.\"\n )", "def _format_for_list_long(self):\n if isinstance(self.resource, resource_reference.PrefixResource):\n # Align PrefixResource URLs with ObjectResource URLs.\n return LONG_LIST_ROW_FORMAT.format(\n size='', creation_time='',\n url=self.resource.storage_url.url_string, metageneration='',\n etag='')\n\n creation_time = resource_util.get_formatted_timestamp_in_utc(\n self.resource.creation_time)\n\n if self._all_versions:\n url_string = self.resource.storage_url.url_string\n metageneration_string = ' metageneration={}'.format(\n str(self.resource.metageneration))\n else:\n url_string = self.resource.storage_url.versionless_url_string\n metageneration_string = ''\n\n if self._include_etag:\n etag_string = ' etag={}'.format(str(self.resource.etag))\n else:\n etag_string = ''\n\n if self._readable_sizes and self.resource.size is not None:\n size = shim_format_util.get_human_readable_byte_value(\n self.resource.size, use_gsutil_style=self._use_gsutil_style)\n else:\n # Also handles None values.\n size = str(self.resource.size)\n\n # Full example (add 9 spaces of padding to the left):\n # 8 2020-07-27T20:58:25Z gs://b/o metageneration=4 etag=CJqt6aup7uoCEAQ=\n return LONG_LIST_ROW_FORMAT.format(\n size=size,\n creation_time=creation_time,\n url=url_string,\n metageneration=metageneration_string,\n etag=etag_string)", "def lnglat(self):\n if not self.latlng:\n return \"\"\n\n try:\n lat, lng = self.latlng.split(\", \")\n except ValueError:\n return \"\"\n\n return f\"{lng}, {lat}\"", "def clean_longitude(self):\n lLongitude = self.cleaned_data['longitude']\n if lLongitude:\n lValue = lLongitude.strip()\n if lValue:\n lRegEx = re.compile(CO_ORD_REGEX)\n if lRegEx.match(lValue) == None:\n raise forms.ValidationError(\"Please enter the location in decimal notation, for example -1.82182 If it ends with E it's positive, if W, then it's negative.\")\n return lLongitude", "def get_location_str(self) -> tuple:\r\n if self.data is None:\r\n return (None, None)\r\n\r\n lat = self.data['GPSInfo']['GPSLatitude']\r\n lon = self.data['GPSInfo']['GPSLongitude']\r\n \r\n # Convert from Degrees, minutes, seconds to standard form\r\n latitude = (lat[0][0] / lat[0][1]) \\\r\n + (lat[1][0] /lat[1][1] / 60) \\\r\n + (lat[2][0] / lat[2][1] / 3600)\r\n \r\n longitude = (lon[0][0] / lon[0][1]) \\\r\n + (lon[1][0] / lon[1][1] / 60) \\\r\n + (lon[2][0] / lon[2][1] / 3600)\r\n\r\n # Make the results presentable\r\n latitude = str(round(latitude, 6)) \\\r\n + chr(176) + ' ' \\\r\n + self.data['GPSInfo']['GPSLatitudeRef']\r\n \r\n longitude = str(round(longitude, 6)) \\\r\n + chr(176) + ' ' \\\r\n + self.data['GPSInfo']['GPSLongitudeRef']\r\n \r\n return (latitude, longitude)", "def location_build(self, location: Location) -> str:\n if not location:\n return \"{}\"\n if not location.external_id and location.lat:\n try:\n location = self.location_search(location.lat, location.lng)[0]\n 
except IndexError:\n pass\n data = {\n \"name\": location.name,\n \"address\": location.address,\n \"lat\": location.lat,\n \"lng\": location.lng,\n \"external_source\": location.external_id_source,\n \"facebook_places_id\": location.external_id,\n }\n return json.dumps(data, separators=(\",\", \":\"))", "def _normalize_location(lat: float, lon: float):\n latitude = \"{0:.3f}\".format(round(lat, 3))\n longitude = \"{0:.3f}\".format(round(lon, 3))\n return latitude + \":\" + longitude", "def short_location(self):\n bc = self.barcamp\n location = AttributeMapper(bc.location)\n if location.name and location.city:\n return \"%s, %s\" %(location.name, location.city)\n else:\n return self.handler._(\"location to be announced\")", "def annotate_long(self, longDf):\n\n speciesMask = 1 if self.species == 'H.sapiens' else 0\n leaMask = 1 if longDf.iloc[0]['Ligand'] in DEFAULT_LEAS else 0\n ligMask = 1 if longDf.iloc[0]['Ligand'][:2] in self.lipid_patterns else 0\n\n longDf['species'] = np.full(len(longDf.index), speciesMask)\n longDf['defaultLea'] = np.full(len(longDf.index), leaMask)\n longDf['defaultLigand'] = np.full(len(longDf.index), ligMask)\n\n return longDf", "def get_formatted_location(city, country):\n\tformatted_location = city + \", \" + country\n\treturn formatted_location.title()", "def getLong(self, name: unicode) -> long:\n ...", "def parse_location(location):\n city, state = location.strip().split(',')\n return f\"{city.strip().replace(' ', '-')}-{state.strip().replace(' ', '-')}\"", "def _format_market_id(self, marketId):\n return \"{}:{}\".format(\"LON\", marketId.split(\"-\")[0])", "def pretty_location(data):\n\n issue = data.get(\"issue\", \"\")\n if issue:\n issue = \"(%s)\" % issue\n\n pages = data.get(\"pageInfo\", \"\")\n if \"pageInfo\" in data and pages:\n pages = \":\" + pages\n\n location = u\"{title} {volume}{issue}{pages} ({year})\".format(\n title=data.get(\"journalTitle\", \"\"),\n issue=issue,\n volume=data.get(\"journalVolume\", \"\"),\n pages=pages,\n year=data[\"pubYear\"],\n )\n location = location.replace(\" \", \" \")\n if location.endswith(\".\"):\n return location[0:-1]\n return location", "def longitude(self):\n return self._state[1]", "def decode(geohash):\r\n try:\r\n lat_val, lng_val, lat_err, lng_err = decode_val_err(geohash)\r\n precision = _get_precision(lng_err)\r\n lat_val = \"%.*f\" % (precision, lat_val)\r\n lng_val = \"%.*f\" % (precision, lng_val)\r\n return lat_val, lng_val\r\n except:\r\n print(\"Unable to decode!\") # TODO better error message\r", "def getLocationString(self):\n street = ' '.join(self.context.getAddress().strip().split())\n # Remove Postfach from street, otherwise Google geocoder API will\n # return wrong results\n street = street.replace('Postfach', '').replace('\\r','').strip()\n zip_code = self.context.getZip()\n city = self.context.getCity()\n country = self.context.getCountry()\n\n # We need at least something other than country to be defined,\n # otherwise we can't do a meaningful geocode lookup\n if not (street or zip_code or city):\n return ''\n\n # Concatenate only the fields with a value into the location string\n location = country\n for field in [city, zip_code, street]:\n if field.strip():\n location = \"%s, %s\" % (field.strip(), location)\n\n return location", "def __str__(self):\n return f'{self.location}'", "def __str__(self):\n return \"Location(%s, %s)\" % (self.latitude, self.longitude)", "def coords_format(format):\n if format == 'galactic':\n return 'galactic'\n elif format in ['fk5','icrs']:\n return 
'celestial'" ]
[ "0.6449494", "0.5818637", "0.57956123", "0.57563007", "0.5687247", "0.55729204", "0.55175114", "0.5450161", "0.5328889", "0.53174734", "0.5223536", "0.52226293", "0.51880616", "0.5184553", "0.51536745", "0.51523906", "0.5143899", "0.513625", "0.5133939", "0.51274645", "0.51066715", "0.50947565", "0.50608677", "0.5025524", "0.5011686", "0.5004953", "0.49778914", "0.4977263", "0.49435335", "0.4939681" ]
0.7905342
0
Formats the location using the long geocommit format
>>> l = Location(42.1, 23.5, "test")
>>> l.format_short_geocommit()
def format_short_geocommit(self):
    geocommit = "geocommit(1.0): "
    geocommit += self.format_geocommit(" ", ", ")
    geocommit += ";"

    return geocommit
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def format_long_geocommit(self):\r\n geocommit = \"geocommit (1.0)\\n\"\r\n geocommit += self.format_geocommit(\": \", \"\\n\")\r\n geocommit += \"\\n\\n\"\r\n\r\n return geocommit", "def short_def(self):\r\n return f\"{self.lat}, {self.lon}\"", "def from_short_format(data):\r\n m = re.search(\"geocommit\\(1\\.0\\): ((?:[a-zA-Z0-9_-]+ [^,;]+, )*)([a-zA-Z0-9_-]+ [^,;]+);\", data)\r\n\r\n if m is None:\r\n return None\r\n\r\n values = m.group(1) + m.group(2)\r\n\r\n data = dict()\r\n\r\n for keyval in re.split(\",\\s+\", values):\r\n key, val = re.split(\"\\s+\", keyval, 1)\r\n data[key] = val\r\n\r\n if not data.has_key(\"lat\") or not data.has_key(\"long\") or not data.has_key(\"src\"):\r\n\r\n return None\r\n\r\n l = Location(data[\"lat\"], data[\"long\"], data[\"src\"])\r\n\r\n for key in l.optional_keys:\r\n if data.has_key(key):\r\n setattr(l, key, data[key])\r\n\r\n return l", "def short_location(self):\n bc = self.barcamp\n location = AttributeMapper(bc.location)\n if location.name and location.city:\n return \"%s, %s\" %(location.name, location.city)\n else:\n return self.handler._(\"location to be announced\")", "def format_latlon(lat: float, lon: float) -> str:\n if lat < 0:\n latdir = \"S\"\n else:\n latdir = \"N\"\n if lon < 0:\n londir = \"W\"\n else:\n londir = \"E\"\n return \"{:1.6f}&deg;{}, {:1.6f}&deg;{}\".format(abs(lat), latdir, abs(lon), londir)", "def format_location(location):\n local = location.split()\n if len(local) > 1:\n if len(local) == 2 and len(local[1]) == 2:\n location = f\"{local[0].title()} {local[1].upper()}\"\n elif len(local) == 3 and len(local[2]) == 2:\n location = f\"{local[0].title()} {local[1].title()} {local[2].upper()}\"\n else:\n location = location.title()\n else:\n location = local[0].title()\n return location", "def test_get_formatted_location(self):\n\t\tformatted_location = get_formatted_location('seoul', 'south korea')\n\t\tself.assertEqual(formatted_location, 'Seoul, South Korea')", "def format_geocommit(self, keyval_separator, entry_separator):\r\n end = entry_separator\r\n sep = keyval_separator\r\n\r\n msg = \"lat\" + sep + str(self.lat) + end\r\n msg += \"long\" + sep + str(self.long) + end\r\n\r\n for attr in self.optional_keys:\r\n if hasattr(self, attr):\r\n val = getattr(self, attr)\r\n if not val is None:\r\n msg += attr + sep + str(val) + end\r\n\r\n # no value separator after last value\r\n msg += \"src\" + sep + str(self.src)\r\n\r\n return msg", "def minor_def(self):\r\n if self.pronunciation:\r\n return f\"{self.name}, {self.lat}, {self.lon}, {self.pronunciation}\".rstrip(\", \")\r\n return f\"{self.name}, {self.lat}, {self.lon}\"", "def get_formatted_location(city, country):\n\tformatted_location = city + \", \" + country\n\treturn formatted_location.title()", "def shortHelpString(self):\r\n return self.tr(\r\n 'This processing tool queries the PDOK Locatieserver (PDOK-LS) reverse geocoder service for each\\\r\n point in the input layer and adds the selected fields of the reverse geocoder result to the point.\\n\\n\\\r\n See also the PDOK Locatieserver reverse geocoding API <a href=\"https://github.com/PDOK/locatieserver/wiki/API-Reverse-Geocoder\">documentation</a> \\n\\\r\n Parameters:\\n\\n\\\r\n <ul><li><b>Input point layer:</b> for each point the PDOK-LS reverse geocoder service will be queried</li>\\\r\n <li><b>Fields:</b> fields to add to input point layer from reverse geocoder response, defaults to \"weergavenaam\" \\\r\n (note that in the resulting output weergavenaam is remapped to 
\"weergavenaam_{result_type}\")</li>\\\r\n <li><b>Result type to query</b></li>\\\r\n <li><b>Score treshold, optional:</b> objects returned by the PDOK-LS geocoder each have a score, \\\r\n to indicate how well they match the query. Results with a score lower than the treshold \\\r\n are excluded</li>\\\r\n <li><b>Output point layer:</b> output layer with fields added from the PDOK-LS reverse geocoder \\\r\n response, projection same as input point layer</li></ul>\\\r\n '\r\n )", "def GetLatLongString(ddvalue,lltype='latitude'):\n deg=int(abs(ddvalue))\n min=int((abs(ddvalue)-deg)*60)\n sec=int((abs(ddvalue)-deg-(float(min)/60.0))*3600.0)\n if lltype == 'latitude':\n if numpy.sign(ddvalue) == -1:\n ch='S'\n else:\n ch='N'\n else:\n if numpy.sign(ddvalue) == -1:\n ch='W'\n else:\n ch='E'\n\n nstr=\"%dd%d'%.1f''%s\" % (deg,min,sec,ch)\n return nstr", "def shorten_latlon(text):\n return LATLNG_RE.sub(do_shorten_latlon, text)", "def _normalize_location(lat: float, lon: float):\n latitude = \"{0:.3f}\".format(round(lat, 3))\n longitude = \"{0:.3f}\".format(round(lon, 3))\n return latitude + \":\" + longitude", "def location_build(self, location: Location) -> str:\n if not location:\n return \"{}\"\n if not location.external_id and location.lat:\n try:\n location = self.location_search(location.lat, location.lng)[0]\n except IndexError:\n pass\n data = {\n \"name\": location.name,\n \"address\": location.address,\n \"lat\": location.lat,\n \"lng\": location.lng,\n \"external_source\": location.external_id_source,\n \"facebook_places_id\": location.external_id,\n }\n return json.dumps(data, separators=(\",\", \":\"))", "def __str__(self):\n return \"Location(%s, %s)\" % (self.latitude, self.longitude)", "def __str__(self):\n return f'{self.location}'", "def pretty_location(data):\n\n issue = data.get(\"issue\", \"\")\n if issue:\n issue = \"(%s)\" % issue\n\n pages = data.get(\"pageInfo\", \"\")\n if \"pageInfo\" in data and pages:\n pages = \":\" + pages\n\n location = u\"{title} {volume}{issue}{pages} ({year})\".format(\n title=data.get(\"journalTitle\", \"\"),\n issue=issue,\n volume=data.get(\"journalVolume\", \"\"),\n pages=pages,\n year=data[\"pubYear\"],\n )\n location = location.replace(\" \", \" \")\n if location.endswith(\".\"):\n return location[0:-1]\n return location", "def __str__(self):\n return \"Location(latitude={},longitude={})\".format(\n self.latitude, self.longitude\n )", "def getLocationString(self):\n street = ' '.join(self.context.getAddress().strip().split())\n # Remove Postfach from street, otherwise Google geocoder API will\n # return wrong results\n street = street.replace('Postfach', '').replace('\\r','').strip()\n zip_code = self.context.getZip()\n city = self.context.getCity()\n country = self.context.getCountry()\n\n # We need at least something other than country to be defined,\n # otherwise we can't do a meaningful geocode lookup\n if not (street or zip_code or city):\n return ''\n\n # Concatenate only the fields with a value into the location string\n location = country\n for field in [city, zip_code, street]:\n if field.strip():\n location = \"%s, %s\" % (field.strip(), location)\n\n return location", "def get_str_location(location):\n lng, lat = get_ob_value_primitive(location, 'Longitude'), get_ob_value_primitive(location, 'Latitude')\n try:\n if lat is not None and lng is not None:\n return 'POINT(' + str(float(lng)) + ', ' + str(float(lat)) + ')'\n return None\n except ValueError:\n raise ValueError(f'Invalid Latitude or Longitude, got 
(Latitude:\\'{lat}\\', Longitude:\\'{lng}\\')')", "def shortHelpString(self):\r\n return self.tr(\"This tool takes an address point layer, and buffers it a selectable distance (default is 5km) to simulate a lockdown movement restriction. \\nIt then counts Hospitals and Grocery Stores, as well as significant (>1.5ha) areas of parkland (significance size for parkland also adjustable). \\nA lockdown liveability score is then calculated for each address.\\n After processing, all files will appear in directory of input files. final_Address is layer with results, needs to be manually added after processing.\")", "def _format_point_postgis(lat: float, lon: float) -> Point:\n return sa.cast(\"POINT({} {})\".format(lon, lat), ga.types.Geography)", "def get_location_str(self) -> tuple:\r\n if self.data is None:\r\n return (None, None)\r\n\r\n lat = self.data['GPSInfo']['GPSLatitude']\r\n lon = self.data['GPSInfo']['GPSLongitude']\r\n \r\n # Convert from Degrees, minutes, seconds to standard form\r\n latitude = (lat[0][0] / lat[0][1]) \\\r\n + (lat[1][0] /lat[1][1] / 60) \\\r\n + (lat[2][0] / lat[2][1] / 3600)\r\n \r\n longitude = (lon[0][0] / lon[0][1]) \\\r\n + (lon[1][0] / lon[1][1] / 60) \\\r\n + (lon[2][0] / lon[2][1] / 3600)\r\n\r\n # Make the results presentable\r\n latitude = str(round(latitude, 6)) \\\r\n + chr(176) + ' ' \\\r\n + self.data['GPSInfo']['GPSLatitudeRef']\r\n \r\n longitude = str(round(longitude, 6)) \\\r\n + chr(176) + ' ' \\\r\n + self.data['GPSInfo']['GPSLongitudeRef']\r\n \r\n return (latitude, longitude)", "def __str__(self):\n\n return f'{self.location}'", "def coords_format(format):\n if format == 'galactic':\n return 'galactic'\n elif format in ['fk5','icrs']:\n return 'celestial'", "def _reformat_to_latlong(latlong, use_list=False):\n if _is_null_latlong(latlong):\n return np.nan\n\n if isinstance(latlong, str):\n try:\n # Serialized latlong columns from csv or parquet will be strings, so null values will be\n # read as the string 'nan' in pandas and Dask and 'NaN' in Koalas\n # neither of which which is interpretable as a null value\n if \"nan\" in latlong:\n latlong = latlong.replace(\"nan\", \"None\")\n if \"NaN\" in latlong:\n latlong = latlong.replace(\"NaN\", \"None\")\n latlong = ast.literal_eval(latlong)\n except ValueError:\n pass\n\n if isinstance(latlong, (tuple, list)):\n if len(latlong) != 2:\n raise ValueError(\n f\"LatLong values must have exactly two values. {latlong} does not have two values.\"\n )\n\n latitude, longitude = map(_to_latlong_float, latlong)\n\n # (np.nan, np.nan) should be counted as a single null value\n if pd.isnull(latitude) and pd.isnull(longitude):\n return np.nan\n\n if use_list:\n return [latitude, longitude]\n return (latitude, longitude)\n\n raise ValueError(\n f\"LatLongs must either be a tuple, a list, or a string representation of a tuple. 
{latlong} does not fit the criteria.\"\n )", "def print_lat(lat):\n s = \"\"\n # FILL\n return s", "def format_address(**args):\n #Begin with the organisation and PO Box number, if applicable.\n address = ''.join([args[entry] + '\\n' \n for entry in ['organisation', 'PO box']\n if args.get(entry)])\n #Format building name/number components.\n address += format_building_components(*[args.get(x) for x in \n ['sub-building name', \n 'building name', \n 'building number',\n 'concatenation indicator']])\n #Add thoroughfare (if present), locality/town and postcode.\n address += ''.join([args[entry] + '\\n' \n for entry in ['dependent thoroughfare', \n 'thoroughfare',\n 'double dependent locality',\n 'dependent locality',\n 'town',\n 'postcode']\n if args.get(entry)])\n return address.strip()", "def full_def(self):\r\n if self.pronunciation:\r\n return f\"{self.name}, {self.lat}, {self.lon}, {self.heading.lstrip('!') or 0}, {self.pronunciation}\".rstrip(\", \")\r\n elif self.heading.lstrip('!'):\r\n return f\"{self.name}, {self.lat}, {self.lon}, {self.heading.lstrip('!')}\"\r\n return f\"{self.name}, {self.lat}, {self.lon}\"" ]
[ "0.78306407", "0.64534444", "0.62554103", "0.61308855", "0.60158163", "0.5945876", "0.5859429", "0.57603073", "0.5721214", "0.55733645", "0.55531913", "0.55373216", "0.55149174", "0.54258066", "0.54249513", "0.54163617", "0.53930986", "0.5350871", "0.5336369", "0.5322799", "0.52973944", "0.528961", "0.52696174", "0.52444565", "0.5243007", "0.5216069", "0.51960593", "0.5187486", "0.51732564", "0.5162459" ]
0.7280461
1
Parses a string in short format to create an instance of the class. >>> l = Location.from_short_format(
def from_short_format(data): m = re.search("geocommit\(1\.0\): ((?:[a-zA-Z0-9_-]+ [^,;]+, )*)([a-zA-Z0-9_-]+ [^,;]+);", data) if m is None: return None values = m.group(1) + m.group(2) data = dict() for keyval in re.split(",\s+", values): key, val = re.split("\s+", keyval, 1) data[key] = val if not data.has_key("lat") or not data.has_key("long") or not data.has_key("src"): return None l = Location(data["lat"], data["long"], data["src"]) for key in l.optional_keys: if data.has_key(key): setattr(l, key, data[key]) return l
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_location(location):\n city, state = location.strip().split(',')\n return f\"{city.strip().replace(' ', '-')}-{state.strip().replace(' ', '-')}\"", "def from_str(cls, string):", "def parse_location(location_str):\n def floatify(latlon):\n \"\"\" Turns a latlon string into a float \"\"\"\n sign = -2. * (latlon[-1].lower() in ['s', 'w']) + 1\n return float(latlon[:-1]) * sign\n points = location_str.strip().split(',')\n if not len(points) == 2:\n raise BadQuery(\"Expected four comma seperated values \"\n \"defining a single point.\")\n\n is_lat = lambda x: x[-1].lower() in ['n', 's']\n lat = filter(is_lat, points)\n if not len(lat) == 1:\n raise BadQuery(\"Expected two latitudes (determined by \" +\n \"values ending in 'N' or 'S'\")\n is_lon = lambda x: x[-1].lower() in ['e', 'w']\n lon = filter(is_lon, points)\n if not len(lon) == 1:\n raise BadQuery(\"Expected two longitudes (determined by \" +\n \"values ending in 'E' or 'W'\")\n lat = floatify(lat[0])\n lon = floatify(lon[0])\n\n # make sure latitude is in range.\n if (lat > 90.) or (lat < -90):\n raise BadQuery(\"Latitude must be within -90 and 90, got %s\" %\n str(lat))\n # we let the user use either longitudes of 0 to 360\n # or -180 to 180, then convert to nautical (-180 to 180).\n if lon > 360. or lon < -180.:\n raise BadQuery(\"Longitudes must be within -180 and 360, got %s\" %\n str(lon))\n # make sure lons end up in -180 to 180.\n lon = np.mod(lon + 180., 360.) - 180.\n\n location = {'latitude': lat,\n 'longitude': lon}\n return location", "def from_location(self, location: str) -> Location:\n return Location({\n 'location': location,\n '': 'Location'\n })", "def from_str(cls, s):\n raise NotImplementedError", "def from_string(cls, alg_str):\n try:\n return cls(int(alg_str[1]) - 1, ord(alg_str[0]) - 97)\n except ValueError as e:\n raise ValueError(\"Location.from_string {} invalid: {}\".format(alg_str, e))", "def from_str(s: str) -> \"Lineage\":\n match = LINEAGE_REGEX.search(s)\n if not match:\n raise InvalidLineageString(\n f\"Lineage string {s} is not in the expected format.\"\n )\n major = match.group(\"major\")\n minor = match.group(\"minor\") or None\n return Lineage(major=major, minor=minor)", "def parseString(self, s):\n pass", "def from_str(cls, line) -> \"VersionStructure\":\n major, minor, patch = [int(item) for item in line.split(\".\")]\n return cls(major=major, minor=minor, patch=patch)", "def parse(cls, s):\n raise NotImplementedError", "def parse(s):\n return s", "def FromHumanReadable(cls, string: Text):\n precondition.AssertType(string, Text)\n return _GetFactory(cls).FromHumanReadable(string)", "def from_string(cls, dlstr):\n raise NotImplementedError(\"Should be implemented by subclass\")", "def street_parser(*street_data):\n\n # parsing tuples\n if len(street_data) == 2:\n if not isinstance(street_data[0], str) and not isinstance(street_data[1], str):\n raise WrongInput(\"Invalid format\")\n # street name as the tuple's first item\n strname, strnumber = street_data\n # street number as the tuple's first item\n if street_data[0][0] in digits:\n strname, strnumber = strnumber, strname\n\n # parsing strings\n else:\n if not isinstance(street_data[0], str):\n raise WrongInput(\"Invalid format\")\n if not street_data[0]:\n raise WrongInput(\"Input cannot be blank\")\n\n # string starting with street number\n if street_data[0][0] in digits:\n street_pattern = re.compile(r'''\n ^ # beginning of string\n (\\d+) # street number is any number of digits\n \\W+ # separator\n (\\w+\\W*\\w*\\W*) # street name 
is one or more words with optional separators\n $ # end of string\n ''', re.VERBOSE)\n street_obj = street_pattern.search(street_data[0])\n strnumber, strname = street_obj.groups()\n\n # string starting with street name\n else:\n street_pattern = re.compile(r'''\n ^ # beginning of string\n (\\w+\\W*\\w*\\s*) # street name is one or more words with optional separators\n \\W+ # separator\n (\\d+) # street number is any number of digits\n $ # end of string\n ''', re.VERBOSE)\n street_obj = street_pattern.search(street_data[0])\n (strname, strnumber) = street_obj.groups()\n\n # replace specific words in street name with their abbreviates\n strname = strname.lower()\n special = {r'\\baleje\\b': 'Al.', r'\\bavenue\\b': 'Av.', r'\\broad\\b': 'Rd.', r'\\bsquare\\b': 'Sq.',\n r'\\bstreet\\b': 'St.', r'\\bdrive\\b': 'Dr.'}\n for key in special:\n strname = re.sub(key, special[key], strname)\n return strname.title(), strnumber", "def from_string(cls, dlstr):\n\n NotImplementedError(\"Should be implemented by subclass\")", "def fromString(cls, string):\n raise NotImplementedError(\n 'fromString is not implemented on %r' % (cls.__name__,))", "def fromString(cls, s):\n try:\n lines = s.splitlines()\n assert len(lines) > 1\n assert lines[0][0] == cls.DELIMITER\n name = lines[0][1:]\n sequence = \"\".join(lines[1:])\n return FastaRecord(name, sequence)\n except AssertionError:\n raise ValueError(\"String not recognized as a valid FASTA record\")", "def decode(self, shortUrl: str) -> str:\n short = shortUrl.split('/')[-1]\n if short in short2long:\n return short2long[short]\n else:\n return None", "def from_str(cls, timestamp_str):\n units = timestamp_str.split(\":\")\n seconds_ms = units[-1].split(\".\")\n hours = int(units[0])\n minutes = int(units[1])\n seconds = int(seconds_ms[0])\n milliseconds = int(seconds_ms[1])\n return cls(hours, minutes, seconds, milliseconds)", "def fromStr(cls, s):\n assert isinstance(s, str), 'incorrect type of arg s: should be type str, is type {}'.format(type(s))\n s = [ int(n) for n in s.split('.') ]\n return cls(*s)", "def from_string(cls, string):\n normalised = cls.normalise_string(string)\n return cls.from_normalised_string(normalised)", "def from_str ( cls, s, strict=False ):\n readable, writable, executable = False, False, False\n\n if strict:\n _s = s.lower()\n readable = _s[0] == 'r'\n writable = _s[1] == 'w'\n executable = _s[2] == 'x'\n\n elif s:\n for char in s.lower():\n if char == 'r':\n readable = True\n elif char == 'w':\n writable = True\n elif char == 'x':\n executable = True\n # -- end for\n # -- end if\n\n return cls ( readable, writable, executable )", "def parseString(self, s):\n return self.parser.parseString(s)", "def _parse_name(string):\n\n name = tile_name(string)\n\n if not validate_name(name):\n raise ValueError(\"Not a valid tile name!\")\n\n (unit, northing, easting) = name.split(\"_\")\n northing = _enlarge_ordinate(northing, unit)\n easting = _enlarge_ordinate(easting, unit)\n size = TILE_SIZES[unit]\n\n return TileInfo(northing, easting, size, unit)", "def decode(self, shortUrl):\n shortUrl = shortUrl[-6:]\n if shortUrl in self.short_to_long:\n return self.short_to_long[shortUrl]", "def from_str(cls, line):\n match = cls._re.search(line)\n if not match:\n return cls(None, None)\n groups = [int(d) for d in match.groups()]\n ymdhm1 = groups[:5]\n ymdhm2 = groups[5:10]\n hm3 = groups[10:]\n return cls(\n datetime.datetime(*ymdhm1),\n datetime.datetime(*ymdhm2),\n hm3[0] * 60 + hm3[1],\n )", "def __init__(self, string):\n self.string = string\n 
self.curr = None\n self.locations = ['Clothier','Bond Complex','Black Cultural Center','Kohlberg','No Location','Lang Music Building','Lang Performing Arts Center','McCabe Library','Friends Meeting House','Parrish','Matchbox','Lamb-Miller Fieldhouse','Trotters','Science Center','Alice Paul','Lang Center']\n self.days = ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday']\n self.times = ['AM','PM','All Day']\n self.events = curr_events\n self.currloc = \"\" #Hotfix", "def short_location(self):\n bc = self.barcamp\n location = AttributeMapper(bc.location)\n if location.name and location.city:\n return \"%s, %s\" %(location.name, location.city)\n else:\n return self.handler._(\"location to be announced\")", "def from_string(cls, s):\n for num, text in cls._STATUS2STR.items():\n if text == s:\n return cls(num)\n else:\n raise ValueError(\"Wrong string %s\" % s)", "def parse_location(keyword_args, lineno=None):\n name = keyword_args[\"name\"]\n if keyword_args.get(\"random\"):\n x, y = random.uniform(-1, 1), random.uniform(-1, 1)\n else:\n try:\n x = float(keyword_args[\"x\"])\n except KeyError:\n error(name=\"Malformed 'loc' command\",\n msg=\"The location command is malformed. Maybe you forgot to \"\n \"specify 'x='?\",\n lineno=lineno)\n except ValueError:\n error(name=\"Not a number\",\n msg=\"You did not specify a number for the x coordinate\",\n lineno=lineno)\n try:\n y = float(keyword_args[\"y\"])\n except KeyError:\n error(name=\"Malformed 'loc' command\",\n msg=\"The location command is malformed. Maybe you forgot to \"\n \"specify 'y='?\",\n lineno=lineno)\n except ValueError:\n error(name=\"Not a number\",\n msg=\"You did not specify a number for the y coordinate\",\n lineno=lineno)\n if obj_dict['point'].get(name):\n o = obj_dict['point'][name]\n else:\n o = primitives.Point(keyword_args[\"name\"])\n obj_dict['point'][name] = o\n ret = o\n\n o.x = x\n o.y = y\n\n return [\"point_\"+ret.name]" ]
[ "0.5796067", "0.57846785", "0.5758145", "0.5640593", "0.5627394", "0.5549736", "0.53233117", "0.5281842", "0.526684", "0.52663153", "0.5237364", "0.51908535", "0.51495135", "0.5133941", "0.511369", "0.5065242", "0.5057992", "0.50402415", "0.5021159", "0.5001185", "0.49983492", "0.49377656", "0.49365884", "0.4923756", "0.4920214", "0.48819992", "0.48764423", "0.48743972", "0.4871381", "0.486821" ]
0.6709516
0
Creates a JSON request string for location information from google. The access points are a map from mac addresses to access point information dicts. >>> wlp = WifiLocationProvider() >>> wlp.request_dict()["wifi_towers"]
def request_dict(self): ap_map = self.get_access_points() if not ap_map: return None request = dict() request["version"] = "1.1.0" request["host"] = "localhost" request["request_address"] = True request["address_language"] = "en_GB" request["wifi_towers"] = ap_map.values() if self.access_token: request["access_token"] = self.access_token return request
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def google(self):\r\n prefix ='https://maps.googleapis.com/maps/api/staticmap?center='\r\n middle = '&zoom=14&size=400x400&markers='\r\n suffix = '&key=AIzaSyD5nqmDGFH1SUZxJAYVtFHP7RNjjFE9CHg'\r\n marker = '+'.join(self.placeToSearch) # marker in google format, no space but + separator\r\n request = prefix + marker+middle+marker+suffix\r\n\r\n return request", "def google_map_api(request):\n\treturn {\n\t\t'GOOGLE_MAPS_API' : settings.GOOGLE_MAPS_API,\n\t}", "def geocode(location):\n\n\ttxt = fetch_mapzen_response(location)\n\tmydict = parse_mapzen_response(txt)\n\tmydict['query_text'] = location\n\treturn mydict", "def get_geo_data(request):\n\n # Note that geoip2 (from maximind) doesn't work on GAE because there is a\n # C lib in there apparently.\n # We can use Appengine's added headers to do that work though thankfully.\n geo = dict()\n geo['region'] = request.headers.get(\"X-AppEngine-Region\", \"unknown\")\n geo['city'] = request.headers.get(\"X-AppEngine-City\", \"unknown\")\n geo['country'] = request.headers.get(\"X-AppEngine-Country\", \"unknown\")\n geo['city_lat_long'] = request.headers.get(\"X-AppEngine-CityLatLong\", \"unknown\")\n\n return geo", "def request_directions(start_location, end_location):\n now = datetime.datetime.now()\n directions_response = GMAPS.directions(start_location, end_location,\n mode=\"walking\", departure_time=now)\n parsed_directions_response = parse_directions_response(directions_response)\n return parsed_directions_response", "def send_map_data():\n\n avg_lat = 37.75774\n avg_long = -122.43870\n bounds = {'lat': [-122.54, -122.35], 'lng': [37.67, 37.84]}\n center = {'lat': avg_lat, 'lng': avg_long}\n\n return jsonify({'map_center': center,\n 'map_bounds': bounds,\n })", "def get_driving_map(start_lng, start_lat, end_lng, end_lat):\r\n geojson_str = get_driving_directions(start_lng, start_lat, end_lng, end_lat)\r\n return (\r\n f\"https://api.mapbox.com/styles/v1/mapbox/streets-v11/static/\"\r\n f\"geojson({geojson_str})/auto/640x640?access_token={MAPBOX_TOKEN}\"\r\n ), geojson_str", "def getting_location_key(latitude, longitude):\n\n API_Key = \"zIGuOeUd0aE4O621Gj1KGDc6JiZ3PAGb\"\n http_request = f\"http://dataservice.accuweather.com/locations/v1/cities/geoposition/search?apikey={API_Key}&q={latitude}%2C%20{longitude}&language=pt-br\"\n\n location_key_request = requests.get(http_request)\n\n if location_key_request.status_code != 200:\n print(\"It was not possible to retrive your location key. 
Please, try again later!!\")\n exit()\n\n else:\n location_key_response = location_key_request.json()\n\n location_key = location_key_response['Key']\n \n # EXTRACTING LOCATION INFORMATION --\n country = location_key_response['Country']['LocalizedName']\n state = location_key_response['AdministrativeArea']['ID']\n city = location_key_response['ParentCity']['LocalizedName']\n neighbourhood = location_key_response['LocalizedName']\n\n # PRINTING LOCATION INFORMATION --\n title_printer(\" ---- LOCATION ----\")\n time.sleep(0.5)\n\n print(\"Country\", end=\"\")\n print(f\"{country:.>73}\")\n time.sleep(0.5)\n\n print(\"State\", end=\"\")\n print(f\"{state:.>75}\")\n time.sleep(0.5)\n\n print(\"City\", end=\"\")\n print(f\"{city:.>76}\")\n time.sleep(0.5)\n\n print(\"Region\", end=\"\")\n print(f\"{neighbourhood:.>74}\")\n time.sleep(0.5)\n\n return location_key", "def google_maps(request):\n gmaps_api_key = getattr(settings, 'GOOGLE_MAPS_API', False)\n return {\n 'GOOGLE_MAPS_API': gmaps_api_key,\n 'google_maps': gmaps_api_key\n }", "def request_map():\n\n rospy.loginfo(\"Requesting the map\")\n rospy.wait_for_service('dynamic_map')\n getMap = rospy.ServiceProxy('dynamic_map', GetMap)\n g = getMap().map\n\n return g", "def geocode(location):\n GeoDict = parse_mapzen_response(fetch_mapzen_response(location))\n GeoDict['query_text'] = location\n return GeoDict", "def geocode(location_dict):\n query = \"https://nominatim.openstreetmap.org/search\"\n\n if location_dict.get(' country_code') != \" \": #ISO 3166-1alpha2 code\n query += \"countrycodes=\" + location_dict.get(' country_code')[1:] + \"&\"\n if location_dict.get(' city_name') != \" \":\n query += \"?city=\" + location_dict.get(' city_name')[1:] + \"&\"\n # if location_dict.get(\" zip_code\") != \"\":\n # query += \"postalcode=\" + location_dict(\" zip_code\")[1:] + \"&\"\n\n else:\n query += \"?q=\" + location_dict.get(\"name\").replace(\n \"-\", \" \") + \"&\" # second try?\n if location_dict.get('street_address') != \" \":\n query += \"?street=\" + location_dict.get('street_address') + \"&\"\n\n return requests.get(query + \"&format=json&limit=1\").json()", "def getLocationInfo():\n try:\n response = requests.get('http://ip-api.com/json')\n return response.json()\n except Exception as e:\n print(\"Could not fetch location details. 
\\nKindly check your internet connection.\")\n return {}", "def _get_location_details(self, location):\n resp = requests.get(\n self.base_url,\n params = {\n 'address': ''.join(location.split(' ')),\n 'key': GOOGLE_API_KEY,\n }\n )\n return resp.json()", "def get_request(location):\n endpoint = lnpay_py.__ENDPOINT_URL__ + location\n headers = {\n 'X-Api-Key': lnpay_py.__PUBLIC_API_KEY__,\n 'X-LNPay-sdk': lnpay_py.__VERSION__\n }\n\n r = requests.get(url=endpoint, headers=headers)\n return r.json()", "def googlemaps(request):\n assert False\n # return {\"GOOGLEMAPS_API_KEY\": settings.GOOGLEMAPS_API_KEY}", "def fetch(self, radius: int) -> dict:\n # convert radius integer to string\n radius: str = f\"{radius}mi\" \n # set empty dict\n geocodes: dict = {}\n # iterate through instantiated locations list\n # set search parameters to pass to callGoogle method\n for location in self.locations:\n\n params: dict = {\n\n 'address': location,\n 'sensor': 'false',\n 'key': self.__api_key['google_key']\n\n }\n # define key value pairs | city - geocode\n geocodes[location]: str = f\"{callGoogle(endpoint=self.__api_endpoint, params=params)},{radius}\"\n\n return geocodes", "def gmaps_rest(query, mode=\"place\"):\n # https://www.google.com/maps/embed/v1/MODE?key=YOUR_API_KEY&parameters\n base_url = \"https://www.google.com/maps/embed/v1\"\n key = f\"key={_gmaps_key}\"\n q_dict = {\"q\": query, \"zoom\": \"7\"}\n # q = query_encode(urllib.parse.urlencode(q_dict))\n q = urllib.parse.urlencode(q_dict).replace(\"%2C+\", \",\")\n url = f\"{base_url}/{mode}?{key}&{q}\"\n return url", "def build_google_payload(latitude, longitude):\n coordinates = latitude + ',' + longitude\n payload = 'latlng=' + coordinates + \"&language=es&client=\" + GOOGLE_INFO['client'] + \"&signature=\" + GOOGLE_INFO['signature'] + \"=&result_type=route\"\n return payload", "def making_map():\n account = request.form['tweecount']\n number = request.form['num_friends']\n n_map = map(authorization(account, number))\n return n_map", "def fetchGeoData():\n if request.method ==\"POST\":\n result = {}\n if request.get_json():\n post_requests = request.get_json()\n print(post_requests)\n result = db.getmapdata(post_requests['attr']) \n return result", "def traffic_geo(self, **kwargs):\n self.logger.debug(f\"Get basic Geo usage data\")\n url_path = 'traffic/geo'\n body = self._make_body(kwargs)\n return self._common_post(request_path=url_path, body=body)", "def get_weather_data() -> dict:\n # Creating the url for the api call\n api_key = \"96bba64ba34672da132c1a987ad2fee6\"\n lat = 49.24\n long = -123.15\n config = '&units=metric'\n url = f'https://api.openweathermap.org/data/2.5/onecall?lat={lat}&lon={long}&appid={api_key}{config}'\n\n # Querying and JSON parsing\n api_return = requests.get(url)\n weather_data = api_return.json()\n return weather_data", "def get_static_map(start_lng, start_lat, end_lng, end_lat):\r\n geojson_str = get_map_directions(start_lng, start_lat, end_lng, end_lat)\r\n return (\r\n f\"https://api.mapbox.com/styles/v1/mapbox/streets-v11/static/\"\r\n f\"geojson({geojson_str})/auto/640x640?access_token={MAPBOX_TOKEN}\"\r\n ), geojson_str", "def status_location_info_nonfriends():\n\n\n user_id = session[\"user_id\"]\n print \"user id\", session[\"user_id\"]\n result = Update.get_all_updates(user_id)\n print \"result:\", result\n strangers = {\n status.update_id: {\n \"userName\": status.user.first_name,\n \"post\": status.post,\n \"postedAt\": status.time,\n \"postedCounty\": status.posted_county,\n \"postedLat\": 
status.posted_lat,\n \"postedLng\": status.posted_lng\n }\n for status in Update.get_all_updates(user_id)}\n pprint(strangers)\n\n return jsonify(strangers)", "def json(self):\r\n return {\"id\": self.id, \"code\": self.code, \"description\": self.description, \"xCoor\": self.x_coor, \"yCoor\": self.y_coor, \"latitude\": self.latitude,\r\n \"longitude\": self.longitude, \"waterschapId\": self.waterschap_id, \"watertypeId\": self.watertype_id, \"watertypeKrwId\": self.watertype_krw_id}", "def getLocation(self):\n send_url = 'https://ipinfo.io'\n r = requests.get(send_url)\n resp = json.loads(r.text)\n logging.info(\"GeoLoc: {}\".format(resp))\n return resp", "def buildRequestToDict(self, uID, request, approval):\n result = {}\n result['uID'] = uID\n result['request'] = request\n result['approval'] = approval\n return result", "def generate_url(self):\n if self.has_marker:\n marker_param = f'mlat={self.mlat}&mlng={self.mlng}&'\n else:\n marker_param = ''\n if self.start:\n start_param = 'start=true&'\n else:\n start_param = ''\n url = f'{MapController.MAP_URL}?{start_param}clat={self.clat}&clng={self.clng}&{marker_param}zoom={self.zoom}'\n return url", "def callGoogle(endpoint: str, params: dict) -> str:\n # hit API \n call = requests.get(endpoint, params=params)\n response = call.json()\n # grab first element in payload\n result: dict = response['results'][0]\n # format lat and lng to a string\n return f\"{result['geometry']['location']['lat']},{result['geometry']['location']['lng']}\"" ]
[ "0.54804015", "0.53161436", "0.5291573", "0.5062097", "0.50081235", "0.49757034", "0.49650696", "0.49560982", "0.49435157", "0.49330306", "0.49222854", "0.48264238", "0.4785291", "0.4694317", "0.46646068", "0.4656133", "0.46497852", "0.4645924", "0.46358958", "0.46351212", "0.46132883", "0.45809332", "0.45671955", "0.45591545", "0.45520145", "0.4540025", "0.45317042", "0.4518262", "0.45169958", "0.4494834" ]
0.70129
0
Extract features from points.
def extract_feat(self, points, img_metas=None): voxels, num_points, coors = self.voxelize(points) voxel_features = self.voxel_encoder(voxels, num_points, coors) batch_size = coors[-1, 0].item() + 1 x = self.middle_encoder(voxel_features, coors, batch_size) x = self.backbone(x) if self.with_neck: x = self.neck(x) return x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extract_feat(self, points, img, img_metas):\n img_feats = self.extract_img_feat(img, img_metas)\n pts_feats = self.extract_pts_feat(points, img_feats, img_metas)\n return (img_feats, pts_feats)", "def extract_features(input_feature_map, points=conv43Points):\n arr = []\n for y,x in points:\n arr.append(input_feature_map[:,y,x,:])\n return tf.stack(arr, axis=1, name=\"extracted_features\"), len(points)", "def extract_feat(self, points, img_metas):\n voxel_dict = self.voxelize(points)\n voxel_features = self.voxel_encoder(voxel_dict['voxels'],\n voxel_dict['num_points'],\n voxel_dict['coors'])\n batch_size = voxel_dict['coors'][-1, 0].item() + 1\n feats_dict = self.middle_encoder(voxel_features, voxel_dict['coors'],\n batch_size)\n x = self.backbone(feats_dict['spatial_features'])\n if self.with_neck:\n neck_feats = self.neck(x)\n feats_dict.update({'neck_feats': neck_feats})\n return feats_dict, voxel_dict", "def extractFeatures(self, datum):\n abstract", "def extract_pts_feat(self, pts: Tensor) -> Tuple[Tensor]:\n x = self.pts_backbone(pts)\n if self.with_pts_neck:\n x = self.pts_neck(x)\n\n seed_points = x['fp_xyz'][-1]\n seed_features = x['fp_features'][-1]\n seed_indices = x['fp_indices'][-1]\n\n return (seed_points, seed_features, seed_indices)", "def extract_features(self, inputs):\n pass", "def extract_feats(self, points, img_metas, imgs=None):\n if imgs is None:\n imgs = [None] * len(img_metas)\n img_feats, pts_feats = multi_apply(self.extract_feat, points, imgs,\n img_metas)\n return img_feats, pts_feats", "def extract_pts_feat(self, pts, img_feats, img_metas):\n if not self.with_pts_bbox:\n return None\n voxels, num_points, coors = self.voxelize(pts)\n voxel_features = self.pts_voxel_encoder(voxels, num_points, coors,\n img_feats, img_metas)\n batch_size = coors[-1, 0] + 1\n x = self.pts_middle_encoder(voxel_features, coors, batch_size)\n x = self.pts_backbone(x)\n if self.with_pts_neck:\n x = self.pts_neck(x)\n return x", "def set_to_features(X_set):\n ext = Extractor()\n features = []\n for i in range(len(X_set)):\n print(i, \" out of \", len(X_set))\n bag_of_features = [ext.extract(X_set[i][j]) for j in range(len(X_set[i]))]\n\n features.append(bag_of_features)\n\n return features", "def extract_features(self, inputs):\n x = self.conv1(inputs)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n return x", "def extract_features(self):\n self.extract_features_static()\n self.extract_features_dynamic()", "def find_features(pyr):\n feature_pnts = spread_out_corners(pyr[0], SPREAD_N, SPREAD_M ,SPREAD_CORNERS_RADIUS)\n descriptors = sample_descriptor(pyr[2], feature_pnts, SAMPLE_RAD)\n return feature_pnts, descriptors", "def get_features_fast(self, frame_gray):\n keypoints = self.fast.detect(frame_gray, None)\n\n return np.float32(\n [kp.pt for kp in keypoints]\n ).reshape(-1, 1, 2)", "def reach_points_as_features(self) -> List[Feature]:\n return [pt.as_feature for pt in self._reach_points]", "def load_pts_features(path):\n\n #\n # Your code here\n #\n\n pts = [np.empty((123, 2)), np.empty((123, 2))]\n feats = [np.empty((123, 128)), np.empty((123, 128))]\n\n return pts, feats", "def extract_features(self, *args, **kwargs):\n return self(*args, **kwargs)", "def getFeatures(self,layer): \n numFeatures = layer.GetFeatureCount()\n features = []\n for i in range(numFeatures):\n feature = layer.GetNextFeature()\n if feature is not None:\n geomRef = 
feature.GetGeometryRef()\n if((geomRef is not None and geomRef.GetPointCount() != 0)):\n features.append(self.getFeatureInfo(feature))\n return features", "def extract_features(x, cnn):\n features = []\n prev_feat = x\n for i, layer in enumerate(cnn.net.layers[:-2]):\n next_feat = layer(prev_feat)\n features.append(next_feat)\n prev_feat = next_feat\n return features", "def get_features(fincoords, vector, orientations):\n\n\tfeatures = []\n\tfeature_obj = []\n\tfor i,point in enumerate(fincoords):\n\t\tfeature = FeatureBase(np.array([point[0], point[1]]), point[2], orientations)\n\t\tvec = feature.make_feature_vector(vector[i])\n\t\tfeatures.append(vec)\n\t\tfeature_obj.append(feature)\n\n\treturn features, feature_obj", "def extract_features(self, images: List[np.ndarray]) -> List[np.ndarray]:\n pass", "def _extract_features(self):\n # print(os.getpid())\n return {n:self._extract_feature(f) for (n,f) in self.features.items()}", "def find_features(pyr):\n\n feature_lst = spread_out_corners(pyr[0], 3, 3, 3)\n descriptors = sample_descriptor(pyr[2],feature_lst/4,3)\n return [feature_lst,descriptors]", "def extract_features(self, inputs):\n x = self.conv1(inputs)\n x = self.maxpool1(x)\n x = self.conv2(x)\n x = self.conv3(x)\n x = self.maxpool2(x)\n\n x = self.inception3a(x)\n x = self.inception3b(x)\n x = self.maxpool3(x)\n x = self.inception4a(x)\n\n x = self.inception4b(x)\n x = self.inception4c(x)\n x = self.inception4d(x)\n\n x = self.inception4e(x)\n x = self.maxpool4(x)\n x = self.inception5a(x)\n x = self.inception5b(x)\n x = self.avgpool(x)\n x = torch.flatten(x,1)\n x = self.dropout(x)\n return x", "def point_sample_fine_grained_features(features_list, feature_scales, boxes, point_coords):\n cat_boxes = Boxes.cat(boxes)\n num_boxes = [len(b) for b in boxes]\n\n point_coords_wrt_image = get_point_coords_wrt_image(cat_boxes.tensor, point_coords)\n split_point_coords_wrt_image = torch.split(point_coords_wrt_image, num_boxes)\n\n point_features = []\n for idx_img, point_coords_wrt_image_per_image in enumerate(split_point_coords_wrt_image):\n point_features_per_image = []\n for idx_feature, feature_map in enumerate(features_list):\n h, w = feature_map.shape[-2:]\n scale = torch.tensor([w, h], device=feature_map.device) / feature_scales[idx_feature]\n point_coords_scaled = point_coords_wrt_image_per_image / scale\n point_features_per_image.append(\n point_sample(\n feature_map[idx_img].unsqueeze(0),\n point_coords_scaled.unsqueeze(0),\n align_corners=False,\n )\n .squeeze(0)\n .transpose(1, 0)\n )\n point_features.append(cat(point_features_per_image, dim=1))\n\n return cat(point_features, dim=0), point_coords_wrt_image", "def read_features(self):\r\n def unpack_keypoint(data):\r\n try:\r\n kpts = data['keypoints']\r\n desc = data['descriptors']\r\n keypoints = [cv.KeyPoint(x, y, _size, _angle, _response, int(_octave), int(_class_id))\r\n for x, y, _size, _angle, _response, _octave, _class_id in list(kpts)]\r\n return keypoints, np.array(desc)\r\n except(IndexError):\r\n return np.array([]), np.array([])\r\n try:\r\n data = np.load(self.features_path + self.id + \".npz\")\r\n self.keypoints, self.descriptors = unpack_keypoint(data)\r\n logging.info(f\"Existing features for {self.name} found in features directory.\")\r\n except FileNotFoundError:\r\n logging.info(f\"Features for {self.name} not found in {self.features_path}.\")", "def get_conv_features(self, X):\n convfeatures = blah\n return convfeatures", "def extract_features(self, src_tokens, **kwargs):\n return 
self.decoder.extract_features(src_tokens, **kwargs)", "def features_keypoints(image, keypoints, window_size):\n kps = [cv2.KeyPoint(x, y, window_size) for x, y in keypoints]\n img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n descriptor = cv2.xfeatures2d.SIFT_create()\n _, features = descriptor.compute(img, kps)\n return features", "def extract_feat(self, img):\n x = self.backbone(img)\n y = self.backbone_gan(img)\n if self.with_feature_selection:\n x, y = self.feature_selection(x, y)\n if self.with_neck:\n x = self.neck(x)\n return x, y", "def get_features(self):\n if self.strokes is False:\n print('Isolating strokes')\n self.isolate_strokes()\n # List of features to use (sm1 omitted because always nan)\n feature_names = ('zrc', 'centroid',\n 'cm0', 'cm1', 'cm2', 'cm3', 'cm4',\n 'sm0', 'sm2')\n features_list = []\n for istroke in self.strokes:\n if not self.isGoodFrame(istroke):\n continue\n ifeature_dic = self.extract_features_from_frame(istroke)\n ifeature_list = []\n for ifeature in feature_names:\n ifeature_list.append(ifeature_dic[ifeature])\n features_list.append(ifeature_list)\n return {'feature_names': feature_names,\n 'feature_table': np.array(features_list)}" ]
[ "0.76258445", "0.75479424", "0.7520793", "0.7304446", "0.72170246", "0.7079929", "0.68441653", "0.6794022", "0.6782572", "0.6651835", "0.6640643", "0.66359365", "0.65395874", "0.6499239", "0.6448742", "0.6447992", "0.642975", "0.6423805", "0.63974905", "0.6388154", "0.63501143", "0.63486075", "0.6322734", "0.63058335", "0.6304647", "0.62674403", "0.6255512", "0.6227196", "0.62032735", "0.6177933" ]
0.7565338
1
Apply hard voxelization to points.
def voxelize(self, points): voxels, coors, num_points = [], [], [] for res in points: res_voxels, res_coors, res_num_points = self.voxel_layer(res) voxels.append(res_voxels) coors.append(res_coors) num_points.append(res_num_points) voxels = torch.cat(voxels, dim=0) num_points = torch.cat(num_points, dim=0) coors_batch = [] for i, coor in enumerate(coors): coor_pad = F.pad(coor, (1, 0), mode='constant', value=i) coors_batch.append(coor_pad) coors_batch = torch.cat(coors_batch, dim=0) return voxels, num_points, coors_batch
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def voxelize(self, points):\n voxels, coors, num_points = [], [], []\n for res in points:\n res_voxels, res_coors, res_num_points = self.pts_voxel_layer(res)\n voxels.append(res_voxels)\n coors.append(res_coors)\n num_points.append(res_num_points)\n voxels = torch.cat(voxels, dim=0)\n num_points = torch.cat(num_points, dim=0)\n coors_batch = []\n for i, coor in enumerate(coors):\n coor_pad = F.pad(coor, (1, 0), mode='constant', value=i)\n coors_batch.append(coor_pad)\n coors_batch = torch.cat(coors_batch, dim=0)\n return voxels, num_points, coors_batch", "def V(self, point = -1):\n return self.solution('V', point)", "def voxelize(self, points):\n voxels, coors, num_points, voxel_centers = [], [], [], []\n for res in points:\n res_voxels, res_coors, res_num_points = self.voxel_layer(res)\n res_voxel_centers = (\n res_coors[:, [2, 1, 0]] + 0.5) * res_voxels.new_tensor(\n self.voxel_layer.voxel_size) + res_voxels.new_tensor(\n self.voxel_layer.point_cloud_range[0:3])\n voxels.append(res_voxels)\n coors.append(res_coors)\n num_points.append(res_num_points)\n voxel_centers.append(res_voxel_centers)\n\n voxels = torch.cat(voxels, dim=0)\n num_points = torch.cat(num_points, dim=0)\n voxel_centers = torch.cat(voxel_centers, dim=0)\n coors_batch = []\n for i, coor in enumerate(coors):\n coor_pad = F.pad(coor, (1, 0), mode='constant', value=i)\n coors_batch.append(coor_pad)\n coors_batch = torch.cat(coors_batch, dim=0)\n\n voxel_dict = dict(\n voxels=voxels,\n num_points=num_points,\n coors=coors_batch,\n voxel_centers=voxel_centers)\n return voxel_dict", "def voxelize(points,leaf = 0.1):\n if (type(points) == pclpy.pcl.PointCloud.PointXYZRGB):\n cloud = points\n voxel_filter = pclpy.pcl.filters.VoxelGrid.PointXYZRGB()\n filtered_pointcloud = pclpy.pcl.PointCloud.PointXYZRGB()\n else:\n cloud = pclpy.pcl.PointCloud.PointXYZ(points)\n voxel_filter = pclpy.pcl.filters.VoxelGrid.PointXYZ()\n filtered_pointcloud = pclpy.pcl.PointCloud.PointXYZ()\n \n voxel_filter.setLeafSize(leaf,leaf,leaf)\n voxel_filter.setInputCloud(cloud)\n \n voxel_filter.filter(filtered_pointcloud)\n if type(points) == pclpy.pcl.PointCloud.PointXYZRGB:\n return filtered_pointcloud\n else:\n return filtered_pointcloud.xyz", "def evaluate(self, points):\n points = np.array(points, np.float64)\n output_shape = points.shape[1:]\n points.shape = (points.shape[0], seq_prod(output_shape))\n cmapi = self.image.coordmap.inverse()\n voxels = cmapi(points.T).T\n V = map_coordinates(self.data,\n voxels,\n order=self.order,\n mode=self.mode,\n cval=self.cval,\n prefilter=False)\n # ndimage.map_coordinates returns a flat array,\n # it needs to be reshaped to the original shape\n V.shape = output_shape\n return V", "def distort_vxs(self):\n self.dvxs = self.vxs.copy()\n self.dvxs[:, 0] += self.perlin()\n self.dvxs[:, 1] += self.perlin()", "def get_feature_vector(self, mode=\"binary\"):\n voxel_n = np.ravel_multi_index([self.voxel_x, self.voxel_y, self.voxel_z], self.x_y_z)\n if mode == \"binary\":\n vector = np.zeros(self.n_voxels)\n vector[np.unique(voxel_n)] = 1\n vector = vector.reshape(self.x_y_z)\n return vector\n\n elif mode == \"binary_with_nopoints\":\n vector = np.zeros(self.n_voxels)\n vector[np.unique(voxel_n)] = 1\n vector = vector.reshape(self.x_y_z)\n tot_bounds = abs(self.bounds[0]) + abs(self.bounds[1])\n # TODO can be parallelised\n non_points = []\n for point in self.points_inside_bounds:\n start, end = get_points_from_bounds(self.bounds[0], self.bounds[1], self.origin, point)\n start_projected_voxelgrid = (start - 
self.bounds[0])\n end_projected_voxelgrid = (end - self.bounds[0])\n\n assert np.all(start_projected_voxelgrid + PRECISION >= 0), 'Start / end point for nopoints calculation out of bounds: {} / {}'.format(start_projected_voxelgrid + PRECISION, tot_bounds)\n assert np.all(end_projected_voxelgrid + PRECISION >= 0), 'Start / end point for nopoints calculation out of bounds: {} / {}'.format(end_projected_voxelgrid + PRECISION, tot_bounds)\n assert np.all(start_projected_voxelgrid - PRECISION <= tot_bounds), 'Start / end point for nopoints calculation out of bounds: {} / {}'.format(start_projected_voxelgrid, tot_bounds)\n assert np.all(end_projected_voxelgrid - PRECISION <= tot_bounds), 'Start / end point for nopoints calculation out of bounds: {} / {}'.format(end_projected_voxelgrid, tot_bounds)\n\n start_projected_voxelgrid = np.clip(start_projected_voxelgrid, 0, tot_bounds - PRECISION)\n end_projected_voxelgrid = np.clip(end_projected_voxelgrid, 0, tot_bounds - PRECISION)\n\n new_non_points = list(supercover_line(start_projected_voxelgrid, end_projected_voxelgrid, self.sizes))\n non_points.extend(new_non_points)\n # if not np.all(np.array(new_non_points) >= 0) or not np.all(np.array(new_non_points).max(axis=0) < vector.shape):\n # print('Non-point detected with indices under 0 or over size')\n # print('start = {}'.format(start_projected_voxelgrid))\n # print('end = {}'.format(end_projected_voxelgrid))\n # print('Max Size: {}'.format(vector.shape))\n # print('Wrong points:')\n # print(np.array(new_non_points))\n # raise Exception()\n\n # convert only cells that are 0 to -1, NOT 1 to -1\n non_points = np.unique(np.array(non_points), axis=0).astype(int)\n\n temp = vector[non_points[:, 0], non_points[:, 1], non_points[:, 2]]\n temp[temp == 0] = -1\n vector[non_points[:, 0], non_points[:, 1], non_points[:, 2]] = temp\n return vector\n elif mode == \"density\":\n vector = np.zeros(self.n_voxels)\n count = np.bincount(voxel_n)\n vector[:len(count)] = count\n vector /= len(voxel_n)\n vector = vector.reshape(self.x_y_z)\n return vector\n # elif mode == \"TDF\":\n # vector = np.zeros(self.n_voxels)\n # # truncation = np.linalg.norm(self.shape)\n # kdt = cKDTree(self.points_inside_bounds)\n # vector, i = kdt.query(self.voxel_centers, n_jobs=-1)\n # vector = vector.reshape(self.x_y_z)\n # return vector\n elif mode.endswith(\"_max\"):\n vector = np.zeros(self.n_voxels)\n if not is_numba_avaliable:\n raise ImportError(\"numba is required to compute {}\".format(mode))\n axis = {\"x_max\": 0, \"y_max\": 1, \"z_max\": 2}\n vector = groupby_max(self.points_inside_bounds, voxel_n, axis[mode], vector)\n vector = vector.reshape(self.x_y_z)\n return vector\n elif mode.endswith(\"_mean\"):\n vector = np.zeros(self.n_voxels)\n if not is_numba_avaliable:\n raise ImportError(\"numba is required to compute {}\".format(mode))\n axis = {\"x_mean\": 0, \"y_mean\": 1, \"z_mean\": 2}\n voxel_sum = groupby_sum(self.points_inside_bounds, voxel_n, axis[mode], np.zeros(self.n_voxels))\n voxel_count = groupby_count(self.points_inside_bounds, voxel_n, np.zeros(self.n_voxels))\n vector = np.nan_to_num(voxel_sum / voxel_count)\n vector = vector.reshape(self.x_y_z)\n return vector\n\n else:\n raise NotImplementedError(\"{} is not a supported feature vector mode\".format(mode))", "def modifyPoint(self, *args, **kwargs):\n ...", "def _force_rescale(self, setpoint_x, setpoint_y):", "def lift(point):\n return gs.copy(point)", "def test_set_vx_to_vx_plus_vy(self, cpu):\n for x in range(0x0, 0xF):\n for y in range(0x0, 0xF):\n if x != 
y:\n cpu.opcode = 0x8004 | (x << 8) | (y << 4)\n for v1 in range(0x0, 0xFF):\n for v2 in range(0x0, 0xFF):\n cpu.V_register[x] = v1\n cpu.V_register[y] = v2\n cpu.set_vx_to_vx_plus_vy()\n value = v1 + v2\n if value > 0xFF:\n assert(cpu.V_register[0xF] == 1)\n assert(cpu.V_register[x] == value & 0xFF)\n else:\n assert(cpu.V_register[0xF] == 0)\n assert(cpu.V_register[x] == value)", "def fvp(self, xs, gs, **kwargs):\n raise NotImplementedError", "def apply(self, point):\r\n return self*point", "def __call__(self, *points):\n if points != ():\n if type(points[0]) == list:\n points = points[0]\n if len(points) == 0:\n raise Exception(\"A vector can only be applied to a point or a list of points.\")\n elif len(points) == 1:\n return points[0] + self\n else:\n return [point + self for point in points]", "def apply_pt(self, x_pt, y_pt):\n return ( self.matrix[0][0]*x_pt + self.matrix[0][1]*y_pt + self.vector[0],\n self.matrix[1][0]*x_pt + self.matrix[1][1]*y_pt + self.vector[1] )", "def f() -> None:\n x = torch.rand(7, 3).to(self.device)\n kernel = ScaleKernel(MaternKernel())\n allocator = GreedyVarianceReduction()\n allocator.allocate_inducing_points(x, kernel, 4, x.shape[:-2])", "def test_set_vx_to_vx_minus_vy(self, cpu):\n for x in range(0x0, 0xF):\n for y in range(0x0, 0xF):\n if x != y:\n cpu.opcode = 0x8005 | (x << 8) | (y << 4)\n for v1 in range(0x0, 0xFF):\n for v2 in range(0x0, 0xFF):\n cpu.V_register[x] = v1\n cpu.V_register[y] = v2\n cpu.set_vx_to_vx_minus_vy()\n value = v1 - v2\n if value > 0:\n assert(cpu.V_register[0xF] == 1)\n else:\n assert(cpu.V_register[0xF] == 0)\n if value >= 0:\n assert(cpu.V_register[x] == value)\n else:\n assert(cpu.V_register[x] == 0x100 + value)", "def LD_Vx_Vy(self, x, y):\n\t\tself.V[x] = self.V[y]", "def test_set_vx_to_vx_or_vy(self, cpu):\n for x in range(0x0, 0xF):\n for y in range(0x0, 0xF):\n if x != y:\n cpu.opcode = 0x8003 | (x << 8) | (y << 4)\n for v1 in range(0x0, 0xFF):\n for v2 in range(0x0, 0xFF):\n cpu.V_register[x] = v1\n cpu.V_register[y] = v2\n cpu.set_vx_to_vx_or_vy()\n assert(cpu.V_register[x] == v1 | v2)", "def test_set_vx_to_vy_minus_vx(self, cpu):\n for x in range(0x0, 0xF):\n for y in range(0x0, 0xF):\n if x != y:\n cpu.opcode = 0x8007 | (x << 8) | (y << 4)\n for v1 in range(0x0, 0xFF):\n for v2 in range(0x0, 0xFF):\n cpu.V_register[x] = v1\n cpu.V_register[y] = v2\n cpu.set_vx_to_vy_minus_vx()\n value = v2 - v1\n if value > 0:\n assert(cpu.V_register[0xF] == 1)\n else:\n assert(cpu.V_register[0xF] == 0)\n if value >= 0:\n assert(cpu.V_register[x] == value)\n else:\n assert(cpu.V_register[x] == 0x100 + value)", "def _updateProjectedPts(self):\n for pointSetName in self.pointSets:\n pointSet = self.pointSets[pointSetName]\n proj_pts = self._evaluatePoints(\n pointSet.u,\n pointSet.v,\n pointSet.t,\n pointSet.uvlimits0,\n pointSet.tlimits0,\n pointSet.bodyID,\n pointSet.faceID,\n pointSet.edgeID,\n pointSet.nPts,\n )\n pointSet.proj_pts = proj_pts", "def applyToPoint(self, point):\n return self + point", "def compute_V(self, x):\n assert x.ndim == 3\n return self.potential_net(x.reshape(x.size(0), -1))", "def set_vx_to_vx_or_vy(self, cpu):\n for x in range(0x0, 0xF):\n for y in range(0x0, 0xF):\n if x != y:\n cpu.opcode = 0x8001 | (x << 8) | (y << 4)\n for v1 in range(0x0, 0xFF):\n for v2 in range(0x0, 0xFF):\n cpu.V_register[x] = v1\n cpu.V_register[y] = v2\n cpu.set_vx_to_vx_or_vy()\n assert(cpu.V_register[x] == v1 | v2)", "def apply(self, points):\n pshape = numpy.shape(points)\n homogeneous = 1\n if len(pshape) == 1:\n if 
pshape[0] == 3:\n points = numpy.array(numpy.concatenate((points, numpy.ones(1, 'f')), 1))\n homogeneous = 0\n elif len(pshape) == 2:\n if pshape[1] == 3:\n points = numpy.array(numpy.concatenate(\n (numpy.array(points), numpy.ones((pshape[0], 1), 'f')), 1))\n homogeneous = 0\n mtx = self.getMatrix((4, 4), transpose=1)\n newpoints = numpy.dot(points, mtx)\n if homogeneous:\n return newpoints\n else:\n # strip the final one off the coordinates\n if len(pshape) == 1:\n return newpoints[:3]\n else:\n newpoints = [x[:3] for x in newpoints]\n return newpoints", "def apply(self, point):\n m = numpy.dot(self.matrix, numpy.array([point[0], point[1], 1.0]).transpose())\n return pygame.Vector2(m[0], m[1])", "def V_vect(self, points):\n return self.A_conf*norm(points)*self.isOutside(points)", "def test_pointnum2():\n shape = paramak.CapsuleVacuumVessel(outer_start_point=(100, -100), radius=400, thickness=25)\n assert len(shape.points) == 12\n assert len(shape.processed_points) == 13", "def apply(self, f):\n for v in self.vertices:\n v.x, v.y, v.z = f(v.coords())", "def test_set_vx_to_vy(self, cpu):\n for x in range(0x0, 0xF):\n for y in range(0x0, 0xF):\n if x != y:\n cpu.opcode = 0x8000 | (x << 8) | (y << 4)\n for v in range(0x0, 0xFF):\n cpu.V_register[y] = v\n cpu.set_vx_to_vy()\n assert(cpu.V_register[x] == v)" ]
[ "0.6439414", "0.6234096", "0.61848783", "0.61010504", "0.6074771", "0.58545244", "0.5738699", "0.5695123", "0.56734055", "0.5653354", "0.5604796", "0.56036615", "0.55895144", "0.557377", "0.55472815", "0.5544818", "0.55416125", "0.54999644", "0.54610586", "0.54565", "0.54349375", "0.5434413", "0.5432061", "0.54310894", "0.54175425", "0.5401518", "0.5397699", "0.5371708", "0.5365173", "0.536468" ]
0.6423426
1
Load the grid data from the sample earth_relief file.
def fixture_grid(): return load_earth_relief(registration="pixel")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fixture_grid():\n return load_earth_relief(registration=\"gridline\")", "def load_train_grid40(return_imsize=True):\n file_base_path = os.path.join(rlvision.RLVISION_DATA,\n \"train\", \"gridworld_40\", \"gridworld_40\")\n\n if not os.path.isfile(file_base_path+\".h5\"):\n raise ValueError(\"The dataset %s is not existed!\" %\n (file_base_path+\".h5\"))\n\n db = h5py.File(file_base_path+\".h5\", mode=\"r\")\n\n # load dataset\n data = None\n value = None\n start_tot = []\n traj_tot = []\n goal_tot = []\n for split in xrange(1, 6):\n if data is None:\n data = db[\"grid_data_split_\"+str(split)]['data']\n else:\n data = np.vstack((data,\n db[\"grid_data_split_\"+str(split)]['data']))\n if value is None:\n value = db[\"grid_data_split_\"+str(split)]['value']\n else:\n value = np.vstack((value,\n db[\"grid_data_split_\"+str(split)]['value']))\n\n with open(file_base_path+\"_start_%i.pkl\" % (split), \"r\") as f:\n start_pos_list = pickle.load(f)\n f.close()\n start_tot += start_pos_list\n\n with open(file_base_path+\"_traj_%i.pkl\" % (split), \"r\") as f:\n traj_list = pickle.load(f)\n f.close()\n traj_tot += traj_list\n\n with open(file_base_path+\"_goal_%i.pkl\" % (split), \"r\") as f:\n goal_list = pickle.load(f)\n f.close()\n goal_tot += goal_list\n\n if return_imsize:\n return data, value, start_tot, traj_tot, goal_tot, (40, 40)\n else:\n return data, value, start_tot, traj_tot, goal_tot", "def load_train_grid28(return_imsize=True):\n file_base_path = os.path.join(rlvision.RLVISION_DATA,\n \"train\", \"gridworld_28\", \"gridworld_28\")\n\n if not os.path.isfile(file_base_path+\".h5\"):\n raise ValueError(\"The dataset %s is not existed!\" %\n (file_base_path+\".h5\"))\n\n db = h5py.File(file_base_path+\".h5\", mode=\"r\")\n\n # load dataset\n data = None\n value = None\n start_tot = []\n traj_tot = []\n goal_tot = []\n for split in xrange(1, 6):\n if data is None:\n data = db[\"grid_data_split_\"+str(split)]['data']\n else:\n data = np.vstack((data,\n db[\"grid_data_split_\"+str(split)]['data']))\n if value is None:\n value = db[\"grid_data_split_\"+str(split)]['value']\n else:\n value = np.vstack((value,\n db[\"grid_data_split_\"+str(split)]['value']))\n\n with open(file_base_path+\"_start_%i.pkl\" % (split), \"r\") as f:\n start_pos_list = pickle.load(f)\n f.close()\n start_tot += start_pos_list\n\n with open(file_base_path+\"_traj_%i.pkl\" % (split), \"r\") as f:\n traj_list = pickle.load(f)\n f.close()\n traj_tot += traj_list\n\n with open(file_base_path+\"_goal_%i.pkl\" % (split), \"r\") as f:\n goal_list = pickle.load(f)\n f.close()\n goal_tot += goal_list\n\n if return_imsize:\n return data, value, start_tot, traj_tot, goal_tot, (28, 28)\n else:\n return data, value, start_tot, traj_tot, goal_tot", "def load_train_grid8(return_imsize=True):\n file_base_path = os.path.join(rlvision.RLVISION_DATA,\n \"train\", \"gridworld_8\", \"gridworld_8\")\n\n # load dataset\n if not os.path.isfile(file_base_path+\".h5\"):\n raise ValueError(\"The dataset %s is not existed!\" %\n (file_base_path+\".h5\"))\n\n db = h5py.File(file_base_path+\".h5\", mode=\"r\")\n\n with open(file_base_path+\"_start.pkl\", \"r\") as f:\n start_pos_list = pickle.load(f)\n f.close()\n\n with open(file_base_path+\"_traj.pkl\", \"r\") as f:\n traj_list = pickle.load(f)\n f.close()\n\n with open(file_base_path+\"_goal.pkl\", \"r\") as f:\n goal_list = pickle.load(f)\n f.close()\n\n if return_imsize is True:\n return (db['data'], db['value'], start_pos_list, traj_list,\n goal_list, (8, 8))\n else:\n return 
(db['data'], db['value'], start_pos_list, traj_list,\n goal_list)", "def ReadGrid(self, grdfile):\n nc = Dataset(grdfile,'r')\n \n self.xv = nc.variables['xv'][:]\n self.yv = nc.variables['yv'][:]\n self.xp = nc.variables['xp'][:]\n self.yp = nc.variables['yp'][:]\n self.xe = nc.variables['xe'][:]\n self.ye = nc.variables['ye'][:]\n self.dz = nc.variables['dz'][:] \n self.dv = nc.variables['dv'][:]\n self.Ac = nc.variables['Ac'][:]\n self.Nk = nc.variables['Nk'][:]\n self.face = nc.variables['face'][:]\n self.mark = nc.variables['mark'][:]\n\tself.cells = nc.variables['cells'][:]\n \n self.Nc = len(self.xv)\n self.Np = len(self.xp)\n self.Ne = len(self.xe)\n self.Nk = len(self.dz)\n self.numsides = self.face.shape[1]", "def load_target_grid(self):\n\n # load the target grid name (expected to be in the settings.txt file)\n self.grid_name = (self.st['directory_metadata'][0] +\n self.st[\"target_grid\"][0])\n\n if os.path.exists(self.grid_name):\n\n # open the metadata file\n self.file = netCDF4.Dataset(self.grid_name)\n\n # laod lat/lon\n self.lat = self.file.variables[\"latitude\"][:, :]\n self.lon = self.file.variables[\"longitude\"][:, :]\n\n try:\n\n # Atributos globais para serem lidos no thredds\n self.GRIDTYPE = getattr(self.file, \"GRIDTYPE\")\n self.MAP_PROJ = getattr(self.file, \"MAP_PROJ\")\n self.CEN_LON = getattr(self.file, \"CEN_LON\")\n self.MAP_PROJ_CHAR = getattr(self.file, \"MAP_PROJ_CHAR\")\n self.STAND_LON = getattr(self.file, \"STAND_LON\")\n self.TRUELAT1 = getattr(self.file, \"TRUELAT1\")\n self.TRUELAT2 = getattr(self.file, \"TRUELAT2\")\n self.CEN_LAT = getattr(self.file, \"CEN_LAT\")\n self.DX = getattr(self.file, \"DX\")\n self.DY = getattr(self.file, \"DY\")\n self.MOAD_CEN_LAT = getattr(self.file, \"MOAD_CEN_LAT\")\n\n except ValueError:\n pass\n\n # Close the file\n self.file.close()\n\n else:\n\n l1 = \"WARNING\"\n l2 = \"Target Grid: %s not found\" % self.grid_name\n l3 = \"Can't proceed\"\n l4 = \"Shutting down the program\"\n print(\"\")\n print(int(max([len(l1), len(l2), len(l3), len(l4)]) / 2 -\n len(l1) / 2) * \" \" + l1)\n print(l2)\n print(l3)\n print(l4)\n print(\"\")\n sys.exit()", "def load_train_grid16(return_imsize=True):\n file_base_path = os.path.join(rlvision.RLVISION_DATA,\n \"train\", \"gridworld_16\", \"gridworld_16\")\n\n if not os.path.isfile(file_base_path+\".h5\"):\n raise ValueError(\"The dataset %s is not existed!\" %\n (file_base_path+\".h5\"))\n\n db = h5py.File(file_base_path+\".h5\", mode=\"r\")\n\n # load dataset\n data = None\n value = None\n start_tot = []\n traj_tot = []\n goal_tot = []\n for split in xrange(1, 6):\n if data is None:\n data = db[\"grid_data_split_\"+str(split)]['data']\n else:\n data = np.vstack((data,\n db[\"grid_data_split_\"+str(split)]['data']))\n if value is None:\n value = db[\"grid_data_split_\"+str(split)]['value']\n else:\n value = np.vstack((value,\n db[\"grid_data_split_\"+str(split)]['value']))\n\n with open(file_base_path+\"_start_%i.pkl\" % (split), \"r\") as f:\n start_pos_list = pickle.load(f)\n f.close()\n start_tot += start_pos_list\n\n with open(file_base_path+\"_traj_%i.pkl\" % (split), \"r\") as f:\n traj_list = pickle.load(f)\n f.close()\n traj_tot += traj_list\n\n with open(file_base_path+\"_goal_%i.pkl\" % (split), \"r\") as f:\n goal_list = pickle.load(f)\n f.close()\n goal_tot += goal_list\n\n if return_imsize:\n return data, value, start_tot, traj_tot, goal_tot, (16, 16)\n else:\n return data, value, start_tot, traj_tot, goal_tot", "def load_grd(filename):\n with open(filename, 'r') as 
f:\n meta = {}\n meta['header'] = []\n meta['header'].append(f.readline().rstrip('\\n'))\n while meta['header'][-1] != '++++':\n meta['header'].append(f.readline().rstrip('\\n'))\n # These determine the type of grid and the field format.\n meta['KTYPE'] = int(f.readline().split()[0])\n if meta['KTYPE'] != 1:\n raise ValueError(\"Not implemented.\")\n meta['NSET'], meta['ICOMP'], meta['NCOMP'], meta['IGRID'] = [int(s) for s in f.readline().split()]\n # The grid center in units of the x and y grid spacing.\n meta['IX'], meta['IY'] = [int(s) for s in f.readline().split()]\n # These are the x and y grid limits: S is lower, and E is upper.\n meta['XS'], meta['YS'], meta['XE'], meta['YE'] = [float(s) for s in f.readline().split()]\n # These are the numbers of grid points in x and y.\n meta['NX'], meta['NY'], meta['KLIMIT'] = [int(s) for s in f.readline().split()]\n # Implement this to read elliptically truncated grids.\n if meta['KLIMIT'] != 0:\n raise ValueError(\"Not implemented.\")\n # Load the field data. This returns an array with shape (NX * NY, 2 * NCOMP).\n conv = dict([(column, string_to_float) for column in range(2 * meta['NCOMP'])])\n data = np.loadtxt(f, dtype=float, converters=conv)\n # Determine the grid spacing and center values.\n meta['DX'] = (meta['XE'] - meta['XS']) / (meta['NX'] - 1)\n meta['DY'] = (meta['YE'] - meta['YS']) / (meta['NY'] - 1)\n meta['XCEN'] = meta['DX'] * meta['IX']\n meta['YCEN'] = meta['DY'] * meta['IY']\n # Reshape the data.\n map = np.empty((meta['NX'], meta['NY'], meta['NCOMP']),\n dtype=np.complex)\n for component in range(meta['NCOMP']):\n column = data[:, 2 * component] + 1j * data[:, 2 * component + 1]\n map[:, :, component] = column.reshape(meta['NX'], meta['NY'], order='F')\n return meta, map", "def _load_grdfile(casename=None):\n \n data={} \n\n if casename==None:\n print('_load_grdfile requires a filename to load.')\n return\n try:\n fp=open(casename+'_grd.dat','r')\n except IOError:\n print('_load_grdfiles: invalid case name.')\n return data\n\n nodes_str=fp.readline().split('=')\n elements_str=fp.readline().split('=')\n nnodes=int(nodes_str[1])\n nele=int(elements_str[1])\n t_data1=np.genfromtxt(casename+'_grd.dat',skip_header=2, skip_footer=nnodes,dtype='int64')\n t_data2=np.genfromtxt(casename+'_grd.dat',skip_header=2+nele,dtype='float64')\n fp.close()\n\n data['nnodes']=nnodes\n data['nele']=nele\n data['nodexy']=t_data2[:,1:3]\n data['x']=t_data2[:,1]\n data['y']=t_data2[:,2]\n data['nv']=t_data1[:,1:4].astype(int)-1\n data['trigridxy'] = mplt.Triangulation(data['x'], data['y'],data['nv'])\n \n return data", "def _load_grid(self):\n\n grid_metrics = ['nbe', 'ntsn', 'nbsn', 'ntve', 'nbve', 'art1', 'art2', 'a1u', 'a2u']\n grid_variables = ['lon', 'lat', 'x', 'y', 'lonc', 'latc', 'xc', 'yc',\n 'h', 'siglay', 'siglev']\n\n # Get the grid data.\n for grid in grid_variables:\n try:\n setattr(self.grid, grid, self.ds.variables[grid][:])\n # Save the attributes.\n attributes = type('attributes', (object,), {})()\n for attribute in self.ds.variables[grid].ncattrs():\n setattr(attributes, attribute, getattr(self.ds.variables[grid], attribute))\n setattr(self.atts, grid, attributes)\n except KeyError:\n # Make zeros for this missing variable so we can convert from the non-missing data below.\n if grid.endswith('c'):\n setattr(self.grid, grid, np.zeros(self.dims.nele).T)\n else:\n setattr(self.grid, grid, np.zeros(self.dims.node).T)\n except ValueError as value_error_message:\n warn('Variable {} has a problem with the data. 
Setting value as all zeros.'.format(grid))\n print(value_error_message)\n setattr(self.grid, grid, np.zeros(self.ds.variables[grid].shape))\n\n # Load the grid metrics data separately as we don't want to set a bunch of zeros for missing data.\n for metric in grid_metrics:\n if metric in self.ds.variables:\n setattr(self.grid, metric, self.ds.variables[metric][:])\n # Save the attributes.\n attributes = type('attributes', (object,), {})()\n for attribute in self.ds.variables[metric].ncattrs():\n setattr(attributes, attribute, getattr(self.ds.variables[metric], attribute))\n setattr(self.atts, metric, attributes)\n\n # Fix the indexing and shapes of the grid metrics variables. Only transpose and offset indexing for nbe.\n try:\n if metric == 'nbe':\n setattr(self.grid, metric, getattr(self.grid, metric).T - 1)\n else:\n setattr(self.grid, metric, getattr(self.grid, metric))\n except AttributeError:\n # We don't have this variable, so just pass by silently.\n pass\n\n try:\n self.grid.nv = self.ds.variables['nv'][:].astype(int) # force integers even though they should already be so\n self.grid.triangles = copy.copy(self.grid.nv.T - 1) # zero-indexed for python\n except KeyError:\n # If we don't have a triangulation, make one.\n triangulation = tri.Triangulation(self.grid.lon, self.grid.lat)\n self.grid.triangles = triangulation.triangles\n self.grid.nv = self.grid.triangles.T + 1\n\n # Fix broken triangulations if necessary.\n if self.grid.nv.min() != 1:\n if self._debug:\n print('Fixing broken triangulation. Current minimum for nv is {} and for triangles is {} but they '\n 'should be 1 and 0, respectively.'.format(self.grid.nv.min(), self.grid.triangles.min()))\n self.grid.nv = (self.ds.variables['nv'][:].astype(int) - self.ds.variables['nv'][:].astype(int).min()) + 1\n self.grid.triangles = copy.copy(self.grid.nv.T) - 1\n\n # If we've been given an element dimension to subsample in, fix the triangulation here. We should really do\n # this for the nodes too.\n if 'nele' in self._dims:\n if self._debug:\n print('Fix triangulation table as we have been asked for only specific elements.')\n print('Triangulation table minimum/maximum: {}/{}'.format(self.grid.nv[:, self._dims['nele']].min(),\n self.grid.nv[:, self._dims['nele']].max()))\n # Redo the triangulation here too.\n new_nv = copy.copy(self.grid.nv[:, self._dims['nele']])\n for i, new in enumerate(np.unique(new_nv)):\n new_nv[new_nv == new] = i\n self.grid.nv = new_nv + 1\n self.grid.triangles = new_nv.T\n\n # Update dimensions to match those we've been given, if any. Omit time here as we shouldn't be touching that\n # dimension for any variable in use in here.\n for dim in self._dims:\n if dim != 'time':\n setattr(self.dims, dim, len(self._dims[dim]))\n\n # Add compatibility for FVCOM3 (these variables are only specified on the element centres in FVCOM4+ output\n # files). Only create the element centred values if we have the same number of nodes as in the triangulation.\n # This does not occur if we've been asked to extract an incompatible set of nodes and elements, for whatever\n # reason (e.g. testing). 
We don't add attributes for the data if we've created it as doing so is a pain.\n for var in 'h_center', 'siglay_center', 'siglev_center':\n try:\n setattr(self.grid, var, self.ds.variables[var][:])\n # Save the attributes.\n attributes = type('attributes', (object,), {})()\n for attribute in self.ds.variables[var].ncattrs():\n setattr(attributes, attribute, getattr(self.ds.variables[var], attribute))\n setattr(self.atts, var, attributes)\n except KeyError:\n if self.grid.nv.max() == len(self.grid.x):\n try:\n setattr(self.grid, var, nodes2elems(getattr(self.grid, var.split('_')[0]), self.grid.triangles))\n except IndexError:\n # Maybe the array's the wrong way around. Flip it and try again.\n setattr(self.grid, var, nodes2elems(getattr(self.grid, var.split('_')[0]).T, self.grid.triangles))\n\n # Convert the given W/E/S/N coordinates into node and element IDs to subset.\n if self._bounding_box:\n self._dims['node'] = np.argwhere((self.grid.lon > self._dims['wesn'][0]) &\n (self.grid.lon < self._dims['wesn'][1]) &\n (self.grid.lat > self._dims['wesn'][2]) &\n (self.grid.lat < self._dims['wesn'][3])).flatten()\n self._dims['nele'] = np.argwhere((self.grid.lonc > self._dims['wesn'][0]) &\n (self.grid.lonc < self._dims['wesn'][1]) &\n (self.grid.latc > self._dims['wesn'][2]) &\n (self.grid.latc < self._dims['wesn'][3])).flatten()\n\n # If we've been given dimensions to subset in, do that now. Loading the data first and then subsetting\n # shouldn't be a problem from a memory perspective because if you don't have enough memory for the grid data,\n # you probably won't have enough for actually working with the outputs. Also update dimensions to match the\n # given dimensions.\n if 'node' in self._dims:\n self.dims.node = len(self._dims['node'])\n for var in 'x', 'y', 'lon', 'lat', 'h', 'siglay', 'siglev':\n try:\n node_index = self.ds.variables[var].dimensions.index('node')\n var_shape = [i for i in np.shape(self.ds.variables[var])]\n var_shape[node_index] = self.dims.node\n if 'siglay' in self._dims and 'siglay' in self.ds.variables[var].dimensions:\n var_shape[self.ds.variables[var].dimensions.index('siglay')] = self.dims.siglay\n elif 'siglev' in self._dims and 'siglev' in self.ds.variables[var].dimensions:\n var_shape[self.ds.variables[var].dimensions.index('siglev')] = self.dims.siglev\n _temp = np.empty(var_shape)\n if 'siglay' in self.ds.variables[var].dimensions:\n for ni, node in enumerate(self._dims['node']):\n if 'siglay' in self._dims:\n _temp[..., ni] = self.ds.variables[var][self._dims['siglay'], node]\n else:\n _temp[..., ni] = self.ds.variables[var][:, node]\n elif 'siglev' in self.ds.variables[var].dimensions:\n for ni, node in enumerate(self._dims['node']):\n if 'siglev' in self._dims:\n _temp[..., ni] = self.ds.variables[var][self._dims['siglev'], node]\n else:\n _temp[..., ni] = self.ds.variables[var][:, node]\n else:\n for ni, node in enumerate(self._dims['node']):\n _temp[..., ni] = self.ds.variables[var][..., node]\n except KeyError:\n if 'siglay' in var:\n _temp = np.empty((self.dims.siglay, self.dims.node))\n elif 'siglev' in var:\n _temp = np.empty((self.dims.siglev, self.dims.node))\n else:\n _temp = np.empty(self.dims.node)\n setattr(self.grid, var, _temp)\n if 'nele' in self._dims:\n self.dims.nele = len(self._dims['nele'])\n for var in 'xc', 'yc', 'lonc', 'latc', 'h_center', 'siglay_center', 'siglev_center':\n try:\n nele_index = self.ds.variables[var].dimensions.index('nele')\n var_shape = [i for i in np.shape(self.ds.variables[var])]\n var_shape[nele_index] = 
self.dims.nele\n if 'siglay' in self._dims and 'siglay' in self.ds.variables[var].dimensions:\n var_shape[self.ds.variables[var].dimensions.index('siglay')] = self.dims.siglay\n elif 'siglev' in self._dims and 'siglev' in self.ds.variables[var].dimensions:\n var_shape[self.ds.variables[var].dimensions.index('siglev')] = self.dims.siglev\n _temp = np.empty(var_shape)\n if 'siglay' in self.ds.variables[var].dimensions:\n for ni, nele in enumerate(self._dims['nele']):\n if 'siglay' in self._dims:\n _temp[..., ni] = self.ds.variables[var][self._dims['siglay'], nele]\n else:\n _temp[..., ni] = self.ds.variables[var][:, nele]\n elif 'siglev' in self.ds.variables[var].dimensions:\n for ni, nele in enumerate(self._dims['nele']):\n if 'siglev' in self._dims:\n _temp[..., ni] = self.ds.variables[var][self._dims['siglev'], nele]\n else:\n _temp[..., ni] = self.ds.variables[var][:, nele]\n else:\n for ni, nele in enumerate(self._dims['nele']):\n _temp[..., ni] = self.ds.variables[var][..., nele]\n except KeyError:\n # FVCOM3 files don't have h_center, siglay_center and siglev_center, so make var_shape manually.\n if var.startswith('siglev'):\n var_shape = [self.dims.siglev, self.dims.nele]\n elif var.startswith('siglay'):\n var_shape = [self.dims.siglay, self.dims.nele]\n else:\n var_shape = self.dims.nele\n _temp = np.zeros(var_shape)\n setattr(self.grid, var, _temp)\n\n # Check if we've been given vertical dimensions to subset in too, and if so, do that. Check we haven't\n # already done this if the 'node' and 'nele' sections above first.\n for var in 'siglay', 'siglev', 'siglay_center', 'siglev_center':\n short_dim = copy.copy(var)\n # Assume we need to subset this one unless 'node' or 'nele' are missing from self._dims. If they're in\n # self._dims, we've already subsetted in the 'node' and 'nele' sections above, so doing it again here\n # would fail.\n subset_variable = True\n if 'node' in self._dims or 'nele' in self._dims:\n subset_variable = False\n # Strip off the _center to match the dimension name.\n if short_dim.endswith('_center'):\n short_dim = short_dim.split('_')[0]\n if short_dim in self._dims:\n if short_dim in self.ds.variables[var].dimensions and subset_variable:\n _temp = getattr(self.grid, var)[self._dims[short_dim], ...]\n setattr(self.grid, var, _temp)\n\n # Check ranges and if zero assume we're missing that particular type, so convert from the other accordingly.\n self.grid.lon_range = np.ptp(self.grid.lon)\n self.grid.lat_range = np.ptp(self.grid.lat)\n self.grid.lonc_range = np.ptp(self.grid.lonc)\n self.grid.latc_range = np.ptp(self.grid.latc)\n self.grid.x_range = np.ptp(self.grid.x)\n self.grid.y_range = np.ptp(self.grid.y)\n self.grid.xc_range = np.ptp(self.grid.xc)\n self.grid.yc_range = np.ptp(self.grid.yc)\n\n # Only do the conversions when we have more than a single point since the relevant ranges will be zero with\n # only one position.\n if self.dims.node > 1:\n if self.grid.lon_range == 0 and self.grid.lat_range == 0:\n self.grid.lon, self.grid.lat = lonlat_from_utm(self.grid.x, self.grid.y, zone=self._zone)\n if self.grid.lon_range == 0 and self.grid.lat_range == 0:\n self.grid.x, self.grid.y, _ = utm_from_lonlat(self.grid.lon, self.grid.lat)\n if self.dims.nele > 1:\n if self.grid.lonc_range == 0 and self.grid.latc_range == 0:\n self.grid.lonc, self.grid.latc = lonlat_from_utm(self.grid.xc, self.grid.yc, zone=self._zone)\n if self.grid.lonc_range == 0 and self.grid.latc_range == 0:\n self.grid.xc, self.grid.yc, _ = utm_from_lonlat(self.grid.lonc, 
self.grid.latc)", "def load_data(self, map_name, grid_name, tp_name):\n \n self.map= TiledMap(path.join(self.map_folder, map_name))\n self.map_img = self.map.make_map()\n self.map_img2 = self.map_img\n #self.noisy_map_img = noisy(\"gauss\", pg.surfarray.array3d(self.map_img))\n self.noisy_map_img = make_noisy(pg.surfarray.array3d(self.map_img))\n self.map_rect = self.map_img.get_rect()\n \n with open(path.join(self.map_folder, tp_name), 'rt') as f:\n # destinations is a dict mapping each tilemap teleport coordinate to\n # the destination tilemap coordinate\n self.destinations = eval(f.read())\n\n self.grid= OccupancyGrid(self, path.join(self.map_folder, grid_name)) #down here because it needs destinations\n self.graph = self.grid.make_graph()\n\n #sounds\n self.wall_channel=pg.mixer.Channel(0)\n self.wall_sound=pg.mixer.Sound(WALL_THUD_SOUND)\n self.teleport_channel=pg.mixer.Channel(1)\n self.teleport_sound=pg.mixer.Sound(TELEPORT_SOUND)", "def load_data(\n self, file_path: str = os.path.join(os.getcwd(), \"data_breast_cancer.p\")\n ) -> None:\n with open(file_path, \"rb\") as file:\n data = pickle.load(file)\n self.x_train, self.y_train = data[\"x_train\"], data[\"y_train\"]\n self.x_test, self.y_test = data[\"x_test\"], data[\"y_test\"]", "def load_grid(self, elem_file, elec_file):\n self.load_elem_file(elem_file)\n self.load_elec_file(elec_file)", "def load(self,train_ratio=.9,seed=123):\n\t\tdata_dir = cfg.DATA_DIR\n\n\t\tif isinstance(DATASETS.get(self.data_name),dict):\n\t\t\t\n\t\t\tif len(DATASETS.get(self.data_name))==2: # Relevant only for MNIST\n\t\t\t\ttrain_fpath = os.path.join(data_dir,DATASETS.get(self.data_name).get('train'))\n\t\t\t\ttest_fpath = os.path.join(data_dir,DATASETS.get(self.data_name).get('test'))\n\t\t\t\n\t\t\telse: # Only relevant for binarized MNIST\n\t\t\t\ttrain_fpath = os.path.join(data_dir,DATASETS.get(self.data_name).get('train'))\n\t\t\t\ttest_fpath = os.path.join(data_dir,DATASETS.get(self.data_name).get('test'))\n\t\t\t\tvalid_fpath = os.path.join(data_dir,DATASETS.get(self.data_name).get('valid'))\n\t\telse:\n\t\t\tfpath = os.path.join(data_dir,DATASETS.get(self.data_name))\n\n\t\tprint(f\"Trying to load {self.data_name} from directory(ies):\")\n\t\t\n\t\tif self.data_name == \"freyfaces\":\n\t\t\t# Load freyfaces\n\t\t\tprint(f\"...from {os.path.join(data_dir,fpath)}\")\n\t\t\tf = open(fpath,'rb')\n\t\t\tdata = pickle.load(f,encoding='latin1')\n\t\t\tf.close()\n\n\t\t\t# This block is directly from the VRBound repository\n\t\t\tnp.random.seed(seed)\n\t\t\tnp.random.shuffle(data)\n\t\t\tif train_ratio==1 or (0>train_ratio or 1<train_ratio):\n\t\t\t\tprint(f\"Train split ratio {train_ratio} or test value is invalid!\")\n\t\t\t\traise Exception\n\t\t\tnum_train = int(train_ratio* data.shape[0])\n\n\t\t\tdata_train = data[:num_train]\n\t\t\tdata_test = data[num_train:]\n\t\t\t# End of copy\n\n\t\telif self.data_name == \"silhouettes\":\n\t\t\t# Load silhouettes data\n\t\t\tprint(f\"...from {os.path.join(data_dir,fpath)}\")\n\n\t\t\t# These lines are also from VRBound repository\n\t\t\tdata = loadmat(fpath) \n\t\t\tdata = 1-data.get('X')\n\n\t\t\t# This block is directly from the VRBound repository\n\t\t\tnp.random.seed(seed)\n\t\t\tnp.random.shuffle(data)\n\t\t\tif train_ratio==1 or (0>train_ratio or 1<train_ratio):\n\t\t\t\tprint(f\"Train split ratio {train_ratio} or test value is invalid!\")\n\t\t\t\traise Exception\n\t\t\tnum_train = int(train_ratio* data.shape[0])\n\n\n\t\t\tdata_train = data[:num_train]\n\t\t\tdata_test = 
data[num_train:]\n\t\t\t# End of copy\n\n\t\telif self.data_name == \"mnist\":\n\t\t\tprint(\"MNIST data is already train/test split - training ratio input ignored!\")\n\t\t\tprint(f\"...from {os.path.join(data_dir,DATASETS.get(self.data_name)['train'])}\")\n\n\t\t\tdata_train, _ = MNIST(train_fpath).load_training() # We don't care about what the labels are; overwrite\n\t\t\tdata_test, _ = MNIST(test_fpath).load_testing()\n\n\t\telif self.data_name == \"mnist_binary\":\n\t\t\tprint(\"MNIST data is already train/test split - training ratio input ignored!\")\n\t\t\tprint(f\"...from {os.path.join(train_fpath.split('/')[-2])}\")\n\t\t\t# This is directly from the iwae codebase\n\t\t\tdef lines_to_np_array(lines):\n\t\t\t return np.array([[int(i) for i in line.split()] for line in lines])\n\t\t\twith open(train_fpath) as f:\n\t\t\t lines = f.readlines()\n\t\t\ttrain_data = lines_to_np_array(lines).astype('float32')\n\t\t\twith open(test_fpath) as f:\n\t\t\t lines = f.readlines()\n\t\t\tvalidation_data = lines_to_np_array(lines).astype('float32')\n\t\t\twith open(valid_fpath) as f:\n\t\t\t lines = f.readlines()\n\t\t\tdata_test = lines_to_np_array(lines).astype('float32')\n\n\t\t\tdata_train= np.concatenate([train_data, validation_data], axis=0)\n\n\t\telif self.data_name == \"omniglot\":\n\t\t\tprint(f\"...from {os.path.join(data_dir,fpath)}\")\n\t\t\tprint(\"Omniglot data is already train/test split - training ratio input ignored!\")\n\n\t\t\tdata = loadmat(fpath)\n\n\t\t\t# From iwae repository\n\t\t\tdata_train = data['data'].T.astype('float32').reshape((-1, 28, 28)).reshape((-1, 28*28), order='F') \n\t\t\tdata_test = data['testdata'].T.astype('float32').reshape((-1, 28, 28)).reshape((-1, 28*28), order='F')\n\t\t\n\t\treturn data_train, data_test", "def load_data():\n global batch_size, num_batches\n # import data\n data, labels = original_clean()\n test_data = data[:test_size, :]\n test_labels = labels[:test_size]\n\n data = data[test_size:, :]\n\n # make landmarks with points with most neighbors\n N = NearestNeighbors(n_neighbors=k_start).fit(data).kneighbors_graph(data).todense()\n N = np.array(N)\n num_connections = N.sum(axis=0).argsort()[::-1] # see how many neighbors each point has\n top_landmarks_idxs = num_connections[:num_lm] # sort in descending order\n land_marks = data[top_landmarks_idxs, :] # pick the top ones\n data = np.delete(data, top_landmarks_idxs, axis=0) # delete the landmarks\n # find the nearest landmarks for the landmarks\n landmark_neighbors = NearestNeighbors(n_neighbors=k_lm).fit(land_marks).kneighbors_graph(land_marks).todense()\n # break data into batches, create empty holders\n batch_loader = np.zeros((num_batches, batch_size + num_lm, n))\n batch_graph = np.zeros((num_batches, batch_size + num_lm, batch_size + num_lm))\n # create the full neighborhood graph for each batch\n for i in range(num_batches):\n holder = data[batch_size * i: batch_size * (i + 1)]\n # find the nearest landmarks for the rest of the points\n holder_graph = NearestNeighbors(n_neighbors=k_other).fit(land_marks).kneighbors_graph(holder).todense()\n for j in range(batch_size): # copy over the holder graph\n for l in range(num_lm):\n if holder_graph[j, l] == 1:\n batch_graph[i, j, l + batch_size] = 1\n batch_graph[i, l + batch_size, j] = 1\n for j in range(num_lm): # copy over landmark neighbors\n for l in range(j, num_lm):\n if landmark_neighbors[j, l] == 1 and j != l:\n batch_graph[i, j + batch_size, l + batch_size] = 1\n batch_graph[i, l + batch_size, j + batch_size] = 1\n holder = 
np.concatenate((holder, land_marks))\n batch_loader[i] = holder\n batch_size += num_lm # adjust the batch size\n return batch_loader, data, batch_graph, landmark_neighbors, test_data, test_labels, land_marks", "def read_grid(filename_grid, dim=2, slc=None):\n ## get shape and slice\n fid = h5py.File(filename_grid, 'r')\n if dim==2:\n varnames = ['x', 'y', 'ep']\n if slc is None: slc = np.s_[0,:,:]\n if dim==3:\n varnames = ['x', 'y', 'z', 'ep']\n if slc is None: slc = np.s_[:,:,:]\n\n dset = fid.get(varnames[0])\n shape = dset[slc].shape\n (nx,ny,nz) = dset.shape\n ## read variables\n grid = {}\n for varname in varnames:\n try:\n dset = fid.get(varname)\n grid[varname] = np.zeros(shape)\n dset.read_direct(grid[varname], source_sel=slc)\n grid[varname] = grid[varname].transpose()\n except:\n pass\n fid.close()\n return grid, nx, ny, nz", "def load_eigenstrat_data(file_root):\n\n ind_file=open(file_root+\".ind\", \"r\")\n snp_file=open(file_root+\".snp\", \"r\")\n gen_file=open(file_root+\".geno\", \"r\")\n \n sample_names=ind_file.readlines()\n sample_names=[x.strip() for x in sample_names]\n sample_names=[x.split()[0] for x in sample_names]\n ind_file.close()\n \n snp_data=snp_file.readlines()\n snp_data=[x.strip() for x in snp_data]\n snp_names=[x.split()[0] for x in snp_data]\n snp_pos=[int(x.split()[3]) for x in snp_data]\n snp_file.close()\n\n genotype_data=np.genfromtxt(file_root+\".geno\", dtype=np.int, delimiter=1)\n genotype_data[genotype_data==9]=3\n return {\"sample_names\":sample_names, \"snp_names\":snp_names, \"snp_pos\":snp_pos, \"genotype_data\":genotype_data}", "def load_ne_features(filter='all', rebuild=False):\n # check database to read\n if filter == \"all\":\n db_file_path = PERSONS_NE_DB\n elif filter == \"punct\":\n db_file_path = PERSONS_NE_PUNCT_DB\n elif filter == \"interj\":\n db_file_path = PERSONS_NE_INTERJ_DB\n elif filter == \"coref\":\n db_file_path = PERSONS_NE_COREF_DB\n else:\n raise ValueError(\"Unknown 'filter' value. 
Must be in {'all', 'punct', 'interj'}\")\n\n # build all datasets if not already exists\n if not isfile(db_file_path) or rebuild:\n print(\"Building named entities datasets...\")\n build_ne_db()\n\n # Load specific dataset if exists\n scenes_ids, scenes_persons, scenes_ne = [], [], []\n if isfile(db_file_path):\n print(\"Loading named entities dataset from file \\'{}\\'\".format(db_file_path))\n scenes_ids, scenes_persons, scenes_ne = read_features_from_csv(db_file_path)\n # filter locutors\n scenes_persons = [list({person if person in PERSONS else UNKNOWN_STATE for person in persons}) for persons in scenes_persons]\n\n return scenes_ids, scenes_persons, scenes_ne", "def fixture_grid_360(grid):\n _grid = grid.copy() # get a copy of original earth_relief grid\n _grid.encoding.pop(\"source\") # unlink earth_relief netCDF source\n _grid[\"lon\"] = np.arange(0, 361, 1) # convert longitude from -180:180 to 0:360\n return _grid", "def load_data(self):\n\n self._load_train_data()\n self._load_test_data()", "def _load_training_data(self):\n self._save_training_data()", "def test_grdimage_file():\n fig = Figure()\n fig.grdimage(\n \"@earth_relief_01d_g\",\n cmap=\"ocean\",\n region=[-180, 180, -70, 70],\n projection=\"W0/10i\",\n shading=True,\n )\n return fig", "def load(self):\n cwd = os.getcwd()\n path = os.path.join(*[cwd, 'data', 'weighted_clusters',\n f\"weighted_clusters_WIJK{self.input}.dat\"])\n sys.path.append(path)\n\n with open(path, \"rb\") as f:\n unpickler = pickle.Unpickler(f)\n house_batt = unpickler.load()\n\n self.houses, self.batteries = house_batt[0], house_batt[1]", "def load_data(self):\n self.data = self.read_var(self.datavar)\n self.test_shape(self.datavar, self.data.shape, 2)", "def load_data(self):\n\n\t\tboard_data = np.load(os.path.join(\n\t\t\t'reinforcement_learning_data_final',\n\t\t\t'board_data.dat'))\n\t\tcows_data = np.load(os.path.join(\n\t\t\t'reinforcement_learning_data_final',\n\t\t\t'cows_data.dat'))\n\t\tlabels = np.load(os.path.join(\n\t\t\t'reinforcement_learning_data_final',\n\t\t\t'labels.dat'))\n\t\tlabels = labels.reshape((len(labels), 1))\n\n\t\tpermutation = np.random.permutation(len(labels))\n\n\t\treturn (board_data[permutation],\n\t\t cows_data[permutation],\n\t\t labels[permutation])", "def load_data_from_files(self):\n # separated method to allow mock easier\n logger.info(\"Loading data...\")\n parent = Path(__file__).parent\n path = parent / \"resources\" / \"scores.txt\"\n self.scorer.load_from_file(path)\n path = parent / \"resources\" / \"american-english-large.txt\"\n self.trie.load_from_file(path)\n path = parent / \"resources\" / \"reels.txt\"\n self.reels = Reel.get_from_file(path)\n logger.info(\"Data loaded!\")", "def load_cleaned_data(self):\n try:\n self.train = pd.read_pickle('../input/train_clean.pkl')\n self.test = pd.read_pickle('../input/test_clean.pkl')\n except FileNotFoundError:\n self.load_raw_data()", "def _load_dataset(self, path):\n\t\twhile True:\n\t\t\t\n\t\t\ttry:\n\t\t\t\tX_test = np.load(\"data/X_test.npy\")\n\t\t\t\tY_test = np.load(\"data/Y_test.npy\")\n\t\t\t\tbreak\n\n\t\t\texcept FileNotFoundError:\n\n\t\t\t\tX_test = np.zeros((10000,64,64,3))\n\t\t\t\tY_test = []\n\n\t\t\t\t\n\t\t\t\twith open(path, 'rb') as fo:\n\t\t\t\t\ttemp_element = pickle.load(fo, encoding='bytes')\n\n\t\t\t\ttemp_data = temp_element[b'data']\n\t\t\t\tY_test.extend(temp_element[b'labels'])\n\n\t\t\t\tfor j in range(10000):\n\t\t\t\t\tX_test[j] = self._reshape(temp_data[j])\n\n\t\t\t\tY_test = 
np.eye(10)[np.array(Y_test)]\n\t\t\t\t\n\t\t\t\tnp.save(\"./data/X_test\", X_test)\n\t\t\t\tnp.save(\"./data/Y_test\", Y_test)\n\n\t\t\t\tbreak\n\n\n\t\treturn X_test, Y_test", "def load_data(self):\n sets = ['train', 'val']\n images = []\n labels = []\n self.labels_dic = {}\n file = open(self.path + 'wnids.txt')\n train_labels = file.read().split()\n if self.train:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n for i in os.listdir(self.path + 'train/' + f + '/images/'):\n images.append(Image.open(self.path + 'train/' + f + '/images/' + i))\n labels.append(f)\n #image label n link to folder names of TinyImageNet\n self.labels_dic[f] = fn\n\n else:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n self.labels_dic[f] = fn\n file_val = open(self.path + 'val/val_annotations.txt')\n val_labels = file_val.read().split('\\n')\n for im in val_labels:\n im_data = im.split(\"\t\")[:2]\n if len(im_data) < 2:\n continue\n if im_data[1] in self.labels_dic:\n images.append(Image.open(self.path + 'val/images/' + im_data[0]))\n labels.append(im_data[1])\n\n self.images = images\n self.labels = labels", "def _load_data(self):\n\n from sklearn.datasets import fetch_openml\n mnist = fetch_openml('mnist_784', cache=True)\n # data_x = np.array(final_data_df)\n feat_data = np.array(mnist.data).astype('float32')\n target_data = mnist.target.astype('int64')\n shuffling_index = np.arange(feat_data.shape[0])\n np.random.shuffle(shuffling_index)\n feat_data = feat_data[shuffling_index]\n target_data = target_data[shuffling_index]\n\n cur_data_list = []\n cur_target_list = []\n for i in range(10):\n cur_mask = target_data == i\n cur_data_list.append(feat_data[cur_mask][:500])\n cur_target_list.append(target_data[cur_mask][:500])\n feat_data = np.concatenate(cur_data_list)\n target_data = np.concatenate(cur_target_list)\n\n self.data_x = feat_data\n self.data_y = self.to_one_hot_encoding(target_data)\n self.numerical_idx = np.arange(784)\n self.non_num_idx = None\n\n # Calculate adjacency matrix\n self.meta_inf = self.data_x.astype('float32')\n\n if self.args.graph_type:\n self.adj = self.get_adjacency()" ]
[ "0.71628165", "0.65157515", "0.64941037", "0.6290516", "0.6283898", "0.6261977", "0.6137829", "0.59520215", "0.5852507", "0.58480215", "0.5800172", "0.5742071", "0.57125926", "0.57125765", "0.5712251", "0.5711601", "0.5700506", "0.5699708", "0.5662421", "0.5648323", "0.561772", "0.56091785", "0.55799615", "0.5573006", "0.55643636", "0.5562382", "0.5554506", "0.5547737", "0.55193967", "0.5515129" ]
0.7127945
1
grdfilter an input DataArray, and output as DataArray.
def test_grdfilter_dataarray_in_dataarray_out(grid): result = grdfilter(grid=grid, filter="g600", distance="4") # check information of the output grid assert isinstance(result, xr.DataArray) assert result.coords["lat"].data.min() == -89.5 assert result.coords["lat"].data.max() == 89.5 assert result.coords["lon"].data.min() == -179.5 assert result.coords["lon"].data.max() == 179.5 npt.assert_almost_equal(result.data.min(), -6147.4907, decimal=2) npt.assert_almost_equal(result.data.max(), 5164.06, decimal=2) assert result.sizes["lat"] == 180 assert result.sizes["lon"] == 360
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_grdfilter_dataarray_in_file_out(grid):\n with GMTTempFile(suffix=\".nc\") as tmpfile:\n result = grdfilter(grid, outgrid=tmpfile.name, filter=\"g600\", distance=\"4\")\n assert result is None # grdfilter returns None if output to a file\n result = grdinfo(tmpfile.name, per_column=True)\n assert (\n result == \"-180 180 -90 90 -6147.49072266 5164.06005859 1 1 360 180 1 1\\n\"\n )", "def test_grdfilter_file_in_dataarray_out():\n outgrid = grdfilter(\n \"@earth_relief_01d\", region=\"0/180/0/90\", filter=\"g600\", distance=\"4\"\n )\n assert isinstance(outgrid, xr.DataArray)\n assert outgrid.gmt.registration == 1 # Pixel registration\n assert outgrid.gmt.gtype == 1 # Geographic type\n # check information of the output DataArray\n # the '@earth_relief_01d' is in pixel registration, so the grid range is\n # not exactly 0/180/0/90\n assert outgrid.coords[\"lat\"].data.min() == 0.5\n assert outgrid.coords[\"lat\"].data.max() == 89.5\n assert outgrid.coords[\"lon\"].data.min() == 0.5\n assert outgrid.coords[\"lon\"].data.max() == 179.5\n npt.assert_almost_equal(outgrid.data.min(), -6147.4907, decimal=2)\n npt.assert_almost_equal(outgrid.data.max(), 5164.06, decimal=2)\n assert outgrid.sizes[\"lat\"] == 90\n assert outgrid.sizes[\"lon\"] == 180", "def __call__(self,x):\n arr = np.array(x,copy=False,dtype=float)\n res = self._filterfunc(arr.ravel(),*self.parvals)\n return res.reshape(arr.shape)", "def filterf(self):\n from scipy.ndimage.filters import gaussian_filter as gf\n self._obj['u'] = xr.DataArray(gf(self._obj['u'],1),dims=('x','y'))\n self._obj['v'] = xr.DataArray(gf(self._obj['v'],1),dims=('x','y'))\n return self._obj", "def filter(self, data):\n self.data = pysap.Image(data=self.flt.filter(data))", "def filter_data(self):\n self.data = filter_pandas(self.data, self.filters)", "def __call__(self,x):\n\n arr = np.array(x,copy=False,dtype=float)\n return self._filterfunc(arr,*self.parvals)", "def test_grdfilter_file_in_file_out():\n with GMTTempFile(suffix=\".nc\") as tmpfile:\n result = grdfilter(\n \"@earth_relief_01d\",\n outgrid=tmpfile.name,\n region=[0, 180, 0, 90],\n filter=\"g600\",\n distance=\"4\",\n )\n assert result is None # return value is None\n assert os.path.exists(path=tmpfile.name) # check that outgrid exists\n result = grdinfo(tmpfile.name, per_column=True)\n assert result == \"0 180 0 90 -6147.49072266 5164.06005859 1 1 180 90 1 1\\n\"", "def filter(self):\n self.data = self.data.loc[~self.data.isnull().any(1),:]", "def filter(self, filterarray):\n return FeatureSet(list(np.array(self)[np.array(filterarray)]))", "def FilterFXSeries(self):\r\n filtFX=self.data[self.data.columns[0]].tolist()\r\n return filtFX", "def cloud_filter(array, bqa):\n array_dest = array.copy()\n array_dest[np.where((bqa != 2720) & (bqa != 2724) & (bqa != 2728) & (bqa != 2732)) ] = 'nan'\n return array_dest", "def eo_filter(source):\n nodata_bools = source.apply(lambda array: array == array.nodata).to_array(dim='band')\n\n nothingness = nodata_bools.all(dim='band')\n noncontiguous = nodata_bools.any(dim='band')\n\n return np.uint8(NO_DATA) * nothingness | np.uint8(MASKED_NO_CONTIGUITY) * noncontiguous", "def get_data(self):\n self.update_filter_inds()\n return self.data.iloc[:, self.filter_inds.ravel()].reset_index(drop=True)", "def filter_data(data: AnnData) -> None:\n\n assert \"passed_qc\" in data.obs\n data._inplace_subset_obs(data.obs[\"passed_qc\"].values)\n data._inplace_subset_var((data.var[\"n_cells\"] > 0).values)\n logger.info(\n \"After filteration, {nc} cells and {ng} genes 
are kept. Among {ng} genes, {nrb} genes are robust.\".format(\n nc=data.shape[0], ng=data.shape[1], nrb=data.var[\"robust\"].sum()\n )\n )", "def apply3filter(array, filter_):\n s = int(len(filter_)/2)\n width = len(array[0])\n height = len(array)\n new_array = np.array(np.zeros((height,width)))\n for row in range(s, (height-s)):\n for col in range(s, (width-s)):\n new_array[row,col] = np.sum(filter_ * array[(row-s):(row+s+1),(col-s):(col+s+1)])\n return new_array", "def trans(array,dim):\n return array[filter(lambda x: x != dim,range(len(array)) ) ]", "def filter(data,col,low,high):\n inds = np.where(data[:,col]>=low)\n data_trim = data[inds]\n inds = np.where(data_trim[:,col]<=high)\n data_trim = data_trim[inds]\n return data_trim", "def filter(data,col,low,high):\n inds = np.where(data[:,col]>=low)\n data_trim = data[inds]\n inds = np.where(data_trim[:,col]<=high)\n data_trim = data_trim[inds]\n return data_trim", "def gaussian_filter(stddev, array):\n\n return astropy.convolution.convolve(\n array, astropy.convolution.Gaussian2DKernel(stddev))", "def filt2(X, yvals, xvals, ny, nx):\n\n Y = dofilter2(X,nx,ny)\n \n Xnew = dofilter2(X,nx%2,ny%2)\n xvalsnew = dofilter2(xvals,ny%2,nx%2)\n yvalsnew = dofilter2(yvals,ny%2,nx%2)\n\n return Xnew, Y, yvalsnew, xvalsnew", "def remove_data(ds, nh_lim, sh_lim, time_max, lat_name='lat', time_name='time'):\n return xr.where((ds[lat_name] < nh_lim) &\n (ds[lat_name] > sh_lim) &\n (ds[time_name] < pd.to_datetime([time_max]).values),\n np.nan,\n ds)", "def _filter_data(raw_data):\n legal_inventory = digits + '.'\n filtered_data = list()\n # Only retain numeric information\n for data_point in raw_data:\n skip = False\n for symbol in list(str(data_point)):\n if symbol not in legal_inventory:\n skip = True\n if not skip:\n filtered_data.append(dtype(data_point))\n return np.array(filtered_data)", "def filter_data(data,filters):\n final_filter = pd.Series(np.array([True] * data.shape[0]))\n for attribute, value in filters:\n final_filter &= data[attribute] == value\n return data[final_filter]", "def filter_isolated_pixels(array):\n filtered_array = np.copy(array)\n id_regions, num_ids = ndimage.label(filtered_array,\n structure=np.ones((3, 3)))\n id_sizes = np.array(ndimage.sum(array, id_regions, range(num_ids+1)))\n area_mask = (id_sizes == 1)\n filtered_array[area_mask[id_regions]] = 0\n return filtered_array", "def transform(self, X): # noqa: E501\n X = np.atleast_2d(X)\n\n if X.ndim > 3:\n raise ValueError(\n \"Array must be of at max 3 dimensions instead \"\n \"got %s dimensional matrix\" % (X.ndim)\n )\n\n shape = X.shape\n X = X.reshape(-1, shape[-1])\n X = filter_data(\n X,\n self.sfreq,\n self.l_freq,\n self.h_freq,\n filter_length=self.filter_length,\n l_trans_bandwidth=self.l_trans_bandwidth,\n h_trans_bandwidth=self.h_trans_bandwidth,\n n_jobs=self.n_jobs,\n method=self.method,\n iir_params=self.iir_params,\n copy=False,\n fir_window=self.fir_window,\n fir_design=self.fir_design,\n )\n return X.reshape(shape)", "def test_grdfilter_fails():\n with pytest.raises(GMTInvalidInput):\n grdfilter(np.arange(10).reshape((5, 2)))", "def get_data_filter(args):\n diff_data(args, \"filter\")", "def apply_1d_filter(bfilter, timage):\n image_length = len(timage)\n ovrlay = int(bfilter.shape[0] / 2)\n tmp_array = np.zeros(image_length + 2 * ovrlay)\n tmp_array[ovrlay:-ovrlay] = timage\n res_array = np.zeros(image_length )\n for i in np.arange(image_length) + ovrlay:\n local_matrix = tmp_array[i - ovrlay:i + ovrlay + 1]\n res_array[i - ovrlay] = sum(local_matrix 
* bfilter)\n return res_array", "def filter(self, img: np.ndarray) -> np.ndarray:\n raise NotImplemented" ]
[ "0.72438073", "0.71895283", "0.6108349", "0.6072146", "0.60382855", "0.6018336", "0.58795875", "0.58754164", "0.5870362", "0.5869875", "0.5862751", "0.5731808", "0.56614274", "0.5651315", "0.56284916", "0.5540049", "0.55231196", "0.5522951", "0.5522951", "0.54774076", "0.5467848", "0.54674476", "0.54481435", "0.5433857", "0.5431419", "0.5417644", "0.54148173", "0.5404268", "0.5395509", "0.53636134" ]
0.75263023
0
grdfilter an input DataArray, and output to a grid file.
def test_grdfilter_dataarray_in_file_out(grid): with GMTTempFile(suffix=".nc") as tmpfile: result = grdfilter(grid, outgrid=tmpfile.name, filter="g600", distance="4") assert result is None # grdfilter returns None if output to a file result = grdinfo(tmpfile.name, per_column=True) assert ( result == "-180 180 -90 90 -6147.49072266 5164.06005859 1 1 360 180 1 1\n" )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_grdfilter_file_in_dataarray_out():\n outgrid = grdfilter(\n \"@earth_relief_01d\", region=\"0/180/0/90\", filter=\"g600\", distance=\"4\"\n )\n assert isinstance(outgrid, xr.DataArray)\n assert outgrid.gmt.registration == 1 # Pixel registration\n assert outgrid.gmt.gtype == 1 # Geographic type\n # check information of the output DataArray\n # the '@earth_relief_01d' is in pixel registration, so the grid range is\n # not exactly 0/180/0/90\n assert outgrid.coords[\"lat\"].data.min() == 0.5\n assert outgrid.coords[\"lat\"].data.max() == 89.5\n assert outgrid.coords[\"lon\"].data.min() == 0.5\n assert outgrid.coords[\"lon\"].data.max() == 179.5\n npt.assert_almost_equal(outgrid.data.min(), -6147.4907, decimal=2)\n npt.assert_almost_equal(outgrid.data.max(), 5164.06, decimal=2)\n assert outgrid.sizes[\"lat\"] == 90\n assert outgrid.sizes[\"lon\"] == 180", "def test_grdfilter_dataarray_in_dataarray_out(grid):\n result = grdfilter(grid=grid, filter=\"g600\", distance=\"4\")\n # check information of the output grid\n assert isinstance(result, xr.DataArray)\n assert result.coords[\"lat\"].data.min() == -89.5\n assert result.coords[\"lat\"].data.max() == 89.5\n assert result.coords[\"lon\"].data.min() == -179.5\n assert result.coords[\"lon\"].data.max() == 179.5\n npt.assert_almost_equal(result.data.min(), -6147.4907, decimal=2)\n npt.assert_almost_equal(result.data.max(), 5164.06, decimal=2)\n assert result.sizes[\"lat\"] == 180\n assert result.sizes[\"lon\"] == 360", "def test_grdfilter_file_in_file_out():\n with GMTTempFile(suffix=\".nc\") as tmpfile:\n result = grdfilter(\n \"@earth_relief_01d\",\n outgrid=tmpfile.name,\n region=[0, 180, 0, 90],\n filter=\"g600\",\n distance=\"4\",\n )\n assert result is None # return value is None\n assert os.path.exists(path=tmpfile.name) # check that outgrid exists\n result = grdinfo(tmpfile.name, per_column=True)\n assert result == \"0 180 0 90 -6147.49072266 5164.06005859 1 1 180 90 1 1\\n\"", "def to_cdo_grid(self, outfile):", "def filter_data(self):\n self.data = filter_pandas(self.data, self.filters)", "def onGrid(self, event):\n dlg = wx.FileDialog(self, wildcard=\"*.csv\", style=wx.SAVE)\n if dlg.ShowModal() == wx.ID_OK:\n path = dlg.GetPath()\n self.model.exportToGrid(path)\n dlg.Destroy()", "def grid_results(infile, resolution = 0.01, clip_shp = None, \n overwrite=True, contour=False):\n outfile = infile.rstrip('().csv') + '_gridded.tif'\n # if not overwrite:\n if os.path.isfile(outfile):\n if not overwrite:\n print('Not creating file %s as already exists' % outfile)\n print('To re-create file (e.g if inputs changed) set overwrite=True)')\n return\n else:\n try:\n os.remove(outfile)\n os.remove((outfile.rstrip('.tif') + '_clip.tif'))\n except:\n pass\n data = np.genfromtxt(infile, delimiter=',')\n max_lon = max(data[:,0])\n min_lon = min(data[:,0])\n max_lat = max(data[:,1])\n min_lat = min(data[:,1])\n #print max_lon, min_lon, max_lat, min_lat\n xi = np.arange(min_lon, max_lon, resolution)\n yi = np.arange(min_lat, max_lat, resolution)\n XI,YI = np.meshgrid(xi,yi)\n xsize = len(xi)\n ysize = len(yi)\n\n print('Interpolating results')\n gridded_results = griddata((data[:,0],data[:,1]),data[:,2],(XI,YI),method='linear')\n #print gridded_results\n #outfile = infile.rstrip('().csv') + '_gridded.tif'\n print('Writing gridded data to %s' % outfile)\n driver = gdal.GetDriverByName('GTiff')\n ds = driver.Create(outfile, xsize, ysize, 1, gdal.GDT_Float32)\n srs = osr.SpatialReference()\n srs.ImportFromEPSG(4326)\n 
ds.SetProjection(srs.ExportToWkt())\n gt = [(min_lon - (resolution/2)), resolution, 0, \n (min_lat - (resolution/2)), 0, resolution]\n ds.SetGeoTransform(gt)\n outband=ds.GetRasterBand(1)\n outband.SetStatistics(np.min(gridded_results), np.max(gridded_results), np.average(gridded_results), np.std(gridded_results))\n outband.WriteArray(gridded_results)\n # Need to close output dataset before we can do clipping\n ds = None\n # now clip by shapefile\n if clip_shp is not None:\n clipfile = outfile.rstrip('.tif') + '_clip.tif'\n cmd = ['gdalwarp',\n '-cutline',\n clip_shp,\n '-crop_to_cutline',\n '-dstalpha',\n outfile,\n clipfile]\n print(cmd)\n call(cmd, shell=False)\n if contour is True:\n cmd = 'gdal_contour -i 1 -off 0.5 %s %s.shp' % (outfile, outfile.rstrip('.tif'))\n print(cmd)\n call(cmd, shell=True)\n cmd = 'gdal_contour -i 1 -off 0.5 %s %s.shp' % (clipfile, clipfile.rstrip('.tif'))\n print(cmd)\n call(cmd, shell=True)", "def output_netcdf(forecast,proj_dict,grid_dict,start_hour,end_hour,\n stride,size,run_date,target_dataset,smoothing,config):\n for d,date in enumerate(run_date):\n date_outpath = config.forecast_out_path+'20{0}/netcdf/'.format(\n date)\n \n if not os.path.exists(date_outpath):\n os.makedirs(date_outpath)\n \n map_data = make_proj_grids(proj_dict,grid_dict)\n lons = map_data[\"lon\"]\n lats = map_data[\"lat\"]\n \n filtered_forecast = gaussian_filter(forecast[d],smoothing,mode='constant')\n \n filename = date_outpath + \"{0}_{6}_Hail_{1}_Cali_NMEP_{2}mm_{3}_Hours_{4}-{5}.nc\".format(\n config.ensemble_name,\n target_dataset,\n size,\n date,\n start_hour,end_hour,config.forecast_model_names)\n\n \n out_file = Dataset(filename, \"w\")\n out_file.createDimension(\"x\", filtered_forecast.shape[0])\n out_file.createDimension(\"y\", filtered_forecast.shape[1])\n out_file.createVariable(\"Longitude\", \"f4\", (\"x\", \"y\"))\n out_file.createVariable(\"Latitude\", \"f4\",(\"x\", \"y\"))\n out_file.createVariable(\"Data\", \"f4\", (\"x\", \"y\"))\n out_file.variables[\"Longitude\"][:,:] = lons\n out_file.variables[\"Latitude\"][:,:] = lats\n out_file.variables[\"Data\"][:,:] = filtered_forecast\n out_file.projection = proj_dict[\"proj\"]\n out_file.lon_0 = proj_dict[\"lon_0\"]\n out_file.lat_0 = proj_dict[\"lat_0\"]\n out_file.lat_1 = proj_dict[\"lat_1\"]\n out_file.lat_2 = proj_dict[\"lat_2\"]\n out_file.close()\n \n print(\"Writing to \" + filename)\n return", "def write_filters(self, session):\n\n w = self._dual.get_op('w')\n weights_values = session.run(w)\n weights_transpose = np.transpose(weights_values)\n\n filter_height = self._input_shape_visualisation[1]\n filter_width = self._input_shape_visualisation[2]\n np_write_filters(weights_transpose, [filter_height, filter_width])", "def filter(self, data):\n self.data = pysap.Image(data=self.flt.filter(data))", "def run_filter_data(\n data: AnnData,\n output_filt: str = None,\n plot_filt: str = None,\n plot_filt_figsize: Tuple[int, int] = None,\n mito_prefix: str = \"MT-\",\n min_genes: int = 500,\n max_genes: int = 6000,\n min_umis: int = 100,\n max_umis: int = 600000,\n percent_mito: float = 10.0,\n percent_cells: float = 0.05,\n) -> None:\n\n start = time.time()\n\n qc_metrics(\n data,\n mito_prefix,\n min_genes,\n max_genes,\n min_umis,\n max_umis,\n percent_mito,\n percent_cells,\n )\n\n if output_filt is not None:\n writer = pd.ExcelWriter(output_filt + \".filt.xlsx\", engine=\"xlsxwriter\")\n df_cells, df_genes = get_filter_stats(data)\n df_cells.to_excel(writer, sheet_name=\"Cell filtration stats\")\n 
df_genes.to_excel(writer, sheet_name=\"Gene filtration stats\")\n writer.save()\n logger.info(\"Filtration results are written.\")\n\n if plot_filt is not None:\n generate_filter_plots(data, plot_filt, plot_filt_figsize)\n\n filter_data(data)\n\n end = time.time()\n logger.info(\"filter_data is finished. Time spent = {:.2f}s.\".format(end - start))", "def test_grdfilter_fails():\n with pytest.raises(GMTInvalidInput):\n grdfilter(np.arange(10).reshape((5, 2)))", "def filter_data(data: AnnData) -> None:\n\n assert \"passed_qc\" in data.obs\n data._inplace_subset_obs(data.obs[\"passed_qc\"].values)\n data._inplace_subset_var((data.var[\"n_cells\"] > 0).values)\n logger.info(\n \"After filteration, {nc} cells and {ng} genes are kept. Among {ng} genes, {nrb} genes are robust.\".format(\n nc=data.shape[0], ng=data.shape[1], nrb=data.var[\"robust\"].sum()\n )\n )", "def filter_non_traffic_charges_grid(self, column_name, filter_item_text):\n self.grid_filter_with_textbox(self.non_traffic_charges_grid_div_id, column_name, filter_item_text)", "def export_grid(self, vtk_fname='GRID', toVTK=True, toNumpy=True):\r\n print('Exporting grids')\r\n tID = 0\r\n # Start by exporting input properties (from read_prop() or read_ext_prop())\r\n # In VTK files, these props will only be visible at only the first timestep\r\n dp = []\r\n propIds = []\r\n for prop in self.out_props:\r\n if type(self.out_props[prop]) is not dict:\r\n data = np.array(self.out_props[prop])\r\n # Save to Numpy\r\n if toNumpy:\r\n self.export_prop(data, prop, tID)\r\n # Add property data to vts structured grid\r\n if toVTK:\r\n propIds = self._prep_vtk(data, prop, propIds)\r\n self._check_out('vtk')\r\n else:\r\n dp.append(prop)\r\n\r\n # Export time-series output properties (from read_out_props())\r\n for t in self.times:\r\n for prop in self.out_props:\r\n if prop in dp:\r\n data = np.array(self.out_props[prop][t], order='F')\r\n # Save to Numpy\r\n if toNumpy:\r\n # self.export_prop(data, prop, tID)\r\n self.export_prop(data, prop, t)\r\n # Add property data to vts structured grid\r\n if toVTK:\r\n propIds = self._prep_vtk(data, prop, propIds)\r\n # Save to VTK\r\n if toVTK:\r\n if tID == 0:\r\n self._check_out('vtk')\r\n # self.exportVTK(os.path.join(self.out_dir, 'vtk', vtk_fname + str(tID)))\r\n self.exportVTK(os.path.join(self.out_dir, 'vtk', vtk_fname + str(t)))\r\n for id in propIds:\r\n self.Grid.GetCellData().RemoveArray(id)\r\n tID += 1\r\n propIds = []", "def load_filter():\n if not os.path.isfile(FILTER):\n print('no filter found, creating square grid')\n return []\n with open(FILTER, 'r') as ff:\n reader = csv.reader(ff)\n l = list(reader)\n ar = numpy.asarray(l)\n # ar = numpy.transpose(ar, (0, 1))\n # ar = numpy.flip(ar, 1)\n # ar = numpy.rot90(ar, k=3, axes=(0, 1))\n # ar = numpy.swapaxes(ar, 0, 1)\n f = list(map(list, ar))\n return f", "def preprocess(self):\n filtered_data = pd.read_csv(self.input)\n\n if self.config.getboolean(\"filterMissingsInGenes\"):\n # first filter out the genes that have more missings than threshold\n filtered_data = self.filterMissings(self.config[\"threshold\"], filtered_data)\n if self.config.getboolean(\"filterMissingsInSamples\"):\n # second transpose matrix and filter out samples that have more missings than threshold\n filtered_samples = self.filterMissings(self.config[\"threshold\"], filtered_data.T)\n filtered_data = filtered_samples.T\n\n # transpose back into original orientation and save\n filePrefix = self.input.split(\"/\")[-1].split(\".\")[\n 0] # split path by / to receive 
filename, split filename by . to receive filename without ending\n filename = self.output + filePrefix + \"_filtered.csv\"\n filtered_data.to_csv(filename, index=False)\n return filename", "def filter_quality(grid, qmin=0.0, qmax=float(\"inf\"), array=\"Quality\"):\n threshold = vtk.vtkThreshold()\n threshold.SetInputData(grid)\n threshold.ThresholdBetween(qmin, qmax)\n threshold.SetInputArrayToProcess(0, 0, 0, vtk.vtkDataObject.FIELD_ASSOCIATION_CELLS, array)\n threshold.Update()\n return threshold.GetOutput()", "def on_export_button(self, event):\n wildcard = \"Filtered _iso_res_filt.csv file (*_iso_res_filt.csv)|*_iso_res_filt.csv|\"\\\n \"All files (*.*)|*.*|\"\n defFile = self.datafile[:-4]+'_filt.csv'\n dlg = wx.FileDialog(\n self, message=\"Save file as ...\", \n defaultDir=self.currentDirectory, \n defaultFile=defFile, wildcard=wildcard, style=wx.SAVE\n )\n if dlg.ShowModal() == wx.ID_OK:\n path = dlg.GetPath()\n self.recalcAll()\n self.redrawAll()\n self.dataFrame['priorFilter'] = self.dataFrame['allFPass']\n self.dataFrame.to_csv(path, index=False)\n summaryCSVPath = path.split('.')[0] + '_median_[' + ''.join(self.calcNum) + ']_[' + ''.join(self.calcDen) + '].csv'\n self.writeSummaryCSV(summaryCSVPath)\n \n dlg.Destroy()", "def get_array_grid(self):\n print('Making array grid')\n grid_list = []\n or_list = [0, 0, 0]\n far_list = [0, 0, 0]\n\n for root, subdirs, files in os.walk(self.stem):\n for filename in files:\n if self.probe in filename and self.prot_name in filename and 'ccp4' in filename:\n if ('frequency' not in filename) and ('ranges' not in filename):\n grid_list.append(join(self.stem, filename))\n g = Grid.from_file(join(self.stem, filename))\n _or_list = [g.bounding_box[0][j] for j in range(3)]\n _far_list = [g.bounding_box[1][m] for m in range(3)]\n\n for i in range(3):\n or_list[i] = min(or_list[i], _or_list[i])\n far_list[i] = max(far_list[i], _far_list[i])\n\n self.grid_list = grid_list\n self.spacing = g.spacing\n self.tup_max_length = len(grid_list)\n self.array_grid_origin = (or_list[0], or_list[1], or_list[2])\n self.array_grid_far_corner = (far_list[0], far_list[1], far_list[2])", "def save_data(self) -> None:\n # Construct a grid in physical space\n rvals = np.logspace(start=-3,\n stop=2.5,\n num=21,\n endpoint=True)\n # Compute C, D, K1 and F on that grid\n Cvals = np.array([self.compute_C(r, Suppression.RAW) for r in rvals])\n Dvals = np.array([self.compute_D(r, Suppression.RAW) for r in rvals])\n K1vals = np.array([self.compute_K1(r, Suppression.RAW) for r in rvals])\n Fvals = np.array([self.compute_F(r, Suppression.RAW) for r in rvals])\n # Save them to file\n df = pd.DataFrame([rvals, Cvals[:, 0], Dvals[:, 0], K1vals[:, 0], Fvals[:, 0],\n Cvals[:, 1], Dvals[:, 1], K1vals[:, 1], Fvals[:, 1]]).transpose()\n df.columns = ['r', 'C(r)', 'D(r)', 'K1(r)', 'F(r)', 'dC(r)', 'dD(r)', 'dK1(r)', 'dF(r)']\n df.to_csv(self.file_path(self.filename + '.csv'), index=False)", "def filter_resolution_grid(self, column_name, filter_item_text):\n self.grid_filter_with_textbox(self.resolution_grid_div_id, column_name, filter_item_text)", "def write_grid(self):\n \n self.fout = self.create_savename()\n ncout = Dataset(self.fout, 'w')\n print('Writing: %s' % self.fout)\n \n # Create dimensions\n lon = ncout.createDimension(self.xvar, self.nx)\n lat = ncout.createDimension(self.yvar, self.ny)\n depth = ncout.createDimension(self.zvar, self.nz)\n tdim = ncout.createDimension('time', None)\n bndsDim = ncout.createDimension('bnds', 2)\n\n # Create variables\n varx = 
ncout.createVariable(self.xvar, 'float64', (self.xvar,))\n vary = ncout.createVariable(self.yvar, 'float64', (self.yvar,))\n varz = ncout.createVariable(self.zvar, 'float64', (self.zvar,))\n\n varx.standard_name = 'longitude'\n varx.units = 'degrees'\n ncout.variables['LONGITUDE'].bounds = 'lon_bnds'\n lonBndsVar = ncout.createVariable('lon_bnds', 'float64', (self.xvar, 'bnds'))\n xboundaries = np.concatenate([self.xminbounds, np.reshape(self.xmaxbounds[-1],(1,1))[0]])\n lonBndsVar[:,:] = np.array([xboundaries[:-1], xboundaries[1:]]).T\n\n vary.standard_name = 'latitude'\n vary.units = 'degrees'\n ncout.variables['LATITUDE'].bounds = 'lat_bnds'\n latBndsVar = ncout.createVariable('lat_bnds', 'float64', (self.yvar, 'bnds'))\n yboundaries = np.concatenate([self.yminbounds, np.reshape(self.ymaxbounds[-1],(1,1))[0]])\n latBndsVar[:,:] = np.array([yboundaries[:-1], yboundaries[1:]]).T\n \n varz.standard_name = 'depth'\n varz.units = 'metres'\n ncout.variables['DEPH_CORRECTED'].bounds = 'depth_bnds'\n depthBndsVar = ncout.createVariable('depth_bnds', 'float64', (self.zvar, 'bnds'))\n zboundaries = np.concatenate([self.zminbounds, np.reshape(self.zmaxbounds[-1],(1,1))[0]])\n depthBndsVar[:,:] = np.array([zboundaries[:-1], zboundaries[1:]]).T\n\n vartmean = ncout.createVariable('tmean', 'float32', ('time',self.zvar,self.yvar,self.xvar))\n varmtmean = ncout.createVariable(self.datavar, 'float32', ('time',self.zvar,self.yvar,self.xvar))\n varsum = ncout.createVariable('sum', 'float32', ('time',self.zvar,self.yvar,self.xvar))\n varmsum = ncout.createVariable('meansum', 'float32', ('time',self.zvar,self.yvar,self.xvar))\n varcount = ncout.createVariable('count', 'float32', ('time',self.zvar,self.yvar,self.xvar))\n# varmax = ncout.createVariable('gmax', 'float32', ('time', self.zvar, self.yvar, self.xvar))\n# varmin = ncout.createVariable('gmin', 'float32', ('time', self.zvar, self.yvar, self.xvar))\n# varmed = ncout.createVariable('median', 'float32', ('time', self.zvar, self.yvar, self.xvar))\n varpcount = ncout.createVariable('pcount', 'float32', ('time', self.zvar, self.yvar, self.xvar))\n vartime = ncout.createVariable('time', 'float64', ('time',))\n vartime.units = 'hours since 0001-01-01 00:00:00'\n vartime.calendar = 'gregorian'\n\n # Write to variables\n varx[:] = self.xgrid\n vary[:] = self.ygrid\n varz[:] = self.zgrid\n vartmean[:] = self.grid_tmean[np.newaxis]\n varmtmean[:] = self.grid_meantmean[np.newaxis]\n varsum[:] = self.grid_sum[np.newaxis]\n varmsum[:] = self.grid_meansum[np.newaxis]\n varcount[:] = self.grid_count[np.newaxis]\n varpcount[:] = self.grid_pcount[np.newaxis]\n# varmax[:] = self.grid_max[np.newaxis]\n# varmin[:] = self.grid_min[np.newaxis]\n# varmed[:] = self.grid_med[np.newaxis]\n vartime[:] = date2num(self.dt, units=vartime.units, calendar=vartime.calendar)\n \n # Add global attributes\n ncout.history = 'Created ' + time.ctime(time.time())\n \n # Save\n ncout.close()", "def export_array(in_array, output_path):\n global proj, geotrans, row, col\n proj = band.GetProjection()\n geotrans = band.GetGeoTransform()\n row = band.RasterYSize\n col = band.RasterXSize\n driver = gdal.GetDriverByName(\"GTiff\")\n outdata = driver.Create(output_path, col, row, 1)\n outband = outdata.GetRasterBand(1)\n outband.SetNoDataValue(-9999)\n outband.WriteArray(in_array)\n # Georeference the image\n outdata.SetGeoTransform(geotrans)\n # Write projection information\n outdata.SetProjection(proj)\n outdata.FlushCache()\n outdata = None", "def filterIEDBFile(filename, field, search):\n X = 
pd.read_csv(filename)\n cols = ['PubMed ID','Author','Journal','Year','T Cell ID','MHC Allele Name',\n 'Epitope Linear Sequence','Epitope Source Organism Name']\n y = X[X[field].str.contains(search)]\n print y[cols]\n y.to_csv('filtered.csv',cols=cols)\n return y", "def write_grid2d(grid_file, grid2d):\n with grid_file.open('w') as f:\n for row in grid2d['label']:\n f.write('\\t'.join(row) + '\\n')", "def filter_charges_grid(self, column_name, filter_item_text):\n self.grid_filter_with_textbox(self.charges_grid_div_id, column_name, filter_item_text, self.adjustment_folders_column_position)", "def _gap_filter(self):\n res = self.cfg.resolution\n xedges = np.linspace(self.lrx[0]-res/2., self.lrx[-1]+res/2.0, len(self.lrx)+1)\n yedges = np.linspace(self.lry[0]-res/2., self.lry[-1]+res/2.0, len(self.lry)+1)\n\n # Calculates point density of als shots per DEM grid cell\n self.rzhist, xe, ye = np.histogram2d(self.x[self.nonan].flatten(),\n self.y[self.nonan].flatten(),\n bins=[xedges, yedges])\n self.rzhist = self.rzhist.transpose()\n data_mask = self.rzhist > 0.0\n\n filter_algorithm = self.cfg.gap_filter[\"algorithm\"]\n if filter_algorithm == \"maximum_filter\":\n data_mask = maximum_filter(data_mask, **self.cfg.gap_filter[\"keyw\"])\n else:\n raise NotImplementedError(\"Filter algorithm: %s\" % filter_algorithm)\n\n self.dem_mask = ~data_mask", "def sph2grd(data, **kwargs):\n with GMTTempFile(suffix=\".nc\") as tmpfile:\n with Session() as lib:\n file_context = lib.virtualfile_from_data(check_kind=\"vector\", data=data)\n with file_context as infile:\n if (outgrid := kwargs.get(\"G\")) is None:\n kwargs[\"G\"] = outgrid = tmpfile.name # output to tmpfile\n lib.call_module(\n module=\"sph2grd\", args=build_arg_string(kwargs, infile=infile)\n )\n\n return load_dataarray(outgrid) if outgrid == tmpfile.name else None", "def filter_targets(file_name=\"database/master_targets.csv\"):\n tab = Table.read(file_name,format=\"csv\")\n tab = tab.filled(-99999.)\n new_data = []\n for i in tab:\n mag = i['MAGNITUDE'] >= 6. and i['MAGNITUDE'] <= 13.\n width = i['WIDTH1'] <= 0.05 and i['WIDTH2'] <= 0.05\n period = i['PERIOD'] >= 5. and i['PERIOD'] <= 100.\n depth = i['DEPTH1'] >= 0.1 and i['DEPTH2'] >= 0.05\n dec = int(i['DEC'][0:3]) < 30 \n if mag and width and period and depth and dec:\n new_data.append(list(i))\n print(\"Targets filtered from original {} to {}\".format(len(tab),len(new_data)))\n return new_data" ]
[ "0.74676794", "0.6965203", "0.6943156", "0.57479334", "0.5405306", "0.5393092", "0.53729516", "0.5346795", "0.5268873", "0.5247941", "0.5225041", "0.52224445", "0.52160496", "0.5199661", "0.51719517", "0.5155658", "0.5155277", "0.5070777", "0.50582415", "0.50448567", "0.50338966", "0.5019366", "0.5006376", "0.50021225", "0.4999007", "0.49921283", "0.498821", "0.49853054", "0.49849498", "0.4941107" ]
0.77258784
0
grdfilter an input grid file, and output as DataArray.
def test_grdfilter_file_in_dataarray_out(): outgrid = grdfilter( "@earth_relief_01d", region="0/180/0/90", filter="g600", distance="4" ) assert isinstance(outgrid, xr.DataArray) assert outgrid.gmt.registration == 1 # Pixel registration assert outgrid.gmt.gtype == 1 # Geographic type # check information of the output DataArray # the '@earth_relief_01d' is in pixel registration, so the grid range is # not exactly 0/180/0/90 assert outgrid.coords["lat"].data.min() == 0.5 assert outgrid.coords["lat"].data.max() == 89.5 assert outgrid.coords["lon"].data.min() == 0.5 assert outgrid.coords["lon"].data.max() == 179.5 npt.assert_almost_equal(outgrid.data.min(), -6147.4907, decimal=2) npt.assert_almost_equal(outgrid.data.max(), 5164.06, decimal=2) assert outgrid.sizes["lat"] == 90 assert outgrid.sizes["lon"] == 180
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_grdfilter_dataarray_in_file_out(grid):\n with GMTTempFile(suffix=\".nc\") as tmpfile:\n result = grdfilter(grid, outgrid=tmpfile.name, filter=\"g600\", distance=\"4\")\n assert result is None # grdfilter returns None if output to a file\n result = grdinfo(tmpfile.name, per_column=True)\n assert (\n result == \"-180 180 -90 90 -6147.49072266 5164.06005859 1 1 360 180 1 1\\n\"\n )", "def test_grdfilter_file_in_file_out():\n with GMTTempFile(suffix=\".nc\") as tmpfile:\n result = grdfilter(\n \"@earth_relief_01d\",\n outgrid=tmpfile.name,\n region=[0, 180, 0, 90],\n filter=\"g600\",\n distance=\"4\",\n )\n assert result is None # return value is None\n assert os.path.exists(path=tmpfile.name) # check that outgrid exists\n result = grdinfo(tmpfile.name, per_column=True)\n assert result == \"0 180 0 90 -6147.49072266 5164.06005859 1 1 180 90 1 1\\n\"", "def test_grdfilter_dataarray_in_dataarray_out(grid):\n result = grdfilter(grid=grid, filter=\"g600\", distance=\"4\")\n # check information of the output grid\n assert isinstance(result, xr.DataArray)\n assert result.coords[\"lat\"].data.min() == -89.5\n assert result.coords[\"lat\"].data.max() == 89.5\n assert result.coords[\"lon\"].data.min() == -179.5\n assert result.coords[\"lon\"].data.max() == 179.5\n npt.assert_almost_equal(result.data.min(), -6147.4907, decimal=2)\n npt.assert_almost_equal(result.data.max(), 5164.06, decimal=2)\n assert result.sizes[\"lat\"] == 180\n assert result.sizes[\"lon\"] == 360", "def ReadGrid(self, grdfile):\n nc = Dataset(grdfile,'r')\n \n self.xv = nc.variables['xv'][:]\n self.yv = nc.variables['yv'][:]\n self.xp = nc.variables['xp'][:]\n self.yp = nc.variables['yp'][:]\n self.xe = nc.variables['xe'][:]\n self.ye = nc.variables['ye'][:]\n self.dz = nc.variables['dz'][:] \n self.dv = nc.variables['dv'][:]\n self.Ac = nc.variables['Ac'][:]\n self.Nk = nc.variables['Nk'][:]\n self.face = nc.variables['face'][:]\n self.mark = nc.variables['mark'][:]\n\tself.cells = nc.variables['cells'][:]\n \n self.Nc = len(self.xv)\n self.Np = len(self.xp)\n self.Ne = len(self.xe)\n self.Nk = len(self.dz)\n self.numsides = self.face.shape[1]", "def load_filter():\n if not os.path.isfile(FILTER):\n print('no filter found, creating square grid')\n return []\n with open(FILTER, 'r') as ff:\n reader = csv.reader(ff)\n l = list(reader)\n ar = numpy.asarray(l)\n # ar = numpy.transpose(ar, (0, 1))\n # ar = numpy.flip(ar, 1)\n # ar = numpy.rot90(ar, k=3, axes=(0, 1))\n # ar = numpy.swapaxes(ar, 0, 1)\n f = list(map(list, ar))\n return f", "def read_unstructured_grid(filepath):\n reader =vtk.vtkXMLUnstructuredGridReader()\n reader.SetFileName(filepath)\n reader.Update()\n grid = reader.GetOutput()\n append_filter = vtk.vtkAppendFilter()\n append_filter.AddInputData(grid)\n append_filter.Update()\n grid = append_filter.GetOutput()\n\n return grid", "def read_region_mask(grid='Nh50km'):\n\n mask_path = ('/oldhome/apbarret/data/seaice_indices/'\n 'Arctic_region_mask_Meier_AnnGlaciol2007_Nh50km.dat')\n nrow = 360\n ncol = 360\n \n result = xr.DataArray(np.fromfile(mask_path, dtype=float).reshape(nrow,ncol),\n dims=['x','y'])\n return result", "def get_2Darray_hdf5(file,cols='Null',nrows='Null',verbose=False):\n if verbose:\n print (\"reading data from hdf5 file {} for filters:\".format(file))\n for col in cols:\n print(col)\n df = pd.read_hdf(file,\"df\")\n smalldf = df.loc[:,cols]\n outarray = smalldf.values #if we switch to pandas 0.24 or higher\n #this could be replaced with smalldf.to_numpy()\n return outarray", "def 
get_grid_data(grid):\n indir = '/home/ivan/Tools/scrip/mapping/grids'\n infile = os.path.join(indir, grid + '.nc')\n fp = Nio.open_file(infile,'r')\n nlon, nlat = fp.variables['grid_dims'][:]\n tlat = fp.variables['grid_center_lat'][:]\n tlon = fp.variables['grid_center_lon'][:]\n fp.close()\n tlat = N.reshape(tlat,(nlat,nlon))[:,0]\n tlon = N.reshape(tlon,(nlat,nlon))[0,:]\n return nlon, nlat, tlon, tlat", "def load_grd(filename):\n with open(filename, 'r') as f:\n meta = {}\n meta['header'] = []\n meta['header'].append(f.readline().rstrip('\\n'))\n while meta['header'][-1] != '++++':\n meta['header'].append(f.readline().rstrip('\\n'))\n # These determine the type of grid and the field format.\n meta['KTYPE'] = int(f.readline().split()[0])\n if meta['KTYPE'] != 1:\n raise ValueError(\"Not implemented.\")\n meta['NSET'], meta['ICOMP'], meta['NCOMP'], meta['IGRID'] = [int(s) for s in f.readline().split()]\n # The grid center in units of the x and y grid spacing.\n meta['IX'], meta['IY'] = [int(s) for s in f.readline().split()]\n # These are the x and y grid limits: S is lower, and E is upper.\n meta['XS'], meta['YS'], meta['XE'], meta['YE'] = [float(s) for s in f.readline().split()]\n # These are the numbers of grid points in x and y.\n meta['NX'], meta['NY'], meta['KLIMIT'] = [int(s) for s in f.readline().split()]\n # Implement this to read elliptically truncated grids.\n if meta['KLIMIT'] != 0:\n raise ValueError(\"Not implemented.\")\n # Load the field data. This returns an array with shape (NX * NY, 2 * NCOMP).\n conv = dict([(column, string_to_float) for column in range(2 * meta['NCOMP'])])\n data = np.loadtxt(f, dtype=float, converters=conv)\n # Determine the grid spacing and center values.\n meta['DX'] = (meta['XE'] - meta['XS']) / (meta['NX'] - 1)\n meta['DY'] = (meta['YE'] - meta['YS']) / (meta['NY'] - 1)\n meta['XCEN'] = meta['DX'] * meta['IX']\n meta['YCEN'] = meta['DY'] * meta['IY']\n # Reshape the data.\n map = np.empty((meta['NX'], meta['NY'], meta['NCOMP']),\n dtype=np.complex)\n for component in range(meta['NCOMP']):\n column = data[:, 2 * component] + 1j * data[:, 2 * component + 1]\n map[:, :, component] = column.reshape(meta['NX'], meta['NY'], order='F')\n return meta, map", "def readData():\n fileName = sys.argv[1]\n inputArray = []\n with open(fileName) as csvFile:\n reader = csv.reader(csvFile)\n arraySlice = []\n for row in reader:\n arraySlice = (row[235:587])\n if arraySlice[0] != \"\":\n arraySlice = [float(i) for i in arraySlice]\n inputArray.append(arraySlice)\n csvFile.close()\n return inputArray", "def read_from_file(self,grd_fn):\n self.grd_fn = grd_fn\n self.fp = open(self.grd_fn,'rt')\n hdr = self.fp.readline().strip() #header &GRD_2008 or &LISTGRD\n\n if hdr == self.hdr_08:\n print( \"Will read 2008 format for grid\" )\n n_parms = 11\n elif hdr == self.hdr_old:\n print( \"Will read old UnTRIM grid format\" )\n n_parms = 10\n\n for i in range(n_parms): # ignore TNE and TNS in new format files\n l = self.fp.readline()\n lhs,rhs = l.split('=')\n val = rhs.strip().strip(',')\n varname = lhs.strip()\n print( \"%s=%s\"%(varname,val) )\n\n if varname=='NV':\n Nvertices = int(val)\n elif varname=='NE':\n Npolys = int(val)\n elif varname=='NS':\n Nsides = int(val)\n elif varname=='NBC':\n Nboundary_poly = int(val)\n elif varname=='NSI':\n Ninternal_sides = int(val)\n elif varname=='NSF':\n Nflow_sides = int(val)\n elif varname=='NBC':\n Nbc = int(val)\n elif varname=='ANGLE':\n self.angle = float(val)\n elif varname=='LOCATION':\n self.location = val\n elif 
varname=='NR': ## these are read, but not used\n Nred = int(val)\n elif varname=='TNE':\n TNE=int(val)\n elif varname=='TNS':\n TNS=int(val)\n # others: HLAND for older fmt.\n \n while 1:\n s = self.fp.readline().strip() # header: /\n if s == '/':\n break\n\n # We know the size of everything, and can ask UnstructuredGrid to allocate\n # arrays now, with the 'special' meaning that passing an integer means allocate\n # the array of that size, full of zeros.\n # this allocates\n # self.nodes, self.edges, self.cells\n self.from_simple_data(points = Nvertices,edges = Nsides, cells = Npolys)\n\n for v in range(Nvertices):\n Cv = self.fp.readline().split()\n if hdr == self.hdr_08:\n vertex_num = int(Cv.pop(0))\n if vertex_num != v+1:\n print( \"Mismatched vertex numbering: %d != %d\"%(vertex_num,v+1) )\n self.nodes['x'][v,0] = float(Cv[0])\n self.nodes['x'][v,1] = float(Cv[1])\n \n print( \"Npolys\",Npolys )\n self.cells['edges'] = self.UNKNOWN # initialize all\n self.cells['nodes'] = self.UNKNOWN\n \n for c in range(Npolys):\n l = self.fp.readline()\n Cp = l.split()\n if hdr == self.hdr_08:\n poly_num = int(Cp.pop(0))\n if poly_num-1 != c:\n print( \"Mismatched polygon id: %fd != %d\"%(poly_num,c+1) )\n \n numsides = int(Cp[0])\n\n self.cells['_center'][c,0] = float(Cp[1])\n self.cells['_center'][c,1] = float(Cp[2])\n\n if hdr == self.hdr_old:\n # vertex index is Cp[3,5,7,9]\n # the others, 4,6,8,10, are edges, right?\n # convert to 0 based indices here\n\n # This is probably wrong! I think it's actually reading the\n # sides\n self.cells['edges'][c,0] = int(Cp[4]) - 1\n self.cells['edges'][c,1] = int(Cp[6]) - 1 \n self.cells['edges'][c,2] = int(Cp[8]) - 1\n if numsides == 4:\n self.cells['edges'][c,3] = int(Cp[10]) - 1 \n else:\n self.cells['edges'][c,3]=self.UNDEFINED\n #HERE - need to copy that to self.cells['nodes']\n else:\n for ei in range(numsides):\n self.cells['nodes'][c,ei] = int(Cp[3+ei]) - 1\n self.cells['edges'][c,ei] = int(Cp[3+numsides+ei]) - 1\n self.cells['nodes'][c,numsides:]=self.UNDEFINED\n self.cells['edges'][c,numsides:]=self.UNDEFINED\n \n # choose some large, above-sea-level depth\n self.cells['depth_mean'] = -1000 # not sure this is doing anything...\n\n for e in range(Nsides):\n Cs = self.fp.readline().split()\n if hdr == self.hdr_08:\n # side num = int(Cs.pop(0))\n Cs.pop(0)\n elif hdr == self.hdr_old:\n # side depth?\n edge_depth = self.edges['depth_mean'][e] = float(Cs.pop(0))\n \n self.edges['nodes'][e,0] = int(Cs[0])-1 # vertex indices\n self.edges['nodes'][e,1] = int(Cs[1])-1\n \n self.edges['cells'][e,0] = int(Cs[2])-1 # cell neighbors\n self.edges['cells'][e,1] = int(Cs[3])-1\n\n if hdr == self.hdr_old:\n for nc in self.edges['cells'][e]:\n if nc >= 0 and edge_depth > self.cells['depth_mean'][nc]:\n self.cells['depth_mean'][nc] = edge_depth\n\n if hdr==self.hdr_old:\n # old format - have to infer cell nodes from edges\n self.make_cell_nodes_from_edge_nodes()\n\n # Try to make sense of the marks and red/black:\n self.cells['red'][:Nred] = True\n self.cells['mark'][:Nboundary_poly] = self.BOUNDARY\n self.edges['mark'][:Ninternal_sides] = 0\n self.edges['mark'][Ninternal_sides:Nflow_sides] = self.FLOW\n self.edges['mark'][Nflow_sides:] = self.LAND\n\n # Bathymetry:\n if hdr == self.hdr_08:\n # make a cheap tokenizer to read floats across lines\n # note that it's up to the user to know that all values from\n # the line are read, and not to get the iterator until you're\n # ready for some values to be read\n def tokenizer():\n while True:\n for item in 
self.fp.readline().split():\n yield item\n for c in range(Npolys):\n check_c,nis = [int(s) for s in self.fp.readline().split()]\n if check_c != c+1:\n print(\"ERROR: while reading cell subgrid, cell index mismatch: %s vs. %d\"%(c+1,check_c))\n \n next_token = tokenizer().next\n areas = np.array( [float(next_token()) for sg in range(nis)] )\n depths = np.array( [float(next_token()) for sg in range(nis)] )\n \n self.cells['depth_mean'][c] = np.sum(areas*depths) / np.sum(areas)\n self.cells['_area'][c] = np.sum(areas)\n self.cells['depth_max'][c] = depths.max()\n self.cells['subgrid'][c] = (areas,depths)\n for e in range(Nflow_sides):\n l = self.fp.readline()\n # print \"%d/%d - Read line: %s\"%(e,self.Nsides,l)\n check_e,nis = [int(s) for s in l.split()]\n if check_e != e+1:\n print( \"ERROR: While reading edge subgrid, edge index mismatch: %s vs. %s\"%(e+1,check_e) )\n next_token = tokenizer().next\n lengths = np.array( [float(next_token()) for sg in range(nis)] )\n depths = np.array( [float(next_token()) for sg in range(nis)] )\n if sum(lengths)<=0:\n print( \"edge %d has bad lengths\"%e )\n self.edges['depth_mean'][e] = np.sum(lengths*depths) / sum(lengths)\n self.edges['depth_max'][e] = depths.max()\n self.edges['subgrid'][e] = (lengths,depths)\n # and land boundaries get zeros.\n for e in range(Nflow_sides,Nsides):\n self.edges['depth_mean'][e] = 0.0\n self.edges['depth_max'][e] = 0.0\n self.edges['subgrid'][e] = ([],[])", "def read_ice_grid(path):\n grid = xr.open_rasterio(path).squeeze()\n # Deproject coords\n proj = Proj('+proj=merc +lon_0=0 +k=1 +x_0=0 ' +\n '+y_0=-24 +datum=WGS84 +units=m +no_defs')\n lon, lat = proj(grid.coords['x'].values, grid.coords['y'].values,\n inverse=True)\n grid = grid.assign_coords(x=lon, y=lat)\n return grid", "def get_array_grid(self):\n print('Making array grid')\n grid_list = []\n or_list = [0, 0, 0]\n far_list = [0, 0, 0]\n\n for root, subdirs, files in os.walk(self.stem):\n for filename in files:\n if self.probe in filename and self.prot_name in filename and 'ccp4' in filename:\n if ('frequency' not in filename) and ('ranges' not in filename):\n grid_list.append(join(self.stem, filename))\n g = Grid.from_file(join(self.stem, filename))\n _or_list = [g.bounding_box[0][j] for j in range(3)]\n _far_list = [g.bounding_box[1][m] for m in range(3)]\n\n for i in range(3):\n or_list[i] = min(or_list[i], _or_list[i])\n far_list[i] = max(far_list[i], _far_list[i])\n\n self.grid_list = grid_list\n self.spacing = g.spacing\n self.tup_max_length = len(grid_list)\n self.array_grid_origin = (or_list[0], or_list[1], or_list[2])\n self.array_grid_far_corner = (far_list[0], far_list[1], far_list[2])", "def grid_results(infile, resolution = 0.01, clip_shp = None, \n overwrite=True, contour=False):\n outfile = infile.rstrip('().csv') + '_gridded.tif'\n # if not overwrite:\n if os.path.isfile(outfile):\n if not overwrite:\n print('Not creating file %s as already exists' % outfile)\n print('To re-create file (e.g if inputs changed) set overwrite=True)')\n return\n else:\n try:\n os.remove(outfile)\n os.remove((outfile.rstrip('.tif') + '_clip.tif'))\n except:\n pass\n data = np.genfromtxt(infile, delimiter=',')\n max_lon = max(data[:,0])\n min_lon = min(data[:,0])\n max_lat = max(data[:,1])\n min_lat = min(data[:,1])\n #print max_lon, min_lon, max_lat, min_lat\n xi = np.arange(min_lon, max_lon, resolution)\n yi = np.arange(min_lat, max_lat, resolution)\n XI,YI = np.meshgrid(xi,yi)\n xsize = len(xi)\n ysize = len(yi)\n\n print('Interpolating results')\n gridded_results = 
griddata((data[:,0],data[:,1]),data[:,2],(XI,YI),method='linear')\n #print gridded_results\n #outfile = infile.rstrip('().csv') + '_gridded.tif'\n print('Writing gridded data to %s' % outfile)\n driver = gdal.GetDriverByName('GTiff')\n ds = driver.Create(outfile, xsize, ysize, 1, gdal.GDT_Float32)\n srs = osr.SpatialReference()\n srs.ImportFromEPSG(4326)\n ds.SetProjection(srs.ExportToWkt())\n gt = [(min_lon - (resolution/2)), resolution, 0, \n (min_lat - (resolution/2)), 0, resolution]\n ds.SetGeoTransform(gt)\n outband=ds.GetRasterBand(1)\n outband.SetStatistics(np.min(gridded_results), np.max(gridded_results), np.average(gridded_results), np.std(gridded_results))\n outband.WriteArray(gridded_results)\n # Need to close output dataset before we can do clipping\n ds = None\n # now clip by shapefile\n if clip_shp is not None:\n clipfile = outfile.rstrip('.tif') + '_clip.tif'\n cmd = ['gdalwarp',\n '-cutline',\n clip_shp,\n '-crop_to_cutline',\n '-dstalpha',\n outfile,\n clipfile]\n print(cmd)\n call(cmd, shell=False)\n if contour is True:\n cmd = 'gdal_contour -i 1 -off 0.5 %s %s.shp' % (outfile, outfile.rstrip('.tif'))\n print(cmd)\n call(cmd, shell=True)\n cmd = 'gdal_contour -i 1 -off 0.5 %s %s.shp' % (clipfile, clipfile.rstrip('.tif'))\n print(cmd)\n call(cmd, shell=True)", "def test_grdfilter_fails():\n with pytest.raises(GMTInvalidInput):\n grdfilter(np.arange(10).reshape((5, 2)))", "def read_grid(filename_grid, dim=2, slc=None):\n ## get shape and slice\n fid = h5py.File(filename_grid, 'r')\n if dim==2:\n varnames = ['x', 'y', 'ep']\n if slc is None: slc = np.s_[0,:,:]\n if dim==3:\n varnames = ['x', 'y', 'z', 'ep']\n if slc is None: slc = np.s_[:,:,:]\n\n dset = fid.get(varnames[0])\n shape = dset[slc].shape\n (nx,ny,nz) = dset.shape\n ## read variables\n grid = {}\n for varname in varnames:\n try:\n dset = fid.get(varname)\n grid[varname] = np.zeros(shape)\n dset.read_direct(grid[varname], source_sel=slc)\n grid[varname] = grid[varname].transpose()\n except:\n pass\n fid.close()\n return grid, nx, ny, nz", "def import_grid(file_name):\n\n return FileReader(file_name=file_name).grid", "def read_grid2d(grid_file):\n labels = []\n with grid_file.open('r') as f:\n for row in f.readlines():\n labels.append([x.strip() for x in row.split('\\t')])\n\n labels = array(labels)\n grid2d = make_grid(labels.shape[0], labels.shape[1])\n grid2d['label'] = labels\n return grid2d", "def read_grid(filename):\r\n with open(filename) as infile:\r\n lines = infile.read().splitlines()\r\n\r\n grid = [[int(bit) for bit in line.split()] for line in lines]\r\n return grid", "def read_grid(filename):\r\n with open(filename) as infile:\r\n lines = infile.read().splitlines()\r\n\r\n grid = [[int(bit) for bit in line.split()] for line in lines]\r\n return grid", "def to_cdo_grid(self, outfile):", "def load_field(self, filename,unmask=True,timeslice=None,fieldname=None,\n check_for_grid_info=False,grid_info=None,grid_type='HD',\n **grid_kwargs):\n\n print(\"Reading input from {0}\".format(filename))\n grid = gd.makeGrid(grid_type,**grid_kwargs)\n return np.loadtxt(filename,np.float64).reshape(grid.get_grid_dimensions())", "def read_raster (self, filename):\n raster = gdal.Open (filename)\n band = raster.GetRasterBand(1)\n x = band.ReadAsArray () \n nodata_val = band.GetNoDataValue () # get the missing data flag\n x [x == nodata_val] = np.nan # set missing data properly\n return (x)", "def sph2grd(data, **kwargs):\n with GMTTempFile(suffix=\".nc\") as tmpfile:\n with Session() as lib:\n file_context = 
lib.virtualfile_from_data(check_kind=\"vector\", data=data)\n with file_context as infile:\n if (outgrid := kwargs.get(\"G\")) is None:\n kwargs[\"G\"] = outgrid = tmpfile.name # output to tmpfile\n lib.call_module(\n module=\"sph2grd\", args=build_arg_string(kwargs, infile=infile)\n )\n\n return load_dataarray(outgrid) if outgrid == tmpfile.name else None", "def readArray(input):\n data = gdal.Open(input)\n band = data.GetRasterBand(1)\n \n return band.ReadAsArray()", "def get_nc_BGrid_GFDL(grdfile, name='GFDL_CM2.1_North_Pacific', area='regional', \\\n xrange=(60,175), yrange=(120, 190), ystart=235):\n\n nc = pyroms.io.Dataset(grdfile)\n\n lon_t = nc.variables['geolon_t'][:]\n lat_t = nc.variables['geolat_t'][:]\n lon_uv = nc.variables['geolon_c'][:]\n lat_uv = nc.variables['geolat_c'][:]\n\n h = nc.variables['ht'][:]\n\n f = nc.variables['coriolis_param'][:]\n\n kmt = nc.variables['kmt'][:]\n z_t = nc.variables['st_ocean'][:]\n z_t_edges = nc.variables['st_edges_ocean'][:]\n\n kmu = nc.variables['kmu'][:]\n z_uv = nc.variables['sw_ocean'][:]\n z_uv_edges = nc.variables['sw_edges_ocean'][:]\n\n # compute mask at t-point\n M_t, L_t = kmt.shape\n N_t = z_t.shape[0]\n mask_t = np.zeros((N_t, M_t, L_t))\n for j in range(M_t):\n for i in range(L_t):\n try:\n mask_t[0:int(kmt[j,i]), j,i] = 1\n except:\n mask_t[:, j,i] = 0\n\n # compute mask at uv-point\n M_uv, L_uv = kmu.shape\n N_uv = z_uv.shape[0]\n mask_uv = np.zeros((N_uv, M_uv, L_uv))\n for j in range(M_uv):\n for i in range(L_uv):\n try:\n mask_uv[0:int(kmu[j,i]), j,i] = 1\n except:\n mask_uv[:, j,i] = 0\n\n if area == 'npolar':\n #add two rows in the north and the south\n lon_t = lon_t[np.r_[0,0,:np.size(lon_t,0),-1,-1]]\n lon_t = lon_t[:,np.r_[0,:np.size(lon_t,1),-1]]\n lon_t[:,0] = lon_t[:,1] - (lon_t[:,2]-lon_t[:,1])\n lon_t[:,-1] = lon_t[:,-2] + (lon_t[:,-2]-lon_t[:,-3])\n lat_t = lat_t[np.r_[0,0,:np.size(lat_t,0),-1,-1]]\n lat_t = lat_t[:,np.r_[0,:np.size(lat_t,1),-1]]\n lat_t[0,:] = -85\n lat_t[1,:] = -80\n lat_t[-2,:] = 90\n lat_t[-1,:] = 91\n lon_uv = lon_uv[np.r_[0,0,:np.size(lon_uv,0),-1,-1]]\n lon_uv = lon_uv[:,np.r_[0,:np.size(lon_uv,1),-1]]\n lon_uv[:,0] = lon_uv[:,1] - (lon_uv[:,2]-lon_t[:,1])\n lon_uv[:,-1] = lon_uv[:,-2] + (lon_uv[:,-2]-lon_uv[:,-3])\n lat_uv = lat_uv[np.r_[0,0,:np.size(lat_uv,0),-1,-1]]\n lat_uv = lat_uv[:,np.r_[0,:np.size(lat_uv,1),-1]]\n lat_uv[0,:] = -85\n lat_uv[1,:] = -80\n lat_uv[-2,:] = 90\n lat_uv[-1,:] = 91\n mask_t = mask_t[:,np.r_[0,0,:np.size(mask_t,1),-1,-1],:]\n mask_t = mask_t[:,:,np.r_[0,:np.size(mask_t,2),-1]]\n mask_t[:,:,0] = mask_t[:,:,-2]\n mask_t[:,:,-1] = mask_t[:,:,1]\n mask_uv = mask_uv[:,np.r_[0,0,:np.size(mask_uv,1),-1,-1],:]\n mask_uv = mask_uv[:,:,np.r_[0,:np.size(mask_uv,2),-1]]\n mask_uv[:,:,0] = mask_uv[:,:,-2]\n mask_uv[:,:,-1] = mask_uv[:,:,1]\n h = h[np.r_[0,0,:np.size(h,0),-1,-1]]\n h = h[:,np.r_[0,:np.size(h,1),-1]]\n h[:,0] = h[:,-2]\n h[:,-1] = h[:,1]\n f = f[np.r_[0,0,:np.size(f,0),-1,-1]]\n f = f[:,np.r_[0,:np.size(f,1),-1]]\n f[:,0] = f[:,-2]\n f[:,-1] = f[:,1]\n m,l = h.shape\n xrange=(1,l-2)\n yrange=(ystart+2,m-2)\n\n if area == 'tripole':\n #add two rows in the north and the south\n fold1 = L_t//2\n lon_t = lon_t[np.r_[0,0,:np.size(lon_t,0),-1,-1]]\n lon_t[-2,:fold1] = lon_t[-3,L_t:fold1-1:-1]\n lon_t[-2,L_t:fold1-1:-1] = lon_t[-3,:fold1]\n lon_t[-1,:fold1] = lon_t[-4,L_t:fold1-1:-1]\n lon_t[-1,L_t:fold1-1:-1] = lon_t[-4,:fold1]\n\n lon_t = lon_t[:,np.r_[0,:np.size(lon_t,1),-1]]\n lon_t[:,0] = lon_t[:,1] - (lon_t[:,2]-lon_t[:,1])\n lon_t[:,-1] = lon_t[:,-2] 
+ (lon_t[:,-2]-lon_t[:,-3])\n lat_t = lat_t[np.r_[0,0,:np.size(lat_t,0),-1,-1]]\n lat_t = lat_t[:,np.r_[0,:np.size(lat_t,1),-1]]\n lat_t[0,:] = -85\n lat_t[1,:] = -80\n lat_t[-2,:] = lat_t[-3,:]\n lat_t[-1,:] = lat_t[-4,:]\n lon_uv = lon_uv[np.r_[0,0,:np.size(lon_uv,0),-1,-1]]\n\n lon_uv[-2,:fold1] = lon_uv[-4,L_t:fold1-1:-1]\n lon_uv[-2,L_t:fold1-1:-1] = lon_uv[-4,:fold1]\n lon_uv[-1,:fold1] = lon_uv[-5,L_t:fold1-1:-1]\n lon_uv[-1,L_t:fold1-1:-1] = lon_uv[-5,:fold1]\n\n lon_uv = lon_uv[:,np.r_[0,:np.size(lon_uv,1),-1]]\n lon_uv[:,0] = lon_uv[:,1] - (lon_uv[:,2]-lon_t[:,1])\n lon_uv[:,-1] = lon_uv[:,-2] + (lon_uv[:,-2]-lon_uv[:,-3])\n lat_uv = lat_uv[np.r_[0,0,:np.size(lat_uv,0),-1,-1]]\n lat_uv = lat_uv[:,np.r_[0,:np.size(lat_uv,1),-1]]\n lat_uv[0,:] = -85\n lat_uv[1,:] = -80\n lat_uv[-2,:] = lat_uv[-3,:]\n lat_uv[-1,:] = lat_uv[-4,:]\n mask_t = mask_t[:,np.r_[0,0,:np.size(mask_t,1),-1,-1],:]\n mask_t = mask_t[:,:,np.r_[0,:np.size(mask_t,2),-1]]\n mask_t[:,:,0] = mask_t[:,:,-2]\n mask_t[:,:,-1] = mask_t[:,:,1]\n mask_uv = mask_uv[:,np.r_[0,0,:np.size(mask_uv,1),-1,-1],:]\n mask_uv = mask_uv[:,:,np.r_[0,:np.size(mask_uv,2),-1]]\n mask_uv[:,:,0] = mask_uv[:,:,-2]\n mask_uv[:,:,-1] = mask_uv[:,:,1]\n h = h[np.r_[0,0,:np.size(h,0),-1,-1]]\n h = h[:,np.r_[0,:np.size(h,1),-1]]\n h[:,0] = h[:,-2]\n h[:,-1] = h[:,1]\n f = f[np.r_[0,0,:np.size(f,0),-1,-1]]\n f = f[:,np.r_[0,:np.size(f,1),-1]]\n f[:,0] = f[:,-2]\n f[:,-1] = f[:,1]\n m,l = h.shape\n xrange=(1,l-2)\n yrange=(ystart+2,m-2)\n\n return BGrid_GFDL(lon_t, lat_t, lon_uv, lat_uv, \\\n mask_t, mask_uv, h, z_t, z_t_edges, \\\n z_uv, z_uv_edges, f, \\\n name, xrange=xrange, yrange=yrange)", "def get_data():\n return np.genfromtxt(FILENAME, delimiter=',', skip_header=1)", "def get_grid_data_popdiag(grid):\n# indir = '/CESM/bgcwg/obgc_diag/mapping/grids'\n indir = '/glade/p/cesm/bgcwg/obgc_diag/mapping/grids'\n infile = os.path.join(indir, grid + '.nc')\n fp = Nio.open_file(infile,'r')\n nlon, nlat = fp.variables['grid_dims'][:]\n tlat = fp.variables['grid_center_lat'][:]\n tlon = fp.variables['grid_center_lon'][:]\n fp.close()\n tlat = N.reshape(tlat,(nlat,nlon))[:,0]\n tlon = N.reshape(tlon,(nlat,nlon))[0,:]\n return nlon, nlat, tlon, tlat", "def filter_targets(file_name=\"database/master_targets.csv\"):\n tab = Table.read(file_name,format=\"csv\")\n tab = tab.filled(-99999.)\n new_data = []\n for i in tab:\n mag = i['MAGNITUDE'] >= 6. and i['MAGNITUDE'] <= 13.\n width = i['WIDTH1'] <= 0.05 and i['WIDTH2'] <= 0.05\n period = i['PERIOD'] >= 5. and i['PERIOD'] <= 100.\n depth = i['DEPTH1'] >= 0.1 and i['DEPTH2'] >= 0.05\n dec = int(i['DEC'][0:3]) < 30 \n if mag and width and period and depth and dec:\n new_data.append(list(i))\n print(\"Targets filtered from original {} to {}\".format(len(tab),len(new_data)))\n return new_data" ]
[ "0.82479215", "0.73520637", "0.7201283", "0.63456845", "0.63165814", "0.585526", "0.576018", "0.57303125", "0.56888694", "0.5629014", "0.5605087", "0.55669653", "0.5533368", "0.5529867", "0.55254185", "0.5519094", "0.54980975", "0.5455956", "0.5448404", "0.54415244", "0.54415244", "0.5399251", "0.53923297", "0.53886586", "0.5382563", "0.53672916", "0.535062", "0.53469735", "0.5327741", "0.53146297" ]
0.76401544
1
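The metadata entries in these records declare a triplet objective over the "query", "document", and "negatives" fields, so each record pairs one natural-language query with one matching code snippet plus a list of scored non-matching snippets. As an illustrative sketch only (no loader is documented here; the row dict and the three field names are taken from the metadata above, everything else is an assumption), such a record could be expanded into (query, positive, negative) training triplets like this:

# Hedged sketch: assumes `row` is one already-parsed record exposing the
# "query", "document", and "negatives" fields named in the triplet objective.
def to_triplets(row):
    # One query, one positive code snippet, and one triplet per negative snippet.
    query = row["query"]
    positive = row["document"]
    for negative in row["negatives"]:
        yield (query, positive, negative)

# Hypothetical usage once a row has been parsed:
#   for q, pos, neg in to_triplets(row):
#       train_step(q, pos, neg)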
grdfilter an input grid file, and output to a grid file.
def test_grdfilter_file_in_file_out(): with GMTTempFile(suffix=".nc") as tmpfile: result = grdfilter( "@earth_relief_01d", outgrid=tmpfile.name, region=[0, 180, 0, 90], filter="g600", distance="4", ) assert result is None # return value is None assert os.path.exists(path=tmpfile.name) # check that outgrid exists result = grdinfo(tmpfile.name, per_column=True) assert result == "0 180 0 90 -6147.49072266 5164.06005859 1 1 180 90 1 1\n"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_grdfilter_dataarray_in_file_out(grid):\n with GMTTempFile(suffix=\".nc\") as tmpfile:\n result = grdfilter(grid, outgrid=tmpfile.name, filter=\"g600\", distance=\"4\")\n assert result is None # grdfilter returns None if output to a file\n result = grdinfo(tmpfile.name, per_column=True)\n assert (\n result == \"-180 180 -90 90 -6147.49072266 5164.06005859 1 1 360 180 1 1\\n\"\n )", "def test_grdfilter_file_in_dataarray_out():\n outgrid = grdfilter(\n \"@earth_relief_01d\", region=\"0/180/0/90\", filter=\"g600\", distance=\"4\"\n )\n assert isinstance(outgrid, xr.DataArray)\n assert outgrid.gmt.registration == 1 # Pixel registration\n assert outgrid.gmt.gtype == 1 # Geographic type\n # check information of the output DataArray\n # the '@earth_relief_01d' is in pixel registration, so the grid range is\n # not exactly 0/180/0/90\n assert outgrid.coords[\"lat\"].data.min() == 0.5\n assert outgrid.coords[\"lat\"].data.max() == 89.5\n assert outgrid.coords[\"lon\"].data.min() == 0.5\n assert outgrid.coords[\"lon\"].data.max() == 179.5\n npt.assert_almost_equal(outgrid.data.min(), -6147.4907, decimal=2)\n npt.assert_almost_equal(outgrid.data.max(), 5164.06, decimal=2)\n assert outgrid.sizes[\"lat\"] == 90\n assert outgrid.sizes[\"lon\"] == 180", "def grid_results(infile, resolution = 0.01, clip_shp = None, \n overwrite=True, contour=False):\n outfile = infile.rstrip('().csv') + '_gridded.tif'\n # if not overwrite:\n if os.path.isfile(outfile):\n if not overwrite:\n print('Not creating file %s as already exists' % outfile)\n print('To re-create file (e.g if inputs changed) set overwrite=True)')\n return\n else:\n try:\n os.remove(outfile)\n os.remove((outfile.rstrip('.tif') + '_clip.tif'))\n except:\n pass\n data = np.genfromtxt(infile, delimiter=',')\n max_lon = max(data[:,0])\n min_lon = min(data[:,0])\n max_lat = max(data[:,1])\n min_lat = min(data[:,1])\n #print max_lon, min_lon, max_lat, min_lat\n xi = np.arange(min_lon, max_lon, resolution)\n yi = np.arange(min_lat, max_lat, resolution)\n XI,YI = np.meshgrid(xi,yi)\n xsize = len(xi)\n ysize = len(yi)\n\n print('Interpolating results')\n gridded_results = griddata((data[:,0],data[:,1]),data[:,2],(XI,YI),method='linear')\n #print gridded_results\n #outfile = infile.rstrip('().csv') + '_gridded.tif'\n print('Writing gridded data to %s' % outfile)\n driver = gdal.GetDriverByName('GTiff')\n ds = driver.Create(outfile, xsize, ysize, 1, gdal.GDT_Float32)\n srs = osr.SpatialReference()\n srs.ImportFromEPSG(4326)\n ds.SetProjection(srs.ExportToWkt())\n gt = [(min_lon - (resolution/2)), resolution, 0, \n (min_lat - (resolution/2)), 0, resolution]\n ds.SetGeoTransform(gt)\n outband=ds.GetRasterBand(1)\n outband.SetStatistics(np.min(gridded_results), np.max(gridded_results), np.average(gridded_results), np.std(gridded_results))\n outband.WriteArray(gridded_results)\n # Need to close output dataset before we can do clipping\n ds = None\n # now clip by shapefile\n if clip_shp is not None:\n clipfile = outfile.rstrip('.tif') + '_clip.tif'\n cmd = ['gdalwarp',\n '-cutline',\n clip_shp,\n '-crop_to_cutline',\n '-dstalpha',\n outfile,\n clipfile]\n print(cmd)\n call(cmd, shell=False)\n if contour is True:\n cmd = 'gdal_contour -i 1 -off 0.5 %s %s.shp' % (outfile, outfile.rstrip('.tif'))\n print(cmd)\n call(cmd, shell=True)\n cmd = 'gdal_contour -i 1 -off 0.5 %s %s.shp' % (clipfile, clipfile.rstrip('.tif'))\n print(cmd)\n call(cmd, shell=True)", "def test_grdfilter_dataarray_in_dataarray_out(grid):\n result = grdfilter(grid=grid, filter=\"g600\", 
distance=\"4\")\n # check information of the output grid\n assert isinstance(result, xr.DataArray)\n assert result.coords[\"lat\"].data.min() == -89.5\n assert result.coords[\"lat\"].data.max() == 89.5\n assert result.coords[\"lon\"].data.min() == -179.5\n assert result.coords[\"lon\"].data.max() == 179.5\n npt.assert_almost_equal(result.data.min(), -6147.4907, decimal=2)\n npt.assert_almost_equal(result.data.max(), 5164.06, decimal=2)\n assert result.sizes[\"lat\"] == 180\n assert result.sizes[\"lon\"] == 360", "def onGrid(self, event):\n dlg = wx.FileDialog(self, wildcard=\"*.csv\", style=wx.SAVE)\n if dlg.ShowModal() == wx.ID_OK:\n path = dlg.GetPath()\n self.model.exportToGrid(path)\n dlg.Destroy()", "def to_cdo_grid(self, outfile):", "def preprocess(self):\n filtered_data = pd.read_csv(self.input)\n\n if self.config.getboolean(\"filterMissingsInGenes\"):\n # first filter out the genes that have more missings than threshold\n filtered_data = self.filterMissings(self.config[\"threshold\"], filtered_data)\n if self.config.getboolean(\"filterMissingsInSamples\"):\n # second transpose matrix and filter out samples that have more missings than threshold\n filtered_samples = self.filterMissings(self.config[\"threshold\"], filtered_data.T)\n filtered_data = filtered_samples.T\n\n # transpose back into original orientation and save\n filePrefix = self.input.split(\"/\")[-1].split(\".\")[\n 0] # split path by / to receive filename, split filename by . to receive filename without ending\n filename = self.output + filePrefix + \"_filtered.csv\"\n filtered_data.to_csv(filename, index=False)\n return filename", "def ReadGrid(self, grdfile):\n nc = Dataset(grdfile,'r')\n \n self.xv = nc.variables['xv'][:]\n self.yv = nc.variables['yv'][:]\n self.xp = nc.variables['xp'][:]\n self.yp = nc.variables['yp'][:]\n self.xe = nc.variables['xe'][:]\n self.ye = nc.variables['ye'][:]\n self.dz = nc.variables['dz'][:] \n self.dv = nc.variables['dv'][:]\n self.Ac = nc.variables['Ac'][:]\n self.Nk = nc.variables['Nk'][:]\n self.face = nc.variables['face'][:]\n self.mark = nc.variables['mark'][:]\n\tself.cells = nc.variables['cells'][:]\n \n self.Nc = len(self.xv)\n self.Np = len(self.xp)\n self.Ne = len(self.xe)\n self.Nk = len(self.dz)\n self.numsides = self.face.shape[1]", "def main(input_file_path, layer='all', out=None, grid_id_name='GRIDMET_ID',\n buffer=25, scale_factor=0.1, function='invdist', smooth=0, params=None,\n grid_res=None, z_stats=True, res_plot=True, overwrite=False, \n options=None, grid_meta_path=None):\n # build fishnet for interpolation\n make_grid(input_file_path, \n grid_id_name=grid_id_name,\n grid_meta_path=grid_meta_path, \n buffer=buffer, \n overwrite=overwrite,\n grid_res=grid_res)\n \n # run spatial interpolation depending on options\n interpolate(\n input_file_path, \n layer=layer, \n out=out,\n scale_factor=scale_factor, \n function=function, \n smooth=smooth,\n params=params,\n buffer=buffer,\n z_stats=z_stats,\n res_plot=res_plot,\n grid_id_name=grid_id_name,\n grid_res=grid_res,\n options=options,\n grid_meta_path=grid_meta_path)", "def output_results(in_file, csv_path, grid, months, left_side):\n file_name = os.path.basename(in_file)\n\n base_name, _ = os.path.splitext(file_name)\n img_path = os.path.join('output', base_name + '_out.png')\n\n with open(csv_path, 'a', newline='') as csv_file:\n writer = csv.writer(csv_file)\n\n fig, ax = plt.subplots(figsize=(10, 15.45), frameon=False)\n ax.imshow(grid.image, cmap=plt.cm.gray)\n ax.axis('off')\n\n color_row_labels(left_side, 
ax)\n\n for month_idx, month in enumerate(months):\n color_col_labels(month, ax)\n color_grid_cells(month, month_idx, ax, base_name, writer)\n\n fig.savefig(img_path, dpi=300, bbox_inches='tight')", "def _ensure_grid_file(self, clobber, **kwargs):\n if os.path.exists(self.grid_file) and not clobber:\n print(f'exists: {self.grid_file}')\n return\n \n # generate file if needed\n if self.grid_name in ['POP_gx1v6', 'POP_gx1v7', 'POP_gx3v7',]:\n dso = pop_tools.get_grid(self.grid_name, scrip=True) \n \n elif 'latlon' in self.grid_name: \n dso = latlon_to_scrip(**kwargs) \n \n else:\n raise ValueError('unknown grid') \n\n print(f'writing: {self.grid_file}')\n dso.to_netcdf(self.grid_file)", "def load_filter():\n if not os.path.isfile(FILTER):\n print('no filter found, creating square grid')\n return []\n with open(FILTER, 'r') as ff:\n reader = csv.reader(ff)\n l = list(reader)\n ar = numpy.asarray(l)\n # ar = numpy.transpose(ar, (0, 1))\n # ar = numpy.flip(ar, 1)\n # ar = numpy.rot90(ar, k=3, axes=(0, 1))\n # ar = numpy.swapaxes(ar, 0, 1)\n f = list(map(list, ar))\n return f", "def read_unstructured_grid(filepath):\n reader =vtk.vtkXMLUnstructuredGridReader()\n reader.SetFileName(filepath)\n reader.Update()\n grid = reader.GetOutput()\n append_filter = vtk.vtkAppendFilter()\n append_filter.AddInputData(grid)\n append_filter.Update()\n grid = append_filter.GetOutput()\n\n return grid", "def repair_netcdf(fname):\n\n\t# ========== Set the path and the file name ==========\n\t# fname = \"%s_%s_%s_r1i1p1_%s_1950_2050_%s_regrid.nc\" %(var, model, sen, units, sen)\n\tfout = \"%s_setgrid\" % (fname)\n\n\t\n\t# ========== Create a list of files to cleanup ==========\n\tcleanup = []\n\n\t# ========== Check if the file exists ==========\n\tif not os.path.isfile(fname+\".nc\"):\n\t\t# check if the file exists with a different name\n\t\traise IOError(\"WARNING: The file %s cannot be found\"% fname)\n\n\t\n\t# ========== Read longitude from NC file ==========\n\tfh = Dataset(fname+\".nc\", mode='r')\n\ttry:\n\t\tlon = fh.variables['longitude'][:]\n\texcept:\n\t\ttry:\n\t\t\tlon = fh.variables['lon'][:]\n\t\texcept:\n\t\t\tlon = fh.variables['easting'][:] #easting\n\n\n\n\n\t# ========== Create a new grid ==========\n\t# Save the current grid\n\tsubp.call(\"cdo griddes %s.nc > %sGriddes\" % (fname, fname), shell=True)\n\t# add the griddes to the cleanup \n\tcleanup.append(\"%sGriddes\" % fname)\n\n\t# open the current grid\n\tgfile = open(\"%sGriddes\" % fname, \"r\") \n\t# Split the lines of the grid file\n\tginfo = gfile.read().splitlines()\n\t\n\t#Some models have no lat/lon bounds, skip in this case and copy\n\t#\"regrid\" file as \"setgrid\"\n\tif not (any([n.startswith(\"xbounds\") for n in ginfo]) and \n\t\t any([n.startswith(\"ybounds\") for n in ginfo])):\n\t\tsubp.call(\"cp %s.nc %s.nc\" % (fname, fout), shell=True)\n\t\tcleanup.append(\"%s.nc\" % fname)\n\t\treturn cleanup\t\n\t\n\t# Check and see if the start is known\n\tif (\n\t\tany([n.startswith(\"xfirst\") for n in ginfo])\n\t\t) and (\n\t\tany([n.startswith(\"xinc\") for n in ginfo])\n\t\t):\n\t\taddxdet = False\n\t\t# Set the lines to be removed\n\t\tbadel = [\"xvals\", \"yvals\", \" \", \"xbounds\", \"ybounds\"]\n\telse:\n\t\taddxdet = True\n\t\t# Set the lines to be removed\n\t\tbadel = [\"xvals\", \"yvals\", \" \", \"xbounds\", \"ybounds\", \"xfirst\", \"xinc\"]\n\n\t# Create list to hold the new grid details\n\tnew_grid = []\n\n\tfor ginf in ginfo:\n\t\ttest = []\n\t\tfor be in badel:\n\t\t\tif 
ginf.startswith(be):\n\t\t\t\ttest.append(False)\n\t\t\telif ginf == \"#\":\n\t\t\t\ttest.append(False)\n\t\t\telse:\n\t\t\t\ttest.append(True)\n\t\t\n\t\tif all(test):\n\t\t\tnew_grid.append(ginf)\n\t# Add the additional x variables\n\tif addxdet:\n\t\t# work out the model from the fname\n\t\tmodel = fname.split(\"/\")[-2]\n\t\tnew_grid.append('xfirst = -180')\n\t\tnew_grid.append('xinc = %s' % str(\n\t\t\tfloat(lon) ))\n\t\n\n\t# Check the y values, if they are missing use the ones in the original grid file\n\tif not (any([n.startswith(\"yfirst\") for n in ginfo])):\n\t\t# print (\"Seting the y bounds\")\n\t\tvals = []\n\t\tfor glov in range(0,len(ginfo)):\n\t\t\tif ginfo[glov].startswith(\"yvals\"):\n\t\t\t\tvals.append(glov)\n\t\t\telif ginfo[glov].startswith(\"ybounds\"):\n\t\t\t\tvals.append(glov)\n\t\tif len (vals) == 2:\n\t\t\tfor yv in ginfo[vals[0]:vals[1]]:\n\t\t\t\tnew_grid.append(yv)\n\n\t\telse:\n\t\t\tprint(\"\\n\")\n\t\t\traise IndexError(\"Bounding is incorrect\")\n\n\t# Save the grid out\n\tnewgrid = save_grid(fname, new_grid)\n\tcleanup.append(newgrid)\n\n\t# ========== Set the new grid file ==========\n\t# Save the current grid\n\tsubp.call(\"cdo setgrid,%sGridFix %s.nc %s.nc\" % (fname, fname, fout), shell=True)\n\t\n\tif not os.path.isfile(\"%s.nc\" % fout):\n\t\traise IOError(\"The output file was not created, going interactive\")\n\t\n\t# ========== return the files to be removed ==========\n\tcleanup.append(\"%s.nc\" % fname)\n\treturn cleanup", "def exportECL(self, fname):\r\n\r\n # TODO add consistency of dimensions across the inputs\r\n self.ne, self.nn, self.nz = np.array(self.Grid.GetDimensions()) - 1 # ECLIPSE\r\n filename, ext = os.path.splitext(fname)\r\n if self.GridType == \"vtkStructuredGrid\":\r\n with io.open(filename + \".GRDECL\", 'w', newline='\\r\\n') as f:\r\n f.write('-- Generated [\\n')\r\n f.write('-- Format : ECLIPSE keywords (grid geometry and properties) (ASCII)\\n')\r\n # f.write('-- Exported by : Petrel 2013.7 (64-bit) Schlumberger\\n'\r\n f.write('-- Exported by : ReGrid v.' 
+ version + \"\\n\")\r\n f.write('-- User name : ' + getpass.getuser() + \"\\n\")\r\n f.write('-- Date : ' + datetime.now().strftime(\"%A, %B %d %Y %H:%M:%S\") + \"\\n\")\r\n f.write('-- Project : ' + \"ReGrid project\\n\")\r\n f.write('-- Grid : ' + \"Description\\n\")\r\n f.write('-- Generated ]\\n\\n')\r\n\r\n f.write('SPECGRID -- Generated : ReGrid\\n')\r\n f.write(' %i %i %i 1 F /\\n\\n' % (self.ne, self.nn, self.nz))\r\n f.write('COORDSYS -- Generated : ReGrid\\n')\r\n f.write(' 1 4 /\\n\\n') # what is this line?\r\n\r\n f.write('COORD -- Generated : ReGrid\\n')\r\n nz = self.nz\r\n fstr = str(\" \")\r\n\r\n for iy in range(self.nn):\r\n for ix in range(self.ne):\r\n p0 = self.Grid.GetCell(ix, iy, 0).GetPoints().GetPoint(0)\r\n fstr = self.printCOORDS(f, p0, fstr)\r\n p1 = self.Grid.GetCell(ix, iy, nz - 1).GetPoints().GetPoint(4)\r\n fstr = self.printCOORDS(f, p1, fstr)\r\n # outside edge on far x\r\n p2 = self.Grid.GetCell(ix, iy, 0).GetPoints().GetPoint(1)\r\n fstr = self.printCOORDS(f, p2, fstr)\r\n p3 = self.Grid.GetCell(ix, iy, nz - 1).GetPoints().GetPoint(5)\r\n fstr = self.printCOORDS(f, p3, fstr)\r\n # outside edge on far y\r\n for ix in range(self.ne):\r\n p8 = self.Grid.GetCell(ix, iy, 0).GetPoints().GetPoint(3)\r\n fstr = self.printCOORDS(f, p8, fstr)\r\n p9 = self.Grid.GetCell(ix, iy, nz - 1).GetPoints().GetPoint(7)\r\n fstr = self.printCOORDS(f, p9, fstr)\r\n # outside edge on far northeast\r\n p14 = self.Grid.GetCell(ix, iy, 0).GetPoints().GetPoint(2)\r\n fstr = self.printCOORDS(f, p14, fstr)\r\n p15 = self.Grid.GetCell(ix, iy, nz - 1).GetPoints().GetPoint(6)\r\n fstr = self.printCOORDS(f, p15, fstr)\r\n f.write(fstr)\r\n fstr = \" \"\r\n f.write(\" /\")\r\n f.write(\"\\n\")\r\n f.write(\"\\n\")\r\n\r\n f.write('ZCORN -- Generated : ReGrid\\n')\r\n for iz in range(self.nz):\r\n for iy in range(self.nn):\r\n # front face\r\n for ix in range(self.ne):\r\n p0 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(0)\r\n p1 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(1)\r\n fstr = self.printCOORDS(f, [p0[2]], fstr)\r\n fstr = self.printCOORDS(f, [p1[2]], fstr)\r\n # back face\r\n for ix in range(self.ne):\r\n p0 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(3)\r\n p1 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(2)\r\n fstr = self.printCOORDS(f, [p0[2]], fstr)\r\n fstr = self.printCOORDS(f, [p1[2]], fstr)\r\n # bottom layer\r\n for iy in range(self.nn):\r\n # front face\r\n for ix in range(self.ne):\r\n p0 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(4)\r\n p1 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(5)\r\n fstr = self.printCOORDS(f, [p0[2]], fstr)\r\n fstr = self.printCOORDS(f, [p1[2]], fstr)\r\n # back face\r\n for ix in range(self.ne):\r\n p0 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(7)\r\n p1 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(6)\r\n fstr = self.printCOORDS(f, [p0[2]], fstr)\r\n fstr = self.printCOORDS(f, [p1[2]], fstr)\r\n f.write(fstr)\r\n fstr = \" \"\r\n f.write(\" /\")\r\n f.write(\"\\n\")\r\n f.write(\"\\n\")\r\n f.write('ACTNUM -- Generated : ReGrid\\n')\r\n\r\n c = -999\r\n N = 0\r\n for iac in self.ActiveCells.flatten(order='F'):\r\n if iac == c:\r\n N += 1\r\n else:\r\n if c != -999:\r\n fstr = self.printAC(f, c, N, fstr)\r\n c = iac\r\n N = 1\r\n fstr = self.printAC(f, c, N, fstr)\r\n f.write(fstr)\r\n f.write(\" /\")\r\n f.write(\"\\n\")\r\n f.write(\"\\n\")\r\n else:\r\n print(\"Only structured grids can be converted to ECLIPSE files\")", "def smoothen_raster(in_path, out_path, 
radius=2):\n cmd = \"saga_cmd grid_filter 1 -INPUT {} -RESULT {} -KERNEL_TYPE 0 -KERNEL_RADIUS {}\".format(\n in_path, out_path, radius\n )\n os.system(cmd)", "def import_grid(file_name):\n\n return FileReader(file_name=file_name).grid", "def CC_2Dfilter(\n h5path_labels,\n map_propnames,\n criteria,\n h5path_int='',\n slicedim=0,\n usempi=False,\n outputfile='',\n protective=False,\n ):\n\n (min_area,\n max_area,\n max_intensity_mb,\n max_eccentricity,\n min_solidity,\n min_euler_number,\n min_extent) = criteria\n\n # prepare mpi\n mpi_info = utils.get_mpi_info(usempi)\n\n # TODO: check output path\n\n # open data for reading\n h5file_mm, ds_mm, _, _ = utils.h5_load(h5path_labels, comm=mpi_info['comm'])\n if h5path_int:\n h5file_mb, ds_mb, _, _ = utils.h5_load(h5path_int, comm=mpi_info['comm'])\n else:\n ds_mb = None\n # mask used as intensity image in mean_intensity criterium\n\n # get the maximum labelvalue in the input\n root = h5path_labels.split('.h5')[0]\n maxlabel = get_maxlabel(root, ds_mm)\n\n # prepare mpi\n n_slices = ds_mm.shape[slicedim]\n series = np.array(range(0, n_slices), dtype=int)\n if mpi_info['enabled']:\n series = utils.scatter_series(mpi_info, series)[0]\n if mpi_info['rank'] == 0:\n fws_reduced = np.zeros((maxlabel + 1, len(map_propnames)),\n dtype='float')\n else:\n fws_reduced = None\n\n fws = np.zeros((maxlabel + 1, len(map_propnames)),\n dtype='float')\n\n mapall = criteria.count(None) == len(criteria)\n\n # pick labels observing the constraints\n go2D = ((max_eccentricity is not None) or\n (min_solidity is not None) or\n (min_euler_number is not None) or\n mapall)\n if go2D:\n\n for i in series:\n slcMM = utils.get_slice(ds_mm, i, slicedim)\n if h5path_int:\n slcMB = utils.get_slice(ds_mb, i, slicedim) # , 'bool'\n else:\n slcMB = None\n fws = check_constraints(slcMM, fws, map_propnames,\n criteria, slcMB, mapall)\n if mpi_info['enabled']:\n mpi_info['comm'].Reduce(fws, fws_reduced, op=MPI.MAX, root=0)\n else:\n fws_reduced = fws\n\n else:\n\n if mpi_info['rank'] == 0:\n fws = check_constraints(ds_mm, fws, map_propnames,\n criteria, ds_mb, mapall)\n fws_reduced = fws\n\n # write the forward maps to a numpy vector\n if mpi_info['rank'] == 0:\n slc = int(n_slices/2)\n slcMM = ds_mm[slc, :, :]\n slcMB = ds_mb[slc, :, :] if h5path_int else None\n datatypes = get_prop_datatypes(slcMM, map_propnames, slcMB)\n for i, propname in enumerate(map_propnames):\n root = outputfile.split('.h5')[0]\n nppath = '{}_{}.npy'.format(root, propname)\n outarray = np.array(fws_reduced[:, i], dtype=datatypes[i])\n np.save(nppath, outarray)\n\n # close and return\n h5file_mm.close()\n if h5path_int:\n h5file_mb.close()\n\n if mpi_info['rank'] == 0:\n return outarray", "def export_grid(self, vtk_fname='GRID', toVTK=True, toNumpy=True):\r\n print('Exporting grids')\r\n tID = 0\r\n # Start by exporting input properties (from read_prop() or read_ext_prop())\r\n # In VTK files, these props will only be visible at only the first timestep\r\n dp = []\r\n propIds = []\r\n for prop in self.out_props:\r\n if type(self.out_props[prop]) is not dict:\r\n data = np.array(self.out_props[prop])\r\n # Save to Numpy\r\n if toNumpy:\r\n self.export_prop(data, prop, tID)\r\n # Add property data to vts structured grid\r\n if toVTK:\r\n propIds = self._prep_vtk(data, prop, propIds)\r\n self._check_out('vtk')\r\n else:\r\n dp.append(prop)\r\n\r\n # Export time-series output properties (from read_out_props())\r\n for t in self.times:\r\n for prop in self.out_props:\r\n if prop in dp:\r\n data = 
np.array(self.out_props[prop][t], order='F')\r\n # Save to Numpy\r\n if toNumpy:\r\n # self.export_prop(data, prop, tID)\r\n self.export_prop(data, prop, t)\r\n # Add property data to vts structured grid\r\n if toVTK:\r\n propIds = self._prep_vtk(data, prop, propIds)\r\n # Save to VTK\r\n if toVTK:\r\n if tID == 0:\r\n self._check_out('vtk')\r\n # self.exportVTK(os.path.join(self.out_dir, 'vtk', vtk_fname + str(tID)))\r\n self.exportVTK(os.path.join(self.out_dir, 'vtk', vtk_fname + str(t)))\r\n for id in propIds:\r\n self.Grid.GetCellData().RemoveArray(id)\r\n tID += 1\r\n propIds = []", "def filterBedFile(inputBed, outputBed, scoreFilterSingle, scoreFilterMultiple, newName=\"\"):\n\n out = open(outputBed, \"w\")\n\n count = 0\n for line in open(inputBed):\n count += 1\n if line.startswith(\"track\"):\n if count > 1:\n continue\n\n if newName != \"\":\n pieces = line.split()\n pieces.pop(1)\n pieces.insert(1, \"name='%s'\" % newName)\n pieces.pop(2)\n pieces.insert(2, \"description='%s'\" % newName) \n newTrack = \" \".join(pieces)\n out.write(newTrack)\n out.write(\"\\n\")\n continue\n\n pieces = line.split(\"\\t\")\n\n numCollapsed = 0\n if pieces[3].find(\"|junc=\") > 0:\n numCollapsed = int(pieces[3].split(\"junc=\")[-1])\n\n score = float(pieces[4])\n if (numCollapsed < 2) and (score <= scoreFilterSingle):\n continue\n elif score <= scoreFilterMultiple:\n continue\n\n out.write(\"\\t\".join(pieces))\n # if split on \"\\t\" then \"\\n\" still there. otherwise need this.\n #out.write(\"\\n\")", "def write_projections(self, filename, binning=1, lowpassFilter=None, verbose=False):\n from pytom.basic.files import read_em, write_em\n from pytom.basic.filter import filter as filterFunction\n import pytom_freqweight\n from pytom.basic.transformations import resize\n\n if binning:\n imdim = int(float(self._imdim) / float(binning) + .5)\n else:\n imdim = self._imdim\n\n # design lowpass filter\n if lowpassFilter:\n if lowpassFilter > 1.:\n lowpassFilter = 1.\n print(\"Warning: lowpassFilter > 1 - set to 1 (=Nyquist)\")\n # weighting filter: arguments: (angle, cutoff radius, dimx, dimy,\n lpf = pytom_freqweight.weight(0.0, lowpassFilter*imdim/2, imdim, imdim/2+1, 1, lowpassFilter/5.*imdim)\n\n for (ii, projection) in enumerate(self._ProjectionList):\n if projection._filename.split('.')[-1] == 'st':\n from pytom.basic.files import EMHeader, read\n header = EMHeader()\n header.set_dim(x=imdim, y=imdim, z=1)\n idx = projection._index\n if verbose:\n print(\"reading in projection %d\" % idx)\n image = read(file=projection._filename, subregion=[0, 0, idx - 1, self._imdim, self._imdim, 1],\n sampling=[0, 0, 0], binning=[0, 0, 0])\n else:\n # read projection files\n (image, header) = read_em(projection._filename)\n if not (binning == 1) or (binning == None):\n image = resize(volume=image, factor=1 / float(binning))[0]\n if lowpassFilter:\n filtered = filterFunction(volume=image, filterObject=lpf, fourierOnly=False)\n image = filtered[0]\n\n tiltAngle = projection._tiltAngle\n if verbose:\n print(\"tiltAngle=%2.2f\" % tiltAngle)\n header.set_tiltangle(tiltAngle)\n newFilename = (filename + \"_\" + str(projection.getIndex()) + '.em')\n write_em(filename=newFilename, data=image, header=header)", "def regrid(self, grid=None, method=\"bil\"):\n\n del_grid = None\n if grid is None:\n raise ValueError(\"No grid was supplied\")\n\n grid_type = None\n\n # find the grid type\n if isinstance(grid, pd.DataFrame):\n grid_type = \"df\"\n\n # If the grid is an xarray object, we need to convert it to .nc\n if 
isinstance(grid, xr.Dataset):\n grid_type = \"xr\"\n temp_nc = temp_file(\"nc\")\n grid.to_netcdf(temp_nc)\n grid = temp_nc\n del_grid = copy.deepcopy(grid)\n nc_safe.append(del_grid)\n\n if type(grid) is str:\n if os.path.exists(grid) == False:\n raise ValueError(\"grid file supplied does not exist\")\n grid_type = \"nc\"\n\n if \"DataSet\" in str(type(grid)):\n grid.run()\n if type(grid.current) is str:\n grid = grid.current\n else:\n grid = grid.current[0]\n warnings.warn(message=\"The first file in dataset used for regridding!\")\n grid_type = \"nc\"\n\n if grid_type is None:\n raise ValueError(\"grid supplied is not valid\")\n\n # check that the remapping method is valid\n if (method in {\"bil\", \"dis\", \"nn\"}) == False:\n raise ValueError(\"remapping method is invalid. Please check\")\n\n # check the number of grids in the dataset\n\n # Do do the horizontal regridding\n\n grid_split = dict()\n\n self.run()\n\n if type(self.current) is list:\n orig_files = copy.deepcopy(self.current)\n else:\n orig_files = [copy.deepcopy(self.current)]\n\n for ff in self:\n cdo_result = subprocess.run(\n f\"cdo griddes {ff}\",\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n ).stdout\n cdo_result = str(cdo_result)\n if cdo_result in grid_split:\n grid_split[cdo_result].append(ff)\n else:\n grid_split[cdo_result] = [ff]\n\n if grid is not None:\n # first generate the grid\n if grid_type == \"df\":\n target_grid = generate_grid(grid)\n del_grid = copy.deepcopy(target_grid)\n nc_safe.append(del_grid)\n else:\n target_grid = grid\n new_files = []\n\n for key in grid_split:\n # first we need to generate the weights for remapping\n # and add this to the files created list and self.weights\n tracker = open_data(grid_split[key], suppress_messages=True)\n\n weights_nc = temp_file(\"nc\")\n\n if type(tracker.current) is list:\n cdo_command = (\n f\"cdo -gen{method},{target_grid} {tracker.current[0]} {weights_nc}\"\n )\n else:\n cdo_command = (\n f\"cdo -gen{method},{target_grid} {tracker.current} {weights_nc}\"\n )\n\n weights_nc = run_cdo(cdo_command, target=weights_nc)\n\n cdo_command = f\"cdo -remap,{target_grid},{weights_nc}\"\n\n tracker._execute = True\n\n nc_safe.append(weights_nc)\n\n run_this(cdo_command, tracker, output=\"ensemble\")\n\n nc_safe.remove(weights_nc)\n\n if type(tracker.current) is str:\n new_files += [tracker.current]\n else:\n new_files += tracker.current\n\n for ff in new_files:\n nc_safe.append(ff)\n\n self.history += tracker.history\n\n self._hold_history = copy.deepcopy(self.history)\n\n if del_grid is not None:\n if del_grid in nc_safe:\n nc_safe.remove(del_grid)\n\n for ff in new_files:\n if ff in nc_safe:\n nc_safe.remove(ff)\n\n self.current = new_files\n\n cleanup()\n self.disk_clean()", "def main(in_path, keep_path, out_path):\n\t# First open the input csv\n\tcsv_hndl = lambda x: np.array([np.array(r) for r in x])\n\tdata, headers = read_csv(in_path, csv_hndl, use_headers=True, delimiter=',')\n\n\t# Read headers to keep\n\tkeeps = []\n\n\t# Regex for ignoring comments\n\tcmnt_re = re.compile(\"^#\")\n\n\t# Open and read the file\n\twith open(keep_path) as f_obj:\n\t\tfor line in f_obj:\n\t\t\tline = line.strip()\n\t\t\t# If line is commented out, ignore\n\t\t\tif cmnt_re.match(line):\n\t\t\t\tcontinue\n\t\t\t# Otherwise add to list of keeps\n\t\t\tkeeps.append(line)\n\n\t# Prune the csv\n\tnew_data, new_headers = prune_csv(data,headers,keeps)\n\n\t# Write to output csv file\n\twrite_csv(\n\t\tout_path, \n\t\tnew_data, \n\t\tnew_headers, 
\n\t\tdelimiter=',', \n\t\tquotechar='\"',\n\t\tquoting=csv.QUOTE_MINIMAL\n\t)", "def get_nc_BGrid_GFDL(grdfile, name='GFDL_CM2.1_North_Pacific', area='regional', \\\n xrange=(60,175), yrange=(120, 190), ystart=235):\n\n nc = pyroms.io.Dataset(grdfile)\n\n lon_t = nc.variables['geolon_t'][:]\n lat_t = nc.variables['geolat_t'][:]\n lon_uv = nc.variables['geolon_c'][:]\n lat_uv = nc.variables['geolat_c'][:]\n\n h = nc.variables['ht'][:]\n\n f = nc.variables['coriolis_param'][:]\n\n kmt = nc.variables['kmt'][:]\n z_t = nc.variables['st_ocean'][:]\n z_t_edges = nc.variables['st_edges_ocean'][:]\n\n kmu = nc.variables['kmu'][:]\n z_uv = nc.variables['sw_ocean'][:]\n z_uv_edges = nc.variables['sw_edges_ocean'][:]\n\n # compute mask at t-point\n M_t, L_t = kmt.shape\n N_t = z_t.shape[0]\n mask_t = np.zeros((N_t, M_t, L_t))\n for j in range(M_t):\n for i in range(L_t):\n try:\n mask_t[0:int(kmt[j,i]), j,i] = 1\n except:\n mask_t[:, j,i] = 0\n\n # compute mask at uv-point\n M_uv, L_uv = kmu.shape\n N_uv = z_uv.shape[0]\n mask_uv = np.zeros((N_uv, M_uv, L_uv))\n for j in range(M_uv):\n for i in range(L_uv):\n try:\n mask_uv[0:int(kmu[j,i]), j,i] = 1\n except:\n mask_uv[:, j,i] = 0\n\n if area == 'npolar':\n #add two rows in the north and the south\n lon_t = lon_t[np.r_[0,0,:np.size(lon_t,0),-1,-1]]\n lon_t = lon_t[:,np.r_[0,:np.size(lon_t,1),-1]]\n lon_t[:,0] = lon_t[:,1] - (lon_t[:,2]-lon_t[:,1])\n lon_t[:,-1] = lon_t[:,-2] + (lon_t[:,-2]-lon_t[:,-3])\n lat_t = lat_t[np.r_[0,0,:np.size(lat_t,0),-1,-1]]\n lat_t = lat_t[:,np.r_[0,:np.size(lat_t,1),-1]]\n lat_t[0,:] = -85\n lat_t[1,:] = -80\n lat_t[-2,:] = 90\n lat_t[-1,:] = 91\n lon_uv = lon_uv[np.r_[0,0,:np.size(lon_uv,0),-1,-1]]\n lon_uv = lon_uv[:,np.r_[0,:np.size(lon_uv,1),-1]]\n lon_uv[:,0] = lon_uv[:,1] - (lon_uv[:,2]-lon_t[:,1])\n lon_uv[:,-1] = lon_uv[:,-2] + (lon_uv[:,-2]-lon_uv[:,-3])\n lat_uv = lat_uv[np.r_[0,0,:np.size(lat_uv,0),-1,-1]]\n lat_uv = lat_uv[:,np.r_[0,:np.size(lat_uv,1),-1]]\n lat_uv[0,:] = -85\n lat_uv[1,:] = -80\n lat_uv[-2,:] = 90\n lat_uv[-1,:] = 91\n mask_t = mask_t[:,np.r_[0,0,:np.size(mask_t,1),-1,-1],:]\n mask_t = mask_t[:,:,np.r_[0,:np.size(mask_t,2),-1]]\n mask_t[:,:,0] = mask_t[:,:,-2]\n mask_t[:,:,-1] = mask_t[:,:,1]\n mask_uv = mask_uv[:,np.r_[0,0,:np.size(mask_uv,1),-1,-1],:]\n mask_uv = mask_uv[:,:,np.r_[0,:np.size(mask_uv,2),-1]]\n mask_uv[:,:,0] = mask_uv[:,:,-2]\n mask_uv[:,:,-1] = mask_uv[:,:,1]\n h = h[np.r_[0,0,:np.size(h,0),-1,-1]]\n h = h[:,np.r_[0,:np.size(h,1),-1]]\n h[:,0] = h[:,-2]\n h[:,-1] = h[:,1]\n f = f[np.r_[0,0,:np.size(f,0),-1,-1]]\n f = f[:,np.r_[0,:np.size(f,1),-1]]\n f[:,0] = f[:,-2]\n f[:,-1] = f[:,1]\n m,l = h.shape\n xrange=(1,l-2)\n yrange=(ystart+2,m-2)\n\n if area == 'tripole':\n #add two rows in the north and the south\n fold1 = L_t//2\n lon_t = lon_t[np.r_[0,0,:np.size(lon_t,0),-1,-1]]\n lon_t[-2,:fold1] = lon_t[-3,L_t:fold1-1:-1]\n lon_t[-2,L_t:fold1-1:-1] = lon_t[-3,:fold1]\n lon_t[-1,:fold1] = lon_t[-4,L_t:fold1-1:-1]\n lon_t[-1,L_t:fold1-1:-1] = lon_t[-4,:fold1]\n\n lon_t = lon_t[:,np.r_[0,:np.size(lon_t,1),-1]]\n lon_t[:,0] = lon_t[:,1] - (lon_t[:,2]-lon_t[:,1])\n lon_t[:,-1] = lon_t[:,-2] + (lon_t[:,-2]-lon_t[:,-3])\n lat_t = lat_t[np.r_[0,0,:np.size(lat_t,0),-1,-1]]\n lat_t = lat_t[:,np.r_[0,:np.size(lat_t,1),-1]]\n lat_t[0,:] = -85\n lat_t[1,:] = -80\n lat_t[-2,:] = lat_t[-3,:]\n lat_t[-1,:] = lat_t[-4,:]\n lon_uv = lon_uv[np.r_[0,0,:np.size(lon_uv,0),-1,-1]]\n\n lon_uv[-2,:fold1] = lon_uv[-4,L_t:fold1-1:-1]\n lon_uv[-2,L_t:fold1-1:-1] = lon_uv[-4,:fold1]\n 
lon_uv[-1,:fold1] = lon_uv[-5,L_t:fold1-1:-1]\n lon_uv[-1,L_t:fold1-1:-1] = lon_uv[-5,:fold1]\n\n lon_uv = lon_uv[:,np.r_[0,:np.size(lon_uv,1),-1]]\n lon_uv[:,0] = lon_uv[:,1] - (lon_uv[:,2]-lon_t[:,1])\n lon_uv[:,-1] = lon_uv[:,-2] + (lon_uv[:,-2]-lon_uv[:,-3])\n lat_uv = lat_uv[np.r_[0,0,:np.size(lat_uv,0),-1,-1]]\n lat_uv = lat_uv[:,np.r_[0,:np.size(lat_uv,1),-1]]\n lat_uv[0,:] = -85\n lat_uv[1,:] = -80\n lat_uv[-2,:] = lat_uv[-3,:]\n lat_uv[-1,:] = lat_uv[-4,:]\n mask_t = mask_t[:,np.r_[0,0,:np.size(mask_t,1),-1,-1],:]\n mask_t = mask_t[:,:,np.r_[0,:np.size(mask_t,2),-1]]\n mask_t[:,:,0] = mask_t[:,:,-2]\n mask_t[:,:,-1] = mask_t[:,:,1]\n mask_uv = mask_uv[:,np.r_[0,0,:np.size(mask_uv,1),-1,-1],:]\n mask_uv = mask_uv[:,:,np.r_[0,:np.size(mask_uv,2),-1]]\n mask_uv[:,:,0] = mask_uv[:,:,-2]\n mask_uv[:,:,-1] = mask_uv[:,:,1]\n h = h[np.r_[0,0,:np.size(h,0),-1,-1]]\n h = h[:,np.r_[0,:np.size(h,1),-1]]\n h[:,0] = h[:,-2]\n h[:,-1] = h[:,1]\n f = f[np.r_[0,0,:np.size(f,0),-1,-1]]\n f = f[:,np.r_[0,:np.size(f,1),-1]]\n f[:,0] = f[:,-2]\n f[:,-1] = f[:,1]\n m,l = h.shape\n xrange=(1,l-2)\n yrange=(ystart+2,m-2)\n\n return BGrid_GFDL(lon_t, lat_t, lon_uv, lat_uv, \\\n mask_t, mask_uv, h, z_t, z_t_edges, \\\n z_uv, z_uv_edges, f, \\\n name, xrange=xrange, yrange=yrange)", "def export(**kwargs):\n\n import os\n\n interface = None # Holds the actual FileInterface for the specified data format\n vertex_index_to_file_key_map = None\n element_index_to_file_key_map = None\n\n if 'file_name' in kwargs:\n fname = kwargs['file_name']\n else:\n raise ValueError(\"file_name must be specified.\")\n \n extension = os.path.splitext(fname)[1].lower()\n\n if extension=='.msh':\n from bempp.api.file_interfaces import gmsh\n interface = gmsh.GmshInterface()\n \n if int('grid' in kwargs) + int('grid_function' in kwargs) != 1:\n raise ValueError(\"Exactly one of 'grid' or 'grid_function' must be specified\")\n\n if 'grid' in kwargs:\n grid = kwargs['grid']\n elif 'grid_function' in kwargs:\n grid = kwargs['grid_function'].grid\n\n number_of_vertices = grid.leaf_view.entity_count(2)\n number_of_elements = grid.leaf_view.entity_count(0)\n\n offset = interface.index_offset\n\n if 'vertex_index_to_file_key_map' in kwargs:\n vertex_index_to_file_key_map = kwargs['vertex_index_to_file_key_map']\n else:\n vertex_index_to_file_key_map = range(offset,number_of_vertices+offset)\n if 'element_index_to_file_key_map' in kwargs:\n element_index_to_file_key_map = kwargs['element_index_to_file_key_map']\n else:\n element_index_to_file_key_map = range(offset,number_of_elements+offset)\n\n # Create the vertex and element structure\n\n from collections import OrderedDict\n\n vertex_iterator = grid.leaf_view.entity_iterator(2)\n element_iterator = grid.leaf_view.entity_iterator(0)\n index_set = grid.leaf_view.index_set()\n\n vertices = OrderedDict([(vertex_index_to_file_key_map[index_set.entity_index(vertex)],vertex.geometry.corners[:,0])\n for vertex in vertex_iterator])\n elements = OrderedDict([(element_index_to_file_key_map[index_set.entity_index(element)],\n {'data':[vertex_index_to_file_key_map[index_set.sub_entity_index(element,n,2)] for n in range(3)],\n 'domain_index':element.domain}) for element in element_iterator])\n\n interface.add_grid_data(vertices,elements)\n\n # Evaluate data\n\n if 'grid_function' in kwargs:\n fun = kwargs['grid_function']\n data_type = kwargs.get('data_type',interface.default_data_type)\n\n if 'transformation' in kwargs:\n transformation = kwargs['transformation']\n else:\n transformation = lambda 
x: x\n\n index_set = grid.leaf_view.index_set()\n\n if data_type == 'element_node':\n local_coordinates = _np.array([[0,1,0],[0,0,1]])\n data = OrderedDict.fromkeys(element_index_to_file_key_map)\n\n for element in grid.leaf_view.entity_iterator(0):\n data[element_index_to_file_key_map[index_set.entity_index(element)]] = transformation(\n fun.evaluate(element,local_coordinates))\n interface.add_element_node_data(data,kwargs.get('label','element_node_data'))\n elif data_type == 'node':\n local_coordinates = _np.array([[0,1,0],[0,0,1]])\n data = OrderedDict.fromkeys(vertex_index_to_file_key_map)\n for element in grid.leaf_view.entity_iterator(0):\n local_data = transformation(fun.evaluate(element,local_coordinates))\n for i in range(3):\n data[vertex_index_to_file_key_map[index_set.sub_entity_index(element,i,2)]] = local_data[:,i]\n interface.add_node_data(data,kwargs.get('label','node_data'))\n elif data_type == 'element':\n local_coordinates = _np.array([[1./3],[1./3]])\n data = OrderedDict.fromkeys(element_index_to_file_key_map)\n\n for element in grid.leaf_view.entity_iterator(0):\n data[element_index_to_file_key_map[index_set.entity_index(element)]] = transformation(\n fun.evaluate(element,local_coordinates).ravel())\n interface.add_element_data(data,kwargs.get('label','element_data'))\n else:\n raise ValueError(\"data_type must be one of 'node', 'element', or 'element_node'\")\n\n interface.write(kwargs['file_name'])", "def load_grd(filename):\n with open(filename, 'r') as f:\n meta = {}\n meta['header'] = []\n meta['header'].append(f.readline().rstrip('\\n'))\n while meta['header'][-1] != '++++':\n meta['header'].append(f.readline().rstrip('\\n'))\n # These determine the type of grid and the field format.\n meta['KTYPE'] = int(f.readline().split()[0])\n if meta['KTYPE'] != 1:\n raise ValueError(\"Not implemented.\")\n meta['NSET'], meta['ICOMP'], meta['NCOMP'], meta['IGRID'] = [int(s) for s in f.readline().split()]\n # The grid center in units of the x and y grid spacing.\n meta['IX'], meta['IY'] = [int(s) for s in f.readline().split()]\n # These are the x and y grid limits: S is lower, and E is upper.\n meta['XS'], meta['YS'], meta['XE'], meta['YE'] = [float(s) for s in f.readline().split()]\n # These are the numbers of grid points in x and y.\n meta['NX'], meta['NY'], meta['KLIMIT'] = [int(s) for s in f.readline().split()]\n # Implement this to read elliptically truncated grids.\n if meta['KLIMIT'] != 0:\n raise ValueError(\"Not implemented.\")\n # Load the field data. 
This returns an array with shape (NX * NY, 2 * NCOMP).\n conv = dict([(column, string_to_float) for column in range(2 * meta['NCOMP'])])\n data = np.loadtxt(f, dtype=float, converters=conv)\n # Determine the grid spacing and center values.\n meta['DX'] = (meta['XE'] - meta['XS']) / (meta['NX'] - 1)\n meta['DY'] = (meta['YE'] - meta['YS']) / (meta['NY'] - 1)\n meta['XCEN'] = meta['DX'] * meta['IX']\n meta['YCEN'] = meta['DY'] * meta['IY']\n # Reshape the data.\n map = np.empty((meta['NX'], meta['NY'], meta['NCOMP']),\n dtype=np.complex)\n for component in range(meta['NCOMP']):\n column = data[:, 2 * component] + 1j * data[:, 2 * component + 1]\n map[:, :, component] = column.reshape(meta['NX'], meta['NY'], order='F')\n return meta, map", "def write_grid2d(grid_file, grid2d):\n with grid_file.open('w') as f:\n for row in grid2d['label']:\n f.write('\\t'.join(row) + '\\n')", "def create_filtered_network_file(network_file_prefix, filtered_network_file, ueids):\n network_file_method_attribute = network_file_prefix + \"_method_id.eda\"\n network_file_source_attribute = network_file_prefix + \"_source.eda\"\n #biana_output_converter.filter_network_by_interaction_type(network_attribute_file_name = network_file_method_attribute, network_out_file_name = network_file_prefix + \"_y2h.sif\", interaction_type=\"y2h\")\n #biana_output_converter.filter_network_by_interaction_type(network_attribute_file_name = network_file_method_attribute, network_out_file_name = network_file_prefix + \"_tap.sif\", interaction_type=\"tap\")\n #biana_output_converter.filter_network_by_interaction_type(network_attribute_file_name = network_file_method_attribute, network_out_file_name = network_file_prefix + \"_no_tap.sif\", interaction_type=\"tap\", reverse_selection=True)\n #biana_output_converter.filter_network_by_interaction_type(network_attribute_file_name = network_file_method_attribute, network_out_file_name = filtered_network_file + \".no_tap\", interaction_type=\"tap\", reverse_selection=True)\n valid_ids = set([0,4,96,676,729,19,6,7,858,59,109]) # TAP\n biana_output_converter.filter_network_by_interaction_attribute_value(network_attribute_file_name = network_file_method_attribute, network_out_file_name = filtered_network_file + \".no_tap\", accept_attribute_value = lambda x: int(x) not in valid_ids)\n\n #interaction_to_sources = get_interaction_sources(network_file_source_attribute)\n with open(filtered_network_file, 'w') as f:\n for line in open(filtered_network_file + \".no_tap\"):\n id1, dummy, id2 = line.split()\n # Filter self interactions\n if id1 == id2:\n continue\n # Remove singleton interacions (that has evidence only from one database)\n #id_pair = sorted([id1, id2])\n #if is_singleton(interaction_to_sources[(id_pair[0], id_pair[1])]):\n # continue\n # Do not include ambigous user entities\n if id1 in ueids and id2 in ueids:\n f.write(line)\n return", "def test_erai_grid_snow_file_write(self):\n out_grid_file = os.path.join(self.writeDirectory, 'swe_grid_erai.asc')\n self.l2g.lsm_var_to_grid(out_grid_file=out_grid_file,\n lsm_data_var='sp',\n gssha_convert_var='swe')\n\n # Test\n compare_grid_file = os.path.join(self.readDirectory, 'swe_grid_erai.asc')\n self._compare_files(out_grid_file, compare_grid_file, precision=5)", "def filter_by_regular(filename):\n turnstile_data = pandas.read_csv(filename)\n turnstile_data = turnstile_data[turnstile_data.DESCn == 'REGULAR']\n return turnstile_data" ]
[ "0.704675", "0.68028927", "0.5841552", "0.58407056", "0.57574695", "0.5738572", "0.5637733", "0.5556143", "0.55201113", "0.54455745", "0.54209566", "0.5312087", "0.529742", "0.52541614", "0.5249761", "0.52250904", "0.5192693", "0.5181852", "0.5169016", "0.51613814", "0.51613796", "0.5157813", "0.51459104", "0.5141729", "0.513837", "0.5136057", "0.5114844", "0.5107493", "0.51067543", "0.50965285" ]
0.7821418
0
Check that grdfilter fails correctly.
def test_grdfilter_fails(): with pytest.raises(GMTInvalidInput): grdfilter(np.arange(10).reshape((5, 2)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_filter_errors(self):\n\n with self.assertRaises(ValueError):\n self.test_table.filter()\n\n with self.assertRaises(ValueError):\n self.test_table.filter(mode='wrongmode', Property='Property')", "def test_filter_function_settings_fail(self):\n with self.assertRaises(TypeError):\n self.es.register_filter('test')", "def test_no():\n errors = generate_errors(10, 5)\n assert NoFiltering().filter(errors) == errors", "def _validate_filter(filter):\n\n if filter.HasField('composite_filter'):\n for sub_filter in filter.composite_filter.filters:\n _validate_filter(sub_filter)\n elif filter.HasField('property_filter'):\n if filter.property_filter.op in UNSUPPORTED_OPERATORS:\n raise ValueError('Query cannot have any inequality filters.')\n else:\n pass", "def test_non_existing_filter(self):\n res = export_clips(fid=42)\n self.assertEqual(res.status_code, 204)", "def test_invalid_filter_shape(self):\r\n self.assertRaises(AssertionError, self.validate,\r\n (3, 2, 8, 8), (4, 3, 5, 5),\r\n 'valid')", "def test_bad_filter_names(tool):\n\n for cmd in (\"filter\", \"stats\", \"report\"):\n for argname in (\"rfilt\", \"rsel\", \"cfilt\", \"csel\"):\n # 'report' command don't have 'cfilt' and 'csel' arguments.\n if cmd == \"report\" and argname.startswith(\"c\"):\n continue\n # Need only one good testdata path.\n args = f\"--{argname} 'bad_filter' {tool.good_paths[0]}\"\n with pytest.raises(Exceptions.Error):\n tool.command(cmd, args)", "def test_filter_args_error_msg():\r\n nose.tools.assert_raises(ValueError, filter_args, f, [])", "def test_non_existing_filter(self):\n res = export_filter(fid=42)\n self.assertEqual(res.status_code, 204)", "def check_errors(self) -> None:", "def check_filter(self, filter):\n if filter is None:\n return True\n if not _valid_filter(filter):\n raise ValueError(filter)\n elif not self._filter_supported(filter):\n msg = \"{} not indexed for filter: '{}'.\"\n raise RuntimeError(msg.format(type(self).__name__, filter))", "def testUsingFilterTool(self):\n pass", "def _filter(self, entry):\n host = entry.get('@source_host', '')\n\n # errors will most likely come from job-s1\n if not is_from_production_host(host):\n return False\n\n return True", "def validate_filterval(filterval):\n if filterval != 'description' and filterval != 'fulldescription' and filterval != 'completed':\n return False\n else:\n return True", "def _check_filter_function(self, stats):\n\n if stats['filter_function'] is None:\n LOG.debug(\"Filter function not set :: passing host.\")\n return True\n\n try:\n filter_result = self._run_evaluator(stats['filter_function'],\n stats)\n except Exception as ex:\n # Warn the admin for now that there is an error in the\n # filter function.\n LOG.warning(\"Error in filtering function \"\n \"'%(function)s' : '%(error)s' :: failing host.\",\n {'function': stats['filter_function'],\n 'error': ex, })\n return False\n\n msg = \"Filter function result for host %(host)s: %(result)s.\"\n args = {'host': stats['host_stats']['host'],\n 'result': str(filter_result)}\n LOG.info(msg, args)\n\n return filter_result", "def test_filter_false(self):\n self.es.register_filter(foo=False)\n self.assertFalse(self.es.streamfilter(self.data))", "def verify_aggWasterwaterPathway(self):\n self.c.execute('''SELECT aggCode, (aggC1+aggC2+aggPercWithoutTreatment)\n FROM Agglomerations\n WHERE (aggC1 + aggC2 + aggPercWithoutTreatment) != 100 \n AND aggState=1\n ''')\n res = self.c.fetchall()\n if (len(res) > 0):\n return [False,\n \"In the agglomeration '%s' aggC1 + aggC2 + 
aggPercWithoutTreatment is equal to must be equal '%s' instead of 100%%\",\n res]\n else:\n return [True]", "def check_filterconfig(filterconfig, config):\n for f in filterconfig[\"filters\"]:\n if f[\"name\"] != \"frequency\":\n continue\n\n missing_freq_groups = set(iter_freq_groups(f[\"config\"][\"groups\"])) - set(\n iter_freq_groups(config[\"frequencies\"][\"groups\"])\n )\n assert not missing_freq_groups, \"Missing frequency group(s) in global config: {}\".format(\n missing_freq_groups\n )", "def test_check_data_over_specifying_percentiles(self):\n msg = \"Cannot specify both no_of_percentiles and percentiles\"\n with self.assertRaisesRegex(ValueError, msg):\n Plugin().process(self.cube, no_of_percentiles=3, percentiles=[25, 50, 75])", "def test_filterSamples_strict(self):\r\n with self.assertRaises(ValueError):\r\n self.overview_map.filterSamples(['PC.356', 'abc123'])\r\n\r\n with self.assertRaises(ValueError):\r\n self.empty_map.filterSamples(['foo'])", "def filter_errlog(columns):\r\n return bool( min(0, int(columns[POS_RETCODE])) )", "def FilterError(self, time_ranges, start_time):\r\n # Always add it to total time_range\r\n self.total_time_range.errors += 1\r\n\r\n for time_range in time_ranges:\r\n if time_range.TimeisValid(start_time):\r\n time_range.errors += 1\r\n return\r\n\r\n logging.warning(\"Error does not match any filters\")", "def test_filterSamples_strict(self):\n with self.assertRaises(ValueError):\n self.overview_map.filterSamples(['PC.356', 'abc123'])\n\n with self.assertRaises(ValueError):\n self.empty_map.filterSamples(['foo'])", "def test_filter():\n with pytest.raises(TypeError):\n Filter(description=\"some description\")", "def filter_unknown_bases(self):\n self.failed[\"unknowns\"] = self.stats.index[\n self.stats[\"unknowns\"] > self.tolerance[\"unknowns\"]\n ]\n self.passed = self.stats.drop(self.failed[\"unknowns\"])", "def test_feature_is_filtered(self):\n\n # Duplicate 1st row in var and assigned to 2nd\n self.validator.adata.var[\"feature_is_filtered\"][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', \"\n \"but there are 1 non-zero values in the corresponding columns of the matrix 'X'. 
\"\n \"All values for these features must be 0.\"\n ],\n )", "def filterMissing(vcfDict, newVCFdf, filters, log_file, filterType): \n #logic check\n print(\"Pre-filter: {}\".format(newVCFdf.shape))\n \n axis_variable=1\n if filterType=='markers':\n axis_variable=0\n fail_counter=0\n log_file.write(\"Failed {}\\n\".format(filterType))\n for i, frequencyDict in vcfDict.items():\n missingFreq=frequencyDict.get('.')\n if type(missingFreq)==float and missingFreq > filters:\n newVCFdf.drop([i],axis=axis_variable, inplace=True)\n fail_counter+=1\n if filterType=='individuals':\n individualMissingStats=\"{}\\t{}\\n\".format(i, frequencyDict['.'])\n log_file.write(individualMissingStats)\n else:\n log_file.write(\"No missing {} data found for {}\\n\".format(filterType, i))\n log_file.write(\"\\nFailed {} Percent: {:.2f}\\n\".format(filterType, fail_counter/len(vcfDict)*100)) \n print(\"\\nFailed {} Percent: {:.2f}\\n\".format(filterType, fail_counter/len(vcfDict)*100))\n individualDict, markerDict=processDataFrame(newVCFdf, FilterStep=1)\n\n #logic check\n print(\"Post-filter: {}\".format(newVCFdf.shape))\n\n log_file.flush()\n return individualDict, markerDict", "def test_get_filtered_val_not_filtered(ft_ll_mvar_1d, Yt_mvar_diffuse_missing):\n kf = Filter(ft_ll_mvar_1d, Yt_mvar_diffuse_missing, for_smoother=True)\n with pytest.raises(TypeError) as error:\n kf.get_filtered_val()\n expected_result = 'The Kalman filter object is not fitted yet'\n result = str(error.value)\n assert result == expected_result", "def checkbands() :\n dontThrowException = False \n success = s.checkConfig(dontThrowException)\n return success", "def test_no_filter(self):\r\n\r\n d1 = {\"% IDENTITY\": \"97.6\"}\r\n d2 = {\"% IDENTITY\": \"0.0\"}\r\n d3 = {\"% IDENTITY\": \"100.0\"}\r\n\r\n self.assertTrue(no_filter(d1))\r\n self.assertTrue(no_filter(d2))\r\n self.assertTrue(no_filter(d3))" ]
[ "0.67835367", "0.6310252", "0.62462205", "0.61204123", "0.6087536", "0.60227674", "0.59823984", "0.59056413", "0.58599424", "0.57920605", "0.573395", "0.5714643", "0.5699136", "0.5680347", "0.5672416", "0.56558776", "0.56553173", "0.56384057", "0.563598", "0.55971026", "0.55884373", "0.55520475", "0.5549036", "0.553329", "0.55209523", "0.5518941", "0.55035734", "0.5500716", "0.5494012", "0.5493769" ]
0.71679807
0
Generate Pydantic Model files given the Postman Collection input file.
def generate_models(input_file): if not os.path.exists(input_file): console.print( f":pile_of_poo: [bold red]No file found at the given path:[/bold red] [i yellow]{input_file}[/i yellow]" ) exit(1) # TODO: Add try/catch for other possible errors collection = postman.load_postman_collection_from_file(input_file) folders = postman.map_response_bodies_to_folders(collection) written_path = postman.write_collection_models_to_files(folders) console.print(":smiley: SUCCESS!", style="bold green") console.print("Models written to:", list(set(written_path)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def input_models():\n return [\n PDBFile(\n Path(golden_data, \"protdna_complex_1.pdb\"),\n path=golden_data,\n score=42.0,\n restr_fname=Path(golden_data, \"example_ambig_1.tbl\")\n ),\n PDBFile(\n Path(golden_data, \"protdna_complex_2.pdb\"),\n path=golden_data,\n score=28.0,\n restr_fname=Path(golden_data, \"example_ambig_2.tbl\")\n )]", "def _convert(self, fn, suffix='json', path='jsonschema', name=None,\n root_class_name=None, data_files=[], target_class=None):\n ie = JsonSchemaImportEngine()\n d = os.path.join(INPUT_DIR, path)\n schema = ie.load(os.path.join(d, f'{fn}.{suffix}'), name=name, format=suffix, root_class_name=root_class_name)\n model_path = os.path.join(OUTPUT_DIR, f'{fn}.yaml')\n write_schema(schema, model_path)\n roundtrip_path = os.path.join(OUTPUT_DIR, f'{fn}.roundtrip.json')\n with open(roundtrip_path, 'w') as stream:\n stream.write(JsonSchemaGenerator(model_path).serialize())\n python_path = os.path.join(OUTPUT_DIR, f'{fn}.py')\n with open(python_path, 'w') as stream:\n stream.write(PythonGenerator(model_path).serialize())\n compile_python(python_path)\n # TODO: test data_files\n return schema", "def mongoalchemy(schema_file, output=\"-\"):\n schema = read_yaml(schema_file)\n with open_output_stream(output) as f:\n print(Template(models_template).render(schema=schema), f)", "def main():\n os.makedirs(PATH)\n fetch_data()\n convert_to_json(model_list, 'models.json', is_model=True)\n convert_to_json(backend_list, 'backends.json')\n convert_to_json(type_list, 'types.json')\n convert_to_json(featurizer_list, 'featurizers.json')", "def process_input_files(list_input_files):\n global dict_models_results\n global list_spacy_docs\n \n for input_file in list_input_files:\n prefix = prefix_from_filename(input_file)\n \n with open(input_file) as f:\n list_cases = json.load(f)\n dict_models_results[prefix] = list_cases\n \n \n #extract list of questions from all vignettes and create a mapping page -> vignette question\n dict_questions = {}\n for prefix, list_cases in dict_models_results.items():\n for vignette in list_cases:\n dict_questions[vignette[\"book_page\"]] = vignette[\"question\"]\n \n \n for book_page,question in dict_questions.items():\n doc_q = load_bner_onto_tokens_extension(question, book_page)\n list_spacy_docs.append(doc_q)\n \n return", "async def parse_files(file):\n data = yaml.full_load(file)\n try:\n new_data = {\n \"task_name\": data[\"metadata\"][\"name\"],\n \"task_type\": data[\"kind\"],\n \"scheduled_at\": data[\"spec\"].get(\"schedule\"),\n }\n\n except KeyError as e:\n raise KeyError(f\"Invalid yaml file uploded \\n {e}\")\n model = TaskModel(**new_data)\n return model", "def generate(self):\n try:\n self._parse_groups()\n self._parse_types()\n self._parse_enums()\n self._parse_features()\n self._parse_extensions()\n self._add_extra_enums()\n self._parse_and_build_commands()\n self._build_all_enums()\n self._build_enum_groups()\n self._generate_files()\n except Exception as exception:\n print('Generate failed: {}'.format(str(exception)))\n raise", "def process_files(args):\n coll = build_collection(args.data_path, args.include_online_only)\n\n for import_file in args.imports:\n _, ext = os.path.splitext(import_file)\n import_serializer_class = ser_interface.MtgSsmSerializer \\\n .by_extension_and_format(ext, args.import_format)\n import_serializer = import_serializer_class(coll)\n print('Importing counts from import: %s' % import_file)\n import_serializer.read_from_file(import_file)\n\n _, ext = os.path.splitext(args.collection)\n 
serializer_class = ser_interface.MtgSsmSerializer.by_extension_and_format(\n ext, args.format)\n serializer = serializer_class(coll)\n\n if os.path.exists(args.collection):\n print('Reading counts from existing file.')\n serializer.read_from_file(args.collection)\n backup_name = args.collection + '.bak-{:%Y%m%d_%H%M%S}'.format(\n datetime.datetime.now())\n print('Moving existing collection to backup: %s' % backup_name)\n shutil.move(args.collection, backup_name)\n\n print('Writing collection to file.')\n serializer.write_to_file(args.collection)", "def process_proto_file(proto_file) -> Iterable[OutputFile]:\n\n _, package_root = build_node_tree(proto_file)\n output_filename = _proto_filename_to_generated_header(proto_file.name)\n generator = NanopbCodeGenerator(output_filename)\n codegen.generate_package(proto_file, package_root, generator)\n\n codegen.package_stubs(package_root, generator.output, StubGenerator())\n\n return [generator.output]", "def createClassFile( p ):\n create_modules( p[\"package\"] )\n name = p[\"protocol\"][\"name\"]\n name.lower()\n path = os.path.join( *p[\"package\"].split( \".\" ) )\n with open( \"./%s/%s.py\" % ( path, name ), \"w\" ) as f:\n for i in p[\"imports\"]:\n createClassFile( i )\n\n c = Klass( package=p[\"package\"], includes=p[\"imports\"], **p[\"protocol\"] )\t\n\n f.write( c.generate() )", "def buildModelFromFile(fname):\n directory = os.path.dirname(fname)\n\n f = open(fname, \"r\")\n in_map = yaml.safe_load(f)\n f.close()\n\n expression = \"\"\n\n return build_model_from_dict(in_map)", "def generate(request, response):\n for file_descriptor in request.proto_file:\n LOGGER.info('Processing \"{}\".', file_descriptor.name)\n if file_descriptor.name not in request.file_to_generate:\n LOGGER.info(\n 'File \"{}\" is not supposed to be processed, skipping.',\n file_descriptor.name\n )\n continue\n\n target_dir = path.dirname(path.normpath(\n file_descriptor.name\n ))\n filename, _ = path.splitext(path.basename(file_descriptor.name))\n\n pb_module = filename + '_pb2'\n content = MESSAGES_TEMPLATE.render(\n PB_MODULE=pb_module,\n FILE_DESCRIPTOR=file_descriptor\n )\n\n target_path = path.join(target_dir, filename + '.py')\n\n output_file = response.file.add()\n output_file.name = target_path\n output_file.content = content\n\n LOGGER.info('Writing file \"{}\".', target_path)", "def create_models( self ):", "def main():\n args = utils.read_arguments(__doc__)\n documents = []\n filenames = list(traverse_directory(args[\"input_dirpath\"],'*clean*.txt'))\n labels_dirname = args[\"labels_dirpath\"]\n labels_from_json = get_all_labels_from_json(labels_dirname)\n for filename in tqdm(filenames):\n with AnnotatedIBMFactory(filename) as instance_extractor:\n filename_key = filename.split(\"/\")[-1]\n document = instance_extractor.build_document(\n labels_from_json[filename_key])\n documents.append(document)\n utils.pickle_to_file(documents, args['output_file'])", "def gen_model():\n\n\tmodel = skipthoughts.load_model()\n\treturn model", "def _generate_model(self, specs, experiment = None, filename = 'dist/app/Model.hs'):\n with open(filename, \"w\") as file:\n self._write_model(file, specs, experiment = experiment)", "def read_model(self):\n filename=self.name + '_words'\n self.words=file_read(filename)\n\n filename2= self.name+'_word_lengths'\n self.word_lengths=file_read(filename2)\n\n filename3=self.name+'_stems'\n self.stems=file_read(filename3)\n\n filename4=self.sentence_lengths+'_sentence_lengths'\n self.setence_lengths=file_read(filename4)\n\n filename5= 
self.endings+'_endings'\n self.endings=file_read(filename5)", "def genCode(self, fileName, allowedTypes, genGraph = 1, isRootNode = 0, \r\n metaModelName = None, export = 0, newTypes = None, \r\n nodesToGenList = [], openModelStringList=[], attrGenFix=False):\r\n file = open(fileName, \"w+t\" )\r\n\r\n dir, fil = os.path.split(fileName)\r\n funcName = string.split (fil, \".\")\t\t\t\t\t# compose class name\r\n\r\n if export == 0:\r\n file.write('\"\"\"\\n')\r\n file.write(\"__\"+ fil +\"_____________________________________________________\\n\")\r\n file.write(\"\\n\") \r\n file.write(\"Automatically generated AToM3 Model File (Do not modify directly)\\n\")\r\n file.write(\"Author: \"+USER_NAME+\"\\n\")\r\n file.write(\"Modified: \"+time.asctime()+\"\\n\") \r\n file.write(\"__\"+ len(fil)*\"_\" +\"_____________________________________________________\\n\")\r\n file.write('\"\"\"\\n')\r\n #file.write('from graph_ASG_ERmetaMetaModel import *\\n')\t\t# just for the case!\r\n file.write('from stickylink import *\\n')\t\t\t\t# necessary if we describe some graphLinks...\r\n file.write('from widthXfillXdecoration import *\\n')\t\t\t# necessary if we describe some graphLinks...\r\n\r\n # import the subclass ...\r\n if( self.getClass() not in self.nodeTypes ):\r\n file.write('from '+self.getClass()+' import *\\n')\r\n \r\n # import all the node types...\r\n for nodetype in self.nodeTypes:\r\n if( self.listNodes[nodetype] != [] ): \r\n file.write('from '+nodetype+' import *\\n') \r\n \r\n # Import all the graphical appearences of the node types... that\r\n # are actually used! \r\n # Added by Denis Dube, last modified on Sept. 9, 2004\r\n if( genGraph ): \r\n # STEP 1: Find all graphObjects used in the model\r\n graph_objectDict = dict()\r\n for nodetype in self.listNodes.keys():\r\n for node in self.listNodes[nodetype]:\r\n if( node.graphClass_ ):\r\n graph_objectDict[ node.graphObject_.getGraphClassName() ]=1\r\n # STEP 2: Create the import statements for each graphObject\r\n for graphObject in graph_objectDict.keys():\r\n file.write('from '+graphObject+' import *\\n')\r\n # NOTE: I think the next two statements are caution overkill...\r\n #file.write('try: from '+graphObject+' import *\\n')\r\n #file.write('except: print \"WARNING: unable to load the graphical appearence file: '+graphObject+'.py\" \\n')\r\n \r\n # import the basic types...\r\n for typ in allowedTypes.keys():\r\n typeInstance, params = allowedTypes[typ]\r\n typeName = typeInstance.__name__\r\n file.write('from '+typeName+' import *\\n')\r\n \r\n # Generate the ASG constructor\r\n if( attrGenFix ):\r\n self.__genASGconstructor( file, funcName ) \r\n else:\r\n # Old way\r\n file.write('\\ndef '+funcName[0]+'(self, rootNode):\\n')\r\n \r\n # Generate code for the ASGroot attributes\r\n if( isRootNode ): \r\n # Should attrGenFix be always true? 
More testing required\r\n #todo: attrGenFix == True always?\r\n if( attrGenFix ): self.__genAttributesROOT( file )\r\n else: self.genAttributesCode(file, genGraph, \"rootNode\")\r\n\r\n self.writeGraph2File(file, genGraph, isRootNode, None, \" \", 1, funcName[0], nodesToGenList=nodesToGenList)\r\n\r\n # generate code for the sub-models\r\n counter = 0\r\n if( not nodesToGenList ):\r\n for nodetype in self.nodeTypes:\r\n for node in self.listNodes[nodetype]: \r\n newFile = funcName[0]+str(counter)\r\n res = node.genCode(os.path.join(dir, newFile+'.py'), allowedTypes, genGraph, 0)\r\n counter = counter + 1\r\n else: \r\n for node in nodesToGenList:\r\n newFile = funcName[0]+str(counter)\r\n res = node.genCode(os.path.join(dir, newFile+'.py'), allowedTypes, genGraph, 0)\r\n counter = counter + 1\r\n \r\n\r\n if isRootNode:\r\n hierarchical = self.isHierarchical()\r\n if export == 0:\r\n if hierarchical:\r\n file.write('def main'+funcName[0]+'(self, ASGroot):\\n')\r\n # file.write(' self.ASGroot = '+self.getClass()+'(self)\\n')\r\n file.write(' self.'+funcName[0]+'(self, ASGroot)\\n\\n')\r\n file.write(' self.'+funcName[0]+'_connections(self, ASGroot)\\n\\n')\r\n file.write('newfunction = main'+funcName[0]+'\\n\\n')\r\n else:\r\n file.write('newfunction = '+funcName[0]+'\\n\\n')\r\n if newTypes and len(newTypes)>0: # generate a list of newly added types\r\n file.write('loadedTypes = [')\r\n counter = 0\r\n for nt in newTypes:\r\n if counter > 0: file.write(',')\r\n file.write(str(nt))\r\n counter = counter + 1\r\n file.write(']\\n')\r\n \r\n self.genLoadedMMName( file )\r\n if( attrGenFix ): file.write( '\\natom3version = \\'0.3\\'\\n' )\r\n file.close()\r\n return funcName[0] \t\t\t\t# this indicates that we've done something\r", "def process_input_file(filename):\n\n # Parse the input file\n try:\n ast = parser.parse(open(filename, 'r').read())\n except pyparsing.ParseBaseException as e:\n print \"Parse error in %s: %s\" % (os.path.basename(filename), str(e))\n sys.exit(1)\n\n ofinput = of_g.OFInput()\n\n # Now for each structure, generate lists for each member\n for s in ast:\n if s[0] == 'struct':\n name = s[1].replace(\"ofp_\", \"of_\", 1)\n members = [dict(m_type=x[0], name=x[1]) for x in s[2]]\n ofinput.classes[name] = members\n ofinput.ordered_classes.append(name)\n if name in type_maps.inheritance_map:\n # Clone class into header class and add to list\n ofinput.classes[name + \"_header\"] = members[:]\n ofinput.ordered_classes.append(name + \"_header\")\n elif s[0] == 'metadata':\n if s[1] == 'version':\n log(\"Found version: wire version \" + s[2])\n if s[2] == 'any':\n ofinput.wire_versions.update(of_g.wire_ver_map.keys())\n elif int(s[2]) in of_g.supported_wire_protos:\n ofinput.wire_versions.add(int(s[2]))\n else:\n debug(\"Unrecognized wire protocol version\")\n sys.exit(1)\n found_wire_version = True\n\n if not ofinput.wire_versions:\n debug(\"Missing #version metadata\")\n sys.exit(1)\n\n return ofinput", "def _get_models_from_metafile(dir: str):\n meta_indexes = load(osp.join(dir, 'model-index.yml'))\n for meta_path in meta_indexes['Import']:\n # meta_path example: mmcls/.mim/configs/conformer/metafile.yml\n meta_path = osp.join(dir, meta_path)\n metainfo = load(meta_path)\n yield from metainfo['Models']", "def main():\n\t# import training data\n\tfiles = [INPATH + f for f in os.listdir(INPATH) if \".json\" in f]\n\n\t# import books\n\tprint(\"Loading training data...\")\n\tbookList = loadBooks(files)\n\tprint(\"Load complete.\")\n\n\t# loop through element types and store 
data structure\n\tfor key, value in ELEMENTS.items():\n\t\tprint(\"Generating: %s\" % key)\n\n\t\t# set file outpath\n\t\toutfile = \"%s.json\" % key\n\t\toutpath = OUTPATH % outfile\n\n\t\tgenerateTrain(bookList, key, value, outpath)", "def test_models_datatypes(self) -> None:\n directory = os.path.join(PAYLOAD_DIRECTORY, 'datatype_payloads')\n for filename in os.listdir(directory):\n filepath = os.path.join(directory, filename)\n # Load the payload\n with open(filepath, encoding='utf-8') as payload_file:\n payload: Dict[str, Any] = json.load(payload_file)\n # Extract the collection name\n _ = payload.pop('returned')\n _ = payload.pop('timing', None)\n collection = list(payload.keys())[0][:-5]\n # Find the appropriate class for this collection\n type_: Ps2Object\n cls_: Optional[Type[RESTPayload]] = None\n for name in ps2.__dict__['__all__']:\n type_ = getattr(ps2, name)\n if not hasattr(type_, 'collection'):\n continue\n if type_.collection == collection:\n # pylint: disable=protected-access\n cls_ = type_._model # type: ignore\n assert cls_ is not None, (\n f'Type for collection \"{collection}\" not found')\n # Instantiate any payloads found\n for data in payload[f'{collection}_list']:\n instance = cls_(**data)\n self.assertIsInstance(instance, RESTPayload)", "def _generate_objects_file(self):\n xmls = glob(f'{ROOT}/Annotations/**/*.xml', recursive=True)", "def run(cls, model):\n label = model.label\n print(\"stage1: {label} model: initializing\".format(label=label))\n\n defs_input = model.define_api() # input, original definitions\n\n print(\"stage1: {label} model: analyzing API\".format(label=label))\n\n # Compute any needed derivatives which are not already in the API\n # and for which we have the defs.\n defs = defs_input.copy() # output, final optimized definitions\n for j, key in enumerate(sorted(defs_input.keys(), key=symutil.sortkey), start=1): # sort for progress readability\n name = symutil.derivatives_to_names_in(key) # key is a Symbol or a Derivative\n expr = defs_input[key]\n\n print(\"stage1: ({iteration:d}/{total:d}) {label} model: processing {name}\".format(iteration=j,\n total=len(defs_input.keys()),\n label=label, name=name))\n\n defs[key] = cls.process(expr, defs, model.simplify)\n\n # Delete identically zero definitions\n zero = sy.S.Zero\n defs = {k: v for k, v in defs.items() if v != zero}\n\n print(\"stage1: {label} model: generating code\".format(label=label))\n\n basename = \"mgs_{label}_impl\".format(label=label) # filename without extension\n name_expr_pairs = cls.make_name_expr_pairs(defs)\n generated_code = codegen(name_expr_pairs,\n language=\"f95\",\n project=\"elmer-mgs-galfenol\",\n prefix=basename)\n\n return [(label, filename, cls.finalize(content))\n for filename, content in generated_code]", "def build_model():", "def main(input_params):\n\n store = kgenlib.BaseStore()\n\n input_files = input_params[\"files\"]\n output_file = input_params.get(\"output_file\")\n\n for file in input_files:\n store.add(kgenlib.BaseStore.from_yaml_file(file))\n\n mutations = input_params.get(\"mutations\", {})\n store.process_mutations(mutations)\n return store.dump(output_filename=output_file)", "def assemble(metadata_file):\n\n def read(file):\n with open(file) as yaml:\n return load(yaml.read())\n\n def add_name(info):\n info['name'] = slugify(info['title'], separator='_')\n return info\n\n def get_files(filetype):\n filename = metadata_file.replace('metadata', filetype)\n folder = dirname(metadata_file)\n schema_files_pattern = join(folder, filename)\n return 
glob(schema_files_pattern)\n\n descriptor = add_name(read(metadata_file))\n resources = [add_name(read(file)) for file in get_files('resource')]\n model = get_files('model')\n\n descriptor['resources'] = resources\n if model and len(model) == 1:\n descriptor['model'] = model.pop()\n\n return DataPackage(descriptor)", "def gen_review_data(fp: str) -> None:\n with open(fp, encoding='utf-8') as f:\n for line in f:\n data = json.loads(line)\n utils.preprocess_raw_json(data)\n doc = {\n \"_index\": \"review\",\n \"_source\": data\n }\n yield doc", "def __init__(self, schema, input_files, output_path):\n self.schema = schema\n self.input_files = input_files\n self.output_path = output_path", "def parse_model_files():\n a_copy = PY_FILES[::]\n for f in a_copy:\n if 'model' in f:\n MODEL_FILES.append(f)\n PY_FILES.remove(f)" ]
[ "0.56165767", "0.5588253", "0.5478929", "0.54493415", "0.5426958", "0.53957415", "0.53064173", "0.52855295", "0.52591866", "0.52293295", "0.5166549", "0.50401545", "0.5026971", "0.50226295", "0.5017243", "0.4993968", "0.4981129", "0.49646968", "0.4943805", "0.4927351", "0.49195746", "0.4914993", "0.48990965", "0.48882422", "0.48761278", "0.48731285", "0.4858692", "0.48397392", "0.48391703", "0.48341733" ]
0.7841486
0
Tests that update_status creates a correctly formatted url. Compares the url created by update_status to correct_url
def test_update_status(self): content_url = 'https://api.github.com' status = 'success' token = '123' correct_url = 'https://123:[email protected]/' post_req = update_status(content_url, status, token) self.assertEqual(correct_url, post_req.url) """ Tests that the POST request will be invalid if the url is not linked to a PR as stated in the API and if the access token is not valid. In this case the POST request JSON data will have the form {"message":"Bad credentials",... """ self.assertEqual(post_req.json()['message'], 'Bad credentials') """ NOTE: this test might fail if the server for the repo Test-server1 is not running. Tests that the POST request will be invalid if the url is linked to a PR as stated in the API and if the access token is valid. In this case the POST request JSON data will have the form {'url': ...,'state': 'success'. """ content_url = 'https://api.github.com/repos/A1337li/Test-server1/statuses/4f22d54572b09dd559f953f5f5de675752a1dc4f' token = '254fe0318d9bd3e107899127fcd63ff1dedfb44d' post_req = update_status(content_url, status, token) #self.assertEqual(post_req.json()['state'], 'success') post_req = update_status(content_url, 'hello', token) self.assertEqual(post_req, None)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _checkServiceURL(self, serviceName, options):\n url = self._getURL(serviceName, options)\n system = options['System']\n module = options['Module']\n self.log.info(\"Checking URLs for %s/%s\" % (system, module))\n urlsConfigPath = os.path.join('/Systems', system, self.setup, 'URLs', module)\n urls = gConfig.getValue(urlsConfigPath, [])\n self.log.debug(\"Found configured URLs for %s: %s\" % (module, urls))\n self.log.debug(\"This URL is %s\" % url)\n runitStatus = options['RunitStatus']\n wouldHave = 'Would have ' if not self.commitURLs else ''\n if runitStatus == 'Run' and url not in urls:\n urls.append(url)\n message = \"%sAdded URL %s to URLs for %s/%s\" % (wouldHave, url, system, module)\n self.log.info(message)\n self.accounting[serviceName + \"/URL\"][\"Treatment\"] = message\n self.csAPI.modifyValue(urlsConfigPath, \",\".join(urls))\n if runitStatus == 'Down' and url in urls:\n urls.remove(url)\n message = \"%sRemoved URL %s from URLs for %s/%s\" % (wouldHave, url, system, module)\n self.log.info(message)\n self.accounting[serviceName + \"/URL\"][\"Treatment\"] = message\n self.csAPI.modifyValue(urlsConfigPath, \",\".join(urls))", "def test_main_overview_status_url(self):\n\n # change config\n set_main_overview('status')\n\n # login testuser\n self.client.login(\n username='testuser_main_overview', password='RYgxCfV2NRcfXlJvsSHP'\n )\n # get reverse url\n url = reverse('main_overview')\n # compare url\n self.assertEqual(url, '/main_overview/')\n # create url\n destination = urllib.parse.quote('/config/status/')\n # get response\n response = self.client.get('/main_overview/')\n # compare redirect\n self.assertRedirects(\n response, destination, status_code=302, target_status_code=200\n )", "def url_check_tester(client, url, status_code):\n response = client.get(url)\n assert response.status_code == status_code, \\\n f'Unexpected status code for {url}'\n assert response.data == b''", "def test_update_short_url(self):\n old_target = 'http://old.com'\n new_target = 'http://new.com'\n\n old_short_url = ShortUrl.objects.create(target=old_target)\n\n client = RequestsClient()\n response = client.patch('http://testserver/api/v1/urls/%s' % old_short_url.hash, json={'target': new_target})\n assert response.status_code == 200\n\n short_url = dict(response.json())\n self.assertEqual(short_url.get('target'), new_target)", "def test_url_tranform(self):\r\n response = self.client.get('/courses/edx/math101/2014/wiki/math101/')\r\n self.assertIn('/courses/edx/math101/2014/wiki/math101/_edit/', response.content)\r\n self.assertIn('/courses/edx/math101/2014/wiki/math101/_settings/', response.content)", "def test_absolute_url(self):\n response = self.client.get(self.htsv.get_absolute_url())\n self.assertEqual(response.status_code, 200)", "def test_url_existence(self):\n self.assertEquals(self.response.status_code, 200)", "def test_correct_url(self, ip_address, bool_value):\n self.assertEqual(check_correct_url(ip_address), bool_value)", "def test_api_urls():\n # Test the status message - 404 not good , 200 good\n assert API_RH.create_req().status_code == 200, \"The tests for URLs were successful\"", "def test_computed_url(self):\n t = BuildVersionRequest()\n self.assertEqual(\"version/build\", t.url_path())", "def test_upload_step__invalid_url(self):\n # Set Up\n self.go_to_step(FeedUpdateWizard.UPLOAD_STEP)\n\n # Test\n response = self.client.post(\n self.WIZARD_URL,\n {\n self.WIZARD_CURRENT_STEP: FeedUpdateWizard.UPLOAD_STEP,\n self.SELECTED_ITEM: self.ITEM_URL_LINK,\n \"url_link\": 
\"incorrect-url\",\n \"submit\": \"submit\",\n },\n )\n\n # Assert\n self.assertEqual(response.status_code, 200)\n error_data = response.context_data[\"wizard\"][\"form\"].errors.get_json_data()\n self.assertEqual(\n response.context[\"wizard\"][\"steps\"].current, FeedUpdateWizard.UPLOAD_STEP\n )\n self.assertEqual(\n error_data,\n {\n \"url_link\": [\n {\"message\": \"Enter a valid URL to your data set\", \"code\": \"invalid\"}\n ]\n },\n )", "def test_is_url(self):\n\n url = \"https://shadowrun.needs.management\"\n self.assertTrue(run(verification.is_url(url)))\n\n url = \"https:// www.google.com\"\n self.assertFalse(run(verification.is_url(url)))", "def test_format_price_api_url_returns_formatted_url(self):\n expected_url = 'https://api.coinbase.com/v2/prices/buy-BTC/USD'\n url = format_price_api_url(CURRENCY_BTC, EXCHANGE_RATE_USD, PRICE_BUY)\n self.assertEqual(expected_url, url)", "def test_update_ignore_url(self):\n instance = _create_image_instance()\n\n # Retrieve/Update URL\n url = self.URL + f\"{instance.pk}/\"\n\n retrieve_response = self.client.get(url)\n retrieve_data = retrieve_response.data\n retrieve_url = retrieve_data[\"image\"]\n\n update_response = self.client.patch(url, retrieve_data, format=\"json\")\n update_data = update_response.data\n update_url = update_data[\"image\"]\n self.assertEqual(update_response.status_code, status.HTTP_200_OK)\n self.assertEqual(retrieve_url, update_url)", "def test_filter_status(self):\n self.factory.create()\n obj = self.factory.create()\n response = self._get(get_kwargs={'status': obj.status.pk})\n self.assertRedirectsNoFollow(response, obj.get_absolute_url())", "def get_server_write_status_url(self):\n write_url: str = self.bot_data_file[\"bot_status\"][\"server_state_saving\"][\"writeStateUrl\"]\n print(\"Api:\" + self.empty_api_key)\n print(\"Url:\" + self.empty_url)\n if self.get_bot_save_state_to_server() and write_url.startswith(self.empty_url):\n print(\n \"save_state_to_server IS TRUE BUT STATUS WRITE URL STARTS WITH 'http://URL/' SO IS NOT VALID - ABORTING\")\n quit(1)\n return write_url", "def test_computed_url(self):\n t = self.create_request_object()\n self.assertEqual(\"metadata/libraries/Fixitol(Dev)/versions/1234\", t.url_path())", "def test_repo_updated():\n\n status = \"The following updates were applied\"\n report_status = BehavioralUtils.check_repo_updated('drupal', 'builds')\n assert report_status == status", "def test_file_url_status_error(self):\n file_url = \"file_url\"\n status = FileManagementStatus(\n FileManagementStatusType.ERROR,\n FileManagementErrorType.MALFORMED_URL,\n )\n expected_topic = (\n self.factory.common_topic + WAPMF.FILE_URL_DOWNLOAD_STATUS\n )\n expected_payload = json.dumps(\n {\n \"fileUrl\": file_url,\n \"status\": status.status.value,\n \"error\": status.error.value,\n }\n )\n expected_message = Message(expected_topic, expected_payload)\n serialized_message = self.factory.make_from_file_url_status(\n file_url, status\n )\n\n self.assertEqual(expected_message, serialized_message)", "def test_update_task_docs_not_invalid_url(self):\n task_id = util.MOCK_UUID_5\n rv = TEST_CLIENT.patch(\n f\"/tasks/{task_id}\",\n json={\"docs\": \"notAValidUrl\"},\n )\n result = rv.json()\n expected = {\"code\": \"NotValidUrl\", \"message\": \"Input is not a valid URL\"}\n self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 400)", "def test_upload_step__valid_url(self):\n # Set Up\n self.go_to_step(FeedUpdateWizard.UPLOAD_STEP)\n\n # Test\n url_link = (\n 
\"http://product.itoworld.com/product/data/files/\"\n \"ea_20-204-_-y08-1.xml?t=file&g=test_txc&p=:ea_20-204-_-y08-1.xml\"\n \"&u=144&key=4e9207c6cb0f7157ef85c657dddad3bd\"\n )\n\n response = self.client.post(\n self.WIZARD_URL,\n {\n self.WIZARD_CURRENT_STEP: FeedUpdateWizard.UPLOAD_STEP,\n self.SELECTED_ITEM: self.ITEM_URL_LINK,\n \"url_link\": url_link,\n \"submit\": \"submit\",\n },\n )\n\n # Assert\n self.assertEqual(response.status_code, 200)\n self.assertEqual(\n response.context[\"wizard\"][\"steps\"].current, FeedUpdateWizard.COMMENT_STEP\n )", "def test_api_object_url_1(self, api_object, server_address):\n expected_url = 'https://{}/api/domain/{}/'.format(server_address, api_object.uuid_)\n assert api_object.api_object_url == expected_url", "def test_success(self):\n response = self.client.post(\n reverse('url_shortener'),\n data=json.dumps({'url': 'https://www.techcrunch.com/some-slug-here-starting-from-s'}),\n content_type='application/json'\n )\n self.assertEqual(response.status_code, 201)\n\n response = self.client.post(\n reverse('url_shortener'),\n data=json.dumps({\n 'url': 'https://www.techcrunch.com/some-other-slug-here-starting-again-from-s'\n }),\n content_type='application/json'\n )\n self.assertEqual(response.status_code, 201)\n\n response = self.client.post(\n reverse('url_shortener'),\n data=json.dumps({'url': 'https://www.techcrunch.com/some-third-long-slug'}),\n content_type='application/json'\n )\n self.assertEqual(response.status_code, 201)\n\n content = json.loads(response.content)\n self.assertEqual(content.get('shortened_url'), '{}/{}'.format(SITE_URL, 'oaf'))", "def test_status_code(self):\n formatted_status_code = get_status_code('python')\n self.assertEqual(formatted_status_code, 200) #compares the test result with the result expected", "def test_already_shortened_success(self):\n # Shorten a URL\n self.client.post(\n reverse('url_shortener'),\n data=json.dumps({'url': 'https://www.techcrunch.com/a-slug-here-starting-from-a'}),\n content_type='application/json'\n )\n\n # Try to shorten the same URL again.\n response = self.client.post(\n reverse('url_shortener'),\n data=json.dumps({'url': 'https://www.techcrunch.com/a-slug-here-starting-from-a'}),\n content_type='application/json'\n )\n self.assertEqual(response.status_code, 200)\n content = json.loads(response.content)\n self.assertEqual(content.get('shortened_url'), '{}/{}'.format(SITE_URL, 'a_test'))", "def check_response_valid_update(response: HTTPResponse) -> bool:\n return response.status_code == 200", "def test_webhook_bad_status_update(self):\n payload = json.dumps({\n 'matrix': [\n {\n 'config': {\n 'env': [\n 'REVIEWBOARD_STATUS_UPDATE_ID=%d'\n % (self.status_update.pk + 1),\n 'REVIEWBOARD_TRAVIS_INTEGRATION_CONFIG_ID=%d'\n % self.config.pk,\n ],\n },\n },\n ],\n })\n self.spy_on(TravisCIWebHookView._validate_signature,\n owner=TravisCIWebHookView,\n call_fake=lambda self, request, integration_config: True)\n\n rsp = self.client.post(self.webhook_url, {'payload': payload})\n\n self.assertEqual(rsp.status_code, 400)\n self.assertEqual(\n rsp.content,\n b'Unable to find matching status update ID %d.'\n % (self.status_update.pk + 1))", "def test_update_short_url_with_device(self):\n old_target = 'http://old.com'\n old_mobile_target = 'http://mobile.old.com'\n old_tablet_target = 'http://tablet.old.com'\n old_desktop_target = 'http://desktop.old.com'\n\n new_target = 'http://new.com'\n new_mobile_target = 'http://mobile.new.com'\n new_tablet_target = 'http://tablet.new.com'\n new_desktop_target = 
'http://desktop.new.com'\n\n new_data = {\n 'target': new_target,\n \"mobile_url\": {\"target\": new_mobile_target},\n \"tablet_url\": {\"target\": new_tablet_target},\n \"desktop_url\": {\"target\": new_desktop_target}\n }\n\n\n old_short_url = ShortUrl.objects.create(target=old_target)\n old_short_url.mobile_url = old_mobile_target\n old_short_url.tablet_url = old_tablet_target\n old_short_url.desktop_url = old_desktop_target\n old_short_url.save()\n\n client = RequestsClient()\n response = client.patch('http://testserver/api/v1/urls/%s' % old_short_url.hash, json=new_data)\n assert response.status_code == 200\n\n short_url = dict(response.json())\n self.assertEqual(short_url.get('target'), new_target)\n self.assertEqual(short_url.get('mobile_url').get('target'), new_mobile_target)\n self.assertEqual(short_url.get('tablet_url').get('target'), new_tablet_target)\n self.assertEqual(short_url.get('desktop_url').get('target'), new_desktop_target)", "def test_public_status_page_patch_public_status_page(self):\n pass", "def test_existing_url_entry_error(self):\r\n self._login_admin()\r\n\r\n test_url = u\"http://bmark.us/test\"\r\n existing_url_message = \"URL already Exists\"\r\n\r\n # Add The Bookmark Once\r\n res = self.app.post(\r\n '/admin/new_error',\r\n params={\r\n 'url': test_url,\r\n 'description': '',\r\n 'extended': '',\r\n 'tags': ''\r\n })\r\n self.assertEqual(\r\n res.status,\r\n \"302 Found\",\r\n msg='recent status is 302 Found, ' + res.status)\r\n\r\n # Add the Bookmark Again\r\n res = self.app.post(\r\n '/admin/new_error',\r\n params={\r\n 'url': test_url,\r\n 'description': '',\r\n 'extended': '',\r\n 'tags': ''\r\n })\r\n self.assertIn(existing_url_message, res.body)" ]
[ "0.6494839", "0.63909346", "0.62975353", "0.61703724", "0.61658883", "0.61041194", "0.6101163", "0.6069293", "0.59918106", "0.5973078", "0.5959037", "0.59548736", "0.59496254", "0.5906024", "0.5898634", "0.58770066", "0.58564144", "0.58474356", "0.58468753", "0.5846074", "0.58395255", "0.582446", "0.5822649", "0.5815307", "0.57672054", "0.57508737", "0.5747876", "0.57465583", "0.57373875", "0.57078564" ]
0.7175717
0
Helper method for making a request to the Blockstore REST API
def api_request(method, url, **kwargs): if not settings.BLOCKSTORE_API_AUTH_TOKEN: raise ImproperlyConfigured("Cannot use Blockstore unless BLOCKSTORE_API_AUTH_TOKEN is set.") kwargs.setdefault('headers', {})['Authorization'] = f"Token {settings.BLOCKSTORE_API_AUTH_TOKEN}" response = requests.request(method, url, **kwargs) if response.status_code == 404: raise NotFound response.raise_for_status() if response.status_code == 204: return None # No content return response.json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getBlocks(request):\n if request.method == 'GET':\n blockName = request.GET.get('block', '')\n bid = request.GET.get('bid', '')\n districtName=request.GET.get('district', '')\n stateName=request.GET.get('state', '')\n limit=request.GET.get('limit', '')\n if limit == '':\n limit=50\n else:\n limit=int(limit)\n if bid=='':\n blocks = Block.objects.filter(name__icontains=blockName, district__name__icontains = districtName, district__state__name__icontains=stateName)\n else:\n blocks = Block.objects.filter(id = bid)\n\n blocks = blocks[:limit]\n serializer = SelectBlockSerializer(blocks, many=True)\n return JsonResponse(serializer.data, safe=False)", "def getBlocks(request):\n if request.method == 'GET':\n blockName = request.GET.get('block', '')\n bid = request.GET.get('bid', '')\n districtName=request.GET.get('district', '')\n stateName=request.GET.get('state', '')\n limit=request.GET.get('limit', '')\n if limit == '':\n limit=50\n else:\n limit=int(limit)\n if bid=='':\n blocks = Block.objects.filter(name__icontains=blockName, district__name__icontains = districtName, district__state__name__icontains=stateName)\n else:\n blocks = Block.objects.filter(id = bid)\n\n blocks = blocks[:limit]\n serializer = SelectBlockSerializer(blocks, many=True)\n return JsonResponse(serializer.data, safe=False)", "def GetBlock(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def _call(self, method, endpoint, content=None, params=None):\n\t\tparams = params or {}\n\t\tcontent = content or {}\n\n\t\tjson_data = json.dumps(content)\n\t\tendpoint = endpoint.strip(\"/\")\n\t\theaders = {\"X-Signed-Request-Hash\": self.__sign(method, endpoint, json_data)}\n\n\t\tresponse = self.session.request(\n\t\t\tmethod=method,\n\t\t\turl=(CloudClient.BASE_API + endpoint),\n\t\t\theaders = headers,\n\t\t\tparams=params,\n\t\t\tdata = json_data\n\t\t)\n\n\t\treturn WeeblyCloudResponse(self.session, response)", "def bm_api(method, **kwargs):\n if \"url\" in kwargs:\n kwargs['url'] = BLAZEMETER_API_URL + kwargs['url']\n else:\n LOGGER.error(\"Must provide url to bm_api()\")\n return None\n\n try:\n LOGGER.debug(\"Making request with method = {method}, {kwargs}\")\n response = requests.request(method, **kwargs, auth=get_authentication())\n if response.json().get(\"error\"):\n LOGGER.error(\"Error making request, received response: %s\", response.json()['error'])\n return None\n return response.json()\n except ValueError as value_error:\n LOGGER.error(value_error)", "def blockchain_requests(self, api_port, endpoint):\n SERVER_IP = '129.108.7.2'\n url = \"http://\" + SERVER_IP + \":\" + str(api_port) + endpoint\n #print(\"URL requesting: \" + url)\n r = requests.get(url)\n return r.json()", "def _api_call(self, **kwargs):\n params = {\n 'format': 'json',\n }\n params.update(kwargs)\n r = requests.get(self.api_base_url, params=params)\n return r.json()", "def __request(self,endpoint):\n apiRequest = requests.get(\"%s/%s\" % (self.baseurl,endpoint), \n auth=requests.auth.HTTPBasicAuth(self.api_id, self.api_secret))\n try:\n json = apiRequest.json()\n return json\n except JSONDecodeError:\n print(\"Failed to download or failed to parse JSON.\")\n print(apiRequest)\n return None", "def send_api_request(self, url, **kwargs):\n\n params = self._params.copy()\n dct = {k: kwargs[k] for k in kwargs if kwargs[k] is not None}\n params.update(dct)\n\n res = requests.get(url, params=params)\n if 
res.status_code != 200:\n try:\n error = res.json()['error']\n except ValueError:\n error = None\n raise SwrveApiException(error, res.status_code, url, params)\n\n return res.json()", "def get_block(blockhash):\n return requests.get(BASE+f'/api/block/{blockhash}').json()", "def _hit(self, method, urlpath, body=None, parse_json=True):\n url = self.api_base_url + urlpath\n logger.debug(\"Hitting the store: %s %s %s\", method, url, body)\n resp = self._auth_client.request(method, url, body)\n if not resp.ok:\n raise CommandError(self._parse_store_error(resp))\n\n logger.debug(\"Store ok: %s\", resp.status_code)\n if parse_json:\n # XXX Facundo 2020-06-30: we need to wrap this .json() call, and raise UnknownError\n # (after logging in debug the received raw response). This would catch weird \"html\"\n # responses, for example, without making charmcraft to crash. Related: issue #73.\n data = resp.json()\n else:\n data = resp.text\n return data", "def request(self, verb, address, params=None, data=None):\n return BWUser.bare_request(verb=verb, address_root=self.api_url,\n address_suffix=address,\n access_token=self.token,\n params=params or dict(),\n data=data or dict())", "def query_api(location):\n #bearer_token = obtain_bearer_token(API_HOST, TOKEN_PATH)\n bearer_token ='SHdrjUqMJXqXBKUc7bGIplM8y6tnbwZbXXDbWPCd9wWMP8tX9PdJrC5MZHwJRhb7jMtLjXxT-hsWjNf2OkdiDWd30HsS84AVI5iRnrpxkak3HbWXAdUKvraQ_wgXWXYx'\n response = transaction_search(bearer_token,location)\n response = response.get('businesses')\n return response", "def _request(self, url, **kwargs):\n headers = {'PRIVATE-TOKEN': self.token}\n response = make_request(self.base_url + url, headers=headers, **kwargs)\n logging.info('Requested: {0}'.format(url))\n logging.info('Method: {0}'.format(kwargs.get('method', 'GET')))\n logging.info(response.content)\n return json.loads(response.content)", "def apiquery(self, product_url, params={}):\n requesturl = self.config['host'] + product_url\n timestamp = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')\n signature = hmac.new(self.config['key'],\n ''.join([self.config['username'], timestamp, product_url]),\n digestmod=hashlib.sha1).hexdigest()\n params['timestamp'] = timestamp\n params['signature'] = signature\n params['api_username'] = self.config['username']\n if 'format' not in params.keys():\n params['format'] = self.config['dataformat']\n req = requests.get(requesturl, params=params)\n if req.status_code != requests.codes.ok:\n try:\n json_response = req.json()\n self.raise_best_exception(json_response)\n except KeyError:\n raise UnexpectedError(req.status_code, req.text)\n return req", "def _request(self, path, method='GET', body=None, headers=None):\n url = '{}{}'.format(self._url_base, path)\n headers = self._headers() if headers is None else headers\n response, content = super(DSBaseService, self)._request(url,\n method=method,\n body=str(body).replace(\"'\", '\"'),\n headers=headers)\n if int(response['status']) == 200:\n return json.loads(content)\n else:\n raise RuntimeError('{} responded with status code {}'.format(url, response['status']))", "def request(self, endpoint, verb=None, **req_kwargs):\n req_kwargs['headers'] = {'Authorization': 'Bearer ' + self.token,\n 'Accept': 'application/json;charset=utf-8'}\n resp = super(BahnPark, self).request(endpoint, verb=verb,\n **req_kwargs)\n resp.raise_for_status()\n return resp.json()", "def test_products_get(self):\n query_string = [('latitude', 1.2),\n ('longitude', 1.2)]\n response = self.client.open(\n '/v1/products',\n method='GET',\n 
query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def _get_good_request(self):\r\n session = DBSession()\r\n prms = {\r\n 'url': u'http://google.com',\r\n 'description': u'This is my google desc',\r\n 'extended': u'And some extended notes about it in full form',\r\n 'tags': u'python search',\r\n 'api_key': API_KEY,\r\n 'content': 'bmark content is the best kind of content man',\r\n }\r\n\r\n req_params = urllib.urlencode(prms)\r\n res = self.testapp.post('/api/v1/admin/bmark',\r\n params=req_params)\r\n session.flush()\r\n transaction.commit()\r\n from bookie.bcelery import tasks\r\n tasks.reindex_fulltext_allbookmarks(sync=True)\r\n return res", "async def test_get_booking(client):\n headers = { \n 'Accept': 'application/json',\n 'Authorization': 'Bearer special-key',\n }\n response = await client.request(\n method='GET',\n path='/vms/api/v1/bookings/{booking_id}'.format(booking_id='booking_id_example'),\n headers=headers,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def bloomberg(site):\n uri = \"https://www.bloomberg.com/markets/api/bulk-time-series/price/\"\n endpoint = (\n \"USDCNY%3ACUR,USDRUB%3ACUR,USDJPY%3ACUR,USDEUR%3ACUR,USDKRW%3ACUR\"\n + \",XAUUSD%3ACUR,XAGUSD%3ACUR\"\n )\n url = uri + endpoint\n headers = {\n \"authority\": \"www.bloomberg.com\",\n \"method\": \"GET\",\n \"path\": (\n \"/markets/api/comparison/data?securities=\"\n + \"USDCNY%3ACUR,USDRUB%3ACUR,USDJPY%3ACUR,USDEUR%3ACUR,USDKRW%3ACUR\"\n + \",XAUUSD%3ACUR,XAGUSD%3ACUR\"\n + \"&securityType=CURRENCY&locale=en\"\n ),\n \"scheme\": \"https\",\n \"accept\": (\n \"text/html,application/xhtml+xml,application/xml;q=0.9,image/\"\n + \"webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\"\n ),\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"en-US,en;q=0.9\",\n \"cache-control\": \"max-age=0\",\n \"cookie\": (\n \"bbAbVisits=1; _pxhd=e24b47c64d37711c147cfb3c4b35c845563d2f9831b\"\n + \"03d9189f8cd761bc2be4f:d78eeb01-34c9-11ea-8f86-51d2aad9afb3; _px\"\n + \"vid=d78eeb01-34c9-11ea-8f86-51d2aad9afb3; _reg-csrf=s%3Ab0pWvbcs\"\n + \"UtrjYeJ0T2GrTaaD.8kaQlvHchJ1D%2FZZMaQWQiTizJTxrqqyzzuEZHEvlQNw;\"\n + \" agent_id=7989385a-d6d9-4446-b7aa-3c937407862b;\"\n + \" session_id=5702901e-d5fe-41e7-b259-df46322015e0;\"\n + \" session_key=3179869387f4c4ec4385e0d16222f0e59f48c47f;\"\n + \" _user-status=anonymous; _is-ip-whitelisted=false;\"\n + \" _user-ip=91.132.137.116; trc_cookie_storage=taboola%2520global%253\"\n + \"Auser-id%3D2f4acdc6-7c3c-412c-8766-d9c80dcffc38-tuct513df3e;\"\n + \" bdfpc=004.0586371899.1578785723722;\"\n + \" _reg-csrf-token=4ZxUa9q8-fkNXQkoHHXhnobWne1sDlIVcKEQ\"\n ),\n \"dnt\": \"1\",\n \"if-none-match\": 'W/\"lZU52eQYxjadyNKGCyftEg==\"',\n \"sec-fetch-mode\": \"navigate\",\n \"sec-fetch-site\": \"none\",\n \"sec-fetch-user\": \"?1\",\n \"upgrade-insecure-requests\": \"1\",\n \"user-agent\": (\n \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\"\n + \" (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36 OPR/66.0.3515.27\"\n ),\n }\n try:\n session = requests.Session()\n session.headers = headers\n cfscrape_requests = cfscrape.create_scraper(sess=session)\n ret = cfscrape_requests.get(url, timeout=(15, 15)).json()\n data = {}\n for item in ret:\n symbol = item[\"id\"].replace(\":CUR\", \"\")\n symbol = symbol[:3] + \":\" + symbol[-3:]\n data[symbol] = float(item[\"lastPrice\"])\n data[\"USD:XAG\"] = 1 / data.pop(\"XAG:USD\")\n data[\"USD:XAU\"] = 1 / 
data.pop(\"XAU:USD\")\n data = refine_data(data)\n print(site, data)\n race_write(f\"{site}_forex.txt\", json_dumps(data))\n except:\n print(f\"{site} failed to load\")", "def _request(self, opts, query, query_key='q'):\n params = opts['params']\n params[query_key] = query\n resp = requests.get(opts['url'], params=params, headers=self._headers)\n if not resp.ok:\n raise Exception(\"Server threw an error for: {}\".format(resp.url))\n return resp.json()", "def test_19(self):\n assert 'True' == Api.requestBlock('test-19')", "async def get_inventory(request: web.Request, ) -> web.Response:\n return web.Response(status=200)", "def test_abbeys_get(self):\n query_string = [('label', 'label_example'),\n ('page', 1),\n ('per_page', 100)]\n headers = { \n 'Accept': 'application/json',\n }\n response = self.client.open(\n '/v0.0.1/abbeys',\n method='GET',\n headers=headers,\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def api( self, method, argc, **kwargs ):\n url = self.btce_url + argc + '/'\n body = urllib.urlencode(kwargs)\n sign = self.hash_hmac( body )\n headers = dict( Sign = sign, Uid = self.uid )\n if method == 'POST':\n response = requests.post( url,\n data = body,\n headers = headers,\n )\n elif method == 'GET':\n response = requests.get( url,\n headers = headers,\n )\n return response.text", "def call(\n uri,\n request_data=None,\n method='GET',\n only_response_code=False,\n rest_url=environ.get('BITBUCKET_REST_URL', None),\n username=environ.get('BITBUCKET_USERNAME', None),\n password=environ.get('BITBUCKET_PASSWORD', None),\n verify_certificate=not environ.get('BITBUCKET_IGNORE_CERTIFICATE', None),\n):\n\n m = {'DELETE': delete,\n 'GET': get,\n 'POST': post,\n 'PUT': put,\n }[method]\n\n params = {'url': f'{rest_url}{uri}', 'auth': (username, password), 'verify': verify_certificate}\n if request_data:\n params['json'] = request_data\n\n response = m(**params)\n return response.status_code if only_response_code else response.json() if response.text else response.status_code()", "def _request(self, endpoint, params=dict(), data=None):\n client_value = \"pyGreyNoise v%s\" % (str(self.CLIENT_VERSION))\n headers = {'X-Request-Client': 'pyGreyNoise', 'key': self.api_key}\n url = '/'.join([self.BASE_URL, self.API_VERSION, endpoint])\n self._log.debug('Requesting: %s', url)\n response = requests.get(url, headers=headers, timeout=7, params=params,\n data=data)\n if response.status_code not in range(200, 299):\n raise RequestFailure(response.status_code, response.content)\n try:\n loaded = json.loads(response.content)\n except Exception as error:\n raise InvalidResponse(error)\n return loaded", "def get_request(req_context, uri):\n headers = { 'Accept': \"application/json\", \n 'User-Agent': \"testApp\"\n }\n if config.ENVIRONMENT == \"Sandbox\":\n base_url = \"https://sandbox-quickbooks.api.intuit.com/v3/company/\"\n else:\n base_url = \"https://quickbooks.api.intuit.com/v3/company/\"\n url = base_url + req_context.realm_id + uri\n print(url)\n if config.AUTH_TYPE == \"OAuth2\":\n headers['Authorization'] = \"Bearer \" + req_context.access_token\n req = requests.get(url, headers=headers)\n else:\n auth = OAuth1(req_context.consumer_key, req_context.consumer_secret, req_context.access_key, req_context.access_secret)\n req = requests.get(url, auth=auth, headers=headers)\n return req", "def request(self, method, url, *args, **kwargs):\n full_url = urljoin(self.base_url, url)\n if 'data' in kwargs:\n kwargs['data'] = 
self._encode_data(kwargs['data'])\n return super(Client, self).request(method, full_url, *args, **kwargs)" ]
[ "0.63156694", "0.63156694", "0.58579916", "0.5850182", "0.58061534", "0.5766906", "0.57296586", "0.57222426", "0.5660935", "0.56325656", "0.55943286", "0.5584792", "0.5576492", "0.5573743", "0.5564736", "0.55544657", "0.5547002", "0.5537741", "0.55332834", "0.55300415", "0.55152696", "0.54764104", "0.5470277", "0.5463453", "0.5453511", "0.54489994", "0.5448452", "0.54480445", "0.5430887", "0.54260004" ]
0.7547545
0
Given data about a Collection returned by any blockstore REST API, convert it to a Collection instance.
def _collection_from_response(data): return Collection(uuid=UUID(data['uuid']), title=data['title'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def return_collection(self, collection, request, environ, start_response,\n response_headers):\n response_type = self.content_negotiation(\n request, environ, self.ValueTypes)\n if response_type is None:\n return self.odata_error(\n request, environ, start_response, \"Not Acceptable\",\n 'xml, json or plain text formats supported', 406)\n if response_type == \"application/json\":\n data = '{\"d\":%s}' % ' '.join(\n collection.generate_collection_in_json(request.version))\n else:\n e = core.Collection(None)\n e.set_xmlname((core.ODATA_METADATA_NAMESPACE, collection.name))\n doc = core.Document(root=e)\n for value in collection:\n p = e.add_child(core.Property)\n p.set_xmlname((core.ODATA_DATASERVICES_NAMESPACE,\n value.p_def.name))\n p.set_value(value)\n data = str(doc)\n data = data.encode('utf-8')\n response_headers.append((\"Content-Type\", str(response_type)))\n response_headers.append((\"Content-Length\", str(len(data))))\n start_response(\"%i %s\" % (200, \"Success\"), response_headers)\n return [data]", "def _translate_to_collection(\n self,\n collection,\n recursive=False,\n run_conditions=[],\n resource_conditions=[],\n variety_conditions=[],\n ):\n\n run_list = []\n if recursive:\n run_conditions.extend(\n [\n (\"collection_id =\", collection[\"id\"]),\n ]\n )\n _logger.debug(\"Loading run with conditions: {0}\".format(run_conditions))\n run_list = self.load_runs(\n recursive=recursive,\n run_conditions=run_conditions,\n resource_conditions=resource_conditions,\n variety_conditions=variety_conditions,\n )\n\n res = RunCollection(collection[\"name\"], data=run_list)\n res.set_id(collection[\"id\"])\n\n return res", "def collection(collection_id):\n\tcollection = models.Collection.query.get(collection_id)\n\tif not collection:\n\t\tabort(404)\n\treturn jsonify(collection.dictionary())", "def collection(self):\n return self._collection", "def _get_collection(self, collection_uri, request_headers=None):\n\n # get the collection\n status, headers, thecollection = self._rest_get(collection_uri)\n\n if status != 200:\n msg = self._get_extended_error(thecollection)\n raise exception.IloError(msg)\n\n while status < 300:\n # verify expected type\n # Don't limit to version 0 here as we will rev to 1.0 at some\n # point hopefully with minimal changes\n ctype = self._get_type(thecollection)\n if (ctype not in ['Collection.0', 'Collection.1']):\n raise exception.IloError(\"collection not found\")\n\n # if this collection has inline items, return those\n # NOTE: Collections are very flexible in how the represent\n # members. They can be inline in the collection as members\n # of the 'Items' array, or they may be href links in the\n # links/Members array. The could actually be both. Typically,\n # iLO implements the inline (Items) for only when the collection\n # is read only. 
We have to render it with the href links when an\n # array contains PATCHable items because its complex to PATCH\n # inline collection members.\n\n if 'Items' in thecollection:\n # iterate items\n for item in thecollection['Items']:\n # if the item has a self uri pointer,\n # supply that for convenience.\n memberuri = None\n if 'links' in item and 'self' in item['links']:\n memberuri = item['links']['self']['href']\n yield 200, None, item, memberuri\n\n # else walk the member links\n elif ('links' in thecollection and\n 'Member' in thecollection['links']):\n # iterate members\n for memberuri in thecollection['links']['Member']:\n # for each member return the resource indicated by the\n # member link\n status, headers, member = self._rest_get(memberuri['href'])\n yield status, headers, member, memberuri['href']\n\n # page forward if there are more pages in the collection\n if ('links' in thecollection and\n 'NextPage' in thecollection['links']):\n next_link_uri = (collection_uri + '?page=' + str(\n thecollection['links']['NextPage']['page']))\n status, headers, thecollection = self._rest_get(next_link_uri)\n\n # else we are finished iterating the collection\n else:\n break", "def collection(self, collection):\r\n\t\tself.fetch_collections()\r\n\t\tif collection is not None:\r\n\t\t\treturn self._collections[collection]\r\n\t\treturn None", "def get_collection(self, collection):\n return self.database[collection]", "def collection(cls, c: \"Collection_Type\") -> \"Link\":\n return cls(pystac.RelType.COLLECTION, c, media_type=pystac.MediaType.JSON)", "def get_collection(self, collection_name):\r\n collection = None\r\n if collection_name == 'talent5__staff_collection':\r\n collection = self.talent5__staff_collection\r\n elif collection_name == 'talent5__encode_collection':\r\n collection = self.talent5__encode_collection\r\n elif collection_name == 'talent5__count_collection':\r\n collection = self.talent5__count_collection\r\n elif collection_name == 'talent5__feedback_collection':\r\n collection = self.talent5__feedback_collection\r\n\r\n return collection", "def test_serialize_and_deserialize_returns_unchanged_collection(\n self\n ) -> None:\n self.assertEqual(\n self.collection.to_dict(),\n collection_domain.Collection.deserialize(\n self.collection.serialize()).to_dict())", "def setCollection(self, collection):\n self.collectionName = collection[\"name\"]\n self.collectionType = collection[\"type\"]\n return", "def _get_collection(self) -> Any:\n return self.anki.Collection(self.db_location, log=True)", "def __init__(self, collection):\n self.collection = collection", "def get_collection(self, collection_id):\n sql = \"\"\"SELECT collection.collection_id, collection.type,\n collection.name, collection.path,\n collection.doc,\n collection.version, collection.scope,\n collection.namedargs,\n collection.doc_format\n FROM collection_table as collection\n WHERE collection_id == ? 
OR collection.name like ?\n \"\"\"\n cursor = self._execute(sql, (collection_id, collection_id))\n # need to handle the case where we get more than one result...\n sql_result = cursor.fetchone()\n return {\n \"collection_id\": sql_result[0],\n \"type\": sql_result[1],\n \"name\": sql_result[2],\n \"path\": sql_result[3],\n \"doc\": sql_result[4],\n \"version\": sql_result[5],\n \"scope\": sql_result[6],\n \"namedargs\": sql_result[7],\n \"doc_format\": sql_result[8]\n }\n return sql_result", "def get_collection(self, coll_id):\n cond = SQLBinaryExpr(COL_NAME_COLL_COLLID, OP_EQ, \":1\")\n collection_list = self.select_generic_data([\"*\"], [TABLE_NAME_COLL], where=cond, sqlparams={\"1\": coll_id})\n if len(collection_list) == 0:\n return None\n return collection_list[0]", "def get_collection(coll_id=None, transform_id=None, relation_type=None):\n return collections.get_collection(coll_id=coll_id, transform_id=transform_id, relation_type=relation_type)", "def _get_collection(self):\n pk = self.kwargs.get('pk', None)\n ns_name = self.kwargs.get('namespace', None)\n name = self.kwargs.get('name', None)\n\n if pk:\n return get_object_or_404(models.Collection, pk=pk)\n ns = get_object_or_404(models.Namespace, name=ns_name)\n return get_object_or_404(models.Collection, namespace=ns, name=name)", "def get_collection(collection_id):\n print('collection_checker.get_collection()')\n collection = collection_dao.get_collection(collection_id)\n if collection is None:\n abort(404, 'Collection does not exist')\n else:\n collection = collection_dao.get_collection(collection_id)\n print(collection)\n return collection", "def _translate_from_collection(self, collection):\n\n row = {\n \"name\": collection.get_name(),\n }\n # If this run has been creeated in domain, it will not have ID until the DB gives it to him\n an_id = collection.get_id()\n if an_id:\n row[\"id\"] = an_id\n\n return row", "def to_collection_dict(cls, query, data, page, per_page):\n resources = query(data).skip(page * per_page).limit(per_page)\n data = {\n 'items': [cls().to_response(item) for item in resources],\n '_meta': {\n 'page': page,\n 'per_page': per_page,\n 'total_items': resources.count()\n }\n }\n return data", "def collection(self):\r\n raise NotImplementedError", "def _coerce_collection(\n self, value: Any, origin: Type, annotation: Type[Collection[Any]]\n ) -> Collection:\n args = self.get_args(annotation)\n value = self._coerce_builtin(value, origin)\n if args:\n arg = args[0]\n return type(value)(self.coerce_value(x, arg) for x in value)\n return self._coerce_builtin(value, origin)", "def collection_get(self):\n if self.request.params.get(\"all\", \"\"):\n collection_data = [i.serialize(\"view\") for i in self.context.documents]\n else:\n collection_data = sorted(\n dict([(i.id, i.serialize(\"view\")) for i in self.context.documents]).values(),\n key=lambda i: i[\"dateModified\"],\n )\n return {\"data\": collection_data}", "def collections(self, query, page=1, per_page=10):\n url = \"/search/collections\"\n data = self._search(url, query, page=page, per_page=per_page)\n data[\"results\"] = CollectionModel.parse_list(data.get(\"results\"))\n return data", "def get_collections_details(self, collection, recurse=True):\n if type(collection) == str:\n collid = self.get_collection_id(collection)\n else:\n collid = collection\n\n col_list = [COL_NAME_COLL_COLLID, COL_NAME_COLL_NAME, COL_NAME_COLL_IS_ACTIVE, COL_NAME_COLL_PRID,\n COL_NAME_COLL_COLLCOMMENT, COL_NAME_COLL_PARENTID]\n if recurse:\n rec_list, col_list = 
self.get_collection_tree(collid, incl_shared=True, col_list=col_list)\n # exclude the first record it was not expect as per previous implementation and convert to list of dict\n records = [dict(zip(col_list, rec)) for rec in rec_list[1:]]\n else:\n\n cond = SQLBinaryExpr(COL_NAME_COLL_PARENTID, OP_EQ, \":1\")\n records = self.select_generic_data(col_list, table_list=[TABLE_NAME_COLL],\n where=cond, sqlparams={\"1\": collid})\n return records", "def get_all_posts_from_collection(self):\n response = self.get_comments_all_posts(PAYLOAD)\n collection = (response.json())\n return collection", "def get_collection(self, db_name, collection_name):\n return self._client[db_name][collection_name]", "def get_collection(self, address):\n return self.client.get_collections(uri=address)", "def collection_create(self, name):\n try:\n return CastleCollection(name, self)\n except:\n raise", "def set_collection(self, collection):\n if isinstance(collection, BangumiSubjectCollection):\n return self.set_sub_collection(collection)\n elif isinstance(collection, BangumiEpisodeCollection):\n return self.set_ep_collection(collection)\n else:\n raise TypeError(\"Must be either BangumiSubjectCollection or \" + \n \"BangumiEpisodeCollection, got {0}\"\n .format(type(collection)))" ]
[ "0.6577779", "0.6343278", "0.6237831", "0.6221495", "0.6207642", "0.6203012", "0.6195551", "0.6181388", "0.6098431", "0.6020433", "0.60099417", "0.5990563", "0.5976519", "0.5969405", "0.5889118", "0.58554935", "0.58416325", "0.5817092", "0.5749313", "0.57408684", "0.57094455", "0.5703953", "0.5695147", "0.56906945", "0.5686716", "0.5678172", "0.5655432", "0.56514037", "0.5648792", "0.56479985" ]
0.71289414
0
Given data about a Bundle returned by any blockstore REST API, convert it to a Bundle instance.
def _bundle_from_response(data): return Bundle( uuid=UUID(data['uuid']), title=data['title'], description=data['description'], slug=data['slug'], # drafts: Convert from a dict of URLs to a dict of UUIDs: drafts={draft_name: UUID(url.split('/')[-1]) for (draft_name, url) in data['drafts'].items()}, # versions field: take the last one and convert it from URL to an int # i.e.: [..., 'https://blockstore/api/v1/bundle_versions/bundle_uuid,15'] -> 15 latest_version=int(data['versions'][-1].split(',')[-1]) if data['versions'] else 0, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bundle_instance(obj):\n\n content, contents = osl_encode(obj, True)\n # should be a bunch of documents, not just one.\n bundle = [json.dumps(c) for c in contents]\n return bundle", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n bundle_name: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n desktop_type: Optional[pulumi.Input[str]] = None,\n image_id: Optional[pulumi.Input[str]] = None,\n language: Optional[pulumi.Input[str]] = None,\n root_disk_performance_level: Optional[pulumi.Input[str]] = None,\n root_disk_size_gib: Optional[pulumi.Input[int]] = None,\n user_disk_performance_level: Optional[pulumi.Input[str]] = None,\n user_disk_size_gibs: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None) -> 'Bundle':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _BundleState.__new__(_BundleState)\n\n __props__.__dict__[\"bundle_name\"] = bundle_name\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"desktop_type\"] = desktop_type\n __props__.__dict__[\"image_id\"] = image_id\n __props__.__dict__[\"language\"] = language\n __props__.__dict__[\"root_disk_performance_level\"] = root_disk_performance_level\n __props__.__dict__[\"root_disk_size_gib\"] = root_disk_size_gib\n __props__.__dict__[\"user_disk_performance_level\"] = user_disk_performance_level\n __props__.__dict__[\"user_disk_size_gibs\"] = user_disk_size_gibs\n return Bundle(resource_name, opts=opts, __props__=__props__)", "def dehydrate(self, bundle):\n if bundle.obj.img_thumbnail_url is None:\n del bundle.data['img_thumbnail_url']\n \n \"\"\" append layout url if a level filter exists in the request \"\"\"\n if \"level\" in bundle.request.GET:\n level = int(bundle.request.GET[\"level\"])\n bundle.data[\"layout_url\"] = bundle.obj.layouts.get(level=level).mapURL\n \n \"\"\"\n make bundle consistent for location parsing on mobile client: \n add a location_type entry in the bundle.data\n put all the rest of the data under location_data\n \"\"\"\n location_data = bundle.data.copy()\n bundle.data.clear()\n bundle.data['location_type'] = self._meta.resource_name\n bundle.data['location_data'] = location_data\n \n return bundle", "def bundle(self):\n return self._bundle", "def hydrate(self, bundle):\n \n #Update the fabric\n if \"fabric\" in bundle.data and bundle.request.user.has_perm('acknowledgements.change_fabric'):\n try:\n fabric = Fabric.objects.get(pk=bundle.data[\"fabric\"][\"id\"])\n bundle.obj.fabric = fabric\n logger.info(\"{0} changed fabric to {1}\".format(bundle.obj.description,\n fabric.description))\n except KeyError:\n raise ValueError(\"Missing fabric ID.\")\n except Fabric.DoesNotExist:\n raise\n \n #Update the unit price\n if \"unit_price\" in bundle.data:\n if bundle.data[\"unit_price\"] != bundle.obj.unit_price:\n if bundle.request.user.has_perm('acknowledgements.change_item_price'):\n bundle.obj.unit_price = bundle.data['unit_price']\n bundle.obj.total = bundle.obj.unit_price * bundle.obj.quantity\n else:\n bundle.data['unit_price'] = bundle.obj.unit_price\n \n return bundle", "def get_data_bundle(data_bundle_id):\n data_bundles = []\n found_server = None\n for server in server_list:\n response = requests.get(\n \"{}/databundles/{}\".format(server, data_bundle_id))\n if response.status_code == 200:\n data_bundles.append(response.json()['data_bundle'])\n found_server = server\n break\n\n if not found_server:\n return Response({'msg': 'A Data 
Bundle with the id'\n '{} was not found'.format(data_bundle_id)},\n status_code=404)\n\n # Modify the Data Bundle to include provenance about the\n # server we got metadata from.\n\n dos_url = \"{}/dataobjects/{}\".format(\n found_server, data_bundle_id)\n\n data_bundle = data_bundles[0]\n data_bundle['urls'].append({'url': dos_url})\n\n return {'data_bundle': data_bundle}", "def dehydrate(self, bundle):\n if not bundle.obj.environment is None:\n ## make the environment response a dictionary, containing resource_uri and name\n bundle.data['environment'] = {'resource_uri': bundle.data['environment'], 'name': bundle.obj.environment.name}\n \n if not bundle.obj.area is None:\n ## make the area response a dictionary, containing resource_uri and name\n bundle.data['area'] = {'resource_uri': bundle.data['area'], 'name': bundle.obj.area.name}\n \n \n \"\"\"\n bundle in the user's first and last name under the ['data']['user'] entry \n \"\"\"\n first_name = \"Anonymous\"\n last_name = \"Guest\"\n \n user_profile = bundle.obj.user\n \n if not user_profile is None and not user_profile.is_anonymous:\n first_name = user_profile.user.first_name\n last_name = user_profile.user.last_name\n \n \n bundle.data['data']['user'] = { 'first_name' : first_name,\n 'last_name' : last_name \n }\n \n \"\"\"\n now remove also null area/environment data\n \"\"\"\n if not bundle.data['environment']:\n del bundle.data['environment']\n \n if not bundle.data['area']:\n del bundle.data['area']\n \n \"\"\"\n if no data is found remove the 'data' attribute from the bundle to avoid useless processing on\n the mobile side \n \"\"\"\n if not bundle.data['data']:\n del bundle.data['data']\n \n return bundle", "def parse_bundle_for_file(fhir_bundle_path):\n\n with open(fhir_bundle_path, 'r', encoding='UTF-8') as f:\n bundle = bu.Bundle(json.load(f))\n return bundle", "def dehydrate(self, bundle):\n if bundle.obj.admin is None:\n del bundle.data['admin']\n \n \"\"\" Delete the img_thumbnail_url if it is null \"\"\"\n if bundle.obj.img_thumbnail_url is None:\n del bundle.data['img_thumbnail_url']\n \n \"\"\" append level data from the layout reference of the Area obj \"\"\"\n bundle.data['level'] = bundle.obj.layout.level\n \n \"\"\"\n make bundle consistent for location parsing on mobile client: \n add a location_type entry in the bundle.data\n put all the rest of the data under location_data\n \"\"\"\n location_data = bundle.data.copy()\n bundle.data.clear()\n bundle.data['location_type'] = self._meta.resource_name\n bundle.data['location_data'] = location_data\n \n return bundle", "def bundle_cmd(context, bundle_name, bundle_id, json, verbose, compact):\n store: Store = context.obj[\"store\"]\n bundles = store.bundles()\n\n if bundle_name:\n bundle = store.get_bundle_by_name(bundle_name=bundle_name)\n bundles = [bundle] if bundle else []\n\n if bundle_id:\n bundle = store.get_bundle_by_id(bundle_id=bundle_id)\n bundles = [bundle] if bundle else []\n\n if not bundles:\n LOG.info(\"Could not find any bundles\")\n return\n template = schema.BundleSchema()\n result = []\n for bundle in bundles:\n result.append(template.dump(bundle))\n\n if json:\n click.echo(jsonlib.dumps(result, indent=4, sort_keys=True))\n return\n console = Console()\n console.print(get_bundles_table(result))\n if verbose:\n for bundle in bundles:\n if len(bundle.versions) == 0:\n LOG.info(\"No versions found for bundle %s\", bundle.name)\n return\n version_obj = bundle.versions[0]\n context.invoke(\n version_cmd, version_id=version_obj.id, verbose=True, 
compact=compact\n )", "def get_bundle(bundle_uuid):\n assert isinstance(bundle_uuid, UUID)\n try:\n data = api_request('get', api_url('bundles', str(bundle_uuid)))\n except NotFound:\n raise BundleNotFound(f\"Bundle {bundle_uuid} does not exist.\") # lint-amnesty, pylint: disable=raise-missing-from\n return _bundle_from_response(data)", "def full_dehydrate(self, bundle):\n # Dehydrate each field.\n if bundle.obj.obj_type() == 'image':\n obj = ImageResource()\n elif bundle.obj.obj_type() == 'wordbox':\n obj = WordBoxResource()\n else:\n return bundle\n for field_name, field_object in obj.fields.items():\n try:\n # A touch leaky but it makes URI resolution work.\n if getattr(field_object, 'dehydrated_type', None) == 'related':\n field_object.api_name = self._meta.api_name\n field_object.resource_name = obj._meta.resource_name\n\n bundle.data[field_name] = field_object.dehydrate(bundle)\n\n # Check for an optional method to do further dehydration.\n method = getattr(obj, \"dehydrate_%s\" % field_name, None)\n except:\n raise BadRequest(\"Internal error, possible problem with \"\n \"top_commnets for images\")\n\n if method:\n bundle.data[field_name] = method(bundle)\n\n bundle = obj.dehydrate(bundle)\n return bundle", "def create_bundle(self):\n self._highest_bundle_id += 1\n bundle = Bundle(document=self, bundle_id=str(self._highest_bundle_id))\n self.bundles.append(bundle)\n bundle.number = len(self.bundles)\n return bundle", "def get_bundle():\n if should_save_generator_bundle():\n return None\n bundle_file = get_bundle_file()\n if bundle_file is None:\n return None\n return sequence_generator_bundle.read_bundle_file(bundle_file)", "def test_get_bundle(self):\n res = self.app.get('/bundle/DEFAULT/main')\n bundle = json.loads(res.body.decode('utf-8'))\n expected = self.stats1['chunks']['main'][0]\n self.assertEqual(len(bundle), 1)\n self.assertEqual(bundle[0]['name'], expected['name'])\n self.assertEqual(bundle[0]['path'], expected['path'])\n self.assertTrue('url' in bundle[0])", "def full_dehydrate(self, bundle):\n # Dehydrate each field.\n if bundle.obj.obj_type() == 'image':\n obj = ImageResource()\n elif bundle.obj.obj_type() == 'wordbox':\n obj = WordBoxResource()\n else:\n return bundle\n for field_name, field_object in obj.fields.items():\n try:\n # A touch leaky but it makes URI resolution work.\n if(getattr(field_object, 'dehydrated_type', None)\n == 'related'):\n field_object.api_name = self._meta.api_name\n field_object.resource_name = obj._meta.resource_name\n\n bundle.data[field_name] = field_object.dehydrate(bundle)\n\n # Check for an optional method to do further dehydration.\n method = getattr(obj, \"dehydrate_%s\" % field_name, None)\n except:\n raise BadRequest(\"Internal error, possible problem with\"\n \" top_commnets for images\")\n\n if method:\n bundle.data[field_name] = method(bundle)\n\n bundle = obj.dehydrate(bundle)\n return bundle", "def build(self) -> Optional[Bundle]:\n # Prepare STIX2 bundle objects with author.\n bundle_objects = [self.author]\n\n # Add object marking definitions to bundle.\n bundle_objects.extend(self.object_markings)\n\n # Create intrusion sets and add to bundle.\n intrusion_sets = self._create_intrusion_sets()\n bundle_objects.extend(intrusion_sets)\n\n # Create sectors and add to bundle.\n sectors = self._create_sectors()\n bundle_objects.extend(sectors)\n\n # Intrusion sets target sectors and add to bundle.\n intrusion_sets_target_sectors = self._create_targets_relationships(\n intrusion_sets, sectors\n )\n 
bundle_objects.extend(intrusion_sets_target_sectors)\n\n # Create locations and add to bundle.\n locations = self._create_locations()\n bundle_objects.extend(locations)\n\n # Intrusion sets target locations and add to bundle.\n intrusion_sets_target_locations = self._create_targets_relationships(\n intrusion_sets, locations\n )\n bundle_objects.extend(intrusion_sets_target_locations)\n\n # Create observations.\n observations = self._create_ioc_observations()\n\n # Get observables and add to bundle.\n observables = [o.observable for o in observations if o.observable is not None]\n bundle_objects.extend(observables)\n\n # Get indicators, create YARA indicators and to bundle.\n indicators = [o.indicator for o in observations if o.indicator is not None]\n indicators.extend(self._create_yara_indicators())\n bundle_objects.extend(indicators)\n\n # Get observation relationships and add to bundle.\n indicators_based_on_observables = [\n o.relationship for o in observations if o.relationship is not None\n ]\n bundle_objects.extend(indicators_based_on_observables)\n\n # Indicator indicates entities, add to bundle.\n indicator_indicates = intrusion_sets\n\n indicator_indicates_entities = self._create_indicates_relationships(\n indicators, indicator_indicates\n )\n bundle_objects.extend(indicator_indicates_entities)\n\n # Create object references for the report.\n object_refs = create_object_refs(\n intrusion_sets,\n sectors,\n intrusion_sets_target_sectors,\n locations,\n intrusion_sets_target_locations,\n observables,\n indicators,\n indicators_based_on_observables,\n indicator_indicates_entities,\n )\n\n # TODO: Ignore reports without any references or not?\n # Hack, the report must have at least on object reference.\n if not object_refs:\n dummy_object = self._create_dummy_object()\n\n bundle_objects.append(dummy_object)\n object_refs.append(dummy_object)\n\n # Create report and add to bundle.\n report = self._create_report(object_refs)\n bundle_objects.append(report)\n\n # XXX: Without allow_custom=True the observable with the custom property\n # will cause an unexpected property (x_opencti_score) error.\n return Bundle(objects=bundle_objects, allow_custom=True)", "def hydrate_content_type(self, bundle):\n if bundle.data['content_type'] == 'media':\n bundle.data['content_type'] = \"T\"\n return bundle\n bundle.data['content_type'] = CONTENT_HYDRATE[bundle.data['content_type']]\n return bundle", "def bundle_cls(self):\n return self.get_entity_cls('bundle')", "def bundle(class_: Type[T]) -> Type[T]:\n namespace = OrderedDict()\n for attr in dir(class_):\n if not attr.startswith(\"_\") and attr != \"metadata\":\n attr_object = getattr(class_, attr)\n namespace[attr] = attr_object\n return BundleMeta(class_.__name__, (), namespace) # noqa", "def dehydrate(self, bundle):\n #Add URLS for the acknowledgement\n #and the production pdf to the data\n #bundle\n if bundle.request.GET.get('pdf'):\n try:\n ack = bundle.obj.acknowledgement_pdf\n production = bundle.obj.production_pdf\n bundle.data['pdf'] = {'acknowledgement': ack.generate_url(),\n 'production': production.generate_url()}\n except AttributeError as e:\n logger.warn(e) \n logger.warn('Missing acknowledgement or production pdf')\n \n try:\n label = bundle.obj.label_pdf\n bundle.data['pdf']['label'] = label.generate_url()\n except AttributeError:\n logger.warn(\"Missing label pdf\")\n \n #Adds a dictionary for the project if it exists\n if bundle.obj.project:\n bundle.data['project'] = {'id': bundle.obj.project.id,\n 'codename': 
bundle.obj.project.codename}\n \n return bundle", "def dehydrate(self, bundle):\n #if 'research_profile' in bundle.data and not bundle.obj.research_profile:\n # del bundle.data['research_profile']\n if 'showprofile' in bundle.request.GET and \\\n bundle.request.GET['showprofile'] in UserSubProfile.get_subclass_list() + ['all']:\n \n ## get downcasted versions directly of all the subprofiles associated with this userprofile\n profile_type = bundle.request.GET['showprofile']\n subprofiles = []\n \n if profile_type == 'all':\n subprofiles = bundle.obj.subprofiles.all().select_subclasses()\n else:\n subprofiles = bundle.obj.subprofiles.all().select_subclasses(profile_type)\n \n subprofiles_dict = {}\n for profile in subprofiles:\n data = profile.to_serializable()\n if data:\n subprofiles_dict.update(data)\n \n if subprofiles_dict:\n bundle.data['subprofiles'] = subprofiles_dict\n \n \"\"\" if the user is requesting his own data then return his email too as it\n is an identifying element \"\"\" \n if hasattr(bundle.request, \"user\") and not bundle.request.user.is_anonymous():\n user_profile = bundle.request.user.get_profile()\n if user_profile.pk == bundle.obj.pk:\n bundle.data['email'] = bundle.obj.user.email \n \n \"\"\" remove c2dm data from bundle \"\"\"\n if 'c2dm_id' in bundle.data:\n del bundle.data['c2dm_id']\n \n return bundle", "def from_dict(cls, dikt) -> 'BundleData':\n return util.deserialize_model(dikt, cls)", "def list_bundles():\n response = houston.get(\"/zipline/bundles\")\n\n houston.raise_for_status_with_json(response)\n return response.json()", "def cast(self):\n if self.validate():\n if 'blueprint' in self.data:\n # A single blueprint\n obj = Blueprint.Blueprint()\n obj.versionCode = self.versionCode\n obj.data = self.data\n return obj\n elif 'blueprint-book' in self.data:\n # A book of blueprints\n obj = BlueprintBook.BlueprintBook()\n obj.versionCode = self.versionCode\n obj.data = self.data\n return obj\n else:\n # Unknown datatype. Just return the object\n return self\n \n else:\n # Broken validation means just return the object\n return self", "def fixture_minimal_bundle_obj(case_id, timestamp) -> models.Bundle:\n return models.Bundle(name=case_id, created_at=timestamp)", "def bundle(self, text):\n\n response = self._send_request(\"bundle\", dict(text=text))\n return response[self._layer]", "def unpack(self, obj):\n if obj is None:\n return\n try:\n return json.loads(obj)\n except Exception:\n return obj", "def hydrate(self, bundle):\n bundle.data['user'] = \"/api/v1/user/%d/\" % bundle.request.user.id\n return bundle", "def _accept_bundle(self, bundle):\n duration = bundle.duration\n supply_cost = 0\n # 1. Build a mapping from resource-specific info to resource record\n res_to_record_mapping = self._res_man.get_res_to_record_mapping()\n # 2. Add usage for zones\n zones = bundle.copy_zones()\n for zone in zones:\n zone_id = zone.zone_id\n for resource in zone.resources:\n res_type = resource.get_res_type()\n qty = resource.get_value()\n record = res_to_record_mapping[zone_id][res_type]\n self._res_man.update_res_usage(record, qty, duration)\n supply_cost += record.get_supply_cost()\n # 3. Add usage for links\n links = bundle.copy_links()\n for link in links:\n src_zone_id, dst_zone_id = link.get_src_and_dst()\n qty = link.get_value()\n record = res_to_record_mapping[src_zone_id][dst_zone_id]\n self._res_man.update_res_usage(record, qty, duration)\n supply_cost += record.get_supply_cost()\n # 4. 
Update bookkeeping\n self._revenue += bundle.payment\n self._expenses += supply_cost\n logger.debug(\n 'Updating books...\\n'\n f'\\tTotal revenue: {self._revenue}\\n'\n f'\\tTotal supply cost: {self._expenses}\\n'\n )\n if self._use_price_token and bundle.has_price_token():\n # Need to expire the token if it was used to compute the prices\n # this time\n token = bundle.get_price_token()\n with self._history_lock:\n try:\n del self._price_history[token]\n logger.debug(f'Deleted token {token}')\n except KeyError:\n # Token happened to expire between time prices were computed\n # and here\n pass" ]
[ "0.6342084", "0.6269155", "0.62631524", "0.6151707", "0.60612935", "0.5925814", "0.59022117", "0.5898795", "0.5896431", "0.58471715", "0.5792275", "0.5681991", "0.56783265", "0.56710964", "0.566184", "0.5634402", "0.56337357", "0.56250954", "0.5600761", "0.5547723", "0.5542083", "0.54638", "0.5450937", "0.5412953", "0.5359597", "0.5346194", "0.53309506", "0.53299206", "0.5324351", "0.52998406" ]
0.76745385
0
Given data about a Draft returned by any blockstore REST API, convert it to a Draft instance.
def _draft_from_response(data): return Draft( uuid=UUID(data['uuid']), bundle_uuid=UUID(data['bundle_uuid']), name=data['name'], updated_at=dateutil.parser.parse(data['staged_draft']['updated_at']), files={ path: DraftFile(path=path, **file) for path, file in data['staged_draft']['files'].items() }, links={ name: DraftLinkDetails( name=name, direct=LinkReference(**link["direct"]), indirect=[LinkReference(**ind) for ind in link["indirect"]], modified=link["modified"], ) for name, link in data['staged_draft']['links'].items() } )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_draft(self):\n return Draft(self)", "def convert_to_draft(self, source_location):\r\n if source_location.category in DIRECT_ONLY_CATEGORIES:\r\n raise InvalidVersionError(source_location)\r\n original = self.collection.find_one({'_id': source_location.to_deprecated_son()})\r\n if not original:\r\n raise ItemNotFoundError(source_location)\r\n draft_location = as_draft(source_location)\r\n original['_id'] = draft_location.to_deprecated_son()\r\n try:\r\n self.collection.insert(original)\r\n except pymongo.errors.DuplicateKeyError:\r\n raise DuplicateItemError(original['_id'])\r\n\r\n self.refresh_cached_metadata_inheritance_tree(draft_location.course_key)\r\n\r\n return wrap_draft(self._load_items(source_location.course_key, [original])[0])", "def get(self, oauth, resource_id, draft_id):\n d = Deposition.get(resource_id, user=current_user)\n return d.type.marshal_draft(d.get_draft(draft_id))", "def cast(self):\n if self.validate():\n if 'blueprint' in self.data:\n # A single blueprint\n obj = Blueprint.Blueprint()\n obj.versionCode = self.versionCode\n obj.data = self.data\n return obj\n elif 'blueprint-book' in self.data:\n # A book of blueprints\n obj = BlueprintBook.BlueprintBook()\n obj.versionCode = self.versionCode\n obj.data = self.data\n return obj\n else:\n # Unknown datatype. Just return the object\n return self\n \n else:\n # Broken validation means just return the object\n return self", "def get_draft(draft_uuid):\n assert isinstance(draft_uuid, UUID)\n try:\n data = api_request('get', api_url('drafts', str(draft_uuid)))\n except NotFound:\n raise DraftNotFound(f\"Draft does not exist: {draft_uuid}\") # lint-amnesty, pylint: disable=raise-missing-from\n return _draft_from_response(data)", "def edit_draft(self, message_id):\n return Draft(self, message_id).fetch()", "def _get_draft(self):\n review_request = self.create_review_request(publish=True)\n return ReviewRequestDraft.create(review_request)", "def from_data(cls, reddit, data):\n if data == '[deleted]':\n return None\n else:\n return cls(reddit, data)", "def get_draft(self, draft_number: Optional[int] = None) -> Draft:\n if draft_number is None:\n self._status.check_authority_for_draft()\n draft_number = self._status.draft_number\n\n if not draft_number:\n raise TypeError(\"The given draft number is illegal\")\n\n for draft in self.list_drafts():\n if draft_number == draft.number:\n return draft\n\n raise ResourceNotExistError(resource=\"draft\", identification=draft_number)", "def _get_draft_message(draft):\n return HttpTextResponse(draft.text if draft else '')", "def deserialize(self, data):\n payload = self._unpack(data)\n return decode(payload['body'], content_type=payload['content_type'],\n content_encoding=payload['content_encoding'], force=True)", "def from_dict(cls, dikt) -> 'Debt':\n return util.deserialize_model(dikt, cls)", "def get(self,\n draft_id,\n ):\n return self._invoke('get',\n {\n 'draft_id': draft_id,\n })", "def loads(self, data):\n self._id = data.get('id', -1)\n self._created = data.get('created', 0) # datetime.strptime(data.get('created', '1970-01-01T00:00:00'), '%Y-%m-%dT%H:%M:%S').timestamp()\n self._stage = data.get('stage', 0) # self.stage_from_str(data.get('stage', ''))\n self._dir = data.get('direction', 0) # self.direction_from_str(data.get('direction', ''))\n self._timeframe = data.get('timeframe') # timeframe_from_str(data.get('timeframe', 't'))\n self._expiry = data.get('expiry', 0) # datetime.strptime(data.get('expiry', '1970-01-01T00:00:00'), 
'%Y-%m-%dT%H:%M:%S').timestamp()", "def _object_decode(self, d):\n if ((isinstance(d, dict)) and\n ('clientId' in d)):\n cd = ClientData(**d)\n return cd\n elif self._other_object_hook is not None:\n return self._other_object_hook(d)\n else:\n return d", "def _deserialize(self, data):\n uri = data[1:-1]\n # We have to retrieve the type to rebuild the object\n attr = self.__dict__['field']\n # Be careful when orig = None !!!!!\n orig = getattr(attr.model, attr.name)\n if None == orig:\n return rdfSubject(rdflib.term.URIRef(uri))\n elif isinstance(orig, list):\n # rdfalchemy mapper gives me the solution\n rt = attr.model.__class__.__dict__[attr.name].range_type\n from rdfalchemy.orm import mapper\n alch_map = mapper()\n try:\n cls = alch_map[str(rt)]\n return cls(rdflib.term.URIRef(uri))\n except:\n rdfSubject(rdflib.term.URIRef(uri))\n else:\n return type(orig)(rdflib.term.URIRef(uri))", "def save_draft(cid):\r\n d_content = request.values.get('contract_content', '')\r\n if not d_content:\r\n return jsonify({'success': False, 'errorMsg': 'No content to save'})\r\n with engine.with_session() as ss:\r\n contract_to_update = ss.query(LxContract).get(cid)\r\n draft_to_update = contract_to_update.draft\r\n file_biz.save_contract_file(\r\n contract_to_update.owner_id, d_content,\r\n contract_to_update.name, draft_to_update.fuuid\r\n )\r\n return jsonify({'success': True, 'data': draft_to_update.id})", "def decode(self) -> D:\n if self.has_cached_data():\n return self._data\n\n # Dispatch decoding\n data = lookup_serializer(self.encoding).loads(self.blob)\n\n self._cache_data(data)\n return data", "def get_or_create_bundle_draft(bundle_uuid, draft_name):\n bundle = get_bundle(bundle_uuid)\n try:\n return get_draft(bundle.drafts[draft_name]) # pylint: disable=unsubscriptable-object\n except KeyError:\n # The draft doesn't exist yet, so create it:\n response = api_request('post', api_url('drafts'), json={\n \"bundle_uuid\": str(bundle_uuid),\n \"name\": draft_name,\n })\n # The result of creating a draft doesn't include all the fields we want, so retrieve it now:\n return get_draft(UUID(response[\"uuid\"]))", "def decode(cls: Type[T], data: Any) -> T:\n return cls(agent_id=data['agent_id'])", "def from_dict(cls, dikt) -> 'CardholderData':\n return util.deserialize_model(dikt, cls)", "def from_yaml(input_yaml: Dict) -> \"DBRevision\":\n return DBRevision(input_yaml[\"revision_name\"],\n set(input_yaml.get(\"dependencies\")),\n input_yaml[\"sql_text\"],\n input_yaml[\"active\"],\n input_yaml.get(\"description\"))", "def get_latest_draft(self, object_id):\n latest_revision = self.get_latest_draft_revision(object_id)\n return latest_revision", "def draft_message(request):\n query = models.Message.query(\n models.Message.issue_key == request.issue.key,\n models.Message.sender == request.user.email(),\n models.Message.draft == True)\n if query.count() == 0:\n draft_message = None\n else:\n draft_message = query.get()\n if request.method == 'GET':\n return _get_draft_message(draft_message)\n elif request.method == 'POST':\n return _post_draft_message(request, draft_message)\n elif request.method == 'DELETE':\n return _delete_draft_message(draft_message)\n return HttpTextResponse('An error occurred.', status=500)", "def get_draft_revisions(self, object_id):\n content_type = ContentType.objects.get_for_model(self.model)\n return Revision.objects.filter(\n version__object_id=object_id, \n version__content_type=content_type,\n easypublishermetadata__status='draft',\n 
easypublishermetadata__language=get_language()\n ).select_related().distinct()", "def save_draft(self, account, title, body):\n account = Account(account, hive_instance=self.hive)\n draft = {'title': title, 'body': body}\n return self._conveyor_method(account, None,\n \"conveyor.save_draft\",\n [account['name'], draft])", "def wrap_draft(item):\r\n setattr(item, 'is_draft', item.location.revision == DRAFT)\r\n item.location = item.location.replace(revision=None)\r\n return item", "def create_draft(convo_ID, template_ID):\n # Get response template through helper function.\n # Make an API request to reply to a conversation with the content in that template\n response_template = get_canned_response(template_ID)\n url = \"https://api2.frontapp.com/conversations/\" + convo_ID + \"/drafts\"\n payload = {\n \"body\": response_template[\"body\"],\n \"subject\": response_template[\"subject\"],\n \"author_id\": \"tea_188ud\", # [needs to change later on]\n \"channel_id\": \"cha_14tfp\", # [also will need to be changed for team based settings]\n }\n files = []\n headers = {\"Authorization\": BEARER_TOKEN}\n requests.request(\"POST\", url, headers=headers, json=payload, files=files)", "def test_publish_draft_delete(self):\r\n location = self.old_course_key.make_usage_key('vertical', name='Vert1')\r\n item = self.draft_mongo.get_item(location, 2)\r\n self._xmodule_recurse(\r\n item,\r\n lambda i: self.draft_mongo.publish(i.location, self.userid)\r\n )\r\n # verify status\r\n item = self.draft_mongo.get_item(location, 0)\r\n self.assertFalse(getattr(item, 'is_draft', False), \"Item was published. Draft should not exist\")\r\n # however, children are still draft, but I'm not sure that's by design\r\n\r\n # convert back to draft\r\n self.draft_mongo.convert_to_draft(location)\r\n # both draft and published should exist\r\n draft_vert = self.draft_mongo.get_item(location, 0)\r\n self.assertTrue(getattr(draft_vert, 'is_draft', False), \"Item was converted to draft but doesn't say so\")\r\n item = self.old_mongo.get_item(location, 0)\r\n self.assertFalse(getattr(item, 'is_draft', False), \"Published item doesn't say so\")\r\n\r\n # delete the discussion (which oddly is not in draft mode)\r\n location = self.old_course_key.make_usage_key('discussion', name='Discussion1')\r\n self.draft_mongo.delete_item(location)\r\n # remove pointer from draft vertical (verify presence first to ensure process is valid)\r\n self.assertIn(location, draft_vert.children)\r\n draft_vert.children.remove(location)\r\n # move the other child\r\n other_child_loc = self.old_course_key.make_usage_key('html', name='Html2')\r\n draft_vert.children.remove(other_child_loc)\r\n other_vert = self.draft_mongo.get_item(self.old_course_key.make_usage_key('vertical', name='Vert2'), 0)\r\n other_vert.children.append(other_child_loc)\r\n self.draft_mongo.update_item(draft_vert, self.userid)\r\n self.draft_mongo.update_item(other_vert, self.userid)\r\n # publish\r\n self._xmodule_recurse(\r\n draft_vert,\r\n lambda i: self.draft_mongo.publish(i.location, self.userid)\r\n )\r\n item = self.old_mongo.get_item(draft_vert.location, 0)\r\n self.assertNotIn(location, item.children)\r\n with self.assertRaises(ItemNotFoundError):\r\n self.draft_mongo.get_item(location)\r\n self.assertNotIn(other_child_loc, item.children)\r\n self.assertTrue(self.draft_mongo.has_item(other_child_loc), \"Oops, lost moved item\")", "def validate(self, data):\n draft_group_id = data['draft_group']\n if draft_group_id is None:\n raise serializers.ValidationError(\"invalid 
draft_group id\")\n try:\n draftgroup.models.DraftGroup.objects.get(pk=draft_group_id)\n except draftgroup.models.DraftGroup.DoesNotExist:\n raise serializers.ValidationError('invalid draft_group id')\n\n return data" ]
[ "0.6512679", "0.6157547", "0.58887076", "0.5854836", "0.57501346", "0.5479013", "0.54623795", "0.5257162", "0.5218776", "0.51486325", "0.51365983", "0.5121651", "0.50962335", "0.50772154", "0.5046453", "0.4987209", "0.49718696", "0.49420643", "0.48823994", "0.4866242", "0.4821684", "0.4806939", "0.47689435", "0.47574726", "0.4754054", "0.4745648", "0.47145852", "0.4710929", "0.47003788", "0.46998414" ]
0.75933146
0
Create a new bundle. Note that description is currently required.
def create_bundle(collection_uuid, slug, title="New Bundle", description=""): result = api_request('post', api_url('bundles'), json={ "collection_uuid": str(collection_uuid), "slug": slug, "title": title, "description": description, }) return _bundle_from_response(result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_bundle(self):\n self._highest_bundle_id += 1\n bundle = Bundle(document=self, bundle_id=str(self._highest_bundle_id))\n self.bundles.append(bundle)\n bundle.number = len(self.bundles)\n return bundle", "def firmware_pack_create(handle, org_name, name, rack_bundle_version,\n blade_bundle_version, descr=\"\", mode=\"staged\",\n org_parent=\"org-root\"):\n\n org_dn = org_parent + \"/org-\" + org_name\n p_mo = handle.query_dn(org_dn)\n if not p_mo:\n log.info(\"Sub-Org <%s> not found!\" % org_name)\n else:\n from ucsmsdk.mometa.firmware.FirmwareComputeHostPack import\\\n FirmwareComputeHostPack\n\n mo = FirmwareComputeHostPack(parent_mo_or_dn=org_dn,\n name=name,\n descr=descr,\n rack_bundle_version=rack_bundle_version,\n mode=mode,\n blade_bundle_version=blade_bundle_version)\n handle.add_mo(mo)\n handle.commit()", "def create(self):\n self.parser.add_argument('lp_file',\n help=\"Language pack file.\")\n args = self.parser.parse_args()\n with open(args.lp_file) as lang_pack_file:\n try:\n data = json.load(lang_pack_file)\n except ValueError as exc:\n print(\"Error in language pack file: %s\", str(exc))\n sys.exit(1)\n\n json_data = json.dumps(data)\n languagepack = self.client.languagepacks.create(json_data)\n fields = ['uuid', 'name', 'description', 'compiler_versions',\n 'os_platform']\n data = dict([(f, getattr(languagepack, f, ''))\n for f in fields])\n cliutils.print_dict(data, wrap=72)", "def obj_create(self, bundle, request = None, **kwargs):\n #pdb.set_trace()\n object_id = self.get_collection(request).insert(bundle.data)\n bundle.obj = self.obj_get(request, pk = object_id)\n return bundle", "def bundle(self, app):\r\n assert(isinstance(app, BundleCreate.App))\r\n\r\n bundledir = os.path.join(self.outdir, '%s-bundle' % app.basename)\r\n self.context.log.info('creating %s' % os.path.relpath(bundledir, get_buildroot()))\r\n\r\n safe_mkdir(bundledir, clean=True)\r\n\r\n classpath = OrderedSet()\r\n if not self.deployjar:\r\n libdir = os.path.join(bundledir, 'libs')\r\n os.mkdir(libdir)\r\n\r\n # Add internal dependencies to the bundle.\r\n def add_jars(target):\r\n target_jars = self.context.products.get('jars').get(target)\r\n if target_jars is not None:\r\n for basedir, jars in target_jars.items():\r\n for internaljar in jars:\r\n os.symlink(os.path.join(basedir, internaljar),\r\n os.path.join(libdir, internaljar))\r\n classpath.add(internaljar)\r\n app.binary.walk(add_jars, lambda t: t.is_internal)\r\n\r\n # Add external dependencies to the bundle.\r\n for basedir, externaljar in self.list_jar_dependencies(app.binary):\r\n path = os.path.join(basedir, externaljar)\r\n os.symlink(path, os.path.join(libdir, externaljar))\r\n classpath.add(externaljar)\r\n\r\n for basedir, jars in self.context.products.get('jars').get(app.binary).items():\r\n if len(jars) != 1:\r\n raise TaskError('Expected 1 mapped binary for %s but found: %s' % (app.binary, jars))\r\n\r\n binary = jars[0]\r\n binary_jar = os.path.join(basedir, binary)\r\n bundle_jar = os.path.join(bundledir, binary)\r\n if not classpath:\r\n os.symlink(binary_jar, bundle_jar)\r\n else:\r\n with open_zip(binary_jar, 'r') as src:\r\n with open_zip(bundle_jar, 'w', compression=ZIP_DEFLATED) as dest:\r\n for item in src.infolist():\r\n buf = src.read(item.filename)\r\n if Manifest.PATH == item.filename:\r\n manifest = Manifest(buf)\r\n manifest.addentry(Manifest.CLASS_PATH,\r\n ' '.join(os.path.join('libs', jar) for jar in classpath))\r\n buf = manifest.contents()\r\n dest.writestr(item, buf)\r\n\r\n for bundle in 
app.bundles:\r\n for path, relpath in bundle.filemap.items():\r\n bundlepath = os.path.join(bundledir, relpath)\r\n safe_mkdir(os.path.dirname(bundlepath))\r\n os.symlink(path, bundlepath)\r\n\r\n return bundledir", "def bundle(self, app):\n assert(isinstance(app, BundleCreate.App))\n\n bundledir = os.path.join(self.outdir, '%s-bundle' % app.basename)\n self.context.log.info('creating %s' % os.path.relpath(bundledir, get_buildroot()))\n\n safe_mkdir(bundledir, clean=True)\n\n classpath = OrderedSet()\n if not self.deployjar:\n libdir = os.path.join(bundledir, 'libs')\n os.mkdir(libdir)\n\n # Add external dependencies to the bundle.\n for basedir, externaljar in self.list_jar_dependencies(app.binary):\n path = os.path.join(basedir, externaljar)\n os.symlink(path, os.path.join(libdir, externaljar))\n classpath.add(externaljar)\n\n # TODO: There should probably be a separate 'binary_jars' product type,\n # so we can more easily distinguish binary jars (that contain all the classes of their\n # transitive deps) and per-target jars.\n for basedir, jars in self.context.products.get('jars').get(app.binary).items():\n if len(jars) != 1:\n raise TaskError('Expected 1 mapped binary for %s but found: %s' % (app.binary, jars))\n\n binary = jars[0]\n binary_jar = os.path.join(basedir, binary)\n bundle_jar = os.path.join(bundledir, binary)\n # Add the internal classes into the bundle_jar.\n if not classpath:\n os.symlink(binary_jar, bundle_jar)\n else:\n # TODO: Can we copy the existing jar and inject the manifest in, instead of\n # laboriously copying the contents one by one? Would that be more efficient?\n with open_zip(binary_jar, 'r') as src:\n with open_zip(bundle_jar, 'w', compression=ZIP_DEFLATED) as dest:\n for item in src.infolist():\n buf = src.read(item.filename)\n if Manifest.PATH == item.filename:\n manifest = Manifest(buf)\n manifest.addentry(Manifest.CLASS_PATH,\n ' '.join(os.path.join('libs', jar) for jar in classpath))\n buf = manifest.contents()\n dest.writestr(item, buf)\n\n for bundle in app.bundles:\n for path, relpath in bundle.filemap.items():\n bundlepath = os.path.join(bundledir, relpath)\n safe_mkdir(os.path.dirname(bundlepath))\n os.symlink(path, bundlepath)\n\n return bundledir", "def fusion_api_create_firmware_bundle(self, body, api=None, headers=None):\n return self.driver.post(body, api, headers)", "def bundle(bundle_name='', file_list=None, bundle_size=0, meta_list=None):\n\n # validate parameters\n if bundle_name is None or bundle_name == '':\n task_error(\"Missing bundle name\")\n\n if file_list is None or len(file_list) == 0:\n task_error(\"Missing file list\")\n\n # Set up the bundle file\n bundle_path = os.path.abspath(bundle_name)\n\n # Set up the bundler object\n bundler = None\n\n bundler = TarBundler(bundle_path)\n\n bundler.bundle_file(file_list, bundle_size, meta_list)\n\n meta_str = json.dumps(meta_list)\n bundler.bundle_metadata(meta_str)\n\n TaskComm.set_state('PROGRESS', 'Bundling complete')", "def bundle_cmd(context, bundle_name, bundle_id, json, verbose, compact):\n store: Store = context.obj[\"store\"]\n bundles = store.bundles()\n\n if bundle_name:\n bundle = store.get_bundle_by_name(bundle_name=bundle_name)\n bundles = [bundle] if bundle else []\n\n if bundle_id:\n bundle = store.get_bundle_by_id(bundle_id=bundle_id)\n bundles = [bundle] if bundle else []\n\n if not bundles:\n LOG.info(\"Could not find any bundles\")\n return\n template = schema.BundleSchema()\n result = []\n for bundle in bundles:\n result.append(template.dump(bundle))\n\n if json:\n 
click.echo(jsonlib.dumps(result, indent=4, sort_keys=True))\n return\n console = Console()\n console.print(get_bundles_table(result))\n if verbose:\n for bundle in bundles:\n if len(bundle.versions) == 0:\n LOG.info(\"No versions found for bundle %s\", bundle.name)\n return\n version_obj = bundle.versions[0]\n context.invoke(\n version_cmd, version_id=version_obj.id, verbose=True, compact=compact\n )", "def obj_create(self, bundle, **kwargs):\n logger.info(\"Creating a new acknowledgement...\")\n #Create the object\n bundle.obj = Acknowledgement()\n #hydrate\n bundle = self.full_hydrate(bundle)\n \n #Set the customer\n try:\n logger.info(\"Setting customer...\")\n bundle.obj.customer = Customer.objects.get(pk=bundle.data[\"customer\"][\"id\"])\n bundle.obj.discount = bundle.obj.customer.discount\n except:\n logger.error(\"Customer with ID {0} could not be found.\".format(bundle.data['customer']['id']))\n raise\n \n #Set the employee\n try:\n logger.info(\"Setting employee...\")\n bundle.obj.employee = bundle.request.user\n except User.DoesNotExist:\n logger.error(\"User with ID {0} could not be found\".format(bundle.data['employee']['id']))\n raise\n except KeyError:\n logger.critical(\"Missing employee ID.\")\n raise\n \n #Set Status\n bundle.obj.status = \"ACKNOWLEDGED\"\n \n #Set the project or create a new one\n if \"project\" in bundle.data:\n try:\n project = Project.objects.get(pk=bundle.data['project']['id'])\n except KeyError, Project.DoesNotExist:\n try:\n project = Project()\n project.codename = bundle.data['project']['codename']\n project.save()\n except KeyError:\n project = None\n \n bundle.obj.project = project\n \n #Create items without saving them \n logger.info(\"Creating items...\")\n self.items = [Item.create(acknowledgement=bundle.obj,\n commit=False,\n **product) for product in bundle.data[\"items\"]]\n \n #Calculate the total price\n logger.info(\"Calculating balance of the order...\")\n bundle.obj.calculate_totals(self.items)\n bundle = self.save(bundle)\n \n #Save the items\n logger.info(\"Saving the items to the database...\")\n for item in self.items:\n item.acknowledgement = bundle.obj\n item.save()\n \n log_message = \"Ack {0} created on {1}. Schedule to be delivered on {1}\"\n log_message = log_message.format(bundle.obj.id,\n bundle.obj.time_created.strftime('%B %d, %Y'),\n bundle.obj.delivery_date.strftime('%B %d, %Y'))\n log = Log(message=log_message,\n delivery_date=bundle.obj.delivery_date,\n acknowledgement=bundle.obj)\n log.save()\n #Create and upload the pdfs to the \n #S3 system. The save the pdfs as\n #Attributes of the acknowledgement\n logger.info(\"Creating PDF documents...\")\n bundle.obj.create_and_upload_pdfs()\n \n \n #Add the url of the pdf to the outgoing data\n #only for when an acknowledgement is create\n try:\n ack = bundle.obj.acknowledgement_pdf\n production = bundle.obj.production_pdf\n bundle.data['pdf'] = {'acknowledgement': ack.generate_url(),\n 'production': production.generate_url()}\n except AttributeError: \n logger.warn('Missing acknowledgement or production pdf')\n \n #Conditionally email ack to Decoroom\n if \"decoroom\" in bundle.obj.customer.name.lower():\n try:\n logger.info(\"Emailing Decoroom Co., Ltd. 
the order details...\")\n bundle.obj.email_decoroom()\n except Exception as e:\n logger.error(\"Unable to mail decoroom.\")\n logger.error(e)\n \n \n \n logger.info(u\"Acknowledgement #{0} created for {1}\".format(bundle.obj.id, \n bundle.obj.customer.name)) \n return bundle", "def register_bundle(self, cls):\n return self.register_entity('bundle', cls)", "def obj_create(self, bundle, **kwargs):\n bundle.obj = self._meta.object_class()\n\n for key, value in kwargs.items():\n setattr(bundle.obj, key, value)\n\n self.authorized_create_detail(self.get_object_list(bundle.request), bundle)\n bundle = self.full_hydrate(bundle)\n bundle.obj.user_created_id = bundle.request.user.id\n return self.save(bundle)", "def create_application(name=None, description=None):\n pass", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n bundle_name: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n desktop_type: Optional[pulumi.Input[str]] = None,\n image_id: Optional[pulumi.Input[str]] = None,\n language: Optional[pulumi.Input[str]] = None,\n root_disk_performance_level: Optional[pulumi.Input[str]] = None,\n root_disk_size_gib: Optional[pulumi.Input[int]] = None,\n user_disk_performance_level: Optional[pulumi.Input[str]] = None,\n user_disk_size_gibs: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None) -> 'Bundle':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _BundleState.__new__(_BundleState)\n\n __props__.__dict__[\"bundle_name\"] = bundle_name\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"desktop_type\"] = desktop_type\n __props__.__dict__[\"image_id\"] = image_id\n __props__.__dict__[\"language\"] = language\n __props__.__dict__[\"root_disk_performance_level\"] = root_disk_performance_level\n __props__.__dict__[\"root_disk_size_gib\"] = root_disk_size_gib\n __props__.__dict__[\"user_disk_performance_level\"] = user_disk_performance_level\n __props__.__dict__[\"user_disk_size_gibs\"] = user_disk_size_gibs\n return Bundle(resource_name, opts=opts, __props__=__props__)", "def sli_create(obj, product_name, sli_file):\n client = get_client(obj)\n\n product = client.product_list(name=product_name)\n if not product:\n fatal_error('Product {} does not exist'.format(product_name))\n\n product = product[0]\n\n with Action('Creating SLI for product: {}'.format(product_name), nl=True) as act:\n sli = json.load(sli_file)\n\n validate_sli(obj, sli, act)\n\n if not act.errors:\n res = client.sli_create(product, sli['name'], sli['unit'], sli['source'])\n print(json.dumps(res, indent=4))", "def __init__(__self__, *,\n bundle_id: str):\n pulumi.set(__self__, \"bundle_id\", bundle_id)", "def test_create_software_bundle_from_system_module(self):\n pass", "def obj_create(self, bundle, request=None, **kwargs):\n request = request or bundle.request\n\n pv_required_msg = str(\"The 'productversions' key must exist, \" +\n \"must be a list, and the list must contain \" +\n \"at least one entry.\")\n # pull the productversions off, they don't exist yet\n try:\n productversions = bundle.data.pop('productversions')\n if not isinstance(productversions, list):\n raise ImmediateHttpResponse(\n response=http.HttpBadRequest(pv_required_msg))\n if not len(productversions):\n raise ImmediateHttpResponse(\n response=http.HttpBadRequest(pv_required_msg))\n\n bundle.data[\"productversions\"] = []\n except KeyError:\n raise ImmediateHttpResponse(\n 
response=http.HttpBadRequest(pv_required_msg))\n\n # create the product\n updated_bundle = super(ProductResource, self).obj_create(\n bundle=bundle, request=request, **kwargs)\n\n # create the productversions\n for pv in productversions:\n ProductVersion.objects.get_or_create(\n product=updated_bundle.obj, **pv)\n\n return updated_bundle", "def create_usstock_bundle(code, sids=None, universes=None, free=False, data_frequency=None):\n params = {}\n params[\"ingest_type\"] = \"usstock\"\n if sids:\n params[\"sids\"] = sids\n if universes:\n params[\"universes\"] = universes\n if free:\n params[\"free\"] = free\n if data_frequency:\n params[\"data_frequency\"] = data_frequency\n\n response = houston.put(\"/zipline/bundles/{}\".format(code), params=params)\n\n houston.raise_for_status_with_json(response)\n return response.json()", "def fixture_minimal_bundle_obj(case_id, timestamp) -> models.Bundle:\n return models.Bundle(name=case_id, created_at=timestamp)", "def build(self) -> Optional[Bundle]:\n # Prepare STIX2 bundle objects with author.\n bundle_objects = [self.author]\n\n # Add object marking definitions to bundle.\n bundle_objects.extend(self.object_markings)\n\n # Create intrusion sets and add to bundle.\n intrusion_sets = self._create_intrusion_sets()\n bundle_objects.extend(intrusion_sets)\n\n # Create sectors and add to bundle.\n sectors = self._create_sectors()\n bundle_objects.extend(sectors)\n\n # Intrusion sets target sectors and add to bundle.\n intrusion_sets_target_sectors = self._create_targets_relationships(\n intrusion_sets, sectors\n )\n bundle_objects.extend(intrusion_sets_target_sectors)\n\n # Create locations and add to bundle.\n locations = self._create_locations()\n bundle_objects.extend(locations)\n\n # Intrusion sets target locations and add to bundle.\n intrusion_sets_target_locations = self._create_targets_relationships(\n intrusion_sets, locations\n )\n bundle_objects.extend(intrusion_sets_target_locations)\n\n # Create observations.\n observations = self._create_ioc_observations()\n\n # Get observables and add to bundle.\n observables = [o.observable for o in observations if o.observable is not None]\n bundle_objects.extend(observables)\n\n # Get indicators, create YARA indicators and to bundle.\n indicators = [o.indicator for o in observations if o.indicator is not None]\n indicators.extend(self._create_yara_indicators())\n bundle_objects.extend(indicators)\n\n # Get observation relationships and add to bundle.\n indicators_based_on_observables = [\n o.relationship for o in observations if o.relationship is not None\n ]\n bundle_objects.extend(indicators_based_on_observables)\n\n # Indicator indicates entities, add to bundle.\n indicator_indicates = intrusion_sets\n\n indicator_indicates_entities = self._create_indicates_relationships(\n indicators, indicator_indicates\n )\n bundle_objects.extend(indicator_indicates_entities)\n\n # Create object references for the report.\n object_refs = create_object_refs(\n intrusion_sets,\n sectors,\n intrusion_sets_target_sectors,\n locations,\n intrusion_sets_target_locations,\n observables,\n indicators,\n indicators_based_on_observables,\n indicator_indicates_entities,\n )\n\n # TODO: Ignore reports without any references or not?\n # Hack, the report must have at least on object reference.\n if not object_refs:\n dummy_object = self._create_dummy_object()\n\n bundle_objects.append(dummy_object)\n object_refs.append(dummy_object)\n\n # Create report and add to bundle.\n report = self._create_report(object_refs)\n 
bundle_objects.append(report)\n\n # XXX: Without allow_custom=True the observable with the custom property\n # will cause an unexpected property (x_opencti_score) error.\n return Bundle(objects=bundle_objects, allow_custom=True)", "def catalog_create(self, args):\n try:\n if args.id and self.server.connect_ermrest(args.id).exists():\n print(\"Catalog already exists\")\n return\n owner = args.owner if args.owner else None\n catalog = self.server.create_ermrest_catalog(args.id, owner)\n if args.auto_configure:\n model = catalog.getCatalogModel()\n model.configure_baseline_catalog(**args.configure_args)\n if not args.quiet:\n print(\"Created new catalog %s with the following default configuration:\\n\" % catalog.catalog_id)\n pp(catalog.get('/').json())\n except HTTPError as e:\n if e.response.status_code == requests.codes.not_found:\n raise ResourceException('Catalog not found', e)\n elif e.response.status_code == requests.codes.conflict:\n raise ResourceException(\"Catalog already exists\", e)\n else:\n raise e", "def test_create_software_asset_bundle_from_system_module(self):\n pass", "def create_deployment(self, ApiId: str, Description: str = None, StageName: str = None) -> Dict:\n pass", "def add_link_to_bundle(request, bundle_id):\n\n # ensure bundle exists\n bundle = get_object_or_404(Bundle, id=bundle_id)\n\n # get/create link for given url\n url = request.data.get('url', None)\n\n # validate url is a url\n v = URLValidator()\n\n try:\n v(url)\n except ValidationError as exc:\n # the user must be joking\n return Response({'error': True, 'msg': 'Invalid URL'}, status=400)\n\n # assert that \"comfort_level\" is specified.\n # this is validated outside of the `Link` fields handled by\n # DRF serializer validation.\n comfort_level = int(request.data.get('comfort_level', None))\n if comfort_level not in [i[0] for i in COMFORT_LEVELS]:\n return Response({'error': True,\n 'msg': 'Please specify a reader comfort level'\n })\n\n url = urltools.normalize(url)\n\n try:\n # fetch existing link\n link = Link.objects.get(url=url)\n except Link.DoesNotExist:\n # create a new link\n link_serializer = LinkSerializer(data=request.data)\n link_serializer.is_valid(raise_exception=True)\n link = link_serializer.save()\n\n # add link to bundle\n if not BundleLink.objects.filter(bundle=bundle, link=link).exists():\n # call alchemy util to fetch concepts for URL\n concepts = bundles.alchemy_utils.get_concepts(url)\n this_bundle = BundleLink.objects.create(bundle=bundle,\n link=link,\n comfort_level=comfort_level,\n curator_id=1)\n for concept in concepts:\n this_bundle.tags.add(concept)\n\n return Response('', status=201)", "def __init__(__self__,\n resource_name: str,\n args: BundleArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def slo_create(obj, product_name, title, description, slo_file):\n client = get_client(obj)\n\n product = client.product_list(name=product_name)\n if not product:\n fatal_error('Product {} does not exist'.format(product_name))\n\n product = product[0]\n\n with Action('Creating SLO for product: {}'.format(product_name), nl=True) as act:\n if slo_file:\n slo = json.load(slo_file)\n else:\n slo = {'title': title, 'description': description}\n\n validate_slo(slo, act)\n\n if not act.errors:\n new_slo = client.slo_create(product, slo['title'], slo.get('description', ''))\n\n print(json.dumps(new_slo, indent=4))\n\n for target in slo.get('targets', []):\n t = client.target_create(new_slo, target['sli_uri'], target_from=target['from'], target_to=target['to'])\n 
act.ok('Created a new target')\n print(json.dumps(t, indent=4))", "def create(index):\n # Get the project root\n project_root = get_project_root()\n package_name = os.path.basename(project_root)\n logging.info(\"Creating package for current project: \" + package_name)\n Packager(package_name, project_root).create(index)", "def __init__(__self__, *,\n bundle_id: Optional[pulumi.Input[str]] = None):\n if bundle_id is not None:\n pulumi.set(__self__, \"bundle_id\", bundle_id)", "def makeProcessedBundle(p):\n p.splitBundle()\n return" ]
[ "0.668372", "0.62939143", "0.6256046", "0.61546004", "0.61484855", "0.6085676", "0.6012268", "0.5936996", "0.5896103", "0.5847529", "0.58094597", "0.5789102", "0.57620066", "0.57466364", "0.5724561", "0.5723571", "0.5719755", "0.5677166", "0.5674543", "0.5627814", "0.5617824", "0.56120336", "0.55604607", "0.552988", "0.5495903", "0.541213", "0.54109037", "0.54058146", "0.54049367", "0.5394359" ]
0.71486557
0
Update a bundle's title, description, slug, or collection.
def update_bundle(bundle_uuid, **fields): assert isinstance(bundle_uuid, UUID) data = {} # Most validation will be done by Blockstore, so we don't worry too much about data validation for str_field in ("title", "description", "slug"): if str_field in fields: data[str_field] = fields.pop(str_field) if "collection_uuid" in fields: data["collection_uuid"] = str(fields.pop("collection_uuid")) if fields: raise ValueError(f"Unexpected extra fields passed " # pylint: disable=dict-keys-not-iterating f"to update_bundle: {fields.keys()}") result = api_request('patch', api_url('bundles', str(bundle_uuid)), json=data) return _bundle_from_response(result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_in_place(self, request, original_bundle, new_data):\r\n\r\n # TODO: Is this the place to use MongoDB atomic operations to update the document?\r\n\r\n from tastypie.utils import dict_strip_unicode_keys\r\n original_bundle.data.update(**dict_strip_unicode_keys(new_data))\r\n\r\n # Now we've got a bundle with the new data sitting in it and we're\r\n # we're basically in the same spot as a PUT request. So the rest of this\r\n # function is cribbed from put_detail.\r\n self.alter_deserialized_detail_data(request, original_bundle.data)\r\n\r\n # Removed request from kwargs, breaking obj_get filter, currently present\r\n # in tastypie. See https://github.com/toastdriven/django-tastypie/issues/824.\r\n kwargs = {\r\n self._meta.detail_uri_name: self.get_bundle_detail_data(original_bundle),\r\n }\r\n return self.obj_update(bundle=original_bundle, **kwargs)", "def update(self, request, slug=None, **kwargs):\n article_update = self.get_object()\n serializer = self.serializer_class(\n article_update, data=request.data, partial=True\n )\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n return Response(serializer.data, status=status.HTTP_200_OK)", "def obj_update(self, bundle, request=None, **kwargs):\n\n # pull the productversions off, you can't edit them from here\n productversions = bundle.data.pop(\"productversions\", [])\n bundle.data[\"productversions\"] = []\n\n updated_bundle = super(ProductResource, self).obj_update(\n bundle=bundle, request=request, **kwargs)\n\n # create the productversions\n for pv in productversions:\n ProductVersion.objects.get_or_create(\n product=updated_bundle.obj, **pv)\n\n return updated_bundle", "def obj_update(self, bundle, skip_errors=False, **kwargs):\n try:\n updated_bundle = super(AnnotationResource, self).obj_update(bundle, skip_errors=skip_errors, **kwargs)\n return updated_bundle\n except NotFound, enf:\n raise ImmediateHttpResponse(response = http.HttpBadRequest(content=enf.get_message()))\n except MultipleObjectsReturned, emult:\n raise ImmediateHttpResponse(response = http.HttpBadRequest(content=emult.get_message()))", "def update(self, title=None, description = None):\n jsonData = self.metaData.jsonObj\n header = self._baseHeader.copy()\n\n header['Content-type'] = \"application/vnd.huddle.data+json\"\n url = self.metaData.getLink(\"edit\")\n assert url is not None\n\n if title is not None: jsonData['title'] = title\n if description is not None: jsonData['description'] = description\n\n response = self._adapter.putRequest(url, header, json.dumps(jsonData))\n\n return Document(self._client, self._client.getUrlFromHeaderLink(response['Headers']['link']))", "def update(self, request, *args, **kwargs):\n response = super(ProductViewSet, self).update(request, *args, **kwargs)\n response.data['message'] = \"Producto ha sido editado\"", "def sli_update(obj, product_name, name, sli_file):\n client = get_client(obj)\n\n product = client.product_list(name=product_name)\n if not product:\n fatal_error('Product {} does not exist'.format(product_name))\n\n product = product[0]\n\n slis = client.sli_list(product, name)\n if not slis:\n fatal_error('SLI {} does not exist'.format(name))\n\n with Action('Updating SLI {} for product: {}'.format(name, product_name), nl=True) as act:\n sli = json.load(sli_file)\n\n validate_sli(obj, sli, act)\n\n if not act.errors:\n sli['uri'] = slis[0]['uri']\n s = client.sli_update(sli)\n\n print(json.dumps(s, indent=4))", "def update(self, request, pk=None): #update a specific object\n return 
Response({'http_method': 'PUT'})", "def test_update(self):\n obj = self.provision_single_asset()\n test_string = \"testing this thing\"\n p = {'id': obj.id, 'description': test_string}\n self.put('widget', 200, params=p)\n self.session.refresh(obj)\n assert obj.description == test_string", "def _accept_bundle(self, bundle):\n duration = bundle.duration\n supply_cost = 0\n # 1. Build a mapping from resource-specific info to resource record\n res_to_record_mapping = self._res_man.get_res_to_record_mapping()\n # 2. Add usage for zones\n zones = bundle.copy_zones()\n for zone in zones:\n zone_id = zone.zone_id\n for resource in zone.resources:\n res_type = resource.get_res_type()\n qty = resource.get_value()\n record = res_to_record_mapping[zone_id][res_type]\n self._res_man.update_res_usage(record, qty, duration)\n supply_cost += record.get_supply_cost()\n # 3. Add usage for links\n links = bundle.copy_links()\n for link in links:\n src_zone_id, dst_zone_id = link.get_src_and_dst()\n qty = link.get_value()\n record = res_to_record_mapping[src_zone_id][dst_zone_id]\n self._res_man.update_res_usage(record, qty, duration)\n supply_cost += record.get_supply_cost()\n # 4. Update bookkeeping\n self._revenue += bundle.payment\n self._expenses += supply_cost\n logger.debug(\n 'Updating books...\\n'\n f'\\tTotal revenue: {self._revenue}\\n'\n f'\\tTotal supply cost: {self._expenses}\\n'\n )\n if self._use_price_token and bundle.has_price_token():\n # Need to expire the token if it was used to compute the prices\n # this time\n token = bundle.get_price_token()\n with self._history_lock:\n try:\n del self._price_history[token]\n logger.debug(f'Deleted token {token}')\n except KeyError:\n # Token happened to expire between time prices were computed\n # and here\n pass", "def update(self, request, pk=None):\n return Response({'http_method': 'PUT'})", "def obj_update(self, bundle, **kwargs):\n logger.info(\"Updating acknowledgement...\")\n \n bundle = super(AcknowledgementResource, self).obj_update(bundle, **kwargs)\n \n bundle.obj.create_and_upload_pdfs()\n \n return bundle", "def update_object(self, name: str) -> None:", "def update_object(self, oid, name, url):\n r = self.request(\n 'put',\n safeformat('registry/objects/{:int}/', oid),\n json.dumps({\n 'description': {\n 'name': name,\n 'url': url\n }\n })\n )\n return self._extract_id_from_batch_response(r, 'oid')", "def obj_update(self, bundle, request=None, **kwargs):\n request = request or bundle.request\n bundle = self.check_read_create(bundle)\n\n try:\n # use grandparent rather than parent\n bundle = super(MTResource, self).obj_update(\n bundle, **kwargs)\n\n # update the cc_version\n bundle.obj.cc_version = self.model.objects.get(\n id=bundle.obj.id).cc_version\n\n # specify the user\n bundle.obj.save(user=request.user)\n\n except Exception: # pragma: no cover\n logger.exception(\"error updating %s\", bundle) # pragma: no cover\n raise # pragma: no cover\n\n return bundle", "def update(self, request, pk=None):\n\n return Response({'http_method': 'PUT'})", "def update(self, request, slug):\n serializer_context = {'request': request}\n try:\n serializer_instance = self.queryset.get(slug=slug)\n except Article.DoesNotExist:\n raise NotFound(\"An article with this slug doesn't exist.\")\n\n if not serializer_instance.author_id == request.user.profile.id:\n raise PermissionDenied(\n \"You are not authorized to edit this article.\")\n\n serializer_data = request.data.get('article', )\n\n serializer = self.serializer_class(\n serializer_instance,\n 
context=serializer_context,\n data=serializer_data,\n partial=True\n )\n\n serializer.is_valid(raise_exception=True)\n\n return Response(serializer.data, status=status.HTTP_200_OK)", "def put(self, request, pk=None): #pk of id of objects to be updated (DB)\n return Response({'method':'PUT'})", "def slo_update(obj, product_name, slo_id, title, description, slo_file):\n client = get_client(obj)\n\n product = client.product_list(name=product_name)\n if not product:\n fatal_error('Product {} does not exist'.format(product_name))\n\n product = product[0]\n\n slo = client.slo_list(product, id=slo_id)\n if not slo:\n fatal_error('SLO {} does not exist'.format(slo_id))\n\n slo = slo[0]\n\n with Action('Updating SLO {} for product {}'.format(slo_id, slo['product_name']), nl=True) as act:\n if slo_file:\n slo = json.load(slo_file)\n slo['uri'] = slo['uri']\n else:\n if title:\n slo['title'] = title\n if description:\n slo['description'] = description\n\n validate_slo(slo, act)\n\n if not act.errors:\n slo = client.slo_update(slo)\n\n print(json.dumps(slo, indent=4))", "def update(self, request, pk=None):\n\n return Response({'http_method':'PUT'})", "def updateItem(self, object):\n pass", "def update(self, request, pk=None):\n\n job = Job.objects.get(pk=pk)\n job.title = request.data[\"title\"]\n job.description = request.data[\"description\"]\n job.city = request.data[\"city\"]\n job.state = request.data[\"state\"]\n job.application = request.data[\"application\"]\n user = request.auth.user\n job.user = user\n job.save()\n\n return Response({}, status=status.HTTP_204_NO_CONTENT)", "def patch(self, request, slug, **kwargs):\n request.POST._mutable = True\n payload = request.data\n payload.pop('client', None)\n obj = self.get_object()\n # update main image\n updated_main_image = Uploader.upload_image_from_request(request)\n if updated_main_image:\n payload['image_main'] = updated_main_image\n # update image list\n updated_image_list = Uploader.upload_image_batch(\n request, instance=obj)\n if updated_image_list:\n payload.setlist('image_others', updated_image_list)\n # update videos\n video = Uploader.upload_video_from_request(request)\n if video:\n payload['video'] = video\n serializer = self.serializer_class(obj, data=payload, partial=True)\n serializer.is_valid(raise_exception=True)\n serializer.update(obj, payload)\n response = {\n \"data\": {\"property\": serializer.data},\n \"message\": \"Successfully updated your property\"\n\n }\n return Response(response)", "def bundle_id(self, bundle_id):\n\n self._bundle_id = bundle_id", "def update_book(isbn):\n put_req = request.get_json()\n if not (Book.replace_book(isbn, put_req['name'], put_req['price'])):\n invalid_book_object_error_msg = {\n \"error\": \"Invalid book object update passed in PUT request\",\n \"helpString\": \"Valid data format is {'name': 'bookname', 'price': 7.9, 'isbn': 12345678}\"\n }\n # Because invalidBookObjectErrorMsg is a dictionary, need to convert it into a json object.\n # Set Header info for location (location of endpoint in request)\n return Response(json.dumps(invalid_book_object_error_msg), status=406, mimetype='application/json')\n # See https://www.flaskapi.org/api-guide/status-codes/ for flask API\n # response codes\n response = Response(\"\", 204, mimetype='application/json')\n response.headers['Location'] = \"/books/\" + str(isbn)\n return response", "def obj_update(self, bundle, skip_errors=False, **kwargs): \n from tastypie.serializers import Serializer\n \n try:\n serdes = Serializer()\n deserialized = None\n 
try:\n deserialized = serdes.deserialize(bundle.request.raw_post_data, \n format=bundle.request.META.get('CONTENT_TYPE', 'application/json'))\n except Exception:\n deserialized = None\n del serdes\n \n if deserialized is None:\n return ImmediateHttpResponse(response = http.HttpBadRequest())\n \n if 'unregister_c2dm' in deserialized and deserialized['unregister_c2dm'] == True:\n bundle.data['c2dm_id'] = None\n \n updated_bundle = super(UserResource, self).obj_update(bundle, skip_errors=skip_errors, **kwargs)\n return updated_bundle\n except (NotFound, MultipleObjectsReturned):\n raise ImmediateHttpResponse(response = http.HttpBadRequest())", "def update(self, request, pk=None):\n\n return Response({'http_method': 'PUT'})", "def put(self):\n request = transforms.loads(self.request.get('request'))\n\n if not self.assert_xsrf_token_or_fail(\n request, 'update-category', {}):\n return\n\n if not roles.Roles.is_super_admin():\n transforms.send_json_response(\n self, 401, 'Access denied.')\n return\n\n payload = request.get('payload')\n updated_dict = transforms.json_to_dict(\n transforms.loads(payload), self.get_schema_dict())\n\n errors = []\n self.apply_updates(updated_dict, errors)\n if not errors:\n transforms.send_json_response(self, 200, 'Saved.')\n else:\n transforms.send_json_response(self, 412, '\\n'.join(errors))", "def update(self,request,pk = None):\n return Response({'http_method':'PUT'})", "def update(self, resource, id, **data):\n self.request('/' + resource + '/' + str(id), 'PUT', body=urllib.urlencode(data))\n return True" ]
[ "0.64282304", "0.5881592", "0.58728546", "0.5861083", "0.5718554", "0.56827384", "0.5633981", "0.5599242", "0.55933994", "0.55191696", "0.54296964", "0.5405867", "0.54009414", "0.5397951", "0.5393401", "0.5391082", "0.5379236", "0.53778917", "0.5375577", "0.5358188", "0.535537", "0.53334874", "0.53207755", "0.53112215", "0.52960026", "0.5283612", "0.52289224", "0.5210727", "0.51973164", "0.5193034" ]
0.7425863
0
Delete the specified draft, removing any staged changes/files/deletes. Does not return any value.
def delete_draft(draft_uuid): api_request('delete', api_url('drafts', str(draft_uuid)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete(self,\n draft_id,\n ):\n return self._invoke('delete',\n {\n 'draft_id': draft_id,\n })", "def _delete_draft_message(draft):\n if draft is not None:\n draft.key.delete()\n return HttpTextResponse('OK')", "def do_delete_draft(draft_id: int, user_profile: UserProfile) -> None:\n try:\n draft_object = Draft.objects.get(id=draft_id, user_profile=user_profile)\n except Draft.DoesNotExist:\n raise ResourceNotFoundError(_(\"Draft does not exist\"))\n\n draft_id = draft_object.id\n draft_object.delete()\n\n event = {\"type\": \"drafts\", \"op\": \"remove\", \"draft_id\": draft_id}\n send_event(user_profile.realm, event, [user_profile.id])", "def DeleteDraft(host, change):\n path = _GetChangePath(change)\n try:\n FetchUrl(host, path, reqtype='DELETE', ignore_204=True, ignore_404=False)\n except GOBError as e:\n # On success, gerrit returns status 204; anything else is an error.\n if e.http_status != 204:\n raise\n else:\n raise GOBError(\n 200, 'Unexpectedly received a 200 http status while deleting draft %r'\n % change)", "def delete_drafts(request):\n query = models.Comment.query(\n models.Comment.author == request.user, models.Comment.draft == True,\n ancestor=request.issue.key)\n keys = query.fetch(keys_only=True)\n ndb.delete_multi(keys)\n request.issue.calculate_draft_count_by_user()\n request.issue.put()\n return HttpResponseRedirect(\n reverse(publish, args=[request.issue.key.id()]))", "def abort(self,\n draft_id,\n ):\n return self._invoke('abort',\n {\n 'draft_id': draft_id,\n })", "def test_publish_draft_delete(self):\r\n location = self.old_course_key.make_usage_key('vertical', name='Vert1')\r\n item = self.draft_mongo.get_item(location, 2)\r\n self._xmodule_recurse(\r\n item,\r\n lambda i: self.draft_mongo.publish(i.location, self.userid)\r\n )\r\n # verify status\r\n item = self.draft_mongo.get_item(location, 0)\r\n self.assertFalse(getattr(item, 'is_draft', False), \"Item was published. 
Draft should not exist\")\r\n # however, children are still draft, but I'm not sure that's by design\r\n\r\n # convert back to draft\r\n self.draft_mongo.convert_to_draft(location)\r\n # both draft and published should exist\r\n draft_vert = self.draft_mongo.get_item(location, 0)\r\n self.assertTrue(getattr(draft_vert, 'is_draft', False), \"Item was converted to draft but doesn't say so\")\r\n item = self.old_mongo.get_item(location, 0)\r\n self.assertFalse(getattr(item, 'is_draft', False), \"Published item doesn't say so\")\r\n\r\n # delete the discussion (which oddly is not in draft mode)\r\n location = self.old_course_key.make_usage_key('discussion', name='Discussion1')\r\n self.draft_mongo.delete_item(location)\r\n # remove pointer from draft vertical (verify presence first to ensure process is valid)\r\n self.assertIn(location, draft_vert.children)\r\n draft_vert.children.remove(location)\r\n # move the other child\r\n other_child_loc = self.old_course_key.make_usage_key('html', name='Html2')\r\n draft_vert.children.remove(other_child_loc)\r\n other_vert = self.draft_mongo.get_item(self.old_course_key.make_usage_key('vertical', name='Vert2'), 0)\r\n other_vert.children.append(other_child_loc)\r\n self.draft_mongo.update_item(draft_vert, self.userid)\r\n self.draft_mongo.update_item(other_vert, self.userid)\r\n # publish\r\n self._xmodule_recurse(\r\n draft_vert,\r\n lambda i: self.draft_mongo.publish(i.location, self.userid)\r\n )\r\n item = self.old_mongo.get_item(draft_vert.location, 0)\r\n self.assertNotIn(location, item.children)\r\n with self.assertRaises(ItemNotFoundError):\r\n self.draft_mongo.get_item(location)\r\n self.assertNotIn(other_child_loc, item.children)\r\n self.assertTrue(self.draft_mongo.has_item(other_child_loc), \"Oops, lost moved item\")", "def remove_draft(self, account, uuid):\n account = Account(account, hive_instance=self.hive)\n return self._conveyor_method(account, None,\n \"conveyor.remove_draft\",\n [account['name'], uuid])", "def delete_integrations_action_draft(self, action_id, **kwargs):\n\n all_params = ['action_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_integrations_action_draft\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'action_id' is set\n if ('action_id' not in params) or (params['action_id'] is None):\n raise ValueError(\"Missing the required parameter `action_id` when calling `delete_integrations_action_draft`\")\n\n\n resource_path = '/api/v2/integrations/actions/{actionId}/draft'.replace('{format}', 'json')\n path_params = {}\n if 'action_id' in params:\n path_params['actionId'] = params['action_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['PureCloud OAuth']\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n 
auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def commit_draft(draft_uuid):\n api_request('post', api_url('drafts', str(draft_uuid), 'commit'))", "def action_draft(self):\n options=self.env['plm.config.settings'].GetOptions()\n status = 'draft'\n action = 'draft'\n default = {\n 'state': status,\n 'engineering_writable': True,\n }\n doc_default = {\n 'state': status,\n 'writable': True,\n }\n operationParams = {\n 'status': status,\n 'statusName': _('Draft'),\n 'action': action,\n 'docaction': 'draft',\n 'excludeStatuses': ['draft', 'released', 'undermodify', 'obsoleted'],\n 'includeStatuses': ['confirmed', 'uploaded', 'transmitted'],\n 'default': default,\n 'doc_default': doc_default,\n }\n if options.get('opt_showWFanalysis', False):\n return self.action_check_workflow(operationParams)\n else:\n ids=self._ids\n self.logging_workflow(ids, action, status)\n return self._action_to_perform(ids, operationParams, default)", "def get_draft(draft_uuid):\n assert isinstance(draft_uuid, UUID)\n try:\n data = api_request('get', api_url('drafts', str(draft_uuid)))\n except NotFound:\n raise DraftNotFound(f\"Draft does not exist: {draft_uuid}\") # lint-amnesty, pylint: disable=raise-missing-from\n return _draft_from_response(data)", "async def delete(self):\n return await self._state.delete_team(self.id)", "def get(self,\n draft_id,\n ):\n return self._invoke('get',\n {\n 'draft_id': draft_id,\n })", "def delete(self):\n self.current_revision.delete()", "def delete(self):\n return self.client._perform_empty(\n \"DELETE\", \"/projects/%s/managedfolders/%s\" % (self.project_key, self.odb_id))", "def create_draft(self):\n return Draft(self)", "def draft_message(request):\n query = models.Message.query(\n models.Message.issue_key == request.issue.key,\n models.Message.sender == request.user.email(),\n models.Message.draft == True)\n if query.count() == 0:\n draft_message = None\n else:\n draft_message = query.get()\n if request.method == 'GET':\n return _get_draft_message(draft_message)\n elif request.method == 'POST':\n return _post_draft_message(request, draft_message)\n elif request.method == 'DELETE':\n return _delete_draft_message(draft_message)\n return HttpTextResponse('An error occurred.', status=500)", "def get(self, oauth, resource_id, draft_id):\n d = Deposition.get(resource_id, user=current_user)\n return d.type.marshal_draft(d.get_draft(draft_id))", "def get_draft(self, draft_number: Optional[int] = None) -> Draft:\n if draft_number is None:\n self._status.check_authority_for_draft()\n draft_number = self._status.draft_number\n\n if not draft_number:\n raise TypeError(\"The given draft number is illegal\")\n\n for draft in self.list_drafts():\n if draft_number == draft.number:\n return draft\n\n raise ResourceNotExistError(resource=\"draft\", identification=draft_number)", "def get_draft_by_id(request, draft_id):\n\n for draft in request.session[\"drafts\"]:\n if draft[\"id\"] == draft_id:\n # Found a valid draft, return it\n return draft\n\n return None # Otherwise return None.", "def cmd_conversation_delete(client, args):\n delete_conversation = client.delete_conversation(args.conversation_id)\n generate_output({'delete_conversation': delete_conversation})", "def _draft_from_response(data):\n return Draft(\n uuid=UUID(data['uuid']),\n bundle_uuid=UUID(data['bundle_uuid']),\n name=data['name'],\n updated_at=dateutil.parser.parse(data['staged_draft']['updated_at']),\n files={\n path: DraftFile(path=path, **file)\n for path, file in 
data['staged_draft']['files'].items()\n },\n links={\n name: DraftLinkDetails(\n name=name,\n direct=LinkReference(**link[\"direct\"]),\n indirect=[LinkReference(**ind) for ind in link[\"indirect\"]],\n modified=link[\"modified\"],\n )\n for name, link in data['staged_draft']['links'].items()\n }\n )", "def userproject_post_delete(sender, instance, **kwargs):\n instance.document.delete(False)", "def delete_stored_project():\n client = RequestManager()\n client.set_method(\"DELETE\")\n client.set_endpoint(\"/projects/{0}\".format(STORED_ID['project_id']))\n client.execute_request()", "def _revert_to_draft(self):\n self.status = self.DRAFT\n try:\n with transaction.atomic():\n for signup in self.signups.all():\n signup.delete()\n self.save()\n except DatabaseError:\n self.status = self.REGISTRATION", "def test_delete(self):\n thread = self.create_thread()\n ut = UserThread.objects.get(\n user=thread.recipients.first(), thread=thread)\n ut_id = ut.pk\n ut.delete()\n ut = UserThread.objects.with_deleted().get(pk=ut_id)\n self.assertEqual(ut.status, 'deleted')", "def delete(self, guid):\n if helpers.authorized(self.request.params['UUID'], self.request.params['ATO'], self.request.params['action']):\n # search for the Project and delete if found\n key = db.Key.from_path('Project', int(guid))\n project = db.get(key)\n if not project == None:\n project.delete()\n self.response.set_status(204, \"Deleted\")\n else:\n self.response.set_status(404, \"Not Found\")\n else:\n self.response.set_status(401, \"Not Authorized\")", "def delete(self):\n pdbox._args.get(\"dryrun\") or shutil.rmtree(self.path)\n pdbox.info(\"Deleted %s/\" % self.path)", "def delete(self):\n\n headers = self._default_headers()\n\n return self._request(self.name,\n ok_status=None,\n data=None,\n headers=headers,\n method=\"DELETE\")" ]
[ "0.8477247", "0.7536803", "0.70039415", "0.6800242", "0.6678656", "0.6091169", "0.59882015", "0.598721", "0.5658908", "0.5609526", "0.55231327", "0.55021924", "0.5446673", "0.54136", "0.5397559", "0.53314614", "0.53098047", "0.5283851", "0.52465004", "0.5162799", "0.5152421", "0.514619", "0.51282775", "0.51045734", "0.5083122", "0.50827837", "0.50825983", "0.50642145", "0.5048877", "0.5039136" ]
0.78767365
1
Get the details of the specified bundle version
def get_bundle_version(bundle_uuid, version_number): if version_number == 0: return None version_url = api_url('bundle_versions', str(bundle_uuid) + ',' + str(version_number)) return api_request('get', version_url)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_version():\n return about.get_version()", "def get_version(self):\n return self.http_call(\"get\", url=f\"{self.base_url}/version\").json()", "def _get_via_app_bundle(self, path: pathlib.Path | str) -> str:\n\n path = pathlib.Path(path) / \"Contents\" / \"Info.plist\"\n\n if not path.exists():\n logger.warning(\n f\"Could not determine application version. Missing: {path}...\"\n )\n return \"?\"\n\n with open(path, \"rb\") as f:\n data = plistlib.load(f)\n\n bundle_short_version: str = data.get(\"CFBundleShortVersionString\", \"?\")\n bundle_version: str = data.get(\"CFBundleVersion\", None)\n\n if bundle_version is None:\n return f\"{bundle_short_version}\"\n\n return f\"{bundle_short_version}-{bundle_version}\"", "def version(self):\n _, body = self.request('/', 'GET')\n return body.get('version', None)", "def get_version(self):\n url = '{}/v2/version'.format(self.url)\n try:\n r = requests.get(url)\n if r.status_code == 200:\n return r.json()['version']\n except Exception as e:\n pass\n return ''", "def get_version(self) -> Dict[str, str]:\n return self.http.get(self.config.paths.version)", "def GetVersion(self):\n return self._SendRequest(HTTP_GET, \"/version\", None, None)", "def get_version():\n return '%d.%d.%d' % version_info", "def get_version():\n click.echo(get_current_version_number())", "def get_version(self):\n url = '{}/version'.format(self.url)\n try:\n r = requests.get(url)\n if r.status_code == 200:\n return r.json()['version']\n except Exception as e:\n pass\n return ''", "def get_version(self):\n return self.__make_api_call('get/version')", "def _get_version(self):", "def version_get():\n try:\n return json_response.success({'version': version.local_version()})\n except version.Error as e:\n return json_response.error(str(e)), 200", "def determine_version(self, bundle, ctx, hunk=None):\n raise NotImplementedError()", "def get_version_info(self):\n return self._jadeRpc('get_version_info')", "def get_release_info(self):\r\n return self.detail_info.get_release_info(self.version)", "def get_version(self):\n return self.cur_config['version']['name']", "def get_release_info(self, version):\r\n try:\r\n return self._detail[\"releases\"][version]\r\n except KeyError as key_error:\r\n log.warning(key_error)\r\n return []", "def get_version(self):\n url = '{}/version'.format(self.url)\n try:\n r = requests.get(url)\n if r.status_code == 200:\n return r.json()['orionld version']\n except Exception as e:\n pass\n return ''", "def do_get_version(self, arg):\n arg = arg\n print(self.phil.if_version)", "def get_version(self):\n pass", "def read_version():\n # code parts were taken from here https://stackoverflow.com/a/67692\n\n path2setup = os.path.dirname(__file__)\n version_file = os.path.abspath(\n os.path.join(path2setup, \"diffusion_maps\", \"version.py\"))\n\n spec = importlib.util.spec_from_file_location(\"version\", version_file)\n version = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(version)\n return version.version.v_short", "def get_version(self):\n res = requests.get(self.base_url + '/version')\n\n return res", "def info(self):\n version_str = self.version\n return Utils.version_str2tuple(version_str)", "def getVersion(self):\n return self.get('Version', type=\"numeric\")", "def version(self):\n return self._get(\"version\")", "def get_version(self):\n data = self._get('app_version')\n return data['version']", "def get_application_version(self):\n return self.connector.request('GET', '/app/version')", "def get_version(self):\n return 
self.version", "def getVersionInfo(cls):\n\n return __version__ + \"\\n\"" ]
[ "0.71063685", "0.7055568", "0.69581395", "0.6899469", "0.68665576", "0.68434507", "0.6821725", "0.6754685", "0.6730333", "0.6705119", "0.67003167", "0.6696833", "0.6665585", "0.66480154", "0.6647472", "0.663893", "0.6636554", "0.66245484", "0.6607001", "0.658064", "0.6521611", "0.65128034", "0.6505741", "0.64942706", "0.64731646", "0.64481944", "0.64394665", "0.6415042", "0.6412496", "0.6400011" ]
0.7637766
0
Get a list of the files in the specified bundle version
def get_bundle_version_files(bundle_uuid, version_number): if version_number == 0: return [] version_info = get_bundle_version(bundle_uuid, version_number) return [BundleFile(path=path, **file_metadata) for path, file_metadata in version_info["snapshot"]["files"].items()]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_version_files(self, package, version):\n with self._conn.begin():\n return {\n row.filename\n for row in self._conn.execute(\n \"SELECT filename \"\n \"FROM get_version_files(%s, %s)\", (package, version)\n )\n }", "def get_bundle_files(bundle_uuid, use_draft=None):\n return get_bundle_files_dict(bundle_uuid, use_draft).values() # lint-amnesty, pylint: disable=dict-values-not-iterating", "def versions(self, name):\n if not len(self):\n self.update()\n return [version for version in self if os.path.basename(version) == name]", "def retrieve(self,version=None):\n result = []\n groups = (os.path.split(x) for x in self.list_filenames())\n groups2 = itertools.groupby(groups,operator.itemgetter(0))\n groups3 = ((k,[x[1] for x in g]) for k,g in groups2)\n for (result_version, filenames) in groups3:\n if not version or version == result_version:\n for filename in filenames:\n filename = os.path.join(self.archive_path,result_version,filename)\n result.append(RunResults.load(filename))\n return result", "def versions_of_recipe(recipe):\n\n versions = []\n for entry in os.listdir(\"../../meta-mender-core/recipes-mender/%s/\" % recipe):\n match = re.match(r\"^%s_([1-9][0-9]*\\.[0-9]+\\.[0-9]+[^.]*)\\.bb\" % recipe, entry)\n if match is not None:\n versions.append(match.group(1))\n return versions", "def files_cmd(\n context,\n tag_names: List[str],\n version_id: int,\n verbose: bool,\n bundle: str,\n json: bool,\n compact: bool,\n):\n store: Store = context.obj[\"store\"]\n file_objs = store.get_files(\n bundle_name=bundle, tag_names=tag_names, version_id=version_id\n )\n template = schema.FileSchema()\n result = []\n for file in file_objs:\n result.append(template.dump(file))\n if json:\n click.echo(jsonlib.dumps(result))\n return\n console = Console()\n console.print(get_files_table(result, verbose=verbose, compact=compact))", "def readVersionList(filename):\n\ttry:\n\t\tlines = []\n\t\tif os.path.isfile(filename):\n\t\t\twith open(r''+ filename, 'r') as f:\n\t\t\t\tlines = f.readlines()\n\t\treturn lines\n\texcept IOError as e:\n\t\tprint(traceback.format_exc())\n\t\tinfo = filename + 'can\\'t open'\n\t\tdoExit(0, info)", "def list_installed(self) -> Generator[Path, None, None]:\n LOGGER.verbose(\"checking %s for Terraform versions...\", self.versions_dir)\n return self.versions_dir.rglob(\"*.*.*\")", "def get_used_versions(self, egg_directory):\n return [\n egg.split('-')[0]\n for egg in os.listdir(egg_directory)\n if egg.endswith('.egg')\n ]", "def list_bundles():\n response = houston.get(\"/zipline/bundles\")\n\n houston.raise_for_status_with_json(response)\n return response.json()", "def full_find(self, file, version):\n matches = []\n for root, dirnames, filenames in os.walk(self.checkout_path(version)):\n for filename in fnmatch.filter(filenames, file):\n matches.append(os.path.join(root, filename))\n return matches", "def get_package_versions(name: str) -> List[str]:\n with request.urlopen(PYPI_SIMPLE_API_URL + name) as response:\n html = response.read()\n\n return re.findall(f'>{name}-(.+).tar', html.decode())", "def get_file_list(pkg_root_dir, pkg_num, tree_size):\n\n # Get the bundle list\n pkg_dir = os.path.join(pkg_root_dir, build_package_name(pkg_num))\n bundle_list = get_bundle_list(pkg_dir, tree_size)\n\n file_list = []\n for bundle in bundle_list:\n # The dict must contain the path relative to the bittorrent download dir + the name the user choose to store\n # the files downloaded from this torrent. 
As such, the file_path is just the relative path to the bundle, from\n # the pkg_root_dir\n # The dict must also contain the length of said file\n file_info = os.stat(os.path.join(pkg_dir, bundle))\n file_path = [build_package_name(pkg_num), bundle]\n file_list.append(\n {\n 'length': file_info.st_size,\n 'path': file_path\n }\n )\n\n # The torrent also contains the STH file for a specified tree_size\n sth_fn = build_sth_name(tree_size)\n file_info = os.stat(os.path.join(pkg_root_dir, sth_fn))\n file_list.append({'length': file_info.st_size, 'path': [sth_fn]})\n\n # The info file containing the pkg_hash and the merkle proof for this package up to the STH must be included for\n # downloaders to be able to verify this package\n info_file_fn = build_info_file_name(pkg_num, tree_size)\n file_info = os.stat(os.path.join(pkg_root_dir, info_file_fn))\n file_list.append({'length': file_info.st_size, 'path': [info_file_fn]})\n\n return file_list", "def get_files(self):\r\n return self._filelist", "def _list_files(product, date=None):\n\n config = 'long_range' if 'long_range' in product else product\n member = _product_to_member_arg(product)\n date = _date_to_start_date_arg(date)\n template = 'api/GetFileList/?config={config}&geom=channel{date}{member}'\n args = template.format(config=config, date=date, member=member)\n uri = HS_DATA_EXPLORER_URI + args\n response = urlopen(uri).read()\n files = json.loads(response)\n if not isinstance(files, list):\n return []\n if product == 'analysis_assim' and date != '':\n yyyymmdd = re.findall('\\d{4}-\\d{2}-\\d{2}', date)[0]\n yyyymmdd = yyyymmdd.replace('-', '')\n files = [f for f in files if _date_from_filename(f) == yyyymmdd]\n return files", "def find(self, file, version):\n matches = []\n for root, dirnames, filenames in os.walk(self.full_doc_path(version)):\n for filename in fnmatch.filter(filenames, file):\n matches.append(os.path.join(root, filename))\n return matches", "def listFiles(self):\n pass", "def _sdk_versions_from_bin(cls, sdk_dir: Path) -> list[str]:\n bin_dir = sdk_dir / \"bin\"\n # prioritize newer versions of the SDK\n version_dirs = sorted(bin_dir.glob(f\"{cls.SDK_VERSION}.*.0/\"), reverse=True)\n return [d.name for d in version_dirs]", "def get_vendor_bundle_path() -> str:\n vendor_bundle_directory = os.path.join(os.path.dirname(__file__), \"dist\", \"js\")\n file_list_with_full_path = []\n for f in os.listdir(vendor_bundle_directory):\n file_path = os.path.join(vendor_bundle_directory, f)\n if os.path.isfile(file_path):\n if os.path.splitext(file_path)[-1].endswith(\"js\"):\n if os.path.splitext(f)[0].startswith(\"chunk-vendors\"):\n file_list_with_full_path.append(os.path.abspath(file_path))\n return file_list_with_full_path[0]", "def select_versions(self):\n return []", "def get_versions_from_path(self, path):\n if not path:\n return []\n\n # convert '\\\\' to '/'\n path = os.path.normpath(path).replace(\"\\\\\", \"/\")\n from stalker import Repository\n\n os_independent_path = Repository.to_os_independent_path(path)\n logger.debug(\"os_independent_path: %s\" % os_independent_path)\n\n from stalker import Version\n from stalker.db.session import DBSession\n\n # try to get all versions with that info\n with DBSession.no_autoflush:\n versions = Version.query.filter(\n Version.full_path.startswith(os_independent_path)\n ).all()\n\n return versions", "def get_installed_files(packagename, venv_pip, temp_dir):\n result = check_output(venv_pip + ['show', '-f', packagename])\n result = (result.decode()).split('\\n')\n files = []\n\n for 
line in result:\n # this line contains path to venv directory\n if line.startswith('Location:'):\n line = line[len('Location: '):]\n prefix = '/' + line.replace(temp_dir, 'usr') + '/'\n if line.startswith(' '*2):\n path = os.path.abspath(prefix + line.strip())\n if os.path.isdir(path):\n path += \"/\"\n files.append(path)\n return files", "def get_versions():\n ret_obj = {'versions': picard_versions(current_app)}\n return make_response(jsonify(ret_obj), 200)", "def get_tool_version_files():\n similar_files = defaultdict(list)\n for path in Runtime_Datasets.RAW_FILE_PATHS:\n filename = get_file_name(path)\n filename = filename.rsplit('_', 1)[0]\n similar_files[filename].append(path)\n\n Runtime_Datasets.RAW_FILE_PATHS = similar_files", "def get_project_files(self, package):\n with self._conn.begin():\n return [\n ProjectFilesRow(*row)\n for row in self._conn.execute(\n \"SELECT version, platform_tag, builder_abi, file_abi_tag, \"\n \"filename, filesize, filehash, yanked, requires_python, \"\n \"dependencies \"\n \"FROM get_project_files(%s)\", (package,)\n )\n ]", "def files(self):\n return [surrogate(name) for name in self.hdr[rpm.RPMTAG_FILENAMES]]", "def list_sources(topdir, version):\n sources = []\n with open(os.path.join(topdir, 'SPECS', 'openafs.spec'), 'r') as spec:\n for line in spec.readlines():\n line = line.rstrip()\n m = re.match(r'Source[\\d]+: (.*)', line)\n if m:\n source = m.group(1).replace(r'%{afsvers}',\n version['openafs_version'])\n sources.append(os.path.basename(source))\n return sources", "def list_patches(topdir, version):\n patches = []\n with open(os.path.join(topdir, 'SPECS', 'openafs.spec'), 'r') as spec:\n for line in spec.readlines():\n line = line.rstrip()\n m = re.match(r'Patch[\\d]+: (.*)', line)\n if m:\n patch = m.group(1).replace(r'%{afsvers}',\n version['openafs_version'])\n patches.append(os.path.basename(patch))\n return patches", "def file_list(load):\n if \"env\" in load:\n # \"env\" is not supported; Use \"saltenv\".\n load.pop(\"env\")\n\n ret = []\n\n if \"saltenv\" not in load:\n return ret\n\n saltenv = load[\"saltenv\"]\n metadata = _init()\n\n if not metadata or saltenv not in metadata:\n return ret\n for bucket in _find_files(metadata[saltenv]):\n for buckets in bucket.values():\n files = [f for f in buckets if not fs.is_file_ignored(__opts__, f)]\n ret += _trim_env_off_path(files, saltenv)\n\n return ret", "def get_list_of_comitted_files():\n files = []\n output = []\n try:\n output = subprocess.check_output(['git','diff-index', '--name-status', '--cached','HEAD']\n ).decode(\"utf-8\")\n except subprocess.CalledProcessError:\n print(\"Error diff files get: trace %s\" % subprocess.CalledProcessError)\n return files\n\n for result in output.split(\"\\n\"):\n logging.info(result)\n if result != '':\n match = modified.match(result)\n if match:\n files.append(match.group('name'))\n\n return files" ]
[ "0.7293217", "0.69692844", "0.6767985", "0.65724486", "0.65694773", "0.6546248", "0.6306112", "0.6290885", "0.6254722", "0.6251354", "0.6199755", "0.6197552", "0.61879873", "0.61280656", "0.61088586", "0.60504794", "0.60438734", "0.59778523", "0.5970127", "0.5940632", "0.5922757", "0.5907193", "0.59036744", "0.58859867", "0.5878581", "0.58771235", "0.5858442", "0.5854195", "0.58452505", "0.5839855" ]
0.7863409
0
Get a dictionary of the links in the specified bundle version
def get_bundle_version_links(bundle_uuid, version_number): if version_number == 0: return {} version_info = get_bundle_version(bundle_uuid, version_number) return { name: LinkDetails( name=name, direct=LinkReference(**link["direct"]), indirect=[LinkReference(**ind) for ind in link["indirect"]], ) for name, link in version_info['snapshot']['links'].items() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_bundle_links(bundle_uuid, use_draft=None):\n bundle = get_bundle(bundle_uuid)\n if use_draft and use_draft in bundle.drafts: # pylint: disable=unsupported-membership-test\n draft_uuid = bundle.drafts[use_draft] # pylint: disable=unsubscriptable-object\n return get_draft(draft_uuid).links\n elif not bundle.latest_version:\n # This bundle has no versions so definitely does not contain any links\n return {}\n else:\n return get_bundle_version_links(bundle_uuid, bundle.latest_version)", "def get_product_bundle_urls():\n # TODO(fxb/115328): Replaces with JSON API when available.\n bundles = common.run_ffx_command(cmd=('product-bundle', 'list'),\n capture_output=True).stdout.strip()\n urls = [\n line.strip() for line in bundles.splitlines() if 'gs://fuchsia' in line\n ]\n structured_urls = []\n for url in urls:\n downloaded = False\n if '*' in url:\n downloaded = True\n url = url.split(' ')[1]\n structured_urls.append({'downloaded': downloaded, 'url': url.strip()})\n return structured_urls", "def get_linked_versions(version='current'):\n version = check_version_str(version)\n chapters = [10, 9, 8]\n version_page = 'https://research.cs.wisc.edu/htcondor/manual/{ver}/{chapter}_Version_History.html'\n r = requests.get(version_page.format(ver=version, chapter=chapters[0]))\n if r.status_code == 404:\n # Try different chapter numbers, as it changes for different versions\n i = 1\n while r.status_code == 404 and i < len(chapters):\n r = requests.get(version_page.format(ver=version, chapter=chapters[i]))\n i += 1\n if r.status_code == 404:\n return []\n soup_vers = bs4.BeautifulSoup(r.text, 'lxml')\n versions = [x.text.replace('Version ', '')\n for x in soup_vers.find_all('a')\n if x.text.startswith('Version')]\n return versions", "def links(self):\n links = {}\n data = self.data['links']\n for key in data:\n links[key] = data[key]['url']\n return links", "def downloads_per_version(package):\n downloads = {}\n for release in package['files']:\n downloads[release['version']] = release['ndownloads']\n return downloads", "def find_bundles_for_url(request):\n\n # get/create link for given url\n url = request.query_params.get('url', None)\n\n # validate url is a url\n v = URLValidator()\n\n try:\n v(url)\n except ValidationError as exc:\n # the user must be joking\n return Response({'error': True, 'msg': 'Invalid URL'}, status=400)\n\n # normalize url for tidyness\n url = urltools.normalize(url)\n\n try:\n link = Link.objects.get(url=url)\n except Link.DoesNotExist:\n return Response([])\n\n # find all bundle memberships for this link\n memberships = (BundleLink.objects\n .filter(link=link)\n .only('bundle_id')\n .distinct())\n\n # fetch all bundle-link pairs for bundles containing this link\n bundle_ids = [m.bundle_id for m in memberships]\n all_links = (BundleLink.objects\n .filter(bundle_id__in=bundle_ids)\n .select_related('bundle', 'link', 'curator'))\n\n # group bundlelinks by bundle - <bundle: [bundlelink, ...]>\n grouped = itertools.groupby(all_links, key=operator.attrgetter('bundle'))\n\n output = []\n\n for bundle, link_list in grouped:\n setattr(bundle, 'link_list', link_list)\n serialized = BundleSerializer(bundle)\n output.append(serialized.data)\n\n return Response(output)", "def list_bundles():\n response = houston.get(\"/zipline/bundles\")\n\n houston.raise_for_status_with_json(response)\n return response.json()", "def links_json(self, absolutize_url):\n return [\n {\n \"href\": absolutize_url(\"v2/{0}/images/{1}\"\n .format(self.tenant_id, self.image_id)),\n \"rel\": \"self\"\n 
},\n {\n \"href\": absolutize_url(\"{0}/images/{1}\"\n .format(self.tenant_id, self.image_id)),\n \"rel\": \"bookmark\"\n },\n {\n \"href\": absolutize_url(\"/images/{0}\"\n .format(self.image_id)),\n \"type\": \"application/vnd.openstack.image\",\n \"rel\": \"alternate\"\n }\n ]", "def links_json(self, absolutize_url):\n return [\n {\n \"href\": absolutize_url(\"v2/{0}/images/{1}\"\n .format(self.tenant_id, self.image_id)),\n \"rel\": \"self\"\n },\n {\n \"href\": absolutize_url(\"{0}/images/{1}\"\n .format(self.tenant_id, self.image_id)),\n \"rel\": \"bookmark\"\n },\n {\n \"href\": absolutize_url(\"/images/{0}\"\n .format(self.image_id)),\n \"type\": \"application/vnd.openstack.image\",\n \"rel\": \"alternate\"\n }\n ]", "def GetVersions(url, requestedProduct, requestedVersion):\n dictValidReleasesSorted = {}\n response = requests.get(url)\n if response.status_code == 200:\n jsonResult = response.json()\n jVersions = jsonResult[requestedProduct][\"versions\"]\n dictValidReleases = {}\n # do not want pre-releases; filter them out\n for item in jVersions.items(): \n for build in item[1][\"builds\"]:\n if (build[\"os\"] == SUPPORTED_OS):\n if (build[\"arch\"] == SUPPORTED_ARCH):\n if not (re.search('[a-zA-Z]', item[1][\"version\"])): \n dictValidReleases[item[1][\"version\"]] = build[\"url\"]\n\n for key in sorted(dictValidReleases,key=LooseVersion):\n dictValidReleasesSorted[key] = dictValidReleases[key]\n else:\n raise requests.ConnectionError(\"Server did not return status 200 - returned {0}\".format(response.status_code))\n\n return dictValidReleasesSorted", "def versions(self) -> Dict[str, str]:\n self.__logger.debug('Eva.versions called')\n return self.__http_client.api_versions()", "def links(self):\n return self.container['links']", "def get_component_versions(session):\n # type: (Session) -> Dict[str, Any]\n return _get_dict(session, \"/version\")", "def get_linked_rvt_info(rvt_file):\n tm_data = get_transmission_data(rvt_file, cleaned_str=True)\n re_tm_data = re.compile(\"(<\\?xml version=(?s).+)\")\n tm_xml = re.findall(re_tm_data, tm_data)\n root = ElementTree.fromstring(tm_xml[0])\n rvt_links = defaultdict(dict)\n for ext_ref in root.findall('ExternalFileReference'):\n ext_id = ext_ref.find('ElementId').text\n ref_type = ext_ref.find('ExternalFileReferenceType').text\n if ref_type == 'Revit Link':\n for child in ext_ref.getchildren():\n rvt_links[ext_id][child.tag] = child.text\n return rvt_links", "def app_links_json(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"app_links_json\")", "def get_resource_urls():\n base_url = 'http://developer.pardot.com/'\n pattern = re.compile(\n r'(?ims)\\<a [^>]*?href=\"(kb/api-version-3/[^>]*?/)\"[^>]*?\\>'\n r'[^<]*?\\</a\\>')\n response = requests.get(base_url)\n return [\n '%s/%s' % (base_url, url) for url in pattern.findall(response.text)]", "def getLinksToPhonesPerBrands(url):\n urls = {}\n print(\"brand link being scrapped : \", url)\n try:\n request = requests.get(url)\n if request.status_code == 200:\n sourceCode = BeautifulSoup(request.content, \"html.parser\")\n li = sourceCode.select('#review-body div > ul > li > a')\n for link in li:\n title = link.get_text()\n url = processUrl(link['href'])\n if title not in urls.keys():\n urls[title] = url\n print(title, ' ', url)\n else:\n print('no table or row found ')\n except requests.HTTPError as e:\n print('Unable to open url', e)\n return urls", "def list_versions(self):\n if not USE_GCLOUD:\n return self.run_appcfg(['list_versions'])\n data = self.run_gcloud(['app', 
'versions', 'list'])\n per_module = collections.defaultdict(list)\n for deployment in data:\n service = deployment['service'].encode('utf-8')\n version_id = deployment['id'].encode('utf-8')\n per_module[service].append(version_id)\n return dict(per_module)", "def get_links(self):\r\n return self.links", "def get_extended_resources(self, version):\n return {}", "def getLinkstoBrands(url):\n brandUrls = {}\n try:\n print(\"Maker link being crawled : \", url)\n request = requests.get(url)\n if request.status_code == 200:\n sourceCode = BeautifulSoup(request.text, \"html.parser\")\n for td in sourceCode.findAll('td'):\n link = td.find('a', href=True)\n title = td.get_text()\n url = processUrl(link['href'])\n if title not in brandUrls.keys():\n brandUrls[title] = url\n print(title, ' ', url)\n else:\n print('no table or row found ')\n except requests.HTTPError as e:\n print('Unable to open url', e)\n return brandUrls", "def get_links(self):\n return self.__data['links']", "def list_versions(quartus_versions):\n for key in quartus_versions.keys():\n print(key)", "def get_url(name, version=None):\n global urls\n\n # Only download the URL look up table once.\n if urls is None:\n from six.moves.urllib.request import urlopen\n import json\n f = urlopen(\"http://sncosmo.github.io/data/urls.json\")\n reader = codecs.getreader(\"utf-8\")\n urls = json.load(reader(f))\n f.close()\n\n key = name if (version is None) else \"{0}_v{1}\".format(name, version)\n\n return urls[key]", "def getExpandedLinks():", "def links(self):\n\t\treturn self.list_of_links", "def get_versions():\n ret_obj = {'versions': picard_versions(current_app)}\n return make_response(jsonify(ret_obj), 200)", "def get_bdist_release_info(self, version):\r\n for dist in self.get_release_info(version):\r\n if \"bdist\" in dist[\"packagetype\"]:\r\n return dist\r\n return {}", "def schema_links(section, sec_key=None):\n NESTED_FORMAT = '%s > %s' # this format is used in docs/js/api.js:normalizeKeys\n links = section.links\n if section.data:\n data = section.data.items()\n for sub_section_key, sub_section in data:\n new_links = schema_links(sub_section, sec_key=sub_section_key)\n links.update(new_links)\n\n if sec_key is not None:\n new_links = OrderedDict()\n for link_key, link in links.items():\n new_key = NESTED_FORMAT % (sec_key, link_key)\n new_links.update({new_key: link})\n return new_links\n\n return links", "def get_urls(self) -> Dict[str, str]:\n return {}" ]
[ "0.65551144", "0.63308424", "0.6044713", "0.5982717", "0.5820173", "0.58162254", "0.5697063", "0.5680008", "0.5680008", "0.5654119", "0.5614463", "0.5573144", "0.55640477", "0.55347484", "0.5501417", "0.5493479", "0.5456731", "0.5444286", "0.54436064", "0.5442817", "0.54288197", "0.53922427", "0.53874356", "0.53836125", "0.536689", "0.53652203", "0.5358444", "0.53371567", "0.5333206", "0.5323028" ]
0.7957789
0
Get a dict of all the files in the specified bundle. Returns a dict where the keys are the paths (strings) and the values are BundleFile or DraftFile tuples.
def get_bundle_files_dict(bundle_uuid, use_draft=None): bundle = get_bundle(bundle_uuid) if use_draft and use_draft in bundle.drafts: # pylint: disable=unsupported-membership-test draft_uuid = bundle.drafts[use_draft] # pylint: disable=unsubscriptable-object return get_draft(draft_uuid).files elif not bundle.latest_version: # This bundle has no versions so definitely does not contain any files return {} else: return {file_meta.path: file_meta for file_meta in get_bundle_version_files(bundle_uuid, bundle.latest_version)}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_bundle_files(bundle_uuid, use_draft=None):\n return get_bundle_files_dict(bundle_uuid, use_draft).values() # lint-amnesty, pylint: disable=dict-values-not-iterating", "def get_bundle_version_files(bundle_uuid, version_number):\n if version_number == 0:\n return []\n version_info = get_bundle_version(bundle_uuid, version_number)\n return [BundleFile(path=path, **file_metadata) for path, file_metadata in version_info[\"snapshot\"][\"files\"].items()]", "def get_package_files(self, package):\n with self._conn.begin():\n return {\n row.filename: row.filehash\n for row in self._conn.execute(\n \"SELECT filename, filehash \"\n \"FROM get_package_files(%s)\", (package,)\n )\n }", "def load_files(paths):\n\n bundle = {}\n\n # Load files\n # The returned dict of a parsed file cannot be guaranteed consistently\n # ordered, so sadly here we loose sequentially of declaration in files.\n for file in paths:\n\n log.info(\n 'Loading file {} ...'.format(file)\n )\n\n content = load_file(file)\n\n log.debug(\n 'Content loaded:\\n{}'.format(pformat(content))\n )\n\n # Update the general bundle\n update(bundle, content)\n\n if bundle:\n log.debug(\n 'Final bundle:\\n{}'.format(pformat(bundle))\n )\n\n return bundle", "def get_files(self):\n return self._files.values()", "def _get_package_files(self) -> Dict[str, 'BinPackageFile']:\n\n if self._package_files is None:\n manifest_path = f\"{self.path}/packagemanifest\"\n manifest_urlpath = f\"{self.path}/packages/files/packagemanifest\"\n self.project.storage.download(manifest_urlpath, manifest_path)\n files = BinPackageFile.from_package_manifest(self.project.storage.fspath(manifest_path))\n self._package_files = {pf.extract_path: pf for pf in files}\n return self._package_files", "def files(self):\n files = []\n if self.package_type == 'package':\n file_data = dict([(k, self[k]) \\\n for k in ['size', 'sha1', 'sha256', 'md5sum']])\n file_data['name'] = self['filename'].split('/')[-1]\n files.append(file_data)\n else:\n for d in self['files']:\n file_data = d.copy()\n # Get checksum data as well...\n for key in ['sha1', 'sha256']:\n for data in self['checksums-' + key]:\n if file_data['name'] == data['name']:\n file_data[key] = data[key]\n files.append(file_data)\n return files", "def files(self):\n return self._files.items()", "def filepaths(self) -> Dict[str, 'BinPackageFile']:\n return self._get_package_files()", "def files(self):\n result = {}\n if 'files' in self.fields:\n indicies = xrange(len(self.fields['files']))\n files = self.fields['files']\n priorities = self.fields['priorities']\n wanted = self.fields['wanted']\n index = 1\n for item in zip(indicies, files, priorities, wanted):\n if item[3]:\n selected = True\n else:\n selected = False\n priority = PRIORITY[item[2]]\n result[item[0]] = {\n 'selected': selected,\n 'priority': priority,\n 'size': item[1]['length'],\n 'name': item[1]['name'],\n 'completed': item[1]['bytesCompleted']}\n return result", "def get_bundle_file_metadata(bundle_uuid, path, use_draft=None):\n assert isinstance(bundle_uuid, UUID)\n files_dict = get_bundle_files_dict(bundle_uuid, use_draft=use_draft)\n try:\n return files_dict[path]\n except KeyError:\n raise BundleFileNotFound( # lint-amnesty, pylint: disable=raise-missing-from\n f\"Bundle {bundle_uuid} (draft: {use_draft}) does not contain a file {path}\"\n )", "def files(self):\n files = dict()\n for name, value in self.__dict__.items():\n public_methods = ['list_filenames', 'add']\n if not name.startswith('_') and name not in public_methods:\n files[name] = 
value\n return files", "def _load_files(self):\n files = {}\n for fn_ in self.opts[\"src\"]:\n if os.path.isfile(fn_):\n files.update(self._file_dict(fn_))\n elif os.path.isdir(fn_):\n salt.utils.stringutils.print_cli(\n \"{} is a directory, only files are supported \"\n 'in non-chunked mode. Use \"--chunked\" command '\n \"line argument.\".format(fn_)\n )\n sys.exit(1)\n return files", "def filelist(folder):\n file_dict={}\n folderlist = glob.glob(os.getcwd()+\"/\"+folder+\"/*\")\n for i in tqdm(folderlist):\n filelist = glob.glob(i+\"/*\")\n filename = i.rsplit(\"/\")[-1]\n file_dict[filename]= filelist\n\n return file_dict", "def files(self):\r\n url = '{0}/files'.format(self.get_url())\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def files(self):\n try:\n return glob.glob(self.path)\n except (AttributeError, TypeError):\n try:\n return glob.glob(self.alias)\n except (AttributeError, TypeError):\n return []", "def get_files_dict(folder_path, filter_term, recursive):\n if recursive:\n query = folder_path + '**/' + filter_term\n files_list = glob.glob(query, recursive=True)\n else:\n query = folder_path + filter_term\n files_list = glob.glob(query, recursive=False)\n files_list = [f for f in files_list if os.path.isfile(f)]\n files_dict = {f: get_timestamp(f) for f in files_list}\n return files_dict", "def files_cmd(\n context,\n tag_names: List[str],\n version_id: int,\n verbose: bool,\n bundle: str,\n json: bool,\n compact: bool,\n):\n store: Store = context.obj[\"store\"]\n file_objs = store.get_files(\n bundle_name=bundle, tag_names=tag_names, version_id=version_id\n )\n template = schema.FileSchema()\n result = []\n for file in file_objs:\n result.append(template.dump(file))\n if json:\n click.echo(jsonlib.dumps(result))\n return\n console = Console()\n console.print(get_files_table(result, verbose=verbose, compact=compact))", "def forge_files(self) -> Dict[str, BaseForge]:\n\t\treturn self._forge_files", "def get_files(self):\n return self.ebook_file.get_files()", "def get_bundle_file_data(bundle_uuid, path, use_draft=None):\n metadata = get_bundle_file_metadata(bundle_uuid, path, use_draft)\n with requests.get(metadata.url, stream=True) as r:\n return r.content", "def list_files_in_directory(self):\n lesson_file_dict = dict()\n lesson_file_dict[\"files\"] = []\n\n directory_list = listdir(self.sub_dir)\n for directory in directory_list:\n if isfile(join(self.sub_dir, directory)):\n lesson_file_dict[\"files\"].append(directory)\n\n return lesson_file_dict", "def get_file_list(pkg_root_dir, pkg_num, tree_size):\n\n # Get the bundle list\n pkg_dir = os.path.join(pkg_root_dir, build_package_name(pkg_num))\n bundle_list = get_bundle_list(pkg_dir, tree_size)\n\n file_list = []\n for bundle in bundle_list:\n # The dict must contain the path relative to the bittorrent download dir + the name the user choose to store\n # the files downloaded from this torrent. 
As such, the file_path is just the relative path to the bundle, from\n # the pkg_root_dir\n # The dict must also contain the length of said file\n file_info = os.stat(os.path.join(pkg_dir, bundle))\n file_path = [build_package_name(pkg_num), bundle]\n file_list.append(\n {\n 'length': file_info.st_size,\n 'path': file_path\n }\n )\n\n # The torrent also contains the STH file for a specified tree_size\n sth_fn = build_sth_name(tree_size)\n file_info = os.stat(os.path.join(pkg_root_dir, sth_fn))\n file_list.append({'length': file_info.st_size, 'path': [sth_fn]})\n\n # The info file containing the pkg_hash and the merkle proof for this package up to the STH must be included for\n # downloaders to be able to verify this package\n info_file_fn = build_info_file_name(pkg_num, tree_size)\n file_info = os.stat(os.path.join(pkg_root_dir, info_file_fn))\n file_list.append({'length': file_info.st_size, 'path': [info_file_fn]})\n\n return file_list", "def files():\n return get_cached(\"files.json\")", "def get_all_job_files(jobFolder):\n job_files = {}\n for job_file in glob.glob(os.path.join(jobFolder, '*.json')):\n __, j = os.path.split(job_file)\n job_files[j] = ''\n return job_files", "def get_files(self):\n\n for path, dirs, files in os.walk(self.data_path):\n for dir in dirs:\n self.original_files[dir] = []\n self.imitation_files[dir] = []\n for file in os.listdir(path + \"/\" + dir):\n if( \"original\" in file ):\n self.original_files[dir].append(path + \"/\" + dir + \"/\" + file)\n else:\n self.imitation_files[dir].append(path + \"/\" + dir + \"/\" + file)\n\n return", "def collect_files(self):\n self.files = []\n for bundle in self.bundles:\n bundle.init_build(self, self.builder)\n bundle_files = bundle.prepare()\n self.files.extend(bundle_files)\n return self", "def get_files(self) -> Set[str]:\n return ({f for f in os.listdir(self.get_directory())\n if os.path.isfile(os.path.join(self.get_directory(), f))} if self.directory_exists(self.get_directory()) else set())", "def GetFileEntries(self, path_prefix=''):\n if self._file_entries:\n for path, file_entry in self._file_entries.items():\n if path.startswith(path_prefix):\n yield file_entry", "def read_files(path):\n filenames = listdir(path)\n contents = {}\n for filename in filenames:\n fullpath = f\"{path}/{filename}\"\n if isfile(fullpath):\n contents[fullpath] = _read_file_content(fullpath)\n\n return contents" ]
[ "0.7790072", "0.64431036", "0.63394016", "0.63182753", "0.630003", "0.62759304", "0.6194732", "0.6178175", "0.61479455", "0.6144583", "0.6076846", "0.60737944", "0.6040074", "0.5976576", "0.59606755", "0.5862239", "0.5857732", "0.58249676", "0.5790602", "0.5765096", "0.5762962", "0.5761767", "0.5729838", "0.5726096", "0.5695629", "0.56501484", "0.5644065", "0.5640523", "0.56333345", "0.5623302" ]
0.8014129
0
Get an iterator over all the files in the specified bundle or draft.
def get_bundle_files(bundle_uuid, use_draft=None): return get_bundle_files_dict(bundle_uuid, use_draft).values() # lint-amnesty, pylint: disable=dict-values-not-iterating
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_files(self, block):\n \n raise NotImplementedError('get_files')", "def __iter__(self):\n\n return iter(self.files)", "def getFiles(self, getContent=False):\n for index, file in enumerate(self.files):\n if getContent:\n logger.debug(\n \"get file {} from service {}\".format(\n file, self.servicename)\n )\n content = self.getFile(index)\n\n yield file, content\n else:\n yield file", "def files(self) -> Iterator[List[Path]]:\n\n files: List[Path] = []\n fragments: List[str] = []\n data: Dict[str, Any] = {}\n\n inst: Optional[\"CLIInstance\"] = self\n while inst is not None:\n for k, v in inst._data.items():\n if isinstance(v, UnknownExpression) or k in data:\n fragments.append(f\"{k} = {v};\\n\")\n elif isinstance(v, EnumMeta):\n fragments.append(\n f\"{k} = {{{', '.join([i for i in v.__members__])}}};\\n\"\n )\n else:\n data[k] = v\n fragments.extend(inst._code_fragments)\n files.extend(inst._includes)\n\n inst = inst._parent\n\n gen_files = []\n try:\n if len(data) > 0:\n file = tempfile.NamedTemporaryFile(\n prefix=\"mzn_data\", suffix=\".json\", delete=False\n )\n gen_files.append(file)\n file.write(json.dumps(data, cls=MZNJSONEncoder).encode())\n file.close()\n files.append(Path(file.name))\n if len(fragments) > 0 or len(files) == 0:\n file = tempfile.NamedTemporaryFile(\n prefix=\"mzn_fragment\", suffix=\".mzn\", delete=False\n )\n gen_files.append(file)\n for code in fragments:\n file.write(code.encode())\n file.close()\n files.append(Path(file.name))\n yield files\n finally:\n for file in gen_files:\n os.remove(file.name)", "def get_files(self):\n return self.ebook_file.get_files()", "def enumerate_files(self, table):\n for i in range(self.nrofrecords()):\n data = self.bank.readrec(i + 1)\n if data and data[0] == table.tableid:\n yield i + 1, data[1:]", "def get_bundle_files_dict(bundle_uuid, use_draft=None):\n bundle = get_bundle(bundle_uuid)\n if use_draft and use_draft in bundle.drafts: # pylint: disable=unsupported-membership-test\n draft_uuid = bundle.drafts[use_draft] # pylint: disable=unsubscriptable-object\n return get_draft(draft_uuid).files\n elif not bundle.latest_version:\n # This bundle has no versions so definitely does not contain any files\n return {}\n else:\n return {file_meta.path: file_meta for file_meta in get_bundle_version_files(bundle_uuid, bundle.latest_version)}", "def get_all(self) -> Generator:\n\n for filename in self.list_files():\n yield self.get(filename)", "def GetFileEntries(self, path_prefix=''):\n if self._file_entries:\n for path, file_entry in self._file_entries.items():\n if path.startswith(path_prefix):\n yield file_entry", "def __iter__(self):\n for p in self.paths:\n yield Document.load(os.path.join(self.dirpath, p), fmt=self.fmt)", "def find_files(self):\n # yield blueprint paths first\n if getattr(self, 'blueprint_name', None):\n for path in walk_directory(os.path.join(self.path, self.blueprint_name), ignore=self.project.EXCLUDES):\n yield 'preview', {'path': path}\n\n # then yield project paths\n for path in walk_directory(self.path, ignore=self.project.EXCLUDES):\n yield 'preview', {'path': path}", "def get_files(self):\r\n return self._filelist", "def filelist_generator(self):\n for filename in self.filenames:\n yield filename", "def get_files(self) -> tp.Iterable[str]:\n return os.listdir(self.path)", "def files_and_folders(self):\n yield from self._root.files_and_folders(0)", "def fileobjects_iter(imagefile=None,xmlfile=None,fiwalk=\"fiwalk\",flags=0):\n def local_iter(fi):\n yield fi\n 
fiwalk_using_sax(imagefile=imagefile,xmlfile=xmlfile,fiwalk=fiwalk,flags=flags,\n callback=local_iter)", "def iterRegularFileContents(self):\n unpack = {}\n for (oldFileId, newFileId), stream in self.files.iteritems():\n if not files.frozenFileHasContents(stream):\n continue\n if files.frozenFileFlags(stream).isEncapsulatedContent():\n continue\n cont = files.frozenFileContentInfo(stream)\n unpack[newFileId] = cont.sha1()\n\n want_tag = '0 ' + ChangedFileTypes.file[4:]\n while True:\n f = self._nextFile()\n if not f:\n break\n name, tag, fobj, csf = f\n if len(name) != 36 or tag != want_tag:\n continue\n fileId = name[16:]\n sha1 = unpack.get(fileId)\n if not sha1:\n continue\n yield sha1, fobj", "def __iter__(self):\n for p in self.pkg.datastream:\n act = self.action(self.pkgmap, p, p.name)\n if act:\n yield act\n\n # for some reason, some packages may have directories specified\n # in the pkgmap that don't exist in the archive. They need to\n # be found and iterated as well.\n #\n # Some of the blastwave packages also have directories in the\n # archive that don't exist in the package metadata. I don't see\n # a whole lot of point in faking those up.\n for p in self.pkg.manifest:\n if p.pathname.startswith(\"/\"):\n dir = \"root\"\n else:\n dir = \"reloc/\"\n if p.type == \"d\" and \\\n dir + p.pathname not in self.pkg.datastream:\n act = self.action(self.pkgmap, None,\n dir + p.pathname)\n if act:\n yield act\n if p.type in \"ls\":\n act = self.action(self.pkgmap, None,\n dir + p.pathname)\n if act:\n yield act", "def get_bundle_version_files(bundle_uuid, version_number):\n if version_number == 0:\n return []\n version_info = get_bundle_version(bundle_uuid, version_number)\n return [BundleFile(path=path, **file_metadata) for path, file_metadata in version_info[\"snapshot\"][\"files\"].items()]", "def files(self) -> Generator[Path, None, None]:\n return Path(self.package).resolve(strict=True).glob(self.glob)", "def find_object_files(kem_dir):\n for dirpath, _, filenames in os.walk(kem_dir):\n for fn in filenames:\n if fn.endswith('.o'):\n yield os.path.join(dirpath, fn)", "def files_cmd(\n context,\n tag_names: List[str],\n version_id: int,\n verbose: bool,\n bundle: str,\n json: bool,\n compact: bool,\n):\n store: Store = context.obj[\"store\"]\n file_objs = store.get_files(\n bundle_name=bundle, tag_names=tag_names, version_id=version_id\n )\n template = schema.FileSchema()\n result = []\n for file in file_objs:\n result.append(template.dump(file))\n if json:\n click.echo(jsonlib.dumps(result))\n return\n console = Console()\n console.print(get_files_table(result, verbose=verbose, compact=compact))", "def getFiles(folderToProcess,filter):\n\n print(f\"Parsing {folderToProcess} for {filter} files\")\n\n if debug:\n for path in Path(folderToProcess).rglob(filter):\n print(f\"Found {path}\")\n\n all_files = [str(x) for x in Path(folderToProcess).rglob(filter)] \n\n return all_files", "def _get_files(self):\n # pylint: disable=unused-variable\n for dirpath, __, filenames in os.walk(self.start_location):\n for file_ in filenames:\n if file_.endswith('.py'):\n yield \"{0}{1}\".format(dirpath, file_)", "def files(self):\r\n return files.Files(self)", "def get_documents(self, batch=None):\n\t\t\n\t\tfiles = None\n\t\tif not batch:\n\t\t\t# no batch = all the batches\n\t\t\tfiles = self._get_batch_files()\n\t\telif batch == \"random\":\n\t\t\t# get all the batches and pick one from random\n\t\t\tbatches = self._get_batches()\n\t\t\tfiles = [ self._get_batch_file(batch=random.randint(1, 
len(batches))) ]\n\t\telse:\n\t\t\t# get the specified batch\n\t\t\tfiles = [ self._get_batch_file(batch=batch) ]\n\t\t\t\n\t\t# loop through all the batch files\n\t\tfor f in files:\n\t\t\twith gzip.open(f, \"rb\") as infile:\n\t\t\t\tfor line in infile:\n\t\t\t\t\t# parse the JSON for each line\n\t\t\t\t\tyield json.loads(line)", "def iterator(dataset_fn, sections=None, lang=None, field_indices=None):\n files = filelist(lang=lang, sections=sections)\n\n with TarFile.open(dataset_fn, 'r:gz') as f:\n for member in f:\n if member.isfile() and os.path.basename(member.name) in files:\n logging.info('parsing %s ...' % member.name)\n m_f = f.extractfile(member)\n\n for sentence in parse_conll(m_f, field_indices=field_indices):\n yield sentence\n\n m_f.close()", "def _iter_module_files():\n # The list call is necessary on Python 3 in case the module\n # dictionary modifies during iteration.\n for module in list(sys.modules.values()):\n if module is None:\n continue\n filename = getattr(module, \"__file__\", None)\n if filename:\n old = None\n while not os.path.isfile(filename):\n old = filename\n filename = os.path.dirname(filename)\n if filename == old:\n break\n else:\n if filename[-4:] in (\".pyc\", \".pyo\"):\n filename = filename[:-1]\n yield filename", "def files_from_roots(roots, accept):\n for root in roots:\n if root is not None:\n yield from files_from_root(root, accept)", "def _GetSubFileEntries(self):\n if self._directory is None:\n self._directory = self._GetDirectory()\n\n if self._directory:\n for path_spec in self._directory.entries:\n yield APMFileEntry(self._resolver_context, self._file_system, path_spec)" ]
[ "0.61938465", "0.6149882", "0.61284655", "0.60207605", "0.5920121", "0.5907837", "0.5906147", "0.58372724", "0.5820502", "0.5797337", "0.575836", "0.5687807", "0.56840444", "0.56556374", "0.56395537", "0.5559454", "0.5515556", "0.54998755", "0.54887193", "0.54826784", "0.5477529", "0.54628944", "0.543585", "0.5427369", "0.5424238", "0.5402175", "0.53913456", "0.53761166", "0.53638273", "0.5356941" ]
0.6996779
0
Get a dict of all the links in the specified bundle. Returns a dict where the keys are the link names (strings) and the values are LinkDetails or DraftLinkDetails tuples.
def get_bundle_links(bundle_uuid, use_draft=None): bundle = get_bundle(bundle_uuid) if use_draft and use_draft in bundle.drafts: # pylint: disable=unsupported-membership-test draft_uuid = bundle.drafts[use_draft] # pylint: disable=unsubscriptable-object return get_draft(draft_uuid).links elif not bundle.latest_version: # This bundle has no versions so definitely does not contain any links return {} else: return get_bundle_version_links(bundle_uuid, bundle.latest_version)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_bundle_version_links(bundle_uuid, version_number):\n if version_number == 0:\n return {}\n version_info = get_bundle_version(bundle_uuid, version_number)\n return {\n name: LinkDetails(\n name=name,\n direct=LinkReference(**link[\"direct\"]),\n indirect=[LinkReference(**ind) for ind in link[\"indirect\"]],\n )\n for name, link in version_info['snapshot']['links'].items()\n }", "def links(self):\n links = {}\n data = self.data['links']\n for key in data:\n links[key] = data[key]['url']\n return links", "def get_links(self):\n return self.__data['links']", "def links(self):\n if not hasattr(self, '_links'):\n self._links = self.resource.links()\n values = self._response.headers.get('link')\n self._links.update([link for link in Links.parse(values)])\n return self._links", "def get_all_links(self):\n links_url = \"{}/links\".format(self._project_url)\n print(links_url)\n response = requests.get(links_url).json()\n return json.dumps(response, indent=4, sort_keys=True)", "def get_links(self):\r\n return self.__links", "def _links_get(self, cr, uid, context=None):\n obj = self.pool.get('res.request.link')\n ids = obj.search(cr, uid, [])\n res = obj.read(cr, uid, ids, ['object', 'name'], context)\n return [(r['object'], r['name']) for r in res]", "def get_links(self):\r\n return self.links", "def links(self):\n return self.container['links']", "def links(self):\n return self._links", "def links(self):\n return self._links", "def links(self):\n return self._links", "def links(self):\n return self._links", "def links(self):\n return self._links", "def get_links(self):\n return (link for link in self.links)", "def find_bundles_for_url(request):\n\n # get/create link for given url\n url = request.query_params.get('url', None)\n\n # validate url is a url\n v = URLValidator()\n\n try:\n v(url)\n except ValidationError as exc:\n # the user must be joking\n return Response({'error': True, 'msg': 'Invalid URL'}, status=400)\n\n # normalize url for tidyness\n url = urltools.normalize(url)\n\n try:\n link = Link.objects.get(url=url)\n except Link.DoesNotExist:\n return Response([])\n\n # find all bundle memberships for this link\n memberships = (BundleLink.objects\n .filter(link=link)\n .only('bundle_id')\n .distinct())\n\n # fetch all bundle-link pairs for bundles containing this link\n bundle_ids = [m.bundle_id for m in memberships]\n all_links = (BundleLink.objects\n .filter(bundle_id__in=bundle_ids)\n .select_related('bundle', 'link', 'curator'))\n\n # group bundlelinks by bundle - <bundle: [bundlelink, ...]>\n grouped = itertools.groupby(all_links, key=operator.attrgetter('bundle'))\n\n output = []\n\n for bundle, link_list in grouped:\n setattr(bundle, 'link_list', link_list)\n serialized = BundleSerializer(bundle)\n output.append(serialized.data)\n\n return Response(output)", "def getlinklist(self):\n d = []\n try:\n con = hcpsdk.Connection(self.target, debuglevel=self.debuglevel)\n except Exception as e:\n raise hcpsdk.HcpsdkError(str(e))\n else:\n self.connect_time = con.connect_time\n try:\n r = con.GET('/mapi/services/replication/links')\n except Exception as e:\n d.append('Error: {}'.format(str(e)))\n else:\n if r.status == 200:\n # Good status, get and parse the Response\n x = r.read()\n self.service_time = con.service_time2\n root = Et.fromstring(x)\n for child in root:\n if child.tag == 'name':\n d.append(child.text)\n else:\n raise (hcpsdk.HcpsdkError('{} - {}'.format(r.status, r.reason)))\n finally:\n # noinspection PyUnboundLocalVariable\n con.close()\n\n return d", "def 
getLinks(self):\n\n return self.links", "def get_links(self, url):\n page_content = self.previous_results['page_content'][url]\n assert 'content' in page_content\n\n if page_content['content'] is None:\n return\n\n result = {\n 'links': [],\n 'exception': None,\n }\n\n soup = BeautifulSoup(page_content['content'], 'html.parser')\n\n for link in soup.find_all(\"a\"):\n result['links'].append({\n 'href': link.get('href'),\n 'text': link.text.strip(),\n })\n\n return result", "def get_links(self) -> List[str]:\n return self.__links", "def getLinks(self):\n\t\threfs = []\n\t\tfor link in self.bsource.find_all('a'):\n\t\t\threfs.append(link.get('href'))\n\t\treturn hrefs", "def _parse_links(self, item):\n regex = compile(r\"<a\\s+(?:[^>]*?\\s+)?href=([\\\"\\'])(.*?)\\1.*\\>(.*)<\\/a>\")\n links = [\n {\"href\": href, \"title\": title}\n for (_, href, title) in findall(regex, item[\"Event\"][\"Description\"])\n ]\n for link in links:\n if link[\"href\"][0] == \"/\":\n link[\"href\"] = \"https://www.pghschools.org\" + link[\"href\"]\n return links", "def links(self):\n\t\treturn self.list_of_links", "def schema_links(section, sec_key=None):\n NESTED_FORMAT = '%s > %s' # this format is used in docs/js/api.js:normalizeKeys\n links = section.links\n if section.data:\n data = section.data.items()\n for sub_section_key, sub_section in data:\n new_links = schema_links(sub_section, sec_key=sub_section_key)\n links.update(new_links)\n\n if sec_key is not None:\n new_links = OrderedDict()\n for link_key, link in links.items():\n new_key = NESTED_FORMAT % (sec_key, link_key)\n new_links.update({new_key: link})\n return new_links\n\n return links", "def get_links(self):\n msg = self.get_message()\n return msg.split()", "def links(self):\n return self._links_tpl.expand(self._identity, self._record)", "def getLinks(self):\n return self.pageLinks", "def links_json(self, absolutize_url):\n return [\n {\n \"href\": absolutize_url(\"v2/{0}/images/{1}\"\n .format(self.tenant_id, self.image_id)),\n \"rel\": \"self\"\n },\n {\n \"href\": absolutize_url(\"{0}/images/{1}\"\n .format(self.tenant_id, self.image_id)),\n \"rel\": \"bookmark\"\n },\n {\n \"href\": absolutize_url(\"/images/{0}\"\n .format(self.image_id)),\n \"type\": \"application/vnd.openstack.image\",\n \"rel\": \"alternate\"\n }\n ]", "def links_json(self, absolutize_url):\n return [\n {\n \"href\": absolutize_url(\"v2/{0}/images/{1}\"\n .format(self.tenant_id, self.image_id)),\n \"rel\": \"self\"\n },\n {\n \"href\": absolutize_url(\"{0}/images/{1}\"\n .format(self.tenant_id, self.image_id)),\n \"rel\": \"bookmark\"\n },\n {\n \"href\": absolutize_url(\"/images/{0}\"\n .format(self.image_id)),\n \"type\": \"application/vnd.openstack.image\",\n \"rel\": \"alternate\"\n }\n ]", "def get_links(self, response, domain, port, folder):\n\t\t# find link in tags: a, link, form, button\n\t\t# call to all function in file get_link\n\t\t# for method in get_link:\n\t\tlinks = get_link(response, domain, port, folder)\n\t\tlinks = filter(None, links.getResults())\n\t\treturn links" ]
[ "0.7305899", "0.68209445", "0.6084511", "0.5888304", "0.58443224", "0.58205533", "0.57978463", "0.57962805", "0.57638013", "0.56552774", "0.56552774", "0.56552774", "0.56552774", "0.56552774", "0.5595823", "0.55944407", "0.5573283", "0.5561761", "0.5560517", "0.55239433", "0.5518763", "0.5510302", "0.5509307", "0.5484993", "0.54758877", "0.5451703", "0.5444355", "0.5416124", "0.5416124", "0.5413589" ]
0.73346376
0
Create or overwrite the file at 'path' in the specified draft with the given contents. To delete a file, pass contents=None. If you don't know the draft's UUID, look it up using get_or_create_bundle_draft() Does not return anything.
def write_draft_file(draft_uuid, path, contents): api_request('patch', api_url('drafts', str(draft_uuid)), json={ 'files': { path: encode_str_for_draft(contents) if contents is not None else None, }, })
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_file(path: Path, content: str) -> None:\n path.touch()\n with path.open(\"w\") as f:\n f.write(content)", "def put_contents( path, name, contents, dryrun = False, get_config=lambda: {}, verbose=False ):\n t_file_fh, t_file_name = tempfile.mkstemp()\n os.close(t_file_fh)\n print(contents, file=open(t_file_name,\"w\"))\n if not dryrun:\n fs_mod.fs_put( t_file_name, path+\"/\"+name, get_config, verbose )\n if not path.startswith(\"s3://\"):\n t = time.time()\n fs_mod.fs_utime( path+\"/\"+name, (t,t), get_config )\n os.remove(t_file_name)\n return", "def mkfile(self, _path, contents=None, overwrite=False):\n if path.isfile(_path) and not overwrite:\n self.die('Cannot make file \"{0}\". Already exists and overwrite={1}'.format(_path, repr(overwrite)))\n \n # Make sure the directory exists\n self.mkpath(_path)\n \n # Make the file\n fh = open(_path, 'w')\n \n # If writing contents\n if contents:\n fh.write(contents)\n \n # Close the file\n fh.close()\n \n # Return the path\n return _path", "def write_file(path, contents, mode=\"w\"):\n with open(path, mode) as f:\n f.write(contents)", "def touch(path, content=\"\", encoding=\"utf-8\", overwrite=False):\n path = os.path.abspath(path)\n if not overwrite and os.path.exists(path):\n logger.warning('touch: \"%s\" already exists', path)\n return False\n try:\n logger.info(\"touch: %s\", path)\n with io.open(path, \"wb\") as f:\n if not isinstance(content, bytes):\n content = content.encode(encoding)\n f.write(content)\n return True\n except Exception as e:\n logger.error(\"touch: %s failed. Error: %s\", path, e)\n return False", "def update(self, store, uuid, contents):\n\n stored_file = self._retrieve(store.object_type, uuid)\n\n stored_file.contents = contents\n\n if store.versioned:\n version = self._get_latest_version(store, stored_file.name) + 1\n return self._create(\n store, stored_file.name, stored_file.contents, version)\n\n return self._upsert(store, stored_file)", "def touch(path):\n open(path, 'wb').close()", "def replace_file(filename, contents):\n filename = path.join(PATH_ROOT, filename)\n filename_bak = \"%s.release.bak\" % filename\n os.rename(filename, filename_bak)\n with open(filename, \"w\") as out_file:\n out_file.write(\"\".join(contents))\n shutil.copymode(filename_bak, filename)\n os.remove(filename_bak)", "def put_file(self, path, contents):\n data = io.BytesIO()\n with tarfile.open(fileobj=data, mode='w') as tarfile_:\n file_contents = contents.encode() if isinstance(contents, str) else contents\n tarinfo = tarfile.TarInfo(path)\n\n # We set the modification time to now because some systems (e.g. 
logging) rely upon\n # timestamps to determine whether to read config files.\n tarinfo.mtime = time.time()\n tarinfo.size = len(file_contents)\n tarfile_.addfile(tarinfo, io.BytesIO(file_contents))\n data.seek(0)\n\n self.container.put_archive(path='/', data=data)", "def edit_file(path, editor=None):\n\n # Find the editor to use\n editor = find_editor(editor)\n\n # Create temporary directory and copy the file\n tmpdir = tempfile.mkdtemp()\n tmpfile = os.path.join(tmpdir, os.path.basename(path))\n shutil.copy2(path, tmpfile)\n\n # Execute the editor\n subprocess.call([editor, tmpfile])\n\n # Copy the temporary file back and cleanup\n shutil.copy2(tmpfile, path)\n shutil.rmtree(tmpdir)", "def write_file(file_path, contents):\n logger.debug(f'write to file:{file_path}')\n with open(file_path, 'w') as outfile:\n outfile.write(contents)", "def makeFile(self, path=None, content=b''):\n if path is None:\n path = self.mktemp()\n with open(path, 'wb') as file:\n file.write(content)\n return path", "def save_entry(title, content):\n filename = f\"entries/{title}.md\"\n if default_storage.exists(filename):\n default_storage.delete(filename)\n default_storage.save(filename, ContentFile(content))", "def save_entry(title, content):\n filename = f\"entries/{title}.md\"\n if default_storage.exists(filename):\n default_storage.delete(filename)\n default_storage.save(filename, ContentFile(content))", "def write(self, path):\n try:\n contents = self.file_contents()\n except Exception as e:\n raise e\n\n tmp_hosts_file_path = \"{0}.tmp\".format(path) # Write atomically\n with open(tmp_hosts_file_path, 'w') as tmp_hosts_file:\n tmp_hosts_file.write(contents)\n\n os.rename(tmp_hosts_file_path, path)", "def make_dummy_file(path, contents=None):\n dirname = op.dirname(path)\n\n if not op.exists(dirname):\n os.makedirs(dirname)\n\n if contents is None:\n contents = '{}\\n'.format(op.basename(path))\n with open(path, 'wt') as f:\n f.write(contents)\n\n return hash(contents)", "def file(path, contents):\n\t__files[path.rstrip(os.path.sep)] = contents\n\tos.path.exists = __os_path_exists\n\t__builtin__.file = __builtins_file\n\t__builtin__.open = __builtins_file", "def write_file(path: str, content: Union[str, bytes], mode: str = 'w') -> None:\n from peltak.core import context, log\n\n if context.get('pretend', False):\n log.info(\"Would overwrite <34>{path}<32> with:\\n<90>{content}\",\n path=path,\n content=content)\n else:\n with open(path, mode) as fp:\n fp.write(content)", "def delete_file(self, path):\n return self.client._perform_empty(\n \"DELETE\", \"/projects/%s/managedfolders/%s/contents/%s\" % (self.project_key, self.odb_id, utils.quote(path)))", "def save_draft(cid):\r\n d_content = request.values.get('contract_content', '')\r\n if not d_content:\r\n return jsonify({'success': False, 'errorMsg': 'No content to save'})\r\n with engine.with_session() as ss:\r\n contract_to_update = ss.query(LxContract).get(cid)\r\n draft_to_update = contract_to_update.draft\r\n file_biz.save_contract_file(\r\n contract_to_update.owner_id, d_content,\r\n contract_to_update.name, draft_to_update.fuuid\r\n )\r\n return jsonify({'success': True, 'data': draft_to_update.id})", "def touch(path):\n fd = open(path, 'a')\n fd.close()", "def file_write(path: str, contents=\"\", mode=\"w\") -> None:\n while True:\n try:\n with open(path, mode, encoding=\"utf-8\") as fptr:\n fptr.write(f\"{contents}\\n\")\n return None\n except PermissionError:\n pass", "def save_file(content: Any, filename: str, path: str):\n\n logging.info('Saving 
file: %s ' % filename)\n path_to_file = join(path, filename)\n if isfile(path_to_file):\n ctrl = input('%s exists already in\\n %s.\\n'\n ' Are you sure you want to overwrite it [y/N]: '\n % (filename, path))\n if ctrl.lower() == 'y' or ctrl.lower() == 'yes':\n with open(path_to_file, \"wb\") as f:\n pickle.dump(content, f)\n else:\n logging.warning(\"%s NOT saved..\" % filename)\n return\n else:\n with open(path_to_file, \"wb\") as f:\n pickle.dump(content, f)\n\n logging.info(\"File '%s' saved.\" % filename)", "def create_file(dir, path, contents):\n\n fullpath = os.path.join(dir, path)\n fulldir = os.path.dirname(fullpath)\n\n if fulldir:\n try:\n os.makedirs(fulldir)\n except OSError:\n pass\n\n with open(fullpath, 'w') as file:\n file.write(contents)", "def write_contents(path, data):\n with open(path, 'wb') as stream:\n return stream.write(data)", "def create(contents, title, path=\"\"):\n filename = secure_filename(title)\n data_dir = get_data_dir()\n max_filename_length = 255\n if len(filename + \".md\") > max_filename_length:\n filename = filename[0 : max_filename_length - 3]\n if not is_relative_to(data_dir / path, data_dir):\n path = \"\"\n path_to_md_file = data_dir / path / f\"{filename}.md\"\n with open(path_to_md_file, \"w\", encoding=\"utf-8\") as file:\n file.write(contents)\n\n return path_to_md_file", "def write(self, path, content):\n this_file = open(path, 'w')\n this_file.write(content)\n this_file.close()", "def touch(path):\n with open(path, 'wt') as f:\n pass", "def create_file(cls, relpath, contents='', mode='w'):\r\n with safe_open(os.path.join(cls.build_root, relpath), mode=mode) as fp:\r\n fp.write(contents)", "def create_file(self, content=\"\"):\n if (self.exists()):\n raise IOError(\"A file at '{}' already exists.\".format(self.location))\n with open(self.location, 'w') as f:\n f.write(content)" ]
[ "0.59362435", "0.5571267", "0.5565971", "0.55636615", "0.55125356", "0.5481879", "0.5437837", "0.5421367", "0.5293984", "0.5231956", "0.5211434", "0.51726305", "0.5162061", "0.5162061", "0.5150922", "0.51438993", "0.51366174", "0.5113197", "0.5112223", "0.51041156", "0.50978494", "0.50740856", "0.50691414", "0.5064708", "0.5058191", "0.5054983", "0.5048492", "0.5039107", "0.50132364", "0.5011087" ]
0.80568475
0
Create or replace the link with the given name in the specified draft so that it points to the specified bundle version. To delete a link, pass bundle_uuid=None, version=None. If you don't know the draft's UUID, look it up using get_or_create_bundle_draft() Does not return anything.
def set_draft_link(draft_uuid, link_name, bundle_uuid, version): api_request('patch', api_url('drafts', str(draft_uuid)), json={ 'links': { link_name: {"bundle_uuid": str(bundle_uuid), "version": version} if bundle_uuid is not None else None, }, })
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_or_create_bundle_draft(bundle_uuid, draft_name):\n bundle = get_bundle(bundle_uuid)\n try:\n return get_draft(bundle.drafts[draft_name]) # pylint: disable=unsubscriptable-object\n except KeyError:\n # The draft doesn't exist yet, so create it:\n response = api_request('post', api_url('drafts'), json={\n \"bundle_uuid\": str(bundle_uuid),\n \"name\": draft_name,\n })\n # The result of creating a draft doesn't include all the fields we want, so retrieve it now:\n return get_draft(UUID(response[\"uuid\"]))", "def add_link_to_bundle(request, bundle_id):\n\n # ensure bundle exists\n bundle = get_object_or_404(Bundle, id=bundle_id)\n\n # get/create link for given url\n url = request.data.get('url', None)\n\n # validate url is a url\n v = URLValidator()\n\n try:\n v(url)\n except ValidationError as exc:\n # the user must be joking\n return Response({'error': True, 'msg': 'Invalid URL'}, status=400)\n\n # assert that \"comfort_level\" is specified.\n # this is validated outside of the `Link` fields handled by\n # DRF serializer validation.\n comfort_level = int(request.data.get('comfort_level', None))\n if comfort_level not in [i[0] for i in COMFORT_LEVELS]:\n return Response({'error': True,\n 'msg': 'Please specify a reader comfort level'\n })\n\n url = urltools.normalize(url)\n\n try:\n # fetch existing link\n link = Link.objects.get(url=url)\n except Link.DoesNotExist:\n # create a new link\n link_serializer = LinkSerializer(data=request.data)\n link_serializer.is_valid(raise_exception=True)\n link = link_serializer.save()\n\n # add link to bundle\n if not BundleLink.objects.filter(bundle=bundle, link=link).exists():\n # call alchemy util to fetch concepts for URL\n concepts = bundles.alchemy_utils.get_concepts(url)\n this_bundle = BundleLink.objects.create(bundle=bundle,\n link=link,\n comfort_level=comfort_level,\n curator_id=1)\n for concept in concepts:\n this_bundle.tags.add(concept)\n\n return Response('', status=201)", "def delete_draft(draft_uuid):\n api_request('delete', api_url('drafts', str(draft_uuid)))", "def commit_draft(draft_uuid):\n api_request('post', api_url('drafts', str(draft_uuid), 'commit'))", "def dangerously_delete(self, bento_name, bento_version):", "def svn_fs_revision_link(*args):\r\n return _fs.svn_fs_revision_link(*args)", "def model_version(name, version):\n model = Model.query.filter_by(name=name).first_or_404()\n\n if request.method == 'DELETE':\n # delete the version\n validate_owner(model, request)\n try:\n model.delete(version)\n return jsonify(status='success')\n except ModelNotFoundException:\n abort(404)\n else:\n # download the version\n try:\n return send_from_directory(*os.path.split(model.archive(version)))\n except ModelNotFoundException:\n abort(404)", "def switchRevision(deploymentPath, revision):\n with cd(deploymentPath):\n sudo('rm -f current')\n sudo('ln -s %s current' % revision)", "def create_draft(self, ka_id):\n url = (\n self.api.base_url +\n 'knowledgeManagement/articleVersions/masterVersions'\n )\n data = {'articleId': ka_id}\n result = self.api._call_salesforce('POST', url, json=data)\n if result.status_code != HTTPStatus.CREATED:\n e = SalesforceError((\n 'Error creating new draft for KnowlegeArticle (ID={})'\n ).format(ka_id))\n raise(e)\n kav_id = result.json()['id']\n return kav_id", "def set_version(self, bundle, ctx, filename, version):", "def firmware_pack_modify(handle, org_name, name, rack_bundle_version=None,\n blade_bundle_version=None, descr=None, mode=None,\n org_parent=\"org-root\"):\n\n org_dn = org_parent + 
\"/org-\" + org_name\n fw_dn= org_dn + \"/fw-host-pack-\" + name\n mo = handle.query_dn(fw_dn)\n if mo is not None:\n if rack_bundle_version is not None:\n mo.rack_bundle_version = rack_bundle_version\n if blade_bundle_version is not None:\n mo.blade_bundle_version = blade_bundle_version\n if mode is not None:\n mo.mode=mode\n if descr is not None:\n mo.descr = descr\n\n handle.set_mo(mo)\n handle.commit()\n else:\n log.info(\"Firmware host pack <%s> not found.\" % name)", "def as_draft(location):\r\n return location.replace(revision=DRAFT)", "def delete(self,\n draft_id,\n ):\n return self._invoke('delete',\n {\n 'draft_id': draft_id,\n })", "def update_current_link(self, name: str):\n lnk = self.ws_current_link\n if lnk.is_symlink():\n lnk.unlink()\n if name is not None:\n lnk.symlink_to(name)\n self.ws_config_file.touch(exist_ok=True)", "def remove_link_type_vlan(enode, name, shell=None):\n assert name\n if name not in enode.ports:\n raise ValueError('Port {name} doesn\\'t exists'.format(name=name))\n\n cmd = 'ip link del link dev {name}'.format(name=name)\n\n response = enode(cmd, shell=shell)\n assert not response, 'Cannot remove virtual link {name}'.format(name=name)\n\n del enode.ports[name]", "def add(self, bento_name, bento_version):", "def delete_version(self, version):\n\n parent_version_id = self.get_parent_version().id\n\n try:\n versioned_post = Post.objects.get(blog=self.blog,\n version_id=parent_version_id,\n version=version)\n versioned_post.delete()\n except Post.DoesNotExist:\n pass", "def edit_draft(self):\r\n EmptyPromise(\r\n lambda: self.q(css='.create-draft').present,\r\n 'Wait for edit draft link to be present'\r\n ).fulfill()\r\n\r\n self.q(css='.create-draft').first.click()\r\n\r\n EmptyPromise(\r\n lambda: self.q(css='.editing-draft-alert').present,\r\n 'Wait for draft mode to be activated'\r\n ).fulfill()", "def get_bundle_links(bundle_uuid, use_draft=None):\n bundle = get_bundle(bundle_uuid)\n if use_draft and use_draft in bundle.drafts: # pylint: disable=unsupported-membership-test\n draft_uuid = bundle.drafts[use_draft] # pylint: disable=unsubscriptable-object\n return get_draft(draft_uuid).links\n elif not bundle.latest_version:\n # This bundle has no versions so definitely does not contain any links\n return {}\n else:\n return get_bundle_version_links(bundle_uuid, bundle.latest_version)", "def delete_release(ctx, name):\n\n try:\n\n gh = ctx.obj.github\n\n log.echo('Deleting release...', break_line=False)\n gh.delete_release(name=name)\n log.checkmark()\n except BaseException as _:\n log.xmark()\n raise", "def put(self, blueprint_id, **kwargs):\n rm = get_resource_manager()\n sm = get_storage_manager()\n\n validate_inputs({'blueprint_id': blueprint_id})\n\n rm = get_resource_manager()\n\n with sm.transaction():\n blueprint = models.Blueprint(\n plan=None,\n id=blueprint_id,\n description=None,\n main_file_name='',\n state=BlueprintUploadState.UPLOADING,\n )\n sm.put(blueprint)\n blueprint.upload_execution, messages = rm.upload_blueprint(\n blueprint_id,\n '',\n None,\n config.instance.file_server_root, # for the import resolver\n config.instance.marketplace_api_url, # for the import resolver\n labels=None,\n )\n sm.update(blueprint)\n\n try:\n upload_blueprint_archive_to_file_server(\n blueprint_id)\n workflow_executor.execute_workflow(messages)\n except manager_exceptions.ExistingRunningExecutionError as e:\n blueprint.state = BlueprintUploadState.FAILED_UPLOADING\n blueprint.error = str(e)\n blueprint.error_traceback = traceback.format_exc()\n 
sm.update(blueprint)\n cleanup_blueprint_archive_from_file_server(\n blueprint_id, current_tenant.name)\n raise\n return blueprint, 201", "def update_bundle(bundle_uuid, **fields):\n assert isinstance(bundle_uuid, UUID)\n data = {}\n # Most validation will be done by Blockstore, so we don't worry too much about data validation\n for str_field in (\"title\", \"description\", \"slug\"):\n if str_field in fields:\n data[str_field] = fields.pop(str_field)\n if \"collection_uuid\" in fields:\n data[\"collection_uuid\"] = str(fields.pop(\"collection_uuid\"))\n if fields:\n raise ValueError(f\"Unexpected extra fields passed \" # pylint: disable=dict-keys-not-iterating\n f\"to update_bundle: {fields.keys()}\")\n result = api_request('patch', api_url('bundles', str(bundle_uuid)), json=data)\n return _bundle_from_response(result)", "def write_draft_file(draft_uuid, path, contents):\n api_request('patch', api_url('drafts', str(draft_uuid)), json={\n 'files': {\n path: encode_str_for_draft(contents) if contents is not None else None,\n },\n })", "def get_bundle_version_links(bundle_uuid, version_number):\n if version_number == 0:\n return {}\n version_info = get_bundle_version(bundle_uuid, version_number)\n return {\n name: LinkDetails(\n name=name,\n direct=LinkReference(**link[\"direct\"]),\n indirect=[LinkReference(**ind) for ind in link[\"indirect\"]],\n )\n for name, link in version_info['snapshot']['links'].items()\n }", "def mklinkto(self, oldname):\n error.checked_call(os.link, str(oldname), str(self))", "def unlink(address):", "def DeleteVersion(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def unlink(self, link_id):", "def _link(filename, existing_filename):\n CreateHardLinkW(filename, existing_filename, 0)", "def delete(name, phonebook):\n\n phonebook_data = read_phonebook(phonebook)\n\n if not phonebook_data.get(name):\n raise NoEntryError(\"This entry does not exist! \"\n \"(Names are case-sensitive.)\")\n\n else:\n print \"Deleting entry:\", name, phonebook_data[name]\n del phonebook_data[name]\n save(phonebook_data, phonebook)" ]
[ "0.58408153", "0.51587373", "0.49001387", "0.4804741", "0.47611052", "0.47401834", "0.46968883", "0.46876737", "0.45107222", "0.4470004", "0.44540083", "0.4445877", "0.44092384", "0.44081253", "0.44045332", "0.44012755", "0.43953812", "0.4391029", "0.43773264", "0.43721762", "0.43706688", "0.43695214", "0.43524188", "0.43216032", "0.4310829", "0.43075803", "0.42911294", "0.42798716", "0.42689347", "0.42420518" ]
0.8172148
0
Ensure that the given URL Blockstore is a URL accessible from the end user's browser.
def force_browser_url(blockstore_file_url): # Hack: on some devstacks, we must necessarily use different URLs for # accessing Blockstore file data from within and outside of docker # containers, but Blockstore has no way of knowing which case any particular # request is for. So it always returns a URL suitable for use from within # the container. Only this edxapp can transform the URL at the last second, # knowing that in this case it's going to the user's browser and not being # read by edxapp. # In production, the same S3 URLs get used for internal and external access # so this hack is not necessary. return blockstore_file_url.replace('http://edx.devstack.blockstore:', 'http://localhost:')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_url(self):\n pass", "def check_url_invalidity(self) -> bool:\n validate = URLValidator()\n try:\n validate(self.args.url)\n return False\n except ValidationError:\n return True", "def check_url(url=None, parse_url=None):\n return False", "def is_valid_url(url: str) -> bool:\n try:\n requests.get(url)\n except requests.exceptions.RequestException:\n return False\n return True", "def isURL(gridurl):\n\n isURL = False\n try:\n urllib.request.urlopen(gridurl)\n isURL = True\n except:\n pass\n return isURL", "def check_url(url=None, parse_url=None):\n \n if not parse_url:\n parse_url = urlparse.urlparse(url)\n \n return parse_url.netloc.endswith('slideshare.net')", "def test_is_url(self):\n\n url = \"https://shadowrun.needs.management\"\n self.assertTrue(run(verification.is_url(url)))\n\n url = \"https:// www.google.com\"\n self.assertFalse(run(verification.is_url(url)))", "def check_url(url=None, parse_url=None):\n \n if not parse_url:\n parse_url = urlparse.urlparse(url)\n \n return parse_url.netloc.endswith('bambuser.com')\\\n and bool(re.search('^\\/(v|broadcast)\\/\\d+(\\.live)?$', parse_url.path))", "def check_url(url=None, parse_url=None):\n if not parse_url:\n parse_url = urlparse.urlparse(url)\n \n return (parse_url.netloc == 'vine.co' or parse_url.netloc.endswith('.vine.co')) \\\n and re.search('/v/\\w', parse_url.path) is not None", "def url_check(url):\n \n url_tuple = urlparse.urlparse(url)\n if url_tuple[0] == 'http' or url_tuple[0] == 'https' and url_tuple[1] != \"\":\n return url\n else:\n raise Exception('bad url')", "def isURL(gridurl):\n\n is_url = False\n try:\n urllib.request.urlopen(gridurl)\n is_url = True\n except BaseException:\n pass\n return is_url", "def checkURL(cls, trust_root, url):\n tr = cls.parse(trust_root)\n return tr is not None and tr.validateURL(url)", "def _urlcheck(self):\n if (self['.managerhost'] and self['.settingurl'] and self['.guid']):\n return True\n else:\n return False", "def validate_url(self, v):\n u = urlparse.urlparse(v)\n if u.scheme.lower() not in ('http', 'https'):\n raise ValueError('URL scheme must be either http:// or https://')\n if not u.netloc:\n raise ValueError('URL must specify a network location.')\n return u.scheme.lower() == 'https'", "def verify(self):\n if self.geturl():\n return True\n return False", "def urlValidator(url):\n if 'amazon.com/' not in url:\n print('ERROR: Please enter a valid amazon.com URL. (ERROR 1)')\n else:\n validURL = url\n if 'Amazon.com/' not in url:\n print('ERROR: Please enter a valid amazon.com URL. 
(ERROR 2)')\n else:\n validURL = url\n\n return validURL", "def check_url(url=None, parse_url=None):\n \n if not parse_url:\n parse_url = urlparse.urlparse(url)\n \n invalid_paths = ['^\\/?$', '^\\/(stream|explore|groups|upload|you|dashboard|messages|settings|creativecommons|tracks|people)(\\/|$)']\n \n return parse_url.netloc in ['soundcloud.com', 'www.soundcloud.com', 'm.soundcloud.com']\\\n and not any(re.search(invalid_path, parse_url.path) for invalid_path in invalid_paths)", "def is_valid_for_downloading(base_url, asset_url):\n if not asset_url:\n return False\n base_netloc = urlsplit(base_url).netloc\n asset_netloc = urlsplit(asset_url).netloc\n return base_netloc == asset_netloc", "def is_url_valid(self, url: str) -> bool:\n if self.exclude and re.search(self.exclude, url):\n return False\n\n parts = urllib.parse.urlparse(url)\n\n if parts.scheme not in ('http', 'https'):\n LOGGER.debug(f'skipping non-http scheme in found at {url}')\n return False\n\n host, _ = urllib.parse.splitport(parts.netloc) # type: ignore\n\n if not self.host_okay(host):\n LOGGER.debug(f'skipping non-root host found at {url}')\n return False\n\n return True", "def validate_url(ctx, param, value):\n try:\n return URL(request.urlopen(value).read())\n except ValueError:\n raise click.BadParameter('url need to be a correct URL string')", "def check_url(url):\n return 'products.json' in url", "def validate_url(url: str) -> None:\n if not is_valid_url(url):\n raise ValueError(f\"Validation Error. Provided url '{url}' is not valid.\")\n try:\n response = requests.get(url)\n except Exception as e:\n raise ValueError(f\"Validation Error. '{url}' website doesn't exists.\")\n else:\n if response.status_code != status.HTTP_200_OK:\n raise ValueError(f\"Validation Error. '{url}' website doesn't exists.\")", "def is_valid_s3_url(url):\n scheme, netloc, path, _, _, _ = urlparse(url)\n\n port_except = RemotePortValidationError(\n 'Port value %s is not a valid s3 location' % url\n )\n\n if len(scheme) < 2:\n raise port_except\n\n if 's3' in scheme or 's3' in netloc or 's3' in path:\n return True\n else:\n raise port_except", "def _is_valid(self, url: ParseResult):\n\n if (\n re.match('(.*).' + self.netloc, url.netloc) is None or\n re.match('(.*)\\+[0-9]*$', url.path) is not None or\n re.match('(.*)javascript:(.*)', url.path) is not None\n ):\n return False\n\n return True", "def check_url(url: str) -> bool:\n try:\n potential_error = driver.find_element_by_xpath(\"/html/body/div[5]/div/div/div[1]/div/div/div/section/div[2]/div\").text\n if '403' in potential_error:\n return True\n except:\n return False", "def isUrlValid(self, url):\n if url is None:\n return False\n elif url.startswith('//'):\n return False\n elif ':' in url:\n return False\n elif url.startswith('/wiki'):\n return True\n elif 'en.wikipedia.org/wiki/' not in url:\n return False\n return True", "def isValidURL(self, url):\n if \"imdb.com\" in url:\n return True\n else:\n return False", "def test_validate_url_invalid_netloc():\n url_invalid_netloc = 'https://invalid.netloc.com/spreadsheets/d/AbCde1'\n assert validate_url(url_invalid_netloc) is False", "def _validate_url(url):\n if urlparse.urlparse(url).scheme not in VALID_SCHEMES:\n _fail(url, \"Invalid URL\")", "def check_url(self):\n\n base = 'https://www.reformagkh.ru/myhouse/profile/view/'\n\n if base not in self.url:\n raise UrlError('It is not an www.reformagkh.ru link. '\n 'Please try the correct link.')" ]
[ "0.6467403", "0.61517715", "0.61352235", "0.6027143", "0.6014926", "0.6010201", "0.6007184", "0.599208", "0.59750557", "0.5965351", "0.5957245", "0.5952767", "0.5918729", "0.5906625", "0.589159", "0.58743834", "0.58737737", "0.58627915", "0.5818365", "0.581255", "0.5793624", "0.5775979", "0.5762996", "0.57591105", "0.5727414", "0.57247496", "0.5722033", "0.5715989", "0.5713214", "0.57126695" ]
0.66461307
0
Computes the forward pass for the tanh activation function.
def tanh_forward(self, x): ############################################################################# # TODO: Implement the tanh forward pass. # ############################################################################# out = np.tanh(x) ############################################################################# # END OF YOUR CODE # ############################################################################# cache = out return out, cache
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward_hidden_activation(self, X):\n return np.tanh(X)", "def test_tanh_activation(self):\n self.assertEqual([0.099668, 0.099668], list(\n af.TanH().output(np.array([0.1, 0.1]))))\n self.assertEqual([0.990066, 0.990066], list(\n af.TanH().derivative(np.array([0.1, 0.1]))))", "def tanh_grad(self, X):\n return 1-self.tanh(X)**2", "def tanh(input, inplace=False):\n return FunctionLib.apply(\n 'Tanh', input.device, [input],\n outputs=[input if inplace else None])", "def forward(self, state):\n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n return F.tanh(self.fc3(x))", "def forward(self, x):\n # Pass the input through all the layers apllying ReLU activation, but the last\n for layer in self.fc_layers[:-1]:\n x = F.relu(layer(x))\n # Pass the result through the output layer apllying hyperbolic tangent function\n x = torch.tanh(self.fc_layers[-1](x))\n # Return the better action for the input state\n return x", "def forward(self, state):#forward pass\n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n return torch.tanh(self.fc3(x))", "def tanh(data):\n return _make.tanh(data)", "def tanh(x):\r\n # see decorator for function body\r", "def forward(self, state):\n x = f.relu(self.fc1(state))\n x = f.relu(self.fc2(x))\n return torch.tanh(self.fc3(x))", "def derived_tanh(x):\n return 1 - tanh(x)", "def derived_tanh(x):\n return 1 - tanh(x)", "def tanh(X):\n\tif isinstance(X,np.ndarray):\n\t\treturn (2.0/(1.0+np.exp(-(2*X))))-1\n\telse:\n\t\tX=np.array(X)\n\t\treturn tanh(X)", "def forward(self, state):\n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n x = F.relu(self.fc4(x))\n\n return F.tanh(self.fc5(x))", "def tanh(x):\n raise NotImplementedError", "def grad_tanh(self):\n grad = 1 - self.tanh(self.x) ** 2\n return grad", "def tanh(self, x):\n self.x = x\n output = np.tanh(x)\n return output", "def forward(self, state):\n \n x = F.relu(self.fc1(state)) \n x = F.relu(self.fc2(x)) \n x = F.tanh(self.fc3(x)) \n \n \n ####x = F.relu(self.bn1(self.fc1(state)))\n ####x = F.relu(self.bn2(self.fc2(x)))\n ####x = torch.tanh(self.bn3(self.fc3(x)))\n ##x = torch.tanh(self.fc3(x))\n\n return x", "def forward(self, x):\n self.y = x.tanh()\n return self.y", "def tanh(x):\n return 0.0", "def tanh(self, X):\n return (np.exp(X)-np.exp(-X))/(np.exp(X)+np.exp(-X))", "def tanh(self, x):\r\n self.x = x\r\n return np.tanh(x)", "def grad_tanh(self):\r\n return 1 - np.square(self.tanh(self.x))", "def tanh(self):\n return type(self)(self.parent(),\n self._simplify(self._express.tanh()))", "def grad_tanh(self):\n return (1-np.tanh(self.x)*np.tanh(self.x))\n raise NotImplementedError(\"tanh gradient not implemented\")", "def tanh(x):\r\n ex = np.exp(x)\r\n emx = np.exp(-x)\r\n return (ex - emx) / (ex + emx)", "def tanh(x):\n return (1 - e ** (-2*x))/ (1 + e ** (-2*x))", "def tanh(self):\t\t\t\t\n\t\tval = np.tanh(self.val)\n\t\tif len(self.der.shape):\n\t\t\tto_multiply = 1 / np.power(np.cosh(self.val), 2)\n\t\t\tto_multiply = np.expand_dims(to_multiply, 1) if len(self.der.shape) > len(to_multiply.shape) else to_multiply\n\t\t\tder = to_multiply * self.der\n\t\telse:\n\t\t\tder = None\n\t\treturn Var(val, der)", "def tanh(obj):\n\tif isinstance(obj, Variable):\n\t\tval = np.tanh(obj.val)\n\t\tder = 1-np.tanh(obj.val)**2\n\t\tif len(obj.der.shape)>len(der.shape):\n\t\t\tder = np.expand_dims(der,1)\n\t\tder = np.multiply(der, obj.der)\n\t\treturn Variable(val, der)\n\telse:\n\t\treturn np.tanh(obj)", "def tanh_grad(z):\n return 1.7159 * 2 / 3.0 * (1 - (np.tanh(2 / 3.0 * 
z)) ** 2)" ]
[ "0.7288412", "0.7224226", "0.71908355", "0.7186722", "0.7179838", "0.71629184", "0.71265453", "0.7122386", "0.7074787", "0.7027048", "0.69930476", "0.69930476", "0.6986668", "0.6968281", "0.6967701", "0.6929699", "0.6904969", "0.68929416", "0.68862855", "0.6878756", "0.687386", "0.6852396", "0.6844744", "0.682989", "0.68291503", "0.68053424", "0.67556316", "0.67083144", "0.667216", "0.66372603" ]
0.8011069
0
Computes the forward pass of a rectified linear unit (ReLU).
def relu_forward(self, x):
    #out = None
    #############################################################################
    # TODO: Implement the ReLU forward pass.                                    #
    #############################################################################
    out = np.array(x, copy=True)
    out[out <= 0] = 0
    #############################################################################
    #                             END OF YOUR CODE                              #
    #############################################################################
    cache = x
    return out, cache
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def relu_forward(x):\n ############################################################################\n # TODO: Implement the ReLU forward pass. #\n ############################################################################\n ############################################################################\n # START OF YOUR CODE #\n ############################################################################\n out = x\n out[out<0] = 0\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n return out", "def relu_forward(x):\n out = None\n ###########################################################################\n # TODO: Implement the ReLU forward pass. #\n ###########################################################################\n out = x.copy()\n out[x<=0] = 0\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = x\n return out, cache", "def relu_forward(x):\n out = None\n ###########################################################################\n # TODO: Implement the ReLU forward pass. #\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n out = np.where(x<=0, 0, x)\n\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = x\n return out, cache", "def relu_forward(x):\n out = None\n ###########################################################################\n # TODO: Implement the ReLU forward pass. #\n ###########################################################################\n #out = np.zeros(x.shape)\n #np.clip(x, 0, None, out)\n out = np.empty_like(x) #faster than zeros\n np.clip(x, 0, None, out)\n #out = x\n #out [out < 0] = 0\n #print(x)\n #print(out)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = x\n return out, cache", "def relu_forward(x):\n out = None\n ###########################################################################\n # TODO: Implement the ReLU forward pass. #\n ###########################################################################\n out = x * (x > 0)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = x\n return out, cache", "def forward(self, x):\n assert not torch.isnan(x).any(), f\"NaN in input {x}\"\n x = F.relu(self.l1(x))\n x = F.relu(self.l2(x))\n x = self.l3(x)\n return x", "def relu_forward(x):\n out = None\n ###########################################################################\n # TODO: Implement the ReLU forward pass. 
#\n ###########################################################################\n out = np.maximum(0, x)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = x\n return out, cache", "def grad_ReLU(self):\n y = self.x\n y[y<=0] = 0\n y[y>0] = 1\n return y\n raise NotImplementedError(\"ReLU gradient not implemented\")", "def forward(self, x):\n res = self.residual(x)\n x = self.gcn(x)\n x = self.tcn(x) + res\n return self.relu(x)", "def relu_forward(x):\n out = None\n ###########################################################################\n # TODO: Implement the ReLU forward pass. #\n ###########################################################################\n out = np.maximum(x,0)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = x\n return out, cache", "def forward(self, x):\n # Pass the input through all the layers apllying ReLU activation, but the last\n for layer in self.fc_layers[:-1]:\n x = F.relu(layer(x))\n # Pass the result through the output layer apllying hyperbolic tangent function\n x = torch.tanh(self.fc_layers[-1](x))\n # Return the better action for the input state\n return x", "def relu_grad(self, X):\n X[X<=0]=0\n X[X>0]=1\n return X", "def update_relus(self):\n\n def relu_backward_hook_function(module, grad_in, grad_out):\n \"\"\"\n If there is a negative gradient, change it to zero\n \"\"\"\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)\n\n def relu_forward_hook_function(module, ten_in, ten_out):\n \"\"\"\n Store results of forward pass\n \"\"\"\n self.forward_relu_outputs.append(ten_out)\n\n # Loop through layers, hook up ReLUs", "def forward(self, x: torch.Tensor):\n x = self.linear1(x)\n x = torch.relu(x)\n x = self.linear2(x)\n x = self.dropout(x)\n return x", "def forward(self, x):\r\n y = self.en_fc1(x)\r\n y = F.relu(y)\r\n y = self.en_fc2(y)\r\n y = F.relu(y)\r\n y = self.en_fc3(y)\r\n y = F.relu(y)\r\n\r\n mean = self.en_mu(y)\r\n stddev_p = self.en_log(y)\r\n \r\n n = x.shape[0]\r\n z = torch.randn(n,self.latent_dim)\r\n std = torch.exp(stddev_p/2.0)\r\n z = z.mul(std) + mean\r\n \r\n xhat = self.de_fc1(z)\r\n xhat = F.relu(xhat)\r\n xhat = self.de_fc2(xhat)\r\n xhat = F.relu(xhat)\r\n xhat = self.de_fc3(xhat)\r\n xhat = F.sigmoid(xhat)\r\n \r\n return y,mean,stddev_p,z,xhat", "def forward(self, x):\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x", "def forward(self, state):\n x = state\n for layer in self.linear_layers[:-1]:\n x = F.relu(layer(x))\n x = self.linear_layers[-1](x)\n return x", "def forward(self, x):\n assert not torch.isnan(x).any(), f\"NaN in input {x}\"\n x = F.relu(self.l1(x))\n x = F.relu(self.l2(x))\n x = self.l3(x)\n return torch.clamp(x, -1, 1)", "def update_relus(self):\n def relu_backward_hook_function(module, grad_in, grad_out):\n \"\"\"\n If there is a negative gradient, change it to zero\n \"\"\"\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n 
corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)\n\n def relu_forward_hook_function(module, ten_in, ten_out):\n \"\"\"\n Store results of forward pass\n \"\"\"\n self.forward_relu_outputs.append(ten_out)\n\n # Loop through layers, hook up ReLUs\n for pos, module in self.model.features._modules.items():\n if isinstance(module, ReLU):\n module.register_backward_hook(relu_backward_hook_function)\n module.register_forward_hook(relu_forward_hook_function)", "def forward(self, x):\n # x = state\n \n x = F.relu(self.input(x))\n x = self.output(x)\n \n return x", "def forward(self, state):\r\n x = F.relu(self.linear1(state))\r\n x = F.relu(self.linear2(x))\r\n #x = torch.tanh(self.linear3(x))\r\n #x = F.relu(self.linear3(x))\r\n #x = nn.LeakyReLU(self.linear3(x), negative_slope=0.1)# .negativ_slope nur für leakyReLU relevant\r\n x = F.leaky_relu(self.linear3(x), 0.1)\r\n #x = F.softmax(self.linear3(x), dim=0)\r\n \r\n return x#.negativ_slope", "def update_relus(self):\n def relu_backward_hook_function(module, grad_in, grad_out):\n \"\"\"\n If there is a negative gradient, change it to zero\n \"\"\"\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)\n\n def relu_forward_hook_function(module, ten_in, ten_out):\n \"\"\"\n Store results of forward pass\n \"\"\"\n self.forward_relu_outputs.append(ten_out)\n\n # Loop through layers, hook up ReLUs\n for pos, module in self.model._features_extractor._modules.items():\n for layer in module:\n if isinstance(layer, LeakyReLU):\n layer.register_backward_hook(relu_backward_hook_function)\n layer.register_forward_hook(relu_forward_hook_function)", "def forward(self, x):\n return self.relu(self.conv(x))", "def forward(self, x):\n x = self.first_deconv(x)\n x = self.first_batch_norm(x)\n x = F.leaky_relu(x)\n\n x = self.second_deconv(x)\n x = self.second_batch_norm(x)\n x = F.leaky_relu(x)\n\n x = self.third_deconv(x)\n x = self.third_batch_norm(x)\n x = F.leaky_relu(x)\n\n x = self.fourth_deconv(x)\n x = self.fourth_batch_norm(x)\n\n x = self.fifth_deconv(x)\n x = self.fifth_batch_norm(x)\n\n x = self.sixth_deconv(x)\n x = self.sixth_batch_norm(x)\n\n x = self.seventh_deconv(x)\n\n # sigmoid_out = nn.functional.sigmoid(x)\n tanh_out = nn.functional.tanh(x)\n\n out = (tanh_out + 1) * 255 / 2\n\n # print 'out.shape =', out.shape\n\n return out", "def forward(self, state):\n x = F.relu(self.linear1(state))\n x = F.relu(self.linear2(x))\n x = self.linear3(x)\n return x", "def update_relus(self):\n def relu_backward_hook_function(module, grad_in, grad_out):\n \"\"\"\n If there is a negative gradient, change it to zero\n \"\"\"\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)\n\n def relu_forward_hook_function(module, ten_in, ten_out):\n \"\"\"\n Store results of forward pass\n \"\"\"\n 
self.forward_relu_outputs.append(ten_out)\n\n # Loop through layers, hook up ReLUs\n for pos, module in self.model.named_modules():\n if isinstance(module, nn.ReLU):\n module.register_backward_hook(relu_backward_hook_function)\n module.register_forward_hook(relu_forward_hook_function)", "def update_relus(self):\n def relu_backward_hook_function(module, grad_in, grad_out):\n \"\"\"\n If there is a negative gradient, change it to zero\n \"\"\"\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)\n\n def relu_forward_hook_function(module, ten_in, ten_out):\n \"\"\"\n Store results of forward pass\n \"\"\"\n self.forward_relu_outputs.append(ten_out)\n\n # Loop through layers, hook up ReLUs\n for pos, module in self.model.named_modules():\n if isinstance(module, ReLU):\n module.register_backward_hook(relu_backward_hook_function)\n module.register_forward_hook(relu_forward_hook_function)", "def forward(self, state):\n x = F.relu(self.linear1(state))\n x = F.relu(self.linear2(x))\n x = torch.tanh(self.linear3(x))\n\n return x", "def relu_backward(dout, x):\n ############################################################################\n # TODO: Implement the ReLU backward pass. #\n ############################################################################\n ############################################################################\n # START OF YOUR CODE #\n ############################################################################\n judge = x>0\n dx = dout*judge\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n return dx", "def relu(input, inplace=False):\n return FunctionLib.apply(\n 'Relu', input.device, [input],\n outputs=[input if inplace else None], alpha=0.)" ]
[ "0.6923425", "0.6670239", "0.6663412", "0.6650045", "0.6630312", "0.65392053", "0.64895386", "0.648836", "0.64600945", "0.6429588", "0.6399773", "0.63981783", "0.63528794", "0.63357466", "0.63077855", "0.62966233", "0.6285175", "0.6278084", "0.62459034", "0.62427825", "0.6203059", "0.6195473", "0.6194887", "0.61704004", "0.61551404", "0.61313015", "0.6124539", "0.61058307", "0.610198", "0.610122" ]
0.67316556
1
Computes the backward pass for a layer of rectified linear units (ReLUs).
def relu_backward(self, dUpper, cache):
    x = cache
    #############################################################################
    # TODO: Implement the ReLU backward pass.                                   #
    #############################################################################
    x = np.array(x, copy=True)
    x[x <= 0] = 0
    x[x > 0] = 1
    drelu = dUpper * x
    #############################################################################
    #                             END OF YOUR CODE                              #
    #############################################################################
    return drelu
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def backward(ctx, grad_output):\n loss, reg, u, lbda = ctx.saved_tensors\n\n device = u.device\n\n # do clever computations\n eps = 1e-10\n grad, = torch.autograd.grad(loss, u, only_inputs=True,\n retain_graph=True)\n x = (u - eps * grad).data\n lbda = lbda.data\n\n prox_x = check_tensor(\n np.array([prox_tv.tv1_1d(xx, eps * lbda) for xx in x]),\n device=device,\n )\n grad_u = (u - prox_x) / eps\n grad_lbda = reg.clone()\n return (torch.ones(0), grad_u, grad_lbda)", "def conv_relu_backward(dout, cache):\n conv_cache, relu_cache = cache\n da = layers.relu_backward(dout, relu_cache)\n dx, dw, db = layers.conv_backward_fast(da, conv_cache)\n return dx, dw, db", "def conv_relu_backward(dout, cache):\n conv_cache, relu_cache = cache\n da = relu_backward(dout, relu_cache)\n dx, dw, db = conv_backward_fast(da, conv_cache)\n return dx, dw, db", "def conv_relu_backward(dout, cache):\n conv_cache, relu_cache = cache\n da = relu_backward(dout, relu_cache)\n dx, dw, db = conv_backward_fast(da, conv_cache)\n return dx, dw, db", "def backward_deconvnet_relu(x):\n def grad(dy):\n return tf.nn.relu(dy)\n return tf.nn.relu(x), grad", "def relu_backward(dout, cache):\n dx, x = None, cache\n ###########################################################################\n # TODO: Implement the ReLU backward pass. #\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n dx = np.where(x<=0, 0, 1) * dout\n \n\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx", "def backward_pass(self, loss):\n\n self.optimizer.zero_grad()\n self.optimizer.backward(loss)\n self.optimizer.step()", "def backward_D(self):\n self.loss_D.backward()", "def backward(ctx, grad_L):\n A, T = ctx.saved_tensors\n\n grad_A = None\n grad_T = None\n\n B = A.shape[0]\n\n # We only need to compute gradients for tensors that are flagged to\n # require gradients!\n if ctx.needs_input_grad[0]:\n grad_A = (A - T) / B\n\n if ctx.needs_input_grad[1]:\n grad_T = (T - A) / B\n\n return grad_A, grad_T", "def _backward(loss):\n\n loss.backward()", "def backward_pass(self, grad):\n pass", "def L_model_backward(AL, Y, caches):\n pass", "def relu_backward(dout, cache):\n dx, x = None, cache\n ###########################################################################\n # TODO: Implement the ReLU backward pass. #\n ###########################################################################\n dx = dout.copy()\n dx[x<=0] = 0\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx", "def backward(self, inputs, grad_loss_input):\n raise NotImplementedError", "def relu_backward(dout, x):\n ############################################################################\n # TODO: Implement the ReLU backward pass. 
#\n ############################################################################\n ############################################################################\n # START OF YOUR CODE #\n ############################################################################\n judge = x>0\n dx = dout*judge\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n return dx", "def backward_G(self):\n self.loss_G.backward()", "def relu_backward(dout, cache):\n dx, x = None, cache\n ###########################################################################\n # TODO: Implement the ReLU backward pass. #\n ###########################################################################\n dx = dout * (x > 0)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx", "def relu_backward(dout, cache):\n dx, x = None, cache\n ###########################################################################\n # TODO: Implement the ReLU backward pass. #\n ###########################################################################\n dx = dout * (x > 0)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx", "def backward(self, lhs: Tensor, rhs: Tensor, acc_grad: np.ndarray):\n raise NotImplementedError", "def relu_backward_hook_function(module, grad_in, grad_out):\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)", "def relu_backward_hook_function(module, grad_in, grad_out):\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)", "def relu_backward_hook_function(module, grad_in, grad_out):\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)", "def relu_backward_hook_function(module, grad_in, grad_out):\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)", "def relu_backward_hook_function(module, grad_in, grad_out):\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove 
last forward output\n return (modified_grad_out,)", "def backward(ctx, dy):\n y = ctx.y\n if ctx.eagerly_discard_variables:\n del ctx.y\n for i in range(len(ctx.reversible_blocks) - 1, -1, -1):\n y, dy = ctx.reversible_blocks[i].backward_pass(y, dy, not ctx.eagerly_discard_variables)\n if ctx.eagerly_discard_variables:\n del ctx.reversible_blocks\n return dy, None, None", "def relu_backward(dout, cache):\n dx, x = None, cache\n ###########################################################################\n # TODO: Implement the ReLU backward pass. #\n ###########################################################################\n #print(dout)\n dx = np.empty_like(dout)\n np.copyto(dx, dout)\n dx[x < 0] = 0\n #print(dx)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx", "def relu_backward_hook_function(module, grad_in, grad_out):\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)", "def L_model_backward(AL, Y, caches):\n grads = {}\n L = len(caches) # the number of layers\n m = AL.shape[1]\n Y = Y.reshape(AL.shape) # after this line, Y is the same shape as AL\n\n dAL = -(np.divide(Y,AL)-np.divide(1-Y,1-AL))\n \"\"\"\n cache = caches[-1]\n grads[\"dA\"+str(L)],grads[\"dW\"+str(L)],grads[\"db\"+str(L)] = linear_activation_backward(dAL,cache,activation = 'sigmoid')\n\n for i in reversed(range(L-1)):\n grads[\"dA\"+str(i+1)],grads[\"dW\"+str(i+1)],grads[\"db\"+str(i+1)] = linear_activation_backward(grads[\"dA\"+str(i+2)],caches[i],activation = 'relu')\n \"\"\"\n\n current_cache = caches[-1]\n grads[\"dA\" + str(L)], grads[\"dW\" + str(L)], grads[\"db\" + str(L)] = linear_backward(sigmoid_backward(dAL, current_cache[1]),current_cache[0])\n\n for l in reversed(range(L - 1)):\n # lth layer: (RELU -> LINEAR) gradients.\n # Inputs: \"grads[\"dA\" + str(l + 2)], caches\". Outputs: \"grads[\"dA\" + str(l + 1)] , grads[\"dW\" + str(l + 1)] , grads[\"db\" + str(l + 1)]\n ### START CODE HERE ### (approx. 5 lines)\n current_cache = caches[l]\n dA_prev_temp, dW_temp, db_temp = linear_backward(sigmoid_backward(dAL, current_cache[1]), current_cache[0])\n grads[\"dA\" + str(l + 1)] = dA_prev_temp\n grads[\"dW\" + str(l + 1)] = dW_temp\n grads[\"db\" + str(l + 1)] = db_temp\n ### END CODE HERE ###\n\n return grads", "def _AffLayerReluDrop_Backprop(self, dscores, cache):\n grads = {}\n loss = None\n #Last Softmax Layer\n ##Add L2 Regularization loss\n loss = 0.5 * self.reg * np.sum(self.params['W{0}'.format(self.num_layers)]**2)\n ##Calculate grads for last Affine\n dhid, grads['W{0}'.format(self.num_layers)], grads['b{0}'.format(self.num_layers)] =\\\n affine_backward(dscores, cache[-1])\n grads['W{0}'.format(self.num_layers)] += self.reg * self.params['W{0}'.format(self.num_layers)]\n\n for i in range(self.num_layers-1, 0, -1): #hidden layers\n ##L2 Reg. 
loss\n loss += 0.5 * self.reg * np.sum(self.params['W{0}'.format(i)]**2)\n ##Calculate grads for [{affine-Batchnorm-relu} X (L-1)]\n dhid = dropout_backward(dhid, cache[i]['drop'])\n dhid = relu_backward(dhid, cache[i]['relu'])\n dhid, grads['gamma{0}'.format(i)], grads['beta{0}'.format(i)] = \\\n layernorm_backward(dhid, cache[i]['layernorm'])\n dhid, grads['W{0}'.format(i)], grads['b{0}'.format(i)] = \\\n affine_backward(dhid, cache[i]['affine']) \n grads['W{0}'.format(i)] += self.reg * self.params['W{0}'.format(i)]\n\n return grads, loss", "def backward(self, inputs, gradients, **kwargs):\n grad_relu = inputs > 0\n return gradients * grad_relu" ]
[ "0.6948249", "0.6898323", "0.6847992", "0.6847992", "0.68477255", "0.68407637", "0.68200904", "0.6768128", "0.67572653", "0.67565167", "0.67562467", "0.6743265", "0.67394495", "0.6715497", "0.67090523", "0.67006433", "0.669643", "0.669643", "0.66886026", "0.6666757", "0.6666757", "0.6666757", "0.6666757", "0.6666757", "0.6662765", "0.6661605", "0.66594064", "0.66544884", "0.6627915", "0.66035134" ]
0.70406467
0
returns count of sequences in given fasta file(s) The input_fasta_files is a list of fasta filepaths
def get_sequence_count(input_fasta_files):
    # Correction for the case that only one file passed
    if type(input_fasta_files)==str:
        input_fasta_files=[input_fasta_files]
    count=0
    for n in input_fasta_files:
        fasta_f=open(n,'U')
        for label,seq in MinimalFastaParser(fasta_f):
            count+=1
        fasta_f.close()
    return count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_seqs_in_filepaths(fasta_filepaths, seq_counter=count_seqs):\r\n total = 0\r\n counts = []\r\n inaccessible_filepaths = []\r\n # iterate over the input files\r\n for fasta_filepath in fasta_filepaths:\r\n # if the file is actually fastq, use the fastq parser.\r\n # otherwise use the fasta parser\r\n if fasta_filepath.endswith('.fastq'):\r\n parser = parse_fastq\r\n elif fasta_filepath.endswith('.tre') or \\\r\n fasta_filepath.endswith('.ph') or \\\r\n fasta_filepath.endswith('.ntree'):\r\n # This is clunky, but really convenient bc\r\n # it lets us count tree tips with count_seqs.py\r\n def parser(f):\r\n t = DndParser(f, constructor=PhyloNode)\r\n return zip(t.iterTips(), repeat(''))\r\n else:\r\n parser = parse_fasta\r\n\r\n try:\r\n # get the count of sequences in the current file\r\n current_count = seq_counter(fasta_filepath, parser=parser)\r\n # store it\r\n counts.append((current_count, fasta_filepath))\r\n # and increment the total count\r\n total += current_count[0]\r\n except IOError:\r\n # if the file couldn't be open, keep track of the filepath\r\n inaccessible_filepaths.append(fasta_filepath)\r\n\r\n return counts, total, inaccessible_filepaths", "def count_seqs(fasta_filepath, parser=parse_fasta):\r\n # Open the file and pass it to py_count_seqs_from_file -- wrapping\r\n # this makes for easier unit testing\r\n return count_seqs_from_file(open(fasta_filepath, 'U'), parser=parser)", "def count_seqs_from_file(fasta_file, parser=parse_fasta):\r\n result = 0\r\n lens = []\r\n for record in parser(fasta_file):\r\n result += 1\r\n lens.append(len(record[1]))\r\n if result == 0:\r\n return result, None, None\r\n else:\r\n return result, mean(lens), std(lens)", "def test_count_seqs(self):\r\n def seq_counter(filepath, parser=None):\r\n # Fake sequence counter to test count_seqs without\r\n # having to write files to disk (note don't need to\r\n # test actual sequence counters here as they're tested\r\n # elsewhere)\r\n if filepath.startswith('fake'):\r\n raise IOError\r\n else:\r\n return len(filepath), 0, 0\r\n\r\n in_fps = ['1.fasta', 'fake1.fasta', 'fake.fasta', '2.fa']\r\n expected = [((7, 0, 0), '1.fasta'),\r\n ((4, 0, 0), '2.fa')],\\\r\n 11, ['fake1.fasta', 'fake.fasta']\r\n self.assertEqual(count_seqs_in_filepaths(\r\n in_fps, seq_counter), expected)\r\n\r\n in_fps = ['fake1.fasta', 'fake.fasta']\r\n expected = [], 0, ['fake1.fasta', 'fake.fasta']\r\n self.assertEqual(count_seqs_in_filepaths(\r\n in_fps, seq_counter), expected)\r\n\r\n in_fps = ['1.fasta', '2.fa', '12.txt']\r\n expected = [((7, 0, 0), '1.fasta'),\r\n ((4, 0, 0), '2.fa'),\r\n ((6, 0, 0), '12.txt')], 17, []\r\n self.assertEqual(count_seqs_in_filepaths(\r\n in_fps, seq_counter), expected)", "def count_matches(sam_input):\n logging.info(\"Counting aligned bases in %s ...\", sam_input.name)\n\n total_bases = 0\n with pysam.AlignmentFile(sam_input, \"r\") as sam:\n for read in sam:\n total_bases += aligned_bases(read.cigar)\n return total_bases", "def count_n_grams_fasta(fasta_dict, name, kmin, kmax):\n # get the number of files in the names directory\n num_fastas = len(fasta_dict[name])\n # initialyze the counter\n counter = Counter()\n # iterates through the list of paths\n for filename in fasta_dict[name]:\n # reads the file and parse the content\n print(f'Reading and parsing the filename {filename}')\n for name, sequence in parse_fasta(filename):\n # counting the kmers\n cnt = count_kmers(sequence, kmin, kmax, counter=None)\n # add the count of the current file to the counter\n counter.update(cnt)\n 
# to get the mean of the kmer count for all the files\n final_counter = {k: (c // num_fastas) for k, c in counter.items()}\n return final_counter", "def _compute_seqs_per_file(self,\r\n input_fasta_fp,\r\n num_jobs_to_start):\r\n # count the number of sequences in the fasta file\r\n num_input_seqs = count_seqs(input_fasta_fp)[0]\r\n\r\n # divide the number of sequences by the number of jobs to start\r\n result = num_input_seqs / num_jobs_to_start\r\n\r\n # if we don't have a perfect split, round up\r\n if result % 1 != 0:\r\n result += 1\r\n\r\n # return the result as an integer\r\n return int(result)", "def countsubcatchments(inputfilename=FileSettings.settingsdict['inputfilename']):\r\n global count\r\n with open(inputfilename, 'r') as swmmput:\r\n contents = swmmput.readlines()\r\n count = len(contents)\r\n return(count)", "def freqs_from_aln_array(seqs):\n result = None\n for label, seq in MinimalFastaParser(seqs):\n # Currently cogent does not support . characters for gaps, converting\n # to - characters for compatability.\n seq = ModelDnaSequence(seq.replace('.','-'))\n if result is None:\n result = zeros((len(seq.Alphabet), len(seq)),dtype=int)\n indices = arange(len(seq), dtype=int)\n result[seq._data,indices] += 1\n return Profile(result, seq.Alphabet)", "def test_split_fasta_equal_num_seqs_per_file(self):\r\n fd, filename_prefix = mkstemp(dir=get_qiime_temp_dir(),\r\n prefix='split_fasta_tests',\r\n suffix='')\r\n close(fd)\r\n infile = ['>seq1', 'AACCTTAA', '>seq2', 'TTAACC', 'AATTAA',\r\n '>seq3', 'CCTT--AA']\r\n\r\n actual = split_fasta(infile, 1, filename_prefix)\r\n actual_seqs = []\r\n for fp in actual:\r\n actual_seqs += list(open(fp))\r\n remove_files(actual)\r\n\r\n expected = ['%s.%d.fasta' % (filename_prefix, i) for i in range(3)]\r\n\r\n self.assertEqual(actual, expected)\r\n self.assertEqual(\r\n SequenceCollection.from_fasta_records(parse_fasta(infile), DNA),\r\n SequenceCollection.from_fasta_records(parse_fasta(actual_seqs), DNA))", "def countBasesInFasta(fastaFile):\n recordRE=re.compile(r'^>')\n whiteSpaceRE=re.compile(r'\\s+')\n totalBases=0\n totalSeqs=0\n with open(fastaFile) as f:\n for line in f:\n if recordRE.match(line):\n totalSeqs+=1\n continue\n totalBases+=len(whiteSpaceRE.sub('',line))", "def count_examples(filepaths):\n n = 0\n for f in filepaths:\n for r in tf.python_io.tf_record_iterator(f):\n n += 1\n return n", "def count_total_mutations_cpp(seqs):\n folder = \"/gpfs/group/cdm/IPRO_Suite/modules/CPP/humanization/\"\n name = \"humanization.out\"\n shutil.copyfile(folder + name, name)\n cmd = \"chmod a+x \" + name\n os.system(cmd)\n seqFile = \"sequences.txt\"\n f = open(seqFile, 'w')\n for s in seqs:\n f.write(s + \"\\n\")\n f.close()\n cmd = \"./humanization.out \" + seqFile\n os.system(cmd)\n countFile = \"counts.txt\"\n if os.path.exists(countFile):\n f = open(countFile, 'r')\n firstline = f.readline().strip(' \\t\\n')\n return int(firstline)\n else:\n text = \"humanization.out cpp code do not give the right counts of the mutations, please check\"\n raise DeimmunizationError(text)", "def count_reads(in_fastq, in_ref, KEY_INTERVAL=(10,80), DIR='FWD',\r\n KEY='CGAAACACCG', KEY_REV='GTTTTAGA', out_counts='counts.csv',\r\n out_np='np_counts.csv', out_stats='stats.txt'):\r\n\r\n # STEP 1A: OPEN INPUT FILES FOR PROCESSING, CHECK FOR REQUIRED FORMATTING\r\n # look for 'sgRNA_seq' column, raise Exception if missing\r\n df_ref = pd.read_csv(in_ref, header=0) # explicit header = first row\r\n if 'sgRNA_seq' not in df_ref.columns.tolist():\r\n raise 
Exception('in_ref is missing column: sgRNA_seq')\r\n # look for other cols, raise Warning if suggested cols are missing\r\n list_headcols = ['sgRNA_ID', 'sgRNA_seq', 'Gene', 'cut_site_AA', 'Domain']\r\n if not all(col in df_ref.columns.tolist() for col in list_headcols):\r\n list_miss = [col for col in list_headcols if col not in df_ref.columns.tolist()]\r\n warnings.warn('Warning! in_ref is missing column(s) for downstream functions: ' + str(list_miss))\r\n # try opening input FASTQ, raise Exception if not possible\r\n try:\r\n handle = open(in_fastq)\r\n except:\r\n print('Error! Could not open the FASTQ file: %s' % in_fastq)\r\n return\r\n\r\n # STEP 1B: SET UP VARIABLES FOR SCRIPT\r\n # make dictionary to hold sgRNA counts - sgRNA_seq, count as k,v\r\n dict_perfects = {sgRNA:0 for sgRNA in df_ref['sgRNA_seq']}\r\n list_np = [] # placeholder list for non-perfect matches\r\n num_reads = 0 # total number of reads processed\r\n num_perfect_matches = 0 # count of reads with a perfect match to library\r\n num_np_matches = 0 # count of reads without a perfect match to library\r\n num_nokey = 0 # count of reads where key was not found\r\n KEY_START, KEY_END = KEY_INTERVAL[0], KEY_INTERVAL[1] # set the key interval\r\n\r\n # STEP 2: PROCESS FASTQ FILE READS AND ADD COUNTS TO DICT\r\n readiter = SeqIO.parse(handle, 'fastq') # process reads in fastq file\r\n # find sgRNA using FORWARD direction (default)\r\n if DIR == 'FWD':\r\n for record in readiter: # contains the seq and Qscore etc.\r\n num_reads += 1\r\n read_sequence = str.upper(str(record.seq))\r\n key_region = read_sequence[KEY_START:KEY_END]\r\n key_index = key_region.find(KEY)\r\n if key_index >= 0: # if key found\r\n start_index = key_index + KEY_START + len(KEY)\r\n guide = read_sequence[start_index:(start_index + 20)]\r\n if guide in dict_perfects:\r\n dict_perfects[guide] += 1\r\n num_perfect_matches += 1\r\n else:\r\n num_np_matches += 1\r\n list_np.append(guide)\r\n else:\r\n num_nokey += 1\r\n # find sgRNA using REVERSE direction\r\n elif DIR == 'REV':\r\n for record in readiter: # contains the seq and Qscore etc.\r\n num_reads += 1\r\n read_sequence = str.upper(str(record.seq))\r\n key_region = read_sequence[KEY_START:KEY_END]\r\n key_index = key_region.find(KEY_REV)\r\n if key_index >= 0: # if key found\r\n start_index = key_index + KEY_START\r\n guide = read_sequence[(start_index - 20):(start_index)]\r\n if guide in dict_perfects:\r\n dict_perfects[guide] += 1\r\n num_perfect_matches += 1\r\n else:\r\n num_np_matches += 1\r\n list_np.append(guide)\r\n else:\r\n num_nokey += 1\r\n else:\r\n raise Exception('ERROR! 
Specified direction is not valid')\r\n handle.close()\r\n\r\n # STEP 3: SORT DICTIONARIES AND GENERATE OUTPUT FILES\r\n # sort perf matches (A-Z) with guides,counts as k,v and output to csv\r\n df_perfects = pd.DataFrame(data=dict_perfects.items(), columns=['sgRNA_seq', 'reads'])\r\n df_perfects.sort_values(by='sgRNA_seq', inplace=True)\r\n df_perfects.to_csv(out_counts, index=False, header=False)\r\n # now sort non-perfect matches by frequency and output to csv\r\n dict_np = Counter(list_np) # use Counter to tally up np matches\r\n df_npmatches = pd.DataFrame(data=dict_np.items(), columns=['sgRNA_seq', 'reads'])\r\n df_npmatches.sort_values(by='reads', ascending=False, inplace=True)\r\n df_npmatches.to_csv(out_np, index=False)\r\n\r\n # STEP 4: CALCULATE STATS AND GENERATE STAT OUTPUT FILE\r\n # percentage of guides that matched perfectly\r\n pct_perfmatch = round(num_perfect_matches/float(num_perfect_matches + num_np_matches) * 100, 1)\r\n # percentage of undetected guides (no read counts)\r\n guides_with_reads = np.count_nonzero(list(dict_perfects.values()))\r\n guides_no_reads = len(dict_perfects) - guides_with_reads\r\n pct_no_reads = round(guides_no_reads/float(len(dict_perfects.values())) * 100, 1)\r\n # skew ratio of top 10% to bottom 10% of guide counts\r\n top_10 = np.percentile(list(dict_perfects.values()), 90)\r\n bottom_10 = np.percentile(list(dict_perfects.values()), 10)\r\n if top_10 != 0 and bottom_10 != 0:\r\n skew_ratio = top_10/bottom_10\r\n else:\r\n skew_ratio = 'Not enough perfect matches to determine skew ratio'\r\n # calculate the read coverage (reads processed / sgRNAs in library)\r\n num_guides = df_ref['sgRNA_seq'].shape[0]\r\n coverage = round(num_reads / num_guides, 1)\r\n # calculate the number of unmapped reads (num_nokey / total_reads)\r\n pct_unmapped = round((num_nokey / num_reads) * 100, 2)\r\n\r\n # write analysis statistics to statfile\r\n with open(out_stats, 'w') as statfile:\r\n statfile.write('Number of reads processed: ' + str(num_reads) + '\\n')\r\n statfile.write('Number of reads where key was not found: ' + str(num_nokey) + '\\n')\r\n statfile.write('Number of perfect guide matches: ' + str(num_perfect_matches) + '\\n')\r\n statfile.write('Number of nonperfect guide matches: ' + str(num_np_matches) + '\\n')\r\n statfile.write('Number of undetected guides: ' + str(guides_no_reads) + '\\n')\r\n statfile.write('Percentage of unmapped reads (key not found): ' + str(pct_unmapped) + '\\n')\r\n statfile.write('Percentage of guides that matched perfectly: ' + str(pct_perfmatch) + '\\n')\r\n statfile.write('Percentage of undetected guides: ' + str(pct_no_reads) + '\\n')\r\n statfile.write('Skew ratio of top 10% to bottom 10%: ' + str(skew_ratio) + '\\n')\r\n statfile.write('Read coverage: ' + str(coverage))\r\n statfile.close()\r\n\r\n print(str(in_fastq) + ' processed')\r\n return", "def count_unique_sequences_per_otu(otu_ids, otu_map_file, input_seqs_file):\n # This will hold the OTU map for the OTUs in otu_ids\n otu_map = {x: set() for x in otu_ids}\n\n # go through the otu map and save the lines of interest to the otu_map\n # data structure above\n print \"Reading OTU map...\"\n for line in otu_map_file:\n otu_id, seq_ids = line.strip().split('\\t', 1)\n if otu_id in otu_ids:\n otu_map[otu_id] = set(seq_ids.split('\\t'))\n\n # this will hold, for each OTU in otus, counts of each unique sequence\n # observed in that OTU\n unique_counts = {x: defaultdict(int) for x in otu_ids}\n\n # go through input fasta file TWO LINES AT A TIME, counting unique\n # 
sequences in each OTU of intrest\n print \"Reading FASTA file and counting unique sequences...\"\n for header, sequence in izip(input_seqs_file, input_seqs_file):\n header = header.strip()\n sequence = sequence.strip()\n seq_id = header.split(' ', 1)[0][1:]\n for otu_id in otu_ids:\n if seq_id in otu_map[otu_id]:\n unique_counts[otu_id][sequence] += 1\n break\n\n return unique_counts", "def read_several_fasta(input_files):\n pb_seq = []\n pb_name = []\n for name in input_files:\n header, seq = read_fasta(name)\n pb_name += header\n pb_seq += seq\n return pb_name, pb_seq", "def calculate_coverage(path, alignment, number_of_fastas):\n\n path_to_alignment = path + 'Modeling/fasta_alns_and_identities/' + alignment\n fastas_iterator = parse_multifasta_file(path_to_alignment, number_of_fastas)\n fastas = []\n targer_name, target_seq = next(fastas_iterator)\n fastas.append(target_seq)\n length_of_target = 0\n for i in target_seq:\n if i != '-':\n length_of_target += 1\n for i in range(1, number_of_fastas):\n name, seq = next(fastas_iterator)\n fastas.append(seq)\n coverage = 0\n for i in range(len(fastas[0])):\n for j in range(1, len(fastas)):\n if fastas[0][i] != '-' and fastas[j][i] != '-':\n coverage += 1\n break\n coverage_percent = round(coverage / length_of_target * 100, 2)\n return coverage_percent", "def test_split_fasta_diff_num_seqs_per_file(self):\r\n fd, filename_prefix = mkstemp(dir=get_qiime_temp_dir(),\r\n prefix='split_fasta_tests',\r\n suffix='')\r\n close(fd)\r\n infile = ['>seq1', 'AACCTTAA', '>seq2', 'TTAACC', 'AATTAA',\r\n '>seq3', 'CCTT--AA']\r\n\r\n actual = split_fasta(infile, 2, filename_prefix)\r\n\r\n actual_seqs = []\r\n for fp in actual:\r\n actual_seqs += list(open(fp))\r\n remove_files(actual)\r\n\r\n expected = ['%s.%d.fasta' % (filename_prefix, i) for i in range(2)]\r\n # list of file paths is as expected\r\n self.assertEqual(actual, expected)\r\n # building seq collections from infile and the split files result in\r\n # equivalent seq collections\r\n self.assertEqual(\r\n SequenceCollection.from_fasta_records(parse_fasta(infile), DNA),\r\n SequenceCollection.from_fasta_records(parse_fasta(actual_seqs), DNA))", "def parse_file_count(path, args):\n try:\n fisier = open(path, 'r')\n except IOError:\n print(\"Nu am putut deschide fisierul :\", path)\n return\n n_found = 0\n pattern = args.pattern\n for line in fisier:\n if args.ignore_case:\n line = line.lower()\n pattern = pattern.lower()\n n_found += line.count(pattern)\n\n fisier.close()\n return n_found", "def get_rec_count(files: List[str],\n dialect: csv.Dialect) -> Tuple[Optional[int], int]:\n rec_cnt = -1\n for _ in csv.reader(fileinput.input(files), dialect):\n rec_cnt += 1\n fileinput.close()\n return rec_cnt", "def count_each_aa(aa_seq):\n amino_acids = IUPAC_AA_codes.keys()\n return dict((aa, aa_seq.count(aa)) for aa in amino_acids)", "def count_amino_acids(self):\n n = 0\n for chain in self.iter_chains():\n n += chain.count_amino_acids()\n return n", "def test_compute_seqs_per_file(self):\r\n fd, temp_fasta_fp = mkstemp(prefix='QiimeScriptUtilTests',\r\n suffix='.fasta')\r\n close(fd)\r\n temp_fasta = ['>seq', 'AAACCCCAAATTGG'] * 25\r\n open(temp_fasta_fp, 'w').write('\\n'.join(temp_fasta))\r\n\r\n actual_25 = self.pw._compute_seqs_per_file(temp_fasta_fp, 25)\r\n actual_2 = self.pw._compute_seqs_per_file(temp_fasta_fp, 2)\r\n actual_10 = self.pw._compute_seqs_per_file(temp_fasta_fp, 10)\r\n actual_5 = self.pw._compute_seqs_per_file(temp_fasta_fp, 5)\r\n actual_40 = 
self.pw._compute_seqs_per_file(temp_fasta_fp, 40)\r\n\r\n remove_files([temp_fasta_fp])\r\n\r\n self.assertEqual(actual_25, 1)\r\n self.assertEqual(actual_2, 13)\r\n self.assertEqual(actual_10, 3)\r\n self.assertEqual(actual_5, 5)\r\n self.assertEqual(actual_40, 1)", "def numberFiles(self):\n with open(self.inputfile) as fin:\n for n, _ in enumerate(fin, start=1): pass\n self.n = n\n return self.n", "def get_num_examples(path_in):\n i = 0\n with open(path_in, 'r', encoding='utf8') as f:\n for _ in f:\n i += 1\n return i", "def get_fasta_sequence_ids(fasta):\n if not os.path.exists(fasta) or (not os.path.isfile(fasta)):\n raise FileNotFoundError(fasta)\n seq_ids = set()\n with open(fasta, \"r\") as f:\n # 'fasta' is https://biopython.org/wiki/SeqIO file type.\n for record in SeqIO.parse(f, \"fasta\"):\n seq_ids.add(record.id)\n return seq_ids", "def get_counts_from_kmer_list(filenames_lst, alphabet, kmin, kmax):\n # initialize the array container\n dic_list = []\n # iterates through the file paths\n for filename in filenames_lst:\n # get the sequences and ids\n for n, seq in parse_fasta(filename):\n # append the counts to the array\n dic_list.append(count_kmers(seq, alphabet, kmin, kmax))\n return dic_list", "def gatherReadCounts(samplesList, scriptsDir, threads, alignmentPath, outRoot, stype, mode):\n reads = 0\n ext = \".pruned.bam\"\n if mode == \"all_reads\":\n ext = \".bam\"\n for i in range(len(samplesList)):\n bam = os.path.join(alignmentPath, outRoot) + \".\" + stype + \".\" + str(i) + ext\n reads += int(subprocess.run([os.path.join(scriptsDir, \"get_readcount.sh\"), bam, str(threads)], capture_output=True, text=True).stdout.strip(\"\\n\"))\n return reads", "def batch_count(in_batch, in_ref, dir_fastq='', dir_counts='', dir_np='',\r\n dir_stats='', **kwargs):\r\n\r\n batch_st = time.perf_counter()\r\n # define all the directory paths\r\n path = Path.cwd()\r\n list_dirs = [path / subdir for subdir in [dir_fastq, dir_counts, dir_np, dir_stats]]\r\n for subdir in list_dirs:\r\n Path.mkdir(subdir, exist_ok=True)\r\n\r\n # import batch csv and process samples with count_reads()\r\n df_batch = pd.read_csv(in_batch)\r\n list_reqcols = ['sample_id', 'fastq_file', 'condition']\r\n list_batchcols = df_batch.columns.tolist()\r\n if not all(col in list_batchcols for col in list_reqcols):\r\n list_miss = [col for col in list_reqcols if col not in list_batchcols]\r\n raise Exception('Error! 
in_batch is missing column(s): ' + str(list_miss))\r\n\r\n # perform batch processing\r\n for row in df_batch.itertuples():\r\n t_start = time.perf_counter()\r\n fastq = list_dirs[0] / row.fastq_file\r\n counts = list_dirs[1] / (row.sample_id + '_counts.csv')\r\n np = list_dirs[2] / (row.sample_id + '_npcounts.csv')\r\n stats = list_dirs[3] / (row.sample_id + '_stats.txt')\r\n count_reads(in_fastq=fastq, in_ref=in_ref, out_counts=counts,\r\n out_np=np, out_stats=stats, **kwargs)\r\n t_end = time.perf_counter()\r\n print(row.sample_id + ' processed in %.2f sec' % (t_end - t_start))\r\n\r\n batch_end = time.perf_counter()\r\n print('Batch count completed in %.2f min' % ((batch_end - batch_st) / 60))\r\n return", "def fast_Q2A(fastq_filepath):\n filein = open(fastq_filepath, \"r\")\n fileout = open(fastq_filepath[:-5] + \"fasta\", \"w\")\n found_id = 0\n num_of_seqs = 0\n for i in filein:\n if i[0] == \"@\":\n seq_id = \">\" + i[1:]\n found_id = 1\n num_of_seqs += 1\n continue\n if found_id == 1:\n seq = i\n found_id = 0\n fileout.write(seq_id + seq)\n filein.close()\n fileout.close()\n print num_of_seqs\n return os.path.abspath(fileout.name)" ]
[ "0.7568509", "0.7414015", "0.67043936", "0.6663063", "0.6578762", "0.61955506", "0.61545354", "0.59026015", "0.5882633", "0.586203", "0.5835468", "0.58194464", "0.57913864", "0.5739672", "0.5698394", "0.569644", "0.5685289", "0.56555086", "0.56553745", "0.56428534", "0.56414044", "0.56365836", "0.5633306", "0.5620018", "0.5584788", "0.55624473", "0.55437773", "0.5521561", "0.5521334", "0.5508506" ]
0.8925589
0
Builds list of primer objects from initial_primers
def construct_primers(initial_primers):
    primers=[]
    for n in initial_primers:
        primers.append(ProspectivePrimer(n[0],n[1],initial_primers[n]))
    return primers
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_primers(header,\r\n mapping_data):\r\n\r\n if \"LinkerPrimerSequence\" in header:\r\n primer_ix = header.index(\"LinkerPrimerSequence\")\r\n else:\r\n raise IndexError(\r\n (\"Mapping file is missing LinkerPrimerSequence field.\"))\r\n if \"ReversePrimer\" in header:\r\n rev_primer_ix = header.index(\"ReversePrimer\")\r\n else:\r\n raise IndexError((\"Mapping file is missing ReversePrimer field.\"))\r\n\r\n iupac = {'A': 'A', 'T': 'T', 'G': 'G', 'C': 'C', 'R': '[AG]', 'Y': '[CT]',\r\n 'S': '[GC]', 'W': '[AT]', 'K': '[GT]', 'M': '[AC]', 'B': '[CGT]',\r\n 'D': '[AGT]', 'H': '[ACT]', 'V': '[ACG]', 'N': '[ACGT]'}\r\n\r\n raw_forward_primers = set([])\r\n raw_forward_rc_primers = set([])\r\n raw_reverse_primers = set([])\r\n raw_reverse_rc_primers = set([])\r\n\r\n for line in mapping_data:\r\n # Split on commas to handle pool of primers\r\n raw_forward_primers.update([upper(primer).strip() for\r\n primer in line[primer_ix].split(',')])\r\n raw_forward_rc_primers.update([str(DNA(primer).rc()) for\r\n primer in raw_forward_primers])\r\n raw_reverse_primers.update([upper(primer).strip() for\r\n primer in line[rev_primer_ix].split(',')])\r\n raw_reverse_rc_primers.update([str(DNA(primer).rc()) for\r\n primer in raw_reverse_primers])\r\n\r\n if not raw_forward_primers:\r\n raise ValueError((\"No forward primers detected in mapping file.\"))\r\n if not raw_reverse_primers:\r\n raise ValueError((\"No reverse primers detected in mapping file.\"))\r\n\r\n # Finding the forward primers, or rc of reverse primers indicates forward\r\n # read. Finding the reverse primer, or rc of the forward primers, indicates\r\n # the reverse read, so these sets are merged.\r\n raw_forward_primers.update(raw_reverse_rc_primers)\r\n raw_reverse_primers.update(raw_forward_rc_primers)\r\n\r\n forward_primers = []\r\n reverse_primers = []\r\n for curr_primer in raw_forward_primers:\r\n forward_primers.append(compile(''.join([iupac[symbol] for\r\n symbol in curr_primer])))\r\n for curr_primer in raw_reverse_primers:\r\n reverse_primers.append(compile(''.join([iupac[symbol] for\r\n symbol in curr_primer])))\r\n\r\n return forward_primers, reverse_primers", "def primer_set(self):\n return composition_module.PrimerSet(self._get_attr('primer_set_id'))", "def expand_degeneracies(raw_primers):\r\n\r\n expanded_primers = []\r\n\r\n for raw_primer in raw_primers:\r\n primer_seq = DNASequence(raw_primer.strip())\r\n\r\n for expanded_primer in primer_seq.nondegenerates():\r\n expanded_primers.append(str(expanded_primer))\r\n\r\n return expanded_primers", "def test_build_primers_bs(self):\n \n starts = [5, 9, 62]\n stops = [2, 13, 17]\n forward_overhang = 'cagggacccggt'\n reverse_overhang = 'cgaggagaagcccggtta'\n dna_orf = 'ATGTGGAGACGGAAACATCCGAGGACATCCGGAGGAACCCGGGGAGTTCTGAGTGGTAATTAG'\n expected_primers = [['Fw_5', 'cagggacccggtAAACATCCGA'],\n ['Fw_9', 'cagggacccggtACATCCGGAG'],\n ['Rv_13.0', 'cgaggagaagcccggttaGGTTCCTCCG'],\n ['Rv_17.0', 'cgaggagaagcccggttaCAGAACTCCC']]\n expected_errors = [['Fw_62', ' Not enough bases in that direction'], ['Rv_2.0', ' Not enough bases in that direction']]\n expected_vstarts = [5, 9]\n expected_vstops = [13, 17]\n result_primers, result_errors, result_vstarts, result_vstops = build_primers_bs(dna_orf, starts, stops,\n forward_overhang, reverse_overhang, 10)\n self.assertEqual(result_primers, expected_primers)\n self.assertEqual(result_errors, expected_errors)\n self.assertEqual(expected_vstarts, result_vstarts)\n self.assertEqual(expected_vstops, result_vstops)", "def 
get_init_list(self):\n\n return self.convert_compartments_to_list(self.init_compartments)", "def get_objectives_requiring_candidate_preds(self):\n objs = []\n for objective, obj_args in self.objective_args.items():\n if \"candidate_preds\" in obj_args:\n objs.append(objective)\n return objs", "def parse_course_pre_to_list(self):\n prere_courses = []\n\n # convert non-word to spaces except \"-\"\n self.prere_raw = re.sub(\"[^\\w-]\", \" \", self.prere_raw)\n\n # split the string by spaces\n words = self.prere_raw.split()\n\n # check if the string contains number, if True then the string is of the form: \"140A\"\n def append_to_list(word, previous_word):\n try:\n if word[0].isdigit():\n toappend = None\n # course abbs = words[i-1]\n try:\n toappend = \"{} {}\".format(previous_word.upper(), word.upper())\n except AttributeError:\n #TODO check this error for HIGR 216A-B\n print(\"previous word is {}, word is {}\".format(previous_word, word))\n if toappend not in prere_courses:\n prere_courses.append(toappend)\n except IndexError:\n #TODO why this would occur?\n print(\"word is {}, previous word is {}\".format(word, previous_word))\n\n # iterate through words to find numbers\n for i in range(len(words)):\n\n previous_word = None\n if i is not 0:\n # define the previous word like MATH\n previous_word = words[i-1]\n\n if \"-\" in words[i]:\n num = re.split(\"[A-Z]\", words[i])[0]\n letters = re.split(\"-\", words[i])\n new_words = []\n for i in range(len(letters)):\n if i is 0:\n new_words.append(letters[0])\n else:\n new_words.append(num + letters[i])\n for word in new_words:\n if word is not None and previous_word is not None:\n append_to_list(word, previous_word)\n else:\n #TODO: what if the word is None?\n pass\n else:\n append_to_list(words[i], previous_word)\n\n return prere_courses", "def test_get_primers(self):\r\n\r\n # Raise error if ReversePrimer not supplied\r\n header = ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence',\r\n 'Description']\r\n mapping_data = [['s1', 'ATCG', 'TTGGCC,TTGGWC', 'ATRCCTA']]\r\n self.assertRaises(IndexError, get_primers, header, mapping_data)\r\n\r\n header = ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence',\r\n 'ReversePrimer', 'Description']\r\n forward_primers, reverse_primers = get_primers(header, mapping_data)\r\n\r\n forward_primers = set([seq.pattern for seq in forward_primers])\r\n reverse_primers = set([seq.pattern for seq in reverse_primers])\r\n\r\n expected_forward_primers = set(['TTGGCC', 'TAGG[CT]AT', 'TTGG[AT]C'])\r\n expected_reverse_primers = set(['GGCCAA', 'AT[AG]CCTA', 'G[AT]CCAA'])\r\n\r\n self.assertEqual(forward_primers, expected_forward_primers)\r\n self.assertEqual(reverse_primers, expected_reverse_primers)", "def make_priors(self):\r\n if self.last_img_size != (self.target_size, self.target_size):\r\n prior_data = []\r\n\r\n for conv_w, conv_h, scale in zip(self.conv_ws, self.conv_hs, self.scales):\r\n for i in range(conv_h):\r\n for j in range(conv_w):\r\n # +0.5 because priors are in center-size notation\r\n cx = (j + 0.5) / conv_w\r\n cy = (i + 0.5) / conv_h\r\n\r\n for ar in self.aspect_ratios:\r\n ar = np.sqrt(ar)\r\n\r\n w = scale * ar / self.target_size\r\n h = scale / ar / self.target_size\r\n\r\n # This is for backward compatability with a bug where I made everything square by accident\r\n h = w\r\n\r\n prior_data += [cx, cy, w, h]\r\n\r\n self.priors = np.array(prior_data).reshape(-1, 4)\r\n self.last_img_size = (self.target_size, self.target_size)\r\n return self.priors", "def __init__(self, 
chromosome_list):\n\n self.chromosome_list = [make_chromosome(chromosome) for chromosome in chromosome_list]\n self.mating_pool = []\n self.next_population = []", "def rtailed(rprimers: list) -> list:\n from .models import Primer\n\n tails = {'', 'C', 'G', 'CG', 'GC', 'CGC', 'GCG'}\n\n tailed = list() # Primers with tails.\n for tail, primer in itertools.product(tails, rprimers):\n # Should the spans be updated when a tail is added?\n if primer.strand == 1:\n primer.sequence = primer.sequence + tail\n elif primer.strand == -1:\n if (primer.allele1_start - len(tail) >= 0\n and primer.allele2_start - len(tail) >= 0):\n primer.sequence = tail + primer.sequence\n\n return [primer for primer in rprimers\n if len(primer) < 28\n and primer.tm <= 62]", "def primer_srch(self, primers):\n for fP in primers:\n if fP.binds_to(self):\n return fP", "def create_primer_regex_patterns(self, header, mapping_data):\n import logging\n self.logger = logging.getLogger('_getprm_')\n \n if \"LinkerPrimerSequence\" in header:\n primer_ix = header.index(\"LinkerPrimerSequence\")\n else:\n raise IndexError(\n (\"Mapping file is missing LinkerPrimerSequence field.\"))\n if \"ReversePrimer\" in header:\n rev_primer_ix = header.index(\"ReversePrimer\")\n else:\n raise IndexError((\"Mapping file is missing ReversePrimer field.\"))\n \n raw_forward_primers = set([])\n \n raw_reverse_primers = set([])\n \n for line in mapping_data:\n # Split on commas to handle pool of primers\n raw_forward_primers.update([upper(primer).strip() for\n primer in line[primer_ix].split(',')])\n # reverse primer were reverse complemented\n raw_reverse_primers.update([upper(str(DNA(primer))) for\n primer in line[rev_primer_ix].split(',')])\n \n if not raw_forward_primers:\n self.logger.critical(\"No forward primers detected in mapping file.\")\n raise ValueError(\"No forward primers detected in mapping file.\")\n \n if not raw_reverse_primers:\n self.logger.critical(\"No reverse primers detected in mapping file.\")\n raise ValueError(\"No reverse primers detected in mapping file.\")\n\n \n forward_primers = []\n forward_primers_rc = []\n reverse_primers = []\n reverse_primers_rc = []\n\n for curr_primer in raw_forward_primers:\n \n forward_primers.append(compile(''.join([self.iupac[symbol] for symbol in curr_primer[:self.search_length]])))\n forward_primers_rc.append(compile(''.join([self.iupac[symbol] for symbol in self.reverse_complement(curr_primer[:self.search_length])])))\n \n for curr_primer in raw_reverse_primers:\n reverse_primers.append(compile(''.join([self.iupac[symbol] for symbol in curr_primer[:self.search_length]])))\n reverse_primers_rc.append(compile(''.join([self.iupac[symbol] for symbol in self.reverse_complement(curr_primer[:self.search_length])])))\n \n return forward_primers, forward_primers_rc, reverse_primers, reverse_primers_rc", "def test_build_primers_tm(self):\n \n starts = [5, 9, 62]\n stops = [2, 13, 17]\n forward_overhang = 'cagggacccggt'\n reverse_overhang = 'cgaggagaagcccggtta'\n dna_orf = 'ATGTGGAGACGGAAACATCCGAGGACATCCGGAGGAACCCGGGGAGTTCTGAGTGGTAATTAG'\n expected_primers = [['Fw_5', 'cagggacccggtAAACATCCGAGGACATCCGGAGGAACCCG'],\n ['Fw_9', 'cagggacccggtACATCCGGAGGAACCCGGGGAGTTCTG'],\n ['Rv_13.0', 'cgaggagaagcccggttaGGTTCCTCCGGATGTCCTCGGATGTTTCC'],\n ['Rv_17.0', 'cgaggagaagcccggttaCAGAACTCCCCGGGTTCCTCCGGATG']]\n expected_errors = [['Fw_62', ' Not enough bases in that direction'], ['Rv_2.0', ' Not enough bases in that direction']]\n expected_vstarts = [5, 9]\n expected_vstops = [13, 17]\n result_primers, 
result_errors, result_vstarts, result_vstops = build_primers_tm(dna_orf, starts, stops,\n forward_overhang, reverse_overhang, 65)\n self.assertEqual(result_primers, expected_primers)\n self.assertEqual(result_errors, expected_errors)\n self.assertEqual(expected_vstarts, result_vstarts)\n self.assertEqual(expected_vstops, result_vstops)", "def make_mer_list(mer_len):\r\n\tli = bases\r\n\tfor i in range(mer_len-1):\r\n\t\tli = add_base(li)\r\n\treturn li", "def get_primes_list(start, end):\r\n primes_list_obj = PrimesList(start, end)\r\n primes_list = primes_list_obj.primes_list()\r\n return primes_list", "def __init__(self, name: str, professors: list[Professor]):\n self.professors = professors", "def __init__(self, title, semester, professor, crn, status):\n self.semesters = [semester]\n self.professors = [professor]\n self.title = title\n self.statuses = [status]\n self.instances = {crn: (semester, professor, status)}", "def create_random_proposals(self): \r\n global MAX_NUMBER_PROPOSALS\r\n global LOCATIONS\r\n global CATEGORIES\r\n \r\n for i in range(MAX_NUMBER_PROPOSALS):\r\n description = \"\"\r\n location = locations_rv.rvs(size=1)[0]\r\n category = categories_rv.rvs(size=1)[0]\r\n budget = random.uniform(500000, 1000000)\r\n project = Project(i, description, category, budget, location)\r\n self.proposals.append(project)", "def __init__(self, root, p=2, q=3):\r\n super(Profile, self).__init__()\r\n ancestors = collections.deque('*'*p, maxlen=p)\r\n self.list = list()\r\n\r\n self.profile(root, p, q, ancestors)\r\n self.sort()", "def test_enumerating_protomers(self):\n\n mol = Molecule.from_smiles(\"Oc2ccc(c1ccncc1)cc2\")\n\n # there should be three protomers for this molecule so restrict the output\n protomers = mol.enumerate_protomers(max_states=2)\n\n assert mol not in protomers\n assert len(protomers) == 2\n\n # now make sure we can generate them all\n protomers = mol.enumerate_protomers(max_states=10)\n\n assert mol not in protomers\n assert len(protomers) == 3\n\n # make sure each protomer is unique\n unique_protomers = set(protomers)\n assert len(protomers) == len(unique_protomers)", "def test_enumerating_protomers(self):\n\n mol = Molecule.from_smiles(\"Oc2ccc(c1ccncc1)cc2\")\n\n # there should be three protomers for this molecule so restrict the output\n protomers = mol.enumerate_protomers(max_states=2)\n\n assert mol not in protomers\n assert len(protomers) == 2\n\n # now make sure we can generate them all\n protomers = mol.enumerate_protomers(max_states=10)\n\n assert mol not in protomers\n assert len(protomers) == 3\n\n # make sure each protomer is unique\n unique_protomers = set(protomers)\n assert len(protomers) == len(unique_protomers)", "def __init__(self, prepositions,preposition_data, bigram_prob, pos_trigrams_prob):\n self._bigram_prob = bigram_prob\n self._trigram_prob = pos_trigrams_prob\n self._prepositions = tuple(prepositions)\n self._preposition_data = preposition_data\n self._outcome_and_sents = []\n for key in self._preposition_data.keys():\n sentences = self._preposition_data[key]\n for sents in sentences:\n temp = []\n temp.append(self._prepositions.index(key))\n temp.append(sents)\n self._outcome_and_sents.append(temp)", "def _populate(self):\n self.addDemographics()\n self.addLabs()\n self.addProblems()\n self.addMeds()\n self.addAllergies()\n self.addImmunizations()\n self.addVitals()\n self.populated_p = True", "def generar_poblacion():\n poblacion = []\n ind = Arbol()\n for i in range(size_pop):\n poblacion.append(generar_individuo_recursivo(ind))\n 
return poblacion", "def __init__(self, viruses, maxPop):\n\n Patient.__init__(self,viruses,maxPop)\n self.Prescriptions = []", "def priors(self):\n\n return self._priors", "def people(self):\r\n return pp.People(self)", "def build_lipid_list(self):\n\n lipid_list = []\n if self.lipid_patterns == ['']:\n return []\n for pattern, sidechain in itertools.product(\n self.lipid_patterns, self.lipid_tails\n ):\n if pattern not in PL_PATTERNS:\n continue\n if pattern == 'lipid':\n try:\n assert ' ' not in sidechain\n except AssertionError:\n continue\n lipid_list.append(Lipid.from_string(sidechain))\n else:\n lipid_list.append(Pl.from_string(pattern, sidechain))\n\n return lipid_list", "def add_candidates() -> None:\r\n faculties = [\"Computer Science\", \"Performing Arts\", \"Engineering\", \"Economics\"]\r\n for faculty in faculties:\r\n create_candidate(faculty, \"President\")\r\n for _ in range(3):\r\n create_candidate(faculty, \"GSU Officer\")\r\n for _ in range(16):\r\n create_candidate(faculty, \"Faculty Officer\")" ]
[ "0.61152774", "0.5685748", "0.54845613", "0.54509944", "0.5440025", "0.53709817", "0.53416723", "0.53409606", "0.53197396", "0.5274368", "0.5267137", "0.5261285", "0.52573895", "0.5245288", "0.5206712", "0.5176481", "0.51707155", "0.5140353", "0.5139623", "0.5128205", "0.5076864", "0.5076864", "0.50725865", "0.50691247", "0.50603724", "0.5054345", "0.5052913", "0.5050999", "0.5043551", "0.5029079" ]
0.8783308
0
convert DNA codes to numeric values for bitwise comparisons; returns a numeric list corresponding to the nucleotide sequence
def convert_to_numeric(sequence): int_mapped_seq=[] DNA_to_numeric = get_DNA_to_numeric() for n in sequence: int_mapped_seq.append(DNA_to_numeric[n]) return int_mapped_seq
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def str2NumList(strn):\n\treturn [ord(chars) for chars in strn]", "def nucleotide_numbering():\n nucleotide_to_number = {'A': 0, 'C': 1, 'G': 2, 'T': 3}\n number_to_nucleotide = {0: 'A', 1: 'C', 2: 'G', 3: 'T'}\n return nucleotide_to_number, number_to_nucleotide", "def encode_rna(x):\n return [0 if y == 'A' else 1 if y == 'U' else 2 if y == 'G' else 3 for y in x]", "def convert_code_to_decimal(processed_code):\n converted_digits = []\n\n for index, digit in enumerate(processed_code):\n if not digit.isnumeric():\n digit = HEXADECIMAL_TO_DECIMAL[digit]\n converted_digits.append(int(digit))\n\n return converted_digits", "def dna_number(bp_seq):\r\n # Hint: use dna_digit\r\n\r\n # YOUR CODE HERE\r", "def binary_to_seq():\n bin_seq, dico_binary, comp_seq, file_comp = utf8_to_binary()\n \n #for each binary value associate the corresponding letter (key) \n #according to the dictionnary \n dna_seq = \"\"\n reading_binary = \"\"\n for value in bin_seq:\n reading_binary += value\n for letter, code in dico_binary.items():\n if code == reading_binary:\n dna_seq += letter\n reading_binary = \"\"\n break\n \n #print(dna_seq, bin_seq, comp_seq, file_comp)\n return dna_seq, bin_seq, comp_seq, file_comp", "def _convert_to_number(self):\n if type(self.data) is not str:\n return -1\n out = list()\n for char in self.data:\n out.append(ord(char))\n return (out)", "def Decodingfunc(Codebyte):\r\n Decodedint=struct.unpack('b',Codebyte)[0]\r\n N=0 #number of repetitions\r\n L=0 # length of single/multiple sequence\r\n if Decodedint >= 0: #single\r\n N = 1\r\n L = Decodedint+1\r\n else: #multiple\r\n L = -Decodedint//16+1\r\n N = -Decodedint-(L-1)*16+1\r\n #print(\"N =\",N,\" L =\",L)\r\n return (N,L)", "def codage(nbr):\n\tmask=1\n\tresult=0\n\tfor index in range(len(G)):\n\t\tif ((mask<<index)&nbr) != 0:\n\t\t\tresult^=G[len(G)-index-1]\n\treturn result", "def coding_strand_to_AA(dna):\n num_codons = int(len(dna)/3)\n num = 0\n list_codons = []\n aacids = ''\n while num < num_codons:\n num_start = int(num*3)\n num_end = int(num*3 + 3)\n list_codons.append(dna[num_start:num_end])\n num = num + 1\n for element in list_codons:\n thing = aa_table[element]\n aacids = aacids + thing\n return aacids", "def _codes_to_ints(self, codes):\n # Shift the representation of each level by the pre-calculated number\n # of bits. Since this can overflow uint64, first make sure we are\n # working with Python integers:\n codes = codes.astype(\"object\") << self.offsets\n\n # Now sum and OR are in fact interchangeable. 
This is a simple\n # composition of the (disjunct) significant bits of each level (i.e.\n # each column in \"codes\") in a single positive integer (per row):\n if codes.ndim == 1:\n # Single key\n return np.bitwise_or.reduce(codes)\n\n # Multiple keys\n return np.bitwise_or.reduce(codes, axis=1)", "def convert2int(self,seq_pep):\n\t\treturn [self.aminoacids.index(pep) for pep in seq_pep]", "def bin_code(self):\n self.alphabet = np.unique(self.sequence)\n\n for s, n in zip([chr(k + ord('a') - 1) for k in self.alphabet], self.alphabet):\n self.alphabet_symbol[s] = n\n\n sigm = len(self.alphabet)\n bin_code = []\n for i, e in enumerate(self.alphabet):\n em = [0] * sigm\n em[sigm - 1 - i] = 1\n bin_code.append(em)\n\n for i in range(len(bin_code)):\n self.alphabet_dict[self.alphabet[i]] = bin_code[i]\n\n return reduce(lambda r, e: r + self.alphabet_dict[e], self.sequence, [])", "def functionG(a,b,c,d):\n intArr = []\n comboArr = a + b + c + d\n for x in comboArr:\n intArr.append(ord(x))\n\n return str(sum(intArr))", "def _seq2vec(seq):\n vec = np.zeros(len(seq), dtype=int)\n for aai, aa in enumerate(seq):\n vec[aai] = AA2CODE[aa]\n return vec", "def _codes_to_ints(self, codes):\n # Shift the representation of each level by the pre-calculated number\n # of bits:\n codes <<= self.offsets\n\n # Now sum and OR are in fact interchangeable. This is a simple\n # composition of the (disjunct) significant bits of each level (i.e.\n # each column in \"codes\") in a single positive integer:\n if codes.ndim == 1:\n # Single key\n return np.bitwise_or.reduce(codes)\n\n # Multiple keys\n return np.bitwise_or.reduce(codes, axis=1)", "def get_complement(nucleotide): # This one works\n nuc = list(nucleotide)\n count = 0\n complement = ''\n for element in nuc:\n if element == 'A':\n nuc[count] = 'T'\n elif element == 'T':\n nuc[count] = 'A'\n elif element == 'C':\n nuc[count] = 'G'\n elif element == 'G':\n nuc[count] = 'C'\n complement = complement + nuc[count]\n count = count + 1\n return complement", "def code_to_sequences( self, ucode ):\n\t\t\n\t\tassert isinstance( ucode, unicode ), 'ucode must be unicode string!' \n\t\t\n\t\tfor uchar in ucode:\n\t\t\tif not( uchar in self._char39 ):\n\t\t\t\traise Barcode39Error( '%s char is not listed in Barcode39 characters [0..9,A..Z,space,9,-,.,$,/,+,%]' )\n\n\t\tresult = []\n\t\tfor uchar in ucode:\n\t\t\tresult = result + self.char_to_seq(uchar) \n\t\t\t\n\t\treturn result", "def decode(code):\n def h(x):\n hs = []\n for i in range(len(code)):\n if code[i] != '0' and (code[i] == '?' 
or code[i] == x[i]):\n hs.append(True)\n else:\n hs.append(False)\n return all(hs)\n return h", "def populate_code_list():\n\tletter_code_ST = \"JZIHGFEDCBA\"\n\tletter_code_FG = \"XWUTRQPNMLK\"\n\tfor pos in range(\n\t len(letter_code_ST)): #Interestingly, the values start from 0\n\t\tcode_ST.append(pos) # Number first\n\t\tcode_ST.append(letter_code_ST[pos])\n\tfor pos in range(len(letter_code_FG)):\n\t\tcode_FG.append(pos)\n\t\tcode_FG.append(letter_code_FG[pos])", "def coding_strand_to_AA(dna):\n s = \"\"\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# intitialize empty list\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n for i in range(0, len(dna)-2, 3):\t\t\t\t\t\t\t\t\t\t\t\t\t\t# for range of length of dna, indexes w/ step 3 (to isolate codons)\n \t\tamino_acid = aa_table[dna[i:i+3]]\t\t\t\t\t\t\t\t\t\t\t\t# translates each codon to an amino acid\n \t\ts = s + amino_acid \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# adds amino acid to list\n return s \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# returns list of amino acids", "def code(action_sequence):\r\n # refuse any invalid action :\r\n if set(action_sequence) - set(CODE_MAP): # some action was not in the known ones.\r\n return '0' # per spec (test_unknown_action)\r\n\r\n mapdict = dict(zip(CODE_MAP, (1, 2, 4, 8)))\r\n da_code = [mapdict[action] for action in action_sequence]\r\n if sorted(da_code) != da_code: # list is not sorted : assume reversed\r\n da_code.append(16)\r\n return format(sum(da_code), 'b') # sum to integer, binstring, and return", "def genes():\n return [\"b2935\", \"b0723\", \"b0451\"]", "def game_decoder(game):\n res = []\n for c in game:\n if '0' <= c <= '9':\n res.append(ord(c) - ord('0') + 1)\n elif 'a' <= c <= 'd':\n res.append((ord(c) - ord('a') + 1) * 25)\n else:\n raise ValueError('invalid game component')\n return res", "def string_to_numbers(str):\n return [ord(ch) - ord(\"a\") for ch in str]", "def create_code():\n\n code = [0, 0, 0, 0]\n\n for i in range(4):\n value = random.randint(1, 8) # 8 possible digits\n while value in code:\n value = random.randint(1, 8) # 8 possible digits\n code[i] = value\n \n #print(code)\n return code", "def convert2seq(self,seq_int):\n\t\treturn [self.aminoacids[i] for i in seq_int]", "def translate(rna):\n RNA_CODON_TABLE = {\"UUU\": \"F\", \"UUC\": \"F\", \"UUA\": \"L\", \"UUG\": \"L\",\n \"UCU\": \"S\", \"UCC\": \"S\", \"UCA\": \"S\", \"UCG\": \"S\",\n \"UAU\": \"Y\", \"UAC\": \"Y\", \"UAA\": \"*\", \"UAG\": \"*\",\n \"UGU\": \"C\", \"UGC\": \"C\", \"UGA\": \"*\", \"UGG\": \"W\",\n \"CUU\": \"L\", \"CUC\": \"L\", \"CUA\": \"L\", \"CUG\": \"L\",\n \"CCU\": \"P\", \"CCC\": \"P\", \"CCA\": \"P\", \"CCG\": \"P\",\n \"CAU\": \"H\", \"CAC\": \"H\", \"CAA\": \"Q\", \"CAG\": \"Q\",\n \"CGU\": \"R\", \"CGC\": \"R\", \"CGA\": \"R\", \"CGG\": \"R\",\n \"AUU\": \"I\", \"AUC\": \"I\", \"AUA\": \"I\", \"AUG\": \"M\",\n \"ACU\": \"T\", \"ACC\": \"T\", \"ACA\": \"T\", \"ACG\": \"T\",\n \"AAU\": \"N\", \"AAC\": \"N\", \"AAA\": \"K\", \"AAG\": \"K\",\n \"AGU\": \"S\", \"AGC\": \"S\", \"AGA\": \"R\", \"AGG\": \"R\",\n \"GUU\": \"V\", \"GUC\": \"V\", \"GUA\": \"V\", \"GUG\": \"V\",\n \"GCU\": \"A\", \"GCC\": \"A\", \"GCA\": \"A\", \"GCG\": \"A\",\n \"GAU\": \"D\", \"GAC\": \"D\", \"GAA\": \"E\", \"GAG\": \"E\",\n \"GGU\": \"G\", \"GGC\": \"G\", \"GGA\": \"G\", \"GGG\": \"G\"}\n str = ''\n list = [rna[i:i+3] for i in range(0,len(rna),3)]\n for x in list:\n #checks if x is in key of RNA_CODON_TABLE\n if x in RNA_CODON_TABLE:\n #appends only if the value for the given key is not *\n if RNA_CODON_TABLE[x] != '*':\n str 
= str + RNA_CODON_TABLE[x]\n #if only one char is extra(meaning apart form the 3 pair characters available in dictionary)\n #checks if the char is in following\n elif len(x) == 1 and x in ['A','G','C','U']:\n str = str + x\n #if the char is of length 2 i.e, 2 words extra\n elif len(x) == 2 and x[0] in ['A','G','C','U'] and x[1] in ['A','G','C','U']:\n #Then appending the char to the actually converted string\n str = str + x[0]\n str = str + x[1]\n #if the char is not in the above characters then it is a unrecognised character.\n else:\n print(\"Unrecognised character:\",x)\n return str", "def n_to_data(n):\n if n <= 62:\n return [n]\n elif n <= 258047:\n return [63, (n >> 12) & 0x3f, (n >> 6) & 0x3f, n & 0x3f]\n else: # if n <= 68719476735:\n return [63, 63,\n (n >> 30) & 0x3f, (n >> 24) & 0x3f, (n >> 18) & 0x3f,\n (n >> 12) & 0x3f, (n >> 6) & 0x3f, n & 0x3f]", "def gene_finder(dna):\n viable_strings = []\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# intitialize empty list (for strings)\n viable_amino_acids = []\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# intitialize empty list (for amino acids)\n threshold = longest_ORF_noncoding(dna, 1500)\t\t\t\t\t\t\t\t\t\t\t# sets threshold to longest random dna string\n real_dna = list(find_all_ORFs_both_strands(dna))\t\t\t\t\t\t\t\t\t\t# sets real_dna equal to all the ORFs, both strands\n for i in range(len(real_dna)):\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# searches through all the elements in list real_dna\n \tif len(real_dna[i]) > len(threshold):\t\t\t\t\t\t\t\t\t\t\t\t# compares real string to random string\n \t\tviable_strings.append(real_dna[i])\t\t\t\t\t\t\t\t\t\t\t\t# if real string is longer, adds it to list\n for i in range(len(viable_strings)):\t\t\t\t\t\t\t\t\t\t\t\t\t# searches through all elements in viable_strings\n \ta = coding_strand_to_AA(viable_strings[i])\t\t\t\t\t\t\t\t\t\t\t# translates each string to amino acid sequence\n \tviable_amino_acids.append(a)\t\t\t\t\t\t\t\t\t\t\t\t\t\t# adds amino acids to list\n return viable_amino_acids\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# returns list" ]
[ "0.6309183", "0.6230118", "0.6208863", "0.6096392", "0.5975599", "0.59637845", "0.5953173", "0.59482646", "0.580563", "0.5801203", "0.5766781", "0.5749419", "0.5746811", "0.57197213", "0.5715822", "0.5704024", "0.570132", "0.5697264", "0.5672337", "0.56659025", "0.56524974", "0.5639418", "0.5636124", "0.56037605", "0.5558389", "0.5554042", "0.5538871", "0.5527109", "0.551634", "0.5515629" ]
0.69297695
0
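The record above pairs the convert_to_numeric query with its implementation but does not show the get_DNA_to_numeric mapping it relies on. As an illustration only, the sketch below assumes a 4-bit IUPAC-style encoding (A=1, C=2, G=4, T=8, degenerate codes as ORs of those bits) so that a bitwise AND of two codes is nonzero exactly when the bases are compatible; the dictionary name and values are assumptions, not part of the dataset.

DNA_TO_NUMERIC = {
    "A": 1, "C": 2, "G": 4, "T": 8,              # canonical bases as single bits
    "R": 1 | 4, "Y": 2 | 8, "N": 1 | 2 | 4 | 8,  # a few IUPAC degeneracies as ORed bits
}

def to_numeric(sequence):
    # Return the list of bit masks for a nucleotide string.
    return [DNA_TO_NUMERIC[base] for base in sequence.upper()]

primer = to_numeric("ARN")
target = to_numeric("AGT")
# The primer matches when every positionwise AND is nonzero.
print(all(p & t for p, t in zip(primer, target)))  # True

Under this kind of encoding, a primer/target comparison reduces to checking that every positionwise AND is nonzero, which appears to be what the bitwise_and calls over numpy arrays in the later primer-matching records are doing.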
returns a corrected unaligned index based on aligned index
def get_corrected_index(seq, aligned_index): # Counts the number of nucleotides in aligned sequence, returns # count of nucleotides occuring before aligned index reached slice_seq=seq[0:aligned_index] # If different gap characters used, may need to modify this # In current form, it is optimized for speed corrected_index=\ aligned_index - (slice_seq.count("-") + slice_seq.count(".")) return corrected_index
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_align_idx(self):\n self.amp4.rotateAng([5, 5, 5], ang='deg')\n al = align(self.amp3, self.amp4, mv=[0, 1, 2, 3], sv=[0, 1, 2, 3], method='idxPoints')\n all(self.assertAlmostEqual(al.m.vert[i, 0], al.s.vert[i, 0], delta=0.1) for i in range(al.s.vert.shape[0]))", "def idx2off(i):\n return i * 32 - (24 * (i//4))", "def alignment_index(self, sequence_index):\n if sequence_index >= len(self.ungapped()):\n raise IndexError(\"sequence index out of range\")\n sequence_index %= len(self.ungapped())\n iCurrent = -1\n for i, sResidue in enumerate(self.sequence):\n if sResidue not in GAP_CHARACTERS:\n iCurrent += 1\n if iCurrent == sequence_index:\n return i", "def get_alignment(a: np.ndarray, max_alignment: int = 128) -> int:\n # Check max_alignment\n if bin(max_alignment).count('1') != 1:\n raise ValueError(\"'max_alignment' must be a power of 2.\")\n\n # Get largest base\n b = int(np.log2(max_alignment))\n\n # Get best alignment\n return next(2**x for x in range(b, 0, -1) if (a.ctypes.data % 2**x) == 0)", "def align_addr(addr, align = 16, dir = ALIGN_DOWN):\n\n if dir == ALIGN_DOWN:\n return addr - (addr % align)\n else:\n return addr + (align - addr % align)", "def test_align_unaligned_seqs(self):\n res = align_unaligned_seqs(self.seqs1_fp, RNA)\n self.assertEqual(res.toFasta(), self.seqs1_aln)", "def qindex2index(index):\n r = index.row()\n c = index.column()\n if c > 0x10:\n return (0x10 * r) + c - 0x11\n else:\n return (0x10 * r) + c", "def test_align_invert(self):\n al = align(self.amp1, self.amp2, inverse=False)\n\n al_inv = align(self.amp2, self.amp1, inverse=True)\n\n print(al.R)\n print(al_inv.R)\n\n print(al.T)\n print(al_inv.T)", "def north_index(self, index):\n return index - self.size", "def get_alignment_offset(self):\n\n return 0", "def _index(orig, off):\n orig_x, orig_y = orig\n off_x, off_y = off\n return (orig_y - off_y) * self.ncols + (orig_x - off_x)", "def local2align(seq):\n lookup = []\n for i in xrange(len(seq)):\n if seq[i] == \"-\":\n continue\n lookup.append(i)\n return lookup", "def align(offset, data, align_to=64):\n rem = offset % align_to\n new_offset = offset if (rem == 0) else offset + (align_to - rem)\n\n if data is not None:\n new_data = np.pad(\n data.flatten(),\n (0, int((new_offset - offset) / data.dtype.itemsize)), mode=\"constant\")\n else:\n new_data = None\n return new_offset, new_data", "def aligned(self):\n return self.__aligned", "def filter_aligned_codons(aln):\n\n ind = find_aligned_codons(aln)\n return subalign(aln, ind)", "def pad_array_index(low,high,segment_length,reverse=False):\n \n remainder = (segment_length-(high-low)%segment_length)\n if not reverse:\n return high + remainder\n else:\n return low - remainder", "def getAddressOfIndex(self) -> long:\n ...", "def isaligned(a: np.ndarray, alignment: int) -> bool:\n return (a.ctypes.data % alignment) == 0", "def append_std_aligned_index(primers,\n standard_index_seq,\n region_slice):\n \n for n in primers:\n n.std_index = True\n standard_unaligned_index = get_corrected_index(standard_index_seq,\n n.aligned_index)\n # 5' for forward primer would be upstream of the Xmer by the\n # number of bases in the region slice\n n.f_std_index = standard_unaligned_index - region_slice\n # 5' for reverse primer is the length of the Xmer plus the number\n # of bases in the region slice.\n n.r_std_index = standard_unaligned_index + len(n.seq) + region_slice\n \n return primers", "def find_known_index(cryptf):\n ct1 = cryptf(b'A')\n ct2 = cryptf(b'B')\n bsz = find_blocksize(cryptf)\n\n # find where are 
plaintext block is by comparing two ciphertexts with equal size inputs\n blocknum = -1\n for i, (blk1, blk2) in enumerate(zip(chunks(ct1, bsz), chunks(ct2, bsz))):\n if blk1 != blk2:\n blocknum = i + 1\n break\n else:\n raise Exception(\"Could not find delta block!\")\n\n nextblock = chunks(ct1, bsz)[blocknum]\n\n # find offset by creating blocks of equal size but different last byte\n # when the next block differs, we know we've passed a block boundary\n offset = -1\n for i in range(bsz+1):\n nextblock1 = chunks(cryptf(b'A'*i+b'A'), bsz)[blocknum]\n nextblock2 = chunks(cryptf(b'A'*i+b'B'), bsz)[blocknum]\n if nextblock1 != nextblock2:\n offset = i\n break\n else:\n raise Exception(\"Could not find alignment offset!\")\n\n return blocknum, offset", "def _resolve_via_offset_table(stream, cu, index, base_attribute_name):\n base_offset = _get_base_offset(cu, base_attribute_name)\n # That's offset (within the rnglists/loclists/str_offsets section) of\n # the offset table for this CU's block in that section, which in turn is indexed by the index.\n\n offset_size = 4 if cu.structs.dwarf_format == 32 else 8\n with preserve_stream_pos(stream):\n return base_offset + struct_parse(cu.structs.Dwarf_offset(''), stream, base_offset + index*offset_size)", "def index2qindexb(self, index):\n r = index // 0x10\n c = index % 0x10\n return self.index(r, c)", "def align2local(seq):\n i = -1\n lookup = []\n for c in seq:\n if c != \"-\":\n i += 1\n lookup.append(i)\n return lookup", "def _raveled_index_for_transformed(self, param):\n ravi = self._raveled_index_for(param)\n if self._has_fixes():\n fixes = self._fixes_\n ### Transformed indices, handling the offsets of previous fixes\n transformed = (np.r_[:self.size] - (~fixes).cumsum())\n return transformed[ravi[fixes[ravi]]]\n else:\n return ravi", "def unaligned(self):\n new_alignment = Alignment()\n new_alignment.datatype = self.datatype\n for name, seq in self.items():\n new_seq = re.sub(_INDEL, '', str(seq))\n if new_seq != '':\n new_alignment[name] = new_seq\n return new_alignment", "def memory_index(indices, t):\n memlen, itemsize, ndim, shape, strides, offset = t\n p = offset\n for i in range(ndim):\n p += strides[i] * indices[i]\n return p", "def up_index(index):\n return 2 * index", "def memory_index(indices, t):\n memlen, itemsize, ndim, shape, strides, offset = t\n p = offset\n for i in range(ndim):\n p += strides[i]*indices[i]\n return p", "def fix_indexes(res, idx_local, idx, buffer_size):\n\n # get limits for the data (exlude indexes that have buffer data)\n data_start = idx_local[0].start\n data_end = idx_local[0].stop\n\n return res[data_start:data_end]", "def get_original_span(self, input_processed_span: Span,\n align_mode: str = \"relaxed\"):\n assert align_mode in [\"relaxed\", \"strict\", \"backward\", \"forward\"]\n\n req_begin = input_processed_span.begin\n req_end = input_processed_span.end\n\n def get_original_index(input_index: int, is_begin_index: bool,\n mode: str) -> int:\n r\"\"\"\n Args:\n input_index: begin or end index of the input span\n is_begin_index: if the index is the begin index of the input\n span or the end index of the input span\n mode: alignment mode\n Returns:\n Original index that aligns with input_index\n \"\"\"\n if len(self.processed_original_spans) == 0:\n return input_index\n\n len_processed_text = len(self._text)\n orig_index = None\n prev_end = 0\n for (inverse_span, original_span) in self.processed_original_spans:\n # check if the input_index lies between one of the unprocessed\n # spans\n if prev_end <= 
input_index < inverse_span.begin:\n increment = original_span.begin - inverse_span.begin\n orig_index = input_index + increment\n # check if the input_index lies between one of the processed\n # spans\n elif inverse_span.begin <= input_index < inverse_span.end:\n # look backward - backward shift of input_index\n if is_begin_index and mode in [\"backward\", \"relaxed\"]:\n orig_index = original_span.begin\n if not is_begin_index and mode == \"backward\":\n orig_index = original_span.begin - 1\n\n # look forward - forward shift of input_index\n if is_begin_index and mode == \"forward\":\n orig_index = original_span.end\n if not is_begin_index and mode in [\"forward\", \"relaxed\"]:\n orig_index = original_span.end - 1\n\n # break if the original index is populated\n if orig_index is not None:\n break\n prev_end = inverse_span.end\n\n if orig_index is None:\n # check if the input_index lies between the last unprocessed\n # span\n inverse_span, original_span = self.processed_original_spans[-1]\n if inverse_span.end <= input_index < len_processed_text:\n increment = original_span.end - inverse_span.end\n orig_index = input_index + increment\n else:\n # check if there input_index is not valid given the\n # alignment mode or lies outside the processed string\n raise ValueError(f\"The input span either does not adhere \"\n f\"to the {align_mode} alignment mode or \"\n f\"lies outside to the processed string.\")\n return orig_index\n\n orig_begin = get_original_index(req_begin, True, align_mode)\n orig_end = get_original_index(req_end - 1, False, align_mode) + 1\n\n return Span(orig_begin, orig_end)" ]
[ "0.63992536", "0.58021104", "0.56746924", "0.5617751", "0.56103647", "0.5595864", "0.55761355", "0.54659295", "0.54612035", "0.5456233", "0.5443871", "0.5337581", "0.53357214", "0.53213453", "0.529732", "0.5248855", "0.5244388", "0.5244107", "0.52407306", "0.5207596", "0.51945806", "0.5186875", "0.5170513", "0.5168578", "0.5163014", "0.5162509", "0.5135674", "0.5127133", "0.5113516", "0.51098704" ]
0.6640512
0
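For readers skimming the dump, here is a minimal, self-contained sketch of the gap-correction idea in the get_corrected_index record above: the unaligned position is the aligned position minus the number of gap characters ("-" and ".") that precede it. The example sequence and index are made up for illustration.

def corrected_index(aligned_seq, aligned_index):
    # Count gaps before the aligned position and subtract them out.
    prefix = aligned_seq[:aligned_index]
    return aligned_index - (prefix.count("-") + prefix.count("."))

aligned = "AC--G.TT"                # degapped form is "ACGTT"
print(corrected_index(aligned, 6))  # aligned position 6 (first T) -> unaligned 3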
Appends upstream and downstream sequence information for primer hit. Because some sequences may be hit near the 5' or 3' end of a sequence read, it is necessary to append N's to the upstream or downstream region. This makes both visual inspection of the primers easier and allows for alignment objects to be loaded given a list of primers.
def append_primer_hit(primer, label, hit_index, region_slice, overall_length, unaligned_seq, primer_len): primer.match_count+=1 primer.labels.append(label.split()[0]) # Fill in 'N' for incomplete sequences # Set primer_index to 0 in case slicing left end of sequence primer_index=hit_index-region_slice if primer_index<0: primer_index=0 unknown_bases=overall_length-len(unaligned_seq[primer_index:hit_index+ primer_len]) if unknown_bases>0: filler="-"*unknown_bases else: filler="" upstream_region=filler+unaligned_seq[primer_index:hit_index+primer_len] primer.upstream_regions.append(upstream_region) unknown_bases=overall_length-len(unaligned_seq[hit_index:hit_index+ primer_len+region_slice]) if unknown_bases>0: filler="-"*unknown_bases else: filler="" downstream_region=unaligned_seq[hit_index:hit_index + primer_len+region_slice]+filler primer.downstream_regions.append(downstream_region) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def primer_start_fix(self):\r\n #TODO this function will not be used anymore, remove?\r\n if self.type in [\"forward_primer\", \"reverse_primer\", \"PCR_product\"]:\r\n self.start += 1\r\n if self.type == \"region\" and self.source == \"Primer3\":\r\n # this is the region containing the primers\r\n self.start += 1", "def _annotate(reads, mirbase_ref, precursors):\n for r in reads:\n for p in reads[r].precursors:\n start = reads[r].precursors[p].start + 1 # convert to 1base\n end = start + len(reads[r].sequence)\n for mature in mirbase_ref[p]:\n mi = mirbase_ref[p][mature]\n is_iso = _coord(reads[r].sequence, start, mi, precursors[p], reads[r].precursors[p])\n logger.debug((\"{r} {p} {start} {is_iso} {mature} {mi} {mature_s}\").format(s=reads[r].sequence, mature_s=precursors[p][mi[0]-1:mi[1]], **locals()))\n if is_iso:\n reads[r].precursors[p].mirna = mature\n break\n return reads", "def get_primers(header,\r\n mapping_data):\r\n\r\n if \"LinkerPrimerSequence\" in header:\r\n primer_ix = header.index(\"LinkerPrimerSequence\")\r\n else:\r\n raise IndexError(\r\n (\"Mapping file is missing LinkerPrimerSequence field.\"))\r\n if \"ReversePrimer\" in header:\r\n rev_primer_ix = header.index(\"ReversePrimer\")\r\n else:\r\n raise IndexError((\"Mapping file is missing ReversePrimer field.\"))\r\n\r\n iupac = {'A': 'A', 'T': 'T', 'G': 'G', 'C': 'C', 'R': '[AG]', 'Y': '[CT]',\r\n 'S': '[GC]', 'W': '[AT]', 'K': '[GT]', 'M': '[AC]', 'B': '[CGT]',\r\n 'D': '[AGT]', 'H': '[ACT]', 'V': '[ACG]', 'N': '[ACGT]'}\r\n\r\n raw_forward_primers = set([])\r\n raw_forward_rc_primers = set([])\r\n raw_reverse_primers = set([])\r\n raw_reverse_rc_primers = set([])\r\n\r\n for line in mapping_data:\r\n # Split on commas to handle pool of primers\r\n raw_forward_primers.update([upper(primer).strip() for\r\n primer in line[primer_ix].split(',')])\r\n raw_forward_rc_primers.update([str(DNA(primer).rc()) for\r\n primer in raw_forward_primers])\r\n raw_reverse_primers.update([upper(primer).strip() for\r\n primer in line[rev_primer_ix].split(',')])\r\n raw_reverse_rc_primers.update([str(DNA(primer).rc()) for\r\n primer in raw_reverse_primers])\r\n\r\n if not raw_forward_primers:\r\n raise ValueError((\"No forward primers detected in mapping file.\"))\r\n if not raw_reverse_primers:\r\n raise ValueError((\"No reverse primers detected in mapping file.\"))\r\n\r\n # Finding the forward primers, or rc of reverse primers indicates forward\r\n # read. 
Finding the reverse primer, or rc of the forward primers, indicates\r\n # the reverse read, so these sets are merged.\r\n raw_forward_primers.update(raw_reverse_rc_primers)\r\n raw_reverse_primers.update(raw_forward_rc_primers)\r\n\r\n forward_primers = []\r\n reverse_primers = []\r\n for curr_primer in raw_forward_primers:\r\n forward_primers.append(compile(''.join([iupac[symbol] for\r\n symbol in curr_primer])))\r\n for curr_primer in raw_reverse_primers:\r\n reverse_primers.append(compile(''.join([iupac[symbol] for\r\n symbol in curr_primer])))\r\n\r\n return forward_primers, reverse_primers", "def append_std_aligned_index(primers,\n standard_index_seq,\n region_slice):\n \n for n in primers:\n n.std_index = True\n standard_unaligned_index = get_corrected_index(standard_index_seq,\n n.aligned_index)\n # 5' for forward primer would be upstream of the Xmer by the\n # number of bases in the region slice\n n.f_std_index = standard_unaligned_index - region_slice\n # 5' for reverse primer is the length of the Xmer plus the number\n # of bases in the region slice.\n n.r_std_index = standard_unaligned_index + len(n.seq) + region_slice\n \n return primers", "def build_seq_data(seq,\n sequence_length,\n initial_primers,\n search_range):\n \n aligned_seq=DNA.make_seq(seq)\n # remove gap characters\n unaligned_seq=str(DNA.make_seq(seq).degap())\n gaps=aligned_seq.gap_maps()\n \n if search_range:\n primer_start = get_corrected_index(seq,int(search_range.split(\":\")[0]))\n primer_end = get_corrected_index(seq,int(search_range.split(\":\")[1]))\n # Correct in case end index is close to the end of the sequence\n if primer_end + sequence_length > len(unaligned_seq):\n primer_end = len(unaligned_seq)-sequence_length+1\n\n else:\n primer_start = 0\n primer_end = len(unaligned_seq)-sequence_length+1\n \n for n in range(primer_start, primer_end):\n seq_slice=unaligned_seq[n:n+sequence_length]\n aligned_index=gaps[0][n]\n unaligned_index=n\n init_key=(seq_slice,aligned_index)\n initial_primers[init_key]=unaligned_index\n \n return initial_primers", "def maskPrimers(seq_file, primer_file, mode, align_func, align_args={}, \n max_error=default_max_error, barcode=False,\n out_args=default_out_args, nproc=None, queue_size=None):\n # Define subcommand label dictionary\n cmd_dict = {alignPrimers:'align', scorePrimers:'score'}\n \n # Print parameter info\n log = OrderedDict()\n log['START'] = 'MaskPrimers'\n log['COMMAND'] = cmd_dict.get(align_func, align_func.__name__)\n log['SEQ_FILE'] = os.path.basename(seq_file)\n log['PRIMER_FILE'] = os.path.basename(primer_file)\n log['MODE'] = mode\n log['BARCODE'] = barcode\n log['MAX_ERROR'] = max_error\n if 'start' in align_args: log['START_POS'] = align_args['start']\n if 'max_len' in align_args: log['MAX_LEN'] = align_args['max_len']\n if 'rev_primer' in align_args: log['REV_PRIMER'] = align_args['rev_primer']\n if 'skip_rc' in align_args: log['SKIP_RC'] = align_args['skip_rc']\n if 'gap_penalty' in align_args:\n log['GAP_PENALTY'] = ', '.join([str(x) for x in align_args['gap_penalty']])\n log['NPROC'] = nproc\n printLog(log)\n\n # Create dictionary of primer sequences to pass to maskPrimers\n primers = readPrimerFile(primer_file)\n if 'rev_primer' in align_args and align_args['rev_primer']:\n primers = {k: reverseComplement(v) for k, v in primers.items()}\n\n # Define alignment arguments and compile primers for align mode\n align_args['primers'] = primers \n align_args['score_dict'] = getDNAScoreDict(mask_score=(0, 1), gap_score=(0, 0))\n if align_func is 
alignPrimers:\n align_args['max_error'] = max_error\n align_args['primers_regex'] = compilePrimers(primers)\n \n # Define sequence masking arguments\n mask_args = {'mode': mode, \n 'barcode': barcode, \n 'delimiter': out_args['delimiter']}\n\n # Define feeder function and arguments\n feed_func = feedSeqQueue\n feed_args = {'seq_file': seq_file}\n # Define worker function and arguments\n work_func = processMPQueue\n work_args = {'align_func': align_func, \n 'align_args': align_args,\n 'mask_args': mask_args,\n 'max_error': max_error}\n \n # Define collector function and arguments\n collect_func = collectSeqQueue\n collect_args = {'seq_file': seq_file,\n 'task_label': 'primers',\n 'out_args': out_args}\n \n # Call process manager\n result = manageProcesses(feed_func, work_func, collect_func, \n feed_args, work_args, collect_args, \n nproc, queue_size)\n\n # Print log\n result['log']['END'] = 'MaskPrimers'\n printLog(result['log'])\n \n return result['out_files']", "def alignPrimers(seq_record, primers, primers_regex=None, max_error=default_max_error,\n max_len=default_max_len, rev_primer=False, skip_rc=False,\n gap_penalty=default_gap_penalty,\n score_dict=getDNAScoreDict(mask_score=(0, 1), gap_score=(0, 0))):\n # Defined undefined parameters\n if primers_regex is None: primers_regex = compilePrimers(primers)\n seq_record = seq_record.upper()\n rec_len = len(seq_record)\n max_len = min(rec_len, max_len)\n\n # Create empty return object\n align = PrimerAlignment(seq_record)\n align.rev_primer = rev_primer\n \n # Define sequences to align and assign orientation tags\n if not skip_rc:\n seq_list = [seq_record, reverseComplement(seq_record)]\n seq_list[0].annotations['seqorient'] = 'F'\n seq_list[1].annotations['seqorient'] = 'RC'\n else:\n seq_list = [seq_record]\n seq_list[0].annotations['seqorient'] = 'F'\n \n # Assign primer orientation tags\n for rec in seq_list:\n rec.annotations['prorient'] = 'F' if not rev_primer else 'RC' \n \n # Attempt regular expression match first\n for rec in seq_list:\n scan_seq = str(rec.seq)\n scan_seq = scan_seq[:max_len] if not rev_primer else scan_seq[-max_len:]\n for adpt_id, adpt_regex in primers_regex.items():\n adpt_match = adpt_regex.search(scan_seq)\n # Parse matches\n if adpt_match:\n align.seq = rec\n align.seq.annotations['primer'] = adpt_id\n align.primer = adpt_id\n align.align_seq = scan_seq\n align.align_primer = '-' * adpt_match.start(0) + \\\n primers[adpt_id] + \\\n '-' * (max_len - adpt_match.end(0))\n align.gaps = 0\n align.error = 0\n align.valid = True\n\n # Determine start and end positions\n if not rev_primer:\n align.start = adpt_match.start(0)\n align.end = adpt_match.end(0)\n else:\n rev_pos = rec_len - max_len\n align.start = adpt_match.start(0) + rev_pos\n align.end = adpt_match.end(0) + rev_pos\n\n return align\n \n # Perform local alignment if regular expression match fails\n best_align, best_rec, best_adpt, best_error = None, None, None, None\n for rec in seq_list:\n this_align = dict()\n scan_seq = str(rec.seq)\n scan_seq = scan_seq[:max_len] if not rev_primer else scan_seq[-max_len:]\n for adpt_id, adpt_seq in primers.items():\n pw2_align = pairwise2.align.localds(scan_seq, adpt_seq, score_dict,\n -gap_penalty[0], -gap_penalty[1],\n one_alignment_only=True)\n if pw2_align:\n this_align.update({adpt_id: pw2_align[0]})\n if not this_align: continue\n \n # Determine alignment with lowest error rate\n for x_adpt, x_align in this_align.items():\n x_error = 1.0 - x_align[2] / len(primers[x_adpt])\n #x_gaps = len(x_align[1]) - 
max_len\n #x_error = 1.0 - (x_align[2] + x_gaps) / primers[x_adpt])\n if best_error is None or x_error < best_error:\n best_align = this_align\n best_rec = rec\n best_adpt = x_adpt\n best_error = x_error\n \n # Skip rev_primer complement if forward sequence error within defined threshold\n if best_error <= max_error: break\n\n # Set return object to lowest error rate alignment\n if best_align:\n # Define input alignment string and gap count\n align_primer = best_align[best_adpt][1]\n align_len = len(align_primer)\n align_gaps = align_len - max_len\n\n # Populate return object\n align.seq = best_rec\n align.primer = best_adpt\n align.align_seq = str(best_align[best_adpt][0])\n align.align_primer = align_primer\n align.gaps = align_gaps\n align.error = best_error\n align.valid = True\n\n # Determine start and end positions\n if not rev_primer:\n # TODO: need to switch to an aligner that outputs start/end for both sequences in alignment\n align.start = align_len - len(align_primer.lstrip('-'))\n align.end = best_align[best_adpt][4] - align_gaps\n else:\n # Count position from tail and end gaps\n rev_pos = rec_len - align_len\n align.start = rev_pos + best_align[best_adpt][3] + align_gaps\n align.end = rev_pos + len(align_primer.rstrip('-'))\n\n return align", "def create_primer_regex_patterns(self, header, mapping_data):\n import logging\n self.logger = logging.getLogger('_getprm_')\n \n if \"LinkerPrimerSequence\" in header:\n primer_ix = header.index(\"LinkerPrimerSequence\")\n else:\n raise IndexError(\n (\"Mapping file is missing LinkerPrimerSequence field.\"))\n if \"ReversePrimer\" in header:\n rev_primer_ix = header.index(\"ReversePrimer\")\n else:\n raise IndexError((\"Mapping file is missing ReversePrimer field.\"))\n \n raw_forward_primers = set([])\n \n raw_reverse_primers = set([])\n \n for line in mapping_data:\n # Split on commas to handle pool of primers\n raw_forward_primers.update([upper(primer).strip() for\n primer in line[primer_ix].split(',')])\n # reverse primer were reverse complemented\n raw_reverse_primers.update([upper(str(DNA(primer))) for\n primer in line[rev_primer_ix].split(',')])\n \n if not raw_forward_primers:\n self.logger.critical(\"No forward primers detected in mapping file.\")\n raise ValueError(\"No forward primers detected in mapping file.\")\n \n if not raw_reverse_primers:\n self.logger.critical(\"No reverse primers detected in mapping file.\")\n raise ValueError(\"No reverse primers detected in mapping file.\")\n\n \n forward_primers = []\n forward_primers_rc = []\n reverse_primers = []\n reverse_primers_rc = []\n\n for curr_primer in raw_forward_primers:\n \n forward_primers.append(compile(''.join([self.iupac[symbol] for symbol in curr_primer[:self.search_length]])))\n forward_primers_rc.append(compile(''.join([self.iupac[symbol] for symbol in self.reverse_complement(curr_primer[:self.search_length])])))\n \n for curr_primer in raw_reverse_primers:\n reverse_primers.append(compile(''.join([self.iupac[symbol] for symbol in curr_primer[:self.search_length]])))\n reverse_primers_rc.append(compile(''.join([self.iupac[symbol] for symbol in self.reverse_complement(curr_primer[:self.search_length])])))\n \n return forward_primers, forward_primers_rc, reverse_primers, reverse_primers_rc", "def alignprotein(self, sample, analysistype, target, program, index, hit):\n # Initialise lists to store the outputs\n if target not in sample[analysistype].dnaseq:\n sample[analysistype].dnaseq[target] = list()\n sample[analysistype].protseq[target] = list()\n 
sample[analysistype].ntalign[target] = list()\n sample[analysistype].ntindex[target] = list()\n sample[analysistype].aaidentity[target] = list()\n sample[analysistype].aaalign[target] = list()\n sample[analysistype].aaindex[target] = list()\n # Only BLASTn analyses require additional effort to find the protein sequence\n if program == 'blastn':\n # Convert the extracted, properly-oriented DNA sequence to a Seq object\n sample[analysistype].dnaseq[target].append(Seq(hit['query_sequence']))\n # Create the BLAST-like interleaved outputs with the query and subject sequences\n sample[analysistype].ntalign[target].append(self.interleaveblastresults(query=hit['query_sequence'],\n subject=hit['subject_sequence']))\n # Determine the number and position of SNPs\n count = 0\n ntindex = str()\n # Iterate through every position in the query sequence, and determine if the subject sequence at that\n # position is a match\n for i, bp in enumerate(hit['query_sequence']):\n # If the sequence at the query and subject sequences do not match, store the location\n if bp != hit['subject_sequence'][i]:\n # Append the current location (+1 due to zero indexing)\n ntindex += '{i};'.format(i=i + 1)\n # Increment the count by the length of the current position - should make the output more\n # uniform due to the fact that the numbers are not padded\n count += len(str(i))\n # If there are many SNPs, then insert line breaks for every 15+ characters\n if count >= 15:\n ntindex += '\\n'\n # Reset the character count to 0\n count = 0\n # Remove trailing ';' (or ';' followed by a newline)\n ntindex = ntindex.rstrip(';').replace(';\\n', '\\n') if ntindex else '-'\n # Add the cleaned string to the list\n sample[analysistype].ntindex[target].append(ntindex)\n # Convert the target name to a string without illegal characters - necessary for creating the\n # temporary databases below\n clean_target = ''.join(filter(str.isalnum, target))\n # Set the absolute path, and create the tmp working directory\n tmp_dir = os.path.join(sample[analysistype].reportdir, 'tmp')\n make_path(tmp_dir)\n # Set the absolute path of the FASTA file that will store the subject sequence. Will be used as the\n # database in the tblastx analysis used to translate the query and subject sequence to amino acid\n tmp_subject = os.path.join(tmp_dir, '{sn}_{target}_{at}_db_{index}.fa'\n .format(sn=sample.name,\n target=clean_target,\n at=analysistype,\n index=index))\n # Write the appropriately-converted subject sequence to the database file\n with open(tmp_subject, 'w') as tmp_db:\n SeqIO.write(SeqRecord(Seq(hit['subject_sequence'].replace('-', '')),\n id='{}_{}'.format(sample.name, target),\n description=''), tmp_db, 'fasta')\n # Create a BLAST database from this file\n self.makeblastdb(fasta=tmp_subject)\n # Create the tblastx (translated nt query: translated nt subject) call. Remove any masking. Do not\n # include the 'query' parameter, as it will be supplied below\n tblastx = NcbitblastxCommandline(db=os.path.splitext(tmp_subject)[0],\n evalue=0.1,\n outfmt=15,\n soft_masking=False,\n seg='no')\n # Run the tblastx analysis. Supply the query as stdin. 
Capture stdout, and stderr\n stdout, stderr = tblastx(stdin=sample[analysistype].targetsequence[target][index].replace('-', ''))\n # Convert the string stdout to JSON format\n json_output = json.loads(stdout)\n # Extract the necessary list of HSPs from the JSON-formatted outputs\n data = json_output['BlastOutput2'][0]['report']['results']['search']['hits'][0]['hsps']\n # Initialise a string to store the extracted amino acid subject sequence\n ref_prot = str()\n for results in data:\n # Attempt to use hit_frame 1 - the .targetsequence attribute was populated with the nt sequence in\n # (hopefully) the correct orientation, so attempt to use that\n if results['hit_frame'] == 1:\n # Populate the .protseq attribute with the Seq-converted amino acid sequence extracted from the\n # report\n sample[analysistype].protseq[target].append(Seq(results['qseq'].upper()))\n # Grab the subject sequence\n ref_prot = results['hseq']\n # Only the first result is required\n break\n # If there were no results with the hit_frame equal to 1, get the best result from the analysis\n if not ref_prot:\n for results in data:\n sample[analysistype].protseq[target].append(Seq(results['qseq'].upper()))\n ref_prot = results['hseq']\n break\n # Clear out the tmp directory\n try:\n shutil.rmtree(tmp_dir)\n except FileNotFoundError:\n pass\n else:\n # Non-blastn analyses will already have the outputs as amino acid sequences. Populate variables as required\n ref_prot = hit['subject_sequence']\n sample[analysistype].protseq[target].append(Seq(hit['query_sequence']))\n # Create the BLAST-like alignment of the amino acid query and subject sequences\n sample[analysistype].aaalign[target]\\\n .append(self.interleaveblastresults(query=sample[analysistype].protseq[target][index],\n subject=ref_prot))\n # Determine the number of matches, as well as the number and location of mismatches\n count = 0\n matches = 0\n aaindex = str()\n # Iterate through the query sequence to determine matching positions\n for i, bp in enumerate(sample[analysistype].protseq[target][index]):\n if bp != ref_prot[i]:\n aaindex += '{i};'.format(i=i + 1)\n count += len(str(i))\n # If there are many SNPs, then insert line breaks for every 10 SNPs\n if count >= 15:\n aaindex += '\\n'\n count = 0\n # Increment the total number of matches\n if bp == ref_prot[i]:\n matches += 1\n # Clean the index string\n aaindex = aaindex.rstrip(';').replace(';\\n', '\\n') if aaindex else '-'\n # Append the cleaned string to the list\n sample[analysistype].aaindex[target].append(aaindex)\n # Determine percent identity between the query and subject amino acid sequence by dividing the number of\n # matches by the total length of the query sequence and multiplying this result by 100. 
Convert to two\n # decimal places\n pid = float('{:.2f}'.format(matches / len(sample[analysistype].protseq[target][index]) * 100))\n # Append the calculated percent identity to the list\n sample[analysistype].aaidentity[target].append(pid)\n return sample", "def find_specific_primer_matches(primers,\n integer_mapped_seq,\n deletion_threshold,\n seq_count,\n sequence_length,\n label,\n unaligned_seq,\n region_slice,\n seq):\n \n primer_len=sequence_length\n overall_length=region_slice+primer_len\n bad_primers=[]\n seq_length=len(integer_mapped_seq)\n \n if len(unaligned_seq)==0:\n raise_(ValueError,('unaligned sequence contains no data.'))\n \n for p in range(len(primers)):\n corrected_index = get_corrected_index(seq,primers[p].aligned_index)\n start_index = corrected_index\n end_index = corrected_index + primer_len\n \n \n # skip test if testing beyond the end of the sequence\n if end_index > seq_length:\n continue\n # Will return all non-zeros with perfect base pair matching\n seq_bitwise = bitwise_and(primers[p].numeric_seq,\n integer_mapped_seq[start_index:end_index])\n if len(seq_bitwise.nonzero()[0])==primer_len:\n primers[p].non_specific_hits +=1\n if primers[p].non_specific_hits>deletion_threshold:\n bad_primers.append(p)\n\n \n del_primers(primers,bad_primers)\n return primers", "def process_vrps(self):\n self.origins = set()\n for afi in (\"ipv4\", \"ipv6\"):\n self.info(\"Creating prefix-lists for {} address-family\"\n .format(afi))\n self.covered[afi] = [\"seq {seq} permit {prefix} le {maxLength}\"\n .format(seq=seq, **entry)\n for seq, entry\n in enumerate(self.vrps.covered(afi))]\n origins = self.vrps.origins(afi)\n self.for_origin[afi] = {}\n for asn in origins:\n self.for_origin[afi][asn] = [\"seq {seq} permit {prefix} le {maxLength}\" # noqa: E501\n .format(seq=seq, **entry)\n for seq, entry\n in enumerate(self.vrps.for_origin(asn, afi))] # noqa: E501\n self.origins.update(origins)", "def premrna_desc(gff3, fasta):\n seqs = {}\n for defline, seq in LocusPocus.fasta.parse(fasta):\n seqid = defline[1:].split(' ')[0]\n if seqid not in seqs:\n seqs[seqid] = seq\n\n mrnaacc = ''\n mrnalen = 0\n gccontent = 0.0\n gcskew = 0.0\n ncontent = 0.0\n exoncount = 0\n introncount = 0\n utr5plen = 0\n utr3plen = 0\n for entry in gff3:\n if '\\tmRNA\\t' in entry:\n fields = entry.rstrip().split('\\t')\n assert len(fields) == 9\n mrnaacc = re.search(r'accession=([^;\\n]+)', fields[8]).group(1)\n mrnalen = int(fields[4]) - int(fields[3]) + 1\n mrnaseq = seqs[mrnaacc]\n if len(mrnaseq) != mrnalen:\n message = 'pre-mRNA \"%s\": length mismatch' % mrnaacc\n message += ' (gff3=%d, fa=%d)' % (mrnalen, len(mrnaseq))\n message += '; most likely a duplicated accession, discarding'\n print(message, file=sys.stderr)\n mrnaacc = ''\n gccontent = gc_content(mrnaseq)\n gcskew = gc_skew(mrnaseq)\n ncontent = n_content(mrnaseq)\n elif '\\texon\\t' in entry:\n exoncount += 1\n elif '\\tintron\\t' in entry:\n introncount += 1\n elif '\\tfive_prime_UTR\\t' in entry:\n fields = entry.rstrip().split('\\t')\n assert len(fields) == 9\n utr5plen += int(fields[4]) - int(fields[3]) + 1\n elif '\\tthree_prime_UTR\\t' in entry:\n fields = entry.rstrip().split('\\t')\n assert len(fields) == 9\n utr3plen += int(fields[4]) - int(fields[3]) + 1\n elif entry.startswith('###'):\n if mrnaacc != '':\n values = '%s %d %.3f %.3f %.3f %d %d %d %d' % (\n mrnaacc, mrnalen, gccontent, gcskew, ncontent,\n exoncount, introncount, utr5plen, utr3plen)\n yield values.split(' ')\n mrnaacc = ''\n mrnalen = 0\n gccontent = 0.0\n gcskew = 
0.0\n ncontent = 0.0\n exoncount = 0\n exonlen = 0\n introncount = 0\n utr5plen = 0\n utr3plen = 0", "def genPrimerPairs_5Ext(primer_length=20, anneal_length=10, GC_low=40, GC_high=60):\n\n print('Primers for 5\\' extension half-asstemers')\n\n forwTemplate5_3 = GenOligoGC(primer_length,GC_low, GC_high)\n \"\"\"re.match checks if the first 2 Nuc are GC in the forward and backwards direction\"\"\"\n while not (re.match(\"[GC]{2}\",str(forwTemplate5_3)) and\n re.match(\"[GC]{2}\", str(forwTemplate5_3[::-1])) and\n re.match(\"[GC]{2}\", str(forwTemplate5_3[10:12]))):\n\n forwTemplate5_3 = GenOligoGC(primer_length,GC_low, GC_high)\n\n forwTemp3_5 = forwTemplate5_3[::-1]\n forwPrimer5_3 = forwTemp3_5.complement()\n print(f\"Template Seq 3\\' - > 5\\': {forwTemp3_5}\")\n print(f\"ForwPrimer Seq 5\\' - > 3\\': {forwPrimer5_3}\")\n\n forwPrimer_f10 = forwPrimer5_3[:10]\n print(f\"First 10 Nucleotides of forward primer: {forwPrimer_f10}\")\n\n revPrimer_f10 = GenOligoGC(10,GC_low, GC_high)\n while not re.match(\"[GC]{2}\",str(revPrimer_f10)):\n revPrimer_f10 = GenOligoGC(10,GC_low, GC_high)\n\n revPrimer5_3 = revPrimer_f10 + forwPrimer_f10\n\n print(f\"RevPrimer Seq 5\\' - > 3\\': {revPrimer5_3}\")\n\n return forwPrimer5_3, revPrimer5_3", "def scorePrimers(seq_record, primers, start=default_start, rev_primer=False, \n score_dict=getDNAScoreDict(mask_score=(0, 1), gap_score=(0, 0))):\n # Create empty return dictionary\n seq_record = seq_record.upper()\n align = PrimerAlignment(seq_record)\n align.rev_primer = rev_primer\n\n # Define orientation variables\n seq_record.annotations['seqorient'] = 'F'\n seq_record.annotations['prorient'] = 'F' if not rev_primer else 'RC'\n\n # Score primers\n this_align = {}\n rec_len = len(seq_record)\n if rev_primer: end = rec_len - start\n for adpt_id, adpt_seq in primers.items():\n if rev_primer: start = end - len(adpt_seq)\n else: end = start + len(adpt_seq)\n chars = zip(seq_record[start:end], adpt_seq)\n score = sum([score_dict[(c1, c2)] for c1, c2 in chars])\n this_align.update({adpt_id: (score, start, end)})\n\n # Determine primer with lowest error rate\n best_align, best_adpt, best_err = None, None, None\n for adpt, algn in this_align.items():\n #adpt_err = 1.0 - float(algn[0]) / weightSeq(primers[adpt])\n err = 1.0 - float(algn[0]) / len(primers[adpt])\n if best_err is None or err < best_err:\n best_align = algn\n best_adpt = adpt\n best_err = err\n\n # Set return dictionary to lowest error rate alignment\n if best_align:\n # Populate return object\n align.primer = best_adpt if best_err < 1.0 else None\n align.start = best_align[1]\n align.end = best_align[2]\n align.error = best_err\n align.valid = True\n\n # Determine alignment sequences\n if not rev_primer:\n align.align_seq = str(seq_record.seq[:best_align[2]])\n align.align_primer = '-' * best_align[1] + primers[best_adpt]\n else:\n align.align_seq = str(seq_record.seq[best_align[1]:])\n align.align_primer = primers[best_adpt] + '-' * (rec_len - best_align[2])\n \n return align", "def mergeChainedAlignedSegments(chainedAlignedSegments, refSequence, readSequence):\n cAR = pysam.AlignedSegment()\n aR = chainedAlignedSegments[0]\n cAR.query_name = aR.query_name\n \n #Parameters we don't and therefore set properly\n #cAR.flag = aR.flag\n #cAR.mapq = aR.mapq\n #cAR.mrnm = 0\n #cAR.mpos=0\n #cAR.isize=0\n #cAR.qual = \"<\" * len(readSequence)\n #cAR.tags = aR.tags \n cAR.next_reference_id = -1\n cAR.reference_start = aR.reference_start #Reference start\n cAR.is_reverse = aR.is_reverse\n cAR.query_sequence = 
reverseComplement(readSequence) if cAR.is_reverse else readSequence\n cAR.reference_id = aR.reference_id\n cigarList = []\n pPos = aR.reference_start\n #Iterate from the other end of the sequence if reversed\n pQPos = -(len(readSequence)-1) if cAR.is_reverse else 0 \n \n for aR in chainedAlignedSegments:\n assert cAR.is_reverse == aR.is_reverse\n #Add a deletion representing the preceding unaligned reference positions\n assert aR.reference_start >= pPos\n if aR.reference_start > pPos:\n cigarList.append((2, aR.reference_start - pPos))\n pPos = aR.reference_start \n \n #Add an insertion representing the preceding unaligned read positions\n #make it a soft clip if it is the first chained alignment\n qPos = getFirstNonClippedPositionInRead(aR, readSequence)\n assert qPos >= pQPos\n if qPos > pQPos:\n cigarList.append((4 if aR == chainedAlignedSegments[0] else 1, qPos - pQPos)) \n pQPos = qPos\n \n #Add the operations of the cigar, filtering hard and soft clipping\n for op, length in aR.cigar:\n assert op in (0, 1, 2, 4, 5)\n if op in (0, 1, 2):\n cigarList.append((op, length))\n if op in (0, 2): #Is match or deletion\n pPos += length\n if op in (0, 1): #Is match or insertion\n pQPos += length\n \n assert pPos <= len(refSequence)\n \n #Set reference end coordinate (which is exclusive)\n #cAR.reference_end = pPos #We don't do this because it is set by cigar string\n \n #Now add any trailing, necessary soft clipping\n if cAR.is_reverse:\n assert pQPos <= 1\n if pQPos < 1:\n cigarList.append((4, -pQPos + 1))\n else:\n assert pQPos <= len(readSequence)\n if pQPos < len(readSequence):\n cigarList.append((4, len(readSequence) - pQPos))\n \n cAR.cigar = tuple(cigarList)\n \n #Check ops\n for op, length in cAR.cigar: #We should have no hard clipped ops\n assert op in (0, 1, 2, 4)\n \n #Reference sequence check coordinates\n assert sum([ length for op, length in cigarList if op in (0, 2)]) == cAR.reference_end - cAR.reference_start\n assert cAR.reference_start >= 0 and cAR.reference_start < len(refSequence)\n assert cAR.reference_end >= 0 and cAR.reference_end <= len(refSequence)\n \n #Read sequence check coordinates\n assert cAR.query_alignment_start >= 0 and cAR.query_alignment_start < len(readSequence)\n assert cAR.query_alignment_end >= 0 and cAR.query_alignment_end <= len(readSequence)\n assert cAR.query_alignment_start + sum([ length for op, length in cigarList if op in (0, 1)]) == cAR.query_alignment_end\n \n return cAR", "def __init__(self, protein, fragment_set, nmers, start_temp, end_temp, nfrags, anneal_rate):\n \n #store variables used to initiate the object\n self.scorefxn = create_score_function('score3')\n self.current_protein = protein\n self.best_protein = protein\n self.my_fragment_set = fragment_set\n self.nmers = nmers\n self.nfrags = nfrags\n self.end_temp = end_temp\n self.anneal_rate = anneal_rate\n \n #initialize some starter values (for tracking later...)\n self.current_energy = self.compute_energy(self.current_protein)\n self.current_iteration = 0\n self.current_T = start_temp\n\n \n #initialize a data structure to keep track of which fragments (at a given position) have been sampled already during each sampling step\n self.sampled_fragments = {}\n \n #create a dictionary of nfrag candidate fragments for each position in the sequence\n self.candidate_frag_list = {}\n for position in range(1, protein.length-self.nmers+1):\n self.candidate_frag_list[position] = self.my_fragment_set.get_lowRMS_fragments(position, nfrags)\n \n #for reporting information to the log file later\n 
self.temperature = [self.current_T]\n self.iteration = [self.current_iteration]\n self.energy = [self.current_energy]", "def genPrimerPairs_3Ext(primer_length=20, anneal_length=10, GC_low=40, GC_high=60):\n\n print('Primers for 3\\' extension half-asstemers')\n\n\n forwTemplate5_3 = GenOligoGC(primer_length,GC_low, GC_high)\n \"\"\"re.match checks if the first 2 Nuc are GC in the forward and backwards direction\"\"\"\n while not (re.match(\"[GC]{2}\",str(forwTemplate5_3)) and\n re.match(\"[GC]{2}\", str(forwTemplate5_3[::-1])) and\n re.match(\"[GC]{2}\", str(forwTemplate5_3[8:10]))):\n\n forwTemplate5_3 = GenOligoGC(primer_length,GC_low, GC_high)\n\n forwTemp3_5 = forwTemplate5_3[::-1]\n forwPrimer5_3 = forwTemp3_5.complement()\n print(f\"Template Seq 3\\' - > 5\\': {forwTemp3_5}\")\n print(f\"ForwPrimer Seq 5\\' - > 3\\': {forwPrimer5_3}\")\n\n forwPrimer_L10 = forwPrimer5_3[10:]\n print(f\"Last 10 Nucleotides of forward primer: {forwPrimer_L10}\")\n\n revPrimer_L10 = GenOligoGC(10,GC_low, GC_high)\n while not re.match(\"[GC]{2}\",str(revPrimer_L10[::-1])):\n revPrimer_L10 = GenOligoGC(10,GC_low, GC_high)\n\n \"\"\"First 10 Nuc of rev primer must be identical to last 10 Nuc of forward Primer\"\"\"\n revPrimer5_3 = forwPrimer_L10 + revPrimer_L10\n\n print(f\"RevPrimer Seq 5\\' - > 3\\': {revPrimer5_3}\")\n\n return forwPrimer5_3, revPrimer5_3", "def _synthesize_prologue(self):\n\n InstructionStream._synthesize_prologue(self)\n\n # Parallel parameters are passed in the prefered slot and the next\n # slot of the user arugment.\n self._prologue.add(spu.shlqbyi(self.r_rank, SPURegister(3, None), 4)) \n self._prologue.add(spu.shlqbyi(self.r_size, SPURegister(3, None), 8)) \n\n if self.raw_data_size is not None:\n self.acquire_block_registers()\n\n self._prologue.add(spu.shlqbyi(self.r_block_size, SPURegister(4, None), 4)) \n self._prologue.add(spu.shlqbyi(self.r_offset, SPURegister(4, None), 8)) \n else:\n print 'no raw data'\n return", "def intronDiscovery(poolArguement):\n\n\tbamFiles, gene, chrom, start, stop, cwd = poolArguement\n\n\tprint ('processing ' + gene)\n\n\tpos = ''.join([chrom, ':', start, '-', stop])\n\n\tfor bam in bamFiles:\n\n\t\tspliceDict = {}\n\t\tgeneFilePath = (cwd + \"/\" + bam[:-4] + \"/\" + gene + \".txt\")\n\n\t\ttry:\n\t\t\texitcode, stdout, stderr = run(' '.join(['samtools view', bam, pos]))\n\t\texcept Exception as e:\n\t\t\tprint ('Exception message: ' + str(e))\n\t\t\tprint (\"Exception occured while running \\\"samtools view\\\" on \" + bam + \" for position \" + pos + \" Skipping.\")\n\t\t\tcontinue\n\n\t\tif not stdout:\n\t\t\t#print ('No introns found for ' + gene + ' at ' + pos + ' in ' + bam)\n\t\t\tcontinue\n\n\t\tfor line in stdout.splitlines():\n\n\t\t\telems = line.decode().split()\n\n\t\t\talignmentStart = int(elems[3])\n\t\t\tcigar = str(elems[5])\n\t\t\talignmentScore = int(elems[1])\n \n\t\t\tif 'N' not in cigar: \t#only get introns\n\t\t\t\tcontinue\n\n\t\t\tif (alignmentScore >= 256): \t#only primary alignments\n\t\t\t\tcontinue\n\n\t\t\tif not ((alignmentStart > int(start)) and (alignmentStart < int(stop))): \t#check if alignment start is after known junction start but before known junction end \n\t\t\t\tcontinue\n\n\t\t\ttry:\n\t\t\t\toffset, matchedExon, intronLength = parseCIGARForIntrons(cigar)\n\t\t\texcept Exception as e:\n\t\t\t\tprint ('Error message: ' + str(e))\n\t\t\t\tprint ('Error trying to parse CIGAR string: ' + cigar + ' with the bam file ' + bam + ' and the position: ' + pos + ' 
Skipping.')\n\t\t\t\tcontinue\n\n\t\t\tjunctionStart = alignmentStart + matchedExon + offset\n\t\t\tjunctionEnd = junctionStart + intronLength\n\n\t\t\t# Beryl Cummings' Code, taken from makeUniqSpliceDict()\n\t\t\t# uniqueSplice = ':'.join([chrom, str(junctionStart), str(junctionEnd)])\n\t\t\tuniqueSplice = (chrom, str(junctionStart), str(junctionEnd))\n\t\t\t\n\t\t\tif uniqueSplice not in spliceDict:\n\t\t\t\tspliceDict[uniqueSplice] = 1\n\t\t\telse:\n\t\t\t\tspliceDict[uniqueSplice] += 1\n\n\t\tdel stdout # saves ram in between samtool calls\n\n\t\tif spliceDict:\n\t\t\tprintSplices(geneFilePath, spliceDict)\n\t\t\tdel spliceDict\n\n\tprint ('finished ' + gene)", "def __init__(self,\n seq,\n aligned_index,\n unaligned_index):\n \n self.seq=seq\n self.aligned_index=aligned_index\n self.unaligned_index=unaligned_index\n self.numeric_seq=convert_to_numeric(self.seq)\n self.upstream_regions=[]\n self.downstream_regions=[]\n self.labels=[]\n self.match_count=0\n self.percent_match=0\n self.non_specific_hits=0\n self.non_specific_percent=0\n \n self.std_index = False\n self.f_std_index = None\n self.r_std_index = None", "def updateResidueProbAnnotation(residueProb):\n\n for resonance in residueProb.resonanceGroup.resonances:\n updateResonanceAnnotation(resonance)", "def process_barcode_paired_end_data(read1_data,\r\n read2_data,\r\n output_bc_fastq,\r\n output_fastq1,\r\n output_fastq2,\r\n bc1_len=6,\r\n bc2_len=6,\r\n rev_comp_bc1=False,\r\n rev_comp_bc2=False,\r\n attempt_read_orientation=False,\r\n forward_primers=None,\r\n reverse_primers=None,\r\n output_bc_not_oriented=None,\r\n fastq1_out_not_oriented=None,\r\n fastq2_out_not_oriented=None):\r\n\r\n header_index = 0\r\n sequence_index = 1\r\n quality_index = 2\r\n\r\n found_primer_match = False\r\n # Break from orientation search as soon as a match is found\r\n if attempt_read_orientation:\r\n # First check forward primers\r\n for curr_primer in forward_primers:\r\n if curr_primer.search(read1_data[sequence_index]):\r\n read1 = read1_data\r\n read2 = read2_data\r\n found_primer_match = True\r\n break\r\n if curr_primer.search(read2_data[sequence_index]):\r\n read1 = read2_data\r\n read2 = read1_data\r\n found_primer_match = True\r\n break\r\n # Check reverse primers if forward primers not found\r\n if not found_primer_match:\r\n for curr_primer in reverse_primers:\r\n if curr_primer.search(read1_data[sequence_index]):\r\n read1 = read2_data\r\n read2 = read1_data\r\n found_primer_match = True\r\n break\r\n if curr_primer.search(read2_data[sequence_index]):\r\n read1 = read1_data\r\n read2 = read2_data\r\n found_primer_match = True\r\n break\r\n else:\r\n read1 = read1_data\r\n read2 = read2_data\r\n\r\n if not found_primer_match and attempt_read_orientation:\r\n read1 = read1_data\r\n read2 = read2_data\r\n output_bc = output_bc_not_oriented\r\n output_read1 = fastq1_out_not_oriented\r\n output_read2 = fastq2_out_not_oriented\r\n else:\r\n output_bc = output_bc_fastq\r\n output_read1 = output_fastq1\r\n output_read2 = output_fastq2\r\n\r\n bc_read1 = read1[sequence_index][0:bc1_len]\r\n bc_read2 = read2[sequence_index][0:bc2_len]\r\n bc_qual1 = read1[quality_index][0:bc1_len]\r\n bc_qual2 = read2[quality_index][0:bc2_len]\r\n if rev_comp_bc1:\r\n bc_read1 = str(DNA(bc_read1).rc())\r\n bc_qual1 = bc_qual1[::-1]\r\n if rev_comp_bc2:\r\n bc_read2 = str(DNA(bc_read2).rc())\r\n bc_qual2 = bc_qual2[::-1]\r\n\r\n bc_lines = format_fastq_record(read1[header_index],\r\n bc_read1 + bc_read2,\r\n np.hstack([bc_qual1, bc_qual2]))\r\n 
output_bc.write(bc_lines)\r\n seq1_lines = format_fastq_record(read1[header_index],\r\n read1[sequence_index][bc1_len:], read1[quality_index][bc1_len:])\r\n output_read1.write(seq1_lines)\r\n seq2_lines = format_fastq_record(read2[header_index],\r\n read2[sequence_index][bc2_len:], read2[quality_index][bc2_len:])\r\n output_read2.write(seq2_lines)\r\n\r\n return", "def gen_prologue(self, frame): # pragma: no cover\n raise NotImplementedError(\"Implement this!\")", "def prepareMarkerSequence(self):\n # first prepare the markers for the first channel of the pulse\n # generaton\n markerSequence1shape1 = zeros(self.numberOfPoints(), dtype=numpy.int8)\n markerSequence1shape2 = zeros(self.numberOfPoints(), dtype=numpy.int8)\n for marker in self.markersList1:\n markerSequence1shape1[:] += marker._shape1\n markerSequence1shape2[:] += marker._shape2\n\n # take care of marker ovelap\n for i in range(len(markerSequence1shape1)):\n if markerSequence1shape1[i] > 1:\n markerSequence1shape1[i] = 1\n if markerSequence1shape2[i] > 2:\n markerSequence1shape2[i] = 2\n\n self.markerArray1[:] = [sum(i)for i in zip(\n markerSequence1shape1[:], markerSequence1shape2[:])]\n\n # if there are 2 channels the second one is prepared here\n if self.markersChannels == 2:\n\n markerSequence2shape1 = zeros(\n self.numberOfPoints(), dtype=numpy.int)\n markerSequence2shape2 = zeros(\n self.numberOfPoints(), dtype=numpy.int)\n\n if self.markersList2 == ():\n self.markersList2 = self.markersList1\n for marker in self.markersList2:\n markerSequence2shape1[:] += marker._shape1\n markerSequence2shape2[:] += marker._shape2\n for i in range(len(markerSequence2shape1)):\n if markerSequence2shape1[i] > 1:\n markerSequence2shape1[i] = 1\n if markerSequence2shape2[i] > 2:\n markerSequence2shape2[i] = 2\n\n self.markerArray2[:] = [sum(i)for i in zip(\n markerSequence2shape1[:], markerSequence2shape2[:])]", "def find_progenitors_until_z_EAGLE(self, mtree, nodeID, z1, z2):\n snapcount = 0\n print(':Read MergerTree from %d until %d' % (z2, z1))\n for ss in range(z2, z1, -1):\n if ss == z2:\n df_target = pd.DataFrame({'nodeID':nodeID})\n _indx = np.where(mtree.data.snapnum.values == ss-1)\n nodeID_prog = mtree.data.index.values[_indx]\n nodeID_prog_desc = mtree.data.descendantIndex.values[_indx]\n _indx = np.where((nodeID_prog_desc < 1e15) &\n (nodeID_prog_desc > 1e11))\n nodeID_prog = nodeID_prog[_indx]\n nodeID_prog_desc = nodeID_prog_desc[_indx]\n\n df_prog = pd.DataFrame({'nodeID' : nodeID_prog,\n 'nodeID_target' : nodeID_prog_desc})\n\n # Initiliaze Output Array\n progcounts = np.zeros((df_target['nodeID'].size, z2-z1))\n\n # nodeID_prog_desc_unic is sorted\n nodeID_prog_desc_unic, count = np.unique(nodeID_prog_desc,\n return_counts=True)\n # remove -1's\n nodeID_prog_desc_unic=nodeID_prog_desc_unic[1:]; count=count[1:]\n\n # Nr. 
of progenitors for sub-&halos at snapshot z2\n s = pd.Index(df_target['nodeID'].tolist())\n _indx_now = s.get_indexer(list(nodeID_prog_desc_unic))\n now_sort_indx = np.argsort(df_target['nodeID'].values[_indx_now])\n pro_sort_indx = np.argsort(nodeID_prog_desc_unic)\n progcounts[_indx_now[now_sort_indx], snapcount] = count[pro_sort_indx]\n \n else:\n df_now = df_prog\n _indx = np.where(mtree.data.snapnum.values == ss-1)\n nodeID_prog = mtree.data.index.values[_indx]\n nodeID_prog_desc = mtree.data.descendantIndex.values[_indx]\n #_indx = np.where((nodeID_prog_desc < 1e15) &\n # (nodeID_prog_desc > 1e10))\n #nodeID_prog = nodeID_prog[_indx]\n #nodeID_prog_desc = nodeID_prog_desc[_indx]\n df_prog = pd.DataFrame({'nodeID' : nodeID_prog})\n \n progcounts_local = np.zeros(df_now['nodeID'].size)\n nodeID_prog_desc_unic, count = np.unique(nodeID_prog_desc,\n return_counts=True)\n # remove -1's\n nodeID_prog_desc_unic=nodeID_prog_desc_unic[1:]; count=count[1:]\n \n # progenitors for snapshot ss\n s = pd.Index(df_now['nodeID'].tolist())\n _indx_now = s.get_indexer(list(nodeID_prog_desc_unic))\n now_sort_indx = np.argsort(df_now['nodeID'].values[_indx_now])\n pro_sort_indx = np.argsort(nodeID_prog_desc_unic)\n progcounts_local[_indx_now[now_sort_indx]] = count[pro_sort_indx]\n df_now['progcount'] = pd.Series(progcounts_local,\n index=df_now.index, dtype=int)\n\n # Nr. of progenitors for sub-&halos at snapshot z2\n df_inter = df_now.groupby(['nodeID_target'],\n as_index=False)['progcount'].sum()\n # only real progeniteurs\n df_inter = df_inter[(df_inter['nodeID_target'] > 1e10) & \n (df_inter['nodeID_target'] < 1e15)]\n df_inter = df_inter.drop_duplicates(subset=['nodeID_target'],\n keep='first')\n \n s = pd.Index(df_target['nodeID'].tolist())\n _indx_now = s.get_indexer(df_inter['nodeID_target'].tolist())\n now_sort_indx = np.argsort(df_target['nodeID'].values[_indx_now])\n pro_sort_indx = np.argsort(df_inter['nodeID_target'].values)\n progcounts[_indx_now[now_sort_indx], snapcount] = df_inter['progcount'].values[pro_sort_indx]\n\n # sort nodeID_prog to nodeID\n #s = pd.Index(df_now['nodeID'].tolist())\n #_indx_now = s.get_indexer(list(nodeID_prog_desc_unic))\n #df_now['nodeID_target'].values[_indx_now]\n \n obs_ref_local = np.zeros(df_prog['nodeID'].size)\n for ii in range(len(nodeID_prog_desc_unic)):\n tarID = df_now.loc[\n df_now['nodeID'] == nodeID_prog_desc_unic[ii],\n 'nodeID_target'].values.astype(int)\n if tarID:\n _indx = np.where(\n nodeID_prog_desc == nodeID_prog_desc_unic[ii])\n obs_ref_local[_indx] = tarID\n df_prog['nodeID_target'] = pd.Series(obs_ref_local,\n index=df_prog.index)\n\n snapcount += 1\n del nodeID_prog_desc\n del df_now, df_inter, df_prog\n return np.asarray(df_target['nodeID'].tolist()), progcounts", "def find_sensitive_primer_matches(primers,\n integer_mapped_seq,\n deletion_threshold,\n seq_count,\n sequence_length,\n label,\n unaligned_seq,\n region_slice,\n seq):\n \n quality_threshold=seq_count-deletion_threshold\n primer_len=sequence_length\n overall_length=region_slice+primer_len\n \n \n bad_primers=[]\n seq_length=len(integer_mapped_seq)\n if len(unaligned_seq)==0:\n raise_(ValueError,('unaligned_seq contains no data.'))\n \n for p in range(len(primers)):\n corrected_index = get_corrected_index(seq,primers[p].aligned_index)\n start_index = corrected_index\n end_index = corrected_index + primer_len\n \n # skip test if testing beyond the end of the sequence\n if end_index > seq_length:\n # This counts as a miss, so do miss check\n if 
primers[p].match_count<quality_threshold:\n bad_primers.append(p)\n continue\n \n seq_bitwise = bitwise_and(primers[p].numeric_seq,\n integer_mapped_seq[start_index:end_index])\n if len(seq_bitwise.nonzero()[0])==primer_len:\n append_primer_hit(primers[p],label,start_index,region_slice,\n overall_length,unaligned_seq,primer_len)\n if primers[p].match_count<quality_threshold:\n bad_primers.append(p)\n\n del_primers(primers,bad_primers)\n \n return primers", "def process_read(self, ref, read, ref_offset=0):\n\n if read.alignment.mapping_quality < self.config.min_mapq:\n return\n\n ref_pos = read.alignment.position.position - ref_offset\n read_pos = 0\n # Use set(), as some cigar operations might generate duplicated positions,\n # E.g. for insertions, it extends the candidate positions to\n # [ins_pos - ins_len, ins_pos + ins_len] which might overlap with some\n # nearby mismatches.\n positions = set()\n for cigar in read.alignment.cigar:\n # Break if it reached the end of reference sequence.\n if ref_pos >= len(ref):\n break\n if cigar.operation not in utils.CIGAR_OPS:\n raise ValueError('Unexpected CIGAR operation', cigar, read)\n\n if cigar.operation == cigar_pb2.CigarUnit.ALIGNMENT_MATCH:\n positions.update(\n self._process_align_match(cigar, ref, read, ref_pos, read_pos))\n read_pos += cigar.operation_length\n ref_pos += cigar.operation_length\n elif cigar.operation == cigar_pb2.CigarUnit.SEQUENCE_MISMATCH:\n positions.update(\n self._process_seq_mismatch(cigar, ref, read, ref_pos, read_pos))\n read_pos += cigar.operation_length\n ref_pos += cigar.operation_length\n elif cigar.operation == cigar_pb2.CigarUnit.INSERT:\n positions.update(\n self._process_insert(cigar, ref, read, ref_pos, read_pos))\n read_pos += cigar.operation_length\n elif cigar.operation == cigar_pb2.CigarUnit.CLIP_SOFT:\n positions.update(\n self._process_soft_clip(cigar, ref, read, ref_pos, read_pos))\n read_pos += cigar.operation_length\n elif (cigar.operation == cigar_pb2.CigarUnit.DELETE or\n cigar.operation == cigar_pb2.CigarUnit.SKIP):\n positions.update(\n self._process_delete(cigar, ref, read, ref_pos, read_pos))\n ref_pos += cigar.operation_length\n elif cigar.operation == cigar_pb2.CigarUnit.SEQUENCE_MATCH:\n ref_pos += cigar.operation_length\n read_pos += cigar.operation_length\n elif (cigar.operation == cigar_pb2.CigarUnit.CLIP_HARD or\n cigar.operation == cigar_pb2.CigarUnit.PAD):\n pass\n\n # Yield positions within the range\n for pos in sorted(positions):\n if pos >= 0 and pos < len(ref):\n yield pos", "def parse_proteome(fasta_file,kmer_size=12,out_base=\"kmers\",seq_per_file=50000,num_to_write=1000000):\n\n all_kmers = {}\n seq_name = None\n current_sequence = []\n\n # Parse fasta file, splitting into kmers as we go\n with open(fasta_file) as infile:\n for l in infile:\n\n if l.startswith(\">\"):\n if seq_name is not None:\n\n sequence = \"\".join(current_sequence)\n kmer_list = create_kmers(sequence,kmer_size)\n\n for k in kmer_list:\n try:\n all_kmers[k].append(seq_name)\n except KeyError:\n all_kmers[k] = [seq_name]\n\n current_sequence = []\n seq_name = l[1:].strip()\n else:\n if seq_name is None or l.strip() == \"\":\n continue\n current_sequence.append(l.strip())\n\n if seq_name is not None:\n\n sequence = \"\".join(current_sequence)\n kmer_list = create_kmers(sequence,kmer_size)\n\n for k in kmer_list:\n try:\n all_kmers[k].append(seq_name)\n except KeyError:\n all_kmers[k] = [seq_name]\n\n # Sort kmers\n to_sort = [(len(all_kmers[k]),k) for k in all_kmers.keys()]\n 
to_sort.sort(reverse=True)\n\n # kmers \n kmers = [k[1] for k in to_sort]\n\n if len(kmers) > num_to_write:\n kmers = kmers[:num_to_write]\n else:\n\n # If there are more single kmers than the total we want to get, grab a\n # random selection of them.\n single_kmers = [k[1] for k in to_sort if k[0] == 1]\n if num_to_write - len(kmers) > 0:\n to_grab = num_to_write - len(kmers)\n random.shuffle(single_kmers)\n kmers.extend(single_kmers[:to_grab])\n\n out = []\n counter = 0\n for k in kmers:\n\n # make sure kmer has only amino acids in it\n score = sum([1 for l in k if l not in \"ACDEFGHIKLMNPQRSTVWY\"])\n if score > 0:\n continue\n\n ids = \",\".join(all_kmers[k])\n out.append(\"{} {:5d} {}\\n\".format(k,len(all_kmers[k]),ids))\n\n if counter != 0 and counter % seq_per_file == 0:\n\n out_file = \"{}_{}.kmers\".format(out_base,counter)\n print(counter,len(kmers))\n sys.stdout.flush()\n\n f = open(out_file,'w')\n f.write(\"\".join(out))\n f.close()\n\n out = []\n\n counter += 1\n\n\n out_file = \"{}_{}.kmers\".format(out_base,counter)\n\n f = open(out_file,'w')\n f.write(\"\".join(out))\n f.close()", "def Calculate3UTRUsage(entrez_genes, bedfile, chroms, outfile, threshold, PAfile, extension, index):\n\t# Separate reads by chrom \n\trawreadslibName1 = (bedfile).split('/')[-1]\n\trawreadssuffix1 = rawreadslibName1.split('.')[-1] \n\trawreadslibName1 = rawreadslibName1.split('.')[0]\n\trawreadsextension1 = \"-\" + rawreadslibName1 +'.' + rawreadssuffix1 + \"1\"\n\tif Utility_extended.fileExists(bedfile):\n\t\tif Utility_extended.chrom_files_exist(chroms, rawreadsextension1) != 1:\n\t\t\t# Separate by chrom and sort by start\n\t\t\tprint chroms, rawreadsextension1, \" files do not exist, separate by chroms and sort each file according to the second column. \"\n\t\t\tUtility_extended.separate_by_chrom_sort(chroms, bedfile, rawreadsextension1, str(index))\n\telse:\n\t\tprint bedfile, \" is not found\"\n\t\tsys.exit(1)\n\n\t#This part is to access the polyadenylation sites\n\tPA1 = open(PAfile, 'r')\n\t\n\tPAsiteslist = []\n\tPA2 = 'i'\n\twhile PA2 != '':\n\t\tPA2 = PA1.readline()\n\t\tif PA2 != '':\n\t\t\tPA3 = PA2.strip('\\n')\n\t\t\tPA4 = PA3.split('\\t')\n\t\t\tPAsiteslist.append((PA4[0],PA4[1]))\n\n\tPA1.close()\n\n\t# Here the output is 'a', i.e. 
the output is appended to an existing file instead of creating one\n\toutf = open(outfile, 'a')\t\n\tfor chrom in chroms: \n\t\tif chrom in entrez_genes.chroms:\n\t\t\t# a KnownEntrezGenes object\n\t\t\tentrez_genes_by_chrom = Entrez.KnownEntrezGenes([chrom], entrez_genes.subset_by_chrom(chrom))\n\t\t\t# Get the read locations\n\t\t\tif Utility_extended.fileExists(chrom + rawreadsextension1):\n\t\t\t\tf = open(chrom + rawreadsextension1, 'r')\n\t\t\t\ttag_positions = []\n\t\t\t\tfor line in f:\n\t\t\t\t\tline = line.strip()\n\t\t\t\t\tsline = line.split()\n\t\t\t\t\t#make sure the extension is always 0, otherwise the rest of the program might not work as intended\n\t\t\t\t\ttag_positions.append(associate_tags_with_regions.tag_position(sline, 0))\n\t\t\t\t\n\t\t\t\tf.close()\n\t\t\t\tif not Utility_extended.is_list_sorted(tag_positions):\n\t\t\t\t\ttag_positions.sort()\t\t\t\t\t\n\t\t\t\t#By this point tag_positions is a sorted list of all the reads located on the strand and chromosome the code is currently dealing with\n\n\t\t\t\tfor entrez_id in entrez_genes_by_chrom.entrez_ids:\n\t\t\t\t\tgene = entrez_genes_by_chrom.entrez_genes[entrez_id] # an EntrezGene class object\n\t\t\t\t\t# get_3UTRs gets the ENTREZ 3'UTR, which appears to generally give the beginning of the 3'UTR and a site very close to the most distal polyadenylation site\n\t\t\t\t\tthree_UTRs = gene.get_3UTRs()\n\t\t\t\t\t# Mastertuplemaker uses the ENTREZ 3'UTR and the polyA sites given to create the true data for the 3'UTR needed for CUTR_vs_AUTR to work\n\t\t\t\t\ttrue3UTRstarts, true3UTRends, UTRregion_start, UTRregion_end, UTRbeginning = Mastertuplemaker(three_UTRs,PAsiteslist,chrom,gene.strand, extension)\n\t\t\t\t\t#value should always be 1 as only 3'UTR with more than 1 polyA site need be considered\n\t\t\t\t\tif len(true3UTRends) > 1:\n\t\t\t\t\t\t#find all reads inside the 3'UTR\n\t\t\t\t\t\tinside_reads = associate_tags_with_3UTR(tag_positions, UTRregion_start, UTRregion_end)\n\t\t\t\t\t\t#finds reads in each region of the 3'UTR and calculates aUTR/cUTR for each of them\n\t\t\t\t\t\t#PolyAsites potentially useful for output\n\t\t\t\t\t\tRUDs, basic_RUD, PolyAsites = CUTR_vs_AUTR(true3UTRstarts, true3UTRends, inside_reads, gene.strand, threshold)\n\t\t\t\t\t\t\n\t\t\t\t\t\t#important if one wants to output gene_symbol information\n\t\t\t\t\t\tgene_symbol = []\n\t\t\t\t\t\tfor mytranscript in gene.transcripts:\n\t\t\t\t\t\t\tif mytranscript.additional_annotations[0] not in gene_symbol:\n\t\t\t\t\t\t\t\tgene_symbol.append(mytranscript.additional_annotations[0])\n\n\n\t\t\t\t\t\t#outline to use to output RUDs\n\t\t\t\t\t\toutline = str(entrez_id) + \"\\t\" + chrom + \"\\t\" + gene.strand + \"\\t\" + str(basic_RUD) + \"\\t\" + \",\".join(map(str, RUDs)) + \"\\n\"\n\t\t\t\t\t\t\n\t\t\t\t\t\t#outline to use to output polyA information for a species\n\t\t\t\t\t\t#outline = str(entrez_id) + \"\\t\" + chrom + \"\\t\" + gene.strand + \"\\t\" + str(UTRbeginning) + \"\\t\" + \",\".join(map(str, PolyAsites)) + \"\\n\"\n\t\t\t\t\t\n\t\t\t\t\t\toutf.write(outline)\n\toutf.close()", "def _annotate(self, generation: int):\n # Get pareto front\n pareto_front_scores = np.array(\n [individual.fitness.values for individual in self._population.individuals\n if individual.fitness.rank == 0]\n )\n\n # Calculate hypervolume\n self._evolution['hypervolume'][generation + 1] = hypervolume(pareto_front=pareto_front_scores)\n\n # Get number of solutions on the Pareto front\n self._evolution['num_solutions_front'][generation + 1] = 
len(pareto_front_scores)\n\n # Get best performance achieved for each objective\n self._evolution['best_values'][generation + 1] = np.max(pareto_front_scores, axis=0)" ]
[ "0.61210686", "0.6015792", "0.5825172", "0.5388359", "0.5349065", "0.5332983", "0.5280968", "0.5251739", "0.52036214", "0.5173795", "0.5172554", "0.516637", "0.5143279", "0.5141967", "0.5104009", "0.5093995", "0.5085161", "0.50663805", "0.50557864", "0.50397253", "0.49996945", "0.497015", "0.49499413", "0.4916235", "0.49071148", "0.48912933", "0.48858264", "0.4883689", "0.48836195", "0.48741853" ]
0.6823022
0
searches through integer mapped sequence to find specific matches. This function does not append data from sequences; rather, its purpose is to eliminate nonspecific primers before the sensitive primers (along with the associated sequence data) are built.
def find_specific_primer_matches(primers, integer_mapped_seq, deletion_threshold, seq_count, sequence_length, label, unaligned_seq, region_slice, seq): primer_len=sequence_length overall_length=region_slice+primer_len bad_primers=[] seq_length=len(integer_mapped_seq) if len(unaligned_seq)==0: raise_(ValueError,('unaligned sequence contains no data.')) for p in range(len(primers)): corrected_index = get_corrected_index(seq,primers[p].aligned_index) start_index = corrected_index end_index = corrected_index + primer_len # skip test if testing beyond the end of the sequence if end_index > seq_length: continue # Will return all non-zeros with perfect base pair matching seq_bitwise = bitwise_and(primers[p].numeric_seq, integer_mapped_seq[start_index:end_index]) if len(seq_bitwise.nonzero()[0])==primer_len: primers[p].non_specific_hits +=1 if primers[p].non_specific_hits>deletion_threshold: bad_primers.append(p) del_primers(primers,bad_primers) return primers
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_sensitive_primer_matches(primers,\n integer_mapped_seq,\n deletion_threshold,\n seq_count,\n sequence_length,\n label,\n unaligned_seq,\n region_slice,\n seq):\n \n quality_threshold=seq_count-deletion_threshold\n primer_len=sequence_length\n overall_length=region_slice+primer_len\n \n \n bad_primers=[]\n seq_length=len(integer_mapped_seq)\n if len(unaligned_seq)==0:\n raise_(ValueError,('unaligned_seq contains no data.'))\n \n for p in range(len(primers)):\n corrected_index = get_corrected_index(seq,primers[p].aligned_index)\n start_index = corrected_index\n end_index = corrected_index + primer_len\n \n # skip test if testing beyond the end of the sequence\n if end_index > seq_length:\n # This counts as a miss, so do miss check\n if primers[p].match_count<quality_threshold:\n bad_primers.append(p)\n continue\n \n seq_bitwise = bitwise_and(primers[p].numeric_seq,\n integer_mapped_seq[start_index:end_index])\n if len(seq_bitwise.nonzero()[0])==primer_len:\n append_primer_hit(primers[p],label,start_index,region_slice,\n overall_length,unaligned_seq,primer_len)\n if primers[p].match_count<quality_threshold:\n bad_primers.append(p)\n\n del_primers(primers,bad_primers)\n \n return primers", "def handle_seq(seq, barcode_map, result_dict):\n for i in range(len(seq)):\n for barcode in barcode_map.keys():\n possible_match = seq[i: i + len(barcode)]\n if possible_match == barcode:\n result_dict[barcode][i] += 1", "def get_sensitive_hits(primers,\n input_fasta_files,\n percent_match,\n sequence_length,\n region_slice):\n\n seq_count=0\n for n in input_fasta_files:\n seq_total_target=get_sequence_count(n)\n deletion_threshold=get_deletion_threshold(percent_match,\n seq_total_target)\n fasta_f=open(n,'U')\n for label,seq in MinimalFastaParser(fasta_f):\n seq_count+=1\n unaligned_seq = seq.replace(\"-\",\"\")\n unaligned_seq = unaligned_seq.replace(\".\",\"\")\n unaligned_seq = unaligned_seq.upper()\n unaligned_seq = unaligned_seq.replace(\"U\",\"T\")\n integer_mapped_seq = convert_to_numeric(unaligned_seq)\n primers=find_sensitive_primer_matches(primers, integer_mapped_seq,\n deletion_threshold, seq_count, sequence_length,\n label,unaligned_seq, region_slice, seq)\n fasta_f.close()\n \n return primers", "def find_match(line,dic):\n seqid = line[0:seqid_len]\n sequence = line[(seqid_len + f_primer_len):(len(line) - r_primer_len)]\n if seqid in dic:\n increment(dic[seqid],sequence,1)\n else:\n dic[seqid] = {sequence:1}", "def create_primer_regex_patterns(self, header, mapping_data):\n import logging\n self.logger = logging.getLogger('_getprm_')\n \n if \"LinkerPrimerSequence\" in header:\n primer_ix = header.index(\"LinkerPrimerSequence\")\n else:\n raise IndexError(\n (\"Mapping file is missing LinkerPrimerSequence field.\"))\n if \"ReversePrimer\" in header:\n rev_primer_ix = header.index(\"ReversePrimer\")\n else:\n raise IndexError((\"Mapping file is missing ReversePrimer field.\"))\n \n raw_forward_primers = set([])\n \n raw_reverse_primers = set([])\n \n for line in mapping_data:\n # Split on commas to handle pool of primers\n raw_forward_primers.update([upper(primer).strip() for\n primer in line[primer_ix].split(',')])\n # reverse primer were reverse complemented\n raw_reverse_primers.update([upper(str(DNA(primer))) for\n primer in line[rev_primer_ix].split(',')])\n \n if not raw_forward_primers:\n self.logger.critical(\"No forward primers detected in mapping file.\")\n raise ValueError(\"No forward primers detected in mapping file.\")\n \n if not raw_reverse_primers:\n 
self.logger.critical(\"No reverse primers detected in mapping file.\")\n raise ValueError(\"No reverse primers detected in mapping file.\")\n\n \n forward_primers = []\n forward_primers_rc = []\n reverse_primers = []\n reverse_primers_rc = []\n\n for curr_primer in raw_forward_primers:\n \n forward_primers.append(compile(''.join([self.iupac[symbol] for symbol in curr_primer[:self.search_length]])))\n forward_primers_rc.append(compile(''.join([self.iupac[symbol] for symbol in self.reverse_complement(curr_primer[:self.search_length])])))\n \n for curr_primer in raw_reverse_primers:\n reverse_primers.append(compile(''.join([self.iupac[symbol] for symbol in curr_primer[:self.search_length]])))\n reverse_primers_rc.append(compile(''.join([self.iupac[symbol] for symbol in self.reverse_complement(curr_primer[:self.search_length])])))\n \n return forward_primers, forward_primers_rc, reverse_primers, reverse_primers_rc", "def get_primers(header,\r\n mapping_data):\r\n\r\n if \"LinkerPrimerSequence\" in header:\r\n primer_ix = header.index(\"LinkerPrimerSequence\")\r\n else:\r\n raise IndexError(\r\n (\"Mapping file is missing LinkerPrimerSequence field.\"))\r\n if \"ReversePrimer\" in header:\r\n rev_primer_ix = header.index(\"ReversePrimer\")\r\n else:\r\n raise IndexError((\"Mapping file is missing ReversePrimer field.\"))\r\n\r\n iupac = {'A': 'A', 'T': 'T', 'G': 'G', 'C': 'C', 'R': '[AG]', 'Y': '[CT]',\r\n 'S': '[GC]', 'W': '[AT]', 'K': '[GT]', 'M': '[AC]', 'B': '[CGT]',\r\n 'D': '[AGT]', 'H': '[ACT]', 'V': '[ACG]', 'N': '[ACGT]'}\r\n\r\n raw_forward_primers = set([])\r\n raw_forward_rc_primers = set([])\r\n raw_reverse_primers = set([])\r\n raw_reverse_rc_primers = set([])\r\n\r\n for line in mapping_data:\r\n # Split on commas to handle pool of primers\r\n raw_forward_primers.update([upper(primer).strip() for\r\n primer in line[primer_ix].split(',')])\r\n raw_forward_rc_primers.update([str(DNA(primer).rc()) for\r\n primer in raw_forward_primers])\r\n raw_reverse_primers.update([upper(primer).strip() for\r\n primer in line[rev_primer_ix].split(',')])\r\n raw_reverse_rc_primers.update([str(DNA(primer).rc()) for\r\n primer in raw_reverse_primers])\r\n\r\n if not raw_forward_primers:\r\n raise ValueError((\"No forward primers detected in mapping file.\"))\r\n if not raw_reverse_primers:\r\n raise ValueError((\"No reverse primers detected in mapping file.\"))\r\n\r\n # Finding the forward primers, or rc of reverse primers indicates forward\r\n # read. 
Finding the reverse primer, or rc of the forward primers, indicates\r\n # the reverse read, so these sets are merged.\r\n raw_forward_primers.update(raw_reverse_rc_primers)\r\n raw_reverse_primers.update(raw_forward_rc_primers)\r\n\r\n forward_primers = []\r\n reverse_primers = []\r\n for curr_primer in raw_forward_primers:\r\n forward_primers.append(compile(''.join([iupac[symbol] for\r\n symbol in curr_primer])))\r\n for curr_primer in raw_reverse_primers:\r\n reverse_primers.append(compile(''.join([iupac[symbol] for\r\n symbol in curr_primer])))\r\n\r\n return forward_primers, reverse_primers", "def test_check_map_primer_pool(self):\r\n s = \"\"\"#SampleID\\tBarcodeSequence\\tLinkerPrimerSequence\\tX\\tDescription\r\n#fake data\r\nx\\tAA\\tAC\\t3\\tsample_x\r\ny\\t\"AC\"\\tAT,DC\\t4\\t\"sample_y\"\r\nz\\tGG\\tGC\\t5\\tsample_z\"\"\"\r\n f = StringIO(s)\r\n f.name = 'test.xls'\r\n headers, id_map, barcode_to_sample_id, warnings, errors, \\\r\n primer_seqs_lens, all_primers = check_map(f,\r\n disable_primer_check=False)\r\n\r\n self.assertEqual(\r\n barcode_to_sample_id,\r\n {'AA': 'x',\r\n 'AC': 'y',\r\n 'GG': 'z'})\r\n self.assertEqual(errors, [])\r\n self.assertEqual(warnings, [])\r\n\r\n # Returns all possible primers with lengths associated.\r\n expected_all_primers = {'AC': 2, 'GC': 2, 'AT': 2, 'TC': 2}\r\n self.assertEqual(all_primers, expected_all_primers)\r\n\r\n # Returns all primers associated with each barcode.\r\n expected_primer_seqs_lens = {'AA': {'AC': 2}, 'GG': {'GC': 2},\r\n 'AC': {'AC': 2, 'GC': 2, 'AT': 2, 'TC': 2}}\r\n\r\n self.assertEqual(primer_seqs_lens, expected_primer_seqs_lens)", "def search_sequences(input_fasta_filepath, \n sequence_length,\n exclude_fasta_filepath,\n verbose,\n percent_match,\n full_primer_length,\n output_f,\n specificity_threshold,\n log_filepath, \n standard_index_file, \n search_range):\n \n # Check input and output files before generating data\n\n if isdir(output_f):\n raise IOError('%s is a directory, please specify a file path.' \\\n % output_f)\n \n try:\n output_filepath=open(output_f, 'w')\n except IOError:\n raise IOError('Unabled to open output filepath %s' %\\\n output_f)\n \n if standard_index_file:\n try:\n test_alignment_file = open(standard_index_file, \"U\")\n test_alignment_file.close()\n except IOError:\n raise IOError('Unable to open standard index file %s'%\\\n standard_index_file)\n \n if log_filepath:\n if isdir(log_filepath):\n raise IOError('log_filepath %s is a directory, please specify '+\\\n 'a filepath.' 
% log_filepath)\n try:\n test_log_f = open(log_filepath, 'w')\n except IOError:\n raise IOError('Unable to open log file %s' %\\\n log_filepath)\n \n region_slice=full_primer_length-sequence_length\n \n \n if log_filepath:\n log_f = open(log_filepath, 'w')\n if verbose:\n print(\"Building prospective primers\")\n if log_filepath:\n log_f.write(\"Building prosective primers\\n\")\n \n input_fasta_files=input_fasta_filepath.split(\":\")\n initial_primers=iterate_target_sequences(input_fasta_files,sequence_length,\\\n percent_match, search_range)\n \n if verbose:\n print(\"Constructing primer objects\")\n if log_filepath:\n log_f.write(\"Constructing primer objects\\n\")\n\n primers=construct_primers(initial_primers)\n\n if exclude_fasta_filepath:\n exclude_fasta_files=exclude_fasta_filepath.split(\":\")\n else:\n if not exclude_fasta_filepath:\n # Setting variable to 1 in case no exclusion files\n # Limits need for redundant functions\n seq_total_exclude=1\n \n if verbose and exclude_fasta_filepath:\n print(\"Counting sequences for excluded fasta file(s)\")\n if log_filepath:\n log_f.write(\"Counting sequences for excluded fasta file(s)\\n\")\n\n if exclude_fasta_filepath:\n seq_total_exclude=get_sequence_count(exclude_fasta_files)\n if verbose and exclude_fasta_filepath:\n print(\"Total sequences: %d\" % seq_total_exclude)\n if log_filepath and exclude_fasta_filepath:\n log_f.write(\"Total sequences: %d\\n\" % seq_total_exclude)\n \n if verbose and exclude_fasta_filepath:\n print(\"Finding specific hits\")\n if log_filepath and exclude_fasta_filepath:\n log_f.write(\"Finding specific hits\\n\")\n \n if exclude_fasta_filepath:\n primers=get_specific_hits(primers,exclude_fasta_files,\\\n specificity_threshold,sequence_length,region_slice,\\\n seq_total_exclude)\n \n seq_total_target=get_sequence_count(input_fasta_files)\n if verbose:\n print(\"Total number of target sequences: %d\" % seq_total_target)\n if log_filepath:\n log_f.write(\"Total number of target sequences: %d\\n\" \\\n % seq_total_target)\n\n if verbose:\n print(\"Finding sensitive primer regions.\")\n if log_filepath:\n log_f.write(\"Finding sensitive primer regions.\\n\")\n \n primers=get_sensitive_hits(primers,input_fasta_files,\\\n percent_match,sequence_length,region_slice)\n primers=calculate_percent_match(primers,seq_total_target,seq_total_exclude)\n \n if standard_index_file:\n standard_index_fasta = open(standard_index_file, \"U\")\n # Only read first file\n for label, seq in MinimalFastaParser(standard_index_fasta):\n standard_index_seq = seq\n break\n primers = append_std_aligned_index(primers, standard_index_seq,\n region_slice)\n \n else:\n standard_index_seq = None\n \n \n generate_denovo_output_file(primers,output_filepath,\\\n specificity_threshold, region_slice, standard_index_seq, percent_match,\n bool(exclude_fasta_filepath))\n \n if verbose:\n print(\"Module complete\")\n if log_filepath:\n log_f.write(\"Module complete\\n\")", "def challenge2(self):\n # Remove one letter at each position from each ID and plonk them in a set\n match_possibilities = set()\n for id in self.lines:\n sub_ids = set()\n for letter_pos in range(len(id)):\n sub_ids.add(id[:letter_pos] + id[(letter_pos + 1):])\n \n matching_letters = match_possibilities.intersection(sub_ids)\n if matching_letters:\n break\n\n match_possibilities.update(sub_ids)\n\n # If the current one matches\n print(f\"Matching letters: {matching_letters.pop()}\")", "def iterate_target_sequences(input_fasta_files,\n sequence_length,\n percent_match,\n 
search_range):\n \n initial_primers={}\n\n for n in input_fasta_files:\n # seq_count and total_seq_use based on percent_match parameter to\n # limit the number of sequences searched and optimize performance.\n analyzed_count=0\n seq_count=get_sequence_count(n)\n total_seq_use=get_number_seqs_for_primer(percent_match, seq_count)\n fasta_f=open(n,'U')\n for label,seq in MinimalFastaParser(fasta_f):\n if analyzed_count>total_seq_use:\n break\n analyzed_count+=1\n seq = seq.upper()\n initial_primers=build_seq_data(seq.replace(\"U\",\"T\"),\n sequence_length,initial_primers, search_range)\n fasta_f.close()\n if len(initial_primers)==0:\n raise ValueError('Cannot find any primers from the given fasta '+\\\n 'files, please check file format, sensitivity/specificity, '+\\\n 'and search_range parameters.')\n return initial_primers", "def get_specific_hits(primers,\n exclude_fasta_files,\n specificity_max,\n sequence_length,\n region_slice,\n seq_total_exclude):\n \n seq_count=0\n # Once sequence is found deletion_threshold number of times in excluded\n # fasta sequences, delete the primer as being nonspecific\n deletion_threshold=int(round(specificity_max*seq_total_exclude))\n for n in exclude_fasta_files:\n fasta_f=open(n,'U')\n for label,seq in MinimalFastaParser(fasta_f):\n seq_count+=1\n unaligned_seq = seq.replace(\"-\",\"\")\n unaligned_seq = unaligned_seq.replace(\".\",\"\")\n unaligned_seq = unaligned_seq.replace(\"U\",\"T\")\n unaligned_seq = unaligned_seq.upper()\n integer_mapped_seq = convert_to_numeric(unaligned_seq)\n primers=find_specific_primer_matches(primers, integer_mapped_seq,\n deletion_threshold, seq_count, sequence_length,\n label, unaligned_seq, region_slice,seq)\n fasta_f.close()\n \n return primers", "def primer_srch(self, primers):\n for fP in primers:\n if fP.binds_to(self):\n return fP", "def primer_binary_srch(self, primers):\n l = 0\n r = len(primers) - 1\n while l <= r:\n m = l + int((r - l) / 2) \n if primers[m].binds_to(self):\n return primers[m]\n elif primers[m] < self:\n l = m + 1\n else:\n r = m - 1\n return None", "def search_motif(sequences):\n motif = re.compile(r'(?=(N[^P](S|T)[^P]))') #N{P}[ST]{P}\n motif_index = {}\n\n for key,value in sequences.items():\n match_motif = re.finditer(motif, value)\n motif_start_list = []\n\n for i in match_motif:\n motif_start_list.append(str(i.start()+1))\n motif_index[key] = ' '.join(motif_start_list)\n return motif_index", "def main():\n\n args = get_args()\n \n patient_years_dict = {}\n\n isolates_to_seq = []\n\n with open(args.input_file, \"r\") as infile1:\n for line in infile1:\n if not line.startswith(\"PID\"):\n line_elements = line.strip().split(\"\\t\")\n patient_anumber = line_elements[1].split(\"_\")[0]\n patient_year = line_elements[2].split(\"-\")[0]\n if patient_anumber not in patient_years_dict:\n patient_years_dict[patient_anumber] = {patient_year:[line]}\n else:\n if patient_year not in patient_years_dict[patient_anumber]:\n patient_years_dict[patient_anumber][patient_year] = [line]\n if line not in patient_years_dict[patient_anumber][patient_year]:\n patient_years_dict[patient_anumber][patient_year].append(line)\n \n for patient in patient_years_dict:\n for year in patient_years_dict[patient]:\n print(f\"Checking patient {patient} and year {year}...\")\n wgs_codes = []\n pfge_codes = []\n isolate_dates = []\n isolate_lines = []\n for isolate in patient_years_dict[patient][year]:\n isolate_elements = isolate.strip().split(\"\\t\")\n wgs_codes.append(int(isolate_elements[5]))\n 
pfge_codes.append(int(isolate_elements[4]))\n isolate_dates.append(isolate_elements[2])\n isolate_lines.append(isolate)\n \n if any(wgs_codes):\n print(f\"\\tWGS present, moving on to next year/patient.\")\n continue\n else:\n print(f\"\\tWGS missing, checking PFGE...\\n\\tPFGE presence/absence codes for {year} are: {pfge_codes}\")\n if any(pfge_codes):\n pfge_index = pfge_codes.index(1)\n isolate_to_seq = isolate_dates[pfge_index]\n isolate_line_to_seq = isolate_lines[pfge_index]\n print(f\"\\tPFGE present, selecting first isolate with PFGE to sequence...\\n\\tIsolate to sequence is ---> {isolate_to_seq} <--- out of possible isolates {isolate_dates}.\")\n isolates_to_seq.append(isolate_line_to_seq)\n else:\n print(f\"\\tPFGE missing...\\n\\tSelecting 1st available isolate for sequencing ---> {isolate_dates[0]} <--- out of available isolates {isolate_dates}.\")\n isolates_to_seq.append(isolate_lines[0])\n\n header = f\"PID\\tADATE\\tCULTDAT\\tvalues\\tPFGE_Isolates\\tSequenced_Isolates\\n\" \n to_write = \"\".join(isolates_to_seq)\n with open(args.output_file, \"w\") as outfile1:\n outfile1.write(header + to_write)", "def check_map(infile, disable_primer_check, barcode_type=\"golay_12\",\r\n added_demultiplex_field=None, has_barcodes=True):\r\n\r\n if barcode_type == \"variable_length\":\r\n var_len_barcodes = True\r\n else:\r\n var_len_barcodes = False\r\n\r\n if barcode_type == \"0\":\r\n has_barcodes = False\r\n\r\n # hds, id_map, dsp, run_description, errors, warnings\r\n hds, mapping_data, run_description, errors, warnings = \\\r\n process_id_map(infile, has_barcodes=has_barcodes,\r\n disable_primer_check=disable_primer_check,\r\n added_demultiplex_field=added_demultiplex_field,\r\n variable_len_barcodes=var_len_barcodes)\r\n\r\n if errors:\r\n raise ValueError('Errors were found with mapping file, ' +\r\n 'please run validate_mapping_file.py to ' +\r\n 'identify problems.')\r\n\r\n id_map = {}\r\n\r\n for curr_data in mapping_data:\r\n id_map[curr_data[0]] = {}\r\n\r\n for header in range(len(hds)):\r\n for curr_data in mapping_data:\r\n id_map[curr_data[0]][hds[header]] = curr_data[header]\r\n\r\n barcode_to_sample_id = {}\r\n\r\n primer_seqs_lens = {}\r\n all_primers = {}\r\n\r\n for sample_id, sample in id_map.items():\r\n if added_demultiplex_field:\r\n barcode_to_sample_id[sample['BarcodeSequence'].upper() + \",\" +\r\n sample[added_demultiplex_field]] = sample_id\r\n else:\r\n barcode_to_sample_id[sample['BarcodeSequence'].upper()] = sample_id\r\n if not disable_primer_check:\r\n raw_primers = sample['LinkerPrimerSequence'].upper().split(',')\r\n\r\n if len(raw_primers[0].strip()) == 0:\r\n raise ValueError('No primers detected, please use the ' +\r\n '-p parameter to disable primer detection.')\r\n expanded_primers = expand_degeneracies(raw_primers)\r\n curr_bc_primers = {}\r\n for primer in expanded_primers:\r\n curr_bc_primers[primer] = len(primer)\r\n all_primers[primer] = len(primer)\r\n primer_seqs_lens[sample['BarcodeSequence']] = curr_bc_primers\r\n\r\n return hds, id_map, barcode_to_sample_id, warnings, errors, \\\r\n primer_seqs_lens, all_primers", "def interactor_finder():\n from tools import prot_id_converter\n\n proteinList = []\n with open(\"../datafiles/known_interactors.txt\",\"r\") as inpProt: # create list of gene names from hand-made text file with known ptp22 interactors\n for protLine in inpProt:\n if protLine != \"\\n\":\n curName = protLine.strip().split(\"\\t\")[0]\n curName = curName[0] + curName[1:].lower()\n proteinList.append(curName)\n inpIdL = 
prot_id_converter(proteinList, \"10090\", \"genesymbol\", \"uniprotaccession\") # convert to uniprot accessions\n print(inpIdL)\n \n with open(\"../bob/processed/bobprots_all.csv\",\"r\") as targetF: # create list of all uniprot accessions in Bob's dataset (unique razor proteins only)\n targetD = {}\n for targetLine in targetF:\n targetD[targetLine.split(\",\")[0]] = targetLine.split(\",\")[1].strip()\n for inpIdItem in inpIdL:\n for queryI in inpIdItem:\n if queryI in targetD:\n print(targetD[queryI])\n break", "def cyclic_metasploit_find(subseq, sets = None):\n sets = sets or [ string.ascii_uppercase.encode(), string.ascii_lowercase.encode(), string.digits.encode() ]\n\n if isinstance(subseq, six.integer_types):\n subseq = packing.pack(subseq, 'all', 'little', False)\n\n return _gen_find(subseq, metasploit_pattern(sets))", "def get_source_candidates(all_data_epigen):\n candids = {s:\n [np.where(np.array(c[1])!=0)[0] for c in mdata[\"test\"] ]\n for s, mdata in all_data_epigen.items()}\n return candids", "def get_seq_and_id(fasta_file, promoter_seq, promoter_ids, threshold, scores_file, delimiter):\n\n map_txt = \"DDB_DDB_G/DDB-GeneID-UniProt.txt\"\n df = pd.read_csv(map_txt, sep=\"\\t\")\n ddb_id = list(df['DDBDDB ID'].as_matrix())\n ddb_g_id = list(df['DDB_G ID'].as_matrix())\n\n all_valid_records = get_data_target.get_ids(scores_file, delimiter, 'ID')\n print(all_valid_records)\n sequences = []\n record_ids = []\n for record in SeqIO.parse(fasta_file, \"fasta\"):\n record_id = str(record.id)\n end = record_id.find('|')\n record_id_short = record_id\n if end != -1:\n record_id_short = record_id[:end]\n print(record_id_short)\n try:\n ddbg_record_id_short = ddb_g_id[ddb_id.index(record_id_short)]\n except ValueError:\n ddbg_record_id_short = record_id_short\n if ddbg_record_id_short in all_valid_records:\n record_ids.append(ddbg_record_id_short)\n seq = str(record.seq)[-threshold:]\n sequences.append(seq)\n data_record_ids = pd.DataFrame({\"record_id\": record_ids})\n data_sequences = pd.DataFrame({\"record_sequence\": sequences})\n data_record_ids.to_csv(promoter_ids, index=False, header=False)\n data_sequences.to_csv(promoter_seq, index=False, header=False)", "def prot_sequence_finder(protL):\n \n idDict = prot_id_converter(protL, \"9606\", inpDB = \"genesymbol\",outDB=\"refseqproteingi\")\n seqD = prot_entrez_fetch(idDict, retM=\"gb\", retT=\"fasta\")\n \n protD = {}\n \n for keyS, valueS in idDict.items():\n protD[keyS] = seqD[valueS]\n \n return protD", "def build_seq_data(seq,\n sequence_length,\n initial_primers,\n search_range):\n \n aligned_seq=DNA.make_seq(seq)\n # remove gap characters\n unaligned_seq=str(DNA.make_seq(seq).degap())\n gaps=aligned_seq.gap_maps()\n \n if search_range:\n primer_start = get_corrected_index(seq,int(search_range.split(\":\")[0]))\n primer_end = get_corrected_index(seq,int(search_range.split(\":\")[1]))\n # Correct in case end index is close to the end of the sequence\n if primer_end + sequence_length > len(unaligned_seq):\n primer_end = len(unaligned_seq)-sequence_length+1\n\n else:\n primer_start = 0\n primer_end = len(unaligned_seq)-sequence_length+1\n \n for n in range(primer_start, primer_end):\n seq_slice=unaligned_seq[n:n+sequence_length]\n aligned_index=gaps[0][n]\n unaligned_index=n\n init_key=(seq_slice,aligned_index)\n initial_primers[init_key]=unaligned_index\n \n return initial_primers", "def find_matching_seqs_from_alignment(sequences, ref_sequence):\n\n # if the first sequence (gaps removed) in MSA matches with reference,\n # return 
this sequence.\n first_seq_in_alignment = sequences[0] \n #first_seq_in_alignment_gaps_removed = first_seq_in_alignment.replace('-','')\n first_seq_in_alignment_gaps_removed = find_and_replace(first_seq_in_alignment, '-','')\n if first_seq_in_alignment_gaps_removed == ref_sequence:\n print('\\n\\tFirst sequence in alignment (gaps removed) matches reference,'\n '\\n\\tSkipping regorous search for matching sequence'\n )\n first_seq = list()\n first_seq.append(first_seq_in_alignment)\n return first_seq\n pairwise_scores = []\n for seq_indx, seq in enumerate(sequences):\n #seq_gaps_removed = seq.replace('-','')\n seq_gaps_removed = find_and_replace(seq, '-', '')\n print(seqs_gaps_removed)\n\n score = align_pairs_local(\n ref_sequence,\n seq_gaps_removed,\n score_only = True,\n )\n score_at_indx = (seq_indx, score)\n pairwise_scores.append(score_at_indx)\n\n seq_indx, max_score = max(pairwise_scores, key=lambda x: x[1])\n matching_seqs_indx = [\n indx for indx, score in pairwise_scores if score == max_score\n ]\n\n best_matching_seqs = [\n sequences[indx] for indx in matching_seqs_indx\n ]\n num_matching_seqs = len(best_matching_seqs)\n if num_matching_seqs > 1 :\n print('\\n\\tFound %d sequences in MSA that match the reference'\n '\\n\\tThe first sequence is taken as matching'% num_matching_seqs\n )\n return best_matching_seqs", "def check_seqs(fasta_out, fasta_files, starting_ix, valid_map, qual_mappings,\r\n filters, barcode_len, keep_primer, keep_barcode, barcode_type,\r\n max_bc_errors, retain_unassigned_reads, attempt_bc_correction,\r\n primer_seqs_lens, all_primers, max_primer_mm, disable_primer_check,\r\n reverse_primers, rev_primers, qual_out, qual_score_window=0,\r\n discard_bad_windows=False, min_qual_score=25, min_seq_len=200,\r\n median_length_filtering=None, added_demultiplex_field=None,\r\n reverse_primer_mismatches=0, truncate_ambi_bases=False):\r\n\r\n seq_lengths = {}\r\n\r\n # Record complete barcode + primer + sequence lengths\r\n raw_seq_lengths = {}\r\n # Record sequence lengths after all optional removal of components\r\n final_seq_lengths = {}\r\n\r\n bc_counts = defaultdict(list)\r\n curr_ix = starting_ix\r\n corr_ct = 0 # count of corrected barcodes\r\n\r\n # get the list of barcode lengths in reverse order\r\n barcode_length_order =\\\r\n sorted(set([len(bc.split(',')[0]) for bc in valid_map]))\r\n barcode_length_order = barcode_length_order[::-1]\r\n\r\n primer_mismatch_count = 0\r\n all_primers_lens = sorted(set(all_primers.values()))\r\n\r\n reverse_primer_not_found = 0\r\n\r\n sliding_window_failed = 0\r\n trunc_ambi_base_counts = 0\r\n\r\n below_seq_min_after_trunc = 0\r\n below_seq_min_after_ambi_trunc = 0\r\n\r\n for fasta_in in fasta_files:\r\n for curr_id, curr_seq in parse_fasta(fasta_in):\r\n curr_rid = curr_id.split()[0]\r\n curr_seq = upper(curr_seq)\r\n\r\n curr_len = len(curr_seq)\r\n curr_qual = qual_mappings.get(curr_rid, None)\r\n\r\n # if qual_out:\r\n # curr_qual_out_score = \\\r\n # \"%2.2f\" % float(float(sum(curr_qual))/float(len(curr_qual)))\r\n seq_lengths[curr_rid] = curr_len\r\n failed = False\r\n\r\n for f in filters:\r\n failed = failed or f(curr_rid, curr_seq, curr_qual)\r\n if failed: # if we failed any of the checks, bail out here\r\n bc_counts['#FAILED'].append(curr_rid)\r\n continue\r\n\r\n if barcode_type == 'variable_length':\r\n # Reset the raw_barcode, raw_seq, and barcode_len -- if\r\n # we don't match a barcode from the mapping file, we want\r\n # these values to be None\r\n raw_barcode, raw_seq, barcode_len = (None, None, 
None)\r\n\r\n curr_valid_map =\\\r\n [curr_bc.split(',')[0] for curr_bc in valid_map]\r\n # Iterate through the barcode length from longest to shortest\r\n for l in barcode_length_order:\r\n # extract the current length barcode from the sequence\r\n bc, seq = get_barcode(curr_seq, l)\r\n # check if the sliced sequence corresponds to a valid\r\n # barcode, and if so set raw_barcode, raw_seq, and\r\n # barcode_len for use in the next steps\r\n if bc in curr_valid_map:\r\n raw_barcode, raw_seq = bc, seq\r\n barcode_len = len(raw_barcode)\r\n break\r\n # if we haven't found a valid barcode, log this sequence as\r\n # failing to match a barcode, and move on to the next sequence\r\n if not raw_barcode:\r\n bc_counts['#FAILED'].append(curr_rid)\r\n continue\r\n\r\n else:\r\n # Get the current barcode to look up the associated primer(s)\r\n raw_barcode, raw_seq = get_barcode(curr_seq, barcode_len)\r\n\r\n if not disable_primer_check:\r\n try:\r\n current_primers = primer_seqs_lens[raw_barcode]\r\n # In this case, all values will be the same, i.e. the length\r\n # of the given primer, or degenerate variations thereof.\r\n primer_len = current_primers.values()[0]\r\n\r\n if primer_exceeds_mismatches(raw_seq[:primer_len],\r\n current_primers, max_primer_mm):\r\n bc_counts['#FAILED'].append(curr_rid)\r\n primer_mismatch_count += 1\r\n continue\r\n except KeyError:\r\n # If the barcode read does not match any of those in the\r\n # mapping file, the situation becomes more complicated. We do\r\n # not know the length the sequence to slice out to compare to\r\n # our primer sets, so, in ascending order of all the given\r\n # primer lengths, a sequence will the sliced out and compared\r\n # to the primer set.\r\n current_primers = all_primers\r\n found_match = False\r\n for seq_slice_len in all_primers_lens:\r\n if not(\r\n primer_exceeds_mismatches(raw_seq[:seq_slice_len],\r\n current_primers, max_primer_mm)):\r\n primer_len = seq_slice_len\r\n found_match = True\r\n break\r\n if not found_match:\r\n bc_counts['#FAILED'].append(curr_rid)\r\n primer_mismatch_count += 1\r\n continue\r\n except IndexError:\r\n # Try to raise meaningful error if problem reading primers\r\n raise IndexError('Error reading primer sequences. 
If ' +\r\n 'primers were purposefully not included in the mapping ' +\r\n 'file, disable usage with the -p option.')\r\n else:\r\n # Set primer length to zero if primers are disabled.\r\n primer_len = 0\r\n\r\n # split seqs\r\n cbc, cpr, cres = split_seq(curr_seq, barcode_len,\r\n primer_len)\r\n\r\n total_bc_primer_len = len(cbc) + len(cpr)\r\n\r\n # get current barcode\r\n try:\r\n bc_diffs, curr_bc, corrected_bc = \\\r\n check_barcode(cbc, barcode_type, valid_map.keys(),\r\n attempt_bc_correction, added_demultiplex_field, curr_id)\r\n if bc_diffs > max_bc_errors:\r\n raise ValueError(\"Too many errors in barcode\")\r\n corr_ct += bool(corrected_bc)\r\n except Exception as e:\r\n bc_counts[None].append(curr_rid)\r\n continue\r\n\r\n curr_samp_id = valid_map.get(curr_bc, 'Unassigned')\r\n\r\n new_id = \"%s_%d\" % (curr_samp_id, curr_ix)\r\n # check if writing out primer\r\n write_seq = cres\r\n\r\n if reverse_primers == \"truncate_only\":\r\n try:\r\n rev_primer = rev_primers[curr_bc]\r\n mm_tested = {}\r\n for curr_rev_primer in rev_primer:\r\n # Try to find lowest count of mismatches for all\r\n # reverse primers\r\n rev_primer_mm, rev_primer_index = \\\r\n local_align_primer_seq(curr_rev_primer, cres)\r\n mm_tested[rev_primer_mm] = rev_primer_index\r\n\r\n rev_primer_mm = min(mm_tested.keys())\r\n rev_primer_index = mm_tested[rev_primer_mm]\r\n if rev_primer_mm <= reverse_primer_mismatches:\r\n write_seq = write_seq[0:rev_primer_index]\r\n if qual_out:\r\n curr_qual = curr_qual[0:barcode_len +\r\n primer_len + rev_primer_index]\r\n else:\r\n reverse_primer_not_found += 1\r\n except KeyError:\r\n pass\r\n elif reverse_primers == \"truncate_remove\":\r\n try:\r\n rev_primer = rev_primers[curr_bc]\r\n mm_tested = {}\r\n for curr_rev_primer in rev_primer:\r\n # Try to find lowest count of mismatches for all\r\n # reverse primers\r\n rev_primer_mm, rev_primer_index = \\\r\n local_align_primer_seq(curr_rev_primer, cres)\r\n mm_tested[rev_primer_mm] = rev_primer_index\r\n\r\n rev_primer_mm = min(mm_tested.keys())\r\n rev_primer_index = mm_tested[rev_primer_mm]\r\n if rev_primer_mm <= reverse_primer_mismatches:\r\n write_seq = write_seq[0:rev_primer_index]\r\n if qual_out:\r\n curr_qual = curr_qual[0:barcode_len +\r\n primer_len + rev_primer_index]\r\n else:\r\n reverse_primer_not_found += 1\r\n write_seq = False\r\n except KeyError:\r\n bc_counts['#FAILED'].append(curr_rid)\r\n continue\r\n\r\n # Check for quality score windows, truncate or remove sequence\r\n # if poor window found. 
Previously tested whole sequence-now\r\n # testing the post barcode/primer removed sequence only.\r\n if qual_score_window:\r\n passed_window_check, window_index =\\\r\n check_window_qual_scores(curr_qual, qual_score_window,\r\n min_qual_score)\r\n # Throw out entire sequence if discard option True\r\n if discard_bad_windows and not passed_window_check:\r\n sliding_window_failed += 1\r\n write_seq = False\r\n # Otherwise truncate to index of bad window\r\n elif not discard_bad_windows and not passed_window_check:\r\n sliding_window_failed += 1\r\n write_seq = write_seq[0:window_index]\r\n if qual_out:\r\n curr_qual = curr_qual[0:barcode_len +\r\n primer_len + window_index]\r\n # Check for sequences that are too short after truncation\r\n if len(write_seq) + total_bc_primer_len < min_seq_len:\r\n write_seq = False\r\n below_seq_min_after_trunc += 1\r\n\r\n if truncate_ambi_bases and write_seq:\r\n write_seq_ambi_ix = True\r\n # Skip if no \"N\" characters detected.\r\n try:\r\n ambi_ix = write_seq.index(\"N\")\r\n write_seq = write_seq[0:ambi_ix]\r\n except ValueError:\r\n write_seq_ambi_ix = False\r\n pass\r\n if write_seq_ambi_ix:\r\n # Discard if too short after truncation\r\n if len(write_seq) + total_bc_primer_len < min_seq_len:\r\n write_seq = False\r\n below_seq_min_after_ambi_trunc += 1\r\n else:\r\n trunc_ambi_base_counts += 1\r\n if qual_out:\r\n curr_qual = curr_qual[0:barcode_len +\r\n primer_len + ambi_ix]\r\n\r\n # Slice out regions of quality scores that correspond to the\r\n # written sequence, i.e., remove the barcodes/primers and reverse\r\n # primers if option is enabled.\r\n if qual_out:\r\n qual_barcode, qual_primer, qual_scores_out = \\\r\n split_seq(curr_qual, barcode_len, primer_len)\r\n # Convert to strings instead of numpy arrays, strip off\r\n # brackets\r\n qual_barcode = format_qual_output(qual_barcode)\r\n qual_primer = format_qual_output(qual_primer)\r\n qual_scores_out = format_qual_output(qual_scores_out)\r\n\r\n if not write_seq:\r\n bc_counts['#FAILED'].append(curr_rid)\r\n continue\r\n\r\n if keep_primer:\r\n write_seq = cpr + write_seq\r\n if qual_out:\r\n qual_scores_out = qual_primer + qual_scores_out\r\n if keep_barcode:\r\n write_seq = cbc + write_seq\r\n if qual_out:\r\n qual_scores_out = qual_barcode + qual_scores_out\r\n\r\n # Record number of seqs associated with particular barcode.\r\n bc_counts[curr_bc].append(curr_rid)\r\n\r\n if retain_unassigned_reads and curr_samp_id == \"Unassigned\":\r\n fasta_out.write(\r\n \">%s %s orig_bc=%s new_bc=%s bc_diffs=%s\\n%s\\n\" %\r\n (new_id, curr_rid, cbc, curr_bc, int(bc_diffs), write_seq))\r\n if qual_out:\r\n qual_out.write(\r\n \">%s %s orig_bc=%s new_bc=%s bc_diffs=%s\\n%s\" %\r\n (new_id, curr_rid, cbc, curr_bc, int(bc_diffs),\r\n qual_scores_out))\r\n elif not retain_unassigned_reads and curr_samp_id == \"Unassigned\":\r\n bc_counts['#FAILED'].append(curr_rid)\r\n else:\r\n fasta_out.write(\r\n \">%s %s orig_bc=%s new_bc=%s bc_diffs=%s\\n%s\\n\" %\r\n (new_id, curr_rid, cbc, curr_bc, int(bc_diffs), write_seq))\r\n if qual_out:\r\n qual_out.write(\r\n \">%s %s orig_bc=%s new_bc=%s bc_diffs=%s\\n%s\" %\r\n (new_id, curr_rid, cbc, curr_bc, int(bc_diffs),\r\n qual_scores_out))\r\n\r\n curr_len = len(write_seq)\r\n\r\n #seq_lengths[curr_rid] = curr_len\r\n\r\n curr_ix += 1\r\n\r\n # Record the raw and written seq length of everything passing\r\n # filters\r\n raw_seq_lengths[curr_rid] = len(curr_seq)\r\n final_seq_lengths[curr_id] = curr_len\r\n\r\n if median_length_filtering:\r\n # Read original fasta 
file output to get sequence lengths\r\n fasta_out.close()\r\n fasta_out = open(fasta_out.name, \"U\")\r\n\r\n # Record sequence lengths for median/mad calculation\r\n sequence_lens = []\r\n for label, seq in parse_fasta(fasta_out):\r\n sequence_lens.append(len(seq))\r\n\r\n '''# Create a temporary file to copy the contents of the fasta file, will\r\n # need to delete once operations complete.\r\n fasta_temp = open(fasta_out.name + \"_tmp.fasta\", \"w\")\r\n\r\n sequence_lens = []\r\n for label, seq in parse_fasta(fasta_lens):\r\n sequence_lens.append(len(seq))\r\n fasta_temp.write(\">%s\\n%s\\n\" % (label, seq))\r\n\r\n fasta_temp.close()\r\n fasta_temp = open(fasta_out.name + \"_tmp.fasta\", \"U\")\r\n\r\n fasta_lens.close()\r\n # Overwrite seqs.fna with length filtered data\r\n fasta_out = open(fasta_out.name, \"w\")'''\r\n\r\n med_abs_dev, med_length = median_absolute_deviation(sequence_lens)\r\n\r\n min_corrected_len = med_length - med_abs_dev *\\\r\n float(median_length_filtering)\r\n max_corrected_len = med_length + med_abs_dev *\\\r\n float(median_length_filtering)\r\n seqs_discarded_median = 0\r\n\r\n fasta_out.seek(0)\r\n\r\n final_written_lens = []\r\n\r\n # Create final seqs.fna\r\n final_fasta_out = open(fasta_out.name.replace('.tmp', ''), \"w\")\r\n\r\n for label, seq in parse_fasta(fasta_out):\r\n curr_len = len(seq)\r\n if curr_len < min_corrected_len or curr_len > max_corrected_len:\r\n seqs_discarded_median += 1\r\n else:\r\n final_fasta_out.write(\">%s\\n%s\\n\" % (label, seq))\r\n final_written_lens.append(len(seq))\r\n\r\n final_fasta_out.close()\r\n fasta_out.close()\r\n remove_files([fasta_out.name])\r\n\r\n else:\r\n min_corrected_len = 0\r\n max_corrected_len = 0\r\n seqs_discarded_median = 0\r\n final_written_lens = 0\r\n\r\n # Copy tmp seqs file to final seqs.fna file\r\n fasta_out.close()\r\n fasta_out = open(fasta_out.name, \"U\")\r\n\r\n # Create final seqs.fna\r\n final_fasta_out = open(fasta_out.name.replace('.tmp', ''), \"w\")\r\n\r\n for label, seq in parse_fasta(fasta_out):\r\n final_fasta_out.write(\">%s\\n%s\\n\" % (label, seq))\r\n\r\n final_fasta_out.close()\r\n fasta_out.close()\r\n remove_files([fasta_out.name])\r\n\r\n median_results = (median_length_filtering, min_corrected_len,\r\n max_corrected_len, seqs_discarded_median, final_written_lens)\r\n\r\n raw_seq_lengths = raw_seq_lengths.values()\r\n final_seq_lengths = final_seq_lengths.values()\r\n\r\n log_out = format_log(bc_counts, corr_ct, valid_map, seq_lengths, filters,\r\n retain_unassigned_reads, attempt_bc_correction, primer_mismatch_count,\r\n max_primer_mm, reverse_primers, reverse_primer_not_found,\r\n sliding_window_failed, below_seq_min_after_trunc, qual_score_window,\r\n discard_bad_windows, min_seq_len, raw_seq_lengths,\r\n final_seq_lengths, median_results, truncate_ambi_bases,\r\n below_seq_min_after_ambi_trunc, )\r\n\r\n #all_seq_lengths, good_seq_lengths = get_seq_lengths(seq_lengths, bc_counts)\r\n\r\n return log_out, seq_lengths.values(), raw_seq_lengths, final_seq_lengths", "def process_seq(seq, mapping):\n return [mapping[w] for w in seq]", "def candidate_map(self):\n candidates = [[set(range(1, 10)) for _dummy in range(9)] for _dummy in range(9)]\n vertex_value_unknown = [[True for _dummy in range(9)] for _dummy in range(9)]\n for (line, row) in [(ln, rw) for ln in range(9) for rw in range(9)]:\n if self.grid[line][row] in range(1, 10):\n candidates[line][row] = set([self.grid[line][row]])\n vertex_value_unknown[line][row] = False\n for i in range(9):\n if i != row:\n 
candidates[line][i].discard(self.grid[line][row])\n if i != line:\n candidates[i][row].discard(self.grid[line][row])\n if line - line%3 + i//3 != line or row - row%3 + i%3 != row:\n candidates[line - line%3 + i//3][row - row%3 + i%3].discard(self.grid[line][row])\n # Further reduce candidate map\n reduce_cadidate_map_further = True\n while reduce_cadidate_map_further:\n reduce_cadidate_map_further = False\n total_number_of_candidates = sum([len(candidates[ln][rw]) for ln in range(9) for rw in range(9)])\n for number in range(1, 10):\n for i in range(9):\n # Check for single possible vertex for *number* in candidate map line *i*\n seen_in_j = []\n for j in range(9):\n if number in candidates[i][j]:\n seen_in_j.append(j)\n if len(seen_in_j) == 1 and vertex_value_unknown[i][seen_in_j[0]]:\n candidates[i][seen_in_j[0]] = set([number])\n vertex_value_unknown[i][seen_in_j[0]] = False\n # Discard other candidates for *number* in corresponding row and subsquare\n for j in range(9):\n if j != i:\n candidates[j][seen_in_j[0]].discard(number)\n if i - i%3 + j//3 != i:\n candidates[i - i%3 + j//3][seen_in_j[0] - seen_in_j[0]%3 + j%3].discard(number)\n # otherwise add check wheter all candidates for *number* are in the same subsquare\n elif 1 < len(seen_in_j) < 4:\n subsquares = set()\n for j in seen_in_j:\n subsquares.add(3*(i//3) + j//3)\n if len(subsquares) == 1:\n subsquare = subsquares.pop()\n for j in range(9):\n if 3*(subsquare//3) + j//3 != i:\n candidates[3*(subsquare//3) + j//3][3*(subsquare%3) + j%3].discard(number)\n # Check for single possible vertex for *number* in candidate map row *i*\n seen_in_j = []\n for j in range(9):\n if number in candidates[j][i]:\n seen_in_j.append(j)\n if len(seen_in_j) == 1 and vertex_value_unknown[seen_in_j[0]][i]:\n candidates[seen_in_j[0]][i] = set([number])\n vertex_value_unknown[seen_in_j[0]][i] = False\n # Discard other candidates for *number* in corresponding line and subsquare\n for j in range(9):\n if j != i:\n candidates[seen_in_j[0]][j].discard(number)\n if i - i%3 + j%3 != i:\n candidates[seen_in_j[0] - seen_in_j[0]%3 + j//3][i - i%3 + j%3].discard(number)\n # otherwise add check wheter all candidates for *number* are in the same subsquare\n elif 1 < len(seen_in_j) < 4:\n subsquares = set()\n for j in seen_in_j:\n subsquares.add(3*(j//3) + i//3)\n if len(subsquares) == 1:\n subsquare = subsquares.pop()\n for j in range(9):\n if 3*(subsquare%3) + j%3 != i:\n candidates[3*(subsquare//3) + j//3][3*(subsquare%3) + j%3].discard(number)\n # Check for single possible vertex for *number* in candidate map subsquare *i*\n seen_in_j = []\n for j in range(9):\n if number in candidates[3*(i//3) + j//3][3*(i%3) + j%3]:\n seen_in_j.append(j)\n if len(seen_in_j) == 1 and vertex_value_unknown[3*(i//3) + seen_in_j[0]//3][3*(i%3) + seen_in_j[0]%3]:\n candidates[3*(i//3) + seen_in_j[0]//3][3*(i%3) + seen_in_j[0]%3] = set([number])\n vertex_value_unknown[3*(i//3) + seen_in_j[0]//3][3*(i%3) + seen_in_j[0]%3] = False\n # Discard other candidates for *number* in corresponding line and row\n for j in range(9):\n if j not in [3*(i%3), 3*(i%3) + 1, 3*(i%3) + 2]:\n candidates[3*(i//3) + seen_in_j[0]//3][j].discard(number)\n if j not in [3*(i//3), 3*(i//3) + 1, 3*(i//3) + 2]:\n candidates[j][3*(i%3) + seen_in_j[0]%3].discard(number)\n # otherwise add check wheter all candidates for *number* are in the same line/row\n elif 1 < len(seen_in_j) < 4:\n lines = set()\n rows = set()\n for j in seen_in_j:\n lines.add(3*(i//3) + j//3)\n rows.add(3*(i%3) + j%3)\n if len(lines) == 1:\n 
line = lines.pop()\n for row in [rw for rw in range(9) if rw not in [3*(i%3), 3*(i%3) + 1, 3*(i%3) + 2]]:\n candidates[line][row].discard(number)\n elif len(rows) == 1:\n row = rows.pop()\n for line in [ln for ln in range(9) if ln not in [3*(i//3), 3*(i//3) + 1, 3*(i//3) + 2]]:\n candidates[line][row].discard(number)\n if sum([len(candidates[ln][rw]) for ln in range(9) for rw in range(9)]) < total_number_of_candidates:\n reduce_cadidate_map_further = True\n return candidates", "def findPAMs(self,i):\r\n\t\timport sys\r\n\t\tlistofPAMS = [] \t\t\t\t\t# Create a list for the PAM sequences.\r\n\t\tlistofReversedPAMS = [] \t\t\t\t# Create a list for the reverse PAM sequences.\r\n\t\tcounter = 0 \t\t\t\t\t\t# This counter starts for the forward sequences.\r\n\t\tfor nucleotide in self.sequences[i]:\r\n\t\t\tif nucleotide == \"G\" and self.sequences[i][counter-1] == \"G\":\r\n\t\t\t\tif counter > 23: \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# Have a set length that is 23 or greater to pass it on.\r\n\t\t\t\t\tlistofPAMS.append((self.sequences[i][counter-22:counter-2],counter-1)) # Add the sequence with the correct position to the list.\r\n\t\t\tcounter+=1\r\n\r\n\t\tcounter = 0 # This counter starts for the reverse sequences\r\n\t\tfor nucleotide in self.reversedSequenceList[i]: # Looking for the sequence in the reversed list.\r\n\t\t\tif nucleotide == \"G\" and self.reversedSequenceList[i][counter-1] == \"G\":\r\n\t\t\t\tif counter > 23:\r\n\t\t\t\t\tlistofReversedPAMS.append((self.reversedSequenceList[i][counter-22:counter-2],len(self.reversedSequenceList[i])-counter+2))\r\n\t\t\tcounter+=1\r\n\t\t\r\n\t\tself.listofPAMS.append((listofPAMS)) \t\t\t # Add to the the forward sequences to the list.\r\n\t\tself.listofReversedPAMS.append((listofReversedPAMS[::-1])) # Add the reverse sequence lists to the lists for reverse sequences.\r", "def _match(self) -> None:\n self.matched = [i for i in self.data if self.match(i)]\n self.unmatched = [i for i in self.data if not self.match(i)]", "def matching(self, pids):\n for pid in pids:\n if self.matches(pid):\n yield pid", "def findMatches2(personDict,matches,skepticalMatches,personDict2,s2=0):\n try:\n additionalMatches = {}\n skipCount = 0\n L1 = list(personDict['LAST'])\n L2 = list(personDict['FIRST'])\n L3 = list(personDict['DOB'])\n count = 0\n for ln in L1[:]:\n count += 1\n if count%600==0:\n print (round(100*count/len(L1),3),\"% complete [\"+str(count)+\"/\"+str(len(L1))+\"] after\",round(time.time()-s2,2),\"seconds\")\n print (len(additionalMatches),\"additional matches found so far...\",flush=True)\n if ln=='':\n continue\n LNIDs = personDict['LAST'][ln]\n for fn in L2:\n if fn=='':\n continue\n \n FNIDs = personDict['FIRST'][fn]\n toPassOn = LNIDs.intersection(FNIDs)\n if len(toPassOn)==0:\n skipCount += 1\n continue\n \n for dob in L3:\n if dob=='':\n continue\n DOBIDs = personDict['DOB'][dob]\n finalSet = toPassOn.intersection(DOBIDs)\n if len(finalSet)==0:\n skipCount += 1\n continue\n pairs = itertools.combinations(finalSet,2)\n for p in pairs:\n k = tuple(sorted(p))\n \n info1b = personDict2['EnterpriseID'][p[0]]\n info2b = personDict2['EnterpriseID'][p[1]]\n \n if (k not in matches) and (k not in skepticalMatches) and (k not in additionalMatches):\n badness = (L.distance(info1b[1],info2b[1])+L.distance(info1b[2],info2b[2])+2*L.distance(info1b[5],info2b[5]))\n score = getScorePair(info1b,info2b)\n if info1b[7]!=\"\" and info2b[7]!=\"\":\n badness+=L.distance(info1b[7],info2b[7])\n if len(info1b[12])>4 and len(info2b[12])>4:\n if 
info1b[12][0:4]==info2b[12][0:4]:\n badness-=2\n if badness>2 and score<5:\n continue\n \n additionalMatches[k] = score\n except KeyboardInterrupt:\n return additionalMatches\n return additionalMatches" ]
[ "0.70073456", "0.6101962", "0.5778454", "0.57115793", "0.5597058", "0.55890054", "0.55768037", "0.54905343", "0.54866445", "0.5374324", "0.5360855", "0.53143424", "0.5302089", "0.523852", "0.5202517", "0.52015847", "0.5189258", "0.5116563", "0.51111543", "0.50882447", "0.5085029", "0.50736547", "0.5062001", "0.50574195", "0.49985242", "0.4997851", "0.49879873", "0.49837956", "0.4973793", "0.49654093" ]
0.72535133
0
Iterates list of primer objects, calculates percent matches
def calculate_percent_match(primers, seq_count, exclude_seq_count=1):
    # Calculate percent of sequences that are 'hit' by each primer
    for n in range(len(primers)):
        # Calculate percent perfect match
        primers[n].percent_match=float(primers[n].match_count/seq_count)
        primers[n].non_specific_percent=\
            float(primers[n].non_specific_hits/exclude_seq_count)
    return primers
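A minimal usage sketch for the function above (Python 3 division assumed; FakePrimer is a hypothetical stand-in for the ProspectivePrimer objects the function expects, modeling only the attributes it reads or writes):

    class FakePrimer(object):
        def __init__(self, match_count, non_specific_hits):
            self.match_count = match_count
            self.non_specific_hits = non_specific_hits

    primers = [FakePrimer(match_count=8, non_specific_hits=1),
               FakePrimer(match_count=5, non_specific_hits=0)]
    primers = calculate_percent_match(primers, seq_count=10, exclude_seq_count=4)
    print(primers[0].percent_match)         # 0.8  (8 perfect hits / 10 sequences)
    print(primers[0].non_specific_percent)  # 0.25 (1 non-specific hit / 4 excluded sequences)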
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_p(candidate, reference):\n matches = 0\n for grama in candidate:\n if grama in reference:\n matches += 1\n return matches/len(candidate)", "def resultat_match(self, binomes):\n for binome in binomes:\n while True:\n score_un = self.vue.entree_resultats(binome[0])\n score_deux = self.vue.entree_resultats(binome[1])\n if score_un + score_deux != 1:\n self.vue.erreur_score()\n continue\n else:\n binome[0].ajout_score(score_un)\n binome[1].ajout_score(score_deux)\n table_players.update({\"score\": binome[0].points},\n doc_ids=[binome[0].id])\n table_players.update({\"score\": binome[1].points},\n doc_ids=[binome[1].id])\n break\n self.vue.afficher_resultats(binomes)", "def percentage(my_list, item):\n return 100.0 * frequency(my_list, item)", "def find_pcts_multi(P, start_b = [], iter = 10000):\n assert len(P) >= 2\n wins_per_player = [0] * len(P)\n all_hole = reduce(lambda x,y: x+y, P)\n for i in range(iter):\n deck = Deck()\n need = 5 - len(start_b)\n b2 = draw_sure(deck, need, all_hole+start_b)\n s = [evaluator.evaluate(start_b+b2, h) for h in P]\n for i, e in enumerate(s):\n if e == min(s):\n wins_per_player[i] += 1\n return [float(x) / sum(wins_per_player) for x in wins_per_player]", "def percentage_40(set_):\n db = TinyDB(CARD_DATA_FILE)\n card_data = db.table('card_data')\n total = card_data.count(where('set') == set_)\n q = Query()\n num_forties = card_data.count((q.set == set_) & (q.commons == 4) & (q.rares == 1))\n\n print(num_forties/total)", "def percentMatch(row):\n\n count = 0.\n for word in row['search_term']:\n if word in row['product_title'] or word in row['product_description']:\n count += 1.\n return count / len(row['search_term'])", "def __match_num(self, obj):\n score = 0\n for attr in self.list:\n try:\n if getattr(obj, attr) == getattr(self, attr):\n score += 1\n except AttributeError:\n pass\n return score", "def points_percentage(plane, p, points, total):\n match = 0\n for point in points:\n if distance_to_plane(plane, point) <= p:\n match += 1\n\n return match / total", "def analyze_similarities():\r\n print('Total number of candidate pairs:', len(pairs))\r\n print(f'\\nNumber of actual item pairs in the train set: {pairs[\"true_match\"].sum()}\\n')\r\n\r\n for feature in ['text_score', 'image_score', 'txt_img_score', 'words_ratio', 'txt_img_words']:\r\n\r\n # Check distribution of True and False predictions for various similarity scores\r\n print('-' * 50)\r\n print(f'\\nDistribution of True/False predictions for {feature}')\r\n for thr in (0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95):\r\n print('-' * 50)\r\n print(f'Similarity score over {thr}')\r\n pairs_sample = pairs[pairs[feature] >= thr]\r\n print(f'Number of similar item pairs: {len(pairs_sample)}')\r\n print(pairs_sample['true_match'].value_counts(normalize=True))\r\n\r\n # Check if identical phash can be used to improve the accuracy\r\n same_phash = pairs[pairs['phash_match'] == 1]\r\n different_phash = pairs[pairs['phash_match'] == 0]\r\n\r\n print('\\nFor item pairs with the same phash:')\r\n print(same_phash['true_match'].value_counts(normalize=True))\r\n print('Number of item pairs in this subset:', len(same_phash))\r\n\r\n print('\\nFor item pairs with different phash:')\r\n print(different_phash['true_match'].value_counts(normalize=True))\r\n print('Number of item pairs in this subset:', len(different_phash))\r\n\r\n # Check if numbers in titles can be used to improve the accuracy\r\n same_numbers = pairs[pairs['nums_match'] == 1]\r\n different_numbers = pairs[pairs['nums_match'] == 
0]\r\n\r\n print('\\nFor item pairs with the same numbers:')\r\n print(same_numbers['true_match'].value_counts(normalize=True))\r\n print('Number of item pairs in this subset:', len(same_numbers))\r\n\r\n print('\\nFor item pairs with different numbers:')\r\n print(different_numbers['true_match'].value_counts(normalize=True))\r\n print('Number of item pairs in this subset:', len(different_numbers))", "def _find_matches(self, query, min_match):\n\t\tresult_list = []\n\t\tl_query = query.lower()\n\n\t\t#The card dictionary main keys are the sets card belongs to\n\t\tfor exp in self.card_dict:\n\t\t\tfor card in self.card_dict[exp]:\n\t\t\t\t#Change all uppercase letters to lowercase in preparation for string comparisons\n\t\t\t\tl_cardname = card['name'].lower()\n\n\t\t\t\tpercent_match = 0\n\n\t\t\t\tsearch_words = {}\n\n\t\t\t\t#Create a sub dictionary for each search word in the query\n\t\t\t\tfor word in l_query.split(' '):\n\t\t\t\t\tsearch_words.update({word : {}})\n\n\t\t\t\tcard_words = l_cardname.split(' ')\n\n\t\t\t\t#Calculate the match percentage between every search word and every card word\n\t\t\t\tfor search_word in search_words:\n\t\t\t\t\tfor card_word in card_words:\n\t\t\t\t\t\tmatch = 1 - (Searcher.levenshtein_distance(search_word, card_word) / \n\t\t\t\t\t\t\tmax(len(search_word), len(card_word)))\n\n\t\t\t\t\t\tif search_word not in search_words.keys():\n\t\t\t\t\t\t\tsearch_words[search_word] = {card_word: { 'match' : match } }\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tsearch_words[search_word].update( {card_word: { 'match' : match } } )\n\n\t\t\t\t#Calculates the total match mercentage for the entire query and the card name\n\t\t\t\tfor search_word in search_words:\n\n\t\t\t\t\tmax_value_key = list(search_words[search_word].keys())[0]\n\t\t\t\t\tmax_value = search_words[search_word][max_value_key]\n\n\t\t\t\t\tfor card_word in search_words[search_word]:\n\t\t\t\t\t\tif search_words[search_word][card_word]['match'] > max_value['match']:\n\t\t\t\t\t\t\tmax_value_key = card_word\n\t\t\t\t\t\t\tmax_value = search_words[search_word][card_word]\n\n\t\t\t\t\tpercent_card_match = len(max_value_key) / len(l_cardname.replace(\" \", \"\"))\n\t\t\t\t\tpercent_query_match = len(search_word) / len(l_query.replace(\" \", \"\"))\n\n\t\t\t\t\t#These weights emphasizes matching the query more than the entire card\n\t\t\t\t\tcard_match_weight = 0.25\n\t\t\t\t\tquery_match_weight = 1 - card_match_weight\n\n\t\t\t\t\tpercent_match += (percent_query_match * max_value['match'] * query_match_weight + \n\t\t\t\t\t\tpercent_card_match * max_value['match'] * card_match_weight)\n\n\t\t\t\tif percent_match >= min_match:\n\t\t\t\t\tresult_list.append([card, percent_match])\n\n\t\treturn result_list", "def permutate_genome_percent(human, phix, bacteria):\n \n per = list(itertools.product(human, phix, bacteria))\n sum_per = [sum(i) for i in zip(*per)]\n \n #check percentage sum < 1\n if all(i > 1 for i in sum_per):\n print \"Some combinations of human, phix and bacteria greater than 1\"\n sys.exit(0)\n \n return per", "def recalculate_popularity(self):\n self.voters = 0\n for x in self.votes:\n self.voters += 1\n if x.good:\n self.popularity += 1\n else:\n self.popularity -= 1", "def get_proteome_correct_percentages(prots_filtered_feathers, outpath, length_filter_pid=None,\n copynum_scale=False, copynum_df=None,\n force_rerun=False):\n if ssbio.utils.force_rerun(flag=force_rerun, outfile=outpath):\n prot_tracker = defaultdict(int)\n big_strain_counts_df = pd.DataFrame()\n first = True\n for feather in 
prots_filtered_feathers:\n loaded = load_feather(protein_feather=feather, length_filter_pid=length_filter_pid,\n copynum_scale=copynum_scale,\n copynum_df=copynum_df)\n\n if first:\n big_strain_counts_df = pd.DataFrame(columns=loaded.columns)\n first = False\n tmp_df = pd.DataFrame(columns=loaded.columns)\n for strain in loaded.columns:\n prot_tracker[strain] += 1\n totals = list(filter(lambda x: x.endswith('total'), loaded[strain].index))\n for t in totals:\n counts = t.rsplit('_', 1)[0]\n aa_counts = list(\n filter(lambda x: (x.startswith(counts) and x not in totals), loaded[strain].index))\n for aa_count in aa_counts:\n tmp_df.at[aa_count.replace('count', '%'), strain] = loaded[strain][aa_count] / \\\n loaded[strain][t]\n big_strain_counts_df = big_strain_counts_df.add(tmp_df, fill_value=0)\n\n for c, total in prot_tracker.items():\n big_strain_counts_df.loc[:, c] /= total\n\n if len(big_strain_counts_df) > 0:\n big_strain_counts_df.astype(float).reset_index().to_feather(outpath)\n return big_strain_counts_df\n else:\n return pd.read_feather(outpath).set_index('index')", "def all_match():\n S1=Spectrum.Spectrum()\n S1.add_peak(50.7,234)\n S1.add_peak(54.6,585)\n S1.add_peak(60.7,773)\n S1.add_peak(65.6,387)\n S1.add_peak(87.7,546)\n S1.add_peak(104.6,598)\n S1.pep_mass=100\n S1.euclidean_scale()\n\n S2=Spectrum.Spectrum()\n S2.add_peak(50.5,234/2)\n S2.add_peak(54.8,585/2)\n S2.add_peak(61.0,773/2)\n S2.add_peak(65.4,387/2)\n S2.add_peak(88.0,546/2)\n S2.add_peak(104.3,598/2)\n S2.pep_mass=100\n S2.euclidean_scale()\n\n score,peaks=similarity.cosine_score_max(S1,S2)\n assert peaks==6, \"Incorrect number of peaks matched with greedy method\"\n assert math.isclose(score,1.0), \"Incorrect score with greedy method\"\n\n score,peaks=similarity.cosine_score_greedy(S1,S2)\n assert peaks==6, \"Incorrect number of peaks matched with maximum weighted method\"\n assert math.isclose(score,1.0), \"Incorrect score with maximum weighted method\"", "def find_pcts(p1, p2, start_b = [], iter = 10000):\n win_record = []\n for i in range(iter):\n deck = Deck()\n need = 5 - len(start_b)\n b2 = draw_sure(deck, need, p1+p2+start_b)\n win_record.append(_who_wins(start_b + b2, p1, p2, printout = False))\n return [win_record.count(1) / float(len(win_record)), \n win_record.count(2) / float(len(win_record))\n ]", "def compute_matches(self):\n\t\tself.local_database[\"figure_number\"] = [0] * len(self.local_database[\"feature_vectors\"])\n\t\tForensics = wbForensicsHOG(Database=self.local_database)\n\t\tForensics.KDTree_pairs(leaf_size = len(self.local_database)+1)\n\t\tForensics.d_rank(pairs=Forensics.pairs, distances=Forensics.dists, ratios=Forensics.ratios)\n\n\t\tself.local_matches = Forensics.Dist_Rank", "def alt_score(objects):\n scores = {}\n for tweet in objects:\n data = tweet._json\n raw_time = datetime.strptime(\n data['created_at'],\n '%a %b %d %H:%M:%S +0000 %Y'\n )\n age = ((datetime.utcnow() - raw_time).seconds / 60) + 1\n rt = data['retweet_count']\n fave = data['favorite_count']\n fol = data['user']['followers_count']\n weight = 1.5\n e2f = ((weight * rt + fave) / (fol / 2)) * 1000\n e2a = enagement / age\n score = e2f + e2a\n scores[score] = data['id']\n embeds = []\n for item in sorted(scores.items(), reverse=True)[:13]:\n embed = twitter.get_oembed(id=item[1], align='center')\n embeds.append(embed['html'])\n return embeds", "def get_score(self):\n for response in self.response_list:\n self.score += response.get_score", "def __call__(self, relsSortedByScores, qrelDict):\n result = 0.\n postQty = 
len(qrelDict)\n\n pos = 0\n for i, rel in enumerate(relsSortedByScores):\n if rel > RELEVANCE_THRESHOLD:\n pos += 1.\n result += pos / (i + 1.)\n\n return result / postQty", "def enter_matches_score(match_list):\n for match in match_list:\n view.show(\"{} vs {}\".format(match.player_blanc.player_first_name,\n match.player_black.player_first_name))\n score_blanc = enter_player_score(match.player_blanc)\n match.score_blanc = score_blanc\n score_black = enter_player_score(match.player_black)\n match.score_black = score_black", "def getStats(population, masterList):\n for team in population:\n for i in range(13): #13 are the number of roster spots?\n team.totHr += masterList[team.roster[i]].hr\n team.totAvg += masterList[team.roster[i]].avg\n team.totRuns += masterList[team.roster[i]].runs\n team.totSb += masterList[team.roster[i]].sb\n team.totRbi += masterList[team.roster[i]].rbi\n if i == 12:\n team.totAvg = team.totAvg / 13\n return population", "def calculateScore(self, queue):\n for song in queue:\n if song['explicit']:\n song['score'] = 3 * song['age'] + 2 * song['upvotes'] - 2 * song['downvotes']\n else:\n song['score'] = -1 * song['downvotes']", "def probability(freqlst):\n\tproblist = []\n\ttotal = 0\n\ttotes = 0\n\tfor elem in freqlst:\n\t\ttotal = total + elem\n\tfor item in freqlst:\n\t\tprob = item / total\n\t\tproblist.append(prob)\n\tfor la in problist:\n\t\ttotes = totes + la\n\treturn problist", "def get_verified_ratio(self):\n if len(self.pages) == 0: # There are no pages in this journal \n return 0, 0, 0\n verified = (1, 2, 4) \n numVerified = 0 \n numSeen = 0 \n for page in self.pages: \n numSeen += len(page.names) # page.names is a list of Name objects \n for name in page.names: \n if name.match in verified: \n numVerified += 1\n if numSeen == 0: # No names in any of the pages of the journal \n return 0, 0, 0\n return numVerified, numSeen, numVerified / numSeen", "def _ratios() -> Iterable[float]:\n index = 0\n primes = 0\n\n while True:\n primes += 1 if is_prime(_first_spiral_arm(index)) else 0\n primes += 1 if is_prime(_second_spiral_arm(index)) else 0\n primes += 1 if is_prime(_third_spiral_arm(index)) else 0\n primes += 1 if is_prime(_fourth_spiral_arm(index)) else 0\n\n yield primes / (index * 4 + 1)\n\n index += 1", "def calculateResults(predictions, answers):\r\n t = 0\r\n f = 0\r\n for i in range(len(answers)):\r\n if predictions[i] == answers[i]:\r\n t += 1\r\n else:\r\n f += 1\r\n\r\n print(\"The Percent of Correct Predictions is {t}%\".format(t=round((t * 100 / len(answers)), 1)))\r\n print(\"The Percent of Incorrect Predictions is {f}%\\n\".format(f=round((f * 100 / len(answers)), 1)))", "def precision(self, user_list):\n hit = 0\n all_recom = 0\n print('Calculate precision: ')\n for user in tqdm(user_list):\n recom_data = self._get_recommend(user)\n recom_item = set([data[0] for data in recom_data])\n user_item = set(\n self.test[self.test['userId'] == user]['movieId'].values)\n overlap = recom_item & user_item\n hit += len(overlap)\n all_recom += len(recom_item)\n print('\\nprecision is: ', hit / (all_recom * 1.0))\n return hit / (all_recom * 1.0)", "def score_tweets(objects):\n scores = {}\n for tweet in objects:\n data = tweet._json\n rt = data['retweet_count']\n fave = data['favorite_count']\n fol = data['user']['followers_count']\n weight = 1.5\n score = ((weight * rt + fave) / (fol / 2)) * 1000\n scores[score] = data['id']\n embeds = []\n for item in sorted(scores.items(), reverse=True)[:13]: #sorted returns tuple\n embed = 
twitter.get_oembed(id=item[1],align='center')\n embeds.append(embed['html'])\n return embeds", "def calculate_scores(players):\n scores = {}\n for player in players.tuple_:\n scores[player.id_] = player.score()\n return scores", "def measure(self, recommender):\n similarity = 0\n items_shown = recommender.items_shown\n if items_shown.size == 0:\n # at the beginning of the simulation, there are no recommendations yet\n self.observe(None)\n return\n\n for pair in self.pairs:\n itemset_1 = set(items_shown[pair[0], :])\n itemset_2 = set(items_shown[pair[1], :])\n common = len(itemset_1.intersection(itemset_2))\n union = len(itemset_1.union(itemset_2))\n similarity += common / union / len(self.pairs)\n self.observe(similarity)" ]
[ "0.61019254", "0.5894499", "0.58586526", "0.5768695", "0.5768504", "0.5746886", "0.57437664", "0.57017726", "0.5665368", "0.5643431", "0.56431913", "0.5563038", "0.55320835", "0.5523601", "0.54986566", "0.54957074", "0.54766464", "0.54745346", "0.5454031", "0.54450965", "0.54431146", "0.5418987", "0.54115266", "0.5409334", "0.5405679", "0.5399868", "0.5373609", "0.5370584", "0.5363803", "0.5363604" ]
0.71711147
0
Appends standard aligned index value to ProspectivePrimer objects
def append_std_aligned_index(primers, standard_index_seq, region_slice):
    for n in primers:
        n.std_index = True
        standard_unaligned_index = get_corrected_index(standard_index_seq, n.aligned_index)
        # 5' for forward primer would be upstream of the Xmer by the
        # number of bases in the region slice
        n.f_std_index = standard_unaligned_index - region_slice
        # 5' for reverse primer is the length of the Xmer plus the number
        # of bases in the region slice.
        n.r_std_index = standard_unaligned_index + len(n.seq) + region_slice
    return primers
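A minimal sketch of how this could be exercised; get_corrected_index is assumed to map an aligned position to its gap-free position, and both the stub and FakePrimer below are assumptions rather than parts of the source project:

    # Assumed behaviour: count non-gap characters up to the aligned position.
    def get_corrected_index(aligned_seq, aligned_index):
        return len(aligned_seq[:aligned_index].replace('-', ''))

    # Hypothetical stand-in for a ProspectivePrimer, modeling only the
    # attributes touched by append_std_aligned_index.
    class FakePrimer(object):
        def __init__(self, seq, aligned_index):
            self.seq = seq
            self.aligned_index = aligned_index

    primers = [FakePrimer(seq='ACGTA', aligned_index=7)]
    primers = append_std_aligned_index(primers, 'AC--GTACGT', region_slice=2)
    print(primers[0].f_std_index)  # 3  (unaligned index 5 minus region_slice 2)
    print(primers[0].r_std_index)  # 12 (unaligned index 5 plus len('ACGTA') plus region_slice 2)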
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def table_key(self, reindex_dict):\n reindexed_marks = []\n for m in self.component1.marks:\n new_m = reindex_dict.get(m)\n if new_m == None:\n if len(reindex_dict) == 0:\n new_m = 0\n else:\n new_m = max(reindex_dict.values())+1\n reindex_dict[m] = new_m\n reindexed_marks.append(new_m)\n return tuple( [self.component1.genus] + sorted(reindexed_marks) )", "def __getitem__(self, index):\n self.update(index)\n return self.primes[index]", "def append_primer_hit(primer, \n label,\n hit_index,\n region_slice,\n overall_length,\n unaligned_seq,\n primer_len):\n \n \n primer.match_count+=1\n primer.labels.append(label.split()[0])\n # Fill in 'N' for incomplete sequences\n # Set primer_index to 0 in case slicing left end of sequence\n primer_index=hit_index-region_slice\n if primer_index<0:\n primer_index=0\n unknown_bases=overall_length-len(unaligned_seq[primer_index:hit_index+\n primer_len])\n if unknown_bases>0:\n filler=\"-\"*unknown_bases\n else:\n filler=\"\"\n upstream_region=filler+unaligned_seq[primer_index:hit_index+primer_len]\n primer.upstream_regions.append(upstream_region)\n unknown_bases=overall_length-len(unaligned_seq[hit_index:hit_index+\n primer_len+region_slice])\n if unknown_bases>0:\n filler=\"-\"*unknown_bases\n else:\n filler=\"\"\n downstream_region=unaligned_seq[hit_index:hit_index +\n primer_len+region_slice]+filler\n primer.downstream_regions.append(downstream_region)\n return", "def generate_inv_index(people):\n pass", "def fix_index(self,priority):\n #if self.record_size < self.size:\n self.record_size += 1\n \n if self.record_size > self.size:\n\n '''\n if self.replace_flag:\n self.index = bisect.bisect_right(self.priority_queue, priority)\n self.priority_queue.insert(self.index, priority)\n self.priority_queue.pop(0)\n return self.index\n else:\n '''\n sys.stderr.write('Experience replay buff is full and replace is set to FALSE!\\n')\n self.index = -10\n return self.index\n else:\n self.index = bisect.bisect_right(self.priority_queue, priority)\n self.priority_queue.insert(self.index, priority)\n return self.index", "def _pval_pairs(self, idx0, idx1):\n pass", "def up_index(index):\n return 2 * index", "def test_align_idx(self):\n self.amp4.rotateAng([5, 5, 5], ang='deg')\n al = align(self.amp3, self.amp4, mv=[0, 1, 2, 3], sv=[0, 1, 2, 3], method='idxPoints')\n all(self.assertAlmostEqual(al.m.vert[i, 0], al.s.vert[i, 0], delta=0.1) for i in range(al.s.vert.shape[0]))", "def align_probas(self, probas, generator):\n if probas.ndim <= 2:\n return probas\n\n num_class = probas.shape[-1]\n output = np.zeros((generator.num_samples, num_class), dtype=probas.dtype)\n\n for idxs, p in zip(generator.idx_pairs, probas):\n output[idxs[0]:idxs[1],:] += p\n\n output /= np.linalg.norm(output, axis=1, keepdims=True)\n\n return output", "def update_idx(self):\n self.idx = (self.F * self.FMUL +\n self.E * self.EMUL +\n self.Z * self.ZMUL +\n self.A * self.AMUL +\n self.B * self.BMUL )", "def retrograde(self, index=0):\r\n retr = self.prime(index=index).__reversed__()\r\n return retr", "def NewStartingIndex(self) -> int:", "def support_idx(self, j):\n return (j, j+self.p+1)", "def align(self):\n ...", "def _prog_field_indices(self):\n\n if self._pfi is not None:\n return self._pfi\n\n self.arbor._grow_tree(self)\n self._pfi = np.array([node.tree_id for node in self._prog_nodes])\n return self._pfi", "def _normalize_index(self, index: int):\n if index < 0:\n return len(self) + index\n else:\n return index", "def OldStartingIndex(self) -> int:", "def buildindex4(invertedindex, 
index):\n sortedbycount = sorted(invertedindex.items(), key=lambda x: x[1][1])\n startindex = math.floor((2*len(sortedbycount))/100) + 1\n for word, count in sortedbycount[startindex:]:\n index[word] = count\n return", "def change_priorities(self,idxs, errors): \n for i in range(len(idxs)):\n self.update(idxs[i] , errors[i])", "def _natural_index(self, index):\r\n if index < 0:\r\n return len(self.data) + index\r\n return index", "def push_dynamical_merging_index(self):\n Total_dyn_mergindex = np.zeros((0,), dtype=np.float)\n ParType0_dyn_mergindex = np.zeros((0,), dtype=np.float)\n ParType1_dyn_mergindex = np.zeros((0,), dtype=np.float)\n ParType4_dyn_mergindex = np.zeros((0,), dtype=np.float)\n ParType5_dyn_mergindex = np.zeros((0,), dtype=np.float)\n\n for r in self.cluster.generate_apertures():\n part_dyn_mergindex_aperture = self.cluster.group_dynamical_merging_index(aperture_radius=r, \n out_allPartTypes=True)\n ParType0_dyn_mergindex = np.concatenate((ParType0_dyn_mergindex, [part_dyn_mergindex_aperture[0]]), axis=0)\n ParType1_dyn_mergindex = np.concatenate((ParType1_dyn_mergindex, [part_dyn_mergindex_aperture[1]]), axis=0)\n ParType4_dyn_mergindex = np.concatenate((ParType4_dyn_mergindex, [part_dyn_mergindex_aperture[2]]), axis=0)\n ParType5_dyn_mergindex = np.concatenate((ParType5_dyn_mergindex, [part_dyn_mergindex_aperture[3]]), axis=0)\n\n Total_dyn_mergindex_apertur = self.cluster.group_dynamical_merging_index(aperture_radius=r, \n out_allPartTypes=False)\n Total_dyn_mergindex = np.concatenate((Total_dyn_mergindex, [Total_dyn_mergindex_apertur]), axis=0)\n\n data = {'/Total_dyn_mergindex' : np.array(Total_dyn_mergindex),\n '/ParType0_dyn_mergindex': np.array(ParType0_dyn_mergindex),\n '/ParType1_dyn_mergindex': np.array(ParType1_dyn_mergindex),\n '/ParType4_dyn_mergindex': np.array(ParType4_dyn_mergindex),\n '/ParType5_dyn_mergindex': np.array(ParType5_dyn_mergindex)}\n\n attributes = {'Description': \"\"\"Datasets with the dynamical merging index of the cluster, calculated \n from particles within a specific aperture radius from the Centre of Potential. Individual datasets contain \n merging index information about each particle type separately, as well as one with combined total \n contribution.\n The dynamical merging index is computed according to the equation:\n dynamical_merging_index = || CoM(r) - CoP(r) || / r.\n \n Note: The particle type infomation combines the CoM calculated for every particle type and the \n overall CoP of the whole FoF cluster. I.e., the CoP is not computed in a particle type-wise manner. 
\n If in doubt, use the Total_dynindex dataset, which contains the dynamical merging index computed for \n all particle types within a given aperture.\n \"\"\",\n 'Units': '[None]'}\n\n out = FOFOutput(self.cluster, filename='dynamical_merging_index.hdf5', data=data, attrs=attributes)\n out.makefile()", "def fixIndexes(self,ind1,ind2):\n if ind1 > 0:\n ind1 -= 1\n else:\n ind2 += 1\n return ind1,ind2", "def scorePrimers(seq_record, primers, start=default_start, rev_primer=False, \n score_dict=getDNAScoreDict(mask_score=(0, 1), gap_score=(0, 0))):\n # Create empty return dictionary\n seq_record = seq_record.upper()\n align = PrimerAlignment(seq_record)\n align.rev_primer = rev_primer\n\n # Define orientation variables\n seq_record.annotations['seqorient'] = 'F'\n seq_record.annotations['prorient'] = 'F' if not rev_primer else 'RC'\n\n # Score primers\n this_align = {}\n rec_len = len(seq_record)\n if rev_primer: end = rec_len - start\n for adpt_id, adpt_seq in primers.items():\n if rev_primer: start = end - len(adpt_seq)\n else: end = start + len(adpt_seq)\n chars = zip(seq_record[start:end], adpt_seq)\n score = sum([score_dict[(c1, c2)] for c1, c2 in chars])\n this_align.update({adpt_id: (score, start, end)})\n\n # Determine primer with lowest error rate\n best_align, best_adpt, best_err = None, None, None\n for adpt, algn in this_align.items():\n #adpt_err = 1.0 - float(algn[0]) / weightSeq(primers[adpt])\n err = 1.0 - float(algn[0]) / len(primers[adpt])\n if best_err is None or err < best_err:\n best_align = algn\n best_adpt = adpt\n best_err = err\n\n # Set return dictionary to lowest error rate alignment\n if best_align:\n # Populate return object\n align.primer = best_adpt if best_err < 1.0 else None\n align.start = best_align[1]\n align.end = best_align[2]\n align.error = best_err\n align.valid = True\n\n # Determine alignment sequences\n if not rev_primer:\n align.align_seq = str(seq_record.seq[:best_align[2]])\n align.align_primer = '-' * best_align[1] + primers[best_adpt]\n else:\n align.align_seq = str(seq_record.seq[best_align[1]:])\n align.align_primer = primers[best_adpt] + '-' * (rec_len - best_align[2])\n \n return align", "def update_priority(self, indexes, values):\n values = values * 10000\n values = self._clip_p(values)\n values = int(values)\n self.sum_tree.update(indexes, values)", "def change_priorities(self,idxs,errors):\n #print(\"Indecies \",idxs)\n for i,idx in enumerate(idxs):\n self.update(idx, errors[i])", "def index(self, index):\n index.column_protein[self.column].add((self.protein,self.protein_res))\n index.protein_domain[(self.protein.id,self.protein_res)] = (self.domain,self.domain_res)\n index.domain_structure[(self.domain.id,self.domain_res)].add((self.structure,self.structure_res))\n index.structure[(self.structure.index, self.structure_res)] = self", "def __getitem__(self, index):\n sample, label = self.data[index], self.labels[index]\n\n # transform the sample and the label,\n # in order to feed them to the model\n vec_sample = vectorize(sample, self.word2idx, self.length)\n\n # PROSOXH EIXAME BUUUUUUUUUUUUUUUUUUUG ZHTAGAME index POU > MAX_LENGTH\n return vec_sample, label, min(len(self.data[index]), self.length)", "def generate_reverse_index(self):", "def primer_start_fix(self):\r\n #TODO this function will not be used anymore, remove?\r\n if self.type in [\"forward_primer\", \"reverse_primer\", \"PCR_product\"]:\r\n self.start += 1\r\n if self.type == \"region\" and self.source == \"Primer3\":\r\n # this is the region containing the primers\r\n 
self.start += 1", "def get_Pre_Succ(I):\n #Docs = I.docs\n #Docs_id = Docs.keys()\n Docs = I.getIndex().all_ids_\n Docs_id = [ int(float(k)) for k in Docs] \n N_pgs = len(Docs_id)\n Index_P = { id:idx for idx,id in enumerate(Docs_id)}\n Counter_Index_P = { idx:id for idx,id in enumerate(Docs_id)}\n \n print \"\\nBuilding Pi...\"\n Succ = { Index_P[p]:(I.getLinksForDoc(p),len(I.getLinksForDoc(p))) for p in Docs_id }\n P = {}\n for e in Succ:\n succ_e,l_e = Succ[e]\n for s in succ_e: \n if Index_P.get(s,\"Unknown_Doc_id\") not in P:\n P[Index_P.get(s,\"Unknown_Doc_id\")] = set()\n P[Index_P.get(s,\"Unknown_Doc_id\")].add(e) \n \n return P,Succ,Index_P,Counter_Index_P,N_pgs" ]
[ "0.590127", "0.5743373", "0.5539483", "0.5368967", "0.52574486", "0.5224323", "0.52219516", "0.5219616", "0.51754165", "0.5169657", "0.5157575", "0.51386046", "0.51257545", "0.5122756", "0.50792426", "0.50733215", "0.5063912", "0.5045901", "0.50283784", "0.50128955", "0.5008945", "0.5003716", "0.49797863", "0.49714077", "0.4948095", "0.49409518", "0.49389058", "0.49244434", "0.49121916", "0.48903987" ]
0.68654364
0
CASSANDRA9871 Test that we can replace a node that is shutdown gracefully.
def replace_shutdown_node_test(self):
    self._replace_node_test(gently=True)
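The gently=True flag implies a sibling test for the ungraceful case; a sketch under that assumption (the name below is illustrative, not confirmed by this record):

    def replace_stopped_node_test(self):
        """
        Replace a node that was killed without a graceful shutdown.
        """
        self._replace_node_test(gently=False)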
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_node_graceful_shutdown(self, proc_info, controller_node):\n launch_testing.asserts.assertExitCodes(proc_info, process=controller_node)", "def test_05_node_down_and_resync_hard(self):\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n test_rest.db_simulate(cluster, 240)\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'stopping cluster {cluster} node with port {port} - during load')\n test_rest.docker_stop(cluster, port)\n # restart nodes\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'restarting cluster {cluster} node with port {port}')\n test_rest.expand_data_cluster(cluster, port=port)\n test_rest.step(\"restarted nodes, waiting 10 seconds to begin monitoring table state & running sync jobs\")\n time.sleep(10)\n test_rest.cluster.insync_and_state_check()\n while test_rest.is_running_simulations():\n print(\"waiting on running simulations to complete\")\n time.sleep(10)\n test_rest.cluster.insync_and_state_check()\n test_rest.cluster.verify_data()", "def test_create_node_shutdown_item(self):\n pass", "def test_04_node_down_and_resync_soft(self):\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'stopping cluster {cluster} node with port {port}')\n test_rest.docker_stop(cluster, port)\n test_rest.step(f\"starting db_simulator on cluster {cluster}\")\n test_rest.db_simulate(cluster, 180)\n # restart nodes\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'restarting cluster {cluster} node with port {port}')\n test_rest.expand_data_cluster(cluster, port=port)\n test_rest.step(\"restarted nodes, waiting 10 seconds to begin monitoring table state & running sync jobs\")\n time.sleep(10)\n test_rest.cluster.insync_and_state_check()\n while test_rest.is_running_simulations():\n print(\"waiting on running simulations to complete\")\n time.sleep(10)\n test_rest.cluster.verify_data()", "def reboot(self, node):", "def test_replaceDoesNotDisableCircuitBreaker(self):\n disco = create_disco()\n node = create_node(\"somewhere\")\n disco.onMessage(None, NodeActive(node))\n resolved_node = resolve(disco, \"myservice\", \"1.0\")\n # Uh-oh it's a pretty broken node:\n for i in range(10):\n resolved_node.failure()\n\n node = create_node(\"somewhere\")\n disco.onMessage(None, ReplaceCluster(\"myservice\",\n SANDBOX_ENV,\n [node]))\n resolved_node2 = resolve(disco, \"myservice\", \"1.0\")\n self.assertEqual(resolved_node2, None)\n resolved_node.success()\n self.assertNodesEqual(resolve(disco, \"myservice\", \"1.0\"), node)", "def resumable_replace_test(self):\n\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n node1.stress(['write', 'n=100K', 'no-warmup', '-schema', 'replication(factor=3)'])\n\n session = self.patient_cql_connection(node1)\n stress_table = 'keyspace1.standard1'\n query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.THREE)\n initial_data = rows_to_list(session.execute(query))\n\n node3.stop(gently=False)\n\n # kill node1 in the middle of streaming to let it fail\n t = InterruptBootstrap(node1)\n t.start()\n # replace node 3 with node 4\n debug(\"Starting node 4 to replace node 3\")\n node4 = Node('node4', cluster=cluster, 
auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160),\n storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0',\n initial_token=None, binary_interface=('127.0.0.4', 9042))\n # keep timeout low so that test won't hang\n node4.set_configuration_options(values={'streaming_socket_timeout_in_ms': 1000})\n cluster.add(node4, False)\n try:\n node4.start(jvm_args=[\"-Dcassandra.replace_address_first_boot=127.0.0.3\"], wait_other_notice=False)\n except NodeError:\n pass # node doesn't start as expected\n t.join()\n\n # bring back node1 and invoke nodetool bootstrap to resume bootstrapping\n node1.start()\n node4.nodetool('bootstrap resume')\n # check if we skipped already retrieved ranges\n node4.watch_log_for(\"already available. Skipping streaming.\")\n # wait for node3 ready to query\n node4.watch_log_for(\"Listening for thrift clients...\")\n\n # check if 2nd bootstrap succeeded\n assert_bootstrap_state(self, node4, 'COMPLETED')\n\n # query should work again\n debug(\"Stopping old nodes\")\n node1.stop(gently=False, wait_other_notice=True)\n node2.stop(gently=False, wait_other_notice=True)\n\n debug(\"Verifying data on new node.\")\n session = self.patient_exclusive_cql_connection(node4)\n assert_all(session, 'SELECT * from {} LIMIT 1'.format(stress_table),\n expected=initial_data,\n cl=ConsistencyLevel.ONE)", "def fail_without_replace_test(self):\n debug(\"Starting cluster with 3 nodes.\")\n cluster = self.cluster\n cluster.populate(3)\n node1, node2, node3 = cluster.nodelist()\n cluster.seeds.remove(node3)\n NUM_TOKENS = os.environ.get('NUM_TOKENS', '256')\n if DISABLE_VNODES:\n cluster.set_configuration_options(values={'initial_token': None, 'num_tokens': 1})\n else:\n cluster.set_configuration_options(values={'initial_token': None, 'num_tokens': NUM_TOKENS})\n cluster.start()\n\n debug(\"Inserting Data...\")\n node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=3)'])\n\n mark = None\n for auto_bootstrap in (True, False):\n debug(\"Stopping node 3.\")\n node3.stop(gently=False)\n\n # completely delete the data, commitlog, and saved caches\n for d in chain([os.path.join(node3.get_path(), \"commitlogs\")],\n [os.path.join(node3.get_path(), \"saved_caches\")],\n node3.data_directories()):\n if os.path.exists(d):\n rmtree(d)\n\n node3.set_configuration_options(values={'auto_bootstrap': auto_bootstrap})\n debug(\"Starting node 3 with auto_bootstrap = {val}\".format(val=auto_bootstrap))\n node3.start(wait_other_notice=False)\n node3.watch_log_for('Use cassandra.replace_address if you want to replace this node', from_mark=mark, timeout=20)\n mark = node3.mark_log()", "def test_replaceDoesNotMutate(self):\n disco = create_disco()\n node = create_node(\"somewhere\")\n disco.onMessage(None, NodeActive(node))\n resolved_node = resolve(disco, \"myservice\", \"1.0\")\n\n node2 = create_node(\"somewhere\")\n node2.version = \"1.3\"\n disco.onMessage(None, ReplaceCluster(\"myservice\",\n SANDBOX_ENV,\n [node2]))\n self.assertEqual(resolved_node.version, \"1.0\")", "def test_create_node_reboot_item(self):\n pass", "def ex_shutdown_node(self, node):\n # NOTE: This method is here for backward compatibility reasons after\n # this method was promoted to be part of the standard compute API in\n # Libcloud v2.7.0\n return self.stop_node(node=node)", "def test_restart_node_with_encrypted_pkeys(self):\n self.x509.generate_multiple_x509_certs(servers=self.servers[:self.nodes_init])\n self.x509.upload_root_certs(self.master)\n 
self.x509.upload_node_certs(servers=self.servers[:self.nodes_init])\n rest = RestConnection(self.master)\n nodes_in_cluster = [node for node in self.servers[:self.nodes_init]]\n for node in self.servers[1:self.nodes_init]:\n shell = RemoteMachineShellConnection(node)\n shell.restart_couchbase()\n shell.disconnect()\n self.sleep(10, \"Wait after restart\")\n self.cluster.async_failover(nodes_in_cluster,\n [node],\n graceful=False)\n self.wait_for_failover_or_assert(1)\n rest.set_recovery_type(\"ns_1@\" + node.ip, recoveryType=\"delta\")\n https_val = CbServer.use_https # so that add_node uses https\n CbServer.use_https = True\n task = self.cluster.async_rebalance(nodes_in_cluster, [], [])\n CbServer.use_https = https_val\n self.wait_for_rebalance_to_complete(task)\n shell = RemoteMachineShellConnection(node)\n shell.restart_couchbase()\n shell.disconnect()\n https_val = CbServer.use_https # so that add_node uses https\n CbServer.use_https = True\n task = self.cluster.async_rebalance(nodes_in_cluster,\n [], [node])\n self.wait_for_rebalance_to_complete(task)\n CbServer.use_https = https_val\n nodes_in_cluster.remove(node)", "def replace_with_reset_resume_state_test(self):\n\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n node1.stress(['write', 'n=100K', 'no-warmup', '-schema', 'replication(factor=3)'])\n\n session = self.patient_cql_connection(node1)\n stress_table = 'keyspace1.standard1'\n query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.THREE)\n initial_data = rows_to_list(session.execute(query))\n\n node3.stop(gently=False)\n\n # kill node1 in the middle of streaming to let it fail\n t = InterruptBootstrap(node1)\n t.start()\n # replace node 3 with node 4\n debug(\"Starting node 4 to replace node 3\")\n node4 = Node('node4', cluster=cluster, auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160),\n storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0',\n initial_token=None, binary_interface=('127.0.0.4', 9042))\n\n # keep timeout low so that test won't hang\n node4.set_configuration_options(values={'streaming_socket_timeout_in_ms': 1000})\n cluster.add(node4, False)\n try:\n node4.start(jvm_args=[\"-Dcassandra.replace_address_first_boot=127.0.0.3\"], wait_other_notice=False)\n except NodeError:\n pass # node doesn't start as expected\n t.join()\n node1.start()\n\n # restart node4 bootstrap with resetting bootstrap state\n node4.stop()\n mark = node4.mark_log()\n node4.start(jvm_args=[\n \"-Dcassandra.replace_address_first_boot=127.0.0.3\",\n \"-Dcassandra.reset_bootstrap_progress=true\"\n ])\n # check if we reset bootstrap state\n node4.watch_log_for(\"Resetting bootstrap progress to start fresh\", from_mark=mark)\n # wait for node3 ready to query\n node4.watch_log_for(\"Listening for thrift clients...\", from_mark=mark)\n\n # check if 2nd bootstrap succeeded\n assert_bootstrap_state(self, node4, 'COMPLETED')\n\n # query should work again\n debug(\"Stopping old nodes\")\n node1.stop(gently=False, wait_other_notice=True)\n node2.stop(gently=False, wait_other_notice=True)\n\n debug(\"Verifying data on new node.\")\n session = self.patient_exclusive_cql_connection(node4)\n assert_all(session, 'SELECT * from {} LIMIT 1'.format(stress_table),\n expected=initial_data,\n cl=ConsistencyLevel.ONE)", "def test_replace(self):\n disco = create_disco()\n node1 = create_node(\"somewhere\")\n node2 = create_node(\"somewhere2\")\n node3 = create_node(\"somewhere3\")\n 
node4 = create_node(\"somewhere4\")\n disco.onMessage(None, NodeActive(node1))\n disco.onMessage(None, NodeActive(node2))\n disco.onMessage(None, ReplaceCluster(\"myservice\",\n SANDBOX_ENV,\n [node3, node4]))\n self.assertEqual(knownNodes(disco, \"myservice\", \"sandbox\"), [node3, node4])", "async def test_early_exit(self):\n n = Node()\n run_task = asyncio.create_task(n.run_node())\n await asyncio.sleep(0)\n self.assertFalse(n.check_alive())\n n.exit_node()\n await n.wait_running()\n await n.wait_stopped()\n await run_task\n await self._check_exited_node(n)", "def test_unavailable_server(cluster):\n node2 = cluster.instances[\"node2\"]\n global uuids\n node2.query(\n \"\"\"\n CREATE TABLE test0 UUID '{}'\n (id Int32) ENGINE = MergeTree() ORDER BY id\n SETTINGS storage_policy = 'web';\n \"\"\".format(\n uuids[0]\n )\n )\n node2.stop_clickhouse()\n try:\n # NOTE: you cannot use separate disk instead, since MergeTree engine will\n # try to lookup parts on all disks (to look unexpected disks with parts)\n # and fail because of unavailable server.\n node2.exec_in_container(\n [\n \"bash\",\n \"-c\",\n \"sed -i 's#http://nginx:80/test1/#http://nginx:8080/test1/#' /etc/clickhouse-server/config.d/storage_conf_web.xml\",\n ]\n )\n with pytest.raises(Exception):\n # HTTP retries with backup can take awhile\n node2.start_clickhouse(start_wait_sec=120, retry_start=False)\n assert node2.contains_in_log(\n \"Caught exception while loading metadata.*Connection refused\"\n )\n assert node2.contains_in_log(\n \"HTTP request to \\`http://nginx:8080/test1/.*\\` failed at try 1/10 with bytes read: 0/unknown. Error: Connection refused.\"\n )\n finally:\n node2.exec_in_container(\n [\n \"bash\",\n \"-c\",\n \"sed -i 's#http://nginx:8080/test1/#http://nginx:80/test1/#' /etc/clickhouse-server/config.d/storage_conf_web.xml\",\n ]\n )\n node2.start_clickhouse()\n node2.query(\"DROP TABLE test0 SYNC\")", "def simulate_node_failure(node_ips, max_duration, tests_completed):\n run = True\n l.info(\"START Cassandra Node Failure Simulation. Entering.\")\n while run:\n # If stress-tests are still running continue with node failure simulation\n if not tests_completed.isSet():\n # Select 'random' node from Cassandra Cluster\n node_ip = select_random_node(node_ips)\n # Determine delay before stopping cassandra node (to simulate failure / node down)\n duration_secs = max_duration*60\n time_next_stop = random.randint(1, duration_secs/4)\n l.debug(\"STOP programmed in %s seconds\" % time_next_stop)\n # Wait\n time.sleep(time_next_stop)\n ssh_fail = False\n # Stop Cassandra Node (simulate failure / stop the service)\n stop_cmd = \"sudo service cassandra stop\"\n l.debug(\"STOP Cassandra Node: %s\"%node_ip)\n try:\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(str(node_ip))\n l.debug(\"[Simulate Cassandra Node Failure] Connected to host: %s\" % node_ip)\n except paramiko.AuthenticationException as e:\n l.error(\"Authentication failed when connecting to %s. 
ERROR: %s\" % (node_ip, e))\n ssh_fail = True\n except:\n l.error(\"Could not SSH to %s, waiting for it to start\" % node_ip)\n ssh_fail = True\n if not ssh_fail:\n # Send the command to STOP cassandra node\n ssh.exec_command(stop_cmd)\n # Determine delay before starting cassandra node (to simulate rejoin to the cluster)\n time_next_rejoin = random.randint(1, duration_secs/4)\n l.debug(\"START programmed in %s seconds\" % time_next_rejoin)\n time.sleep(time_next_rejoin)\n # Start Cassandra Node (simulate rejoin / start the service)\n start_cmd = \"sudo service cassandra start\"\n l.debug(\"START Cassandra Node: %s\"%node_ip)\n # Send the command (non-blocking)\n ssh.exec_command(start_cmd)\n # Disconnect from the host\n l.debug(\"Closing SSH connection to host: %s\" % node_ip)\n ssh.close()\n run=False\n else:\n # Tests Complete has been signaled\n run=False\n l.info(\"END node failure simulation. Exiting.\")", "def test_redeploy_edges(self):\n pass", "def _replace_node_test(self, gently):\n debug(\"Starting cluster with 3 nodes.\")\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n if DISABLE_VNODES:\n num_tokens = 1\n else:\n # a little hacky but grep_log returns the whole line...\n num_tokens = int(node3.get_conf_option('num_tokens'))\n\n debug(\"testing with num_tokens: {}\".format(num_tokens))\n\n debug(\"Inserting Data...\")\n node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=3)'])\n\n session = self.patient_cql_connection(node1)\n session.default_timeout = 45\n stress_table = 'keyspace1.standard1'\n query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.THREE)\n initial_data = rows_to_list(session.execute(query))\n\n # stop node, query should not work with consistency 3\n debug(\"Stopping node 3.\")\n node3.stop(gently=gently, wait_other_notice=True)\n\n debug(\"Testing node stoppage (query should fail).\")\n with self.assertRaises(NodeUnavailable):\n try:\n query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.THREE)\n session.execute(query)\n except (Unavailable, ReadTimeout):\n raise NodeUnavailable(\"Node could not be queried.\")\n\n # replace node 3 with node 4\n debug(\"Starting node 4 to replace node 3\")\n node4 = Node('node4', cluster=cluster, auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160),\n storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0',\n initial_token=None, binary_interface=('127.0.0.4', 9042))\n cluster.add(node4, False)\n node4.start(replace_address='127.0.0.3', wait_for_binary_proto=True)\n\n debug(\"Verifying tokens migrated sucessfully\")\n moved_tokens = node4.grep_log(\"Token .* changing ownership from /127.0.0.3 to /127.0.0.4\")\n debug(\"number of moved tokens: {}\".format(len(moved_tokens)))\n self.assertEqual(len(moved_tokens), num_tokens)\n\n # check that restarting node 3 doesn't work\n debug(\"Try to restart node 3 (should fail)\")\n node3.start(wait_other_notice=False)\n collision_log = node1.grep_log(\"between /127.0.0.3 and /127.0.0.4; /127.0.0.4 is the new owner\")\n debug(collision_log)\n self.assertEqual(len(collision_log), 1)\n node3.stop(gently=False)\n\n # query should work again\n debug(\"Stopping old nodes\")\n node1.stop(gently=False, wait_other_notice=True)\n node2.stop(gently=False, wait_other_notice=True)\n\n debug(\"Verifying data on new node.\")\n session = self.patient_exclusive_cql_connection(node4)\n assert_all(session, 
'SELECT * from {} LIMIT 1'.format(stress_table),\n expected=initial_data,\n cl=ConsistencyLevel.ONE)", "def test_concurrent_failover_timer_reset(self):\n\n services_to_fo = self.failover_order[0].split(\":\")\n self.nodes_to_fail = self.get_nodes_to_fail(services_to_fo,\n dynamic_fo_method=True)\n expected_fo_nodes = self.num_nodes_to_be_failover\n self.__update_server_obj()\n rand_node = choice(self.nodes_to_fail.keys())\n self.__update_unaffected_node()\n self.__display_failure_node_status(\"Nodes to be failed\")\n try:\n self.log.info(\"Starting auto-failover procedure\")\n failover_task = ConcurrentFailoverTask(\n task_manager=self.task_manager, master=self.orchestrator,\n servers_to_fail=self.nodes_to_fail,\n expected_fo_nodes=expected_fo_nodes,\n task_type=\"induce_failure\")\n self.task_manager.add_new_task(failover_task)\n self.sleep(int(self.timeout * 0.7),\n \"Wait before bringing back the failed nodes\")\n\n self.log.info(\"Bringing back '%s' for some time\" % rand_node.ip)\n new_timer = None\n shell = RemoteMachineShellConnection(rand_node)\n cb_err = CouchbaseError(self.log, shell)\n if self.nodes_to_fail[rand_node] == CouchbaseError.STOP_MEMCACHED:\n cb_err.revert(CouchbaseError.STOP_MEMCACHED)\n self.sleep(10, \"Wait before creating failure again\")\n cb_err.create(CouchbaseError.STOP_MEMCACHED)\n new_timer = time()\n elif self.nodes_to_fail[rand_node] == \"stop_couchbase\":\n cb_err.revert(CouchbaseError.STOP_SERVER)\n self.sleep(10, \"Wait before creating failure again\")\n cb_err.create(CouchbaseError.STOP_SERVER)\n new_timer = time()\n shell.disconnect()\n\n # Validate the previous auto-failover task failed\n # due to the random_node coming back online\n self.task_manager.get_task_result(failover_task)\n self.assertFalse(failover_task.result,\n \"Nodes failed over though nodes became active\")\n\n # Validate auto_failover_settings\n self.validate_failover_settings(True, self.timeout,\n 0, self.max_count)\n\n # Make sure the new auto-failover timing is honoured\n new_timer = new_timer + self.timeout\n while int(time()) < new_timer:\n settings = self.rest.get_autofailover_settings()\n if settings.count != 0:\n self.fail(\"Nodes failed over before new failover time\")\n\n self.sleep(10, \"Wait for failover rebalance to trigger\")\n self.rest.monitorRebalance()\n\n # Validate auto_failover_settings after actual auto failover\n self.validate_failover_settings(True, self.timeout,\n expected_fo_nodes, self.max_count)\n finally:\n # Recover all nodes from induced failures\n failover_task = ConcurrentFailoverTask(\n task_manager=self.task_manager, master=self.orchestrator,\n servers_to_fail=self.nodes_to_fail,\n expected_fo_nodes=expected_fo_nodes,\n task_type=\"revert_failure\")\n self.task_manager.add_new_task(failover_task)\n self.task_manager.get_task_result(failover_task)\n self.log.info(\"Rebalance out the failed nodes\")\n result = self.cluster_util.rebalance(self.cluster)\n self.assertTrue(result, \"Final rebalance failed\")\n\n # Perform collection crud + doc_ops after rebalance operation\n self.__perform_doc_ops()", "def test_update_node_state_smartfail(self):\n pass", "def test_reboot_system(duthosts, localhost, all_cfg_facts, nbrhosts, nbr_macs, tbinfo):\n\n @reset_ansible_local_tmp\n def reboot_node(lh, node=None, results=None):\n node_results = []\n node_results.append(reboot(node, lh, wait=120))\n results[node.hostname] = node_results\n\n logger.info(\"=\" * 80)\n logger.info(\"Precheck\")\n logger.info(\"-\" * 80)\n\n check_intfs_and_nbrs(duthosts, all_cfg_facts, 
nbrhosts, nbr_macs)\n check_ip_fwd(duthosts, all_cfg_facts, nbrhosts, tbinfo)\n\n logger.info(\"=\" * 80)\n logger.info(\"Coldboot on all nodes\")\n logger.info(\"-\" * 80)\n\n t0 = time.time()\n\n parallel_run(reboot_node, [localhost], {}, duthosts.nodes, timeout=1000)\n\n for node in duthosts.nodes:\n assert wait_until(300, 20, 2, node.critical_services_fully_started),\\\n \"Not all critical services are fully started\"\n\n poll_bgp_restored(duthosts)\n\n t1 = time.time()\n elapsed = t1 - t0\n\n logger.info(\"-\" * 80)\n logger.info(\"Time to reboot and recover: %s seconds.\", str(elapsed))\n logger.info(\"-\" * 80)\n\n logger.info(\"=\" * 80)\n logger.info(\"Postcheck\")\n logger.info(\"-\" * 80)\n\n check_intfs_and_nbrs(duthosts, all_cfg_facts, nbrhosts, nbr_macs)\n check_ip_fwd(duthosts, all_cfg_facts, nbrhosts, tbinfo)", "def test_cluster_works_fine_after_deleting_CA_folder(self):\n self.x509.generate_multiple_x509_certs(servers=self.servers[:self.nodes_init])\n random_nodes = random.sample(self.servers[1:self.nodes_init], 1)\n self.log.info(\"Uploading root certs from {0}\".format(random_nodes[0]))\n self.x509.upload_root_certs(random_nodes[0])\n self.x509.upload_node_certs(servers=self.servers[:self.nodes_init])\n self.x509.delete_unused_out_of_the_box_CAs(server=self.master)\n self.x509.upload_client_cert_settings(server=self.master)\n shell = RemoteMachineShellConnection(random_nodes[0])\n shell.remove_directory(self.x509.install_path + x509main.CHAINFILEPATH +\n \"/\" + x509main.TRUSTEDCAPATH)\n shell.disconnect()\n\n failover_nodes = random_nodes\n nodes_in_cluster = self.servers[:self.nodes_init]\n for operation in [\"recovery\", \"out\"]:\n shell = RemoteMachineShellConnection(failover_nodes[0])\n shell.stop_server()\n self.cluster.async_failover(self.servers[:self.nodes_init],\n failover_nodes,\n graceful=False)\n self.wait_for_failover_or_assert(1)\n if operation == \"out\":\n https_val = CbServer.use_https # so that add_node uses https\n CbServer.use_https = True\n rest = RestConnection(self.master)\n otp_nodes = []\n ejected_nodes = []\n for node in nodes_in_cluster:\n otp_nodes.append('ns_1@'+node.ip)\n for node in failover_nodes:\n ejected_nodes.append('ns_1@' + node.ip)\n status = rest.rebalance(otpNodes=otp_nodes, ejectedNodes=ejected_nodes)\n if not status:\n shell.start_server(failover_nodes[0])\n self.fail(\"rebalance/failover failed\")\n CbServer.use_https = https_val\n nodes_in_cluster = nodes_in_cluster.remove(failover_nodes[0])\n shell.start_server(failover_nodes[0])\n if operation == \"recovery\":\n rest = RestConnection(self.master)\n for node in failover_nodes:\n rest.set_recovery_type(\"ns_1@\" + node.ip, recoveryType=\"delta\")\n https_val = CbServer.use_https # so that add_node uses https\n CbServer.use_https = True\n task = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])\n self.wait_for_rebalance_to_complete(task)\n CbServer.use_https = https_val\n self.auth(servers=nodes_in_cluster)", "def test_replaceEmpty(self):\n disco = create_disco()\n node1 = create_node(\"somewhere\")\n node2 = create_node(\"somewhere2\")\n disco.onMessage(None, ReplaceCluster(\"myservice\",\n SANDBOX_ENV,\n [node1, node2]))\n self.assertEqual(knownNodes(disco, \"myservice\", \"sandbox\"), [node1, node2])", "def _shutdown_node(self, conn):\n compose_fname = COMPOSE_FNAME\n exec_plan = self.node_exec_plan.copy()\n while len(exec_plan) > 0:\n container_name = exec_plan.pop()\n self.__shutdown_service(conn, compose_fname, container_name)", "def 
test_shutdown_restart(self):\n log.info(\"CONFIG: %s\", self._agent_config())\n self.create_sample_data_set_dir('node59p1_step2.dat', TELEM_DIR, \"node59p1.dat\")\n\n self.assert_initialize(final_state=ResourceAgentState.COMMAND)\n\n # Slow down processing to 1 per second to give us time to stop\n self.dataset_agent_client.set_resource({DriverParameter.RECORDS_PER_SECOND: 1})\n self.assert_start_sampling()\n\n # Verify we get one sample\n try:\n # Read the first file and verify the data\n result = self.data_subscribers.get_samples(DataParticleType.CONTROL, 1)\n result1 = self.data_subscribers.get_samples(DataParticleType.SAMPLE, 3)\n result.extend(result1)\n log.debug(\"RESULT: %s\", result)\n\n # Verify values\n self.assert_data_values(result, 'test_data_1-2.txt.result.yml')\n self.assert_sample_queue_size(DataParticleType.CONTROL, 0)\n self.assert_sample_queue_size(DataParticleType.SAMPLE, 0)\n\n self.create_sample_data_set_dir('node59p1_step4.dat', TELEM_DIR, \"node59p1.dat\")\n # Now read the first record of the second file then stop\n result = self.data_subscribers.get_samples(DataParticleType.SAMPLE, 3)\n log.debug(\"RESULT 1: %s\", result)\n self.assert_stop_sampling()\n self.assert_sample_queue_size(DataParticleType.CONTROL, 0)\n self.assert_sample_queue_size(DataParticleType.SAMPLE, 0)\n\n # stop and re-start the agent\n self.stop_dataset_agent_client()\n self.init_dataset_agent_client()\n # re-initialize\n self.assert_initialize()\n\n result2 = self.data_subscribers.get_samples(DataParticleType.SAMPLE, 3)\n log.debug(\"RESULT 2: %s\", result2)\n result.extend(result2)\n log.debug(\"RESULT: %s\", result)\n self.assert_data_values(result, 'test_data_3-4.txt.result.yml')\n self.assert_sample_queue_size(DataParticleType.CONTROL, 0)\n self.assert_sample_queue_size(DataParticleType.SAMPLE, 0)\n except SampleTimeout as e:\n log.error(\"Exception trapped: %s\", e, exc_info=True)\n self.fail(\"Sample timeout.\")", "def replace_with_insufficient_replicas_test(self):\n debug(\"Starting cluster with 3 nodes.\")\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n if DISABLE_VNODES:\n num_tokens = 1\n else:\n # a little hacky but grep_log returns the whole line...\n num_tokens = int(node3.get_conf_option('num_tokens'))\n\n debug(\"testing with num_tokens: {}\".format(num_tokens))\n\n debug(\"Inserting Data...\")\n node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=2)'])\n\n # stop node to replace\n debug(\"Stopping node to replace.\")\n node3.stop(wait_other_notice=True)\n\n # stop other replica\n debug(\"Stopping node2 (other replica)\")\n node2.stop(wait_other_notice=True)\n\n # replace node 3 with node 4\n debug(\"Starting node 4 to replace node 3\")\n\n node4 = Node('node4', cluster=cluster, auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160),\n storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0',\n initial_token=None, binary_interface=('127.0.0.4', 9042))\n cluster.add(node4, False)\n node4.start(replace_address='127.0.0.3', wait_for_binary_proto=False, wait_other_notice=False)\n\n # replace should fail due to insufficient replicas\n node4.watch_log_for(\"Unable to find sufficient sources for streaming range\")\n assert_not_running(node4)", "def test_snat_with_nodes_reboot(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n 
self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.reboot(self.inputs.k8s_master_ip)\n for node in self.inputs.k8s_slave_ips:\n self.inputs.reboot(node)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def test_liveness_bird6_down(self):\n with DockerHost('host1',\n additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS) as host1:\n retry_until_success(host1.assert_is_ready, retries=30)\n host1.execute(\"docker exec -it calico-node sv stop /etc/service/enabled/bird6\")\n\n # Check that the readiness script is reporting 'not ready'\n self.assertRaisesRegexp(CalledProcessError, \"calico/node is not ready: bird6/confd is not live: Service bird6 is not running.\",\n host1.execute, \"docker exec calico-node /bin/calico-node -bird6-live\")", "def changes_while_node_down_test(self):\n debug(\"changes_while_node_down_test()\")\n cluster = self.cluster\n cluster.populate(2).start()\n node1, node2 = cluster.nodelist()\n wait(2)\n session = self.patient_cql_connection(node2)\n\n self.prepare_for_changes(session, namespace='ns2')\n node1.stop()\n wait(2)\n self.make_schema_changes(session, namespace='ns2')\n wait(2)\n node2.stop()\n wait(2)\n node1.start()\n node2.start()\n wait(20)\n self.validate_schema_consistent(node1)" ]
[ "0.72033966", "0.71024096", "0.70595825", "0.69823843", "0.69612706", "0.6911128", "0.68039066", "0.6747855", "0.65598166", "0.6542973", "0.6518864", "0.64961773", "0.6485054", "0.6472087", "0.6463794", "0.6445934", "0.6360621", "0.6331515", "0.6325584", "0.627847", "0.6266507", "0.62173915", "0.62028617", "0.61756134", "0.61646134", "0.6132038", "0.612977", "0.611995", "0.61061233", "0.6097568" ]
0.75695765
0
When starting a node from a clean slate with the same address as an existing down node, the node should error out even when auto_bootstrap = false (or the node is a seed) and tell the user to use replace_address. CASSANDRA-10134
def fail_without_replace_test(self):
    debug("Starting cluster with 3 nodes.")
    cluster = self.cluster
    cluster.populate(3)
    node1, node2, node3 = cluster.nodelist()
    cluster.seeds.remove(node3)
    NUM_TOKENS = os.environ.get('NUM_TOKENS', '256')
    if DISABLE_VNODES:
        cluster.set_configuration_options(values={'initial_token': None, 'num_tokens': 1})
    else:
        cluster.set_configuration_options(values={'initial_token': None, 'num_tokens': NUM_TOKENS})
    cluster.start()

    debug("Inserting Data...")
    node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=3)'])

    mark = None
    for auto_bootstrap in (True, False):
        debug("Stopping node 3.")
        node3.stop(gently=False)

        # completely delete the data, commitlog, and saved caches
        for d in chain([os.path.join(node3.get_path(), "commitlogs")],
                       [os.path.join(node3.get_path(), "saved_caches")],
                       node3.data_directories()):
            if os.path.exists(d):
                rmtree(d)

        node3.set_configuration_options(values={'auto_bootstrap': auto_bootstrap})
        debug("Starting node 3 with auto_bootstrap = {val}".format(val=auto_bootstrap))
        node3.start(wait_other_notice=False)
        node3.watch_log_for('Use cassandra.replace_address if you want to replace this node', from_mark=mark, timeout=20)
        mark = node3.mark_log()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def replace_with_reset_resume_state_test(self):\n\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n node1.stress(['write', 'n=100K', 'no-warmup', '-schema', 'replication(factor=3)'])\n\n session = self.patient_cql_connection(node1)\n stress_table = 'keyspace1.standard1'\n query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.THREE)\n initial_data = rows_to_list(session.execute(query))\n\n node3.stop(gently=False)\n\n # kill node1 in the middle of streaming to let it fail\n t = InterruptBootstrap(node1)\n t.start()\n # replace node 3 with node 4\n debug(\"Starting node 4 to replace node 3\")\n node4 = Node('node4', cluster=cluster, auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160),\n storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0',\n initial_token=None, binary_interface=('127.0.0.4', 9042))\n\n # keep timeout low so that test won't hang\n node4.set_configuration_options(values={'streaming_socket_timeout_in_ms': 1000})\n cluster.add(node4, False)\n try:\n node4.start(jvm_args=[\"-Dcassandra.replace_address_first_boot=127.0.0.3\"], wait_other_notice=False)\n except NodeError:\n pass # node doesn't start as expected\n t.join()\n node1.start()\n\n # restart node4 bootstrap with resetting bootstrap state\n node4.stop()\n mark = node4.mark_log()\n node4.start(jvm_args=[\n \"-Dcassandra.replace_address_first_boot=127.0.0.3\",\n \"-Dcassandra.reset_bootstrap_progress=true\"\n ])\n # check if we reset bootstrap state\n node4.watch_log_for(\"Resetting bootstrap progress to start fresh\", from_mark=mark)\n # wait for node3 ready to query\n node4.watch_log_for(\"Listening for thrift clients...\", from_mark=mark)\n\n # check if 2nd bootstrap succeeded\n assert_bootstrap_state(self, node4, 'COMPLETED')\n\n # query should work again\n debug(\"Stopping old nodes\")\n node1.stop(gently=False, wait_other_notice=True)\n node2.stop(gently=False, wait_other_notice=True)\n\n debug(\"Verifying data on new node.\")\n session = self.patient_exclusive_cql_connection(node4)\n assert_all(session, 'SELECT * from {} LIMIT 1'.format(stress_table),\n expected=initial_data,\n cl=ConsistencyLevel.ONE)", "def reboot(self, node):", "def resumable_replace_test(self):\n\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n node1.stress(['write', 'n=100K', 'no-warmup', '-schema', 'replication(factor=3)'])\n\n session = self.patient_cql_connection(node1)\n stress_table = 'keyspace1.standard1'\n query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.THREE)\n initial_data = rows_to_list(session.execute(query))\n\n node3.stop(gently=False)\n\n # kill node1 in the middle of streaming to let it fail\n t = InterruptBootstrap(node1)\n t.start()\n # replace node 3 with node 4\n debug(\"Starting node 4 to replace node 3\")\n node4 = Node('node4', cluster=cluster, auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160),\n storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0',\n initial_token=None, binary_interface=('127.0.0.4', 9042))\n # keep timeout low so that test won't hang\n node4.set_configuration_options(values={'streaming_socket_timeout_in_ms': 1000})\n cluster.add(node4, False)\n try:\n node4.start(jvm_args=[\"-Dcassandra.replace_address_first_boot=127.0.0.3\"], wait_other_notice=False)\n except NodeError:\n pass # node doesn't start as expected\n t.join()\n\n # bring back 
node1 and invoke nodetool bootstrap to resume bootstrapping\n node1.start()\n node4.nodetool('bootstrap resume')\n # check if we skipped already retrieved ranges\n node4.watch_log_for(\"already available. Skipping streaming.\")\n # wait for node3 ready to query\n node4.watch_log_for(\"Listening for thrift clients...\")\n\n # check if 2nd bootstrap succeeded\n assert_bootstrap_state(self, node4, 'COMPLETED')\n\n # query should work again\n debug(\"Stopping old nodes\")\n node1.stop(gently=False, wait_other_notice=True)\n node2.stop(gently=False, wait_other_notice=True)\n\n debug(\"Verifying data on new node.\")\n session = self.patient_exclusive_cql_connection(node4)\n assert_all(session, 'SELECT * from {} LIMIT 1'.format(stress_table),\n expected=initial_data,\n cl=ConsistencyLevel.ONE)", "def test_set_self_address(self):\n print('### Testing set up address ###')\n node_id = \"001\" # node_id of the form of 3 chr string already verified in Nanomodem.py\n \n command = b'$A' + node_id.encode()\n self.serport.write(command)\n\n received_bytes = self.serport.readline()\n index = received_bytes.find(b'#A')\n #print(\"SET_ADDRESS len is \"+ str(len(received_bytes)) +\" and index is \"+str(index))\n\n if (index != -1) and (len(received_bytes) - index == 5 and received_bytes.decode()[1] == 'A'): \n # received_bytes[1] == b'A' as condition doesn't work because x = b'A' still stay b'A' and x[0] give 65 (the byte for A)\n #print(\"SET_ADDRESS A was spot on\")\n if received_bytes[1:4] == command[1:4]:\n node_id = received_bytes.decode()[2:5]\n print(\"SET_ADDRESS node is :\"+ node_id)\n print(\"set self address SUCCESS\")\n return True\n else: \n print(\"set self address FAILURE\")\n return False", "def test_snat_with_nodes_reboot(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.reboot(self.inputs.k8s_master_ip)\n for node in self.inputs.k8s_slave_ips:\n self.inputs.reboot(node)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def test_snat_with_master_reboot(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.reboot(self.inputs.k8s_master_ip)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def bootstrap(self, params = None):\n command = \"\"\n print \"NODE: [%s] running bootstrap scripts\" % self.name\n if self.type == \"SEED\":\n command += get_script_text(\"cassandra_seednode_bootstrap\")\n elif self.type == \"CLIENT\":\n if self.name.endswith('1'):\n command += get_script_text(\"ganglia_endpoint\")\n command += get_script_text(\"cassandra_client_bootstrap\")\n\n else:\n command = get_script_text(\"cassandra_node_bootstrap\")\n timer = Timer.get_timer()\n self.vm.run_command(command, silent=True)\n print \"NODE: %s is now bootstrapped (took %d sec)\" % (self.name, timer.stop())\n self.bootstraped = True", "def network_bootstrap(self, args):\n pass", "def test_05_node_down_and_resync_hard(self):\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n test_rest.db_simulate(cluster, 240)\n port = 
test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'stopping cluster {cluster} node with port {port} - during load')\n test_rest.docker_stop(cluster, port)\n # restart nodes\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'restarting cluster {cluster} node with port {port}')\n test_rest.expand_data_cluster(cluster, port=port)\n test_rest.step(\"restarted nodes, waiting 10 seconds to begin monitoring table state & running sync jobs\")\n time.sleep(10)\n test_rest.cluster.insync_and_state_check()\n while test_rest.is_running_simulations():\n print(\"waiting on running simulations to complete\")\n time.sleep(10)\n test_rest.cluster.insync_and_state_check()\n test_rest.cluster.verify_data()", "def new_remotenode(address) -> str:\n return address", "def test_replace_host_subnet(self):\n pass", "def test_bootstrap_source_no_bootstrap(self):\n self.test_config.set('no-bootstrap', False)\n ceph_hooks.bootstrap_source_relation_changed()\n self.status_set.assert_called_once_with('blocked',\n 'Cannot join the '\n 'bootstrap-source relation '\n 'when no-bootstrap is False')", "def reconfigure(self, consensus=None):\r\n pass", "def test_update_node_state_smartfail(self):\n pass", "def test_rebuild_on_host_updated_target_node_not_found(self):\n def fake_get_compute_info(context, host):\n raise exception.ComputeHostNotFound(host=host)\n with test.nested(\n mock.patch.object(self.compute.driver, 'instance_on_disk',\n side_effect=lambda x: True),\n mock.patch.object(self.compute, '_get_compute_info',\n side_effect=fake_get_compute_info)\n ) as (mock_inst, mock_get):\n self.assertRaises(exception.InstanceFaultRollback,\n self._rebuild, expect_error=True)\n\n # Should be on destination host\n instance = db.instance_get(self.context, self.inst.id)\n self.assertEqual('fake_host_2', instance['host'])\n self.assertEqual('fakenode2', instance['node'])\n mock_inst.assert_not_called()\n mock_get.assert_called_once_with(mock.ANY, self.compute.host)", "def _bootup_node(self, conn):\n compose_fname = COMPOSE_FNAME\n exec_plan = self.node_exec_plan.copy()\n while len(exec_plan) > 0:\n container_name = exec_plan.popleft()\n self.__bootup_service(conn, compose_fname, container_name)", "def update_one_node_from_pbs_data(node, attr_dict):\n # put node under a subcluster if it does not have any yet\n if not node.subcluster:\n for id,node_regexp in SubCluster.objects.filter(server=node.server).values_list('id','node_regexp'):\n if re.match(node_regexp,node.name):\n node.subcluster_id = id\n node.save()\n break\n # fill node's np if it is not present\n if not node.np:\n node.np = attr_dict['np']\n node.save()\n\n new_states = []\n if attr_dict.has_key('state'):\n# node.state.clear()\n for statename in attr_dict['state'].split(','):\n #node.state.add(NodeState.objects.get(name=statename.strip()))\n new_states.append(NodeState.objects.get(name=statename.strip()))\n attr_dict['state'] = new_states\n\n\n new_properties = []\n if attr_dict.has_key('properties'):\n# node.properties.clear()\n for propertyname in attr_dict['properties'].split(','):\n np,created = NodeProperty.objects.get_or_create(name=propertyname.strip())\n if created:\n print(\"New property created: %s\" % propertyname)\n new_properties.append(np)\n# node.properties.add(np)\n attr_dict['properties'] = new_properties\n\n new_jobs = []\n if attr_dict.has_key('jobs'):\n slot_jobs = dict([tuple(j.strip().split('/')) for j in attr_dict['jobs'].split(',')])\n 
for slotstr, longjobid in slot_jobs.items():\n slot = int(slotstr)\n# js,created = getJobSlot(slot=slot,node=node)\n# if created:\n# logging.info(\"new jobslot will be created: slot: %d, node name: %s\" % (slot,name))\n jobid = int(longjobid.split('.')[0])\n new_jobs.append(jobid)\n \n# js.livejob,created = LiveJob.objects.get_or_create(jobid=jobid, server=node.server)\n# if created:\n# logging.info(\"new livejob created: %d\" % jobid)\n# js.save()\n attr_dict['jobs'] = new_jobs\n return attr_dict", "def test_config_changed_no_bootstrap_changed(self,\n bootstrap_source_rel_changed,\n get_mon_hosts,\n check_for_upgrade,\n create_sysctl,\n emit_ceph_conf):\n self.relations_of_type.return_value = []\n self.is_relation_made.return_value = True\n self.test_config.set_changed('no-bootstrap', True)\n self.test_config.set('balancer-mode', '')\n ceph_hooks.config_changed()\n bootstrap_source_rel_changed.assert_called_once()", "def test_04_node_down_and_resync_soft(self):\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'stopping cluster {cluster} node with port {port}')\n test_rest.docker_stop(cluster, port)\n test_rest.step(f\"starting db_simulator on cluster {cluster}\")\n test_rest.db_simulate(cluster, 180)\n # restart nodes\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'restarting cluster {cluster} node with port {port}')\n test_rest.expand_data_cluster(cluster, port=port)\n test_rest.step(\"restarted nodes, waiting 10 seconds to begin monitoring table state & running sync jobs\")\n time.sleep(10)\n test_rest.cluster.insync_and_state_check()\n while test_rest.is_running_simulations():\n print(\"waiting on running simulations to complete\")\n time.sleep(10)\n test_rest.cluster.verify_data()", "def test_add_strict_node_to_non_strict_node(self):\n non_strict_node = self.cluster.master\n strict_node = self.cluster.servers[self.nodes_init:self.nodes_init + 1][0]\n self.enable_tls_encryption_cli_on_nodes \\\n (nodes=self.cluster.servers[self.nodes_init:self.nodes_init + 1])\n CbServer.use_https = True\n RestConnection(non_strict_node).add_node(user='Administrator', password='password',\n port=CbServer.ssl_port,\n remoteIp=strict_node.ip)\n CbServer.use_https = False\n rest = RestConnection(non_strict_node)\n nodes = rest.node_statuses()\n rest.rebalance(otpNodes=[node.id for node in nodes],\n ejectedNodes=[])\n result = rest.monitorRebalance()\n self.assertTrue(result, \"Rebalance failed\")", "def on_NodeBootWalletAddress_editingFinished(self):\n # TODO: not implemented yet\n raise NotImplementedError", "def _retry_bootstrap_candidates(self):\n if __debug__: dprint(\"unable to resolve all bootstrap addresses\", level=\"warning\")\n for counter in count(1):\n yield 1.0 if counter < 30 else 30.0\n if __debug__: dprint(\"attempt #\", counter, level=\"warning\")\n candidates = get_bootstrap_candidates(self)\n for candidate in candidates:\n if candidate is None:\n break\n else:\n if __debug__: dprint(\"resolved all bootstrap addresses\")\n self._bootstrap_candidates = dict((candidate.sock_addr, candidate) for candidate in candidates if candidate)\n break", "def ForceMaster(node, is_testver):\n gsaport = core_utils.GSAMasterPort(is_testver)\n # ignore the result of forcemaster\n port_talker.TCPTalk(node, gsaport, 30, command='GET /forcemaster\\n')", "def replace_with_insufficient_replicas_test(self):\n 
debug(\"Starting cluster with 3 nodes.\")\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n if DISABLE_VNODES:\n num_tokens = 1\n else:\n # a little hacky but grep_log returns the whole line...\n num_tokens = int(node3.get_conf_option('num_tokens'))\n\n debug(\"testing with num_tokens: {}\".format(num_tokens))\n\n debug(\"Inserting Data...\")\n node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=2)'])\n\n # stop node to replace\n debug(\"Stopping node to replace.\")\n node3.stop(wait_other_notice=True)\n\n # stop other replica\n debug(\"Stopping node2 (other replica)\")\n node2.stop(wait_other_notice=True)\n\n # replace node 3 with node 4\n debug(\"Starting node 4 to replace node 3\")\n\n node4 = Node('node4', cluster=cluster, auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160),\n storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0',\n initial_token=None, binary_interface=('127.0.0.4', 9042))\n cluster.add(node4, False)\n node4.start(replace_address='127.0.0.3', wait_for_binary_proto=False, wait_other_notice=False)\n\n # replace should fail due to insufficient replicas\n node4.watch_log_for(\"Unable to find sufficient sources for streaming range\")\n assert_not_running(node4)", "def test_client_address_update(self):\n pass", "def changes_while_node_down_test(self):\n debug(\"changes_while_node_down_test()\")\n cluster = self.cluster\n cluster.populate(2).start()\n node1, node2 = cluster.nodelist()\n wait(2)\n session = self.patient_cql_connection(node2)\n\n self.prepare_for_changes(session, namespace='ns2')\n node1.stop()\n wait(2)\n self.make_schema_changes(session, namespace='ns2')\n wait(2)\n node2.stop()\n wait(2)\n node1.start()\n node2.start()\n wait(20)\n self.validate_schema_consistent(node1)", "def test_create_node_reboot_item(self):\n pass", "def test_replace_cluster_network(self):\n pass", "def test_patch_host_subnet(self):\n pass", "def boot(self, boot_node_request):\n return self.client.call('POST',\n self.name + 'boot', payload=boot_node_request)" ]
[ "0.6077486", "0.59152275", "0.5730985", "0.57131267", "0.5675744", "0.55063087", "0.5453649", "0.54527897", "0.54008937", "0.5391057", "0.5317235", "0.53120404", "0.52710044", "0.52428687", "0.5236482", "0.51801217", "0.51627666", "0.51423806", "0.51393414", "0.51322865", "0.5126891", "0.5111571", "0.5106275", "0.5100639", "0.5093111", "0.5084151", "0.5083584", "0.5063983", "0.5055371", "0.50470454" ]
0.6074668
1
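The record above exercises the failure path: a wiped node restarted under its old address is refused and told to use replace_address. As a complement, here is a minimal sketch of the supported path that log message points to, reusing the ccmlib Node API that appears in the test code above; the helper name, node name, loopback addresses, and port numbers are illustrative assumptions, not values taken from this record.

# Sketch: add a fresh node that takes over a dead node's tokens via
# replace_address, instead of restarting the wiped node under its old address.
from ccmlib.node import Node


def start_replacement_node(cluster, dead_ip='127.0.0.3'):
    # Illustrative interfaces and ports; a real replacement must not reuse
    # the dead node's data directories or its ccm node definition.
    node4 = Node('node4', cluster=cluster, auto_bootstrap=True,
                 thrift_interface=('127.0.0.4', 9160),
                 storage_interface=('127.0.0.4', 7000),
                 jmx_port='7400', remote_debug_port='0',
                 initial_token=None, binary_interface=('127.0.0.4', 9042))
    cluster.add(node4, False)
    # replace_address makes the new node claim the dead node's tokens from
    # gossip and stream its ranges rather than bootstrapping fresh ones.
    node4.start(replace_address=dead_ip, wait_for_binary_proto=True)
    return node4

Several of the negative examples achieve the same effect by passing jvm_args=['-Dcassandra.replace_address_first_boot=...'] at startup; either form drives the replacement flow these tests assert on.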
To handle situations such as a failed disk in a JBOD, it may be desirable to replace a node without bootstrapping. In such scenarios best practice advice has been to wipe the node's system keyspace data, set the initial tokens via cassandra.yaml, start up without bootstrap and then repair. Starting the node as a replacement allows the tokens to be learned from gossip, but previously required auto_bootstrap=true. Since CASSANDRA-10134, replacement is allowed without bootstrapping, but it requires the operator to acknowledge the risk in doing so by setting the cassandra.allow_unsafe_replace system property at startup. CASSANDRA-10134
def unsafe_replace_test(self):
    debug('Starting cluster with 3 nodes.')
    cluster = self.cluster
    cluster.populate(3)
    cluster.set_batch_commitlog(enabled=True)
    node1, node2, node3 = cluster.nodelist()
    cluster.seeds.remove(node3)
    NUM_TOKENS = os.environ.get('NUM_TOKENS', '256')
    if DISABLE_VNODES:
        cluster.set_configuration_options(values={'initial_token': None, 'num_tokens': 1})
    else:
        cluster.set_configuration_options(values={'initial_token': None, 'num_tokens': NUM_TOKENS})
    cluster.start()

    debug('Inserting Data...')
    node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=3)'])
    cluster.flush()

    session = self.patient_cql_connection(node1)
    stress_table = 'keyspace1.standard1'
    query = SimpleStatement('select * from {} LIMIT 1'.format(stress_table), consistency_level=ConsistencyLevel.THREE)
    initial_data = rows_to_list(session.execute(query))

    for set_allow_unsafe_flag in [False, True]:
        debug('Stopping node 3.')
        node3.stop(gently=False)

        # completely delete the system keyspace data plus commitlog and saved caches
        for d in node3.data_directories():
            system_data = os.path.join(d, 'system')
            if os.path.exists(system_data):
                rmtree(system_data)

        for d in ['commitlogs', 'saved_caches']:
            p = os.path.join(node3.get_path(), d)
            if os.path.exists(p):
                rmtree(p)

        node3.set_configuration_options(values={'auto_bootstrap': False})
        mark = node3.mark_log()

        if set_allow_unsafe_flag:
            debug('Starting node3 with auto_bootstrap = false and replace_address = 127.0.0.3 and allow_unsafe_replace = true')
            node3.start(replace_address='127.0.0.3', wait_for_binary_proto=True,
                        jvm_args=['-Dcassandra.allow_unsafe_replace=true'])

            # query should work again
            debug("Stopping old nodes")
            node1.stop(gently=False, wait_other_notice=True)
            node2.stop(gently=False, wait_other_notice=True)

            debug("Verifying data on new node.")
            session = self.patient_exclusive_cql_connection(node3)
            assert_all(session, 'SELECT * from {} LIMIT 1'.format(stress_table),
                       expected=initial_data,
                       cl=ConsistencyLevel.ONE)
        else:
            debug('Starting node 3 with auto_bootstrap = false and replace_address = 127.0.0.3')
            node3.start(replace_address='127.0.0.3', wait_other_notice=False)
            node3.watch_log_for('To perform this operation, please restart with -Dcassandra.allow_unsafe_replace=true', from_mark=mark, timeout=20)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fail_without_replace_test(self):\n debug(\"Starting cluster with 3 nodes.\")\n cluster = self.cluster\n cluster.populate(3)\n node1, node2, node3 = cluster.nodelist()\n cluster.seeds.remove(node3)\n NUM_TOKENS = os.environ.get('NUM_TOKENS', '256')\n if DISABLE_VNODES:\n cluster.set_configuration_options(values={'initial_token': None, 'num_tokens': 1})\n else:\n cluster.set_configuration_options(values={'initial_token': None, 'num_tokens': NUM_TOKENS})\n cluster.start()\n\n debug(\"Inserting Data...\")\n node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=3)'])\n\n mark = None\n for auto_bootstrap in (True, False):\n debug(\"Stopping node 3.\")\n node3.stop(gently=False)\n\n # completely delete the data, commitlog, and saved caches\n for d in chain([os.path.join(node3.get_path(), \"commitlogs\")],\n [os.path.join(node3.get_path(), \"saved_caches\")],\n node3.data_directories()):\n if os.path.exists(d):\n rmtree(d)\n\n node3.set_configuration_options(values={'auto_bootstrap': auto_bootstrap})\n debug(\"Starting node 3 with auto_bootstrap = {val}\".format(val=auto_bootstrap))\n node3.start(wait_other_notice=False)\n node3.watch_log_for('Use cassandra.replace_address if you want to replace this node', from_mark=mark, timeout=20)\n mark = node3.mark_log()", "def resumable_replace_test(self):\n\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n node1.stress(['write', 'n=100K', 'no-warmup', '-schema', 'replication(factor=3)'])\n\n session = self.patient_cql_connection(node1)\n stress_table = 'keyspace1.standard1'\n query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.THREE)\n initial_data = rows_to_list(session.execute(query))\n\n node3.stop(gently=False)\n\n # kill node1 in the middle of streaming to let it fail\n t = InterruptBootstrap(node1)\n t.start()\n # replace node 3 with node 4\n debug(\"Starting node 4 to replace node 3\")\n node4 = Node('node4', cluster=cluster, auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160),\n storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0',\n initial_token=None, binary_interface=('127.0.0.4', 9042))\n # keep timeout low so that test won't hang\n node4.set_configuration_options(values={'streaming_socket_timeout_in_ms': 1000})\n cluster.add(node4, False)\n try:\n node4.start(jvm_args=[\"-Dcassandra.replace_address_first_boot=127.0.0.3\"], wait_other_notice=False)\n except NodeError:\n pass # node doesn't start as expected\n t.join()\n\n # bring back node1 and invoke nodetool bootstrap to resume bootstrapping\n node1.start()\n node4.nodetool('bootstrap resume')\n # check if we skipped already retrieved ranges\n node4.watch_log_for(\"already available. 
Skipping streaming.\")\n # wait for node3 ready to query\n node4.watch_log_for(\"Listening for thrift clients...\")\n\n # check if 2nd bootstrap succeeded\n assert_bootstrap_state(self, node4, 'COMPLETED')\n\n # query should work again\n debug(\"Stopping old nodes\")\n node1.stop(gently=False, wait_other_notice=True)\n node2.stop(gently=False, wait_other_notice=True)\n\n debug(\"Verifying data on new node.\")\n session = self.patient_exclusive_cql_connection(node4)\n assert_all(session, 'SELECT * from {} LIMIT 1'.format(stress_table),\n expected=initial_data,\n cl=ConsistencyLevel.ONE)", "def replace_with_reset_resume_state_test(self):\n\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n node1.stress(['write', 'n=100K', 'no-warmup', '-schema', 'replication(factor=3)'])\n\n session = self.patient_cql_connection(node1)\n stress_table = 'keyspace1.standard1'\n query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.THREE)\n initial_data = rows_to_list(session.execute(query))\n\n node3.stop(gently=False)\n\n # kill node1 in the middle of streaming to let it fail\n t = InterruptBootstrap(node1)\n t.start()\n # replace node 3 with node 4\n debug(\"Starting node 4 to replace node 3\")\n node4 = Node('node4', cluster=cluster, auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160),\n storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0',\n initial_token=None, binary_interface=('127.0.0.4', 9042))\n\n # keep timeout low so that test won't hang\n node4.set_configuration_options(values={'streaming_socket_timeout_in_ms': 1000})\n cluster.add(node4, False)\n try:\n node4.start(jvm_args=[\"-Dcassandra.replace_address_first_boot=127.0.0.3\"], wait_other_notice=False)\n except NodeError:\n pass # node doesn't start as expected\n t.join()\n node1.start()\n\n # restart node4 bootstrap with resetting bootstrap state\n node4.stop()\n mark = node4.mark_log()\n node4.start(jvm_args=[\n \"-Dcassandra.replace_address_first_boot=127.0.0.3\",\n \"-Dcassandra.reset_bootstrap_progress=true\"\n ])\n # check if we reset bootstrap state\n node4.watch_log_for(\"Resetting bootstrap progress to start fresh\", from_mark=mark)\n # wait for node3 ready to query\n node4.watch_log_for(\"Listening for thrift clients...\", from_mark=mark)\n\n # check if 2nd bootstrap succeeded\n assert_bootstrap_state(self, node4, 'COMPLETED')\n\n # query should work again\n debug(\"Stopping old nodes\")\n node1.stop(gently=False, wait_other_notice=True)\n node2.stop(gently=False, wait_other_notice=True)\n\n debug(\"Verifying data on new node.\")\n session = self.patient_exclusive_cql_connection(node4)\n assert_all(session, 'SELECT * from {} LIMIT 1'.format(stress_table),\n expected=initial_data,\n cl=ConsistencyLevel.ONE)", "def replace_with_insufficient_replicas_test(self):\n debug(\"Starting cluster with 3 nodes.\")\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n if DISABLE_VNODES:\n num_tokens = 1\n else:\n # a little hacky but grep_log returns the whole line...\n num_tokens = int(node3.get_conf_option('num_tokens'))\n\n debug(\"testing with num_tokens: {}\".format(num_tokens))\n\n debug(\"Inserting Data...\")\n node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=2)'])\n\n # stop node to replace\n debug(\"Stopping node to replace.\")\n node3.stop(wait_other_notice=True)\n\n # stop other replica\n debug(\"Stopping node2 (other replica)\")\n 
node2.stop(wait_other_notice=True)\n\n # replace node 3 with node 4\n debug(\"Starting node 4 to replace node 3\")\n\n node4 = Node('node4', cluster=cluster, auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160),\n storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0',\n initial_token=None, binary_interface=('127.0.0.4', 9042))\n cluster.add(node4, False)\n node4.start(replace_address='127.0.0.3', wait_for_binary_proto=False, wait_other_notice=False)\n\n # replace should fail due to insufficient replicas\n node4.watch_log_for(\"Unable to find sufficient sources for streaming range\")\n assert_not_running(node4)", "def reboot(self, node):", "def node_restart(ctx):\n ctx.obj['node'].attempt_restart()", "def multi_dc_replace_with_rf1_test(self):\n cluster = self.cluster\n cluster.populate([1, 1])\n cluster.start()\n node1, node2 = cluster.nodelist()\n\n node1 = cluster.nodes['node1']\n yaml_config = \"\"\"\n # Create the keyspace and table\n keyspace: keyspace1\n keyspace_definition: |\n CREATE KEYSPACE keyspace1 WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 1, 'dc2': 1};\n table: users\n table_definition:\n CREATE TABLE users (\n username text,\n first_name text,\n last_name text,\n email text,\n PRIMARY KEY(username)\n ) WITH compaction = {'class':'SizeTieredCompactionStrategy'};\n insert:\n partitions: fixed(1)\n batchtype: UNLOGGED\n queries:\n read:\n cql: select * from users where username = ?\n fields: samerow\n \"\"\"\n with tempfile.NamedTemporaryFile(mode='w+') as stress_config:\n stress_config.write(yaml_config)\n stress_config.flush()\n node1.stress(['user', 'profile=' + stress_config.name, 'n=10k', 'no-warmup',\n 'ops(insert=1)', '-rate', 'threads=50'])\n\n session = self.patient_cql_connection(node1)\n\n # change system_auth keyspace to 2 (default is 1) to avoid\n # \"Unable to find sufficient sources for streaming\" warning\n if cluster.cassandra_version() >= '2.2.0':\n session.execute(\"\"\"\n ALTER KEYSPACE system_auth\n WITH replication = {'class':'SimpleStrategy', 'replication_factor':2};\n \"\"\")\n\n # Save initial data\n stress_table = 'keyspace1.users'\n query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.TWO)\n initial_data = rows_to_list(session.execute(query))\n\n # stop node to replace\n debug(\"Stopping node to replace.\")\n node2.stop(wait_other_notice=True)\n\n node3 = new_node(cluster, data_center='dc2')\n node3.start(replace_address='127.0.0.2', wait_for_binary_proto=True)\n\n assert_bootstrap_state(self, node3, 'COMPLETED')\n\n # Check that keyspace was replicated from dc1 to dc2\n self.assertFalse(node3.grep_log(\"Unable to find sufficient sources for streaming range\"))\n\n # query should work again with node1 stopped\n node1.stop(wait_other_notice=True)\n debug(\"Verifying data on new node.\")\n session = self.patient_exclusive_cql_connection(node3)\n assert_all(session, 'SELECT * from {} LIMIT 1'.format(stress_table),\n expected=initial_data,\n cl=ConsistencyLevel.LOCAL_ONE)", "def _replace_node_test(self, gently):\n debug(\"Starting cluster with 3 nodes.\")\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n if DISABLE_VNODES:\n num_tokens = 1\n else:\n # a little hacky but grep_log returns the whole line...\n num_tokens = int(node3.get_conf_option('num_tokens'))\n\n debug(\"testing with num_tokens: {}\".format(num_tokens))\n\n debug(\"Inserting Data...\")\n node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 
'replication(factor=3)'])\n\n session = self.patient_cql_connection(node1)\n session.default_timeout = 45\n stress_table = 'keyspace1.standard1'\n query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.THREE)\n initial_data = rows_to_list(session.execute(query))\n\n # stop node, query should not work with consistency 3\n debug(\"Stopping node 3.\")\n node3.stop(gently=gently, wait_other_notice=True)\n\n debug(\"Testing node stoppage (query should fail).\")\n with self.assertRaises(NodeUnavailable):\n try:\n query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.THREE)\n session.execute(query)\n except (Unavailable, ReadTimeout):\n raise NodeUnavailable(\"Node could not be queried.\")\n\n # replace node 3 with node 4\n debug(\"Starting node 4 to replace node 3\")\n node4 = Node('node4', cluster=cluster, auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160),\n storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0',\n initial_token=None, binary_interface=('127.0.0.4', 9042))\n cluster.add(node4, False)\n node4.start(replace_address='127.0.0.3', wait_for_binary_proto=True)\n\n debug(\"Verifying tokens migrated sucessfully\")\n moved_tokens = node4.grep_log(\"Token .* changing ownership from /127.0.0.3 to /127.0.0.4\")\n debug(\"number of moved tokens: {}\".format(len(moved_tokens)))\n self.assertEqual(len(moved_tokens), num_tokens)\n\n # check that restarting node 3 doesn't work\n debug(\"Try to restart node 3 (should fail)\")\n node3.start(wait_other_notice=False)\n collision_log = node1.grep_log(\"between /127.0.0.3 and /127.0.0.4; /127.0.0.4 is the new owner\")\n debug(collision_log)\n self.assertEqual(len(collision_log), 1)\n node3.stop(gently=False)\n\n # query should work again\n debug(\"Stopping old nodes\")\n node1.stop(gently=False, wait_other_notice=True)\n node2.stop(gently=False, wait_other_notice=True)\n\n debug(\"Verifying data on new node.\")\n session = self.patient_exclusive_cql_connection(node4)\n assert_all(session, 'SELECT * from {} LIMIT 1'.format(stress_table),\n expected=initial_data,\n cl=ConsistencyLevel.ONE)", "def upgrade_kernel():\n execute(\"upgrade_kernel_node\", env.host_string)", "def test_restart_node_with_encrypted_pkeys(self):\n self.x509.generate_multiple_x509_certs(servers=self.servers[:self.nodes_init])\n self.x509.upload_root_certs(self.master)\n self.x509.upload_node_certs(servers=self.servers[:self.nodes_init])\n rest = RestConnection(self.master)\n nodes_in_cluster = [node for node in self.servers[:self.nodes_init]]\n for node in self.servers[1:self.nodes_init]:\n shell = RemoteMachineShellConnection(node)\n shell.restart_couchbase()\n shell.disconnect()\n self.sleep(10, \"Wait after restart\")\n self.cluster.async_failover(nodes_in_cluster,\n [node],\n graceful=False)\n self.wait_for_failover_or_assert(1)\n rest.set_recovery_type(\"ns_1@\" + node.ip, recoveryType=\"delta\")\n https_val = CbServer.use_https # so that add_node uses https\n CbServer.use_https = True\n task = self.cluster.async_rebalance(nodes_in_cluster, [], [])\n CbServer.use_https = https_val\n self.wait_for_rebalance_to_complete(task)\n shell = RemoteMachineShellConnection(node)\n shell.restart_couchbase()\n shell.disconnect()\n https_val = CbServer.use_https # so that add_node uses https\n CbServer.use_https = True\n task = self.cluster.async_rebalance(nodes_in_cluster,\n [], [node])\n self.wait_for_rebalance_to_complete(task)\n CbServer.use_https = https_val\n 
nodes_in_cluster.remove(node)", "def test_05_node_down_and_resync_hard(self):\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n test_rest.db_simulate(cluster, 240)\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'stopping cluster {cluster} node with port {port} - during load')\n test_rest.docker_stop(cluster, port)\n # restart nodes\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'restarting cluster {cluster} node with port {port}')\n test_rest.expand_data_cluster(cluster, port=port)\n test_rest.step(\"restarted nodes, waiting 10 seconds to begin monitoring table state & running sync jobs\")\n time.sleep(10)\n test_rest.cluster.insync_and_state_check()\n while test_rest.is_running_simulations():\n print(\"waiting on running simulations to complete\")\n time.sleep(10)\n test_rest.cluster.insync_and_state_check()\n test_rest.cluster.verify_data()", "def test_04_node_down_and_resync_soft(self):\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'stopping cluster {cluster} node with port {port}')\n test_rest.docker_stop(cluster, port)\n test_rest.step(f\"starting db_simulator on cluster {cluster}\")\n test_rest.db_simulate(cluster, 180)\n # restart nodes\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'restarting cluster {cluster} node with port {port}')\n test_rest.expand_data_cluster(cluster, port=port)\n test_rest.step(\"restarted nodes, waiting 10 seconds to begin monitoring table state & running sync jobs\")\n time.sleep(10)\n test_rest.cluster.insync_and_state_check()\n while test_rest.is_running_simulations():\n print(\"waiting on running simulations to complete\")\n time.sleep(10)\n test_rest.cluster.verify_data()", "def test_replaceDoesNotMutate(self):\n disco = create_disco()\n node = create_node(\"somewhere\")\n disco.onMessage(None, NodeActive(node))\n resolved_node = resolve(disco, \"myservice\", \"1.0\")\n\n node2 = create_node(\"somewhere\")\n node2.version = \"1.3\"\n disco.onMessage(None, ReplaceCluster(\"myservice\",\n SANDBOX_ENV,\n [node2]))\n self.assertEqual(resolved_node.version, \"1.0\")", "def upgrade_kernel(**kwargs):\n execute(\"upgrade_kernel_node\", env.host_string, **kwargs)", "def multiple_repair_test(self):\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n session = self.patient_cql_connection(node1)\n create_ks(session, 'ks', 3)\n create_cf(session, 'cf', read_repair=0.0, columns={'c1': 'text', 'c2': 'text'})\n\n debug(\"insert data\")\n\n insert_c1c2(session, keys=range(1, 50), consistency=ConsistencyLevel.ALL)\n node1.flush()\n\n debug(\"bringing down node 3\")\n node3.flush()\n node3.stop(gently=False)\n\n debug(\"inserting additional data into node 1 and 2\")\n insert_c1c2(session, keys=range(50, 100), consistency=ConsistencyLevel.TWO)\n node1.flush()\n node2.flush()\n\n debug(\"restarting and repairing node 3\")\n node3.start(wait_for_binary_proto=True)\n\n if cluster.version() >= \"2.2\":\n node3.repair()\n else:\n node3.nodetool(\"repair -par -inc\")\n\n # wait stream handlers to be closed on windows\n # after session is finished (See CASSANDRA-10644)\n if is_win:\n time.sleep(2)\n\n debug(\"stopping node 2\")\n node2.stop(gently=False)\n\n 
debug(\"inserting data in nodes 1 and 3\")\n insert_c1c2(session, keys=range(100, 150), consistency=ConsistencyLevel.TWO)\n node1.flush()\n node3.flush()\n\n debug(\"start and repair node 2\")\n node2.start(wait_for_binary_proto=True)\n\n if cluster.version() >= \"2.2\":\n node2.repair()\n else:\n node2.nodetool(\"repair -par -inc\")\n\n debug(\"replace node and check data integrity\")\n node3.stop(gently=False)\n node5 = Node('node5', cluster, True, ('127.0.0.5', 9160), ('127.0.0.5', 7000), '7500', '0', None, ('127.0.0.5', 9042))\n cluster.add(node5, False)\n node5.start(replace_address='127.0.0.3', wait_other_notice=True)\n\n assert_one(session, \"SELECT COUNT(*) FROM ks.cf LIMIT 200\", [149])", "def test_add_node_after_mv(self):\n\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=-i))\n\n for i in range(1000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(-i), [-i, i])\n\n node4 = new_node(self.cluster, data_center=\"dc1\")\n node4.start(wait_for_binary_proto=True, jvm_args=[\"-Dcassandra.migration_task_wait_in_seconds={}\".format(MIGRATION_WAIT)])\n\n session2 = self.patient_exclusive_cql_connection(node4)\n\n \"\"\"\n @jira_ticket CASSANDRA-12984\n\n Assert that MVs are marked as build after bootstrap. Otherwise newly streamed MVs will be built again\n \"\"\"\n assert_one(session2, \"SELECT count(*) FROM system.built_views WHERE keyspace_name = 'ks' AND view_name = 't_by_v'\", [1])\n\n for i in range(1000):\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(-i), [-i, i])\n\n for i in range(1000, 1100):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=-i))\n\n for i in range(1000, 1100):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(-i), [-i, i])", "def test_replaceDoesNotDisableCircuitBreaker(self):\n disco = create_disco()\n node = create_node(\"somewhere\")\n disco.onMessage(None, NodeActive(node))\n resolved_node = resolve(disco, \"myservice\", \"1.0\")\n # Uh-oh it's a pretty broken node:\n for i in range(10):\n resolved_node.failure()\n\n node = create_node(\"somewhere\")\n disco.onMessage(None, ReplaceCluster(\"myservice\",\n SANDBOX_ENV,\n [node]))\n resolved_node2 = resolve(disco, \"myservice\", \"1.0\")\n self.assertEqual(resolved_node2, None)\n resolved_node.success()\n self.assertNodesEqual(resolve(disco, \"myservice\", \"1.0\"), node)", "def test_add_strict_node_to_non_strict_node(self):\n non_strict_node = self.cluster.master\n strict_node = self.cluster.servers[self.nodes_init:self.nodes_init + 1][0]\n self.enable_tls_encryption_cli_on_nodes \\\n (nodes=self.cluster.servers[self.nodes_init:self.nodes_init + 1])\n CbServer.use_https = True\n RestConnection(non_strict_node).add_node(user='Administrator', password='password',\n port=CbServer.ssl_port,\n remoteIp=strict_node.ip)\n CbServer.use_https = False\n rest = RestConnection(non_strict_node)\n nodes = rest.node_statuses()\n rest.rebalance(otpNodes=[node.id for node in nodes],\n ejectedNodes=[])\n result = rest.monitorRebalance()\n self.assertTrue(result, \"Rebalance failed\")", "def test_snat_with_nodes_reboot(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n 
ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.reboot(self.inputs.k8s_master_ip)\n for node in self.inputs.k8s_slave_ips:\n self.inputs.reboot(node)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def test_replaceEmpty(self):\n disco = create_disco()\n node1 = create_node(\"somewhere\")\n node2 = create_node(\"somewhere2\")\n disco.onMessage(None, ReplaceCluster(\"myservice\",\n SANDBOX_ENV,\n [node1, node2]))\n self.assertEqual(knownNodes(disco, \"myservice\", \"sandbox\"), [node1, node2])", "def bootstrap(self, params = None):\n command = \"\"\n print \"NODE: [%s] running bootstrap scripts\" % self.name\n if self.type == \"SEED\":\n command += get_script_text(\"cassandra_seednode_bootstrap\")\n elif self.type == \"CLIENT\":\n if self.name.endswith('1'):\n command += get_script_text(\"ganglia_endpoint\")\n command += get_script_text(\"cassandra_client_bootstrap\")\n\n else:\n command = get_script_text(\"cassandra_node_bootstrap\")\n timer = Timer.get_timer()\n self.vm.run_command(command, silent=True)\n print \"NODE: %s is now bootstrapped (took %d sec)\" % (self.name, timer.stop())\n self.bootstraped = True", "def upgrade_to_version(self, version, mixed_version=False, nodes=None):\n debug('Upgrading to ' + version)\n if not mixed_version:\n nodes = self.cluster.nodelist()\n\n for node in nodes:\n debug('Prepping node for shutdown: ' + node.name)\n node.flush()\n self._check_values()\n self._check_counter_values()\n \n for node in nodes:\n debug('Shutting down node: ' + node.name)\n time.sleep(.5)\n node.stop(wait_other_notice=False)\n\n if ENABLE_VNODES and version >= \"1.2\":\n self.cluster.set_configuration_options(values={'initial_token': None, 'num_tokens': 256})\n\n for node in nodes:\n debug('Upgrading node: ' + node.name)\n node.set_cassandra_dir(cassandra_version=version)\n node.start(wait_other_notice=True)\n time.sleep(.5)\n if not mixed_version:\n node.nodetool('upgradesstables')\n\n if ENABLE_VNODES and version >= \"1.2\" and not mixed_version:\n debug(\"Running shuffle\")\n self.node2.shuffle(\"create\")\n self.node2.shuffle(\"en\")\n\n for node in nodes:\n debug('Checking node: ' + node.name)\n if not mixed_version:\n self._write_values()\n self._check_values()\n\n self._increment_counter_value()\n time.sleep(0.5)\n self._check_counter_values()\n \n if not mixed_version:\n # Check we can bootstrap a new node on the upgraded cluster:\n debug(\"Adding a node to the cluster\")\n self.cluster.set_cassandra_dir(cassandra_version=version)\n nnode = new_node(self.cluster, remote_debug_port=str(2000+len(self.cluster.nodes)))\n nnode.start(no_wait=False)\n nnode.watch_log_for(\"Bootstrap completed!\")\n debug(\"node should be up, but sleeping a bit to ensure...\")\n time.sleep(15)\n self._check_values()\n self._check_counter_values()\n \n if mixed_version:\n debug('Successfully upgraded part of the cluster to %s' % version) \n else:\n debug('Successfully upgraded to %s' % version)", "def test_cluster_works_fine_after_deleting_CA_folder(self):\n self.x509.generate_multiple_x509_certs(servers=self.servers[:self.nodes_init])\n random_nodes = random.sample(self.servers[1:self.nodes_init], 1)\n self.log.info(\"Uploading root certs from {0}\".format(random_nodes[0]))\n self.x509.upload_root_certs(random_nodes[0])\n self.x509.upload_node_certs(servers=self.servers[:self.nodes_init])\n 
self.x509.delete_unused_out_of_the_box_CAs(server=self.master)\n self.x509.upload_client_cert_settings(server=self.master)\n shell = RemoteMachineShellConnection(random_nodes[0])\n shell.remove_directory(self.x509.install_path + x509main.CHAINFILEPATH +\n \"/\" + x509main.TRUSTEDCAPATH)\n shell.disconnect()\n\n failover_nodes = random_nodes\n nodes_in_cluster = self.servers[:self.nodes_init]\n for operation in [\"recovery\", \"out\"]:\n shell = RemoteMachineShellConnection(failover_nodes[0])\n shell.stop_server()\n self.cluster.async_failover(self.servers[:self.nodes_init],\n failover_nodes,\n graceful=False)\n self.wait_for_failover_or_assert(1)\n if operation == \"out\":\n https_val = CbServer.use_https # so that add_node uses https\n CbServer.use_https = True\n rest = RestConnection(self.master)\n otp_nodes = []\n ejected_nodes = []\n for node in nodes_in_cluster:\n otp_nodes.append('ns_1@'+node.ip)\n for node in failover_nodes:\n ejected_nodes.append('ns_1@' + node.ip)\n status = rest.rebalance(otpNodes=otp_nodes, ejectedNodes=ejected_nodes)\n if not status:\n shell.start_server(failover_nodes[0])\n self.fail(\"rebalance/failover failed\")\n CbServer.use_https = https_val\n nodes_in_cluster = nodes_in_cluster.remove(failover_nodes[0])\n shell.start_server(failover_nodes[0])\n if operation == \"recovery\":\n rest = RestConnection(self.master)\n for node in failover_nodes:\n rest.set_recovery_type(\"ns_1@\" + node.ip, recoveryType=\"delta\")\n https_val = CbServer.use_https # so that add_node uses https\n CbServer.use_https = True\n task = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])\n self.wait_for_rebalance_to_complete(task)\n CbServer.use_https = https_val\n self.auth(servers=nodes_in_cluster)", "def test_patch_hyperflex_cluster(self):\n pass", "def decommission(self):\n print \"NODE: Decommissioning node: \" + self.name\n keyspace = env_vars['keyspace']\n timer = Timer.get_timer()\n self.vm.run_command(\"nodetool repair -h %s %s\" % (self.name, keyspace))\n self.vm.run_command(\"nodetool decommission\")\n print \"NODE: %s is decommissioned (took %d secs)\" % (self.name, timer.stop())\n #self.vm.shutdown()", "def _unprovision_node(self, conn):\n conn.run(f\"rm -rf {EXPORTER_HOME}\")", "def test_default_installation(cassandra_service):\n verify_client_can_write_read_and_delete()", "def test_replace(self):\n disco = create_disco()\n node1 = create_node(\"somewhere\")\n node2 = create_node(\"somewhere2\")\n node3 = create_node(\"somewhere3\")\n node4 = create_node(\"somewhere4\")\n disco.onMessage(None, NodeActive(node1))\n disco.onMessage(None, NodeActive(node2))\n disco.onMessage(None, ReplaceCluster(\"myservice\",\n SANDBOX_ENV,\n [node3, node4]))\n self.assertEqual(knownNodes(disco, \"myservice\", \"sandbox\"), [node3, node4])", "def test_update_hyperflex_cluster(self):\n pass", "def test_snat_with_kubelet_restart_on_master(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"kubelet\",\n host_ips = [self.inputs.k8s_master_ip])\n time.sleep(30) # Wait timer for all kubernetes pods to stablise.\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)" ]
[ "0.71049017", "0.682219", "0.65992427", "0.6297513", "0.62767684", "0.6222005", "0.61809766", "0.60596913", "0.6026414", "0.5952776", "0.5792869", "0.5709729", "0.56964314", "0.55712783", "0.55003196", "0.54920876", "0.5481311", "0.54768157", "0.5469145", "0.5460061", "0.54394656", "0.5389811", "0.53818315", "0.5353411", "0.5349072", "0.53181833", "0.53109795", "0.5286094", "0.5282526", "0.5271682" ]
0.6841581
1
Test that replace fails when there are insufficient replicas CASSANDRA11848
def replace_with_insufficient_replicas_test(self): debug("Starting cluster with 3 nodes.") cluster = self.cluster cluster.populate(3).start() node1, node2, node3 = cluster.nodelist() if DISABLE_VNODES: num_tokens = 1 else: # a little hacky but grep_log returns the whole line... num_tokens = int(node3.get_conf_option('num_tokens')) debug("testing with num_tokens: {}".format(num_tokens)) debug("Inserting Data...") node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=2)']) # stop node to replace debug("Stopping node to replace.") node3.stop(wait_other_notice=True) # stop other replica debug("Stopping node2 (other replica)") node2.stop(wait_other_notice=True) # replace node 3 with node 4 debug("Starting node 4 to replace node 3") node4 = Node('node4', cluster=cluster, auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160), storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0', initial_token=None, binary_interface=('127.0.0.4', 9042)) cluster.add(node4, False) node4.start(replace_address='127.0.0.3', wait_for_binary_proto=False, wait_other_notice=False) # replace should fail due to insufficient replicas node4.watch_log_for("Unable to find sufficient sources for streaming range") assert_not_running(node4)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_redis_increase_replica_count_usual_case():", "def test_base_replica_repair_with_contention(self):\n self._base_replica_repair_test(fail_mv_lock=True)", "def resumable_replace_test(self):\n\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n node1.stress(['write', 'n=100K', 'no-warmup', '-schema', 'replication(factor=3)'])\n\n session = self.patient_cql_connection(node1)\n stress_table = 'keyspace1.standard1'\n query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.THREE)\n initial_data = rows_to_list(session.execute(query))\n\n node3.stop(gently=False)\n\n # kill node1 in the middle of streaming to let it fail\n t = InterruptBootstrap(node1)\n t.start()\n # replace node 3 with node 4\n debug(\"Starting node 4 to replace node 3\")\n node4 = Node('node4', cluster=cluster, auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160),\n storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0',\n initial_token=None, binary_interface=('127.0.0.4', 9042))\n # keep timeout low so that test won't hang\n node4.set_configuration_options(values={'streaming_socket_timeout_in_ms': 1000})\n cluster.add(node4, False)\n try:\n node4.start(jvm_args=[\"-Dcassandra.replace_address_first_boot=127.0.0.3\"], wait_other_notice=False)\n except NodeError:\n pass # node doesn't start as expected\n t.join()\n\n # bring back node1 and invoke nodetool bootstrap to resume bootstrapping\n node1.start()\n node4.nodetool('bootstrap resume')\n # check if we skipped already retrieved ranges\n node4.watch_log_for(\"already available. Skipping streaming.\")\n # wait for node3 ready to query\n node4.watch_log_for(\"Listening for thrift clients...\")\n\n # check if 2nd bootstrap succeeded\n assert_bootstrap_state(self, node4, 'COMPLETED')\n\n # query should work again\n debug(\"Stopping old nodes\")\n node1.stop(gently=False, wait_other_notice=True)\n node2.stop(gently=False, wait_other_notice=True)\n\n debug(\"Verifying data on new node.\")\n session = self.patient_exclusive_cql_connection(node4)\n assert_all(session, 'SELECT * from {} LIMIT 1'.format(stress_table),\n expected=initial_data,\n cl=ConsistencyLevel.ONE)", "def unsafe_replace_test(self):\n debug('Starting cluster with 3 nodes.')\n cluster = self.cluster\n cluster.populate(3)\n cluster.set_batch_commitlog(enabled=True)\n node1, node2, node3 = cluster.nodelist()\n cluster.seeds.remove(node3)\n NUM_TOKENS = os.environ.get('NUM_TOKENS', '256')\n if DISABLE_VNODES:\n cluster.set_configuration_options(values={'initial_token': None, 'num_tokens': 1})\n else:\n cluster.set_configuration_options(values={'initial_token': None, 'num_tokens': NUM_TOKENS})\n cluster.start()\n\n debug('Inserting Data...')\n node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=3)'])\n cluster.flush()\n\n session = self.patient_cql_connection(node1)\n stress_table = 'keyspace1.standard1'\n query = SimpleStatement('select * from {} LIMIT 1'.format(stress_table), consistency_level=ConsistencyLevel.THREE)\n initial_data = rows_to_list(session.execute(query))\n\n for set_allow_unsafe_flag in [False, True]:\n debug('Stopping node 3.')\n node3.stop(gently=False)\n\n # completely delete the system keyspace data plus commitlog and saved caches\n for d in node3.data_directories():\n system_data = os.path.join(d, 'system')\n if os.path.exists(system_data):\n rmtree(system_data)\n\n for d in ['commitlogs', 'saved_caches']:\n p = os.path.join(node3.get_path(), 
d)\n if os.path.exists(p):\n rmtree(p)\n\n node3.set_configuration_options(values={'auto_bootstrap': False})\n mark = node3.mark_log()\n\n if set_allow_unsafe_flag:\n debug('Starting node3 with auto_bootstrap = false and replace_address = 127.0.0.3 and allow_unsafe_replace = true')\n node3.start(replace_address='127.0.0.3', wait_for_binary_proto=True, jvm_args=['-Dcassandra.allow_unsafe_replace=true'])\n # query should work again\n debug(\"Stopping old nodes\")\n node1.stop(gently=False, wait_other_notice=True)\n node2.stop(gently=False, wait_other_notice=True)\n\n debug(\"Verifying data on new node.\")\n session = self.patient_exclusive_cql_connection(node3)\n assert_all(session, 'SELECT * from {} LIMIT 1'.format(stress_table),\n expected=initial_data,\n cl=ConsistencyLevel.ONE)\n else:\n debug('Starting node 3 with auto_bootstrap = false and replace_address = 127.0.0.3')\n node3.start(replace_address='127.0.0.3', wait_other_notice=False)\n node3.watch_log_for('To perform this operation, please restart with -Dcassandra.allow_unsafe_replace=true',\n from_mark=mark, timeout=20)", "def fail_without_replace_test(self):\n debug(\"Starting cluster with 3 nodes.\")\n cluster = self.cluster\n cluster.populate(3)\n node1, node2, node3 = cluster.nodelist()\n cluster.seeds.remove(node3)\n NUM_TOKENS = os.environ.get('NUM_TOKENS', '256')\n if DISABLE_VNODES:\n cluster.set_configuration_options(values={'initial_token': None, 'num_tokens': 1})\n else:\n cluster.set_configuration_options(values={'initial_token': None, 'num_tokens': NUM_TOKENS})\n cluster.start()\n\n debug(\"Inserting Data...\")\n node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=3)'])\n\n mark = None\n for auto_bootstrap in (True, False):\n debug(\"Stopping node 3.\")\n node3.stop(gently=False)\n\n # completely delete the data, commitlog, and saved caches\n for d in chain([os.path.join(node3.get_path(), \"commitlogs\")],\n [os.path.join(node3.get_path(), \"saved_caches\")],\n node3.data_directories()):\n if os.path.exists(d):\n rmtree(d)\n\n node3.set_configuration_options(values={'auto_bootstrap': auto_bootstrap})\n debug(\"Starting node 3 with auto_bootstrap = {val}\".format(val=auto_bootstrap))\n node3.start(wait_other_notice=False)\n node3.watch_log_for('Use cassandra.replace_address if you want to replace this node', from_mark=mark, timeout=20)\n mark = node3.mark_log()", "def test_replaceDoesNotMutate(self):\n disco = create_disco()\n node = create_node(\"somewhere\")\n disco.onMessage(None, NodeActive(node))\n resolved_node = resolve(disco, \"myservice\", \"1.0\")\n\n node2 = create_node(\"somewhere\")\n node2.version = \"1.3\"\n disco.onMessage(None, ReplaceCluster(\"myservice\",\n SANDBOX_ENV,\n [node2]))\n self.assertEqual(resolved_node.version, \"1.0\")", "def _base_replica_repair_test(self, fail_mv_lock=False):\n\n self.prepare(rf=3)\n node1, node2, node3 = self.cluster.nodelist()\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Write initial data')\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)\".format(v=i))\n\n self._replay_batchlogs()\n\n logger.debug('Verify the data in the MV with CL=ALL')\n 
for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0],\n cl=ConsistencyLevel.ALL\n )\n\n logger.debug('Shutdown node1')\n node1.stop(wait_other_notice=True)\n logger.debug('Delete node1 data')\n node1.clear(clear_all=True)\n\n jvm_args = []\n if fail_mv_lock:\n if self.cluster.version() >= LooseVersion('3.10'): # CASSANDRA-10134\n jvm_args = ['-Dcassandra.allow_unsafe_replace=true', '-Dcassandra.replace_address={}'.format(node1.address())]\n jvm_args.append(\"-Dcassandra.test.fail_mv_locks_count=1000\")\n # this should not make Keyspace.apply throw WTE on failure to acquire lock\n node1.set_configuration_options(values={'write_request_timeout_in_ms': 100})\n logger.debug('Restarting node1 with jvm_args={}'.format(jvm_args))\n node1.start(wait_for_binary_proto=True, jvm_args=jvm_args)\n logger.debug('Shutdown node2 and node3')\n node2.stop(wait_other_notice=True)\n node3.stop(wait_other_notice=True)\n\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n\n logger.debug('Verify that there is no data on node1')\n for i in range(1000):\n assert_none(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i)\n )\n\n logger.debug('Restarting node2 and node3')\n node2.start(wait_for_binary_proto=True)\n node3.start(wait_for_binary_proto=True)\n\n # Just repair the base replica\n logger.debug('Starting repair on node1')\n node1.nodetool(\"repair ks t\")\n\n logger.debug('Verify data with cl=ALL')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )", "def test_replaceDoesNotDisableCircuitBreaker(self):\n disco = create_disco()\n node = create_node(\"somewhere\")\n disco.onMessage(None, NodeActive(node))\n resolved_node = resolve(disco, \"myservice\", \"1.0\")\n # Uh-oh it's a pretty broken node:\n for i in range(10):\n resolved_node.failure()\n\n node = create_node(\"somewhere\")\n disco.onMessage(None, ReplaceCluster(\"myservice\",\n SANDBOX_ENV,\n [node]))\n resolved_node2 = resolve(disco, \"myservice\", \"1.0\")\n self.assertEqual(resolved_node2, None)\n resolved_node.success()\n self.assertNodesEqual(resolve(disco, \"myservice\", \"1.0\"), node)", "def test_complex_repair(self):\n session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)\n node1, node2, node3, node4, node5 = self.cluster.nodelist()\n\n # we create the base table with gc_grace_seconds=5 so batchlog will expire after 5 seconds\n session.execute(\"CREATE TABLE ks.t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\"\n \"WITH gc_grace_seconds = 5\")\n session.execute((\"CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Shutdown node2 and node3')\n node2.stop()\n node3.stop(wait_other_notice=True)\n\n logger.debug('Write initial data to node1 (will be replicated to node4 and node5)')\n for i in range(1000):\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)\".format(v=i))\n\n logger.debug('Verify the data in the MV on node1 with CL=ONE')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )\n\n logger.debug('Close connection to node1')\n session.cluster.shutdown()\n logger.debug('Shutdown node1, node4 and node5')\n node1.stop()\n node4.stop()\n node5.stop()\n\n logger.debug('Start nodes 2 
and 3')\n node2.start()\n node3.start(wait_for_binary_proto=True)\n\n session2 = self.patient_cql_connection(node2)\n\n logger.debug('Verify the data in the MV on node2 with CL=ONE. No rows should be found.')\n for i in range(1000):\n assert_none(\n session2,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(i)\n )\n\n logger.debug('Write new data in node2 and node3 that overlap those in node1, node4 and node5')\n for i in range(1000):\n # we write i*2 as value, instead of i\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)\".format(v=i * 2))\n\n logger.debug('Verify the new data in the MV on node2 with CL=ONE')\n for i in range(1000):\n v = i * 2\n assert_one(\n session2,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(v),\n [v, v, 'a', 3.0]\n )\n\n logger.debug('Wait for batchlogs to expire from node2 and node3')\n time.sleep(5)\n\n logger.debug('Start remaining nodes')\n node1.start(wait_for_binary_proto=True)\n node4.start(wait_for_binary_proto=True)\n node5.start(wait_for_binary_proto=True)\n\n session = self.patient_cql_connection(node1)\n\n logger.debug('Read data from MV at QUORUM (old data should be returned)')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0],\n cl=ConsistencyLevel.QUORUM\n )\n\n logger.debug('Run global repair on node1')\n node1.repair()\n\n logger.debug('Read data from MV at quorum (new data should be returned after repair)')\n for i in range(1000):\n v = i * 2\n assert_one(\n session,\n \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(v),\n [v, v, 'a', 3.0],\n cl=ConsistencyLevel.QUORUM\n )", "def test_replace_cluster_resource_quota(self):\n pass", "def test_replace(self):\n disco = create_disco()\n node1 = create_node(\"somewhere\")\n node2 = create_node(\"somewhere2\")\n node3 = create_node(\"somewhere3\")\n node4 = create_node(\"somewhere4\")\n disco.onMessage(None, NodeActive(node1))\n disco.onMessage(None, NodeActive(node2))\n disco.onMessage(None, ReplaceCluster(\"myservice\",\n SANDBOX_ENV,\n [node3, node4]))\n self.assertEqual(knownNodes(disco, \"myservice\", \"sandbox\"), [node3, node4])", "def test_primary_key_update_failure(self):\r\n with self.assertRaises(ValidationError):\r\n TestQueryUpdateModel.objects(partition=uuid4(), cluster=3).update(cluster=5000)", "def test_really_complex_repair(self):\n session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)\n node1, node2, node3, node4, node5 = self.cluster.nodelist()\n\n # we create the base table with gc_grace_seconds=5 so batchlog will expire after 5 seconds\n session.execute(\"CREATE TABLE ks.t (id int, v int, v2 text, v3 decimal, PRIMARY KEY(id, v, v2))\"\n \"WITH gc_grace_seconds = 1\")\n session.execute((\"CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL AND v IS NOT NULL AND \"\n \"v2 IS NOT NULL PRIMARY KEY (v2, v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Shutdown node2 and node3')\n node2.stop(wait_other_notice=True)\n node3.stop(wait_other_notice=True)\n\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0)\")\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'a', 3.0)\")\n self._replay_batchlogs()\n logger.debug('Verify the data in the MV on node1 with CL=ONE')\n assert_all(session, \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]])\n\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) 
VALUES (1, 1, 'b', 3.0)\")\n session.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'b', 3.0)\")\n self._replay_batchlogs()\n logger.debug('Verify the data in the MV on node1 with CL=ONE')\n assert_all(session, \"SELECT * FROM ks.t_by_v WHERE v2 = 'b'\", [['b', 1, 1, 3.0], ['b', 2, 2, 3.0]])\n\n session.shutdown()\n\n logger.debug('Shutdown node1, node4 and node5')\n node1.stop()\n node4.stop()\n node5.stop()\n\n logger.debug('Start nodes 2 and 3')\n node2.start()\n node3.start(wait_for_binary_proto=True)\n\n session2 = self.patient_cql_connection(node2)\n session2.execute('USE ks')\n\n logger.debug('Verify the data in the MV on node2 with CL=ONE. No rows should be found.')\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\")\n\n logger.debug('Write new data in node2 that overlap those in node1')\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'c', 3.0)\")\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'c', 3.0)\")\n self._replay_batchlogs()\n assert_all(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'c'\", [['c', 1, 1, 3.0], ['c', 2, 2, 3.0]])\n\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'd', 3.0)\")\n session2.execute(\"INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'd', 3.0)\")\n self._replay_batchlogs()\n assert_all(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'd'\", [['d', 1, 1, 3.0], ['d', 2, 2, 3.0]])\n\n logger.debug(\"Composite delete of everything\")\n session2.execute(\"DELETE FROM ks.t WHERE id = 1 and v = 1\")\n session2.execute(\"DELETE FROM ks.t WHERE id = 2 and v = 2\")\n self._replay_batchlogs()\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'c'\")\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'd'\")\n\n logger.debug('Wait for batchlogs to expire from node2 and node3')\n time.sleep(5)\n\n logger.debug('Start remaining nodes')\n node1.start(wait_for_binary_proto=True)\n node4.start(wait_for_binary_proto=True)\n node5.start(wait_for_binary_proto=True)\n\n # at this point the data isn't repaired so we have an inconsistency.\n # this value should return None\n assert_all(\n session2,\n \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]],\n cl=ConsistencyLevel.QUORUM\n )\n\n logger.debug('Run global repair on node1')\n node1.repair()\n\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE v2 = 'a'\", cl=ConsistencyLevel.QUORUM)", "def test_primary_key_update_failure(self):\n with self.assertRaises(ValidationError):\n TestQueryUpdateModel.objects(partition=uuid4(), cluster=3).update(cluster=5000)", "def test_replace_cluster_policy(self):\n pass", "def test_rollback(self):\n os.system('rm config.txt; touch config.txt')\n test_oplog, primary_conn, mongos, solr = self.get_new_oplog()\n\n if not start_cluster():\n self.fail('Cluster could not be started successfully!')\n\n solr = DocManager()\n test_oplog.doc_manager = solr\n solr._delete() # equivalent to solr.delete(q='*: *')\n\n mongos['test']['test'].remove({})\n mongos['test']['test'].insert( \n {'_id': ObjectId('4ff74db3f646462b38000001'),\n 'name': 'paulie'},\n safe=True\n )\n while (mongos['test']['test'].find().count() != 1):\n time.sleep(1)\n cutoff_ts = test_oplog.get_last_oplog_timestamp()\n\n first_doc = {'name': 'paulie', '_ts': bson_ts_to_long(cutoff_ts),\n 'ns': 'test.test',\n '_id': ObjectId('4ff74db3f646462b38000001')}\n\n #try kill one, try restarting\n kill_mongo_proc(primary_conn.host, PORTS_ONE['PRIMARY'])\n\n new_primary_conn = Connection(HOSTNAME, 
int(PORTS_ONE['SECONDARY']))\n admin = new_primary_conn['admin']\n while admin.command(\"isMaster\")['ismaster'] is False:\n time.sleep(1)\n time.sleep(5)\n count = 0\n while True:\n try:\n mongos['test']['test'].insert({\n '_id': ObjectId('4ff74db3f646462b38000002'),\n 'name': 'paul'}, \n safe=True)\n break\n except OperationFailure:\n count += 1\n if count > 60:\n self.fail('Call to insert doc failed too many times')\n time.sleep(1)\n continue\n while (mongos['test']['test'].find().count() != 2):\n time.sleep(1)\n kill_mongo_proc(primary_conn.host, PORTS_ONE['SECONDARY'])\n start_mongo_proc(PORTS_ONE['PRIMARY'], \"demo-repl\", \"/replset1a\",\n \"/replset1a.log\", None)\n\n #wait for master to be established\n while primary_conn['admin'].command(\"isMaster\")['ismaster'] is False:\n time.sleep(1)\n\n start_mongo_proc(PORTS_ONE['SECONDARY'], \"demo-repl\", \"/replset1b\",\n \"/replset1b.log\", None)\n\n #wait for secondary to be established\n admin = new_primary_conn['admin']\n while admin.command(\"replSetGetStatus\")['myState'] != 2:\n time.sleep(1)\n while retry_until_ok(mongos['test']['test'].find().count) != 1:\n time.sleep(1)\n\n self.assertEqual(str(new_primary_conn.port), PORTS_ONE['SECONDARY'])\n self.assertEqual(str(primary_conn.port), PORTS_ONE['PRIMARY'])\n\n last_ts = test_oplog.get_last_oplog_timestamp()\n second_doc = {'name': 'paul', '_ts': bson_ts_to_long(last_ts),\n 'ns': 'test.test', \n '_id': ObjectId('4ff74db3f646462b38000002')}\n\n test_oplog.doc_manager.upsert(first_doc)\n test_oplog.doc_manager.upsert(second_doc)\n\n test_oplog.rollback()\n test_oplog.doc_manager.commit()\n results = solr._search()\n\n assert(len(results) == 1)\n\n self.assertEqual(results[0]['name'], 'paulie')\n self.assertTrue(results[0]['_ts'] <= bson_ts_to_long(cutoff_ts))\n\n #test_oplog.join()", "def test_cbrestoremgr_should_not_change_replica_count_in_restore_bucket(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=10000)\n if not self.new_replicas:\n self.fail(\"This test needs to pass param 'new-replicas' to run\")\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.log.info(\"Start backup cluster\")\n self.backup_cluster_validate()\n self.backup_restore_validate()\n\n self.log.info(\"replicas from backup bucket: {0}\".format(self.num_replicas))\n self.log.info(\"replica in restore bucket should be {0} after restore\"\\\n .format(self.new_replicas))\n rest_r = RestConnection(self.backupset.restore_cluster_host)\n for bucket in self.buckets:\n bucket_stats = rest_r.get_bucket_json(bucket.name)\n if self.new_replicas != bucket_stats[\"replicaNumber\"]:\n self.fail(\"replia number in bucket {0} did change after restore\"\\\n .format(bucket.name))\n self.log.info(\"Verified replica in bucket {0}: {1}\"\\\n .format(bucket.name,\n bucket_stats[\"replicaNumber\"]))", "def multi_dc_replace_with_rf1_test(self):\n cluster = self.cluster\n cluster.populate([1, 1])\n cluster.start()\n node1, node2 = cluster.nodelist()\n\n node1 = cluster.nodes['node1']\n yaml_config = \"\"\"\n # Create the keyspace and table\n keyspace: keyspace1\n keyspace_definition: |\n CREATE KEYSPACE keyspace1 WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 1, 'dc2': 1};\n table: users\n table_definition:\n CREATE TABLE users (\n username text,\n first_name text,\n last_name text,\n email text,\n PRIMARY KEY(username)\n ) WITH compaction = {'class':'SizeTieredCompactionStrategy'};\n insert:\n partitions: fixed(1)\n batchtype: UNLOGGED\n queries:\n 
read:\n cql: select * from users where username = ?\n fields: samerow\n \"\"\"\n with tempfile.NamedTemporaryFile(mode='w+') as stress_config:\n stress_config.write(yaml_config)\n stress_config.flush()\n node1.stress(['user', 'profile=' + stress_config.name, 'n=10k', 'no-warmup',\n 'ops(insert=1)', '-rate', 'threads=50'])\n\n session = self.patient_cql_connection(node1)\n\n # change system_auth keyspace to 2 (default is 1) to avoid\n # \"Unable to find sufficient sources for streaming\" warning\n if cluster.cassandra_version() >= '2.2.0':\n session.execute(\"\"\"\n ALTER KEYSPACE system_auth\n WITH replication = {'class':'SimpleStrategy', 'replication_factor':2};\n \"\"\")\n\n # Save initial data\n stress_table = 'keyspace1.users'\n query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.TWO)\n initial_data = rows_to_list(session.execute(query))\n\n # stop node to replace\n debug(\"Stopping node to replace.\")\n node2.stop(wait_other_notice=True)\n\n node3 = new_node(cluster, data_center='dc2')\n node3.start(replace_address='127.0.0.2', wait_for_binary_proto=True)\n\n assert_bootstrap_state(self, node3, 'COMPLETED')\n\n # Check that keyspace was replicated from dc1 to dc2\n self.assertFalse(node3.grep_log(\"Unable to find sufficient sources for streaming range\"))\n\n # query should work again with node1 stopped\n node1.stop(wait_other_notice=True)\n debug(\"Verifying data on new node.\")\n session = self.patient_exclusive_cql_connection(node3)\n assert_all(session, 'SELECT * from {} LIMIT 1'.format(stress_table),\n expected=initial_data,\n cl=ConsistencyLevel.LOCAL_ONE)", "def test_replace_cluster_resource_quota_status(self):\n pass", "def test_replaceEmpty(self):\n disco = create_disco()\n node1 = create_node(\"somewhere\")\n node2 = create_node(\"somewhere2\")\n disco.onMessage(None, ReplaceCluster(\"myservice\",\n SANDBOX_ENV,\n [node1, node2]))\n self.assertEqual(knownNodes(disco, \"myservice\", \"sandbox\"), [node1, node2])", "def test_MB_51219(self):\n len_of_nodes_to_afo = len(self.failover_order[0].split(\":\"))\n nodes_to_fo = dict()\n nodes_in_cluster = self.rest.get_nodes()\n for node in nodes_in_cluster:\n if len_of_nodes_to_afo <= 0:\n break\n if str(self.cluster.master.ip) == str(node.ip):\n continue\n nodes_to_fo[node] = self.failover_method\n len_of_nodes_to_afo -= 1\n self.cluster_util.update_cluster_nodes_service_list(self.cluster)\n self.nodes_to_fail = nodes_to_fo\n self.__update_server_obj()\n try:\n failover_task = ConcurrentFailoverTask(\n task_manager=self.task_manager, master=self.orchestrator,\n servers_to_fail=self.nodes_to_fail,\n expected_fo_nodes=self.fo_events,\n task_type=\"induce_failure\")\n self.task_manager.add_new_task(failover_task)\n self.task_manager.get_task_result(failover_task)\n dictionary = dict(list(self.nodes_to_fail.items())[:1])\n failover_task = ConcurrentFailoverTask(\n task_manager=self.task_manager, master=self.orchestrator,\n servers_to_fail=dictionary,\n task_type=\"revert_failure\")\n self.task_manager.add_new_task(failover_task)\n self.task_manager.get_task_result(failover_task)\n timeout = int(time()) + 15\n task_id_changed = False\n self.prev_rebalance_status_id = None\n while not task_id_changed and int(time()) < timeout:\n server_task = self.rest.ns_server_tasks(\n task_type=\"rebalance\", task_sub_type=\"failover\")\n if server_task and server_task[\"statusId\"] != \\\n self.prev_rebalance_status_id:\n task_id_changed = True\n self.prev_rebalance_status_id = 
server_task[\"statusId\"]\n self.log.debug(\"New failover status id: %s\"\n % server_task[\"statusId\"])\n self.assertTrue(task_id_changed,\n \"Fail-over did not happen as expected\")\n self.bucket_util._wait_warmup_completed(self.cluster.buckets[0],\n servers=[\n self.cluster.master],\n wait_time=30)\n finally:\n # reverting failure from all the nodes\n failover_task = ConcurrentFailoverTask(\n task_manager=self.task_manager, master=self.orchestrator,\n servers_to_fail=self.nodes_to_fail,\n task_type=\"revert_failure\")\n self.task_manager.add_new_task(failover_task)\n self.task_manager.get_task_result(failover_task)\n result = self.cluster_util.rebalance(self.cluster)\n self.assertTrue(result, \"Final re-balance failed\")", "def _replace_node_test(self, gently):\n debug(\"Starting cluster with 3 nodes.\")\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n if DISABLE_VNODES:\n num_tokens = 1\n else:\n # a little hacky but grep_log returns the whole line...\n num_tokens = int(node3.get_conf_option('num_tokens'))\n\n debug(\"testing with num_tokens: {}\".format(num_tokens))\n\n debug(\"Inserting Data...\")\n node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=3)'])\n\n session = self.patient_cql_connection(node1)\n session.default_timeout = 45\n stress_table = 'keyspace1.standard1'\n query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.THREE)\n initial_data = rows_to_list(session.execute(query))\n\n # stop node, query should not work with consistency 3\n debug(\"Stopping node 3.\")\n node3.stop(gently=gently, wait_other_notice=True)\n\n debug(\"Testing node stoppage (query should fail).\")\n with self.assertRaises(NodeUnavailable):\n try:\n query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.THREE)\n session.execute(query)\n except (Unavailable, ReadTimeout):\n raise NodeUnavailable(\"Node could not be queried.\")\n\n # replace node 3 with node 4\n debug(\"Starting node 4 to replace node 3\")\n node4 = Node('node4', cluster=cluster, auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160),\n storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0',\n initial_token=None, binary_interface=('127.0.0.4', 9042))\n cluster.add(node4, False)\n node4.start(replace_address='127.0.0.3', wait_for_binary_proto=True)\n\n debug(\"Verifying tokens migrated sucessfully\")\n moved_tokens = node4.grep_log(\"Token .* changing ownership from /127.0.0.3 to /127.0.0.4\")\n debug(\"number of moved tokens: {}\".format(len(moved_tokens)))\n self.assertEqual(len(moved_tokens), num_tokens)\n\n # check that restarting node 3 doesn't work\n debug(\"Try to restart node 3 (should fail)\")\n node3.start(wait_other_notice=False)\n collision_log = node1.grep_log(\"between /127.0.0.3 and /127.0.0.4; /127.0.0.4 is the new owner\")\n debug(collision_log)\n self.assertEqual(len(collision_log), 1)\n node3.stop(gently=False)\n\n # query should work again\n debug(\"Stopping old nodes\")\n node1.stop(gently=False, wait_other_notice=True)\n node2.stop(gently=False, wait_other_notice=True)\n\n debug(\"Verifying data on new node.\")\n session = self.patient_exclusive_cql_connection(node4)\n assert_all(session, 'SELECT * from {} LIMIT 1'.format(stress_table),\n expected=initial_data,\n cl=ConsistencyLevel.ONE)", "def test_replace_cluster_role(self):\n pass", "def check_if_cluster_was_upgraded():\n return True if \"replaces\" in 
get_ocs_csv().get().get(\"spec\") else False", "def test_add_node_after_wide_mv_with_range_deletions(self):\n\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int, v int, PRIMARY KEY (id, v)) WITH compaction = { 'class': 'SizeTieredCompactionStrategy', 'enabled': 'false' }\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n for i in range(10):\n for j in range(100):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=j))\n\n self.cluster.flush()\n\n for i in range(10):\n for j in range(100):\n assert_one(session, \"SELECT * FROM t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n for i in range(10):\n for j in range(100):\n if j % 10 == 0:\n session.execute(\"DELETE FROM t WHERE id = {} AND v >= {} and v < {}\".format(i, j, j + 2))\n\n self.cluster.flush()\n\n for i in range(10):\n for j in range(100):\n if j % 10 == 0 or (j - 1) % 10 == 0:\n assert_none(session, \"SELECT * FROM t WHERE id = {} and v = {}\".format(i, j))\n assert_none(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j))\n else:\n assert_one(session, \"SELECT * FROM t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n node4 = new_node(self.cluster, data_center=\"dc1\")\n node4.set_configuration_options(values={'max_mutation_size_in_kb': 25}) # CASSANDRA-11670\n logger.debug(\"Start join at {}\".format(time.strftime(\"%H:%M:%S\")))\n node4.start(wait_for_binary_proto=True, jvm_args=[\"-Dcassandra.migration_task_wait_in_seconds={}\".format(MIGRATION_WAIT)])\n\n session2 = self.patient_exclusive_cql_connection(node4)\n\n for i in range(10):\n for j in range(100):\n if j % 10 == 0 or (j - 1) % 10 == 0:\n assert_none(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j))\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j))\n else:\n assert_one(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n for i in range(10):\n for j in range(100, 110):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=j))\n\n for i in range(10):\n for j in range(110):\n if j < 100 and (j % 10 == 0 or (j - 1) % 10 == 0):\n assert_none(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j))\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j))\n else:\n assert_one(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])", "def test_update_hyperflex_cluster(self):\n pass", "def test_replace_cluster_network(self):\n pass", "def test_unavailable_server(cluster):\n node2 = cluster.instances[\"node2\"]\n global uuids\n node2.query(\n \"\"\"\n CREATE TABLE test0 UUID '{}'\n (id Int32) ENGINE = MergeTree() ORDER BY id\n SETTINGS storage_policy = 'web';\n \"\"\".format(\n uuids[0]\n )\n )\n node2.stop_clickhouse()\n try:\n # NOTE: you cannot use separate disk instead, since MergeTree engine will\n # try to lookup parts on all disks (to look unexpected disks with parts)\n # and fail because of unavailable server.\n 
node2.exec_in_container(\n [\n \"bash\",\n \"-c\",\n \"sed -i 's#http://nginx:80/test1/#http://nginx:8080/test1/#' /etc/clickhouse-server/config.d/storage_conf_web.xml\",\n ]\n )\n with pytest.raises(Exception):\n # HTTP retries with backup can take awhile\n node2.start_clickhouse(start_wait_sec=120, retry_start=False)\n assert node2.contains_in_log(\n \"Caught exception while loading metadata.*Connection refused\"\n )\n assert node2.contains_in_log(\n \"HTTP request to \\`http://nginx:8080/test1/.*\\` failed at try 1/10 with bytes read: 0/unknown. Error: Connection refused.\"\n )\n finally:\n node2.exec_in_container(\n [\n \"bash\",\n \"-c\",\n \"sed -i 's#http://nginx:8080/test1/#http://nginx:80/test1/#' /etc/clickhouse-server/config.d/storage_conf_web.xml\",\n ]\n )\n node2.start_clickhouse()\n node2.query(\"DROP TABLE test0 SYNC\")", "def sstable_repairedset_test(self):\n cluster = self.cluster\n cluster.set_configuration_options(values={'hinted_handoff_enabled': False})\n cluster.populate(2).start()\n node1, node2 = cluster.nodelist()\n node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=2)', 'compaction(strategy=SizeTieredCompactionStrategy,enabled=false)', '-rate', 'threads=50'])\n\n node1.flush()\n node2.flush()\n\n node2.stop(gently=False)\n\n node2.run_sstablerepairedset(keyspace='keyspace1')\n node2.start(wait_for_binary_proto=True)\n\n initialOut1 = node1.run_sstablemetadata(keyspace='keyspace1').stdout\n initialOut2 = node2.run_sstablemetadata(keyspace='keyspace1').stdout\n\n matches = findall('(?<=Repaired at:).*', '\\n'.join([initialOut1, initialOut2]))\n debug(\"Repair timestamps are: {}\".format(matches))\n\n uniquematches = set(matches)\n matchcount = Counter(matches)\n\n self.assertGreaterEqual(len(uniquematches), 2, uniquematches)\n\n self.assertGreaterEqual(max(matchcount), 1, matchcount)\n\n self.assertIn('Repaired at: 0', '\\n'.join([initialOut1, initialOut2]))\n\n node1.stop()\n node2.stress(['write', 'n=15K', 'no-warmup', '-schema', 'replication(factor=2)'])\n node2.flush()\n node1.start(wait_for_binary_proto=True)\n\n if cluster.version() >= \"2.2\":\n node1.repair()\n else:\n node1.nodetool(\"repair -par -inc\")\n\n finalOut1 = node1.run_sstablemetadata(keyspace='keyspace1').stdout\n finalOut2 = node2.run_sstablemetadata(keyspace='keyspace1').stdout\n\n matches = findall('(?<=Repaired at:).*', '\\n'.join([finalOut1, finalOut2]))\n\n debug(matches)\n\n uniquematches = set(matches)\n matchcount = Counter(matches)\n\n self.assertGreaterEqual(len(uniquematches), 2)\n\n self.assertGreaterEqual(max(matchcount), 2)\n\n self.assertNotIn('Repaired at: 0', '\\n'.join([finalOut1, finalOut2]))", "def multiple_repair_test(self):\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n session = self.patient_cql_connection(node1)\n create_ks(session, 'ks', 3)\n create_cf(session, 'cf', read_repair=0.0, columns={'c1': 'text', 'c2': 'text'})\n\n debug(\"insert data\")\n\n insert_c1c2(session, keys=range(1, 50), consistency=ConsistencyLevel.ALL)\n node1.flush()\n\n debug(\"bringing down node 3\")\n node3.flush()\n node3.stop(gently=False)\n\n debug(\"inserting additional data into node 1 and 2\")\n insert_c1c2(session, keys=range(50, 100), consistency=ConsistencyLevel.TWO)\n node1.flush()\n node2.flush()\n\n debug(\"restarting and repairing node 3\")\n node3.start(wait_for_binary_proto=True)\n\n if cluster.version() >= \"2.2\":\n node3.repair()\n else:\n node3.nodetool(\"repair -par -inc\")\n\n # wait stream 
handlers to be closed on windows\n # after session is finished (See CASSANDRA-10644)\n if is_win:\n time.sleep(2)\n\n debug(\"stopping node 2\")\n node2.stop(gently=False)\n\n debug(\"inserting data in nodes 1 and 3\")\n insert_c1c2(session, keys=range(100, 150), consistency=ConsistencyLevel.TWO)\n node1.flush()\n node3.flush()\n\n debug(\"start and repair node 2\")\n node2.start(wait_for_binary_proto=True)\n\n if cluster.version() >= \"2.2\":\n node2.repair()\n else:\n node2.nodetool(\"repair -par -inc\")\n\n debug(\"replace node and check data integrity\")\n node3.stop(gently=False)\n node5 = Node('node5', cluster, True, ('127.0.0.5', 9160), ('127.0.0.5', 7000), '7500', '0', None, ('127.0.0.5', 9042))\n cluster.add(node5, False)\n node5.start(replace_address='127.0.0.3', wait_other_notice=True)\n\n assert_one(session, \"SELECT COUNT(*) FROM ks.cf LIMIT 200\", [149])" ]
[ "0.6814609", "0.68009305", "0.6593998", "0.639997", "0.63156056", "0.6274903", "0.627408", "0.6214506", "0.6018259", "0.58900636", "0.5862111", "0.5845466", "0.5839016", "0.5836313", "0.5830308", "0.5829698", "0.57167", "0.56765765", "0.5672758", "0.56702614", "0.5639919", "0.5556977", "0.55454195", "0.55444753", "0.5536935", "0.55210906", "0.5507621", "0.550361", "0.54740727", "0.5444304" ]
0.73329014
0
Test that multidc replace works when rf=1 on each dc
def multi_dc_replace_with_rf1_test(self): cluster = self.cluster cluster.populate([1, 1]) cluster.start() node1, node2 = cluster.nodelist() node1 = cluster.nodes['node1'] yaml_config = """ # Create the keyspace and table keyspace: keyspace1 keyspace_definition: | CREATE KEYSPACE keyspace1 WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 1, 'dc2': 1}; table: users table_definition: CREATE TABLE users ( username text, first_name text, last_name text, email text, PRIMARY KEY(username) ) WITH compaction = {'class':'SizeTieredCompactionStrategy'}; insert: partitions: fixed(1) batchtype: UNLOGGED queries: read: cql: select * from users where username = ? fields: samerow """ with tempfile.NamedTemporaryFile(mode='w+') as stress_config: stress_config.write(yaml_config) stress_config.flush() node1.stress(['user', 'profile=' + stress_config.name, 'n=10k', 'no-warmup', 'ops(insert=1)', '-rate', 'threads=50']) session = self.patient_cql_connection(node1) # change system_auth keyspace to 2 (default is 1) to avoid # "Unable to find sufficient sources for streaming" warning if cluster.cassandra_version() >= '2.2.0': session.execute(""" ALTER KEYSPACE system_auth WITH replication = {'class':'SimpleStrategy', 'replication_factor':2}; """) # Save initial data stress_table = 'keyspace1.users' query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.TWO) initial_data = rows_to_list(session.execute(query)) # stop node to replace debug("Stopping node to replace.") node2.stop(wait_other_notice=True) node3 = new_node(cluster, data_center='dc2') node3.start(replace_address='127.0.0.2', wait_for_binary_proto=True) assert_bootstrap_state(self, node3, 'COMPLETED') # Check that keyspace was replicated from dc1 to dc2 self.assertFalse(node3.grep_log("Unable to find sufficient sources for streaming range")) # query should work again with node1 stopped node1.stop(wait_other_notice=True) debug("Verifying data on new node.") session = self.patient_exclusive_cql_connection(node3) assert_all(session, 'SELECT * from {} LIMIT 1'.format(stress_table), expected=initial_data, cl=ConsistencyLevel.LOCAL_ONE)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_data_source_soaps_id_replace_post(self):\n pass", "def test_replace_groups(self):\n pass", "def test_replace_group(self):\n pass", "def test_replace_identity(self):\n pass", "def _add_dc_after_mv_test(self, rf, nts):\n\n session = self.prepare(rf=rf)\n\n logger.debug(\"Creating schema\")\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Writing 1k to base\")\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=-i))\n\n logger.debug(\"Reading 1k from view\")\n for i in range(1000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(-i), [-i, i])\n\n logger.debug(\"Reading 1k from base\")\n for i in range(1000):\n assert_one(session, \"SELECT * FROM t WHERE id = {}\".format(i), [i, -i])\n\n logger.debug(\"Bootstrapping new node in another dc\")\n node4 = new_node(self.cluster, data_center='dc2')\n node4.start(wait_for_binary_proto=True,\n jvm_args=[\"-Dcassandra.migration_task_wait_in_seconds={}\".format(MIGRATION_WAIT),\n \"-Dcassandra.reset_bootstrap_progress=false\"])\n\n logger.debug(\"Bootstrapping new node in another dc\")\n node5 = new_node(self.cluster, remote_debug_port='1414', data_center='dc2')\n node5.start(jvm_args=[\"-Dcassandra.migration_task_wait_in_seconds={}\".format(MIGRATION_WAIT),\n \"-Dcassandra.reset_bootstrap_progress=false\"],\n wait_for_binary_proto=True)\n if nts:\n session.execute(\"alter keyspace ks with replication = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1}\")\n session.execute(\"alter keyspace system_auth with replication = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1}\")\n session.execute(\"alter keyspace system_traces with replication = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1}\")\n node4.nodetool('rebuild dc1')\n node5.nodetool('rebuild dc1')\n\n cl = ConsistencyLevel.LOCAL_ONE if nts else ConsistencyLevel.ONE\n session2 = self.patient_exclusive_cql_connection(node4, consistency_level=cl)\n\n logger.debug(\"Verifying data from new node in view\")\n for i in range(1000):\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE v = {}\".format(-i), [-i, i])\n\n logger.debug(\"Inserting 100 into base\")\n for i in range(1000, 1100):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=-i))\n\n logger.debug(\"Verify 100 in view\")\n for i in range(1000, 1100):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(-i), [-i, i])", "def test_replace(self):\n\n rule = ('alert(name:\"test1\"; side:client; match:\"AB\"; replace:\"XY\";)\\n'\n 'alert(name:\"test2\"; side:server; match:\"XY\";)\\n')\n\n tests = {\n (\"AB\", \"XY\"): [\"proxying connection from\",\n \"INFO : filter matched: 'test1'\",\n \"INFO : filter matched: 'test2'\"],\n \"XY\": [\"proxying connection from\",\n \"INFO : filter matched: 'test2'\"],\n (\"ABAB\", \"XYXY\"): [\"proxying connection from\",\n \"INFO : filter matched: 'test1'\",\n \"INFO : filter matched: 'test1'\",\n \"INFO : filter matched: 'test2'\",\n \"INFO : filter matched: 'test2'\"],\n }\n\n self.run_rules(rule, tests, echo=True)", "def test_replaceIsEnvironmentSpecific(self):\n node = create_node(\"somewhere\", \"myservice\", \"env1\")\n node2 = create_node(\"somewhere2\", \"myservice\", \"env2\")\n node3 = create_node(\"somewhere3\", \"myservice\", \"env2\")\n disco = create_disco()\n disco.onMessage(None, 
NodeActive(node))\n disco.onMessage(None, NodeActive(node2))\n disco.onMessage(None, ReplaceCluster(node3.service, node3.environment,\n [node3]))\n self.assertEqual((knownNodes(disco, \"myservice\", \"env1\"),\n knownNodes(disco, \"myservice\", \"env2\")),\n ([node], [node3]))", "def test_replace(self):\n disco = create_disco()\n node1 = create_node(\"somewhere\")\n node2 = create_node(\"somewhere2\")\n node3 = create_node(\"somewhere3\")\n node4 = create_node(\"somewhere4\")\n disco.onMessage(None, NodeActive(node1))\n disco.onMessage(None, NodeActive(node2))\n disco.onMessage(None, ReplaceCluster(\"myservice\",\n SANDBOX_ENV,\n [node3, node4]))\n self.assertEqual(knownNodes(disco, \"myservice\", \"sandbox\"), [node3, node4])", "def test_add_dc_after_mv_simple_replication(self):\n\n self._add_dc_after_mv_test(1, False)", "def test_add_dc_after_mv_network_replication(self):\n\n self._add_dc_after_mv_test({'dc1': 1}, True)", "def test_replaceDoesNotDisableCircuitBreaker(self):\n disco = create_disco()\n node = create_node(\"somewhere\")\n disco.onMessage(None, NodeActive(node))\n resolved_node = resolve(disco, \"myservice\", \"1.0\")\n # Uh-oh it's a pretty broken node:\n for i in range(10):\n resolved_node.failure()\n\n node = create_node(\"somewhere\")\n disco.onMessage(None, ReplaceCluster(\"myservice\",\n SANDBOX_ENV,\n [node]))\n resolved_node2 = resolve(disco, \"myservice\", \"1.0\")\n self.assertEqual(resolved_node2, None)\n resolved_node.success()\n self.assertNodesEqual(resolve(disco, \"myservice\", \"1.0\"), node)", "def test_dvidir(self):\n self.chck_triple('dvidir')", "def test_replaceDoesNotMutate(self):\n disco = create_disco()\n node = create_node(\"somewhere\")\n disco.onMessage(None, NodeActive(node))\n resolved_node = resolve(disco, \"myservice\", \"1.0\")\n\n node2 = create_node(\"somewhere\")\n node2.version = \"1.3\"\n disco.onMessage(None, ReplaceCluster(\"myservice\",\n SANDBOX_ENV,\n [node2]))\n self.assertEqual(resolved_node.version, \"1.0\")", "def resumable_replace_test(self):\n\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n node1.stress(['write', 'n=100K', 'no-warmup', '-schema', 'replication(factor=3)'])\n\n session = self.patient_cql_connection(node1)\n stress_table = 'keyspace1.standard1'\n query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.THREE)\n initial_data = rows_to_list(session.execute(query))\n\n node3.stop(gently=False)\n\n # kill node1 in the middle of streaming to let it fail\n t = InterruptBootstrap(node1)\n t.start()\n # replace node 3 with node 4\n debug(\"Starting node 4 to replace node 3\")\n node4 = Node('node4', cluster=cluster, auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160),\n storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0',\n initial_token=None, binary_interface=('127.0.0.4', 9042))\n # keep timeout low so that test won't hang\n node4.set_configuration_options(values={'streaming_socket_timeout_in_ms': 1000})\n cluster.add(node4, False)\n try:\n node4.start(jvm_args=[\"-Dcassandra.replace_address_first_boot=127.0.0.3\"], wait_other_notice=False)\n except NodeError:\n pass # node doesn't start as expected\n t.join()\n\n # bring back node1 and invoke nodetool bootstrap to resume bootstrapping\n node1.start()\n node4.nodetool('bootstrap resume')\n # check if we skipped already retrieved ranges\n node4.watch_log_for(\"already available. 
Skipping streaming.\")\n # wait for node3 ready to query\n node4.watch_log_for(\"Listening for thrift clients...\")\n\n # check if 2nd bootstrap succeeded\n assert_bootstrap_state(self, node4, 'COMPLETED')\n\n # query should work again\n debug(\"Stopping old nodes\")\n node1.stop(gently=False, wait_other_notice=True)\n node2.stop(gently=False, wait_other_notice=True)\n\n debug(\"Verifying data on new node.\")\n session = self.patient_exclusive_cql_connection(node4)\n assert_all(session, 'SELECT * from {} LIMIT 1'.format(stress_table),\n expected=initial_data,\n cl=ConsistencyLevel.ONE)", "def test_substitutions_in_word(self):\n m = strutils.MultiReplace({r'cat': 'kedi', r'purple': 'mor', })\n self.assertEqual(m.sub('Thecatispurple'), 'Thekediismor')", "def test_partially_update_device_group_by_id1(self):\n pass", "def test_partially_update_device_by_id1(self):\n pass", "def test_replacement_rules(sdc_builder, sdc_executor, replacement_rule_property):\n\n DATA = {'name': 'Al Gore', 'birthplace': 'Washington, D.C.'}\n\n EXPECTED_OUTPUT_IF_SET_TO_NULL = {'name': 'Al Gore', 'birthplace': None}\n EXPECTED_OUTPUT_IF_NEW_VALUE = {'name': 'Al Gore', 'birthplace': 'Houston'}\n\n if replacement_rule_property == 'SET_TO_NULL':\n replacement_rules = [{'setToNull': True, 'fields': '/birthplace'}]\n elif replacement_rule_property == 'NEW_VALUE':\n replacement_rules = [{'setToNull': False, 'fields': '/birthplace', 'replacement': 'Houston'}]\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n\n dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.data_format = 'JSON'\n dev_raw_data_source.raw_data = json.dumps(DATA)\n dev_raw_data_source.stop_after_first_batch = True\n\n field_replacer = pipeline_builder.add_stage('Field Replacer')\n field_replacer.replacement_rules = replacement_rules\n\n wiretap = pipeline_builder.add_wiretap()\n\n dev_raw_data_source >> field_replacer >> wiretap.destination\n\n pipeline = pipeline_builder.build()\n\n sdc_executor.add_pipeline(pipeline)\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n if replacement_rule_property == 'SET_TO_NULL':\n record = wiretap.output_records[0]\n assert record.field == EXPECTED_OUTPUT_IF_SET_TO_NULL\n elif replacement_rule_property == 'NEW_VALUE':\n record = wiretap.output_records[0]\n assert record.field == EXPECTED_OUTPUT_IF_NEW_VALUE", "def test_lo_interface_tc4_replace(duthost):\n json_patch = [\n {\n \"op\": \"remove\",\n \"path\": \"/LOOPBACK_INTERFACE/Loopback0|FC00:1::32~1128\"\n },\n {\n \"op\": \"remove\",\n \"path\": \"/LOOPBACK_INTERFACE/Loopback0|10.1.0.32~132\"\n },\n {\n \"op\": \"add\",\n \"path\": \"/LOOPBACK_INTERFACE/Loopback0|10.1.0.33~132\",\n \"value\": {}\n },\n {\n \"op\": \"add\",\n \"path\": \"/LOOPBACK_INTERFACE/Loopback0|FC00:1::33~1128\",\n \"value\": {}\n }\n ]\n\n tmpfile = generate_tmpfile(duthost)\n logger.info(\"tmpfile {}\".format(tmpfile))\n\n try:\n output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile)\n expect_op_success(duthost, output)\n\n check_show_ip_intf(duthost, \"Loopback0\", [\"10.1.0.33/32\"], [\"10.1.0.32/32\"], is_ipv4=True)\n check_show_ip_intf(duthost, \"Loopback0\", [\"fc00:1::33/128\"], [\"fc00:1::32/128\"], is_ipv4=False)\n finally:\n delete_tmpfile(duthost, tmpfile)", "def test_ccds(self):\n #TODO write ccds tests", "def test_replace():\n car_before = Car('Opel', 'Astra', '1.4 L Family 1 I4', False, 2005, False, 12)\n car_after = car_before._replace(version='1.6 L Family 1 I4', availability=True)\n car_expected = 
Car('Opel', 'Astra', '1.6 L Family 1 I4', True, 2005, False, 12)\n assert car_after == car_expected", "def test_simple_substitutions(self):\n m = strutils.MultiReplace({r'cat': 'kedi', r'purple': 'mor', })\n self.assertEqual(m.sub('The cat is purple'), 'The kedi is mor')", "def test_patch_pci_switch(self):\n pass", "def test_update_device_group_by_id1(self):\n pass", "def test_partially_update_device_group_by_id(self):\n pass", "def test_update_device_by_id1(self):\n pass", "def test_redundant_set_field(self):\n SF1, SF2 = (\"SET_FIELD\", (\"IPV4_DST\", 1)), (\"SET_FIELD\", (\"IPV4_DST\", 2))\n SF3, SF4 = (\"SET_FIELD\", (\"IPV4_DST\", 3)), (\"SET_FIELD\", (\"IPV4_DST\", 4))\n OUT = (\"OUTPUT\", 1)\n n1 = normalise([\n Rule(priority=10,\n instructions=inst_from_acts([SF2, OUT])),\n Rule(priority=0)\n ])\n n2 = normalise([\n Rule(priority=10,\n instructions=inst_from_acts([SF1, SF2, OUT])),\n Rule(priority=0)\n ])\n n3 = normalise([\n Rule(priority=10,\n instructions=inst_from_acts([SF3, SF2, OUT])),\n Rule(priority=0)\n ])\n n4 = normalise([\n Rule(priority=10,\n instructions=inst_from_acts([SF4, SF3, SF1, SF2, OUT])),\n Rule(priority=0)\n ])\n n5 = normalise([\n Rule(priority=10,\n instructions=inst_from_acts([SF2, SF2, SF2, SF2, OUT])),\n Rule(priority=0)\n ])\n self.assertTrue(check_equal(n1, n2))\n self.assertTrue(check_equal(n1, n3))\n self.assertTrue(check_equal(n1, n4))\n self.assertTrue(check_equal(n1, n5))\n\n # Sanity check\n n6 = normalise([\n Rule(priority=10,\n instructions=inst_from_acts([SF4, SF3, SF1, SF1, OUT])),\n Rule(priority=0)\n ])\n self.assertFalse(check_equal(n1, n6))", "def test_WINNF_FT_S_REG_18(self):\n\n # Register the devices\n device_a = json.load(\n open(os.path.join('testcases', 'testdata', 'device_a.json')))\n device_b = json.load(\n open(os.path.join('testcases', 'testdata', 'device_b.json')))\n device_c = json.load(\n open(os.path.join('testcases', 'testdata', 'device_c.json')))\n devices = [device_a, device_b, device_c]\n for device in devices:\n self._sas_admin.InjectFccId({'fccId': device['fccId']})\n device['measCapability'] = []\n request = {'registrationRequest': devices}\n response = self._sas.Registration(request)['registrationResponse']\n # Check registration response\n for resp in response:\n self.assertTrue('cbsdId' in resp)\n self.assertEqual(resp['response']['responseCode'], 0)\n del request, response\n\n # Blacklist the third device\n self._sas_admin.BlacklistByFccId({'fccId':device_c['fccId']})\n\n # Re-register the devices\n request = {'registrationRequest': devices}\n response = self._sas.Registration(request)['registrationResponse']\n\n # Check registration response\n self.assertEqual(len(response), len(devices))\n for response_num, resp in enumerate(response[:2]):\n self.assertEqual(resp['response']['responseCode'], 0)\n self.assertTrue('cbsdId' in resp)\n self.assertFalse('measReportConfig' in resp)\n self.assertFalse('measReportConfig' in response[2])\n self.assertEqual(response[2]['response']['responseCode'], 101)", "def test_evaluate_replace_expression(self):\n value = self.evaluate_common(\"replace('startswith','tart','cake')\")\n self.assertTrue(\n value.type_code == edm.SimpleType.String, \"Expected String\")\n self.assertTrue(value.value == \"scakeswith\")\n value = self.evaluate_common(\"replace('startswith','t','x')\")\n self.assertTrue(value.value == \"sxarxswixh\")\n # not case insensitive\n value = self.evaluate_common(\"replace('sTartswith','t','x')\")\n self.assertTrue(value.value == \"sTarxswixh\")\n value = 
self.evaluate_common(\"replace('startswith','t','tx')\")\n self.assertTrue(value.value == \"stxartxswitxh\")\n try:\n value = self.evaluate_common(\"replace('3.14','1',2)\")\n self.fail(\"integer as parameter\")\n except odata.EvaluationError:\n pass\n try:\n value = self.evaluate_common(\"replace('3.14','1')\")\n self.fail(\"2 parameter\")\n except odata.EvaluationError:\n pass", "def feedReplaceSetups(self, recipies, *args, **keys):\n log(\"REPLACE SELECTION\")\n self.addFeedback(\"replaceSelection\", recipies)\n return \"\"" ]
[ "0.5868793", "0.57626647", "0.57394946", "0.57088983", "0.56332976", "0.5594742", "0.556429", "0.55391914", "0.54893154", "0.5411253", "0.5371333", "0.52877027", "0.52727324", "0.52647424", "0.52616", "0.524178", "0.5240369", "0.52387863", "0.5235418", "0.519722", "0.5170311", "0.51424086", "0.513428", "0.51179594", "0.50815344", "0.50502455", "0.5032098", "0.49831247", "0.49779722", "0.49769536" ]
0.6363337
0
Initialize our `Finitefield` object with a given `prime` number
def __init__(self, prime): if prime != 0: # Check if prime is different from zero self.prime = prime # Assign it else: raise ValueError # Raise an error if prime is negative
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(s, p):\n Zmod.__init__(s, p)\n if s.element_class != FiniteFieldElement:\n raise ArithmeticError(\"Invalid Prime : %d\" % p)\n s.p = p", "def __init__(self, prime, server):\n self.N = prime\n self.g = 2\n self.k = 3\n self.server = server", "def __init__(self, coefficient, basefield=None):\n\n # parameter parse\n try:\n character = basefield.getCharacteristic()\n field = basefield\n except AttributeError:\n # backward compatibility\n if isinstance(basefield, int):\n field = finitefield.FinitePrimeField.getInstance(basefield)\n character = basefield\n else:\n raise ValueError(\"basefield must be FiniteField object.\")\n\n coeffs_list = []\n if isinstance(coefficient, list):\n for c in coefficient:\n if isinstance(c, int):\n coeff = field.createElement(c)\n elif c in field:\n coeff = c\n else:\n raise ValueError(\"coefficient not in basefield.\")\n coeffs_list.append(coeff)\n\n # general initialize\n ECGeneric.__init__(self, coeffs_list, field)\n\n zero = self.basefield.zero\n one = self.basefield.one\n\n # format attribute\n if self.ch == 2:\n if len(self) == 5:\n # FIXME\n if coeffs_list[0] % 2 == one and coeffs_list[2] % 2 == coeffs_list[3] % 2 == zero and coeffs_list[4]:\n self.a1 = one\n self.a2 = coeffs_list[1]\n self.a3 = zero\n self.a4 = zero\n self.a6 = coeffs_list[4]\n self.b2 = one\n self.b4 = zero\n self.b6 = zero\n self.b8 = self.a6\n self.c4 = one\n self.c6 = one\n self.disc = self.a6\n self.j = self.disc.inverse()\n elif coeffs_list[0] % 2 == coeffs_list[1] % 2 == zero and coeffs_list[2]:\n self.a1 = zero\n self.a2 = zero\n self.a3 = coeffs_list[2]\n self.a4 = coeffs_list[3]\n self.a6 = coeffs_list[4]\n self.b2 = zero\n self.b4 = zero\n self.b6 = self.a3**2\n self.b8 = self.a4**2\n self.c4 = zero\n self.c6 = zero\n self.disc = self.a3**4\n self.j = zero\n else:\n raise ValueError(\"coefficient may be not representation of EC.\")\n else:\n raise ValueError(\"coefficient may only use full Weierstrass form for characteristic 2.\")\n elif self.ch == 3: # y^2=x^3+a2*x^2+a6 or y^2=x^3+a4*x+a6\n # FIXME\n if len(self) == 5:\n if coeffs_list[0] % 3 == coeffs_list[2] % 3 == coeffs_list[3] % 3 == 0 and coeffs_list[1] and coeffs_list[4]:\n self.a1 = zero\n self.a2 = coeffs_list[1]\n self.a3 = zero\n self.a4 = zero\n self.a6 = coeffs_list[4]\n self.b2 = self.a2\n self.b4 = zero\n self.b6 = self.a6\n self.b8 = self.a2*self.a6\n self.c4 = self.b2**2\n self.c6 = 2*self.b2**3\n self.disc = -self.a2**3*self.a6\n self.j = (-self.a2**3)*self.a6.inverse()\n elif coeffs_list[0] == coeffs_list[1] == coeffs_list[2] == 0 and coeffs_list[3]:\n self.a1 = zero\n self.a2 = zero\n self.a3 = zero\n self.a4 = coeffs_list[3]\n self.a6 = coeffs_list[4]\n self.b2 = zero\n self.b4 = 2*self.a4\n self.b6 = self.a6\n self.b8 = 2*self.a4**2\n self.c4 = zero\n self.c6 = zero\n self.disc = -self.a4**3\n self.j = zero\n else:\n raise ValueError(\"can't defined EC.\")\n if not self.disc:\n raise ValueError(\"this curve is singular.\")\n else:\n raise ValueError(\"coefficient is less or more, can't defined EC.\")\n else:\n if len(self) == 5:\n self.a1 = coeffs_list[0]\n self.a2 = coeffs_list[1]\n self.a3 = coeffs_list[2]\n self.a4 = coeffs_list[3]\n self.a6 = coeffs_list[4]\n self.b2 = self.a1**2+4*self.a2\n self.b4 = self.a1*self.a3+2*self.a4\n self.b6 = self.a3**2+4*self.a6\n self.b8 = self.a1**2*self.a6+4*self.a2*self.a6-self.a1*self.a3*self.a4+self.a2*self.a3**2-self.a4**2\n self.c4 = self.b2**2-24*self.b4\n self.c6 = -self.b2**3+36*self.b2*self.b4-216*self.b6\n self.disc = 
-self.b2**2*self.b8-8*self.b4**3-27*self.b6**2+9*self.b2*self.b4*self.b6\n if self.disc:\n self.j = self.c4**3*self.disc.inverse()\n else:\n raise ValueError(\"coefficients creates singular curve.\")\n elif len(self) == 2:\n self.a = coeffs_list[0]\n self.b = coeffs_list[1]\n self.a1 = zero\n self.a2 = zero\n self.a3 = zero\n self.a4 = self.a\n self.a6 = self.b\n self.b2 = zero\n self.b4 = 2*self.a\n self.b6 = 4*self.b\n self.b8 = -(self.a**2)\n self.c4 = -48*self.a\n self.c6 = -864*self.b\n self.disc = -self.b2**2*self.b8-8*self.b4**3-27*self.b6**2+9*self.b2*self.b4*self.b6\n if self.disc:\n self.j = self.c4**3*self.disc.inverse()\n else:\n raise ValueError(\"coefficients creates singular curve.\")\n else:\n raise ValueError(\"coefficient is less or more, can't defined EC.\")\n\n self.ord = None\n self.abelian = None\n self.cubic = UniVarPolynomial({0:self.a6, 1:self.a4, 2:self.a2, 3:one},\n self.basefield)", "def __init__(self, coefficient, basefield=None):\n\n try:\n character = basefield.getCharacteristic()\n self.basefield = basefield\n except:\n # backward compatibility support\n if isinstance(basefield, rational.RationalField) or (not basefield):\n character = 0\n self.basefield = rational.theRationalField\n elif isinstance(basefield, int):\n character = basefield\n if character == 1 or character < 0:\n raise ValueError(\"basefield characteristic must be 0 or prime.\")\n self.basefield = finitefield.FinitePrimeField.getInstance(character)\n else:\n raise ValueError(\"basefield must be FiniteField.\")\n\n self.ch = character\n self.infpoint = [self.basefield.zero]\n if isinstance(coefficient, list):\n self.coefficient = coefficient\n if self.ch == 0:\n if len(self) == 5:\n self.a1 = self.coefficient[0]\n self.a2 = self.coefficient[1]\n self.a3 = self.coefficient[2]\n self.a4 = self.coefficient[3]\n self.a6 = self.coefficient[4]\n self.b2 = self.a1**2+4*self.a2\n self.b4 = self.a1*self.a3+2*self.a4\n self.b6 = self.a3**2+4*self.a6\n self.b8 = self.a1**2*self.a6+4*self.a2*self.a6-self.a1*self.a3*self.a4+self.a2*self.a3**2-self.a4**2\n self.c4 = self.b2**2-24*self.b4\n self.c6 = -self.b2**3+36*self.b2*self.b4-216*self.b6\n self.disc = -self.b2**2*self.b8-8*self.b4**3-27*self.b6**2+9*self.b2*self.b4*self.b6\n elif len(self) == 2:\n self.a = self.coefficient[0]\n self.b = self.coefficient[1]\n self.a1 = 0\n self.a2 = 0\n self.a3 = 0\n self.a4 = self.coefficient[0]\n self.a6 = self.coefficient[1]\n self.b2 = 0\n self.b4 = 2*self.a\n self.b6 = 4*self.b\n self.b8 = -self.a**2\n self.c4 = -48*self.a\n self.c6 = -864*self.b\n self.disc = (self.c4**3-self.c6**2)/1728\n else:\n raise ValueError(\"coefficient is less or more, can't defined EC.\")\n if self.disc == 0:\n raise ValueError(\"this curve is singular.\")\n self.j = (self.c4**3)/self.disc\n self.cubic = UniVarPolynomial({0:self.a6, 1:self.a4,\n 3:self.basefield.one},\n self.basefield)\n else:\n pass # support for subclass\n else:\n raise ValueError(\"parameters must be (coefficient, basefield)\")", "def start_prime_test():", "def set_hash_prime(self):\n n = int(self.max_int+50)\n prime = getPrimes(n)\n self.hash_prime = prime", "def prime_factors(number: int) -> dict:\n f = {}\n i = 2\n while number > 1 and number >= i:\n if number % i == 0:\n if i not in f:\n f[i] = 1\n else:\n f[i] += 1\n number //= i\n else:\n i += 1\n return f", "def __init__(self, value=None):\r\n if value is not None:\r\n if isinstance(value, int):\r\n value = self.field(value)\r\n elif isinstance(value, self.field):\r\n pass\r\n# elif isinstance(value, 
Future):\r\n# pass # NB: for internal use in runtime only\r\n else:\r\n if isinstance(value, finfields.FiniteFieldElement):\r\n raise TypeError(f'incompatible finite field {type(value).__name__} '\r\n f'for {type(self).__name__}')\r\n\r\n raise TypeError('None, int, or finite field element required')\r\n\r\n super().__init__(value)", "def __init__(self, n=10000):\n if n < 3:\n raise ValueError('too small n: {0}'.format(n))\n self._p = list(generate_primes(n))\n self._len = len(self._p)\n self._i = 0\n self._n = n\n self._a = n", "def __init__(self, w=None, d=None, delta=None, epsilon=None, bits=256):\r\n\t\t\r\n\t\tif w is not None and d is not None:\r\n\t\t\tself.w = w\r\n\t\t\tself.d = d\r\n\t\telif delta is not None and epsilon is not None:\r\n\t\t\tself.w = int(ceil(e/epsilon))\r\n\t\t\tself.d = int(ceil(log(1./delta)))\r\n\t\t\tprint self.w, self.d\r\n\t\telse:\r\n\t\t\traise Exception(\"You must either supply both w and d or delta and epsilon.\")\r\n\t\t\r\n\t\tif 2**bits < w:\r\n\t\t\traise Exception(\"Too few bits for w\")\r\n\r\n\t\t#Values taken from http://www.isthe.com/chongo/tech/comp/fnv/\t\t\r\n\t\tif bits == 32:\r\n\t\t\tself.prime = 0x1000193\r\n\t\t\tself.offset = 0x811c9dc5\r\n\t\telif bits == 64:\r\n\t\t\tself.prime = 0x100000001b3\r\n\t\t\tself.offset = 0xcbf29ce484222325L\r\n\t\telif bits == 128:\r\n\t\t\tself.prime = 0x1000000000000000000013bL\r\n\t\t\tself.offset = 0x6c62272e07bb014262b821756295c58dL\r\n\t\telif bits == 256:\r\n\t\t\tself.prime = 0x1000000000000000000000000000000000000000163L\r\n\t\t\tself.offset = 0xdd268dbcaac550362d98c384c4e576ccc8b1536847b6bbb31023b4c8caee0535L\r\n\t\telif bits == 512:\r\n\t\t\tself.prime = 0x100000000000000000000000000000000000000000000000000000000000000000000000000000000000157L\r\n\t\t\tself.offset = 0xb86db0b1171f4416dca1e50f309990acac87d059c90000000000000000000d21e948f68a34c192f62ea79bc942dbe7ce182036415f56e34bac982aac4afe9fd9L\r\n\t\telif bits == 1024:\r\n\t\t\tself.prime = 0x10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000018dL\r\n\t\t\tself.offset = 0x5f7a76758ecc4d32e56d5a591028b74b29fc4223fdada16c3bf34eda3674da9a21d9000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004c6d7eb6e73802734510a555f256cc005ae556bde8cc9c6a93b21aff4b16c71ee90b3L\r\n\t\telse:\r\n\t\t\traise Exception(\"Bits must be 32, 64, 128, 256, 512, or 1024\")\r\n\t\tself.L = self.w*(2**bits/self.w)\r\n\t\tself.mod = 2**bits-1\r\n\t\tself.bits = bits\r\n\t\tself.count = zeros((self.d, self.w), dtype=int32)\r\n\t\tself.rows = arange(self.d)\r\n\t\tself.shift_by = int(ceil(log(self.w, 2)))", "def __init__(self,F,t,n):\n \n assert type(F) is Field\n assert type(t) is int, \"t is not an integer: %r\" % t\n assert type(n) is int, \"n is not an integer: %r\" % n\n assert t<= n, \"threshold t must be <= than n\"\n assert t>1, \"threshold t must be >1 %r\" % t\n \n self.t = t\n self.n = n\n \n self.F = F\n self.p = F.p # order of F", "def __init__(self, value=None):\r\n if value is not None:\r\n if isinstance(value, int):\r\n value = self.field(value)\r\n elif isinstance(value, self.field):\r\n pass\r\n elif isinstance(value, Future):\r\n pass # NB: for internal use in runtime only\r\n else:\r\n if isinstance(value, finfields.FiniteFieldElement):\r\n raise TypeError(f'incompatible finite field {type(value).__name__} '\r\n f'for {type(self).__name__}')\r\n\r\n raise TypeError('None, int, or finite 
field element required')\r\n\r\n super().__init__(value)", "def is_prime_field(cls) -> bool:\n return cls._degree == 1", "def __init__(self, n, a=None):\n if a == None:\n self.a = float(\"inf\")\n else:\n self.a = a\n self.n = n % self.a", "def __init__(self, n, e):\n\t\tself.known_primes = [2,3,5,7,11,13,17,19,23,29,31,37,41,43,47,53,59,61,67,71,73,79,83,89,97,101,\n\t\t\t103,107,109,113,127,131,137,139,149,151,157,163,167,173,179,181,191,193,197,\n\t\t\t199,211,223,227,229,233,239,241,251,257,263,269,271,277,281,283,293,307,311,\n\t\t\t313,317,331,337,347,349,353,359,367,373,379,383,389,397,401,409,419,421,431,\n\t\t\t433,439,443,449,457,461,463,467,479,487,491,499,503,509,521,523,541,547,557,\n\t\t\t563,569,571,577,587,593,599,601,607,613,617,619,631,641,643,647,653,659,661,\n\t\t\t673,677,683,691,701,709,719,727,733,739,743,751,757,761,769,773,787,797,809,\n\t\t\t811,821,823,827,829,839,853,857,859,863,877,881,883,887,907,911,919,929,937,\n\t\t\t941,947,953,967,971,977,983,991,997,1009,1013,1019,1021,1031,1033,1039,1049,\n\t\t\t1051,1061,1063,1069,1087,1091,1093,1097,1103,1109,1117,1123,1129,1151,1153,\n\t\t\t1163,1171,1181,1187,1193,1201,1213,1217,1223,1229,1231,1237,1249,1259,1277,\n\t\t\t1279,1283,1289,1291,1297,1301,1303,1307,1319,1321,1327,1361,1367,1373,1381,\n\t\t\t1399,1409,1423,1427,1429,1433,1439,1447,1451,1453,1459,1471,1481,1483,1487,\n\t\t\t1489,1493,1499,1511,1523,1531,1543,1549,1553,1559,1567,1571,1579,1583,1597,\n\t\t\t1601,1607,1609,1613,1619,1621,1627,1637,1657,1663,1667,1669,1693,1697,1699,\n\t\t\t1709,1721,1723,1733,1741,1747,1753,1759,1777,1783,1787,1789,1801,1811,1823,\n\t\t\t1831,1847,1861,1867,1871,1873,1877,1879,1889,1901,1907,1913,1931,1933,1949,\n\t\t\t1951,1973,1979,1987,1993,1997,1999,2003,2011,2017,2027,2029,2039,2053,2063]\n\t\tself.hidden_primes_product = n\n\t\tself.public_key = e\n\t\tself.private_key = None", "def __init__(self, value=None, integral=None):\r\n if value is not None:\r\n if isinstance(value, int):\r\n if integral is None:\r\n integral = True\r\n value = self.field(value << self.frac_length)\r\n elif isinstance(value, float):\r\n if integral is None:\r\n integral = value.is_integer()\r\n value = self.field(round(value * (1<<self.frac_length)))\r\n elif isinstance(value, self.field):\r\n pass\r\n elif isinstance(value, Future):\r\n pass # NB: for internal use in runtime only\r\n else:\r\n if isinstance(value, finfields.FiniteFieldElement):\r\n raise TypeError(f'incompatible finite field {type(value).__name__} '\r\n f'for {type(self).__name__}')\r\n\r\n raise TypeError('None, int, float, or finite field element required')\r\n\r\n self.integral = integral\r\n super().__init__(value)", "def __init__(self, root=0, prime=None):\n if type(root) != int:\n raise ValueError(\n 'Root must be an integer MIDI note number. ' +\n 'Got: {}'.format(root))\n if (root < 0) or (root > 115):\n raise ValueError(\n 'Root must be a valid MIDI note in the range of 0 to 115. 
' +\n 'Got: {}'.format(root))\n\n self._transposition = 0\n\n if prime is not None:\n self._prime = prime\n else:\n self._generate_prime(root)\n\n self._apply_transformations()", "def __post_init__(self) -> None:\n\n if self.q < 0:\n raise ValueError(\"q is negative\")\n\n if not is_prime(self.q):\n raise ValueError(\"q not prime\")\n\n if not is_prime(self.q * 2 + 1):\n raise ValueError(\"2q + 1 not prime\")", "def __init__(self, prim):\n self.actual = prim", "def __init__(__self__, *,\n number: int):\n pulumi.set(__self__, \"number\", number)", "def __init__(self, matrix, type) -> None:\n self.field = Field(matrix)\n assert (type in [\"seki\", \"dseki\"])\n self.eval_field = eval_field_seki if type == \"seki\" else eval_field_dseki\n self.depth = 1\n self.unrolled = 0", "def primes(n):\n primfac = {}\n primfac = defaultdict(lambda: 0, primfac)\n while (n % 2) == 0:\n primfac[2] += 1 \n n //= 2\n d = 3\n while d*d <= n:\n while (n % d) == 0:\n primfac[d] += 1 # supposing you want multiple factors repeated\n n //= d\n d += 2\n if n > 1:\n primfac[n] = 1\n return primfac", "def __init__(self, N=40):\n self._primes = []\n self.find_primes(N)", "def factorization(n):\n pf = []\n for p in primeslist:\n if p*p > n : break\n count = 0\n while not n % p:\n n //= p\n count += 1\n if count > 0: pf.append((p, count))\n if n > 1: pf.append((n, 1))\n return pf", "def getPrimeFactors(num):\n n = num\n primes = {}\n\n p = 2\n sqrt = math.sqrt(num)\n\n def checkAndUpdate(inc):\n nonlocal n\n nonlocal p\n nonlocal primes\n if n % p == 0:\n if str(p) in primes.keys():\n primes[str(p)] += 1\n else:\n primes[str(p)] = 1\n n /= p\n else:\n p += inc\n \n while p == 2 and p <= n:\n checkAndUpdate(1)\n while p <= n and p <= sqrt:\n checkAndUpdate(2)\n if len(primes.keys()) == 0:\n primes[str(num)] = 1\n elif n != 1:\n primes[str(n)] = 1\n return primes", "def __init__(self, num, denom):\n assert type(num) == int and type(denom) == int, \"ints not used\"\n self.num = num\n self.denom = denom\n def simplify(x, y):\n \"\"\" Simplifies a fraction \"\"\"\n if x % 2 > 0:\n if y % x > 0:\n # Check Prime\n prime = check_prime(x, y)\n if prime == 0:\n return str(int(x)) + \"/\" + str(int(y))\n else:\n return simplify ((x / prime), (y / prime))\n else:\n return str(int(x/x)) + \"/\" + str(int(y/x))\n else:\n return simplify ((x / 2), (y / 2))\n def check_prime(x, y):\n \"\"\" Function used by simplify to check prime number division of num and denom \"\"\"\n pri = (3,5,7,11,13,17,19,23)\n for i in pri:\n if (x % i == 0) and (y % i == 0):\n return i\n return 0", "def main():\n prime = gen_prime(1, 100000)\n print(prime)", "def is_prime(self):\n pass", "def setup(self):\n n = 0\n while n.bit_length() != self.n_len:\n p = q = 0\n while p % 4 != 3:\n p = self._gen_prime(self.n_len // 2)\n while p == q or q % 4 != 3:\n q = self._gen_prime(self.n_len // 2)\n n = p * q\n self.p = p\n self.q = q\n self.n = n", "def isPrime(n): \n if n == 2 or n == 3: return True\n if n < 2 or n%2 == 0: return False\n if n < 9: return True\n if n%3 == 0: return False\n r = int(n**0.5)\n f = 5\n #Loop seeks out next prime factor and returns it\n while f <= r:\n if n%f == 0: return (False, f)\n if n%(f+2) == 0: return (False, (f+2))\n f +=6\n return True" ]
[ "0.69248664", "0.64870983", "0.5943145", "0.5748694", "0.57429487", "0.5697714", "0.5641275", "0.5625622", "0.5612961", "0.56080437", "0.5605338", "0.5581152", "0.555875", "0.5549988", "0.55280745", "0.5524173", "0.54919416", "0.5483966", "0.5442872", "0.5397962", "0.5372536", "0.5371054", "0.53030026", "0.52872217", "0.5267666", "0.5264198", "0.5262913", "0.5260093", "0.5224244", "0.52169263" ]
0.74800426
0
Obtain the equivalence class of a certain number.
def equivalence(self, n): return n % self.prime
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_equivalent_class(record):\n equivalent_class = {}\n class_members=[]\n max_class_number = -1\n for pair in record:\n if (pair[0] in equivalent_class) and (not (pair[1] in equivalent_class)):\n equivalent_class[pair[1]] = equivalent_class[pair[0]]\n if (not(pair[0] in equivalent_class)) and (not (pair[1] in equivalent_class)):\n max_class_number+=1\n equivalent_class[pair[0]] = max_class_number\n equivalent_class[pair[1]] = max_class_number\n for c in range(max_class_number+1):\n class_members.append([index for index,val in equivalent_class.items() if val==c])\n return class_members", "def find_class(complex,heme,nucleotide,control, steroid):\n\n if complex in heme:\n return 0\n elif complex in nucleotide:\n return 1\n elif complex in control :\n return 2\n elif steroid in control :\n return 3", "def classify(cls, i):\r\n # lda \r\n if i[1] == None:\r\n return 1\r\n elif (float(i[1])) <= 0.891599215656381:\r\n return 1\r\n else:\r\n return 0", "def get_weight_class(weight):\n\n if(weight >= 3500):\n return 5\n elif(weight >= 3000 and weight < 3500):\n return 4\n elif(weight >= 2500 and weight < 3000):\n return 3\n elif(weight >= 2000 and weight < 2500):\n return 2\n else:\n return 1", "def class_uc(x):\r\n if Class(x) == \"G\" :\r\n return 1\r\n else :\r\n if Class(x) == \"I\" :\r\n return 2\r\n else :\r\n return 0", "def get_mpg_class(mpg):\n\n if(mpg >= 45):\n return 10\n elif(mpg >= 37 and mpg < 45):\n return 9\n elif(mpg >= 31 and mpg < 37):\n return 8\n elif(mpg >= 27 and mpg < 31):\n return 7\n elif(mpg >= 24 and mpg < 27):\n return 6\n elif(mpg >= 20 and mpg < 24):\n return 5\n elif(mpg >= 17 and mpg < 20):\n return 4\n elif(mpg >= 15 and mpg < 17):\n return 3\n elif(mpg >= 14 and mpg < 15):\n return 2\n else:\n return 1", "def plurality_value(examples):\n common = defaultdict(int)\n for example_dict in examples:\n common[example_dict['class']] += 1\n return max(common.items(), key=itemgetter(1))[0]", "def extract_redundancy_factor(oclass):\n match = re.search(\"EC_[0-9]+P([0-9])+\", oclass)\n if match:\n return int(match.group(1))\n match = re.search(\"RP_([0-9]+)\", oclass)\n if match:\n return int(match.group(1)) - 1\n return 0", "def classify(cls, i):\r\n # language_model \r\n if i[2] == None:\r\n return 1\r\n elif (float(i[2])) <= -7.848941176618522:\r\n return 0\r\n else:\r\n return 1", "def class_num(self) -> int:\n return int(np.argmax(self.class_scores))", "def classify(cls, i):\r\n # language_model \r\n if i[2] == None:\r\n return 1\r\n elif (float(i[2])) <= -8.357419966171143:\r\n return 1\r\n else:\r\n return 0", "def classify(cls, i):\r\n # lda \r\n if i[1] == None:\r\n return 1\r\n elif (float(i[1])) <= 0.6215704159296479:\r\n return 0\r\n else:\r\n return 1", "def classify(cls, i):\r\n # lda \r\n if i[1] == None:\r\n return 1\r\n elif (float(i[1])) <= 0.02728102940334218:\r\n return 1\r\n else:\r\n return 1", "def classify(cls, i):\r\n # lda \r\n if i[1] == None:\r\n return 1\r\n elif (float(i[1])) <= 0.1142382568740966:\r\n return 1\r\n else:\r\n return 1", "def majority_class(classes):\n num_pos = len(classes[np.where(classes == 1)])\n num_neg = len(classes) - num_pos\n return 1 if num_pos > num_neg else 0", "def classify(cls, i):\r\n # lda \r\n if i[1] == None:\r\n return 1\r\n elif (float(i[1])) <= 0.01755814193254369:\r\n return 1\r\n else:\r\n return 0", "def classify(cls, i):\r\n # lda \r\n if i[1] == None:\r\n return 1\r\n elif (float(i[1])) <= 0.610257172808176:\r\n return 1\r\n else:\r\n return 0", "def get_majority_class(self, documents):\n 
counts = {}\n for document in documents:\n if document.c not in counts:\n counts[document.c] = 0\n counts[document.c] += 1\n majority_class = None\n majority_class_count = -1\n for c in counts:\n if counts[c] > majority_class_count:\n majority_class = c\n majority_class_count = counts[c]\n return majority_class", "def codage(nbr):\n\tmask=1\n\tresult=0\n\tfor index in range(len(G)):\n\t\tif ((mask<<index)&nbr) != 0:\n\t\t\tresult^=G[len(G)-index-1]\n\treturn result", "def kind(n, ranks):\r\n for i in ranks:\r\n if n == ranks.count(i):\r\n return i", "def class_size(self):\n if not self.is_mutation_finite():\n return infinity\n\n # type A (finite and affine)\n if self._letter == 'A':\n # the formula is taken from Torkildsen - Counting\n # cluster-tilted algebras of type A\n if self.is_finite():\n n = self._rank\n a = binomial( 2*(n+1), n+1 ) // (n+2)\n if n % 2 == 1:\n a += binomial( n+1, (n+1)//2 )\n if n % 3 == 0:\n a += 2 * binomial( 2*n//3, n//3 )\n return a // (n+3)\n # the formula is taken from Bastian, Prellberg, Rubey, Stump\n elif self.is_affine():\n i,j = self._bi_rank\n i = ZZ(i)\n j = ZZ(j)\n n = i+j\n f = Euler_Phi()\n if i == j:\n return ( binomial( 2*i,i ) +\n sum( f(k) * binomial(2*i//k,i//k)**2\n for k in [k for k in i.divisors()\n if k in j.divisors()] ) // n ) // 4\n else:\n return sum( f(k) * binomial(2*i//k,i//k) *\n binomial(2*j//k,j//k)\n for k in [k for k in i.divisors()\n if k in j.divisors()] ) // ( 2 * n )\n\n # types B and C (finite and affine)\n elif self._letter in ['B', 'C']:\n # this formula is proven but nowhere published correctness\n # is clear enough that I don't think a warning is needed\n if self.is_finite():\n n = self._rank\n return binomial(2 * n, n) // (n + 1)\n\n elif self._letter in ['BB','CC']:\n # these two formulas are not yet proven\n print(Warning(\"Warning: This method uses a formula \"\n \"which has not been proved correct.\"))\n if self.is_affine():\n if self._twist == 1:\n n = self._rank - 1\n if n%2==1:\n return binomial( 2*n-1, n-1 )\n else:\n return binomial( 2*n-1, n-1 ) + binomial( n-1, n//2 -1 )\n\n # type BC (affine)\n elif self._letter == 'BC':\n # this formula is not yet proven\n print(Warning(\"Warning: This method uses a formula \"\n \"which has not been proved correct.\"))\n if self.is_affine():\n if self._twist == 1:\n n = self._rank - 1\n return binomial( 2*n, n )\n\n # types BD and CD (affine)\n elif self._letter in ['BD','CD']:\n # this formula is not yet proven\n print(Warning(\"Warning: This method uses a formula \"\n \"which has not been proved correct.\"))\n if self.is_affine():\n if self._twist == 1:\n n = self._rank - 2\n return 2*binomial( 2*n, n )\n\n # type D (finite and affine)\n elif self._letter == 'D':\n # the formula is taken from Bastian, Prellberg, Rubey, Stump\n if self.is_finite():\n if self._rank == 4:\n return 6\n else:\n f = Euler_Phi()\n n = ZZ(self._rank)\n return sum( f( n//k ) * binomial( 2*k, k )\n for k in n.divisors() ) // (2*n)\n # this formula is not yet proven\n elif self.is_affine():\n n = self._rank - 3\n if n == 2:\n return 9\n else:\n print(Warning (\"Warning: This method uses a formula \"\n \"which has not been proved correct.\"))\n if n%2==1:\n return 2*binomial(2*n,n)\n else:\n return 2*binomial(2*n,n) + binomial(n, n//2)\n\n # the exceptional types are hard-coded\n # type E (finite, affine and elliptic)\n elif self._letter == 'E':\n if self.is_finite():\n if self._rank == 6:\n return 67\n elif self._rank == 7:\n return 416\n elif self._rank == 8:\n return 1574\n elif 
self.is_affine():\n if self._rank == 7:\n return 132\n elif self._rank == 8:\n return 1080\n elif self._rank == 9:\n return 7560\n elif self.is_elliptic():\n if self._rank == 8:\n return 49\n elif self._rank == 9:\n return 506\n elif self._rank == 10:\n return 5739\n\n # type F\n elif self._letter == 'F':\n if self.is_finite():\n return 15\n elif self.is_affine():\n return 60\n elif self.is_elliptic():\n if self._twist == [1,2]:\n return 90\n if self._twist == [1,1] or self._twist == [2,2]:\n return 35\n\n # type G\n elif self._letter == 'G':\n if self.is_finite():\n return 2\n elif self.is_affine():\n return 6\n elif self.is_elliptic():\n if self._twist == [1,3]:\n return 7\n if self._twist == [1,1] or self._twist == [3,3]:\n return 2\n\n # type X\n elif self._letter == 'X':\n if self._rank == 6:\n return 5\n elif self._rank == 7:\n return 2\n\n # otherwise the size is returned to be unknown\n else:\n print(\"Size unknown\")\n return NotImplemented", "def classify(cls, i):\r\n # lda \r\n if i[1] == None:\r\n return 0\r\n elif (float(i[1])) <= 0.02728102940334218:\r\n return 1\r\n else:\r\n return 0", "def classify(cls, i):\r\n # lda \r\n if i[1] == None:\r\n return 0\r\n elif (float(i[1])) <= 0.1142382568740966:\r\n return 1\r\n else:\r\n return 0", "def __eq__(self, number):\n return int(self) != number", "def classes_calculations(input):\n counts, _ = np.histogram(input, bins=int(\n input.max() + 1), range=(0, int(input.max())))\n return np.nonzero(counts)[0]", "def get_nominal(self, key):\n return ((hash(key) % 12) + 6.0) * 3", "def to_class(numlist,classlist=string.ascii_lowercase):\n\n return np.vectorize(lambda t: classlist[t])(numlist)", "def to_class(numlist,classlist=string.ascii_lowercase):\n\n return np.vectorize(lambda t: classlist[t])(numlist)", "def evaluate_number(number : int)->int:\n if type(number) == int and number >1 and number < 100:\n num = total_numbers = porc = 0\n while porc < number:\n num = num + 1\n clasificate = is_bouncy(str(num))\n result = evaluate(clasificate , num)\n if result:\n total_numbers = total_numbers + 1\n porc = total_numbers * 100 / num\n return num\n return 0", "def classify(x, c, b):\n if x<c-b:\n return 0\n elif x>c+b:\n return 1\n else:\n if b>10**-7:\n return (x-c+b)/2/b\n else:\n return 0.5" ]
[ "0.5669883", "0.5552854", "0.5449721", "0.5332555", "0.5325522", "0.53195566", "0.53147596", "0.529187", "0.5286526", "0.5284268", "0.52787423", "0.52514434", "0.5224838", "0.5221005", "0.522084", "0.5210592", "0.52059454", "0.52004546", "0.51959074", "0.51881945", "0.5165687", "0.516315", "0.5161881", "0.51607877", "0.51268786", "0.511624", "0.5074186", "0.5074186", "0.5065526", "0.5064382" ]
0.61218315
0
Obtain this finite field's `prime` number.
def get_prime(self): return self.prime
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_prime(self):\n if(not self._constructed): raise EGCSUnconstructedStateError()\n return self._prime", "def Em_prime(self):\n delta_electrons = self._GetElectronDiff()\n assert delta_electrons != 0\n return - self.DeltaGmPrime() / (constants.F*delta_electrons)", "def E_prime(self):\n delta_electrons = self._GetElectronDiff()\n assert delta_electrons != 0\n return - self.DeltaGPrime() / (constants.F*delta_electrons)", "def is_prime_field(cls) -> bool:\n return cls._degree == 1", "def is_prime(self):\n pass", "def E0_prime(self):\n delta_electrons = self._GetElectronDiff()\n assert delta_electrons != 0\n return - self.DeltaG0Prime() / (constants.F*delta_electrons)", "def __init__(self, prime):\n if prime != 0: # Check if prime is different from zero\n self.prime = prime # Assign it\n else:\n raise ValueError # Raise an error if prime is negative", "def first_factor(cls, number):\n for prime in cls(maximum=math.sqrt(number)):\n if not number % prime:\n return prime\n return None", "def getPrime(bits):\n\twhile(True) :\n\t\t# on continue a tirer des nombres tant que l'on n'a pas trouve de nombre premier\n\t\tp = getrandbits(bits)\n\t\tif(miller_rabin(p,100)) :\n\t\t\treturn p", "def next(self):\n nextPrime = None\n i = self.lastPrime+2\n while nextPrime is None:\n sqrt_i = math.sqrt(i)\n isPrime = True\n for p in self.primes:\n if i%p == 0:\n isPrime = False\n i += 2\n break\n if p > sqrt_i:\n break\n if isPrime:\n nextPrime = i\n self.primes.append(nextPrime)\n self.lastPrime = nextPrime\n return nextPrime", "def _g_prime(self, x):\n return self._g(x)*(1 - self._g(x))", "def pi(self):\n return self(self._real_field().pi())", "def _gen_prime(self, n_bits):\n n = gmpy2.mpz(prng.getrandbits(n_bits))\n return gmpy2.next_prime(n)", "def __Relu_prime(self, x):\n \n return x/x", "def get_prime_factor(n):\n if n % 2 == 0:\n return 2\n for num in range(3, n + 1, 2):\n if n % num == 0:\n return num", "def nextPrime(self):\n\t\tnum = self.cur + 1\n\t\twhile not self.isPrime(num):\n\t\t\tnum += 1\n\t\tself.cur = num\n\t\tself.prev.append(num)\n\t\t# print num\n\t\treturn num", "def psi_prime(n,x):\r\n a = 1/(sqrt((2**n)*fac(n)*sqrt(pi)))\r\n b = (e)**(-1*(x**2)*0.5)\r\n third_factor = (-1*x*H(n,x))+(2*n*H(n-1,x))\r\n return a*b*third_factor", "def __init__(s, p):\n Zmod.__init__(s, p)\n if s.element_class != FiniteFieldElement:\n raise ArithmeticError(\"Invalid Prime : %d\" % p)\n s.p = p", "def prime_factor(x):\n thelist=get_factors(x)\n newlist=return_primelist(thelist)\n result=newlist[-1]\n return result", "def is_prime(n):\n return mr_prime(n)", "def isprime(n):\r\n\treturn is_prime(n)", "def KeqPrime(self):\n dg0_prime = self.DeltaG0Prime()\n if dg0_prime is None:\n return None\n \n rt = constants.R * constants.DEFAULT_TEMP\n keq = numpy.exp(-dg0_prime / rt)\n return keq", "def carbon_prime(C,p,p0):\r\n \r\n if p > p0:\r\n return C\r\n else:\r\n return .03", "def prime(self, y, a):\n return y - a/(a*(1-a))", "def factorone(n):\n\tif (is_prime(n)): return n\n\tfor fact in (2,3,5,7,11,13,17,19,23,29):\n\t\tif n%fact == 0: return fact\n\treturn factorPR(n) # Needs work - no guarantee that a prime factor will be returned", "def factorone(n):\r\n\tif (is_prime(n)): return n\r\n\tfor fact in [2,3,5,7,11,13,17,19,23,29]:\r\n\t\tif n%fact == 0: return fact\r\n\treturn factorPR(n) # Needs work - no guarantee that a prime factor will be returned\r", "def equivalence(self, n):\n return n % self.prime", "def isprime(n):\n\treturn is_prime(n)", "def getPrime(N):\n if GMPY:\n randomFunction = 
random.SystemRandom()\n rand = gmpy2.mpz(randomFunction.getrandbits(N))\n rand = gmpy2.bit_set(rand, N - 1)\n return int(gmpy2.next_prime(rand))\n elif PYCRYPTO:\n return number.getPrime(N, os.urandom)\n else:\n raise NotImplementedError(\"Couldn't find GMP or PyCrypto. No futher method implemented. Please install one of these two.\")", "def prime_factorization(num):\n return prime_factors_p(num, _sieve)" ]
[ "0.71399903", "0.66589963", "0.6651876", "0.6646228", "0.662972", "0.647131", "0.6122692", "0.6115008", "0.6047143", "0.60251045", "0.6022619", "0.5988457", "0.59664625", "0.596395", "0.5935993", "0.5896268", "0.58959156", "0.58298343", "0.5828062", "0.5750545", "0.5699855", "0.56938684", "0.5687346", "0.56753415", "0.5673297", "0.56527877", "0.56495374", "0.5644158", "0.5632837", "0.5626246" ]
0.7992657
0
Returns true if i is a leaf, i.e., if i has no children.
def is_leaf(self, i): return len(self.children[i]) == 0 or len(self.pq[i]) == 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_leaf(self):\n if len(self.children) == 0:\n return True\n else:\n return False", "def is_leaf(self):\r\n return self.num_children() == 0", "def is_leaf(self):\n return len(self.children) == 0", "def is_leaf(self):\n return len(self.child_list) == 0", "def is_leaf(self):\n return not self.children.exists()", "def isLeaf(self):\n \n # I am a leaf if I have no children\n return not self._children", "def is_leaf(self):\n return len(self._children) == 0", "def is_leaf(self):\n return self._children == {}", "def is_leaf(self):\n return self._children == {}", "def is_leaf(self):\n return self._children == {}", "def is_leaf(self):\n return self._children == {}", "def is_leaf(self):\n if len(self.children) == 0: #If the Node has no children, it's a leaf\n return True\n else:\n return False", "def isLeaf(self):\n\n return self.children == {}", "def is_leaf(self) -> bool:\n return not any(self.children)", "def is leaf(self, p):\n return self.num children(p) == 0", "def leaf(self):\n if not self._leftchild and not self._rightchild:\n return True\n return False", "def is_leaf(self, p):\n return self.num_children(p) == 0", "def is_leaf(self, n):\n return self.num_children(n) == 0", "def leaf(self):\n if not self.left and not self.right:\n return True\n return False", "def isLeaf(self) -> bool:\n return not self.left and not self.right", "def is_leaf(self):\n # Has no children nodes\n return self.nodes is None or len(self.nodes) == 0", "def isLeaf(self):\n return self.left is None and self.right is None", "def is_leaf(self, p):\n return self.num_children(p) == 0", "def is_leaf(self, p):\n return self.num_children(p) == 0", "def is_leaf(self, p):\n return self.num_children(p) == 0", "def is_leaf(self, p):\n return self.num_children(p) == 0", "def is_leaf(self):\n if self._leftchild or self._rightchild:\n return False\n return True", "def is_leaf(self):\n # TODO: Check if both left child and right child have no value\n return ... and ...", "def is_leaf(self):\n return self.__left == None and self.__right==None", "def is_leaf(node):\n return node.children == {}" ]
[ "0.82774425", "0.8232412", "0.8213968", "0.8201722", "0.8164184", "0.81311023", "0.81169236", "0.8108716", "0.8108716", "0.8108716", "0.8108716", "0.80393773", "0.80339175", "0.7978852", "0.797204", "0.79555184", "0.79298913", "0.78709584", "0.78234917", "0.7776668", "0.77559084", "0.774049", "0.77189225", "0.77189225", "0.77189225", "0.77189225", "0.77131516", "0.76882607", "0.7662664", "0.7629539" ]
0.8860917
0
Gives the children of node i, which has elements elems. In this version, it grabs all 2-partitions if they are not already there and caches them in children[i].
def get_children(self, i, elems): # if len(elems) == 1: # return [] # elif self.explored[i]: # return self.children[i] # else: self.children[i], self.children_elems[i] = self._get_children(list(elems)) # all_two_partitions(list(elems)) # self.update_from_children(i, (ch_l, ch_r)) return self.children[i], self.children_elems[i]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def children(self, i):\n if i < 0:\n raise IndexError()\n return self._children[i]", "def get_children_elements(self):\n\n pass", "def children_recursive(self, i):\n result = []\n for child in self.children(i):\n result += [child] + self.children_recursive(child)\n return result", "def elements(self):\n for c in self.children:\n yield c\n for c2 in c.elements:\n yield c2", "def get_child_nodes(self):\n parts = []\n for child in sorted(self.children, key=lambda c: c.start):\n child_part = (child.start, child.end, child)\n parts.append(child_part)\n return parts", "def computeChildren(self, root):\n d = deque()\n bag = set()\n d.append(root)\n while d:\n elem = d.pop()\n bag.add(elem)\n newElems = set(elem.children).difference(bag)\n d.extend(newElems)\n return bag", "def get_children(self):\n return self.children", "def iter_child_nodes(predicate, cursor):\n return (c for c in cursor.get_children() if predicate(c))", "def children(self) -> Iterable[Heirarchical]:\n return []", "def get_children(self):\n raise NotImplementedError()", "def extend_to_children(self, indices):\n def get_children(i):\n model = i.model()\n rows = model.rowCount(parent=i)\n for row in range(rows):\n child = model.index(row, 0, parent=i)\n yield child\n\n subitems = set()\n for i in indices:\n valid_parent = i.parent().isValid()\n if valid_parent and i not in subitems:\n subitems.add(i)\n\n if self._hierarchy_view:\n # Assume this is a group node\n for child in get_children(i):\n subitems.add(child)\n else:\n # is top level node\n for child in get_children(i):\n subitems.add(child)\n\n return list(subitems)", "def _get_children(self, x):\n try:\n return x._pfp__children\n\n except AttributeError:\n return []", "def GetChildren(self, p_int, p_int_1, p_int_2):\n ...", "def get_children(self):\r\n return self.children", "def __iter__(self):\n return iter(self._child_elements)", "def expand(self): #uninformed\n children = []\n index = self._find0()\n if index >= self.size: return children\n for change in range(1, self.size + 1):\n child = Permutation(self.size)\n elements = self.getElements()\n elements[index] = change\n child.setElements(elements)\n children.append(child)\n return children", "def children(self):\n return tuple(getattr(self, i) for i in self._traversable)", "def get_child_nodes(node):\r\n return list(iter_child_nodes(node))", "def get_all_children(self):\n return tuple(self.children)", "def children(self):\n try:\n attr_name = 'Parrot_%s_attributes' % self.pmc_name\n attr_type = gdb.lookup_type(attr_name).pointer()\n\n attrs = self.val['data'].cast(attr_type).dereference()\n\n '''\n Something ridiculous happens here. 
I take a list of tuples:\n [ (\"key1\", \"val1\"), (\"key2\", \"val2\") ]\n\n and turn it, in one iteration, into:\n [\n [(\"name\", \"key1\"), (\"value\", \"val1\")],\n [(\"name\", \"key2\"), (\"value\", \"val2\")]\n ]\n\n That, in turn, is mutated into one list.\n [\n (\"name\", \"key1\"), (\"value\", \"val1\"),\n (\"name\", \"key2\"), (\"value\", \"val2\")\n ]\n\n What we go through for 100% lazy iteration.\n '''\n name_value_tuples = PMCIterator(attrs)\n nv_iter = itertools.imap(lambda val: [ (\"name\", val[0]), (\"value\", val[1]) ],\n name_value_tuples)\n nv_chain = itertools.chain.from_iterable(nv_iter)\n\n return nv_chain\n except RuntimeError as e:\n return [ ( \"__ERROR__\", \"\" ) ].__iter__()", "def children(self, pos):\n return range(self.dary * pos + 1, min(self.dary * (pos + 1) + 1, len(self.heap)))", "def Children(self) -> _n_1_t_2:", "def descendants(self):\n for a in self._related(set(), 'children'):\n yield a", "def get_children(self):\r\n return self._children", "def _all_children(self) -> list[Container]:\n\n def get() -> list[Container]:\n result: list[Container] = []\n\n # Padding left.\n if self.align in (HorizontalAlign.CENTER, HorizontalAlign.RIGHT):\n result.append(Window(width=Dimension(preferred=0)))\n\n # The children with padding.\n for child in self.children:\n result.append(child)\n result.append(\n Window(\n width=self.padding,\n char=self.padding_char,\n style=self.padding_style,\n )\n )\n if result:\n result.pop()\n\n # Padding right.\n if self.align in (HorizontalAlign.CENTER, HorizontalAlign.LEFT):\n result.append(Window(width=Dimension(preferred=0)))\n\n return result\n\n return self._children_cache.get(tuple(self.children), get)", "def get_children(self):\n return NodeList(self._my_map['childNodes'])", "def each_child(\n self,\n search_range=None,\n descended_from_type=_otio.Composable,\n shallow_search=False,\n):\n for child in self.children_if(descended_from_type, search_range, shallow_search):\n yield child", "def get_children(self):\n return []", "def get_children(self):\n return self.children", "def get_children(self):\n return self.children" ]
[ "0.63445675", "0.60507435", "0.59418297", "0.5931057", "0.5728822", "0.5700945", "0.56202734", "0.5581891", "0.55794436", "0.55772966", "0.55548924", "0.5552774", "0.5548988", "0.5543629", "0.5530173", "0.551414", "0.55022126", "0.5479672", "0.54726505", "0.5466317", "0.54577327", "0.54466826", "0.5443984", "0.5443661", "0.54309434", "0.5421172", "0.5414293", "0.5394323", "0.5392361", "0.5392361" ]
0.81951326
0
Get the node corresponding to the given elements, creating a new id if needed.
def record_node(self, elements: frozenset) -> int: logging.debug('get node id from elements %s', str(elements)) if elements not in self.elems2node: logging.debug('get node id from elements %s. new node! %s', str(elements), self.next_id) logging.debug('Clusters =%s ', str(self.clusters)) self.elems2node[elements] = self.next_id self.clusters[self.next_id] = elements if len(elements)>1: # print('element in elements=', [element for element in elements]) # print("momentum =", np.asarray([self.momentum[frozenset({elem})] for elem in elements])) self.momentum[elements]= sum(np.asarray([self.momentum[frozenset({elem})] for elem in elements])) # Add the momentum of the leaves that compose the node # self.invariant_mass[self.next_id] = # elif len(elements)==1: # self.momentum[elements]= self.leaves_momentum[list(elements)[0]] self.next_id += 1 return self.next_id - 1 else: return self.elems2node[elements]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getNodeById(self, nodes, id):\n for item in nodes:\n if item.getProperty('id') == id:\n return item", "def create_id(elements: Iterable) -> str:\r\n i = 1\r\n while str(i) in elements:\r\n i += 1\r\n return str(i)", "def update_node_id(node: Element) -> None:\n new_ids: list[str] = []\n for node_id in node['ids']:\n new_id = self.fix_fragment('', node_id)\n if new_id not in new_ids:\n new_ids.append(new_id)\n node['ids'] = new_ids", "def add_nodes(list_of_ids, G, singleGraph):\r\n road_set=set()\r\n for id, pm, dir, coords, hwy in list_of_ids:\r\n id_dict=dict(lat=coords[0], lon=coords[1], dire=dir, mile=pm, road=hwy)\r\n G.add_node(id, id_dict)\r\n singleGraph.add_node(id)\r\n singleGraph.position[id]=(coords[1], coords[0])\r\n road_set.add(int(hwy))\r\n print 'road set: ', road_set\r\n return road_set, G, singleGraph", "def setFreeId(self,element):\n if element.__class__==Subtoken:\n prefix = 'st'\n inlist = self.subtokens.values()\n elif element.__class__==DepToken:\n prefix = 'dt'\n inlist = self.deptokens.values()\n elif element.__class__==RelToken:\n prefix = 'rt'\n inlist = self.reltokens.values()\n elif element.__class__==DepEntity:\n prefix = 'db'\n inlist = self.depentities.values()\n elif element.__class__==RelEntity:\n prefix = 'rb'\n inlist = self.relentities.values()\n elif element.__class__==DepNode:\n prefix = 'dn'\n inlist = self.dependencies.nodes()\n elif element.__class__==RelNode:\n prefix = 'rn'\n inlist = self.interactions.nodes()\n elif element.__class__==DepEdge:\n prefix = 'de'\n inlist = [x[2] for x in self.dependencies.edges()]\n elif element.__class__==RelEdge:\n prefix = 're'\n inlist = [x[2] for x in self.interactions.edges()]\n else:\n inlist = []\n printWarning(self.__class__,\n inspect.stack()[0][3],\n \"%s should not have id\"%(element.__class__))\n used = map(lambda x:x.getRunningId(),inlist)\n element.id = '.'.join([prefix, self.id, FreeIdIter(used).get()])\n return(True)", "def init_id_helper(node: TreeNode, current_id: List[int]) -> None:\n node.id = current_id[0]\n current_id[0] += 1\n if not isinstance(node, TreeNode):\n return\n init_id_helper(node.left, current_id)\n init_id_helper(node.right, current_id)", "def __getMultiTrackElementWithId(self, multiTrackXML, itemId):\n\n for rigType in multiTrackXML.getchildren():\n for r in rigType.getchildren():\n for rig in r.getchildren():\n if rig.attrib.get(\"id\") == itemId:\n return {\"rig\":rig}\n for billboard in rig.getchildren():\n if billboard.attrib.get(\"id\") == itemId:\n return {\"rig\":rig, \"billboard\":billboard}\n for multiTrack in billboard.getchildren():\n for track in multiTrack.getchildren():\n if track.attrib.get(\"id\") == itemId:\n return {\"rig\":rig, \"billboard\":billboard, \"track\":track}\n for cdl in track.getchildren():\n for clip in cdl.getchildren():\n if clip.attrib.get(\"id\") == itemId:\n return {\"rig\":rig, \"billboard\":billboard, \"track\":track, \"clip\":clip}\n for clipType in clip.getchildren():\n for pose in clipType.getchildren():\n if pose.attrib.get(\"id\") == itemId or itemId == -1:\n return {\"rig\":rig, \"billboard\":billboard, \"track\":track, \"clip\":clip, \"pose\":pose}\n return None", "def init_id(root: TreeNode):\n current_id = [0]\n init_id_helper(root, current_id)\n return current_id[0]", "def _get_id(self, item, prefix, item_list):\r\n try:\r\n index = item_list.index(item)\r\n except ValueError:\r\n index = len(item_list)\r\n item_list.append(item)\r\n\r\n return self._id_for_index(prefix, index)", "def setUniqueId(self, 
idsOfElementaryExpressions):\n for e in self.children:\n e.setUniqueId(idsOfElementaryExpressions)", "def fix_ids(self, tree: nodes.document) -> None:\n def update_node_id(node: Element) -> None:\n \"\"\"Update IDs of given *node*.\"\"\"\n new_ids: list[str] = []\n for node_id in node['ids']:\n new_id = self.fix_fragment('', node_id)\n if new_id not in new_ids:\n new_ids.append(new_id)\n node['ids'] = new_ids\n\n for reference in tree.findall(nodes.reference):\n if 'refuri' in reference:\n m = self.refuri_re.match(reference['refuri'])\n if m:\n reference['refuri'] = self.fix_fragment(m.group(1), m.group(2))\n if 'refid' in reference:\n reference['refid'] = self.fix_fragment('', reference['refid'])\n\n for target in tree.findall(nodes.target):\n update_node_id(target)\n\n next_node: Node = target.next_node(ascend=True)\n if isinstance(next_node, nodes.Element):\n update_node_id(next_node)\n\n for desc_signature in tree.findall(addnodes.desc_signature):\n update_node_id(desc_signature)", "def expand_id_nodes(self, id_nodes_path, update_nodes_paths):\n expanded_tree = copy.deepcopy(self)\n combinatorial_id_nodes = {} # map combinatorial_id -> list of combination_ids\n\n for id_node_key, id_node_val in self.__getitem__(id_nodes_path).items():\n # Find all combinations and expand them\n id_node_val = CombinatorialTree(id_node_val)\n combinations = {id_node_key + '_' + name: comb for name, comb\n in id_node_val.named_combinations(separator='_', max_name_length=30)}\n\n if len(combinations) > 1:\n # Substitute combinatorial node with all combinations\n del expanded_tree[id_nodes_path][id_node_key]\n expanded_tree[id_nodes_path].update(combinations)\n # We need the combinatorial_id_nodes substituted to an id_node_key\n # to have a deterministic value or MPI parallel processes will\n # iterate over combinations in different orders\n combinatorial_id_nodes[id_node_key] = sorted(combinations.keys())\n\n # Update ids in the rest of the tree\n for update_path in update_nodes_paths:\n for update_node_key, update_node_val in self._resolve_paths(self._d, update_path):\n # Check if the value is a collection or a scalar\n if isinstance(update_node_val, list):\n for v in update_node_val:\n if v in combinatorial_id_nodes:\n i = expanded_tree[update_node_key].index(v)\n expanded_tree[update_node_key][i:i+1] = combinatorial_id_nodes[v]\n elif update_node_val in combinatorial_id_nodes:\n comb_leaf = CombinatorialLeaf(combinatorial_id_nodes[update_node_val])\n expanded_tree[update_node_key] = comb_leaf\n\n return expanded_tree", "def get_create_named_node(self, node_id_name):\n n = node_id_name.split(\"_\", 1)\n node_id = int(n[0], 16)\n if node_id in self.nodes_dict:\n node = self.nodes_dict[node_id]\n else:\n node = self.get_create_node(node_id)\n\n if len(n) == 2 and node.node_name != n[1]:\n node.node_name = n[1]\n\n return node", "def get_node_with_id(self, graph, node_id):\n # TODO: Should probably move to parse_common\n nodes = []\n for node in graph.getElementsByTagName('node'):\n if node.attributes[\"id\"].value == node_id:\n nodes.append(node)\n if len(nodes) != 1:\n raise NameError('Node id either missing or not unique.')\n else:\n return nodes[0]", "def setUniqueId(self, idsOfElementaryExpressions):\n if self.elementaryName in idsOfElementaryExpressions:\n self.elementaryIndex = idsOfElementaryExpressions[\n self.elementaryName\n ]\n else:\n error_msg = (\n f'No index is available for elementary '\n f'expression {self.elementaryName}.'\n )\n raise excep.biogemeError(error_msg)\n 
self.child.setUniqueId(idsOfElementaryExpressions)", "def create_or_retrieve_node_id(self, wg, node_name):\n\n try:\n return self.retrieve_node_id(wg, node_name)\n except UnknownNodeError:\n return self._create_node(wg, node_name)", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def associate_node_id(tr, node=\"\"):\n return {\"id\": tr.get_uml_id(name=node)}", "def assign_ids(ast):\n def f_either(obj, *child_results):\n id_ = slast.SlAst.id_\n obj.id_ = id_[0]\n id_[0] += 1\n\n # def f_either(obj, *child_results):\n # _id_dict = slast.SlAst._id_dict\n # id_ = slast.SlAst.id_\n # # FIXME: Assign same id to all data predicate calls with the same root/stop-nodes\n # key = str(obj.to_sl_expr())\n # if key in _id_dict:\n # obj.id_ = _id_dict[key]\n # else:\n # obj.id_ = id_[0]\n # _id_dict[key] = id_[0]\n # id_[0] += 1\n\n astutils.fold(f_either, f_either, ast)" ]
[ "0.5942844", "0.58788836", "0.58618236", "0.55669534", "0.55313444", "0.54860556", "0.5439955", "0.54111147", "0.53839993", "0.53363097", "0.533143", "0.53260684", "0.53103817", "0.53013533", "0.5292114", "0.5276335", "0.5268265", "0.5268265", "0.5268265", "0.5268265", "0.5268265", "0.5268265", "0.5268265", "0.5268265", "0.5268265", "0.5268265", "0.5268265", "0.5268265", "0.52272904", "0.5196458" ]
0.6063719
0
Push RSPECs to Jira
def push_rspecs(host, auth, rspecs): for rspec in rspecs: description = rspec["fields"]["description"] click.echo(f"Pushing {rspec['key']} ", err=True) data = { "update": { "description": [ { "set": description } ], } } result = requests.put( f"{host}/rest/api/latest/issue/{rspec['key']}", json=data, auth=auth ) result.raise_for_status()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def push_current_data(project):\n defects = []\n\n logger.info(\"Starting {}...\".format(project))\n jira_issues = get_jira_defects(project)\n now = datetime.datetime.utcnow().strftime(DATE_FORMAT)\n logger.debug(\"Fetched {} issues successfully for {}\".format(len(jira_issues), project))\n\n # Each issue fetched is being generated with our schema.\n for issue in jira_issues:\n try:\n jira_dict = jira_obj_to_dict(issue, now)\n defect = create_defect(jira_dict, issue)\n defects.append(defect)\n except Exception as e:\n logger.debug(\"Exception processing {} {}\".format(issue.key, e))\n logger.debug(\"Missing values {}\".format(str(jira_dict)))\n pass\n if len(defects) < len(jira_issues):\n logger.debug(\"{delta} defects not added in the {} report\".format(project, delta=len(jira_issues) - len(defects)))\n\n return post_defects(project, jira_issues, defects)", "def post_defects(project, jira_issues, defects):\n payload = \"\"\n for defect in defects:\n #TODO: this is a hack which can be removed once, excel docs are done away with.\n if defect[\"assignee\"] == \"Unassigned\":\n defect[\"assignee\"] = None\n\n data = {\"host\": host,\n \"time\": int(datetime.datetime.strptime(defect[\"report_date\"], DATE_FORMAT).strftime(\"%s\")) * 1000,\n \"event\": defect,\n \"index\": INDEX,\n \"source\": \"defect\"}\n if config.splunk[config.environment].payload_limit and len(payload) + len(data) >= config.splunk[config.environment].payload_limit:\n logger.info(\"Reached length: {}, Restarting\".format(len(payload)))\n rsp = post_to_splunk(payload=payload)\n logger.info(\"Successfully posted batched data to Splunk {}\".format(project))\n payload = \"{}\".format(json.dumps(data))\n else:\n payload += \" {}\".format(json.dumps(data))\n\n rsp = post_to_splunk(payload=payload)\n logger.info(\"Successfully posted data to splunk for {}\".format(project))\n return {project: rsp.status_code, \"defects_require_fixing\": str(len(jira_issues) - len(defects))}", "def push_historic_data(project):\n defects = []\n\n logger.info(\"Starting {}...\".format(project))\n jira_issues = get_jira_defects(project)\n last_upload = datetime.datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0) + relativedelta(weekday=SA(-1))\n logger.debug(\"Fetched {} issues successfully for {}\".format(len(jira_issues), project))\n for issue in jira_issues:\n try:\n created = datetime.datetime.strptime(issue.fields.created, DATE_FORMAT)\n jira_dict = jira_obj_to_dict(issue, datetime.datetime.utcnow().strftime(DATE_FORMAT))\n\n historic_data = []\n # Last Friday of the report ran\n report_date = last_upload\n while(report_date > created):\n jira_dict = jira_for_date(jira_dict, issue.changelog, report_date)\n historic_data.insert(0, create_defect(jira_dict, issue))\n report_date -= datetime.timedelta(weeks=1)\n defects.append(historic_data)\n except Exception as e:\n logger.debug(\"Exception processing {} {}\".format(jira_dict[\"key\"], e))\n logger.exception(\"Exception\")\n logger.debug(\"Missing values {}\".format(str(jira_dict)))\n pass\n if len(defects) < len(jira_issues):\n logger.debug(\"{delta} defects not added in the {} report\".format(project, delta=len(jira_issues) - len(defects)))\n defects_as_list = []\n for defect in defects:\n defects_as_list.extend(defect)\n return post_defects(project, jira_issues, defects_as_list)", "def sync_jira():\n from security_monkey import jirasync\n if jirasync:\n app.logger.info('Syncing issues with Jira')\n jirasync.sync_issues()\n else:\n app.logger.info('Jira sync not configured. 
Is SECURITY_MONKEY_JIRA_SYNC set?')", "def main():\n parser = argparse.ArgumentParser(description='Creates tickets for release certification')\n parser.add_argument('-u', '--username', help='jira username', default='admin')\n parser.add_argument('-p', '--password', help='jira password', default='admin')\n parser.add_argument('-c', '--config', help='path to config file', default='./options.ini')\n parser.add_argument('-j', '--jira', help='url of jira server', default='http://localhost:8080')\n\n args = parser.parse_args()\n\n jira_user = args.username\n jira_pass = args.password\n jira_server = args.jira\n config_file_path = args.config\n CONFIG.read(config_file_path)\n\n parent_ticket = config_map('JiraOptions')['parent_ticket']\n apprenda_version = config_map('VersionInfo')['to_version']\n jira_project = config_map('JiraOptions')['project']\n jira_issue_type = config_map('JiraOptions')['issue_type']\n jira = JIRA(jira_server, basic_auth=(jira_user, jira_pass))\n\n parent_issue = jira.issue(parent_ticket)\n ticket_list = []\n\n # create clean install tickets\n clean_strings = config_map('CleanInstallSection')\n for cloud in ['single', 'hybrid']:\n ticket_to_add = ticket.Ticket(jira_project, jira_issue_type)\n ticket_to_add.format_summary(clean_strings['summary'], apprenda_version, cloud)\n ticket_to_add.format_description(clean_strings['description'])\n ticket_list.append(ticket_to_add.__dict__)\n\n # create upgrade tickets\n from_versions = json.loads(config_map('VersionInfo')['from_versions'])\n upgrade_strings = config_map('UpgradeSection')\n\n # single cloud\n for version in from_versions:\n ticket_to_add = ticket.Ticket(jira_project, jira_issue_type)\n ticket_to_add.format_summary(upgrade_strings['summary'], apprenda_version, version,\n \"single\")\n ticket_to_add.format_description(upgrade_strings['description'])\n ticket_list.append(ticket_to_add.__dict__)\n\n # hybrid cloud\n for version in from_versions:\n ticket_to_add = ticket.Ticket(jira_project, jira_issue_type)\n ticket_to_add.format_summary(upgrade_strings['summary'], apprenda_version, version,\n \"hybrid\")\n ticket_to_add.format_description(upgrade_strings['description'])\n ticket_list.append(ticket_to_add.__dict__)\n\n # create testing tickets for other tasks\n for section in CONFIG.sections():\n if 'Ticket' in section:\n strings = config_map(section)\n ticket_to_add = ticket.Ticket(jira_project, jira_issue_type)\n ticket_to_add.format_summary(strings['summary'], apprenda_version)\n ticket_to_add.format_description(strings['description'])\n ticket_list.append(ticket_to_add.__dict__)\n\n print 'Created {0} tickets, now sending them to Jira'.format(len(ticket_list))\n # send issues to jira and create tickets and links\n issues = jira.create_issues(field_list=ticket_list)\n\n for item in issues:\n jira.create_issue_link(\n type=\"Task of Story\",\n outwardIssue=item['issue'].key,\n inwardIssue=parent_issue.key,\n )\n\n print 'Finished linking issues, exiting.'", "def raise_jira_ticket(obj,org_id):\n try:\n app_id = obj.get('app_id','') \n vul_name = obj.get('vul_name','')\n cwe = int(obj.get('cwe',0))\n project_key = obj.get('project_key','')\n issuetype = obj.get('issuetype','Bug')\n assignee = obj.get('assignee')\n app_obj = Application.objects.get(pk=app_id)\n if app_id and vul_name:\n vuls = Vulnerability.objects.filter(is_false_positive=False,is_remediated=False,scan__application=app_obj,cwe=cwe,name=vul_name)\n jira_obj = JiraIssueTypes.objects.get(org__id=org_id)\n jira = get_jira_con(jira_obj) \n if jira and 
vuls.exists(): \n complete_desc = ''\n references = '' \n if app_obj:\n complete_desc += 'Application:\\n{0}\\n\\n'.format(app_obj.name)\n complete_desc += 'Application URL:\\n{0}\\n\\n'.format(app_obj.url)\n if cwe:\n complete_desc += 'CWE :\\n{0}\\n\\n'.format(cwe)\n org_obj = app_obj.org\n if org_obj.orl_config_exists():\n vul_info = get_open_vul_info_from_api(cwe,org_obj)\n complete_desc += 'Description:\\n{0}\\n\\n'.format(vul_info.get('description','')) \n if references:\n complete_desc += 'References:\\n{0}'.format(references) \n data_dict = {\n 'project':{'key':project_key },\n 'issuetype':{'name': issuetype},\n 'priority':{'name': 'Highest'},\n 'summary':vul_name,\n 'description':complete_desc, \n } \n new_issue = jira.create_issue(**data_dict) \n evids = VulnerabilityEvidence.objects.filter(vul__in=vuls) \n attachment = io.StringIO()\n attachment.write('Evidences') \n for evid in evids:\n data = '\\n\\t- {0}\\n\\t\\t- {1}'.format(evid.url,evid.name)\n attachment.write(data) \n jira.add_attachment(issue=new_issue, attachment=attachment, filename='evidences.txt') \n vuls.update(jira_id=str(new_issue),jira_issue_status=str(new_issue.fields.status))\n info_debug_log(event='Raise Jira ticket',status='success')\n if assignee:\n jira.assign_issue(new_issue,assignee)\n info_debug_log(event='Assign Jira ticket to an assignee',status='success')\n except BaseException as e:\n print(\"Error raising JIRA tickets\")\n # general_error_messages.delay(path='raise_jira_ticket function',msg=log_exception(e))\n critical_debug_log(event=e,status='failure')", "def test_issue_create_issue(self):\n pass", "def post_to_github(results: List[dict]):\n\n tests_info_body = ''\n has_failed = False\n for result in results:\n if result['status'] == 'passed':\n tests_info_body += f':white_check_mark: `{result[\"command\"]}`\\n'\n else:\n has_failed = True\n tests_info_body += f':x: `{result[\"command\"]}`\\n```{result[\"output\"]}```\\n<br>'\n\n pr_body = 'Whoopsie. Looks like there are some issues with this PR. :space_invader:' if \\\n has_failed else 'This PR is good to go ! 
:tada:'\n\n pr_body += f'\\n\\n<details><summary><strong>Tests</strong></summary><p>\\n\\n{tests_info_body}\\n</p></details>'\n\n try:\n source_repo = '/'.join(os.getenv('CODEBUILD_SOURCE_REPO_URL')[:-4].split('/')[-2:])\n source_commit_hash = os.getenv('CODEBUILD_RESOLVED_SOURCE_VERSION')\n source_pr = int(os.getenv('CODEBUILD_WEBHOOK_PR', '0'))\n\n if source_pr > 0:\n g = Github(os.getenv('GITHUB_API_TOKEN', ''))\n repo = g.get_repo(source_repo)\n pr: PullRequest = repo.get_pull(source_pr)\n\n print(\n f'Creating review comment: '\n f'pr -> {pr.title} // '\n f'commit -> {source_commit_hash} // '\n f'has_failed -> {has_failed}'\n )\n\n pr.create_review(\n repo.get_commit(sha=source_commit_hash),\n pr_body,\n 'REQUEST_CHANGES' if has_failed else 'APPROVE'\n )\n finally:\n if has_failed:\n print('Test(s) failed.')\n exit(1)", "def test_issue_list_issues(self):\n pass", "def print_push_info(ctx, patches, sha1s, ticket_numbers, tickets):\n remote = ctx.config['remote']\n branches = sha1s.keys()\n\n ctx.push_info = {}\n pagure_log = []\n bugzilla_log = ['Fixed upstream']\n for branch in branches:\n pagure_log.append('%s:\\n' % branch) # we need extra newline for pagure\n bugzilla_log.append('%s:' % branch)\n log_result = ctx.runprocess(\n ['git', 'log', '--graph', '--oneline', '--abbrev=99',\n '--color=never', '%s/%s..%s' % (remote, branch, sha1s[branch])])\n pagure_log.extend(\n line.rstrip()\n for line in reversed(log_result.stdout.splitlines()))\n pagure_log.append('\\n') # add newline to fix github/pagure formatting\n\n log_result = ctx.runprocess(\n ['git', 'log', '--pretty=format:%H',\n '%s/%s..%s' % (remote, branch, sha1s[branch])])\n bugzilla_log.extend(\n ctx.config['commit-url'] + line.strip()\n for line in reversed(log_result.stdout.splitlines()))\n\n bugzilla_urls = []\n bugzilla_re = re.compile('(%s\\d+)' %\n re.escape(ctx.config['bugzilla-bug-url']))\n jira_urls = []\n jira_re = re.compile('(%s\\d+)' % re.escape(ctx.config['jira-ticket-url']))\n\n for ticket in tickets:\n if ticket.rhbz:\n for match in bugzilla_re.finditer(ticket.rhbz):\n bugzilla_urls.append(match.group(0))\n for match in jira_re.finditer(ticket.rhbz):\n jira_urls.append(match.group(0))\n\n for branch in branches:\n print(ctx.term.cyan('=== Diffstat for %s ===' % branch))\n log_result = ctx.runprocess(\n ['git', 'diff', '--stat', '--color=%s' % ctx.color_arg,\n '%s/%s..%s' % (remote, branch, sha1s[branch])],\n verbosity=2)\n print(ctx.term.cyan('=== Log for %s ===' % branch))\n log_result = ctx.runprocess(\n ['git', 'log', '--reverse', '--color=%s' % ctx.color_arg,\n '%s/%s..%s' % (remote, branch, sha1s[branch])],\n verbosity=2)\n\n print(ctx.term.cyan('=== Patches pushed ==='))\n for patch in patches:\n print(patch.filename)\n\n print(ctx.term.cyan('=== Mail summary ==='))\n if len(branches) == 1:\n print('Pushed to ', end='')\n else:\n print('Pushed to:')\n for branch in branches:\n print('%s: %s' % (branch, sha1s[branch]))\n\n print(ctx.term.cyan('=== Ticket comment ==='))\n pagure_msg = '\\n'.join(pagure_log)\n print(pagure_msg)\n ctx.push_info['pagure_comment'] = pagure_msg\n\n print(ctx.term.cyan('=== Bugzilla/JIRA comment ==='))\n bugzilla_msg = '\\n'.join(bugzilla_log)\n print(bugzilla_msg)\n ctx.push_info['bugzilla_comment'] = bugzilla_msg\n\n if ticket_numbers:\n print(ctx.term.cyan('=== Tickets fixed ==='))\n for number in sorted(ticket_numbers):\n print('%s%s' % (ctx.config['ticket-url'], number))\n\n if bugzilla_urls:\n print(ctx.term.cyan('=== Bugzillas fixed ==='))\n 
print('\\n'.join(bugzilla_urls))\n \n if jira_urls:\n print(ctx.term.cyan('=== Jira tickets fixed ==='))\n print('\\n'.join(jira_urls))\n\n print(ctx.term.cyan('=== Ready to push ==='))", "def autoReporter (environ, start_response):\n \n fields = paste.request.parse_formvars(environ)\n \n if environ['REQUEST_METHOD'] == 'POST':\n response = cgiIssue(fields)\n\n start_response('200 OK', [('content-type', 'text/plain')])\n return [response]\n \n else:\n start_response('200 OK', [('content-type', 'text/html')])\n return ['<html><title>Sample Report Interface</title><body>',\n '<form method=\"POST\">',\n 'Title (optional): <input type=\"text\" name=\"title\" /><br/>',\n 'App ID: <input type=\"text\" name=\"app_id\" /><br/>',\n 'App Version: <input type=\"text\" name=\"app_version\" /><br/>',\n 'Platform: <input type=\"text\" name=\"platform\" /><br/>',\n 'Message 1: <input type=\"text\" name=\"message\" /><br/>',\n 'Message 2: <input type=\"text\" name=\"message\" /><br/>',\n '<input type=\"submit\" /></form>',\n '</body></html>']", "async def run(self) -> None:\n pull_requests = await self.jira.run()\n if pull_requests:\n await self.slack.remind_about_pull_requests(pull_requests)\n else:\n await self.slack.send_no_pull_requests_message()", "def setUp(self):\n self.client = APIClient()\n for issue in api_response_issues:\n validate_and_store_issue(issue)", "def push_mockups():\n local('cd ../../cts-ui && grunt')\n local('cp ../../cts-ui/mockups/css/*.css ../../mockups/cts-ui/css/.')\n local('cp -R ../../cts-ui/mockups/css/bootstrap ../../mockups/cts-ui/css/bootstrap')\n local('cp -R ../../cts-ui/mockups/img ../../mockups/cts-ui/img')\n local('cp ../../cts-ui/mockups/*.html ../../mockups/cts-ui/.')\n local('cd ../../mockups/cts-ui && git add *.html')\n local('cd ../../mockups/cts-ui/css && git add *.css')\n local('cd ../../mockups/cts-ui/css && git add bootstrap/*')\n local('cd ../../mockups/cts-ui && git add img/*')\n local('cd ../../mockups && git commit -am \"New cts-ui mockups [fabfile]\"')\n local('cd ../../mockups && git push origin master')", "def visit_all_issues_in_list(self, issues):\n for issue in issues:\n self.driver.implicitly_wait(3)\n self.driver.get(issue)\n config_type_text = self.driver.find_element_by_xpath(\"/html/body/b-service-bootstrap/\"\\\n \"app-root/div[7]/div/div/edit-issue-page/b-resolving-issue-references/div[2]/div[1]/\"\\\n \"div[3]/div/div/div[2]/div[2]/div[3]/div/div[1]/div/span/span[6]/span/span/a\").text\n\n source_html = self.driver.page_source\n soup = BeautifulSoup(source_html, \"html.parser\")\n\n advanced_fields = {}\n advanced_fields[\"Issue Id\"] = issue.replace(\"https://b.corp.google.com/issues/\", \"\")\n reporter_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-reporter\")\n reporter = reporter_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n advanced_fields[reporter[0]] = reporter[1]\n assignee_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner bv2-issue-metadata-\"\\\n \"field-assignee\")\n assignee = assignee_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n if assignee[1] != \"empty\":\n advanced_fields[assignee[0]] = assignee[1]\n\n if \"EnqueueRule\" in config_type_text:\n config_type = \"EnqueueRules\"\n elif \"RoutingTargets\" in config_type_text:\n config_type = \"RoutingTargets\"\n elif \"QueueInfo\" in config_type_text:\n config_type = \"QueueInfo\"\n\n advanced_fields[\"Config Type\"] = config_type\n\n if config_type == \"QueueInfo\":\n if 
assignee[1] != constants.AUTOMATION_USER:\n continue\n\n self.scrape_queue_info(advanced_fields)\n elif config_type == \"RoutingTargets\":\n if assignee[1] != constants.AUTOMATION_USER:\n continue\n self.scrape_routing_targets(advanced_fields)\n elif config_type == \"EnqueueRules\":\n self._message_parsing_util.parse_page(soup, reporter[1], issue)", "def report_tests(args, test_summary):\n try:\n if None not in [args.repo_github_token, args.repo_owner, args.repo_name, args.pr_number]:\n comment_on_pr(args.repo_github_token, test_summary, args.repo_owner, args.repo_name, args.pr_number)\n\n except Exception as e:\n logging.error(\"Posting test report on PR failed with error '{ERROR}'\".format(ERROR=e))", "def report(issues, show_urls=False):\r\n # titles may have unicode in them, so we must encode everything below\r\n if show_urls:\r\n for i in issues:\r\n role = 'ghpull' if 'merged' in i else 'ghissue'\r\n print('* :%s:`%d`: %s' % (role, i['number'],\r\n i['title'].encode('utf-8')))\r\n else:\r\n for i in issues:\r\n print('* %d: %s' % (i['number'], i['title'].encode('utf-8')))", "def test_insert_data(self):\n data_github = {\n \"version_control\": \"github\",\n \"scm_repo\": \"A\",\n \"scm_branch\": \"A\",\n \"scm_commit\": \"A\",\n \"repo\": \"A\",\n \"branch\": \"A\",\n \"enabled\": 0\n }\n\n data_git = {\n \"version_control\": \"git\",\n \"scm_repo\": \"A\",\n \"scm_branch\": \"A\",\n \"scm_commit\": \"A\",\n \"repo\": \"A\",\n \"branch\": \"A\",\n \"enabled\": 0\n }\n\n for data in [data_git, data_github]:\n resp = self.client.post(\"/tracking\", json=data, content_type=\"application/json\", headers=self.auth)\n resp_dict = json.loads(resp.data)\n self.assertIn(\"code\", resp_dict, msg=\"Error in data format return\")\n self.assertEqual(ResponseCode.SUCCESS, resp_dict.get(\"code\"), msg=\"Error in status code return\")\n\n self.assertIn(\"msg\", resp_dict, msg=\"Error in data format return\")\n self.assertEqual(\n ResponseCode.CODE_MSG_MAP.get(ResponseCode.SUCCESS),\n resp_dict.get(\"msg\"),\n msg=\"Error in status code return\"\n )\n\n self.assertIn(\"data\", resp_dict, msg=\"Error in data format return\")\n self.assertIsNotNone(resp_dict.get(\"data\"), msg=\"Error in data information return\")", "def test_basic_report(self):\n report = self.analytics.suites[testReportSuite].report\n queue = []\n queue.append(report)\n response = omniture.sync(queue)\n self.assertIsInstance(response, list)", "def do_the_issues(user_id, repo_id):\n with tempfile.TemporaryDirectory() as tmp:\n path = os.path.join(tmp, \"{}_{}_issues.txt\".format(repo_id, user_id))\n issues_initial_url = get_initial_url_issues(user_id, repo_id)\n resp_obj = requests.get(issues_initial_url, headers=headers)\n # prase the initial request. 
for Issue\n all_issues = json.loads(resp_obj.text)\n with open(path, \"w\") as out_stream:\n for an_issue in all_issues:\n print(an_issue, file=out_stream)\n print(\"the len of resp is {}\".format(len(all_issues)))\n LINK_HEADER = \"Link\"\n next_url = None\n if LINK_HEADER in resp_obj.headers:\n # parse next page (if present)\n next_url = parse_next_url(resp_obj.headers[LINK_HEADER])\n # subsequent page\n while next_url:\n resp_obj = requests.get(next_url, headers=headers)\n all_issues = json.loads(resp_obj.text)\n with open(path, \"a\") as out_stream:\n for an_issue in all_issues:\n print(an_issue, file=out_stream)\n if LINK_HEADER in resp_obj.headers:\n next_url = parse_next_url(resp_obj.headers[LINK_HEADER])\n print(next_url)\n else:\n next_url = None\n GsUpload.upload_blob(GS_BUCKET_NAME, path, basename(path))\n print(\"the issues path is \" + str(path))", "def get_jira_issues(jira, username):\n exclude_stories = cfg.args.x\n epics_only = cfg.args.e\n all_status = cfg.args.all\n filename = cfg.args.file\n user = cfg.args.user\n last_comment = cfg.args.l\n\n issue_types = [\"Sub-task\", \"Epic\"]\n if not epics_only:\n issue_types.append(\"Initiative\")\n if not exclude_stories:\n issue_types.extend([\"Story\", \"Task\", \"Sub-task\", \"Bug\"])\n issue_type = \"issuetype in (%s)\" % \", \".join(issue_types)\n\n status = 'status in (\"In Progress\")'\n if all_status:\n status = \"status not in (Resolved, Closed)\"\n\n if user is None:\n user = \"currentUser()\"\n else:\n user = '\"%s\"' % add_domain(user)\n\n jql = \"%s AND assignee = %s AND %s\" % (issue_type, user, status)\n log.debug(jql)\n\n my_issues = jira.search_issues(jql)\n if my_issues.total > my_issues.maxResults:\n my_issues = jira.search_issues(jql, maxResults=my_issues.total)\n\n showdate = strftime(\"%Y-%m-%d\", gmtime())\n subject = \"Subject: [Weekly] Week ending \" + showdate + \"\\n\\n\"\n\n msg = get_header()\n if msg != \"\":\n msg += email_to_name(username) + \"\\n\\n\"\n\n f = open_file(filename)\n filename = f.name\n\n f.write(subject)\n\n f.write(msg)\n log.debug(\"Found issue:\")\n for issue in my_issues:\n log.debug(\"%s : %s\" % (issue, issue.fields.summary))\n\n if merge_issue_header():\n f.write(\n \"[%s%s%s]\\n\" % (issue, get_header_separator(), issue.fields.summary)\n )\n else:\n f.write(\"[%s]\\n\" % issue)\n f.write(\"# Header: %s\\n\" % issue.fields.summary)\n\n f.write(\"# Type: %s\\n\" % issue.fields.issuetype)\n f.write(\"# Status: %s\\n\" % issue.fields.status)\n f.write(get_extra_comments())\n if last_comment:\n write_last_jira_comment(f, jira, issue)\n f.write(\"\\n\")\n\n f.close()\n return (filename, my_issues)", "def add_to_sprint(self, sprint_id: str):\n logger.debug(f'Adding Jira issue {self.jira_key} to sprint {sprint_id}')\n self.repo.api_call(requests.post, f'sprint/{sprint_id}/issue', url_head=self.repo.alt_url,\n json={'issues': [self.jira_key]}, success_code=204)", "def test_get_work_logs_multiple_pages(self):\n with open(\"work_logs_multiple_first_page.json\", \"r\") as issues_first_file:\n mock_response_first_page = issues_first_file.read()\n\n with open(\"work_logs_multiple_second_page.json\", \"r\") as issues_second_file:\n mock_response_second_page = issues_second_file.read()\n\n issues = [Issue(10005, \"MYB-5\", \"Summary of issue MYB-5\", \"MYB-3\", \"Summary of the parent issue of MYB-5\", 3600, 900, datetime(2020, 1, 20))]\n\n with requests_mock.Mocker() as m:\n m.register_uri('GET', '/rest/api/2/issue/MYB-5/worklog/', [{'text': mock_response_first_page},\n {'text': 
mock_response_second_page}])\n work_logs, issues = jiratimereport.get_work_logs(\"https://jira_url\", \"user_name\", \"api_token\",\n \"2020-01-10\", \"2020-01-20\", \"\", issues)\n\n work_logs_expected_result = [WorkLog(\"MYB-5\", datetime(2020, 1, 12), 3600, \"John Doe\"),\n WorkLog(\"MYB-5\", datetime(2020, 1, 18), 3600, \"John Doe\"),\n WorkLog(\"MYB-5\", datetime(2020, 1, 18), 5400, \"John Doe\")]\n\n self.assertListEqual(work_logs_expected_result, work_logs, \"Work Log lists are unequal\")\n\n issue_myb_5 = Issue(10005, \"MYB-5\", \"Summary of issue MYB-5\", \"MYB-3\", \"Summary of the parent issue of MYB-5\",\n 3600, 900, datetime(2020, 1, 20))\n issue_myb_5.issue_start_date = datetime(2020, 1, 12)\n\n issues_expected_result = [issue_myb_5]\n\n self.assertListEqual(issues_expected_result, issues, \"Issue lists are unequal\")", "def test_issue_post_issue_reaction(self):\n pass", "def test_run(self, mock):\n mock.return_value = mock_trello_service()\n\n pull_requests = PullRequest.query.all()\n self.assertTrue(len(pull_requests) is 0)\n\n payload = json_fixture('./tests/fixtures/pull_request_opened.json')\n CreatePullRequestCard.delay(\n board_id=default_board_id,\n list_id=default_list_id,\n name='Fake Pull Request',\n payload=payload\n )\n\n # Enqueuing new pull_request `CreatePullRequestCard` should create a\n # `PullRequest` record\n new_pull_requests = PullRequest.query.all()\n self.assertTrue(len(new_pull_requests) is 1)", "def test_get_work_logs_one_page(self):\n with open(\"work_logs_first_issue_one_page.json\", \"r\") as first_issue_file:\n mock_response_first_issue = first_issue_file.read()\n\n with open(\"work_logs_second_issue_one_page.json\", \"r\") as second_issue_file:\n mock_response_second_issue = second_issue_file.read()\n\n issues = [Issue(10005, \"MYB-5\", \"Summary of issue MYB-5\", \"MYB-3\", \"Summary of the parent issue of MYB-5\", 3600, 900, datetime(2020, 1, 20)),\n Issue(10004, \"MYB-4\", \"Summary of issue MYB-4\", \"MYB-3\", \"Summary of the parent issue of MYB-4\", 7200, 600, None)]\n\n with requests_mock.Mocker() as m:\n m.register_uri('GET', '/rest/api/2/issue/MYB-5/worklog/', text=mock_response_first_issue)\n m.register_uri('GET', '/rest/api/2/issue/MYB-4/worklog/', text=mock_response_second_issue)\n work_logs, issues = jiratimereport.get_work_logs(\"https://jira_url\", \"user_name\", \"api_token\",\n \"2020-01-10\", \"2020-01-20\", \"\", issues)\n\n work_logs_expected_result = [WorkLog(\"MYB-5\", datetime(2020, 1, 18), 3600, \"John Doe\"),\n WorkLog(\"MYB-5\", datetime(2020, 1, 18), 5400, \"John Doe\"),\n WorkLog(\"MYB-4\", datetime(2020, 1, 12), 3600, \"John Doe\")]\n\n self.assertListEqual(work_logs_expected_result, work_logs, \"Work Log lists are unequal\")\n\n issue_myb_5 = Issue(10005, \"MYB-5\", \"Summary of issue MYB-5\", \"MYB-3\", \"Summary of the parent issue of MYB-5\",\n 3600, 900, datetime(2020, 1, 20))\n issue_myb_5.issue_start_date = datetime(2020, 1, 18)\n issue_myb_4 = Issue(10004, \"MYB-4\", \"Summary of issue MYB-4\", \"MYB-3\", \"Summary of the parent issue of MYB-4\",\n 7200, 600, None)\n issue_myb_4.issue_start_date = datetime(2020, 1, 12)\n\n issues_expected_result = [issue_myb_5,\n issue_myb_4]\n\n self.assertListEqual(issues_expected_result, issues, \"Issue lists are unequal\")", "def post_to_github(report, user=None, pw=None, proxies=None):\n proxies = proxies or dict()\n # Determine authentication method. 
No username or password search for\n # configuration file with GITHUB section\n if not user and not pw:\n # Find configuration file\n cfg = ConfigParser()\n cfgs = cfg.read(['web.cfg', '.web.cfg',\n os.path.expanduser('~/.web.cfg'),\n 'qs.cfg', '.qs.cfg',\n os.path.expanduser('~/.qs.cfg')])\n if cfgs:\n # Grab login information\n try:\n user = cfg.get('GITHUB', 'user')\n pw = cfg.get('GITHUB', 'pw')\n except (NoOptionError, NoSectionError):\n logger.debug('No GITHUB section in configuration file '\n 'with user and pw entries')\n # Grab proxy information if we will be using web.cfg\n if (user or pw) and not proxies:\n try:\n proxy_name = cfg.get('GITHUB', 'proxy')\n logger.debug(\"Using proxy host %s\", proxy_name)\n proxies = {'https': proxy_name}\n except NoOptionError:\n logger.debug(\"No proxy information found\")\n # No valid configurations\n else:\n logger.debug('No \"web.cfg\" file found')\n # Manually ask if we didn't get the username or password already\n if not user:\n user = input('Github Username: ')\n if not pw:\n pw = getpass.getpass('Password for GitHub Account {}: '\n ''.format(user))\n # Our url to create issues via POST\n url = 'https://api.github.com/repos/pcdshub/Bug-Reports/issues'\n # Create the body of the template\n env = Environment(loader=PackageLoader('hutch_python'),\n trim_blocks=True, lstrip_blocks=True)\n template = env.get_template('issue.template')\n body = template.render(report)\n # Requests session\n session = requests.Session()\n session.auth = (user, pw)\n session.proxies.update(proxies)\n issue = {'title': report['title'],\n 'body': body,\n 'assignee': None,\n 'milestone': None,\n 'labels': []} # TODO: Determine hutch to create issue for\n # Post to GitHub\n r = session.post(url, simplejson.dumps(issue))\n if r.status_code == 201:\n logger.info(\"Succesfully created GitHub issue\")\n else:\n logger.exception(\"Could not create GitHub issue. HTTP Status Code: %s\",\n r.status_code)", "def create_jira_issue(self, server_url, username, password, issue_summary, issue_description, project_key, issue_type='Bug'):\n status = True\n output_dict = {}\n wdesc = \"Creates a JIRA issue\"\n pSubStep(wdesc)\n issue_summary = issue_summary.replace('\"', \" \")\n issue_description = issue_description.replace('\"', \"-\")\n fetchuri = server_url\n postdata_url=fetchuri+'/rest/api/2/issue/'\n postdata = \"\"\"\n {\n \"fields\": {\n \"project\":\n {\n \"key\": \\\"\"\"\"+project_key+\"\"\"\\\"\n },\n \"summary\": \\\"\"\"\"+issue_summary+\"\"\"\\\",\n \"description\": \\\"\"\"\"+issue_description+\"\"\"\\\",\n \"issuetype\": {\n \"name\": \\\"\"\"\"+issue_type+\"\"\"\\\"\n }\n }\n }\n \"\"\"\n credential_handler=urllib2.HTTPPasswordMgrWithDefaultRealm()\n credential_handler.add_password(None, postdata_url, username, password)\n auth = urllib2.HTTPBasicAuthHandler(credential_handler)\n userpassword = username + \":\" + password\n password = base64.b64encode(userpassword)\n #Create an Authentication handler\n opener = urllib2.build_opener(auth)\n urllib2.install_opener(opener)\n opener = urllib2.build_opener(urllib2.HTTPHandler(debuglevel=1))\n #Create a POST request\n headers={\"Authorization\" : \"Basic \"+password,\"Content-Type\": \"application/json\"}\n request=urllib2.Request(str(postdata_url),postdata,headers)\n try:\n handler = urllib2.urlopen(request)\n extension = json.loads(handler.read())\n issue_id = str(extension['key'])\n pNote(\"JIRA Issue Created. 
Issue-Id: {0}\".format(issue_id))\n output_dict[\"issue_id\"] = issue_id\n except Exception as e:\n status = False\n pNote(\"Problem creating JIRA issue.\" , \"error\")\n pNote(\"JIRA Error Code: ({0})\".format(e) , \"error\")\n\n Utils.data_Utils.update_datarepository(output_dict)\n Utils.testcase_Utils.report_substep_status(status)\n return status", "def add_jira_entries(config, date, dry_run, economic):\n if date is not None:\n jira = Jira(config.items('Jira'))\n for task in jira.get_tasks():\n if task:\n economic.add_time_entry(task, dry_run)", "def test_fewer_parameters(self):\n data_github = {\"version_control\": \"github\", \"scm_commit\": \"AA\", \"repo\": \"AA\", \"branch\": \"AA\", \"enabled\": 1}\n data_git = {\"version_control\": \"git\", \"scm_commit\": \"AA\", \"repo\": \"AA\", \"branch\": \"AA\", \"enabled\": 1}\n\n for data in [data_git, data_github]:\n resp = self.client.post(\"/tracking\", json=data, content_type=\"application/json\", headers=self.auth)\n resp_dict = json.loads(resp.data)\n self.assertIn(\"code\", resp_dict, msg=\"Error in data format return\")\n self.assertEqual(\n ResponseCode.INPUT_PARAMETERS_ERROR, resp_dict.get(\"code\"), msg=\"Error in status code return\"\n )\n\n self.assertIn(\"msg\", resp_dict, msg=\"Error in data format return\")\n self.assertEqual(\n ResponseCode.CODE_MSG_MAP.get(ResponseCode.INPUT_PARAMETERS_ERROR),\n resp_dict.get(\"msg\"),\n msg=\"Error in status code return\"\n )\n\n self.assertIn(\"data\", resp_dict, msg=\"Error in data format return\")\n self.assertEqual(resp_dict.get(\"data\"), None, msg=\"Error in data information return\")" ]
[ "0.60630643", "0.59804326", "0.5954895", "0.588658", "0.5663922", "0.5642216", "0.54534817", "0.54471946", "0.54439807", "0.54370046", "0.5394964", "0.5368015", "0.5364517", "0.53410673", "0.5320143", "0.5316345", "0.5315661", "0.53115475", "0.53106874", "0.5309121", "0.52791184", "0.52654403", "0.5220235", "0.5177885", "0.5165309", "0.51592", "0.51370186", "0.51299536", "0.51187134", "0.5116257" ]
0.7445449
0
Retrieve metadata describing an arrayset artifact.
def get_model_arrayset_metadata(database, model, aid, arrays=None, statistics=None, unique=None): if isinstance(arrays, str): arrays = slycat.hyperchunks.parse(arrays) if isinstance(statistics, str): statistics = slycat.hyperchunks.parse(statistics) if isinstance(unique, str): unique = slycat.hyperchunks.parse(unique) # Handle legacy behavior. if arrays is None and statistics is None and unique is None: with slycat.web.server.hdf5.lock: with slycat.web.server.hdf5.open(model["artifact:%s" % aid], "r+") as file: hdf5_arrayset = slycat.hdf5.ArraySet(file) results = [] for array in sorted(hdf5_arrayset.keys()): hdf5_array = hdf5_arrayset[array] results.append({ "array": int(array), "index": int(array), "dimensions": hdf5_array.dimensions, "attributes": hdf5_array.attributes, "shape": tuple([dimension["end"] - dimension["begin"] for dimension in hdf5_array.dimensions]), }) return results with slycat.web.server.hdf5.lock: with slycat.web.server.hdf5.open(model["artifact:%s" % aid], "r+") as file: # We have to open the file with writing enabled in case the statistics cache needs to be updated. hdf5_arrayset = slycat.hdf5.ArraySet(file) results = {} if arrays is not None: results["arrays"] = [] for array in slycat.hyperchunks.arrays(arrays, hdf5_arrayset.array_count()): hdf5_array = hdf5_arrayset[array.index] results["arrays"].append({ "index": array.index, "dimensions": hdf5_array.dimensions, "attributes": hdf5_array.attributes, "shape": tuple([dimension["end"] - dimension["begin"] for dimension in hdf5_array.dimensions]), }) if statistics is not None: results["statistics"] = [] for array in slycat.hyperchunks.arrays(statistics, hdf5_arrayset.array_count()): hdf5_array = hdf5_arrayset[array.index] for attribute in array.attributes(len(hdf5_array.attributes)): statistics = {} statistics["array"] = array.index if isinstance(attribute.expression, slycat.hyperchunks.grammar.AttributeIndex): statistics["attribute"] = attribute.expression.index statistics.update(hdf5_array.get_statistics(attribute.expression.index)) else: values = evaluate(hdf5_array, attribute.expression, "statistics") statistics["min"] = values.min() statistics["max"] = values.max() statistics["unique"] = len(numpy.unique(values)) results["statistics"].append(statistics) if unique is not None: results["unique"] = [] for array in slycat.hyperchunks.arrays(unique, hdf5_arrayset.array_count()): hdf5_array = hdf5_arrayset[array.index] for attribute in array.attributes(len(hdf5_array.attributes)): unique = {} unique["array"] = array.index unique["values"] = [] if isinstance(attribute.expression, slycat.hyperchunks.grammar.AttributeIndex): for hyperslice in attribute.hyperslices(): unique["attribute"] = attribute.expression.index unique["values"].append( hdf5_array.get_unique(attribute.expression.index, hyperslice)["values"]) else: values = evaluate(hdf5_array, attribute.expression, "uniques") for hyperslice in attribute.hyperslices(): unique["values"].append(numpy.unique(values)[hyperslice]) if isinstance(unique["values"][0], list): unique["values"] = [a.tolist() for a in unique["values"]] results["unique"].append(unique) return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def metadata(self) -> 'outputs.DataCollectionEndpointResponseMetadata':\n return pulumi.get(self, \"metadata\")", "def get_assets_metadata(self):\n return Metadata(**settings.METADATA['asset_ids'])", "def GetMetadata(self):\n return self.dict['meta']", "def _getAllMeta(self):\n try:\n metadata = pyexiv2.ImageMetadata(self.imagePath)\n metadata.read()\n return metadata\n except:\n print 'error reading meta data'\n return None", "def get_metadata(self):\n return self._metadata", "def metadata(self):\n return self.meta.metadata", "def metadata(self) -> global___SummaryMetadata:", "def metadata(self) -> pulumi.Output[Optional['outputs.SecurityAssessmentMetadataPropertiesResponse']]:\n return pulumi.get(self, \"metadata\")", "def metadata(self) -> pulumi.Output[Optional['outputs.SecurityAssessmentMetadataPropertiesResponse']]:\n return pulumi.get(self, \"metadata\")", "def metadata(self):\r\n return self._metadata", "def get_metadata(self):\n return self.client._perform_json(\n \"GET\", \"/projects/%s/recipes/%s/metadata\" % (self.project_key, self.recipe_name))", "def get_metadata(self):\n return self.manager.get_metadata(self)", "def metadata(self):\n return self._metadata", "def metadata(self):\n return self._metadata", "def metadata(self):\n return self._metadata", "def metadata(self):\n return self._metadata", "def metadata(self):\n return self._metadata", "def get_meta(_id):\n dataset = ESDataset.get(id=_id, ignore=404, _source=\"_meta\")\n\n if dataset:\n return RegistryDocument.wraps(dataset).meta\n\n raise NoEntityError(f\"dataset {_id} does not exist.\")", "def meta(self):\n return self.spec.meta", "def _get_dsmeta(self, bids):\n # STEP 1: Extract metadata from `dataset_description.json`\n metadata = self._get_bids_dsdescription(bids)\n # STEP 2: Extract README text\n metadata[\"description\"] = self._get_bids_readme()\n # STEP 3: Extract information about entities and add to metadata\n metadata[\"entities\"] = self._get_bids_entities(bids)\n # STEP 4: Extract variable collection information on multiple levels\n metadata[\"variables\"] = self._get_bids_variables(bids)\n # STEP 5: Add context to metadata output\n metadata[\"@context\"] = BIDSCONTEXT\n return metadata", "def get(self):\n return self._metadata", "def get_metadata_v3(session):\n LOG.debug(\"Exporting metadata for SFS augur build\")\n\n metadata = datastore.fetch_rows_from_table(session, (\"shipping\", \"metadata_for_augur_build_v3\"))\n\n return Response((row[0] + '\\n' for row in metadata), mimetype=\"application/x-ndjson\")", "def metadata(self): # -> list[Unknown]:\n ...", "def metadata(self): # -> list[Unknown]:\n ...", "def metadata(self) -> Optional[pulumi.Input['SecurityAssessmentMetadataPropertiesArgs']]:\n return pulumi.get(self, \"metadata\")", "def metadata(self):\n return UnpackedSDist(self.find_egg_info_file())", "def get_assessments_metadata(self):\n return Metadata(**settings.METADATA['assessment_ids'])", "def metadata(self):\r\n return resources.Metadata(self)", "def metadata(self): # -> None:\n ...", "def metadata(self):\n return parse_metadata(self.metadata_path())" ]
[ "0.5965084", "0.5936909", "0.57802886", "0.57577825", "0.5755986", "0.5726956", "0.5722624", "0.57162297", "0.57162297", "0.5708068", "0.5654824", "0.5649437", "0.56223595", "0.56223595", "0.56223595", "0.56223595", "0.56223595", "0.56012475", "0.5600938", "0.5594341", "0.5587657", "0.55604976", "0.5553949", "0.5553949", "0.5545569", "0.55400795", "0.54960537", "0.5474925", "0.5474241", "0.54704046" ]
0.64080864
0
Start a new model array set artifact.
def put_model_arrayset(database, model, aid, input=False): model = database.get('model',model["_id"]) slycat.web.server.update_model(database, model, message="Starting array set %s." % (aid)) storage = uuid.uuid4().hex with slycat.web.server.hdf5.lock: with slycat.web.server.hdf5.create(storage) as file: arrayset = slycat.hdf5.start_arrayset(file) with get_model_lock(model["_id"]): database.save({"_id": storage, "type": "hdf5"}) model = database.get('model',model["_id"]) model["artifact:%s" % aid] = storage model["artifact-types"][aid] = "hdf5" if input: model["input-artifacts"] = list(set(model["input-artifacts"] + [aid])) database.save(model)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def put_model_array(database, model, aid, array_index, attributes, dimensions):\n slycat.web.server.update_model(database, model, message=\"Starting array set %s array %s.\" % (aid, array_index))\n model = database.get('model', model['_id'])\n storage = model[\"artifact:%s\" % aid]\n with slycat.web.server.hdf5.lock:\n with slycat.web.server.hdf5.open(storage, \"r+\") as file:\n slycat.hdf5.ArraySet(file).start_array(array_index, dimensions, attributes)", "def __init__(self, start_bag=None):\n self.da = DynamicArray()\n\n # populate bag with initial values (if provided)\n # before using this feature, implement add() method\n if start_bag is not None:\n for value in start_bag:\n self.add(value)", "def __init__(self, start_bag=None):\n self.da = DynamicArray()\n\n # populate bag with initial values (if provided)\n # before using this feature, implement add() method\n if start_bag is not None:\n for value in start_bag:\n self.add(value)", "async def begin_array(self):", "def do_start(self, arg):\n args = arg.split(\" \")\n self.model.initialise(args[0])\n self.model.run()", "def instantiate_batch(self, inputs):\n return inputs", "def start(self):\n\t\n\t\tmu_1 = np.array([-.5, .5, .5, -.5])\n\t\tmu_2 = np.array([-.5, -.5, .5, .5])\n\t\tself.mu_values = np.array([mu_1, mu_2])\n\t\t\n\t\tdim_set = self.mu_values.shape[1]\n\n\t\t# TODO: insert an assert if the number of dim_set is different from the number of files for the extraction of the output\n\t\tfor i in range(1,dim_set):\n\t\t\taux_snapshot = self.file_handler.parse(self.namefile_prefix + str(i) + self.file_format, self.output_name)\n\t\t\tsnapshot = aux_snapshot.reshape(aux_snapshot.shape[0],1)\n\t\t\tself.snapshots = np.append(self.snapshots, snapshot, 1)\n\t\t\n\t\ttry:\t\t\n\t\t\tweights = self.file_handler.parse(self.namefile_prefix + '0' + self.file_format, self.weights_name)\n\t\t\t\n\t\t\tif weights.shape[0] != snapshots.shape[0]: #vectorial field: to be improved for n-dimensional fields\n\t\t\t\tweights = np.append(weights, np.append(weights,weights,0), 0)\n\t\texcept:\n\t\t\tweights = 0.*snapshot + 1.\n\t\t\t\n\t\tself.weights = weights[:,0]\n\t\t\n\t\tself.print_info()", "def __init__(self, size_x, size_y, actions=50):\n self.size_x = size_x\n self.size_y = size_y\n self.actions = actions\n self.objects = []\n # a = self.reset()\n # print(a.shape)\n # print(a)\n # self.show_env(a)", "def __init__(self, initialState, runningModels, terminalModel):\n self.T = len(runningModels)\n self.initialState = initialState\n self.runningModels = runningModels\n self.runningDatas = [m.createData() for m in runningModels]\n self.terminalModel = terminalModel\n self.terminalData = terminalModel.createData()", "def __init__(self):\n self.arr = []", "def __init__(self):\n self.arr = []", "def __init__(self):\n self.arr = []", "def __init__(self):\n self.arr = []", "def init_batch(self):\n pass", "def __init__(self):\n self.train(positivity_files, 0)\n self.train(subjectivity_files, 1)", "def action(self):\n\n batch_name = super().find_batch(self.__arguments, '')\n butch = self.__batch_data.get_batch(batch_name)\n cmd = CMDfromArray(butch)\n cmd.run()", "def __init__(self, num_locations):\n self.dataset = {}\n self.num_locations = num_locations\n self.add_locations()", "def __init__(self):\n self.EntireSet = []", "def __init__(self, input, output, options, local=False):\n super().__init__(\n \"create_training_set\",\n None,\n input,\n output,\n local,\n \"multi_training_set.snakefile\",\n )\n self.options = options", "def __init__(self, 
paths, batch_size, endless=False, transform=lambda x:x):\n\n # Member variable(s)\n self.paths = paths\n self.transform = transform\n\n self.files = [h5py.File(path, 'r') for path in self.paths]\n self.datasets = [hf['egamma'] for hf in self.files]\n\n # Base class constructors\n Sequence.__init__(self)\n SplitArray.__init__(self, self.datasets, batch_size=batch_size, endless=endless)\n\n print \"Generator: Created with {} datasets\".format(len(self.datasets))\n return", "def _buildArray(self):\n if len(self.synth_spec.components) == 0:\n self.array = np.zeros(len(self.synth_spec.x))\n else:\n y = []\n for comp in self.synth_spec.components:\n y += [[comp.function(x) for x in self.synth_spec.x]]\n self.array = np.array(y)", "def train(self, absList, modelFilename):\n pass", "def __init__(self, number_of_cheeses, number_of_stools):\n self.model = TOAHModel(number_of_stools)\n self.model.fill_first_stool(number_of_cheeses)", "def __init__(self,goalx,goaly,color,id):\n\n self._point_pub = rospy.Publisher('/visualization_marker_array', MarkerArray,queue_size=1)\n self.goalx = goalx\n self.goaly = goaly\n self.color = color\n self.id = id\n \n markerArray = MarkerArray()\n marker = Marker()\n if(color[1]==1.0):\n rate = rospy.Rate(0.2)\n rate.sleep()\n for i in range(len(goalx)):\n marker.header.frame_id = \"/odom\"\n marker.type = marker.SPHERE\n marker.action = marker.ADD\n marker.scale.x = 0.2\n marker.scale.y = 0.2\n marker.scale.z = 0.2\n marker.color.a = 1.0\n marker.color.r = color[0]\n marker.color.g = color[1]\n marker.color.b = color[2]\n marker.pose.orientation.w = 1.0\n marker.pose.position.x = goalx[i]\n marker.pose.position.y = goaly[i]\n marker.pose.position.z = 0\n \n markerArray.markers.append(marker)\n \n # Renumber the marker IDs\n #id = 0\n for m in markerArray.markers:\n m.id = self.id[i]\n \n # Publish the MarkerArray\n self._point_pub.publish(markerArray)\n rospy.sleep(0.01)", "def train(self, absList, modelFilename):\n pass", "def new_task(self):\n self.true_trajectory = self.simulate()\n self.x0 = self.true_trajectory[0]\n self.xT = self.true_trajectory[-1]\n return self.reset()", "def setup(self):\n in_dataset, out_dataset = self.get_datasets()\n \n out_dataset[0].create_dataset(in_dataset[0])\n\n in_pData, out_pData = self.get_plugin_datasets()\n\n in_pData[0].plugin_data_setup( '',)\n out_pData[0].plugin_data_setup( 'PROJECTION','multiple')", "def _train(self):\n return np.zeros(1, 10)", "def __init__(self, shape):\n self.A = np.zeros(shape) # create space for the resultant activations", "def _from(\n self,\n action: Union[Dict[str, Any], np.ndarray],\n env_id: Optional[np.ndarray] = None,\n ) -> List[np.ndarray]:" ]
[ "0.61201274", "0.55832493", "0.55832493", "0.55684704", "0.5436291", "0.5358201", "0.5327634", "0.52918756", "0.52394766", "0.5192222", "0.5192222", "0.5192222", "0.5192222", "0.51882184", "0.5163489", "0.5162972", "0.5125661", "0.51193255", "0.50964105", "0.50704527", "0.5057363", "0.5053924", "0.5053321", "0.50507146", "0.503763", "0.5031542", "0.50260407", "0.501268", "0.5012582", "0.50043386" ]
0.5996218
1
Write data to an arrayset artifact.
def put_model_arrayset_data(database, model, aid, hyperchunks, data): # cherrypy.log.error("put_model_arrayset_data called with: {}".format(aid)) if isinstance(hyperchunks, str): hyperchunks = slycat.hyperchunks.parse(hyperchunks) data = iter(data) slycat.web.server.update_model(database, model, message="Storing data to array set %s." % (aid)) model = database.get('model', model['_id']) with slycat.web.server.hdf5.lock: with slycat.web.server.hdf5.open(model["artifact:%s" % aid], "r+") as file: hdf5_arrayset = slycat.hdf5.ArraySet(file) for array in slycat.hyperchunks.arrays(hyperchunks, hdf5_arrayset.array_count()): hdf5_array = hdf5_arrayset[array.index] for attribute in array.attributes(len(hdf5_array.attributes)): if not isinstance(attribute.expression, slycat.hyperchunks.grammar.AttributeIndex): cherrypy.log.error("slycat.web.server.__init__.py put_model_arrayset_data", "Cannot write to computed attribute.") raise ValueError("Cannot write to computed attribute.") stored_type = slycat.hdf5.dtype(hdf5_array.attributes[attribute.expression.index]["type"]) for hyperslice in attribute.hyperslices(): data_hyperslice = next(data) if isinstance(data_hyperslice, list): data_hyperslice = numpy.array(data_hyperslice, dtype=stored_type) hdf5_array.set_data(attribute.expression.index, hyperslice, data_hyperslice) file.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_write_element(setup_teardown_file):\n f = setup_teardown_file[3]\n\n dt = np.dtype('(3,)f8')\n dset = f.create_dataset('x', (10,), dtype=dt)\n\n data = np.array([1, 2, 3.0])\n dset[4] = data\n\n out = dset[4]\n assert np.all(out == data)", "def test_write_element(self):\n dt = np.dtype('(3,)f8')\n dset = self.f.create_dataset('x', (10,), dtype=dt)\n\n data = np.array([1,2,3.0])\n dset[4] = data\n\n out = dset[4]\n self.assertTrue(np.all(out == data))", "def put_model_arrayset(database, model, aid, input=False):\n model = database.get('model',model[\"_id\"])\n slycat.web.server.update_model(database, model, message=\"Starting array set %s.\" % (aid))\n storage = uuid.uuid4().hex\n with slycat.web.server.hdf5.lock:\n with slycat.web.server.hdf5.create(storage) as file:\n arrayset = slycat.hdf5.start_arrayset(file)\n with get_model_lock(model[\"_id\"]):\n database.save({\"_id\": storage, \"type\": \"hdf5\"})\n model = database.get('model',model[\"_id\"])\n model[\"artifact:%s\" % aid] = storage\n model[\"artifact-types\"][aid] = \"hdf5\"\n if input:\n model[\"input-artifacts\"] = list(set(model[\"input-artifacts\"] + [aid]))\n database.save(model)", "def write(self, data_set, io_manager=None, location=None,\n force_write=False, flush=True):\n if not hasattr(data_set, '_h5_base_group') or force_write:\n data_set._h5_base_group = self._create_data_object(\n data_set, io_manager, location)\n\n data_name = 'Data Arrays'\n\n if data_name not in data_set._h5_base_group.keys():\n arr_group = data_set._h5_base_group.create_group(data_name)\n else:\n arr_group = data_set._h5_base_group[data_name]\n\n for array_id in data_set.arrays.keys():\n if array_id not in arr_group.keys() or force_write:\n self._create_dataarray_dset(array=data_set.arrays[array_id],\n group=arr_group)\n dset = arr_group[array_id]\n # Resize the dataset and add the new values\n\n # dataset refers to the hdf5 dataset here\n datasetshape = dset.shape\n old_dlen = datasetshape[0]\n x = data_set.arrays[array_id]\n new_dlen = len(x[~np.isnan(x)])\n new_datasetshape = (new_dlen,\n datasetshape[1])\n dset.resize(new_datasetshape)\n new_data_shape = (new_dlen-old_dlen, datasetshape[1])\n dset[old_dlen:new_dlen] = x[old_dlen:new_dlen].reshape(\n new_data_shape)\n # allow resizing extracted data, here so it gets written for\n # incremental writes aswell\n dset.attrs['shape'] = x.shape\n self.write_metadata(data_set)\n\n # flush ensures buffers are written to disk\n # (useful for ensuring openable by other files)\n if flush:\n data_set._h5_base_group.file.flush()", "def read(self, data_set, location=None):\n self._open_file(data_set, location)\n\n for i, array_id in enumerate(\n data_set._h5_base_group['Data Arrays'].keys()):\n # Decoding string is needed because of h5py/issues/379\n name = array_id # will be overwritten if not in file\n dat_arr = data_set._h5_base_group['Data Arrays'][array_id]\n\n # write ensures these attributes always exist\n name = dat_arr.attrs['name'].decode()\n label = dat_arr.attrs['label'].decode()\n units = dat_arr.attrs['units'].decode()\n is_setpoint = str_to_bool(dat_arr.attrs['is_setpoint'].decode())\n # if not is_setpoint:\n set_arrays = dat_arr.attrs['set_arrays']\n set_arrays = [s.decode() for s in set_arrays]\n # else:\n # set_arrays = ()\n vals = dat_arr.value[:, 0]\n if 'shape' in dat_arr.attrs.keys():\n vals = vals.reshape(dat_arr.attrs['shape'])\n if array_id not in data_set.arrays.keys(): # create new array\n d_array = DataArray(\n name=name, array_id=array_id, label=label, 
parameter=None,\n units=units,\n is_setpoint=is_setpoint, set_arrays=(),\n preset_data=vals)\n data_set.add_array(d_array)\n else: # update existing array with extracted values\n d_array = data_set.arrays[array_id]\n d_array.name = name\n d_array.label = label\n d_array.units = units\n d_array.is_setpoint = is_setpoint\n d_array.ndarray = vals\n d_array.shape = dat_arr.attrs['shape']\n # needed because I cannot add set_arrays at this point\n data_set.arrays[array_id]._sa_array_ids = set_arrays\n\n # Add copy/ref of setarrays (not array id only)\n # Note, this is not pretty but a result of how the dataset works\n for array_id, d_array in data_set.arrays.items():\n for sa_id in d_array._sa_array_ids:\n d_array.set_arrays += (data_set.arrays[sa_id], )\n data_set = self.read_metadata(data_set)\n return data_set", "def export_set(dataset):\n return to_xml(dataset.dict)", "def __write_array(self, group: h5py.Group, name: str, data: np.array):\n # make sure chunk size is not bigger than actual matrix size\n if hasattr(data, \"shape\") and len(data.shape) > 1:\n chunks = (min(64, data.shape[0]), min(64, data.shape[1]))\n else:\n chunks = True\n group.create_dataset(\n name, data=data, chunks=chunks, compression=\"gzip\", shuffle=False, compression_opts=2\n )", "def write_data():", "def test_write(self):\n data2 = self.data.copy()\n data2['a'] *= 2\n self.dset['a'] = data2\n self.assertTrue(np.all(self.dset[...] == data2))\n data2['b'] *= 4\n self.dset['b'] = data2\n self.assertTrue(np.all(self.dset[...] == data2))\n data2['a'] *= 3\n data2['c'] *= 3\n self.dset['a','c'] = data2\n self.assertTrue(np.all(self.dset[...] == data2))", "def write(self, data, meta):\n raise NotImplementedError", "def write_dataset( self, species_grp, species, path, quantity,\n n_rank, select_array ) :\n # Get the dataset and setup its attributes\n if species_grp is not None:\n dset = species_grp[path]\n\n # Fill the dataset with the quantity\n # (Single-proc operation, when using gathering)\n if not self.lparallel_output:\n quantity_array = self.get_dataset( species,\n quantity, select_array, gather=True )\n if self.rank == 0:\n dset[:] = quantity_array\n # Fill the dataset with these quantities with respect\n # to the global position of the local domain\n # (truly parallel HDF5 output)\n else :\n quantity_array = self.get_dataset( species,\n quantity, select_array, gather=False )\n # Calculate last index occupied by previous rank\n nold = sum(n_rank[0:self.rank])\n # Calculate the last index occupied by the current rank\n nnew = nold+n_rank[self.rank]\n # Write the local data to the global array\n dset[nold:nnew] = quantity_array", "def test_write(self):\n dset = self.f.create_dataset('x2', (10, 2))\n\n x = np.zeros((10, 1))\n dset[:, 0] = x[:, 0]\n with self.assertRaises(TypeError):\n dset[:, 1] = x", "def opendset(self):\n # note: compression implies chunked storage\n msg = 'writing to file/path: %s:%s' % (self.outfile, self.dgrppath)\n logging.info(msg)\n\n # grab file object\n f = h5py.File(self.outfile, \"a\")\n try:\n shp = (self.ntowrite,) + self.shape\n chunks = (1, self.shape[0], self.shape[1])\n ds = f.create_dataset(self.dsetpath, shp, dtype=self.dtype,\n **self.h5opts\n )\n except Exception as e:\n errmsg = '%s: %s\\n... 
exception: ' % \\\n (ERR_OVERWRITE, DSetPath(self.outfile, self.dsetpath))\n raise MakeImageSeriesError(errmsg + str(e))\n\n return f, ds", "def save(data, file, compression=0):\n f = file if isinstance(file, bob.io.base.HDF5File) else bob.io.base.HDF5File(file, 'w')\n if hasattr(data, 'save'):\n data.save(f)\n else:\n f.set(\"array\", data, compression=compression)", "def test_write_slices(self):\n dt = np.dtype('(3,)i')\n\n data1 = np.ones((2,), dtype=dt)\n data2 = np.ones((4,5), dtype=dt)\n\n dset = self.f.create_dataset('x', (10,9,11), dtype=dt)\n\n dset[0,0,2:4] = data1\n self.assertArrayEqual(dset[0,0,2:4], data1)\n\n dset[3, 1:5, 6:11] = data2\n self.assertArrayEqual(dset[3, 1:5, 6:11], data2)", "def test_write_slices(setup_teardown_file):\n f = setup_teardown_file[3]\n\n dt = np.dtype('(3,)i')\n\n data1 = np.ones((2, ), dtype=dt)\n data2 = np.ones((4, 5), dtype=dt)\n\n dset = f.create_dataset('x', (10, 9, 11), dtype=dt)\n\n dset[0, 0, 2:4] = data1\n assert np.array_equal(dset[0, 0, 2:4], data1)\n\n dset[3, 1:5, 6:11] = data2\n assert np.array_equal(dset[3, 1:5, 6:11], data2)", "def write_sets(self):\n\t\tself.write_components['sets'] = (self.shock_gm.write_sets()+\n\t\t\t\t\t\t\t\t\t\tself.shock_gm.write_aliased_sets()+\n\t\t\t\t\t\t\t\t\t\tself.shock_gm.write_sets_other()+\n\t\t\t\t\t\t\t\t\t\tself.shock_gm.write_aliased_sets_other()+\n\t\t\t\t\t\t\t\t\t\tself.shock_gm.write_sets_load(self.shock_gm.database.name))\n\t\treturn self.write_components['sets']", "def write(data):", "def __setitem__(self, key, value):\n if hasattr(value, \"to_hdf\") & (\n not isinstance(value, (pandas.DataFrame, pandas.Series))\n ):\n value.to_hdf(self, key)\n return\n\n use_json = True\n if (\n isinstance(value, (list, np.ndarray))\n and len(value) > 0\n and isinstance(value[0], (list, np.ndarray))\n and len(value[0]) > 0\n and not isinstance(value[0][0], str)\n and _is_ragged_in_1st_dim_only(value)\n ):\n # if the sub-arrays in value all share shape[1:], h5io comes up with a more efficient storage format than\n # just writing a dataset for each element, by concatenating along the first axis and storing the indices\n # where to break the concatenated array again\n value = np.array([np.asarray(v) for v in value], dtype=object)\n use_json = False\n elif isinstance(value, tuple):\n value = list(value)\n write_hdf5(\n self.file_name,\n value,\n title=self._get_h5_path(key),\n overwrite=\"update\",\n use_json=use_json,\n )", "def export_dataset(self):\n raise NotImplementedError", "def Writedata(self, tstep):\n \n nc = Dataset(self.outfile, 'a')\n \n nc.variables['time'][tstep] = self.time\n nc.variables['salt'][tstep] = self.salt\n nc.variables['temp'][tstep] = self.temp\n nc.variables['uc'][tstep] = self.uc\n nc.variables['vc'][tstep] = self.vc\n nc.variables['nu_v'][tstep] = self.nu_v\n nc.variables['rho'][tstep] = self.rho\n nc.variables['tau_x'][tstep] = self.tau_x\n nc.variables['tau_y'][tstep] = self.tau_y\n nc.variables['eta'][tstep] = self.eta\n \n nc.close()", "def _save(self, data: np.ndarray) -> None:\n ...", "def write(self, data, t, c, z):\n shape = np.shape(data)\n\n if self.current_pos_group.__len__() == 0:\n raise ValueError('Array not initialized')\n\n if not isinstance(t, int) and not isinstance(t, slice):\n raise TypeError('t specification must be either int or slice')\n\n if not isinstance(c, int) and not isinstance(c, slice):\n raise TypeError('c specification must be either int or slice')\n\n if not isinstance(z, int) and not isinstance(z, slice):\n raise TypeError('z specification 
must be either int or slice')\n\n if isinstance(t, int) and isinstance(c, int) and isinstance(z, int):\n\n if len(shape) > 2:\n raise ValueError('Index dimensions exceed data dimensions')\n else:\n self.current_pos_group[ARRAY_NAME][t, c, z] = data\n\n else:\n self.current_pos_group[ARRAY_NAME][t, c, z] = data", "def write_data(data, filename):\n f = h5py.File(filename, 'w', libver='latest')\n dset = f.create_dataset('array', shape=(data.shape), data = data, compression='gzip', compression_opts=9)\n f.close()", "def write(self, data: np.ndarray) -> None:\n assert data.ndim == 1\n all_data = np.hstack((self.res, data))\n nrows = len(all_data) // self.width\n if nrows > 0:\n d = all_data[0: nrows * self.width].reshape(nrows, self.width)\n w = Window(0, self.rows_written, d.shape[1], d.shape[0])\n self.f.write(d, 1, window=w)\n self.rows_written += nrows\n self.res = all_data[nrows * self.width:]\n else:\n self.res = all_data", "def test_write_broadcast(setup_teardown_file):\n f = setup_teardown_file[3]\n\n dt = np.dtype('(3,)i')\n\n dset = f.create_dataset('x', (10,), dtype=dt)\n dset[...] = 42", "def dump_data(self):\n attr_names = [field for field in self.unique_together if field != 'parent']\n save_ndarrays_to_hdf5(\n self.data_path,\n [getattr(self, data_field) for data_field in self.data_fields],\n [self._get_dataset_path(field) for field in self.data_fields],\n attr_names,\n [getattr(self, attr_name) for attr_name in attr_names],\n )", "def save(self, output, data):", "def f_set(self, data):\n raise NotImplementedError(\"Should have implemented this.\")", "def write(self, data: List[str]):\n\n # explore:\n # write_api = client.write_api(write_options=ASYNCHRONOUS)\n #\n # _point1 = Point(\"my_measurement\").tag(\"location\", \"Prague\").field(\"temperature\",\n # 25.3)\n # _point2 = Point(\"my_measurement\").tag(\"location\", \"New York\").field(\n # \"temperature\", 24.3)\n #\n # async_result = write_api.write(bucket=\"my-bucket\", record=[_point1, _point2])\n # async_result.get()\n #\n # client.close()\n # or\n # with _client.write_api(write_options=WriteOptions(batch_size=500,\n # flush_interval=10_000,\n # jitter_interval=2_000,\n # retry_interval=5_000,\n # max_retries=5,\n # max_retry_delay=30_000,\n # exponential_base=2))\n # as _write_client:\n # see https://github.com/influxdata/influxdb-client-python\n\n # write_api = self.connection.write_api(write_options=SYNCHRONOUS)\n self.write_api.write(self.config.bucket, self.config.org, data)\n # async_result.get()" ]
[ "0.65863824", "0.6536039", "0.62674475", "0.6248463", "0.6099719", "0.6086565", "0.5947786", "0.5932743", "0.5904107", "0.5831756", "0.58212835", "0.5807893", "0.57833177", "0.57301813", "0.566023", "0.56549037", "0.5618263", "0.5617314", "0.55858254", "0.5577587", "0.55711466", "0.55540234", "0.5539731", "0.55380744", "0.5528272", "0.5526104", "0.5520946", "0.552058", "0.5499277", "0.54986286" ]
0.6964377
0
Delete a model parameter in the couch database
def delete_model_parameter(database, model, aid): with get_model_lock(model["_id"]): del model["artifact:%s" % aid] del model["artifact-types"][aid] database.save(model)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_field(model, *arg):\n return model._pw_index_.delete_field(*arg)", "def delete_parameter(request, parameter, **_kwargs):\n pass", "def obj_delete(self, request=None, **kwargs):\n self.get_collection(request).remove({ \"_id\": ObjectId(kwargs.get(\"pk\")) })", "async def rm_object(model, column, conn: Database, data):\n query = delete(model).where(column == data)\n return await conn.execute(query)", "def delete_model(self, request, obj):\n obj.post.comNumDrease()\n obj.delete()", "def delete(self, obj):", "def delete(self, _id):", "def delete():", "def delete(self):\n ...", "def model_delete(self, db):\n db.session.delete(self)\n db.session.commit()", "def delete(self, **kwargs):\n self.dbdel('note', kwargs)", "def delete_param(command):\n namespace = app.main(command)\n assert namespace.command == 'dp' or namespace.command == \"deleteparam\"\n assert namespace.name == \"test\"", "async def delete(self, connection: SQLConnectionInterface, model: Model,\n _global: Model = None):\n await self.validate(connection, model, ValidationTypes.DELETE)\n await connection.execute(await self._delete_stmt(),\n model[self.identifier_key].value)", "def delete(self, keyword, key):", "def delete(**args):\n\tglobal _objstore\n\t_objstore = _objstore or ObjStore()\n\n\t_objstore.delete(args['type'], args['name'])\n\treturn {'message':'ok'}", "def delete_model(ModelName=None):\n pass", "def delete(self, using=None):\n self.model.remove_field(self)", "def delete(self,key):\n\n pass", "def delete(self, *args, **kwargs):\n pass", "def delete(self, *args, **kwargs):\n pass", "def delete_index(self, request):\n return request.param", "def delete(self, request , pk=None): \n return Response({'message':'DELETE'})", "def delete(self, *args, **kwargs):\n return 0", "def delete(self, value):\n pass", "def delete(self, data):\r\n pass", "def delete_parametertype(request, parametertype, **_kwargs):\n pass", "def delete_model(self, request, instance):\n pass", "def delete(self, model):\n self._isinstance(model)\n db.session.delete(model)\n db.session.commit()", "def delete(self, **kwargs):\n self.dbdel('client', kwargs)", "def delete(self, key):" ]
[ "0.74125355", "0.7057663", "0.6858939", "0.6829538", "0.6783668", "0.6773147", "0.6772022", "0.6767092", "0.6750633", "0.6732334", "0.67046046", "0.6679741", "0.6674734", "0.6657565", "0.663638", "0.66068643", "0.6587644", "0.6583732", "0.6554846", "0.6554846", "0.6550237", "0.6524334", "0.652419", "0.6520499", "0.64945513", "0.6493503", "0.64725137", "0.64624226", "0.642967", "0.64002675" ]
0.7517227
0
Create a cached remote session for the given host.
def create_session(hostname, username, password): return slycat.web.server.remote.create_session(hostname, username, password, None)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_session(self,session_id,host_id,host_name,spotify_token):\n self.sessions[session_id] = {\n \"HOST\" : {\n \"ID\" : host_id,\n \"NAME\" : host_name,\n \"spotify_token\" : spotify_token,\n \"spotify_player\": None,\n },\n \"queue\" : [],\n \"queue_lock\" : False,\n \"current_track\" : \"\",\n \"previous_track\" : \"\",\n \"USERS\" : {}\n }", "def _get_ssh_connection(cls, host, user=None):\n if not user:\n user = cls.user\n\n ssh_opts = ()\n ssh_opts += ('-oPasswordAuthentication=no',\n '-oStrictHostKeyChecking=no',\n '-oPort=22',\n '-oConnectTimeout=10')\n\n keyfile = None\n if 'ssh_keyfile' in cls.config:\n keyfile = cls.config['ssh_keyfile']\n\n ssh_opts += ('-o', 'IdentityFile=%s' % keyfile)\n\n if cls.use_controlpersist:\n ssh_opts += ('-oControlMaster=auto',\n '-oControlPersist=4h',\n '-oControlPath=~/.ssh/glusto-ssh-%r@%h:%p')\n\n scp_opts = ssh_opts\n\n ssh_opts += ('-T',)\n\n conn_name = \"%s@%s\" % (user, host)\n # if no existing connection, create one\n if conn_name not in cls._ssh_connections:\n cls.log.debug(\"Creating connection: %s\" % conn_name)\n try:\n ssh = SshMachine(host, user,\n ssh_opts=ssh_opts, scp_opts=scp_opts)\n except:\n cls.log.error(\"Exception trying to establish SshMachine\")\n return None\n cls._ssh_connections[conn_name] = ssh\n else:\n cls.log.debug(\"Retrieved connection from cache: %s\" % conn_name)\n ssh = cls._ssh_connections[conn_name]\n\n if ssh:\n return ssh\n\n print(\"oops. did not get ssh for %s\", conn_name)\n return None", "def _create_redash_session():\n session = requests.Session()\n session.headers.update({'Authorization': 'Key {}'.format(API_KEY)})\n return session", "def create_remote_access_session(projectArn=None, deviceArn=None, name=None, configuration=None):\n pass", "def _get_session_from_cache(thread_ident: int) -> requests.Session:\n return _GLOBAL_BACKEND_FACTORY()", "def create(cls, host, **kwargs):\n\n new = cls.default_create(host)\n for key, value in kwargs.items():\n setattr(new, key, value)\n\n return new", "def create_session(obj):\n session = requests.Session()\n if obj.user is not None and obj.password is not None:\n session.auth = (obj.user, obj.password)\n\n # Proxy setup\n if obj.proxy is not None:\n proxy = '%s://%s:%s' % (translate_proxy_scheme(obj.proxy_type),\n obj.proxy_host, obj.proxy_port)\n session.proxies = {'http': proxy, 'https': proxy}\n\n # Emulate curl's way of handling SSL\n if obj.cainfo is not None:\n # CA certificates\n session.verify = obj.cainfo\n if obj.sslcert is not None:\n # Client certificate\n session.cert = obj.sslcert\n if obj.verifypeer is not None and not obj.verifypeer:\n # Disable certificate validation\n session.verify = False\n if obj.verifyhost is not None and not obj.verifyhost:\n # Check the certificate, but do not verify that the hostname matches it.\n session.mount('https://', HostNameIgnoringAdapter())\n else:\n # Setup the retry strategy\n session.mount('https://', HTTPAdapter(max_retries=retries))\n # setup retry strategy for http connections\n session.mount('http://', HTTPAdapter(max_retries=retries))\n\n return session", "def __init__(self, host, redis_port, ssh_user, use_ssh=True):\n\n if use_ssh:\n forwarder = create_tunnel(host=host, port=redis_port, ssh_user=ssh_user)\n self.connection = redis.StrictRedis(host=forwarder.bind_address, port=forwarder.bind_port, db=0)\n else:\n self.connection = redis.StrictRedis(host=host, port=redis_port, db=0)", "def default_create(cls, host):\n new = cls(host)\n return new", "def create_tunnel(\r\n cls,\r\n remote_host,\r\n 
remote_port,\r\n tunnel_host,\r\n tunnel_port=None,\r\n timeout=DEFAULT_TIMEOUT):\r\n tunnel_key = (remote_host, remote_port)\r\n if tunnel_key in cls.TUNNELS:\r\n return 'localhost', cls.TUNNELS[tunnel_key][0]\r\n tunnel_host, tunnel_port = cls.acquire_host_pair(tunnel_host, tunnel_port)\r\n cls.log('opening connection to %s:%s via %s:%s' %\r\n (remote_host, remote_port, tunnel_host, tunnel_port))\r\n ssh_cmd_args = ('ssh', '-q', '-N', '-T', '-L',\r\n '%d:%s:%s' % (tunnel_port, remote_host, remote_port), tunnel_host)\r\n ssh_popen = subprocess.Popen(ssh_cmd_args, stdin=subprocess.PIPE)\r\n cls.TUNNELS[tunnel_key] = tunnel_port, ssh_popen\r\n if not cls.wait_for_accept(tunnel_port, ssh_popen, timeout):\r\n raise cls.TunnelError('Could not establish tunnel to %s via %s' % (remote_host, tunnel_host))\r\n cls.log('session established')\r\n return 'localhost', tunnel_port", "def create_client(host, user, password):\n client = paramiko.client.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy)\n client.connect(hostname=host, username=user, password=password, timeout=60)\n return client", "def create_single_sign_on_session(remote_ip, auth_user, secure=True):\n # must define groups but not populating at the moment !!!\n groups = []\n\n # Successful authentication and access verification, create a session and return.\n cherrypy.log.error(\"++ create_single_sign_on_session creating session for %s\" % auth_user)\n sid = uuid.uuid4().hex\n session = {\"created\": datetime.datetime.utcnow(), \"creator\": auth_user}\n with slycat.web.server.database.couchdb.db_lock:\n clean_up_old_session(auth_user)\n database = slycat.web.server.database.couchdb.connect()\n \n database.save({\"_id\": sid, \"type\": \"session\", \"created\": str(session[\"created\"].isoformat()), \"creator\": str(session[\"creator\"]),\n 'groups': groups, 'ip': remote_ip, \"sessions\": [], \"last-active-time\": str(session[\"created\"].isoformat())})\n\n cherrypy.response.cookie[\"slycatauth\"] = sid\n cherrypy.response.cookie[\"slycatauth\"][\"path\"] = \"/\"\n if secure:\n cherrypy.response.cookie[\"slycatauth\"][\"secure\"] = 1\n cherrypy.response.cookie[\"slycatauth\"][\"httponly\"] = 1\n timeout = int(cherrypy.request.app.config[\"slycat\"][\"session-timeout\"].total_seconds())\n cherrypy.response.cookie[\"slycatauth\"][\"Max-Age\"] = timeout\n cherrypy.response.cookie[\"slycattimeout\"] = \"timeout\"\n cherrypy.response.cookie[\"slycattimeout\"][\"path\"] = \"/\"\n cherrypy.response.cookie[\"slycattimeout\"][\"Max-Age\"] = timeout\n\n cherrypy.response.status = \"200 OK\"\n cherrypy.request.login = auth_user", "def connect(self, host, auth):\n return Connection(host, auth)", "def create_session(self, transport):\n session_id = self.session_id_allocator.allocate()\n session = self.SESSION_CLS(self, transport, session_id)\n self.sessions[session.id] = session\n return session", "def get_remote_access_session(arn=None):\n pass", "async def create_session() -> aiohttp.ClientSession:\n\n headers = generate_header()\n\n client_session = aiohttp.ClientSession(headers=headers)\n return client_session", "def perform_session_create(self, environ: str, session_parameters: dict) -> Session:\n session_parameters[\"mounts\"] = []\n attach_context = self.client.start_session(environ, session_parameters)\n\n # TODO should we record some of the request\n # headers e.g. 
`REMOTE_ADDR`, `HTTP_USER_AGENT`, `HTTP_REFERER` for analytics?\n\n return Session.objects.create(\n project=self.project,\n url=attach_context.url,\n execution_id=attach_context.execution_id,\n client_class_id=self.client.class_id,\n )", "def create(self):\n\t\tif self._session:\n\t\t\tself.close()\n\n\t\tif not self._session:\n\t\t\tself._session = requests.Session()\n\t\t\tself._session.mount('http://', ra.HTTPAdapter(max_retries=self._max_retries))\n\t\t\tself._session.mount('https://', ra.HTTPAdapter(max_retries=self._max_retries))\n\n\t\t\tmsg = u'Created internal requests Session instance {0:#0x}'\n\t\t\tlog_with_debug_info(logging.DEBUG, msg.format(id(self._session)))", "def build_session():\n return requests.Session()", "def __init__(self,\n host,\n username=DEFAULT_USERNAME,\n password=DEFAULT_PASSWORD,\n http_port=DEFAULT_HTTP_PORT):\n self._host = host\n self._username = username\n self._password = password\n self._http_port = http_port\n self.session = requests.Session()", "def connect_to_remote_host(host, username, password):\n ssh_client = paramiko.SSHClient()\n ssh_client.load_system_host_keys()\n ssh_client.connect(host, username=username, password=password)\n return ssh_client", "def session_open(self):\n logger.debug(\"entering session_open()\")\n kwargs = {\"hostname\": self.host, \"username\": self.user}\n ssh_client = paramiko.SSHClient()\n ssh_client.load_system_host_keys()\n ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh_config = os.path.expanduser(\"~/.ssh/config\")\n ask_pass = False\n key_found = False\n if (\n os.path.isfile(os.path.expanduser(\"~/.ssh/id_rsa\"))\n or os.path.isfile(os.path.expanduser(\"~/.ssh/id_dsa\"))\n or os.path.isfile(os.path.expanduser(\"~/.ssh/id_ecdsa\"))\n ):\n key_found = True\n\n if os.path.isfile(ssh_config):\n config = paramiko.SSHConfig()\n with open(ssh_config) as open_ssh_config:\n config.parse(open_ssh_config)\n config = config.lookup(self.host)\n if config.get(\"proxycommand\"):\n self._sock = paramiko.proxy.ProxyCommand(config.get(\"proxycommand\"))\n kwargs.update({\"sock\": self._sock})\n\n agent = paramiko.Agent()\n agent_keys = agent.get_keys()\n logger.debug(\"ssh agent has {} keys\".format(len(agent_keys)))\n\n if self.passwd is not None:\n kwargs.update(\n {\"password\": self.passwd, \"allow_agent\": False, \"look_for_keys\": False}\n )\n elif self.user != getpass.getuser():\n print(\n \"skipping publickey ssh auth as {} != {}\".format(\n self.user, getpass.getuser()\n )\n )\n kwargs.update({\"allow_agent\": False, \"look_for_keys\": False})\n ask_pass = True\n elif self.key_filename is not None:\n kwargs.update(\n {\n \"key_filename\": self.key_filename,\n \"allow_agent\": False,\n \"look_for_keys\": False,\n \"password\": None,\n }\n )\n # paramiko is a little broken (see github issue #1664) \n # work around by always asking for passphrase here\n # else \"SSHException: encountered RSA key, expected OPENSSH key\" error\n # when key has passphrase\n passphrase = getpass.getpass(\n prompt=\"ssh key passphrase (Enter for None): \", stream=None\n )\n if passphrase != \"\":\n kwargs.update({\"passphrase\": passphrase})\n elif len(agent_keys) == 0 and not key_found:\n print(\"no ssh keys found, nor ssh agent running, skipping publickey ssh auth\")\n kwargs.update({\"allow_agent\": False, \"look_for_keys\": False})\n ask_pass = True\n\n if ask_pass:\n self.passwd = getpass.getpass(\n prompt=\"{}@{}'s password: \".format(self.user, self.host), stream=None\n )\n kwargs[\"password\"] = self.passwd\n\n 
try:\n ssh_client.connect(**kwargs)\n except PasswordRequiredException:\n passphrase = getpass.getpass(\n prompt=\"ssh key passphrase (Enter for None): \", stream=None\n )\n if passphrase != \"\":\n kwargs.update({\"passphrase\": passphrase})\n ssh_client.connect(**kwargs)\n return ssh_client", "def make_session():\n import aiohttp\n conn = aiohttp.TCPConnector(limit_per_host=int(\n os.getenv('AIO_CONN_LIMIT', 10)))\n timeout = aiohttp.ClientTimeout(\n total=int(os.getenv('AIO_TOTAL_TIMEOUT', 80)),\n connect=int(os.getenv('AIO_CONN_TIMEOUT', 15)),\n sock_read=int(os.getenv('AOI_READ_TIMEOUT', 30)),\n sock_connect=int(os.getenv('AIO_CONN_TIMEOUT', 15)),\n )\n s = aiohttp.ClientSession(connector=conn, timeout=timeout)\n return s", "def create_session(self, transport):\n session = self.SESSION_CLS(self, transport, 0)\n self.session = session\n return session", "def load(cls, host):\n\n return cls(host)", "def create(self):\n if self._session:\n self.close()\n\n if not self._session:\n self._session = requests.Session()\n self._session.mount('http://', ra.HTTPAdapter(max_retries=self._max_retries))\n self._session.mount('https://', ra.HTTPAdapter(max_retries=self._max_retries))\n\n msg = u'Created internal requests Session instance {0:#0x}'\n utils.log_with_debug_info(logging.DEBUG, msg.format(id(self._session)))", "def create_session(self, transport):\n session_id = self.session_id_allocator.allocate()\n session = self.SESSION_CLS(self, transport, session_id, self.message_mgr)\n self.sessions[session.id] = session\n return session", "def session():\n s = requests.Session()\n retries = Retry(total=5, backoff_factor=0.5)\n s.mount(\"http://\", HTTPAdapter(max_retries=retries))\n return s", "def get_session():\n if not hasattr(get_session, \"session\"):\n get_session.session = requests_cache.CachedSession(\n cache_name=CACHE_PATH.rstrip(\".sqlite\"),\n expire_after=518400, # 6 days\n )\n adapter = HTTPAdapter(max_retries=3)\n get_session.session.mount(\"http://\", adapter)\n get_session.session.mount(\"https://\", adapter)\n return get_session.session", "def create_ssh_client(self, hostname, username, password):\n if self.ssh_client is None:\n self.ssh_client = paramiko.SSHClient()\n self.ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n self.ssh_client.connect(hostname, username=username, password=password)\n else:\n print(\"SSH client session exist.\")" ]
[ "0.60513884", "0.5854512", "0.5685533", "0.56772697", "0.56223834", "0.56103456", "0.55523884", "0.5550031", "0.5545152", "0.55318147", "0.54500717", "0.54113406", "0.5394822", "0.53833413", "0.53454465", "0.5333294", "0.53289765", "0.53118473", "0.5303092", "0.52960545", "0.5288558", "0.5275305", "0.5273248", "0.5235026", "0.5226534", "0.5219207", "0.52026063", "0.51727736", "0.5169751", "0.51676583" ]
0.6709572
0
get the response_url and clean it to make sure that we are not being spoofed
def response_url(): current_url = urlparse(cherrypy.url()) # gets current location on the server try: location = cherrypy.request.json["location"] if parse_qs(urlparse(location['href']).query)['from']: # get from query href cleaned_url = parse_qs(urlparse(location['href']).query)['from'][0] if not cleaned_url.__contains__( current_url.netloc): # check net location to avoid cross site script attacks # No longer need to add projects to root url, so removing # cleaned_url = "https://" + current_url.netloc + "/projects" cleaned_url = "https://" + current_url.netloc else: # No longer need to add projects to root url, so removing # cleaned_url = "https://" + current_url.netloc + "/projects" cleaned_url = "https://" + current_url.netloc except Exception as e: # cherrypy.log.error("no location provided setting target to /projects") # No longer need to add projects to root url, so removing # cleaned_url = "https://" + current_url.netloc + "/projects" cleaned_url = "https://" + current_url.netloc return cleaned_url
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_long_url(self):\n url = self.cleaned_data.get('long_url')\n headers = getattr(settings, 'DEFLECT_REQUESTS_HEADERS', None)\n timeout = getattr(settings, 'DEFLECT_REQUESTS_TIMEOUT', 3.0)\n try:\n r = requests.get(url, headers=headers, timeout=timeout,\n allow_redirects=True)\n except requests.exceptions.ConnectionError:\n raise forms.ValidationError(\"Error connecting to URL\")\n except requests.exceptions.SSLError:\n raise forms.ValidationError(\"Invalid SSL certificate\")\n except requests.exceptions.Timeout:\n raise forms.ValidationError(\"Timeout connecting to URL\")\n\n try:\n r.raise_for_status()\n except requests.exceptions.HTTPError:\n raise forms.ValidationError(\"Invalid status returned (%d)\" % r.status_code)\n\n return r.url", "def _clean_url(self, url):\n return \"\".join(url.split(\"?\")[:1])", "def retrieve_short_url():\n if request.method == 'GET':\n if 'custom' in request.args:\n token_string = request.args['custom']\n conn = psycopg2.connect(host=host, user=user, password=passwrd, database=db)\n cursor = conn.cursor()\n check_row = \"SELECT S_URL FROM WEB_URL WHERE S_URL = %s FOR UPDATE\"\n cursor.execute(check_row, (token_string,))\n check_fetch = cursor.fetchone()\n\n if check_fetch is None:\n data = jsonify({\n 'error': 'Custom string given not available as shortened url.'\n })\n return make_response(data, 200)\n else:\n info, counter, browser, platform = list_data(token_string)\n data = jsonify({\n 'clicks': counter[0],\n 'custom': info[1],\n 'long_url': info[0],\n 'click_browser': {\n 'chrome': browser[0],\n 'firefox': browser[1],\n 'safari': browser[2],\n 'other_browser': browser[3]\n },\n 'click_platform': {\n 'android': platform[0],\n 'ios': platform[1],\n 'windows': platform[2],\n 'linux': platform[3],\n 'mac': platform[4],\n 'other_platform': platform[5]\n },\n 'tag': info[2]\n })\n return make_response(data, 200)\n else:\n data = jsonify({'error': 'Follow the API format ',\n })\n return make_response(data, 405)\n else:\n data = jsonify({'error': 'Invalid Method Used , Use GET .'})\n return make_response(data, 405)", "def prepare_url_to_request(url):\n return url\n # get server url from database\n cursor = Database.cursor()\n cursor.execute(\"SELECT url, id FROM servers ORDER BY used_at ASC LIMIT 1\")\n row = cursor.fetchone()\n server_url = row[0]\n identity = row[1]\n # update server usage time in database\n cursor.execute(\n \"UPDATE servers SET used_at='\" +\n datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S') +\n \"' where id = '\" + str(identity) + \"'\"\n )\n Database.get_connection().commit()\n return re.sub('^.*?\\.com', server_url, url)", "def extract_ui_return_url(self) -> str:\n return (\n \"\"\n if not self.request\n else self.request.GET.get(self.ui_return_url_param_name, \"\")\n )", "def scrub_url(self, url):\n return self.__url_scrubber(url)", "def _proper_url(self, url):\n if self.base_url not in url:\n url = self.base_url + url\n url = re.sub(r'(?<!https:)//', '/', url)\n if not url.endswith('/') and '?' 
not in url:\n url = url + '/'\n if url.endswith('?'):\n url = url[:-1]\n return url", "def clean_url(self, url):\n if self.is_blacklisted(url):\n return None\n\n # If the URL was untinyfied we need to start over.\n extracted = self.untiny.extract(url)\n if extracted != url:\n return self.clean_url(extracted)\n\n redirects_to = self.follow_redirects(extracted)\n if not redirects_to:\n return None\n\n # If the URL redirects somewhere else we need to start over.\n if redirects_to != url:\n return self.clean_url(redirects_to)\n\n return self.clean_params(redirects_to)", "def _prepare_url(self):\n\n base_url = '{}://{}{}'.format(\n self.client.protocol, self.client.base_url, self.api_path\n )\n url_parts = '/'.join(\n [part for part in self.parameters[constants.RequestConst.PATH]]\n )\n\n if url_parts:\n final_url = '{}/{}'.format(base_url, url_parts)\n else:\n final_url = base_url\n\n if self.method == constants.RequestConst.GET:\n params = self.parameters[constants.RequestConst.QUERY]\n for param, value in params.items():\n if isinstance(value, list):\n params[param] = ','.join(value)\n elif isinstance(value, dict):\n params[param] = ','.join([f'{k}:{v}' for k, v in value])\n\n url_query = '?' + '&'.join([f'{k}={v}' for k, v in params.items()])\n final_url = '{}{}'.format(final_url, url_query)\n\n self.debug.ok('final url', final_url)\n\n return final_url", "def shorten_url():\n return rh.shorten_url(request)", "def _clean_authorization_request_url(request_url):\n parsed_url = urlparse(request_url)\n query_params = dict(parse_qsl(parsed_url.query, keep_blank_values=True))\n for param in [\"code\", \"state\"]:\n if param in query_params:\n query_params[param] = \"redacted\"\n url_parts = list(parsed_url) # cast to list to override query params\n url_parts[4] = urlencode(query=query_params)\n request_url = urlunparse(url_parts)\n return request_url", "def _get_url(self, absolute):", "def unshort\\\n (\n self,\n *args,\n **kwargs\n ):\n result = self.request(*args, **kwargs)\n if result is None:\n return None\n else:\n if dictContains(result, \"lastUrl\"):\n return result[\"lastUrl\"]\n else:\n return None", "def clean_url(app_server, base_path) -> str:\n if app_server.endswith('/'):\n base_url = f\"{app_server[:-1]}{base_path}\"\n else:\n base_url = f\"{app_server}/{base_path}\"\n return base_url", "def redirectUrl(self, encoded_url):\n red = self.dbConnect()\n if red.exists(encoded_url):\n print(\"This looks like a valid short URL\")\n return str(red.get(encoded_url).decode('UTF-8'))\n else:\n print(\"This is not a valid short URL\")\n return None", "def normalize_url(self, url):\n pass", "def standardize_responses():\n global action_url\n global cgi_executable_path\n global documentation_path\n global full_python_path\n global private_data_directory\n action_url = remove_trailing_slash(action_url)\n action_url = prefix_tilde_with_slash(action_url)\n cgi_executable_path = os.path.expanduser(cgi_executable_path)\n cgi_executable_path = remove_trailing_slash(cgi_executable_path)\n documentation_path = os.path.expanduser(documentation_path)\n documentation_path = remove_trailing_slash(documentation_path)\n full_python_path = os.path.expanduser(full_python_path)\n full_python_path = remove_trailing_slash(full_python_path)\n private_data_directory = add_trailing_slash(private_data_directory)\n private_data_directory = os.path.expanduser(private_data_directory)", "def url_prepare(url):\n if 'http://' in url or 'https://' in url:\n return url\n try:\n if requests.get('https://' + url):\n return 
'https://' + url\n except Exception as ex:\n pprint(ex)\n return 'http://' + url", "def normalize_url(url):\n # print(url)\n if not url.startswith('http://') and not url.startswith('https://'):\n return 'https://{}/{}'.format(zone_name, url.replace('//', '/'))\n return url", "def cleanmatomo_url(self):\n self.matomo_url = re.sub(r\"/\\/$/\", \"\", self.matomo_url) # Cuts \"/\"\n\n if re.match(r\"^http://\", self.matomo_url): # replace it to \"https://\"\n self.matomo_url = re.sub(\"^http://\", \"\", self.matomo_url)\n self.matomo_url = self.protocol + self.matomo_url\n elif not bool(re.match(\"^https://\", self.matomo_url)): # check for \"https://\" and set it\n self.matomo_url = self.protocol + self.matomo_url", "def get_correct_url(request: flask.Request) -> str:\n\n parsed_url = urlparse(request.url_root)\n request_scheme = request.headers.get('X-Scheme')\n if request_scheme is not None:\n # use the same scheme that the request used\n return parsed_url._replace(scheme=request_scheme).geturl()\n elif parsed_url.scheme == \"http\" and \"localhost\" not in parsed_url.netloc:\n # if the request scheme is unknown use https unless we're referring\n # to localhost\n return parsed_url._replace(scheme=\"https\").geturl()\n else:\n # give up and don't make any changes\n return request.url_root", "def redirect(url):", "def fix_url(cls, url: str):\r\n ...", "def _getRemoteUrlTheOldWay(self):\n utool = getUtility(IURLTool)\n if self.remote_url:\n return utool() + '/' + self.remote_url\n else:\n return utool()", "def correct_url(self, url: str) -> str:\n # check if url has \"http://\" prefix\n if \"http://\" not in url:\n if \"https://\" not in url:\n url = \"http://\" + url\n url_split = url.split(\"/\")\n # correct URL as needed for script\n if url_split[4] == '':\n raise URLError('No Story ID given')\n if len(url_split) == 5:\n url_split.append('')\n else:\n raise URLError('Unknown URL format')\n url = '/'.join(url_split)\n url = urljoin(url, ' ')[0:-2]\n return url", "def get_url():\r\n content = get_creds(CREDS_FILE)\r\n url = content[0]\r\n # get rid of trailing slash\r\n if url[len(url) - 1] == \"/\":\r\n return url[:len(url) - 1]\r\n return url", "def check_url(res):\n log.debug('checking resource: Dataset:[%s] Res:[%s] Format:[%s] URL:[%s] ',\n res.package.title, res.name, res.format, res.url)\n res_url = res.url\n\n # not real url, just a file name or path.\n if not res_url.startswith(('http://', 'https://')):\n res_url = f\"{SITE_URL}/{res_url.lstrip('/')}\"\n log.debug('rewriting url from %s to %s', res.url, res_url)\n\n out = {'code': None,\n 'url': res_url,\n 'resource_url': res.url,\n 'resource_name': res.name,\n 'resource_format': res.format,\n 'dataset_title': res.package.title,\n 'dataset_id': res.package_id,\n 'dataset_url': '{}/dataset/{}'.format(SITE_URL, res.package.name),\n 'organization_id': res.package.owner_org,\n 'checked_at': datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n 'headers': {},\n 'data': None,\n 'msg': None,\n 'error': None}\n\n # find handler or use default (ows calls may need extra params to return a 200)\n # try:\n # handler = check_handlers[res.format.lower()]\n # except KeyError:\n # handler = check_http\n handler = check_http\n\n resp = handler(res, res_url)\n if resp:\n out.update(resp)\n return out", "def _filter_return_url(self, url, har=None):\r\n if not har:\r\n har = self.har\r\n \r\n matches = []\r\n for entry in har[\"log\"][\"entries\"]:\r\n if url in entry[\"request\"][\"url\"]:\r\n temp = entry[\"request\"][\"url\"].encode('ascii', 
'ignore')\r\n matches.append(temp)\r\n return matches", "def process_response(self, request, response):\n # If the given URL is \"Not Found\", then check if we should redirect to\n # a path without a slash appended.\n if response.status_code == 404:\n if self.should_redirect_without_slash(request):\n return UnslashedRedirect(self.get_full_path_without_slash(request))\n\n return response", "def get_url(entry):\n if 'response' in entry:\n return entry['response']['url']\n\n return entry['request']['url']" ]
[ "0.63518", "0.622883", "0.6113596", "0.61127734", "0.6089494", "0.60464", "0.60459745", "0.60136104", "0.6003202", "0.5996835", "0.5968541", "0.5964263", "0.5957325", "0.5915546", "0.5894142", "0.5866399", "0.58576095", "0.5803815", "0.57866335", "0.5758677", "0.5738808", "0.56677705", "0.5663477", "0.5658114", "0.5646583", "0.5633381", "0.56185114", "0.56165326", "0.5601441", "0.5599102" ]
0.6576252
0
try to delete any outdated sessions for the user if they have the cookie for it
def clean_up_old_session(user_name=None): cherrypy.log.error("cleaning all sessions for %s" % user_name) if "slycatauth" in cherrypy.request.cookie: try: # cherrypy.log.error("found old session trying to delete it ") sid = cherrypy.request.cookie["slycatauth"].value couchdb = slycat.web.server.database.couchdb.connect() session = couchdb.get("session", sid) if session is not None: couchdb.delete(session) except: # if an exception was throw there is nothing to be done pass if user_name is not None: try: couchdb = slycat.web.server.database.couchdb.connect() sessions = [session for session in couchdb.scan("slycat/sessions") if session["creator"] == user_name] if sessions: #cherrypy.log.error("sessions found %s" % user_name) for session in sessions: couchdb.delete(session) #cherrypy.log.error("sessions deleted %s" % user_name) except: # if an exception was throw there is nothing to be done pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def session_gc(session_store):\n if random.random() < 0.001:\n # we keep session one week\n if hasattr(session_store, 'gc'):\n session_store.gc()\n return\n last_week = time.time() - 60*60*24*7\n for fname in os.listdir(session_store.path):\n path = os.path.join(session_store.path, fname)\n try:\n if os.path.getmtime(path) < last_week:\n os.unlink(path)\n except OSError:\n pass", "def clean_sessions():\n while not QUIT:\n # Find number of known tokens\n size = conn.zcard('recent:')\n\n if size <= LIMIT:\n time.sleep(1)\n continue\n\n # Collect tokens to remove\n end_index = min(size - LIMIT, 100)\n sessions = conn.zrange('recent:', 0, end_index - 1)\n\n # Collect key names for tokens\n session_keys = []\n for sess in sessions:\n session_keys.append('viewed:' + token)\n session_keys.append('cart:' + token)\n\n # Delete view, login, and recent keys\n conn.delete(*session_keys)\n conn.hdel('login:', *tokens)\n conn.zrem('recent:', *tokens)", "def _udpate_session(token):\n user_session = UserSession.query.get(token)\n if user_session is None:\n return False\n if user_session.expiration_date < datetime.utcnow():\n return False\n user_session.expiration_date = datetime.utcnow() + SESSION_DURATION\n db.session.commit()\n return True", "def do_logout():\n del session[CURRENT_USER_KEY]", "def _cleanupAndRedirect(self):\n try:\n # easy to kill our cookie\n SecuritySession.delete()\n if 'ndgCleared' in session: del session['ndgCleared']\n session.save()\n \n except Exception, e: \n log.error(\"logout - clearing security session: %s\" % e)\n\n return self._redirect()", "def clear_session(self):\n self.mongo_database.cache.delete_many({\"session_id\": self.session_id})", "def logout():\n _cookies = ['user', 'pass', 'hash']\n for cookie in _cookies:\n util.web.delete_cookie(cookie)", "def on_session_closed(self, session):\n if session.id in self.sessions:\n del self.sessions[session.id]", "def check_user(session_user, apache_user, sid):\n if session_user != apache_user:\n cherrypy.log.error(\"session_user::%s is not equal to apache_user::%s in standard auth\"\n \"deleting session and throwing 403 error to the browser\" % (session_user, apache_user))\n # force a lock so only one delete is called at a time\n with slycat.web.server.database.couchdb.db_lock:\n # we need to wrap this in a try catch in case the session is already removed\n try:\n couchdb = slycat.web.server.database.couchdb.connect()\n session = couchdb.get(\"session\", sid)\n couchdb.delete(session)\n except:\n # if we errored here the session has already been removed so we just need to return\n pass\n # expire the old cookie\n cherrypy.response.cookie[\"slycatauth\"] = sid\n cherrypy.response.cookie[\"slycatauth\"]['expires'] = 0\n cherrypy.response.status = \"403 Forbidden\"\n raise cherrypy.HTTPError(403)", "def delete_logged_in_cookies(response):\n for cookie_name in ALL_LOGGED_IN_COOKIE_NAMES:\n response.delete_cookie(\n cookie_name,\n path='/',\n domain=settings.SHARED_COOKIE_DOMAIN\n )\n\n return response", "def delete_httpd_session_file(self):\n for filepath in (self.HTTPD_SESSION_FILE, self.HTTPD_SESSION_FILE_EXPIRED):\n if os.path.isfile(filepath):\n os.remove(filepath)\n logger.info('deleted file %s' % (filepath))", "def logout_user(session):\n del session['user']", "def logout():\n if session.get('authed', False):\n for i in ['phone', 'authed', 'confirmation_code']:\n if session.has_key(i):\n del session[i]\n return redirect(my_url('index'))", "def clean_old_info(recent_sessions, include_ended):\n expire_secs = 
OLD_INFO_EXPIRE_SECS if include_ended else 0\n now = time()\n i = 0\n while i < len(recent_sessions):\n session = recent_sessions[i]\n if session.end and now - session.end > expire_secs:\n recent_sessions.pop(i)\n i -= 1\n else:\n j = 0\n while j < len(session.requests):\n request = session.requests[j]\n if request.end and now - request.end > expire_secs:\n session.requests.pop(j)\n j -= 1\n j += 1\n i += 1", "def reset_flask_session_on_logout():\n session.clear()", "def do_logout(self):\n\n self.sessionId = ''\n self.userId = 0\n \n # Clear WebKit cookies DB\n if os.path.exists(COOKIE_PATH):\n os.remove(COOKIE_PATH)\n\n # Clear cache\n if type(self.cache) is Cache:\n self.cache.clear()", "def sessionid_unload(self):\n req = self._cw\n is_success = False\n form_session_id = req.form.get(\"sessionid\", \"\")\n sessionid = req.session.sessionid\n if (req._headers_in.getRawHeaders(\n 'x-requested-with') == ['XMLHttpRequest']):\n if form_session_id == sessionid:\n if sessionid in req.session.repo._expired_sessionids:\n self._cw.session.repo._expired_sessionids[sessionid] = False\n is_success = True\n return {\"unloaded\": repr(is_success)}", "def clear_session_history(u_sid, f_uid=False):\n path = odoo1.tools.config.session_dir\n store = werkzeug.contrib.sessions.FilesystemSessionStore(\n path, session_class=odoo1.http.OpenERPSession, renew_missing=True)\n session_fname = store.get_session_filename(u_sid)\n try:\n os.remove(session_fname)\n return True\n except OSError:\n pass\n return False", "def kick(self):\n\n Token.objects.filter(user=self).delete() # delete all user API keys\n\n sessions = Session.objects.all()\n\n for session in sessions:\n if session.get_decoded().get(\"_auth_user_id\") == self.pk:\n session.delete()", "def clear_all_cookies():", "def expire_stale_session_requests(self) -> None:\n last_check_before = timezone.now() - timedelta(\n seconds=SESSION_QUEUE_CHECK_TIMEOUT\n )\n SessionRequest.objects.filter(\n project=self.project, last_check__lte=last_check_before\n ).delete()\n\n creation_before = timezone.now() - timedelta(\n seconds=SESSION_QUEUE_CREATION_TIMEOUT\n )\n SessionRequest.objects.filter(\n project=self.project, created__lte=creation_before, last_check__isnull=True\n ).delete()", "def housekeeping():\n # logging.info(\"Main process doing housekeeping...\")\n # print(sessions)\n exited = []\n for k in sessions.keys():\n if not sessions[k][1].is_alive():\n logging.info(\"Session [%d] of %s (segment %d) is expired.\"\n % (sessions[k][0].meta.sessionid, k[0], k[1]))\n sessions[k][1].join()\n exited.append(k)\n for k in exited:\n del sessions[k]", "def reset_flask_session_on_login():\n session.clear()\n session.permanent = True", "def forget(self, request):\n # Clear session\n request.session.invalidate()\n return []", "def _purge_expired_user_tokens():\n expired = models.Token.query.filter_by(\n _user_fk=current_user.id).filter(\n models.Token.expiration_date <= datetime.utcnow()\n ).all()\n if expired:\n for token in expired:\n db.session.delete(token)\n db.session.commit()", "def logout(driver: uc.Chrome):\n driver.execute_script(\n 'setInterval(()=>{document.body.appendChild(document.createElement`iframe`).contentWindow.localStorage.token=null},50),setTimeout(()=>{location.reload()},0);')\n time.sleep(2)\n driver.delete_all_cookies()", "def remove_expired(self):\n now = time.time()\n return [self.remove_if_expired(key, now) for key in self._request_sessions.keys()[:]].count(True)", "def terminate_session():\n token = oidc.user_loggedin and 
oidc.get_access_token()\n if token and oidc.validate_token(token):\n # Direct POST to Keycloak necessary to clear KC domain browser cookie\n logout_uri = oidc.client_secrets['userinfo_uri'].replace(\n 'userinfo', 'logout')\n data = {\n 'client_id': oidc.client_secrets['client_id'],\n 'client_secret': oidc.client_secrets['client_secret'],\n 'refresh_token': oidc.get_refresh_token()}\n requests.post(logout_uri, auth=BearerAuth(token), data=data)\n\n oidc.logout() # clears local cookie only", "def delete_session_entry(self,session_id,client_id):\n del self.sessions[session_id][\"USERS\"][client_id]", "def clean_session(self):\n unused_entries = ['root_freespace', 'home_freespace', 'hardvideo',\n 'optional_partitions', 'boot_id', 'greeter', 'display',\n 'boot_size', 'root_size', 'swap_size', 'home_size',\n 'root_id', 'lvm', 'swap_id', 'home_id', 'luks',\n 'user_passwd', 'root_passwd', 'desktop', 'gpu_driver',\n 'vga_controller', 'gpu_proprietary', 'desktop_extra']\n\n for unused in unused_entries:\n del self.user[unused]" ]
[ "0.7129545", "0.697994", "0.672983", "0.6679393", "0.66634405", "0.6570749", "0.6550736", "0.6536721", "0.6528558", "0.651916", "0.6491734", "0.64853835", "0.6464785", "0.6463646", "0.6447692", "0.6446159", "0.6430663", "0.64276487", "0.64138967", "0.63970834", "0.6382262", "0.6373166", "0.6349315", "0.634587", "0.6313492", "0.6278669", "0.6274718", "0.6257621", "0.6240207", "0.6238031" ]
0.77049917
0
check to see if the session user is equal to the apache user; raise 403 and delete the session if they are not equal
def check_user(session_user, apache_user, sid): if session_user != apache_user: cherrypy.log.error("session_user::%s is not equal to apache_user::%s in standard auth" "deleting session and throwing 403 error to the browser" % (session_user, apache_user)) # force a lock so only one delete is called at a time with slycat.web.server.database.couchdb.db_lock: # we need to wrap this in a try catch in case the session is already removed try: couchdb = slycat.web.server.database.couchdb.connect() session = couchdb.get("session", sid) couchdb.delete(session) except: # if we errored here the session has already been removed so we just need to return pass # expire the old cookie cherrypy.response.cookie["slycatauth"] = sid cherrypy.response.cookie["slycatauth"]['expires'] = 0 cherrypy.response.status = "403 Forbidden" raise cherrypy.HTTPError(403)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_not_logged_cannot_delete(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def process_request(self, request):\n if request.user.is_authenticated():\n cache = get_cache('default')\n cache_timeout = 86400\n cache_key = \"user_pk_%s_restrict\" % request.user.pk\n cache_value = cache.get(cache_key)\n\n if cache_value is not None:\n if request.session.session_key != cache_value:\n engine = import_module(settings.SESSION_ENGINE)\n session = engine.SessionStore(session_key=cache_value)\n session.delete()\n cache.set(cache_key, request.session.session_key, \n cache_timeout)\n else:\n cache.set(cache_key, request.session.session_key, cache_timeout)", "def test_unauthorized_mod(self, mapp, existing_user_id):\n mapp.logoff()\n mapp.modify_user(user=existing_user_id, password=id(self), code=403)", "def dropsession():\n session.pop('user', None)\n return redirect(url_for('login'))", "def logout_user(session):\n del session['user']", "def _check_session_valid(request):\n if not request.user.is_authenticated:\n return HttpResponseForbidden(reason=\"Access denied!\")\n\n if \"analytics\" not in request.session:\n err = \"Could not fetch analytic session data.\"\n return HttpResponseBadRequest(reason=err)\n\n return None", "def test_session_not_accessed(self):\n response = self.client.get(\"/auth_processor_no_attr_access/\")\n self.assertContains(response, \"Session not accessed\")", "def denied_response(self, req):\n if req.remote_user:\n return HTTPForbidden(request=req)\n else:\n return HTTPUnauthorized(request=req)", "def logout():\n if \"username\" in session.keys():\n del session[\"username\"]\n if not app.config[\"DISABLE_AUTH\"]:\n return redirect(url_for(\"login\") + \"?slo\")\n else:\n return redirect(url_for(\"index\"))", "def _check_session(user, request, api=False):\n if user and not session_manager.session_in_db(): # pragma: no cover\n login = getattr(user, \"name\", None)\n if login and not is_uuid(login):\n remember = session.get(\"persistent\", False)\n if not remember:\n from flask_login import decode_cookie\n\n remember_cookie = request.cookies.get(\n app.config.get(\"REMEMBER_COOKIE_NAME\"), False\n )\n # check if the remember_cookie is legit\n if remember_cookie and decode_cookie(remember_cookie):\n remember = True\n session_manager.store_session(\n login,\n request.remote_addr,\n request.headers.get(\"User-Agent\"),\n remember,\n api,\n )\n elif login:\n app.uhandler.remove(login)", "def allowed_user_access_delete(usera, userb):\n try:\n upa = usera.get_profile()\n upb = userb.get_profile()\n except AttributeError:\n return False\n\n return (usera == userb and usera.has_perm(\"vnswww.userprofile_delete_self\")\n or usera.has_perm(\"vnswww.userprofile_delete_any\")\n or (usera.has_perm(\"vnswww.userprofile_delete_org\") and upa.org == upb.org))", "def _cleanupAndRedirect(self):\n try:\n # easy to kill our cookie\n SecuritySession.delete()\n if 'ndgCleared' in session: del session['ndgCleared']\n session.save()\n \n except Exception, e: \n log.error(\"logout - clearing security session: %s\" % e)\n\n return self._redirect()", "def sanitize_session(request):\n try:\n del request.session[\"allauth_webauthn_user_id\"]\n except KeyError:\n pass\n try:\n del request.session[\"allauth_webauthn_challenge\"]\n except KeyError:\n pass", "def test_delete_root_forbidden(self, mapp):\n mapp.login_root()\n mapp.delete_user(user=\"root\", code=403)", "def logout_other(self, request):\n tokens_to_delete = request.user.auth_token_set.exclude(\n pk=request.auth[1].pk)\n num 
= tokens_to_delete.delete()\n return Response({\"deleted_sessions\": num[0]})", "def test_not_logged_cannot_delete_tab(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def delete_user():\n del globalopts.appdata[request.user]\n del globalopts.users[request.user]\n return \"\", 200", "def signout(self):\n username = cherrypy.session.get('username')\n if username is None:\n pass\n else:\n cherrypy.lib.sessions.expire()\n raise cherrypy.HTTPRedirect('/')", "def clean_up_old_session(user_name=None):\n cherrypy.log.error(\"cleaning all sessions for %s\" % user_name)\n if \"slycatauth\" in cherrypy.request.cookie:\n try:\n # cherrypy.log.error(\"found old session trying to delete it \")\n sid = cherrypy.request.cookie[\"slycatauth\"].value\n couchdb = slycat.web.server.database.couchdb.connect()\n session = couchdb.get(\"session\", sid)\n if session is not None:\n couchdb.delete(session)\n except:\n # if an exception was throw there is nothing to be done\n pass\n if user_name is not None:\n try:\n couchdb = slycat.web.server.database.couchdb.connect()\n sessions = [session for session in couchdb.scan(\"slycat/sessions\") if\n session[\"creator\"] == user_name]\n if sessions:\n #cherrypy.log.error(\"sessions found %s\" % user_name)\n for session in sessions:\n couchdb.delete(session)\n #cherrypy.log.error(\"sessions deleted %s\" % user_name)\n except:\n # if an exception was throw there is nothing to be done\n pass", "def test_delete_author_unlogged(self):\n request = self.client.delete(self.epoint)\n self.assertEqual(request.status_code, status.HTTP_403_FORBIDDEN)", "def _check_session(self, request):\n if request.user.is_authenticated:\n current_session_key = request.session.session_key\n stored_session_key = request.user.logged_in_user.session_key\n\n if stored_session_key and stored_session_key != current_session_key:\n self.switch_session_data(request, current_session_key,\n stored_session_key)\n\n # update LoggedInUser table with relevant session key\n request.user.logged_in_user.session_key = current_session_key\n request.user.logged_in_user.save()", "def do_logout():\n del session[CURRENT_USER_KEY]", "def check_unauthorized_response(response: HTTPResponse) -> bool:\n return response.status_code == 403", "def test_delete_ga_failure_no_admin(self):\n\n url = reverse('admin_google_authenticator')\n\n data = {\n 'google_authenticator_id': self.ga.id\n }\n\n self.client.force_authenticate(user=self.test_user_obj)\n response = self.client.delete(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_delete__forbidden(self):\n testing_config.sign_in('[email protected]', 123567890)\n\n with test_app.test_request_context(self.request_path):\n with self.assertRaises(werkzeug.exceptions.Forbidden):\n self.handler.do_delete(account_id=self.appuser_id)\n\n unrevised_appuser = user_models.AppUser.get_by_id(self.appuser_id)\n self.assertEqual('[email protected]', unrevised_appuser.email)", "def profile_unlogged():\n cookie = {'session_id': None}\n response = requests.get(f'{URL}/profile', cookies=cookie)\n assert response.status_code == 
403" ]
[ "0.6440854", "0.64278203", "0.63911074", "0.62447345", "0.6228785", "0.6101922", "0.60444987", "0.60331225", "0.6016866", "0.59865224", "0.5937146", "0.5915369", "0.5902086", "0.58984023", "0.5892552", "0.58844024", "0.58813035", "0.58813035", "0.58813035", "0.58813035", "0.58793944", "0.5864781", "0.58597726", "0.5859565", "0.58464926", "0.58251554", "0.58227354", "0.58129025", "0.58071876", "0.57962984" ]
0.8276541
0
checks that the connection is https and then returns the user's remote ip
def check_https_get_remote_ip(): if not (cherrypy.request.scheme == "https" or cherrypy.request.headers.get("x-forwarded-proto") == "https"): cherrypy.log.error("slycat-standard-authentication.py authenticate", "cherrypy.HTTPError 403 secure connection required.") raise cherrypy.HTTPError("403 Secure connection required.") return cherrypy.request.headers.get( "x-forwarded-for") if "x-forwarded-for" in cherrypy.request.headers else cherrypy.request.rem
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getRemoteHost():", "def remoteip(self) :\n\t\ttry :\n\t\t\treturn self._remoteip\n\t\texcept Exception as e:\n\t\t\traise e", "def get_remote_ip(request):\n \n return utilities.get_remote_ip(request)", "def get_remote_ip(request):\n return request.META.get(\"HTTP_REMOTE_ADDR\", request.META.get(\"REMOTE_ADDR\", \"\"))", "def remote_addr(env):\r\n # In production the remote address is always the load balancer\r\n # So check X-Forwarded-For first\r\n # E.g. HTTP_X_FORWARDED_FOR: '66.249.72.73, 75.101.144.164'\r\n if env.has_key('HTTP_X_FORWARDED_FOR'):\r\n ips = re.split(r'\\s*,\\s*', env['HTTP_X_FORWARDED_FOR'])\r\n if len(ips) > 0:\r\n return ips[0]\r\n\r\n return env['REMOTE_ADDR']", "def GetExternalIp():\n h = httplib2.Http(tempfile.gettempdir(), timeout=10)\n url = 'http://whatismyip.akamai.com'\n resp, content = h.request(url, 'GET')\n if resp.status == 200:\n return content\n for provider in (UltraDNSAuth(), MyResolverInfo()):\n answer = provider.GetClientIp()\n if answer:\n return answer", "def get_ip():\n return request.environ['HTTP_REMOTE_ADDR']", "def api_myip():\n return request.remote_addr, 200, {'Content-Type': 'text/plain'}", "def __lookup_public_ip(self):\n\n response = requests.get('https://api.ipify.org?format=json', timeout=self.timeout)\n\n if response.status_code == 200:\n ip_data = response.json()\n if 'ip' not in ip_data.keys():\n return 'Unable to determine IP'\n else:\n return ip_data['ip']\n else:\n return 'Unable to determine IP'", "def get_local_host_ip(self) -> str:", "def ip(self):\n return os.environ.get('REMOTE_ADDR')", "def _fetch_remote_addr(cls):\n if not _in_flask_context():\n return None\n return request.remote_addr", "def get_remote_host(request):\n \n return utilities.get_remote_host(request)", "def getLocalhostIP():\n return socket.getaddrinfo('localhost', 0)[0][4][0]", "def remote_addr(self):\r\n return self._environ.get('REMOTE_ADDR', '0.0.0.0')", "def checkIP(self):\n\t\tself.get(\"https://ifconfig.me/\")\n\t\treturn self.findId(\"ip_address\").text", "def get_remote_addr(self):\n connection = self._open_db()\n cursor = connection.cursor()\n cursor.execute('SELECT remote_addr FROM sessions WHERE id = ?;', \\\n (self.sid,))\n remote_addr = cursor.fetchone()\n cursor.close()\n connection.close()\n return remote_addr[0]", "def get_IP(): \n \n return socket.gethostbyname(socket.gethostname())", "def get_ip(self):", "def get_public_ip():\n public_ip = get('https://api.ipify.org').text\n return public_ip", "def publicIP(self):\n return self.query('https://plex.tv/:/ip')", "def remote_addr(self):\n return self._environ.get('REMOTE_ADDR', '0.0.0.0')", "def _current_ip_port(is_secure: bool, host: str, url: str) -> str:\n\n protocol = 'https://' if is_secure else 'http://'\n web_url = protocol + host\n return web_url + url", "def obtain_public_ip():\n from urllib2 import urlopen\n my_ip = urlopen('http://ip.42.pl/raw').read()\n logger.debug('The public ip is: %s' % my_ip)\n return str(my_ip)", "def known_ip(ip=DEFAULT_IP):\r\n tunnel(ip)", "def siteip(self) :\n\t\ttry :\n\t\t\treturn self._siteip\n\t\texcept Exception as e:\n\t\t\traise e", "def get_IP():\n\n return socket.gethostbyname(socket.gethostname())", "def get_global_ip() -> str:\n return urllib.request.urlopen(\"https://icanhazip.com\").read().decode().strip()", "def get_ip():\n if not request.headers.getlist(\"X-Forwarded-For\"):\n return str(request.remote_addr)\n else:\n return str(request.headers.getlist(\"X-Forwarded-For\")[0])", "def detect_ip_address():\n # Rather hackish way to get 
the local ip-address, recipy from\n # https://stackoverflow.com/a/166589\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n ip_address = s.getsockname()[0]\n s.close()\n return ip_address" ]
[ "0.7072389", "0.6898136", "0.6809364", "0.67104733", "0.65298426", "0.6514117", "0.6488618", "0.6484981", "0.6445124", "0.6399658", "0.63811284", "0.6367772", "0.6339807", "0.6320289", "0.6307788", "0.62999815", "0.62957853", "0.6291374", "0.6265773", "0.62566173", "0.6234748", "0.6233168", "0.622992", "0.6197269", "0.61666006", "0.6164911", "0.6157789", "0.61372894", "0.6125836", "0.6125451" ]
0.8518679
0
Method that displays the original and blurred images
def displayImages(self): plt.figure(figsize=(8,6)) plt.subplot(1,2,1) plt.imshow( self.original_image, cmap="gray") plt.title("Original Image") plt.subplot(1,2,2) plt.imshow( self.blurred_image, cmap="gray") plt.title("Blurred Image")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_image(self):\n cv2.imshow('Image', self.__diff_image())\n cv2.waitKey()", "def blurImage(self):\n\n print (\"--Blurring Main Image--\")\n self.blurButton.setDown(True)\n im = Image.open(self.ActivePhoto)\n blurred_image = im.filter(ImageFilter.GaussianBlur(1))\n blurred_image.save(self.ActivePhoto)\n self.photo.setPixmap(QtGui.QPixmap(self.ActivePhoto))", "def plotFace(original,blurred):\n plt.subplot(121),plt.imshow(original,cmap=cm.Greys_r),plt.title('Original')\n plt.xticks([]), plt.yticks([])\n plt.subplot(122),plt.imshow(blurred,cmap=cm.Greys_r),plt.title('Gaussian Blurred')\n plt.xticks([]), plt.yticks([])\n return None", "def display(self):\n nrow = 2\n ncol = len(self.views) + 1\n rows = [(self.views[0].original, len(self.views)),\n (self.views[0].image, len(self.views) + 1)]\n fig, axes = plt.subplots(nrows=nrow, ncols=ncol,\n figsize=self._figsize(rows),\n squeeze=True)\n originals = [(v.position.id, v.original) for v in self.views] + [\n ('combined', np.median(np.stack([v.original for v in self.views]), axis=0))]\n warped = [(v.position.id, v.image) for v in self.views] + [\n ('combined', self.image)]\n for ax, (title, img) in zip(axes.ravel(), originals + warped):\n ax.imshow(img)\n ax.axis('off')\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n ax.set(title=title)\n fig.tight_layout()\n fig.canvas.draw()\n img_array = np.array(fig.canvas.renderer._renderer)\n plt.close('all')\n return img_array", "def show_normal(self):\n if self.image is not None:\n self.show_image.show_original_image(\n self.image, self.width_original_image)\n self.show_image.show_result_image(\n self.image, self.width_result_image, self.angle)\n self.normal_mode = True\n self.ui.frame_navigator.hide()\n self.ui.frame_panorama.hide()", "def main():\n original = SimpleImage('images/mt-rainier.jpg')\n original.show()\n reflected = make_reflected('images/mt-rainier.jpg')\n reflected.show()", "def Show(orignal_img, sobel_image):\n # use imshow() function to show the images\n # syntax : cv2.imshow(winname, mat)\n cv2.imshow(\"Original_Image\", orignal_img)\n cv2.imshow(\"Sobel_Image\", sobel_image)\n cv2.waitKey(0)\n cv2.destroyAllWindows()", "def display(self):\n display(self.image)", "def visualize(original, s, m, l, s_pred, m_pred, l_pred):\n\tfig = plt.figure(figsize=(20, 10))\n\tplt.subplot(1,7,1)\n\tplt.title('Original image')\n\tplt.imshow(original)\n\n\tplt.subplot(1,7,2)\n\tplt.title('S image')\n\tplt.imshow(s)\n\tplt.subplot(1,7,3)\n\tplt.title('S Pred image')\n\tplt.imshow(s_pred)\n\n\tplt.subplot(1,7,4)\n\tplt.title('M image')\n\tplt.imshow(m)\n\tplt.subplot(1,7,5)\n\tplt.title('M Pred image')\n\tplt.imshow(m_pred)\n\n\tplt.subplot(1,7,6)\n\tplt.title('L image')\n\tplt.imshow(l)\n\tplt.subplot(1,7,7)\n\tplt.title('L Pred image')\n\tplt.imshow(l_pred)", "def blur_slide(self):\r\n std_input = self.horizontal.get() # Get the user STD input\r\n img = self.master.images[-1] # Select the displayed image for transformation\r\n blurred_image = blur.gaussian_blur(img, std_input) # Perform gaussian blurring on the input image\r\n self.master.display_image.display_image(img=blurred_image) # display the blurred image\r\n self.master.images.append(blurred_image) # Append the blurred image to the stack\r", "def show_to_window(self):\n if self.normal_mode:\n self.show_image.show_original_image(\n self.image, self.width_original_image)\n self.show_image.show_result_image(\n self.image, self.width_result_image, self.angle)\n\n else:\n if self.panorama_mode:\n image = draw_polygon(\n 
self.image.copy(),\n self.mapX_pano,\n self.mapY_pano)\n mapX = np.load(\n './plugins/Thread_inspection/view_image/maps_pano/mapX.npy')\n mapY = np.load(\n './plugins/Thread_inspection/view_image/maps_pano/mapY.npy')\n rho = self.panorama.rho\n\n self.result_image = cv2.remap(\n self.image,\n mapX,\n mapY,\n cv2.INTER_CUBIC)\n self.result_image = self.result_image[round(\n rho + round(self.moildev.getRhoFromAlpha(30))):self.h, 0:self.w]\n # print(self.width_result_image)\n else:\n image = draw_polygon(self.image.copy(), self.mapX, self.mapY)\n self.result_image = cv2.remap(\n self.image,\n self.mapX,\n self.mapY,\n cv2.INTER_CUBIC)\n self.show_image.show_original_image(\n image, self.width_original_image)\n self.show_image.show_result_image(\n self.result_image, self.width_result_image, self.angle)", "def use_effect(self):\n if self.preview_name in FILTERS:\n photo = Image.open(self.path.url[1:])\n preview = photo.filter(FILTERS.get(self.preview_name))\n preview.save(self.path.url[1:])", "def display(self):\n\t\tself.imgDisplay.set_from_pixbuf(self.getVisible())\n\t\tgc.collect()", "def show_original(self):\n #check if it is in color or grayscale\n if self.scaled.shape[-1] == 3:\n plt.imshow(self.scaled)\n plt.axis(\"off\")\n plt.show()\n else:\n plt.imshow(self.scaled, cmap=\"gray\")\n plt.axis(\"off\")\n plt.show()", "def process_base_image(img, kernel_size, show_image=False):\n processed_image = img.copy()\n processed_image = cv2.cvtColor(processed_image, cv2.COLOR_BGR2GRAY)\n processed_image = cv2.GaussianBlur(processed_image, kernel_size, 0)\n if show_image:\n display_img(processed_image, 'Gray Scale Image')\n return processed_image", "def display(self):\n nrow = 1\n ncol = len(self.views) + 1\n rows = [(self.views[0].image, len(self.views) + 1)]\n fig, axes = plt.subplots(nrows=nrow, ncols=ncol,\n figsize=self._figsize(rows),\n squeeze=True)\n for ax, (title, img) in zip(axes.ravel(),\n [(v.position.id, v.image) for v in self.views] + [\n ('combined', self.image)]):\n ax.imshow(img)\n ax.axis('off')\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n ax.set(title=title)\n fig.tight_layout()\n fig.canvas.draw()\n img_array = np.array(fig.canvas.renderer._renderer)\n plt.close('all')\n return img_array", "def display(self, raw_img=True):\n cv2.imshow('mask', self.__mask)\n if raw_img:\n cv2.imshow('raw image', self.__img)", "def show(image, label, weights, prediction, ax):\n global img_objects\n if len(img_objects)==0:\n for i in range(10):\n _img = ax[0, i].imshow(weights[i].reshape(28,28), cmap='gray')\n img_objects.append(_img)\n _img = ax[1, 5].imshow(image.reshape(28,28), cmap='gray')\n img_objects.append(_img)\n else:\n for i in range(10):\n img_objects[i].set_data(weights[i].reshape(28,28))\n img_objects[i].set_clim(vmin=0, vmax=np.max(weights[i]))\n img_objects[10].set_data(image.reshape(28,28))\n ax[0,5].set_title('truth: %d, predict: %d'%(np.argmax(label), prediction))", "def view(self):\n\t\tfigure_out = self.figure.copy()\n\t\timage_pairs = np.unique(self.local_matches[\"image_pairs\"][0])\n\t\tfor i in image_pairs:\n\t\t\t# draw bounding box\n\t\t\ti_loc = self.local_database[\"image_locs\"][np.where(self.local_database[\"image_idx\"] == i)[0][0]]\n\t\t\tcv2.rectangle(figure_out, (int(i_loc[0]), int(i_loc[1])), (int(i_loc[0]+i_loc[2]), int(i_loc[1]+i_loc[3])),\n\t\t\t\t\t\t color = (255,0,0), thickness=5)\n\t\t\t# label matches text\n\t\t\tcv2.putText(figure_out, str(i), (int(i_loc[0]-50), int(i_loc[1] + 50)), cv2.FONT_HERSHEY_SIMPLEX, 2,\n\t\t\t\t\t 
color=(255,0,0), thickness=7)\n\t\tself.save_figure(figure_out)", "def display(self, image):\n raise NotImplementedError()", "def show(self) -> None:\n cv.imshow(str(self.__class__), self.output_image)", "def show(self, name='Detections'):\n cv2.imshow(name, self.get_image())\n cv2.waitKey(0)\n cv2.destroyAllWindows()", "def show_image(self, idx):\n image, target = self.__getitem__(self, idx)\n im_h, im_w, _ = image.size()\n labels_num = target['labels']\n rescale = torch.tensor([[im_w, im_h, im_w, im_h]])\n bboxs = target['boxes'] * rescale\n img = image.permute(1, 2, 0).numpy()\n for i, bboxe in enumerate(bboxs):\n x, y, xm, ym = bboxe\n label = class_name[int(labels_num[i])]\n plot_one_box((int(x), int(y), int(xm), int(ym)), img, label=label, line_thickness=3)\n cv2.imshow('image', img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()", "def _state_main(self, gui):\n gui.entry.wait_variable(gui.entry_sv)\n\n '''Clean string'''\n files = literal_eval(gui.entry_sv.get())\n\n '''Remove previous images'''\n if hasattr(gui, \"panel\"):\n gui.panel.destroy()\n\n '''Load each image'''\n for file_name in files:\n file_name = file_name.replace(\"{\", \"\").replace(\"}\", \"\")\n # image = tk.PhotoImage(file=file_name)\n if \".CR2\" in file_name:\n '''Rawpy implementation'''\n file_image = rawpy.imread(file_name)\n file_image = file_image.postprocess()\n '''Rawkit implementation'''\n '''file_image = Raw(file_name)\n file_image = np.array(file_image.to_buffer())'''\n '''OpenCV implementation'''\n '''file_image = cv2.imread(file_name)'''\n else:\n file_image = Image.open(file_name)\n '''image = file_image.resize((500, 500), Image.ANTIALIAS)\n image = ImageTk.PhotoImage(image)\n gui.panel = tk.Label(gui.root, image=image)\n gui.panel.image = image\n gui.panel.pack()'''\n # panel.grid(row=2)\n\n image_data = np.array(file_image)\n image_data = cv2.cvtColor(image_data, cv2.COLOR_RGB2GRAY)\n '''print(image_data.shape)\n print(image_data)\n print(len(image_data))\n print(len(image_data[0]))'''\n returned_image = Image.fromarray(image_data)\n '''cv2.imshow(\"Gray\", image_data)\n cv2.waitKey()\n cv2.destroyWindow(\"Gray\")'''\n\n '''enhanced_contrast = ImageEnhance.Contrast(Image.fromarray(file_image))\n enhanced_image = enhanced_contrast.enhance(255)\n enhanced_data = np.array(enhanced_image)\n plot_functions.imshow(enhanced_image)\n plot_functions.show()'''\n\n # color_space = cv2.cvtColor(image_data, cv2.COLOR_RGB2HSV)\n # print(color_space)\n \n '''Create mask for white-ish pixels'''\n '''lower_background = np.array([150, 150, 150])\n upper_background = np.array([255, 255, 255])\n print(image_data)\n white_mask = cv2.inRange(image_data, lower_background, upper_background)\n white_mask = cv2.morphologyEx(white_mask, cv2.MORPH_OPEN, np.ones((3,3),np.uint8))\n white_mask = cv2.morphologyEx(white_mask, cv2.MORPH_DILATE, np.ones((3, 3), np.uint8))\n white_mask = white_mask / 255'''\n\n '''Create mask for black-ish pixels'''\n '''lower_background = np.array([0, 0, 0])\n upper_background = np.array([25, 25, 25])\n black_mask = cv2.inRange(image_data, lower_background, upper_background)\n black_mask = cv2.morphologyEx(black_mask, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))\n black_mask = cv2.morphologyEx(black_mask, cv2.MORPH_DILATE, np.ones((3, 3), np.uint8))\n black_mask = black_mask / 255'''\n\n '''Add masks together'''\n '''background_mask = white_mask\n # Ensure no value is above 1\n background_mask = np.clip(background_mask, 0, 1)'''\n \n copied_image_data = np.asarray(returned_image).copy()\n # 
background_mask = np.logical_not(background_mask)\n '''for row_index, [mask_row, image_row] in enumerate(zip(background_mask, copied_image_data)):\n # place black pixel on corresponding masked pixels\n # copied_image_data[row_index] = np.array([image_row[pixel] * int(mask_row[pixel]) for pixel in range(len(mask_row))])\n # make pixel fully white on corresponding masked pixels\n copied_image_data[row_index] = np.array([np.array([255, 255, 255]) if int(mask_row[pixel]) else image_row[pixel] for pixel in range(len(mask_row))])'''\n\n '''Turn removed pixels red'''\n '''mask_image = Image.fromarray(copied_image_data)\n plot_functions.imshow(mask_image)\n plot_functions.show()'''\n trapezoid_data = copied_image_data.copy()\n\n enhanced_contrast = ImageEnhance.Contrast(Image.fromarray(trapezoid_data))\n enhanced_image = enhanced_contrast.enhance(255)\n trapezoid_data = np.array(enhanced_image)\n\n '''Detect lines'''\n edges = cv2.Canny(trapezoid_data, 75, 150)\n lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 100, maxLineGap=1000)\n # print(lines)\n for line in lines:\n x1, y1, x2, y2 = line[0]\n if y1 == y2:\n cv2.line(copied_image_data, (x1, y1), (x2, y2), (255, 255, 255), 1)\n\n '''Trapezoid attempt'''\n\n # filters image bilaterally and displays it\n bilatImg = cv2.bilateralFilter(trapezoid_data, 5, 175, 175)\n\n # finds edges of bilaterally filtered image and displays it\n edgeImg = cv2.Canny(bilatImg, 75, 200)\n\n # gets contours (outlines) for shapes and sorts from largest area to smallest area\n contours, hierarchy = cv2.findContours(edgeImg, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n contours = sorted(contours, key=cv2.contourArea, reverse=True)\n\n # drawing red contours on the image\n for con in contours:\n cv2.drawContours(trapezoid_data, con, -1, (255, 255, 255), 3)\n\n '''Detect corners'''\n dst = cv2.cornerHarris(edges, 30, 31, 0.001)\n dst = cv2.dilate(dst, None)\n ret, dst = cv2.threshold(dst, 0.01 * dst.max(), 255, 0)\n dst = np.uint8(dst)\n\n # find centroids\n ret, labels, stats, centroids = cv2.connectedComponentsWithStats(dst)\n # define the criteria to stop and refine the corners\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100,\n 0.001)\n corners = cv2.cornerSubPix(edges, np.float32(centroids), (5, 5),\n (-1, -1), criteria)\n\n good_corners = []\n for corner in corners:\n if (corner[1] < 1000) & (corner[1] > 650) & (corner[0] > 250) & (corner[0] < 2250):\n good_corners.append(corner)\n cv2.circle(edges, (corner[0], corner[1]), 10, (255, 255, 255))\n\n print(good_corners)\n if len(good_corners) >= 3:\n corner_combos = itertools.combinations(good_corners, 3)\n elif len(good_corners) > 1:\n corner_combos = itertools.combinations(good_corners, 2)\n\n best_corner_combo = None\n best_coef = np.inf\n for corner_combo in corner_combos:\n regression = LinearRegression().fit(np.array([corner[0] for corner in corner_combo]).reshape(-1, 1),\n np.array([corner[1] for corner in corner_combo]))\n if np.abs(regression.coef_) < best_coef:\n best_coef = np.abs(regression.coef_)\n best_corner_combo = np.array([corner[1] for corner in corner_combo])\n\n y_edge = int(round(np.mean(best_corner_combo)))\n edges = edges[y_edge:3000, 200:2200]\n copied_image_data = copied_image_data[y_edge:2500, 200:2200]\n trapezoid_data = trapezoid_data[y_edge:2500, 200:2200]\n\n # and double-checking the outcome\n cv2.imshow(\"linesEdges\", edges)\n cv2.imshow(\"linesDetected\", copied_image_data)\n cv2.imshow(\"Contours check\", trapezoid_data)\n cv2.waitKey()\n cv2.destroyWindow(\"Contours 
check\")\n\n # find the perimeter of the first closed contour\n perim = cv2.arcLength(contours[0], True)\n # setting the precision\n epsilon = 0.02 * perim\n # approximating the contour with a polygon\n approxCorners = cv2.approxPolyDP(contours[0], epsilon, True)\n # check how many vertices has the approximate polygon\n approxCornersNumber = len(approxCorners)\n\n for corners in approxCorners:\n cv2.circle(trapezoid_data, (corners[0], corners[1]), radius=10, color=(255, 255, 255), thickness=-1)\n cv2.imshow(\"Vertex position\", trapezoid_data)\n cv2.waitKey()\n cv2.destroyWindow(\"Vertex position\")\n cv2.imshow(\"linesEdges\", edges)\n cv2.imshow(\"linesDetected\", copied_image_data)\n cv2.waitKey(0)\n cv2.destroyAllWindows()", "def display_preprocessed(env,frame):\n env.reset()\n\n #Plot the figure\n plt.figure()\n\n #Show the pre processed frame\n plt.imshow(preprocess_frame(env.reset(), (0, 0, 0, 0), 84), cmap=\"gray\")\n\n #Add title\n plt.title('Pre Processed image')\n\n #Show the plot\n plt.show()", "def show_image(im, rescale=False) :\r\n \r\n plt.figure()\r\n im = im.copy()\r\n im.resize(*LFW_IMAGESIZE)\r\n if rescale :\r\n plt.imshow(im.astype(float), cmap=plt.cm.get_cmap(\"gray\"))\r\n else :\r\n plt.imshow(im.astype(float), cmap=plt.cm.get_cmap(\"gray\"), vmin=0, vmax=255)\r\n plt.axis('off')\r\n plt.show()", "def display_images():\n vc = cv2.VideoCapture(0) # Open webcam\n figure, ax = plt.subplots(1, 2, figsize=(10, 5)) # Intiialise plot\n\n count = 0 # Counter for number of aquired frames\n intensity = [] # Append intensity across time\n\n # For loop over generator here\n intensity.append(imageintensity)\n plot_image_and_brightness() # Call plot function\n count += 1\n\n # This triggers exit sequences when user presses q\n if cv2.waitKey(1) & 0xFF == ord('q'):\n # Clean up here\n plt.close('all') # close plots\n generator.close() # Use generator exit for clean up,\n break # break loop", "def show_img(self):\n if self.image is not None:\n cv2.imshow(self.image_window, self.image)\n cv2.waitKey(1)\n else:\n rospy.loginfo(\"No image to show yet\")", "def show(self):\n if self.video:\n self.video.write(self.img)\n cv2.imshow('Simpy', self.img)\n cv2.waitKey(1000 // self.fps)", "def show(self):\n\n self.image.show()" ]
[ "0.65680623", "0.656295", "0.6562389", "0.64684945", "0.64078856", "0.6385656", "0.63168657", "0.6307291", "0.62678707", "0.6236219", "0.6151714", "0.60914916", "0.6090772", "0.608311", "0.6025253", "0.5972603", "0.59622127", "0.5956339", "0.5948988", "0.59457", "0.59445393", "0.5940314", "0.59290123", "0.59140533", "0.59096897", "0.5881388", "0.5867518", "0.58665603", "0.5865961", "0.5865627" ]
0.82821536
0
paste a file or directory that has been previously copied
def paste(location): copyData = settings.getDataFile() if not location: location = "." try: data = pickle.load(open(copyData, "rb")) speech.speak("Pasting " + data["copyLocation"] + " to current directory.") except: speech.fail("It doesn't look like you've copied anything yet.") speech.fail("Type 'hallie copy <file>' to copy a file or folder.") return process, error = subprocess.Popen(["cp", "-r", data["copyLocation"], location], stderr=subprocess.STDOUT, stdout=subprocess.PIPE).communicate() if "denied" in process: speech.fail("Unable to paste your file successfully. This is most likely due to a permission issue. You can try to run me as sudo!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _pasteFile(self) -> None:\n if not self._fileClipboard:\n return\n cut = self._fileClipboard.pop()\n filenames = [x.name for x in self._fileClipboard]\n destPaths = [self._currPath.joinpath(x) for x in filenames]\n try:\n duplicates = []\n for src, dest in zip(self._fileClipboard, destPaths):\n if src == dest:\n raise shutil.SameFileError\n if dest in self._currPath.glob('*'):\n duplicates.append(dest)\n if duplicates:\n if self._overwriteFileMsgBox(duplicates) == QMessageBox.Cancel:\n self._fileClipboard.clear()\n self._pasteFileAction.setEnabled(False)\n return\n for src, dest in zip(self._fileClipboard, destPaths):\n if cut and src.is_file():\n shutil.move(str(src), str(dest))\n elif src.is_dir():\n dir_util.copy_tree(str(src), str(dest))\n if cut:\n shutil.rmtree(src)\n elif src.is_file():\n shutil.copy(str(src), str(dest))\n elif not src.exists():\n raise FileNotFoundError\n self._statusBar.showMessage('File pasted!', 3000)\n self._fileClipboard.clear()\n self._pasteFileAction.setEnabled(False)\n except shutil.SameFileError:\n self._statusBar.showMessage('You cannot overwrite the same file!', 3000)\n self._fileClipboard.clear()\n except PermissionError:\n self._statusBar.showMessage('No permission to copy the file!', 3000)\n self._fileClipboard.clear()\n except FileNotFoundError:\n self._statusBar.showMessage('Cannot find the source file!', 3000)\n self._fileClipboard.clear()\n finally:\n self._listDirectories()", "def copy_paste(src_path, dst_path):\n shutil.copy2(src_path, dst_path)\n\n return True", "def copy(self, src_path: str, tgt_path: str) -> None:", "def _copy_file ( self, source, dest ):\n return", "def copy(location):\n\tcopyData = settings.getDataFile()\n\tcopyFileLocation = os.path.abspath(location)\n\tcopy = {\"copyLocation\": copyFileLocation}\n\tdataFile = open(copyData, \"wb\")\n\tpickle.dump(copy, dataFile)\n\tspeech.speak(location + \" copied successfully!\")\n\tspeech.speak(\"Tip: use 'hallie paste' to paste this file.\")", "def doTheCopy(argpath,argdest):\n print(\"To copy:\"+argpath)\n shutil.copy(argpath,argdest)", "def cut_paste(src_path, dst_path):\n shutil.move(src_path, dst_path)\n return True", "def from_clipboard(self):\n for url in QApplication.clipboard().mimeData().urls():\n src = url.path()\n dst = os.path.join(self.current_location(), os.path.basename(src))\n try:\n if os.path.islink(src) or os.path.isfile(src):\n copyfile(src, dst, overwrite=False)\n elif os.path.isdir(src):\n copytree(src, dst, overwrite=False)\n except:\n QMessageBox.critical(self, 'Error copying file/dir', traceback.format_exc())", "def copy(source, target):\n\tshutil.copy(source, target)", "def copy(self, source, target, recursive=True):\n if recursive:\n command = 'cp -R %s %s'\n else:\n command = 'cp %s %s'\n self.communicate(command % (source, target))", "def copy(to_end=False):\n # Find a way to generalize this for different systems\n if to_end:\n with open('/Users/john/Terminal Saved Output', 'r') as f:\n output = f.read().replace('bpython', 'Python')\n code = output.split('\\nPython')[-1]\n else:\n code = pyperclip.paste()\n pyperclip.copy(parse_code(code))\n return None", "def cp(self, src, dest):\r\n return self._call(\"-cp\", src, dest, suppress_output=True)", "def copy(self):\n\n if self.path_source is not None:\n full_source_path = os.path.join(\n os.path.expandvars(self.path_source), self.name\n )\n\n if self.sudo:\n spawn.process(\n f'cp -v -- \"{full_source_path}\" \"{self.path_destination}\"',\n sudo=True,\n )\n else:\n message.info(\n f\"Copied: 
'{full_source_path}' --> '{self.path_destination}'\"\n )\n shutil.copy(full_source_path, self.path_destination)\n else:\n message.error(f\"'{self.name}' has no source from which to copy from.\")", "def make_wb_copy():\r\n shutil.copy(full_target_file_name, path_name + copied_file_name) # copy the file\r", "def copy(self, src, dest):\n\n src = os.path.join(os.path.dirname(__file__), \"collections\", \"kitchensink\", src)\n dest = os.path.join(self.checkout, dest)\n if os.path.isdir(src):\n shutil.copytree(src, dest)\n else:\n shutil.copy(src, dest)\n return dest", "def copy_file( filename , destination , display = False ):\n if display: # optional\n if os.path.isdir( destination ):\n print 'placing a copy of ' + os.path.relpath( filename ) + ' into the ' + os.path.relpath( destination ) + ' directory'\n elif os.path.isfile( destination ):\n print 'copying ' + os.path.relpath( filename ) + ' to ' + os.path.relpath( destination )\n shutil.copy( filename , destination )", "def put(self, src, dst):\r\n abs_src = os.path.expanduser(src)\r\n assert os.path.exists(abs_src), 'File does not exist, cannot copy: %s' % abs_src\r\n return self._do_put(abs_src, dst)", "def write_to_paste_buffer(txt):\n pyperclip.copy(txt)", "def _copy_file(src, dest):\n\n if src is None or dest is None:\n raise ValueError(\"src and dest must not be None\", src, dest)\n\n if not os.path.isfile(src):\n raise ValueError(\"src file does not appear to exist\", src)\n\n # if error on copy, subprocess will raise CalledProcessError\n try:\n subprocess.run(\n [\"/usr/bin/ditto\", src, dest], check=True, stderr=subprocess.PIPE\n )\n except subprocess.CalledProcessError as e:\n logging.critical(\n f\"ditto returned error: {e.returncode} {e.stderr.decode(sys.getfilesystemencoding()).rstrip()}\"\n )\n raise e", "def cp(src, dest):\n _shutil.copy2(native(src), native(dest))", "def copy(self, path):\n shutil.copy(self.path, path)", "def copydir(self):\n pass", "def copy_file(filename, dst):\n # Create dir if needed\n dir_path = os.path.dirname(os.path.expanduser(dst))\n if not os.path.isdir(dir_path):\n os.makedirs(dir_path)\n\n src = os.path.join(get_data(''), filename)\n dst = os.path.expanduser(dir_path)\n shutil.copy2(src, dst)", "def copy_fixture(src: Path, dest: Path) -> Path:\n return shutil.copy(src.absolute(), dest.absolute())", "def copyTwr(self):\n # this is executing during write_input, so curdir is run_dir\n shutil.copyfile(os.path.join(self.fst_dir,self.twr_file), self.twr_file)", "def copy(self, source_host, dest_host, filename):", "def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)", "def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)", "def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)", "def paste(self, text):\n if self.file is None:\n return self.paste_to_stdout(text)\n return self.paste_to_file(text)" ]
[ "0.75675744", "0.72383505", "0.69673383", "0.69241136", "0.6830915", "0.6801877", "0.67643505", "0.66626996", "0.6652511", "0.65969956", "0.6596979", "0.64582324", "0.64567304", "0.6437227", "0.6432736", "0.6422666", "0.6394669", "0.6371352", "0.6369552", "0.6356414", "0.63398904", "0.6338136", "0.6322603", "0.6307129", "0.62965447", "0.6287604", "0.6260478", "0.6260478", "0.6260478", "0.62596005" ]
0.7870862
0
Display list of bookmarks for any given user
def user_list(request, user_name): bookmarks = get_list_or_404(Bookmark.objects.all().filter(human__username=user_name)) return render(request, 'urly_bird/any_user_list.html', {'bookmarks': bookmarks})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_bookmark(request):\r\n \r\n if request.method == 'GET':\r\n if request.GET.get('path'):\r\n object_list = BookmarkItem.objects.filter(bookmark__user=request.user).order_by('order')\r\n #print urllib.unquote(request.GET.get('path'))\r\n try:\r\n bookmark = Bookmark.objects.get(user=request.user)\r\n except Bookmark.DoesNotExist:\r\n bookmark = Bookmark(user=request.user)\r\n bookmark.save()\r\n try:\r\n BookmarkItem.objects.get(bookmark__user=request.user, link=urllib.unquote(request.GET.get('path')))\r\n is_bookmark = True\r\n except BookmarkItem.DoesNotExist:\r\n is_bookmark = False\r\n else:\r\n object_list = \"\"\r\n is_bookmark = \"\"\r\n else:\r\n object_list = \"\"\r\n is_bookmark = \"\"\r\n \r\n return render_to_response('admin/includes_grappelli/bookmarks.html', {\r\n 'object_list': object_list,\r\n 'bookmark': bookmark,\r\n 'is_bookmark': is_bookmark,\r\n 'admin_title': ADMIN_TITLE,\r\n 'path': request.GET.get('path', ''),\r\n })", "def get_all_bookmark(request):\n bookmarks = Bookmarked.objects.filter(user=request.username)\n return get_bookmark_list(bookmarks)", "def get_friend_bookmarks(user_id, item_id):\n friends = get_friends(user_id)\n bookmarks = Bookmark.objects.select_related('user', 'userprofile') \\\n .filter(user__in=friends, item=item_id) \\\n .order_by('-created_at')\n\n response = [{\n 'user_name': bookmark.user.get_full_name(),\n 'user_url': reverse('user-profile', args=[bookmark.user_id]),\n 'user_thumbnail': bookmark.user.userprofile.thumbnail.url\n } for bookmark in bookmarks]\n\n return simplejson.dumps(response)", "def _display_bookmarks(self):\n if self.check_valid_result_data(\"bookmarks\", silent=True):\n display(self._last_result.bookmarks)\n else:\n nb_markdown(f\"No Bookmarks related to {self.url}\")", "def mybookmarks(self, request, pk=None):\n user = request.auth.user\n events = user.bookmarks\n try:\n serializer = EventSerializer(\n events, many=True, context={'request': request})\n return Response(serializer.data)\n except Exception as ex:\n return Response({'message': ex.args[0]})", "def preview(request):\n ctx = {}\n \n ctx[\"area\"] = \"bookmarks\"\n ctx[\"preview_theme\"] = request.GET.get(\"t\", \"light\")\n ctx[\"bookmarks\"] = Bookmark.by_user(request.user)[:5]\n \n return TemplateResponse(request, \"users/preview.html\", ctx)", "def list_bookmarks(self) -> pd.DataFrame:\n return self._list_items(item_type=\"bookmarks\") # type: ignore", "def getBookmarks(self, address: ghidra.program.model.address.Address) -> List[ghidra.program.model.listing.Bookmark]:\n ...", "def get_bookmarked_items(user_id):\n return list(Bookmark.objects.filter(user=user_id).values_list(\n 'item_id', flat=True))", "def profile(userid):\n # get bookmarked content\n bookmarks = db_bookmarks.find({\"user\": userid})\n bookmarks = [{\"search_term\": db_search_terms.find_one({\"_id\": b[\"search_id\"]}).get(\"value\"), \"source\": b[\"source\"], \"url\": b[\"url\"], \"date_saved\": b[\"date_saved\"].strftime(\"%H:%M %B %d, %Y\")} for b in bookmarks]\n # get previous searches\n searches = db_searches.find({\"user\": userid})\n searches = [{\"search_term\": db_search_terms.find_one({\"_id\": s[\"search_id\"]}).get(\"value\"), \"category\": s[\"category\"], \"datetime\": s[\"datetime\"].strftime(\"%H:%M %B %d, %Y\")} for s in searches]\n # get byod\n byod = db_byod.find({\"user\": userid})\n byod = [{\"content_type\": b[\"content_type\"], \"doc_name\": b[\"doc_name\"], \"text\": b[\"text\"], \"key_points\": b[\"key_points\"], \"datetime_uploaded\": 
b[\"datetime_uploaded\"].strftime(\"%H:%M %B %d, %Y\")} for b in byod]\n # get highlightds\n # highlights = db_highlights.find({\"user\": userid})\n # highlights = [{\"search_term\": db_search_terms.find_one({\"_id\": h[\"search_id\"]}).get(\"value\"), \"highlighted_word\": h[\"highlighted_word\"], \"results\": h[\"results\"], \"date_saved\": h[\"date_saved\"].strftime(\"%H:%M %B %d, %Y\")} for h in highlights]\n jsonob = jsonify(bookmarks=bookmarks,\n searches=searches,\n byod=byod)\n return jsonob", "def bookmarks(self):\r\n\r\n url = self._bookmarks_url.format(self.username, 1)\r\n soup = self.request(url)\r\n div = soup.find(\"div\", {\"class\": \"bookmarks-index dashboard filtered region\"})\r\n h2 = div.h2.text.split()\r\n return int(h2[4].replace(',', ''))", "def bmark_get(request):\r\n rdict = request.matchdict\r\n params = request.params\r\n hash_id = rdict.get('hash_id', None)\r\n username = rdict.get('username', None)\r\n title = params.get('description', None)\r\n url = params.get('url', None)\r\n if username:\r\n username = username.lower()\r\n\r\n # The hash id will always be there or the route won't match.\r\n bookmark = BmarkMgr.get_by_hash(hash_id, username=username)\r\n\r\n # tag_list is a set - no duplicates\r\n tag_list = set()\r\n\r\n if title or url:\r\n suggested_tags = suggest_tags(url)\r\n suggested_tags.update(suggest_tags(title))\r\n tag_list.update(suggested_tags)\r\n\r\n if bookmark is None:\r\n request.response.status_int = 404\r\n ret = {'error': \"Bookmark for hash id {0} not found\".format(hash_id)}\r\n # Pack the response with Suggested Tags.\r\n resp_tags = {'tag_suggestions': list(tag_list)}\r\n ret.update(resp_tags)\r\n return _api_response(request, ret)\r\n else:\r\n return_obj = dict(bookmark)\r\n return_obj['tags'] = [dict(tag[1]) for tag in bookmark.tags.items()]\r\n\r\n if 'with_content' in params and params['with_content'] != 'false':\r\n if bookmark.readable:\r\n return_obj['readable'] = dict(bookmark.readable)\r\n # Pack the response with Suggested Tags.\r\n ret = {\r\n 'bmark': return_obj,\r\n 'tag_suggestions': list(tag_list)\r\n }\r\n return _api_response(request, ret)", "def show_users():\r\n users = User.query.order_by(User.last_name,User.first_name).all()\r\n return render_template('list.html', users=users)", "def make_bookmark(user=None):\r\n bmark = Bmark(random_url(),\r\n username=u\"admin\",\r\n desc=random_string(),\r\n ext=random_string(),\r\n tags=u\"bookmarks\")\r\n\r\n if user:\r\n bmark.username = user.username\r\n bmark.user = user\r\n\r\n DBSession.add(bmark)\r\n DBSession.flush()\r\n return bmark", "def bookmark(user_id, item_id):\n Bookmark.objects.get_or_create(user=User.objects.get(pk=user_id),\n item=Item.objects.get(pk=item_id))", "def books_page(request):\r\n user_books = [] # Create an empty list for the users books\r\n user = request.user # Get the currently authenticated user\r\n\r\n for book in Books.objects.filter(user_id__pk=user.id):\r\n user_books.append(book)\r\n\r\n return render(request, 'ez_main/books_page.html', {'user_books': user_books})", "def bmark_list(request):\r\n # Removed because view was deprecated\r\n return bmarks.recent(request)", "def __showBookmarksMenu(self):\n self.bookmarksMenu.clear()\n \n filenames = self.getOpenFilenames()\n for filename in sorted(filenames):\n editor = self.getOpenEditor(filename)\n for bookmark in editor.getBookmarks():\n bmSuffix = \" : {0:d}\".format(bookmark)\n act = self.bookmarksMenu.addAction(\n \"{0}{1}\".format(\n Utilities.compactPath(\n filename,\n 
self.ui.maxMenuFilePathLen - len(bmSuffix)),\n bmSuffix))\n act.setData([filename, bookmark])", "def get_bookmarks_given_folder_slug(user, slug):\n try:\n f = Folder.objects.get(user=user, slug=slug)\n except:\n f = None\n\n bookmarks = get_folder_results(folder=f)\n return bookmarks", "def user_show(ctx, args):\n for user_id in args:\n data = ctx.obj.get_user_by_username(user_id)\n output_json_data(data)", "def __showBookmarkMenu(self):\n bookmarksFound = 0\n filenames = self.getOpenFilenames()\n for filename in filenames:\n editor = self.getOpenEditor(filename)\n bookmarksFound = len(editor.getBookmarks()) > 0\n if bookmarksFound:\n self.menuBookmarksAct.setEnabled(True)\n return\n self.menuBookmarksAct.setEnabled(False)", "def get_bookmarks(self, snatched=False):\n # TODO: Implement this properly\n # Idea:\n # - Get first page of bookmarks\n # - Determine number of last page ('_get_max_pagenum')\n # - Go through all bookmarks pages, parsing them for\n # KGItems ('_parse_result_page')\n #start_page = self._build_tree(\n # self._session.get(KG_URL + BOOKMARKS_SCRIPT,\n # params={'page':0}).content)\n raise NotImplementedError", "def get(self, user):\n search = True if self.request.args.get('q') else False\n limit = int(self.request.args.get('limit')) if self.request.args.get('limit') else 20\n page = int(self.request.args.get('page')) if self.request.args.get('page') else 1\n bucketlists = user.bucketlists.paginate(page, limit, True).items\n bucketlists = user.bucketlists.filter(Bucketlist.name.contains(self.request.args.get('q'))) if self.request.args.get('q') else bucketlists\n\n bucketlists = [\n {'id': bucketlist.id,\n 'name': bucketlist.name,\n 'items': [\n {'id': item.id,\n 'name': item.description,\n 'date_created': str(item.date_created),\n 'date_modified': str(item.date_modified),\n 'done': str(item.is_done)\n } for item in bucketlist.items\n ],\n 'date_created': str(bucketlist.date_created),\n 'date_modified': str(bucketlist.date_modified),\n 'created_by': bucketlist.created_by\n } for bucketlist in bucketlists\n ]\n\n # if empty retutn no bucketlists added\n if not bucketlists:\n return \"You have no avialable bucketlists\", 200\n\n return bucketlists, 200", "def list_users():\n\n db_users = User.query.all()\n\n return render_template(\"list_users.html\", headline=\"Blogly Users\", users=db_users)", "def snippets_by_author(request, username):\n user = get_object_or_404(User, username__exact=username)\n return list_detail.object_list(request,\n queryset=Snippet.objects.get_by_author(user.username),\n extra_context={ 'object': user },\n template_name='cab/user_detail.html',\n **base_generic_dict)", "def count_user_bookmarks(username):\r\n total = BmarkMgr.count(username)\r\n stat = StatBookmark(\r\n attrib=USER_CT.format(username),\r\n data=total\r\n )\r\n DBSession.add(stat)", "def users_view():\n data = get_data()\n return [{'user_id': i, 'name': 'User {0}'.format(str(i))}\n for i in data.keys()]", "def _add_bookmark(self, user=None):\r\n if user:\r\n DBSession.add(user)\r\n username = user.username\r\n else:\r\n username = u'admin'\r\n\r\n b = Bmark(\r\n url=gen_random_word(12),\r\n username=username,\r\n tags=gen_random_word(4),\r\n )\r\n\r\n b.clicks = randint(0, MAX_CLICKS)\r\n b.hash_id = gen_random_word(5)\r\n\r\n DBSession.add(b)\r\n DBSession.flush()\r\n b.hashed.clicks = b.clicks\r\n DBSession.flush()\r\n transaction.commit()", "def show_feed():\n user_id = session.get('user_id')\n user = User.query.get(user_id)\n suggested_friends = get_suggested_friends(user_id)\n\n 
return render_template('feed.html', user=user, \n suggested_friends=suggested_friends)", "def handle_bookmark(user_id, project_id, status):\n user = user_collection.find_one({\"_id\": user_id})\n bookmark_list = user[\"bookmarks\"]\n if status:\n bookmark_list.append(project_id)\n else:\n bookmark_list.remove(project_id)\n user_collection.find_one_and_update(\n {\"_id\": user_id},\n {\n \"$set\": {\n \"bookmarks\": bookmark_list,\n }\n },\n upsert=False,\n )" ]
[ "0.71984094", "0.7148708", "0.6793257", "0.67187834", "0.6641313", "0.6623683", "0.633557", "0.6249334", "0.62378967", "0.6236893", "0.619135", "0.61078155", "0.6044722", "0.59958416", "0.5948521", "0.5945683", "0.5907882", "0.5876983", "0.5854694", "0.58490324", "0.58267343", "0.58072567", "0.5805562", "0.5796738", "0.57923144", "0.57739043", "0.57672673", "0.57638896", "0.57480115", "0.57447064" ]
0.7803272
0
Converts cones in the GUIs frame of reference to cones in the lidar's frame of reference and gets those in the lidar's field of view Sets detected_cones with only cones within the lidar's field of view. Sorts the cones by angle starting at 135 degrees.
def lidarScan(self): # Get cones seen by lidar lidar_coords = [] for point in self.gui_points: # Convert from gui frame to lidar frame x = (point[0] - self.lidar_pos[0])*scaling_factor y = (self.lidar_pos[1] - point[1])*scaling_factor # Convert points to polar form and filter dist = math.hypot(x,y) angle = math.degrees(math.atan2(x,y)) if dist <= LIDAR_RANGE and abs(angle) < LIDAR_FOV/2: lidar_coords.append(((float(x), float(y)), dist, angle, point)) # Sort cones by angle self.detected_cones = sorted(lidar_coords,key=itemgetter(2)) cones = [] for c in self.detected_cones: cones.append(c[0]) return cones
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rotationDetermination(self):\n \n for index, row in enumerate(self.magdata):\n if index > 11 and index < (len(self.magdata) - 12):\n br1 = [row[0] for row in self.magdata[(index-12):(index-2)]]\n bt1 = [row[1] for row in self.magdata[(index-12):(index-2)]]\n bn1 = [row[2] for row in self.magdata[(index-12):(index-2)]]\n b1 = np.matrix((np.mean(br1), np.mean(bt1), np.mean(bn1)))\n\n br2 = [row[0] for row in self.magdata[(index+2):(index+12)]]\n bt2 = [row[1] for row in self.magdata[(index+2):(index+12)]]\n bn2 = [row[2] for row in self.magdata[(index+2):(index+12)]]\n b2 = np.matrix((np.mean(br2), np.mean(bt2), np.mean(bn2)))\n\n theta = np.arccos(np.dot(b1,b2.T)/(np.linalg.norm(b1)*np.linalg.norm(b2)))*180/np.pi\n\n self.detections.rotations.append(theta[0,0])\n self.detections.rotationTimeTags.append(self.timestamps[index])\n \n\n## self.b1 = b1\n## self.b2 = b2\n self.detections.rotationBoundary=[]\n if len(self.detections.rotations) != 0:\n \n for index, theta in enumerate(self.detections.rotations):\n if index > 0:\n if theta > 30 and self.detections.rotations[index-1] < 30:\n self.detections.rotationBoundary.append(self.detections.rotationTimeTags[index])\n if index < len(self.detections.rotations)-1:\n if theta > 30 and self.detections.rotations[index+1] < 30:\n self.detections.rotationBoundary.append(self.detections.rotationTimeTags[index])", "def construction_sign_detection(img_in):\n\n thresh1 = 110\n thresh2 = 60\n cannyEdges = cv2.Canny(img_in, thresh1, thresh2)\n # cv2.imshow(\"test\", cannyEdges)\n\n lines = cv2.HoughLinesP(cannyEdges, rho=1, theta=np.pi /180, threshold=40, minLineLength=30, maxLineGap=2)\n\n Line_list = []\n Angle_45 = []\n Angle_M45 = []\n\n for line in lines:\n line = line.flatten()\n line_instance = Line(line)\n if line_instance.length < 500 and line_instance.angle != 0 and ConsSide(img_in,line_instance):\n Line_list.append(line_instance) \n # cv2.line(img_in,(line[0],line[1]), (line[2], line[3]),(255, 0, 0), 3)\n Angle_45.append(np.abs(line_instance.angle - 45))\n Angle_M45.append(np.abs(line_instance.angle + 45))\n\n if len(Angle_45) == 0:\n return None\n if len(Angle_M45) == 0:\n return None\n\n index = np.argsort(Angle_45)\n line1 = Line_list[index[0]]\n line2 = Line_list[index[1]]\n\n index = np.argsort(Angle_M45)\n line3 = Line_list[index[0]]\n line4 = Line_list[index[1]]\n\n column45 = int((line1.mid[0] + line2.mid[0])/2)\n row45 = int((line1.mid[1] + line2.mid[1])/2)\n\n columnM45 = int((line3.mid[0] + line4.mid[0])/2)\n rowM45 = int((line3.mid[1] + line4.mid[1])/2)\n\n # print(line1.line, line1.angle, line1.length)\n # print(line3.line, line3.angle, line3.length)\n # cv2.line(img_in,(line1.line[0],line1.line[1]), (line1.line[2], line1.line[3]),(255, 0, 0), 3)\n # cv2.line(img_in,(line2.line[0],line2.line[1]), (line2.line[2], line2.line[3]),(255, 0, 0), 3)\n # cv2.line(img_in,(line3.line[0],line3.line[1]), (line3.line[2], line3.line[3]),(255, 0, 0), 3)\n # cv2.line(img_in,(line4.line[0],line4.line[1]), (line4.line[2], line4.line[3]),(255, 0, 0), 3)\n\n column = (column45 + columnM45)//2 + 1\n row = (row45 + rowM45)//2 + 1\n coordinates = (column, row)\n\n # cv2.circle(img_in, coordinates, 2, (255, 0, 0), 2)\n return coordinates\n raise NotImplementedError", "def _sort_tags_left_to_right(self, detections, id=0):\n BLOCK_IN_CLAW_DIST = 0.22 # meters\n sorted_detections = []\n\n for detection in detections:\n if (detection.id == id and\n detection.pose.pose.position.z > BLOCK_IN_CLAW_DIST):\n sorted_detections.append(detection)\n\n return 
sorted(sorted_detections, key=lambda x: x.pose.pose.position.x)", "def avoid_duplicates_by_angle():\n right = np.transpose(vision.pqr_r)\n left = np.transpose(vision.pqr_l)\n \n #all_blobs = np.zeros((3,1))\n all_blobs = np.empty((3,0))\n all_angles = np.empty(0)\n\n for r in right:\n angle = np.arctan2(r[1], r[0]) * 180 / pi\n if angle > -5:\n all_blobs = np.append(all_blobs, [[r[0]], [r[1]], [r[2]]], axis=1)\n all_angles = np.append(all_angles, angle)\n\n for l in left:\n angle = np.arctan2(l[1], l[0]) * 180 / pi\n if angle < 5:\n all_blobs = np.append(all_blobs, [[l[0]], [l[1]], [l[2]]], axis=1)\n all_angles = np.append(all_angles, angle)\n\n #all_blobs = np.delete(all_blobs, 0, axis=1) # necessary because cannot create empty 3x1 numpy array\n\n return (all_blobs, all_angles)", "def output_angles(frame, analysis_dict, reference):\n y_pos = 20\n for key, value in analysis_dict.items():\n if key in reference.keys():\n text = \"{}: Angle = {:.2f}, Diff = {:.2f}\".format(key, value, value - reference[key])\n cv2.putText(frame, text, (0, y_pos), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 0), 2,\n cv2.LINE_AA)\n cv2.putText(frame, text, (0, y_pos), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 1,\n cv2.LINE_AA)\n y_pos += 20\n return frame", "def yield_sign_detection(img_in):\n thresh1 = 110\n thresh2 = 60\n cannyEdges = cv2.Canny(img_in, thresh1, thresh2)\n # cv2.imshow(\"test\", cannyEdges)\n\n lines = cv2.HoughLinesP(cannyEdges, rho=1, theta=np.pi /90, threshold=30, minLineLength=20, maxLineGap=1)\n\n Line_list_60 = []\n Line_list_M60 = []\n Angle_60 = []\n Angle_M60 = []\n\n for line in lines:\n line = line.flatten()\n line_instance = Line(line)\n\n if line_instance.angle > 35 and line_instance.angle < 85: \n # print(line_instance.line, line_instance.angle)\n Angle_60.append(line_instance.length)\n Line_list_60.append(line_instance)\n\n if line_instance.angle > -85 and line_instance.angle < -35: \n # print(line_instance.line, line_instance.angle) \n Angle_M60.append(line_instance.length)\n Line_list_M60.append(line_instance)\n \n index = np.argsort(Angle_60)\n line1 = Line_list_60[index[-1]].line\n # cv2.line(img_in,(line1[0],line1[1]), (line1[2], line1[3]),(255, 0, 0), 3)\n\n index = np.argsort(Angle_M60)\n line3 = Line_list_M60[index[-1]].line\n # cv2.line(img_in,(line3[0],line3[1]), (line3[2], line3[3]),(255, 0, 0), 3)\n\n # cv2.show('test', img_in)\n X_60 = max(line1[0], line1[2])\n X_M60 = min(line3[0], line3[2])\n column = int ((X_60 + X_M60)/2)\n\n left_Y = min(line1[1], line1[3])\n mid_Y_60 = max(line1[1], line1[3])\n mid_Y_M60 = max(line3[1], line3[3])\n right_Y = min(line3[1], line3[3])\n row = int ((left_Y + (mid_Y_60+mid_Y_M60)/2 + right_Y)/3)\n coordinates = (column, row)\n\n pixels = img_in[row, column, :]\n if pixels[0] > 220 and pixels[1] > 220 and pixels[2] > 220 :\n # cv2.circle(img_in, coordinates, 2, (255, 0, 0), 2)\n return coordinates\n else:\n return None\n\n\n raise NotImplementedError", "def remove_duplicates_by_matching():\n # 1) and 2)\n all_blobs = vision.pqr_r\n all_angles = np.zeros(0)\n right = np.transpose(vision.pqr_r)\n left = np.transpose(vision.pqr_l)\n\n if not right.size and not left.size:\n return (0, 0)\n\n if not right.size:\n for l in left:\n angle = np.arctan2(l[1], l[0]) * 180 / pi\n all_angles = np.append(all_angles, angle)\n return (vision.pqr_l, all_angles)\n\n if not left.size:\n for r in right:\n angle = np.arctan2(r[1], r[0]) * 180 / pi\n all_angles = np.append(all_angles, angle)\n return (vision.pqr_r, all_angles)\n\n\n for r in right:\n 
angle = np.arctan2(r[1], r[0]) * 180 / pi\n all_angles = np.append(all_angles, angle)\n cand_r = np.zeros((3,1))\n if angle < 15:\n cand_r = np.append(cand_r, [[r[0]], [r[1]], [r[2]]], axis=1)\n cand_r = np.delete(cand_r, 0, axis=1)\n cand_r = np.transpose(cand_r)\n\n for l in left:\n angle = np.arctan2(l[1], l[0]) * 180 / pi\n dot = 0\n if angle > -15:\n dl = max(0.001, np.linalg.norm(l))\n for r in cand_r:\n dr = max(0.001, np.linalg.norm(r))\n dot = np.dot(r, l) / (dr * dl)\n print(dot)\n if dot > 0.9:\n continue\n \n if dot <= 0.9:\n all_blobs = np.append(all_blobs, [[l[0]], [l[1]], [l[2]]], axis=1)\n all_angles = np.append(all_angles, angle)\n\n # make even number of blobs if necessary\n #if all_blobs.shape[1] % 2:\n # all_blobs = np.delete(all_blobs, 0, axis=1)\n # all_angles = np.delete(all_angles, 0)\n\n\n\n return (all_blobs, all_angles)", "def _sort_home_tags_nearest_center(self, detections):\n sorted_detections = []\n\n for detection in detections:\n if detection.id == 256:\n sorted_detections.append(detection)\n\n return sorted(sorted_detections,\n key=lambda x: abs(x.pose.pose.position.x))", "def neighbouring_angles(self) -> np.ndarray:\n cosvv = np.dot(self.values, self.values.transpose())\n cosvv.sort(axis=1)\n cosvv = np.flip(cosvv, 1)\n cosvv[cosvv > 1] = 1\n acosvv = np.arccos(cosvv[:, 1])\n self.neighbouring_angles_current = acosvv\n return acosvv", "def _check_calibration():\n image_list = glob.glob(os.path.join(\"C:\\\\Users\\\\chuyangl\\\\Desktop\\\\liushuai\\\\calibrator\\\\board\\\\left\", \"*.bmp\"))\n for single_img in image_list:\n image = cv2.imread(single_img)\n new_image = un_distort_image(image)\n cv2.imshow('before', cv2.resize(image, (int(image.shape[1] * 0.7), int(image.shape[0] * 0.7))))\n cv2.imshow('after', cv2.resize(new_image, (int(new_image.shape[1] * 0.7), int(new_image.shape[0] * 0.7))))\n cv2.waitKey(0)\n\n image = cv2.imread(image_list[0])\n\n # distortion_points = [ge.Point(110, 437), ge.Point(932, 151), ge.Point(1034, 331)]\n # calibration_points = [ge.Point(510, 437), ge.Point(832, 151), ge.Point(1134, 331)]\n\n distortion_points = [ge.Point(110, 437), ge.Point(632, 151), ge.Point(333, 331)]\n calibration_points = [ge.Point(510, 437), ge.Point(532, 151), ge.Point(234, 331)]\n\n for p in distortion_points:\n cv2.circle(image, p.tuple(), 23, (0, 0, 255), 2)\n\n new_image = un_distort_image(image)\n\n for p in calibration_points:\n cv2.circle(new_image, p.tuple(), 23, (255, 0, 0), 4)\n p2 = distort_point(p)\n p3 = un_distort_point(p2)\n cv2.circle(image, p2.int().tuple(), 23, (0, 255, 255), 4)\n cv2.circle(new_image, p3.int().tuple(), 23, (0, 0, 255), 4)\n print(p.int().tuple(), p2.int().tuple(), p3.int().tuple())\n\n for p in distortion_points:\n p2 = un_distort_point(p)\n p3 = distort_point(p2)\n cv2.circle(new_image, p2.int().tuple(), 23, (0, 255, 255), 2)\n cv2.circle(image, p3.int().tuple(), 23, (0, 255, 255), 2)\n print(p.int().tuple(), p2.int().tuple(), p3.int().tuple())\n\n cv2.imshow('before', cv2.resize(image, (int(image.shape[1] * 0.7), int(image.shape[0] * 0.7))))\n cv2.imshow('after', cv2.resize(new_image, (int(new_image.shape[1] * 0.7), int(new_image.shape[0] * 0.7))))\n\n cv2.waitKey(0)", "def detect_and_draw_contours(frame, thresh, meas_last, meas_now, min_area = 0, max_area = 10000, ellipses = False, directors = False):\n # Detect contours and draw them based on specified area thresholds\n img, contours, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n img = cv2.cvtColor(img, 
cv2.COLOR_GRAY2BGR)\n\n final = frame.copy()\n\n i = 0\n meas_last = meas_now.copy()\n del meas_now[:]\n director = 0. \n rx = ry = 0.\n cx = cy = 0.\n\n fname_min_enc_C = \"min_enc_C.dat\"\n f_min_enc_C = open(fname_min_enc_C,'a+')\n R_min_enc_C = x_min_enc_C = y_min_enc_C = 0.\n \n while i < len(contours):\n area = cv2.contourArea(contours[i])\n if area < min_area or area > max_area:\n del contours[i]\n else:\n\n cv2.drawContours(final, contours, i, (0,0,255), 1)\n # add ellipse here\n if ( ellipses ):\n ellipse = cv2.fitEllipse(contours[i])\n cv2.ellipse(final,ellipse,(0,255,0),2)\n M = cv2.moments(contours[i])\n\n # here is the ouput showing minEnclosingCircle, which should\n # basically give a long-axis measurement of any given ellipse\n (x_min_enc_C, y_min_enc_C), R_min_enc_C = cv2.minEnclosingCircle(contours[i]) \n f_min_enc_C.write(\"%e %e %e\\n\" %(x_min_enc_C,y_min_enc_C,R_min_enc_C))\n\n if M['m00'] != 0:\n cx = M['m10']/M['m00']\n cy = M['m01']/M['m00']\n if ( directors ):\n mu20 = M['m20']/M['m00'] - pow(cx,2)\n mu02 = M['m02']/M['m00'] - pow(cy,2)\n mu11 = M['m11']/M['m00'] - cx*cy\n else:\n \tcx = 0\n \tcy = 0\n\n if ( directors ):\n ry = 2*mu11\n rx = mu20-mu02\n if rx == 0:\n atan = 0.5*np.pi\n if ry < 0: atan *= -1 \n director = np.fmod(0.5*atan,2*np.pi) + np.pi\n else:\n director = np.fmod(0.5*np.arctan(ry/rx),2*np.pi) + np.pi\n if (rx < 0):\n director += np.pi/2.\n\n vsize = 10\n cv2.line(final,\n (int(cx - vsize*np.cos(director)), int(cy - vsize*np.sin(director))),\n (int(cx + vsize*np.cos(director)), int(cy + vsize*np.sin(director))), \n (255,0,0),2)\n meas_now.append([cx,cy,director])\n else: \n meas_now.append([cx,cy])\n\n i += 1\n\n f_min_enc_C.close()\n\n fname_ndist = \"ndist.dat\"\n f_ndist = open(fname_ndist,'a+')\n meas_now = np.array(meas_now)\n for i in range(len(meas_now)):\n for j in range(i+1,len(meas_now)):\n f_ndist.write(\"%e \\n\" % distance(meas_now[i,:-1],meas_now[j,:-1]))\n f_ndist.close()\n meas_now = list(meas_now)\n \n return final, contours, meas_last, meas_now", "def sort_filtered_contours(self):\r\n\r\n # Get the contours again\r\n invert = 255 - self.thresh_invert\r\n real_contours = cv2.findContours(invert, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n real_contours = real_contours[0] if len(real_contours) == 2 else real_contours[1]\r\n\r\n # Make sure that they're within the correct range for size\r\n # If too small, it is probably noise; if too large, then should be things around the grid\r\n for i, c in enumerate(real_contours, 1):\r\n contour_area = cv2.contourArea(c)\r\n if self.min_cell_size < contour_area < self.max_cell_size:\r\n self.good_contours.append(c)\r\n\r\n # We assume a square board, so the number of rows/cols should be the square root of total contours/cells\r\n self.board_dimension = int(math.sqrt(len(self.good_contours)))\r\n\r\n # Sort the contours from top to bottom\r\n (half_sorted_contours, _) = contours.sort_contours(self.good_contours, method=\"top-to-bottom\")\r\n\r\n # We then sort each row from left to right\r\n row = []\r\n for i, c in enumerate(half_sorted_contours, 1):\r\n row.append(c)\r\n if i % self.board_dimension == 0:\r\n (full_sorted_contours, _) = contours.sort_contours(row, method=\"left-to-right\")\r\n self.game_board_contours.append(full_sorted_contours)\r\n row = []", "def warning_sign_detection(img_in):\n\n thresh1 = 110\n thresh2 = 60\n cannyEdges = cv2.Canny(img_in, thresh1, thresh2)\n # cv2.imshow(\"test\", cannyEdges)\n\n lines = cv2.HoughLinesP(cannyEdges, rho=1, theta=np.pi /180, 
threshold=40, minLineLength=30, maxLineGap=2)\n\n Line_list = []\n Angle_45 = []\n Angle_M45 = []\n\n for line in lines:\n line = line.flatten()\n line_instance = Line(line)\n if line_instance.length < 500 and line_instance.angle != 0 and WarnSide(img_in,line_instance):\n Line_list.append(line_instance) \n # cv2.line(img_in,(line[0],line[1]), (line[2], line[3]),(255, 0, 0), 2)\n Angle_45.append(np.abs(line_instance.angle - 45))\n Angle_M45.append(np.abs(line_instance.angle + 45))\n\n if len(Angle_45) < 2:\n return None\n if len(Angle_M45) < 2:\n return None\n\n index = np.argsort(Angle_45)\n line1 = Line_list[index[0]]\n line2 = Line_list[index[1]]\n\n index = np.argsort(Angle_M45)\n line3 = Line_list[index[0]]\n line4 = Line_list[index[1]]\n\n # cv2.line(img_in,(line1.line[0],line1.line[1]), (line1.line[2], line1.line[3]),(255, 0, 0), 3)\n # cv2.line(img_in,(line2.line[0],line2.line[1]), (line2.line[2], line2.line[3]),(255, 0, 0), 3)\n # cv2.line(img_in,(line3.line[0],line3.line[1]), (line3.line[2], line3.line[3]),(255, 0, 0), 3)\n # cv2.line(img_in,(line4.line[0],line4.line[1]), (line4.line[2], line4.line[3]),(255, 0, 0), 3)\n\n column45 = int((line1.mid[0] + line2.mid[0])/2)\n row45 = int((line1.mid[1] + line2.mid[1])/2)\n\n columnM45 = int((line3.mid[0] + line4.mid[0])/2)\n rowM45 = int((line3.mid[1] + line4.mid[1])/2)\n\n column = (column45 + columnM45)//2 + 1\n row = (row45 + rowM45)//2 + 1\n coordinates = (column, row)\n # print(img_in[row, column, :])\n # cv2.circle(img_in, coordinates, 2, (255, 0, 0), 2)\n # cv2.imshow('detected lines',img_in)\n\n return coordinates\n raise NotImplementedError", "def stop_sign_detection(img_in):\n\n thresh1 = 110\n thresh2 = 60\n cannyEdges = cv2.Canny(img_in, thresh1, thresh2)\n # cv2.imshow(\"test\", cannyEdges)\n\n lines = cv2.HoughLinesP(cannyEdges, rho=1, theta=np.pi /90, threshold=20, minLineLength=20, maxLineGap=1)\n\n Line_list = []\n Angle_45 = []\n Angle_M45 = []\n\n for line in lines:\n line = line.flatten()\n line_instance = Line(line)\n if line_instance.length < 500 and line_instance.angle != 0 and RedSide(img_in,line_instance):\n Line_list.append(line_instance)\n Angle_45.append(np.abs(line_instance.angle - 45))\n Angle_M45.append(np.abs(line_instance.angle + 45))\n\n if len(Angle_45) < 2:\n return None\n if len(Angle_M45) < 2:\n return None\n \n # index = np.argsort(Angle_45)\n # line1 = Line_list[index[0]]\n # line2 = Line_list[index[1]]\n\n index = np.argsort(Angle_M45)\n line1 = Line_list[index[0]]\n line2 = Line_list[index[1]]\n\n if line1.angle < -50 or line1.angle > -40 or line2.angle < -50 or line2.angle > -40 :\n return None\n\n #Mark the line we use to determine the center\n # cv2.line(img_in,(line1.line[0],line1.line[1]), (line1.line[2], line1.line[3]),(255, 0, 0), 3)\n # cv2.line(img_in,(line2.line[0],line2.line[1]), (line2.line[2], line2.line[3]),(255, 0, 0), 3)\n\n column45 = int((line1.mid[0] + line2.mid[0])/2)\n row45 = int((line1.mid[1] + line2.mid[1])/2)\n\n # columnM45 = int((line3.mid[0] + line4.mid[0])/2)\n # rowM45 = int((line3.mid[1] + line4.mid[1])/2)\n\n # column = (column45 + columnM45)//2 + 1\n # row = (row45 + rowM45)//2 + 1\n coordinates = (column45, row45)\n\n # cv2.circle(img_in, coordinates, 2, (255, 0, 0), 2)\n # cv2.imshow('detected lines',img_in)\n\n return coordinates\n raise NotImplementedError", "def find_targets(contours, frame):\n # If there aren't any contours present, return frame without drawing\n if len(contours) == 0:\n return frame\n # Copy frame, TODO why do we need to do this?\n image = 
frame.copy()\n screen_height, screen_width, _ = image.shape;\n # TODO: Why subtract?\n center_x = screen_width / 2 - .5\n center_y = screen_height / 2 - .5\n # List for storing found targets\n targets = []\n\n if len(contours) >= 2:\n # Sort contours in descending order by size\n contours.sort(key=lambda contour: cv2.contourArea(contour), reverse=True)\n\n valid_contours = []\n for contour in contours:\n # Calculate areas of contour\n contour_area = cv2.contourArea(contour)\n if contour_area >= MIN_CONTOUR_SIZE:\n # Get moments of contour for centroid calculations\n moments = cv2.moments(contour)\n # Find centroid of contour\n if moments[\"m00\"] != 0:\n cx = int(moments[\"m10\"] / moments[\"m00\"])\n cy = int(moments[\"m01\"] / moments[\"m00\"])\n else:\n cx, cy = 0, 0\n\n ### CALCULATE CONTOUR ROTATION BY FITTING ELLIPSE ###\n rotation = get_ellipse_rotation(image, contour)\n\n ### DRAW CONTOUR ###\n # Draw white circle at center of contour\n cv2.circle(image, (cx, cy), 6, (255, 255, 255))\n\n # Draw contour in green\n cv2.drawContours(image, [contour], 0, (0, 200, 0), 1)\n\n # Append important info to array\n valid_contours.append({\"cx\": cx, \"cy\": cy, \"rotation\": rotation})\n\n # Sort array based on coordinates (left to right) to make sure contours are adjacent\n valid_contours.sort(key=lambda contour: contour[\"cx\"])\n\n # Find targets from contours\n for i in range(len(valid_contours) - 1):\n # Check rotation of adjacent contours\n tilt_left = valid_contours[i][\"rotation\"]\n tilt_right = valid_contours[i + 1][\"rotation\"]\n\n # Contour coordinates\n cx_left = valid_contours[i][\"cx\"]\n cx_right = valid_contours[i + 1][\"cx\"]\n cy_left = valid_contours[i][\"cy\"]\n cy_right = valid_contours[i + 1][\"cy\"]\n\n # If contour angles are opposite\n # Negative tilt -> Rotated to the right\n # NOTE: if using rotated rect (min area rectangle), negative tilt means rotated to left\n # If left contour rotation is tilted to the left then skip iteration\n # If right contour rotation is tilted to the right then skip iteration\n if (len(valid_contours) == 2) or (np.sign(tilt_left) != np.sign(tilt_right) and\n not (tilt_left > 0 and cx_left < cx_right or tilt_right > 0 and cx_right < cx_left)):\n\n target_cx = (cx_left + cx_right) / 2\n target_cy = (cy_left + cy_right) / 2\n\n target_yaw = calculate_yaw(target_cx, center_x)\n target_pitch = calculate_pitch(target_cy, center_y)\n\n targets.append({\"cx\": target_cx,\n \"cy\": target_cy,\n \"yaw\": target_yaw,\n \"pitch\": target_pitch})\n\n # Check if there are targets seen\n if len(targets) > 0:\n # Get target with smallest yaw\n nearest_target = min(targets, key=lambda target: math.fabs(target[\"yaw\"]))\n # Write yaw of target in corner of image\n cv2.putText(image, \"Yaw: %.3f\" % nearest_target[\"yaw\"], (1, 12), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255))\n # Draw line at center of target\n cv2.line(image, (int(nearest_target[\"cx\"]), screen_height), (int(nearest_target[\"cx\"]), 0), (255, 0, 0), 1)\n # Draw line at center of screen\n cv2.line(image, (round(center_x), screen_height), (round(center_x), 0), (255, 255, 255), 1)\n\n # Send our final data to NetworkTables\n table.putBoolean(\"target_present\", True)\n table.putNumber(\"targets_seen\", len(targets))\n table.putNumber(\"target_yaw\", nearest_target[\"yaw\"])\n table.putNumber(\"target_pitch\", nearest_target[\"pitch\"])\n else:\n table.putBoolean(\"target_present\", False)\n table.putNumber(\"targets_seen\", 0)\n table.putNumber(\"target_yaw\", 0)\n 
table.putNumber(\"target_pitch\", 0)\n table.putNumber(\"target_distance\", 0)\n\n return image", "def track_aruco(self):\n marker_size = self.marker_size\n # Getting the calibrated parameters\n camera_matrix, dist_matrix = self.extract_calibration()\n cameraIndex, foundCamera = self.checkCamera()\n try:\n cap = cv2.VideoCapture(cameraIndex)\n while (True and foundCamera):\n # Getting a frame from video stream\n ret, frame = cap.read()\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_100)\n parameters = aruco.DetectorParameters_create()\n\n # Lists of ids and the corners belonging to each id\n corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)\n\n # Just enters this condition if any id is found on the camera frame\n if np.all(ids is not None):\n _, tvec = aruco.estimatePoseSingleMarkers(corners[0], marker_size, camera_matrix, dist_matrix)\n #print 'Rotation Vector: ', rvec\n print 'Translation Vector:', tvec\n msgToPublish = Point()\n msgToPublish.x = tvec[0][0][0]\n msgToPublish.z = tvec[0][0][2]\n self.positionPublisher.publish(msgToPublish)\n\n except(KeyboardInterrupt):\n print(\"Interrupt from keyboard, closing caption\")\n cap.release()\n sys.exit(0)", "def update(self, detections):\n\n # if no object detected in the frame\n if len(detections) == 0:\n lost_ids = list(self.lost.keys())\n for objectID in lost_ids:\n self.lost[objectID] += 1\n if self.lost[objectID] > self.maxLost:\n self.removeObject(objectID)\n\n return self.objects\n\n # current object locations\n new_object_locations = np.zeros((len(detections), 2), dtype=\"int\")\n for (i, detection) in enumerate(detections):\n new_object_locations[i] = self.getLocation(detection)\n\n if len(self.objects):\n objectIDs = list(self.objects.keys())\n previous_object_locations = np.array(list(self.objects.values()))\n\n # pairwise distance between previous and current\n D = distance.cdist(previous_object_locations, new_object_locations)\n\n # (minimum distance of previous from current).sort_as_per_index\n row_idx = D.min(axis=1).argsort() # old object idx\n\n # index of minimum distance of previous from current\n col_idx = D.argmin(axis=1)[row_idx] # new object idx sorted as per distance from old object ids\n\n assignedRows, assignedCols = set(), set()\n for (row, col) in zip(row_idx, col_idx):\n\n if row in assignedRows or col in assignedCols:\n continue\n\n objectID = objectIDs[row]\n self.objects[objectID] = new_object_locations[col]\n self.lost[objectID] = 0\n\n assignedRows.add(row)\n assignedCols.add(col)\n\n unassignedRows = set(range(0, D.shape[0])).difference(assignedRows)\n unassignedCols = set(range(0, D.shape[1])).difference(assignedCols)\n\n if D.shape[0] >= D.shape[1]:\n # length of old-detections is more than new-detections\n for row in unassignedRows:\n objectID = objectIDs[row]\n self.lost[objectID] += 1\n\n if self.lost[objectID] > self.maxLost:\n self.removeObject(objectID)\n else:\n for col in unassignedCols:\n self.addObject(new_object_locations[col])\n\n else:\n for i in range(0, len(detections)):\n self.addObject(new_object_locations[i])\n\n return self.objects", "def _detect(self, corners, ids, imgWithAruco):\n if len(corners) > 0:\n x1 = (corners[0][0][0][0], corners[0][0][0][1])\n x2 = (corners[0][0][1][0], corners[0][0][1][1])\n x3 = (corners[0][0][2][0], corners[0][0][2][1])\n x4 = (corners[0][0][3][0], corners[0][0][3][1])\n\n # Drawing detected frame white color\n # OpenCV stores color images in Blue, Green, 
Red\n cv2.line(imgWithAruco, x1, x2, (255, 0, 0), 1)\n cv2.line(imgWithAruco, x2, x3, (255, 0, 0), 1)\n cv2.line(imgWithAruco, x3, x4, (255, 0, 0), 1)\n cv2.line(imgWithAruco, x4, x1, (255, 0, 0), 1)\n\n # font type hershey_simpex\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(imgWithAruco, 'C1', x1, font, 1, (255, 255, 255), 1,\n cv2.LINE_AA)\n cv2.putText(imgWithAruco, 'C2', x2, font, 1, (255, 255, 255), 1,\n cv2.LINE_AA)\n cv2.putText(imgWithAruco, 'C3', x3, font, 1, (255, 255, 255), 1,\n cv2.LINE_AA)\n cv2.putText(imgWithAruco, 'C4', x4, font, 1, (255, 255, 255), 1,\n cv2.LINE_AA)\n\n if ids is not None: # if aruco marker detected\n rvec, tvec, _ = cv2.aruco.estimatePoseSingleMarkers(\n corners, self.marker_length, self.camera_matrix,\n self.dist_coeffs)\n\n # -- draw the coordinate of aruco\n imgWithAruco = cv2.aruco.drawAxis(imgWithAruco,\n self.camera_matrix,\n self.dist_coeffs, rvec, tvec,\n self.marker_length)\n\n # --- The midpoint displays the ID number\n cornerMid = (int((x1[0] + x2[0] + x3[0] + x4[0]) / 4),\n int((x1[1] + x2[1] + x3[1] + x4[1]) / 4))\n\n cv2.putText(imgWithAruco, \"id=\" + str(ids), cornerMid,\n font, 1, (255, 255, 255), 1, cv2.LINE_AA)\n\n rvec = rvec[0][0]\n tvec = tvec[0][0]\n\n # --- Print the tag position in camera frame\n str_position = \"MARKER Position x=%.4f (cm) y=%.4f (cm) z=%.4f (cm)\" % (\n tvec[0] * 100, tvec[1] * 100, tvec[2] * 100)\n\n # -- Obtain the rotation matrix tag->camera\n R_ct = np.matrix(cv2.Rodrigues(rvec)[0])\n R_tc = R_ct.T\n\n # -- Get the attitude in terms of euler 321 (Needs to be flipped first)\n roll_marker, pitch_marker, yaw_marker = self._rotation_matrix_to_euler_angles(\n self.R_flip * R_tc)\n\n # -- Print the marker's attitude respect to camera frame\n str_attitude = \"MARKER Attitude degrees r=%.4f p=%.4f y=%.4f\" % (\n math.degrees(roll_marker), math.degrees(pitch_marker),\n math.degrees(yaw_marker))\n '''\n print(str_position)\n print(\"rotation x=%.4f (degree) \" % \n (math.degrees( math.atan(tvec[0]/tvec[2]))))\n print(str_attitude)\n print(math.degrees(pitch_marker)+math.degrees( math.atan(tvec[0]/tvec[2])))\n print(\"-----------------------------------------------\")\n '''\n pose_data = [None, None, None, None]\n pose_data[0] = tvec[0] * 100\n pose_data[1] = tvec[1] * 100\n pose_data[2] = tvec[2] * 100\n pose_data[3] = math.degrees(pitch_marker)\n \n self.pose_data_dict[ids] = pose_data\n return (tvec[0] * 100, tvec[1] * 100, tvec[2] * 100), \\\n (math.degrees(roll_marker),\n math.degrees(pitch_marker),\n math.degrees(yaw_marker))\n \n\n else:\n self.pose_data[0] = None\n self.pose_data[1] = None\n self.pose_data[2] = None\n self.pose_data[3] = None\n #self.pose_data_list[0] = self.pose_data\n self.pose_data_dict[0] = self.pose_data\n return None", "def get_bounding_boxes(vehicles, camera):\n '''\n bounding_boxes = np.array([ClientSideBoundingBoxes.get_bounding_box(vehicle, camera) for vehicle in vehicles])\n # filter objects behind camera\n inscene_indices = np.array([1 if all(bb[:, 2]) > 0 else 0 for bb in bounding_boxes])\n inscene_indices = np.argwhere(inscene_indices == 1)\n bounding_boxes_visible = [bb for ]\n rotations = [vehicle.get_transform().rotation for vehicle in vehicles]\n rotation_decomposed = np.array([[rotation.yaw, rotation.roll, rotation.pitch] for rotation in rotations])\n rotations_visible = rotation_decomposed[inscene_indices]\n return bounding_boxes_visible, rotations_visible\n '''\n bounding_boxes = [ClientSideBoundingBoxes.get_bounding_box(vehicle, camera) for vehicle in vehicles]\n # 
filter objects behind camera\n rotations = []\n for vehicle in vehicles:\n transform = vehicle.get_transform()\n rotation = transform.rotation\n rotations.append([rotation.yaw, rotation.roll, rotation.pitch])\n inscene_indices = []\n for i in range(len(bounding_boxes)):\n bb = bounding_boxes[i]\n if all(bb[:, 2] > 0):\n inscene_indices.append(1)\n else:\n inscene_indices.append(0) \n inscene_boxes = [bounding_boxes[i] for i in range(len(bounding_boxes)) if inscene_indices[i] == 1]\n return inscene_boxes, np.array(rotations)[inscene_indices]", "def comparison():\n path = \"Data/data_fronts/\"\n path1 = \"Results/labelled_images1010/fronts/\"\n\n #computes the areas for the first frame in order to normalize the other areas\n pol0 = pd.DataFrame(pd.read_csv(path1 + \"fronts_labelled.m.0.png.txt\",sep =' '))\n #makes an object polygon in order to compute the area\n pol0 = np.array(pol0)\n pol0 = Polygon(pol0)\n\n polsx = pd.DataFrame(pd.read_csv(path + \"Sham_8-2-18_Field 5_1_sx.txt\",sep ='\\t'))\n polsx.columns = [\"y\",\"x\"]\n poldx = pd.DataFrame(pd.read_csv(path + \"Sham_8-2-18_Field 5_1_dx.txt\",sep ='\\t'))\n poldx.columns = [\"y\",\"x\"]\n #makes an object polygon in order to compute the area\n polsx = polsx.append(poldx)\n polsx = np.array(polsx)\n pol1 = Polygon(polsx)\n\n\n areas = []\n areas_hand = []\n #computes the areas for all the frames\n for i in range(42):\n pol = pd.DataFrame(pd.read_csv(path1 + \"fronts_labelled.m.\"+str(i)+\".png.txt\",sep =' '))\n pol = np.array(pol)\n pol = Polygon(pol)\n #normalize the areas with respect to the area of the first frame\n areas.append(pol.area/pol0.area)\n\n polsx = pd.DataFrame(pd.read_csv(path + \"Sham_8-2-18_Field 5_\"+str(i+1)+\"_sx.txt\",sep ='\\t'))\n polsx.columns = [\"y\",\"x\"]\n poldx = pd.DataFrame(pd.read_csv(path + \"Sham_8-2-18_Field 5_\"+str(i+1)+\"_dx.txt\",sep ='\\t'))\n poldx.columns = [\"y\",\"x\"]\n if poldx[\"x\"][0]>100:\n poldx = poldx.reindex(index=poldx.index[::-1])\n if polsx[\"x\"][0]<100:\n polsx = polsx.reindex(index=polsx.index[::-1])\n polsx = polsx.append(poldx)\n polsx = np.array(polsx)\n\n pol2 = Polygon(polsx)\n #normalize the areas with respect to the area of the first frame\n areas_hand.append(pol2.area/pol1.area)\n #returns the two arrays with the normalized areas\n return np.array(areas) , np.array(areas_hand)", "def get_rotation_angle(prev_image, curr_image, size_of_cropped_image):\n max_value = np.amax(prev_image)\n\n if prev_image.dtype == 'float' and max_value <= 1:\n prev_image = np.uint8(prev_image * 255)\n curr_image = np.uint8(curr_image * 255)\n\n if prev_image.dtype == 'float' and max_value > 1:\n prev_image = np.uint8(prev_image)\n curr_image = np.uint8(curr_image)\n\n prev_image = cv.equalizeHist(prev_image)\n curr_image = cv.equalizeHist(curr_image)\n\n # Initiate ORB detector\n orb = cv.ORB_create(nfeatures=200)\n\n # find the keypoints and descriptors with ORB\n kp1, des1 = orb.detectAndCompute(prev_image, None)\n kp2, des2 = orb.detectAndCompute(curr_image, None)\n\n # do feature matching\n bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)\n matches = bf.match(des1, des2)\n matches = sorted(matches, key=lambda x: x.distance)\n\n # calculate perspective transform matrix\n src_pts = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)\n dst_pts = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)\n transform_matrix, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 5.0)\n\n if transform_matrix is None:\n transform_matrix, mask = 
cv.findHomography(src_pts, dst_pts, 0)\n\n if transform_matrix is None:\n transform_matrix, mask = cv.findHomography(src_pts, dst_pts, 0)\n\n vector_along_x_axis_from_center = \\\n np.float32([[size_of_cropped_image / 2, size_of_cropped_image / 2],\n [size_of_cropped_image, size_of_cropped_image / 2]]).reshape(-1, 1, 2)\n vector_transformed = cv.perspectiveTransform(vector_along_x_axis_from_center, transform_matrix)\n\n theta = - np.arctan2(vector_transformed[1, 0, 1] - vector_transformed[0, 0, 1],\n vector_transformed[1, 0, 0] - vector_transformed[0, 0, 0]) * 180 / np.pi\n # negative sign is to make the sign of the angle to correspond to one in a right-handed coordinate system\n return theta", "def _draw_detections(frame, frame_detections):\n boxColor = (0,255,0)\n for box in frame_detections:\n cv2.rectangle(frame,(int(box[0]),int(box[1])),(int(box[2]),int(box[3])),boxColor,7)\n # cv2.rectangle(frame,(int(box[0]),int(box[1])),(int(box[2]),int(box[3])),boxColor,7)\n cv2.putText(frame,str(format(box[4],'.2f')),(int(box[0]),int(box[3]+20)),cv2.FONT_HERSHEY_SIMPLEX,0.6,boxColor,1,cv2.LINE_AA)\n\n return frame", "def get_refined_detection(detections, im_shape, conf):\n refined_detection = []\n for dets in detections:\n score = dets[-1]\n if score<conf:\n continue\n dets = constraint_boxes(dets, im_shape)\n if check_area(dets, im_shape) is False:\n continue\n refined_detection.append(dets)\n refined_detection = np.array(refined_detection)\n return refined_detection", "def shiftDetectorONH(frame, onh_info, x_onh_bounds):\n\n x_min = x_onh_bounds[0]-30\n x_max = x_onh_bounds[1]+30\n frame_len = frame.shape[1]\n mid_x = int(frame_len/2)\n\n norm = frame/np.max(frame)#(2**16)\n #if the frame midpoint is inside the bbox x bounds\n #this section is to avoid using any part of the onh as the a-scan to reference when doing the cross-correlation\n if mid_x>=x_min and mid_x<=x_max:\n d_min = mid_x-x_min\n d_max = x_max-mid_x\n #if mid_x is closer to x_min but not close to the edge of the image -- at least 75 px\n if d_min<d_max and x_min>75:\n acol = int((frame_len/2)-(d_min+1))\n elif x_max<frame_len-75:\n acol = int((frame_len/2)+(d_max+1))\n else:\n acol = int((frame_len/2)-(d_min+1))\n anchorCol = norm[:,acol]\n else:\n anchorCol = norm[:,mid_x]\n shifts = [np.argmax(signal.correlate(norm[:,i],anchorCol,mode='same'))-int((frame.shape[0])/2) for i in range(frame_len)]\n\n #if onh detection is bad, bbox might be huge. 
The onh area should be less that 10% of the image (256*1024 pixels)\n if onh_info.area/(2**18) > 0.10:\n return shifts\n #old, changed 1-29-2018 because this is really about location, not size\n #if x_min<100 or x_max>902:\n #return shifts\n\n #This ensures that clean_shifts and clean_x are the same length and comes into play when the ONH is basically touching the\n #side of the image.\n #if the onh is too far to the right side of the frame, only use the left side info\n #fit a quadratic to get LOCAL curvature\n if x_max>=frame_len-100:\n #this uses the entire bscans to get the curvature, otherwise it will fit very poorly\n clean_x = np.arange(0,x_min,1)\n curve_fit_params = np.polyfit(clean_x, shifts[0:x_min],2)\n curve_fit = lambda x: curve_fit_params[0]*x**2 + curve_fit_params[1]*x + curve_fit_params[2]\n corrected_shifts = np.round(curve_fit(np.arange(x_min,x_max+1,1))).astype('int')\n clean_shifts = shifts\n clean_shifts[x_min:x_max+1]=corrected_shifts\n #if the onh is too far to the left side, only use right side info\n elif x_min<100:\n clean_x = np.arange(x_max+1,frame_len,1)\n curve_fit_params = np.polyfit(clean_x, shifts[x_max+1:frame_len],2)\n curve_fit = lambda x: curve_fit_params[0]*x**2 + curve_fit_params[1]*x + curve_fit_params[2]\n corrected_shifts = np.round(curve_fit(np.arange(x_min,x_max+1,1))).astype('int')\n clean_shifts = shifts\n clean_shifts[x_min:x_max+1]=corrected_shifts\n #Everything is normal, everyone is happy.\n else:\n #need to cut out onh, I don't think there is a way to index this to put it\n #directly in polyfit\n clean_shifts = np.array(shifts[0:x_min] + shifts[x_max+1:frame_len])\n clean_x = np.concatenate((np.arange(x_min-100,x_min,1),np.arange(x_max+1,x_max+101,1)))\n curve_fit_params = np.polyfit(clean_x, clean_shifts[x_min-100:x_min+100],3)\n curve_fit = lambda x: curve_fit_params[0]*x**3 + curve_fit_params[1]*x**2 + curve_fit_params[2]*x + curve_fit_params[3]\n #!!astype added 4-18-19 because floats throw an error when correcting shifts\n corrected_shifts = np.round(curve_fit(np.arange(x_min,x_max+1,1))).astype('int')\n clean_shifts = np.insert(clean_shifts, x_min+1, corrected_shifts)\n\n return list(clean_shifts)", "def __car_detection(self, autonomous_states_queue):\n list_of_possible_plates = DetectPlates.detectPlatesInScene(self.__current_frame)\n list_of_possible_plates = DetectChars.detectCharsInPlates(list_of_possible_plates)\n\n list_of_possible_plates.sort(key=lambda possiblePlate: len(possiblePlate.strChars), \\\n reverse=True)\n\n if len(list_of_possible_plates) > 0:\n #at least one car\n lic_plate = list_of_possible_plates[0]\n frame_shape = self.__current_frame.shape\n self.__plate_coords = cv2.boxPoints(lic_plate.rrLocationOfPlateInScene)\n self.__distance_to_car = frame_shape[0] - self.__plate_coords[3][1] # in pixels\n self.__distance_to_car = self.__distance_to_car\n self.__distance_to_car = float(\"{0:.2f}\".format(self.__distance_to_car))\n self.__cruise_ndf_contor = 0\n else:\n # make sure that the algoritm doesn't fail for a specific frame\n self.__cruise_ndf_contor = self.__cruise_ndf_contor + 1\n if self.__cruise_ndf_contor > 5:\n self.__distance_to_car = 1000\n self.__cruise_ndf_contor = 0", "def cw_rotate(self):\n self.grid = [list(x) for x in zip(*self.grid[::-1])]\n self.find_edges()", "def extractFeatures(bwimage):\n \n \n # circularity\n img = bwimage.copy()\n img1, contours, hierarchy = cv2.findContours(img, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)\n \n if len(contours)==0:\n return []\n B = contours[0]\n C = B[:,0,0]\n l 
= C.size\n \n \n if abs(B[0,0,0] - B[l-1,0,0]) + abs(B[0,0,1] - B[l-1,0,1]) == 2:\n P8 = math.sqrt(2)\n else:\n P8 = 1 \n for j in range(0,l-1): \n if abs((B[j+1,0,0] - B[j,0,0])) + abs(B[j+1,0,1] - B[j,0,1]) == 2:\n P8 = P8 + math.sqrt(2)\n else:\n P8 = P8 + 1\n \n n = np.count_nonzero(bwimage)\n \n circularity = P8*P8/n\n \n \n # elongation\n idx = np.nonzero(bwimage);\n c = idx[1]\n r = idx[0]\n meanx = np.mean(c)\n meany = np.mean(r)\n \n \n pows = 2*np.ones(n)\n \n sigxx = np.sum(np.power((c-meanx),pows))/n\n sigyy = np.sum(np.power((r-meany),pows))/n\n sigxy = np.sum(np.multiply((r-meany),(c-meanx)))/n\n \n covMat = np.array([[sigxx, sigxy], [sigxy, sigyy]])\n val, vects = np.linalg.eig(covMat);\n \n maxEigenValue = np.amax(val) \n minEigenValue = np.amin(val.ravel()[np.flatnonzero(val)])\n \n \n elongation = math.sqrt(maxEigenValue/minEigenValue);\n \n \n # principal axis\n maxidx = np.argmax(val)\n principalAxisVector = vects[maxidx]\n \n \n return [circularity, elongation, principalAxisVector]", "def pan_corr(file):\n\n # # infile = 'd:\\\\Projekti\\\\Satelit\\\\CO\\\\Razpis\\\\Flat field images_new2020\\\\flatfield\\\\NHDBflat_1D'\n # # infile = 'd:\\Projekti\\Satelit\\CO\\Razpis\\_POSNETKI\\Jure_naloga_banje_raw_pyt\\\\NHDRGoreMorje_3D'\n #\n # # in_path = 'd:\\Projekti\\Satelit\\CO\\Razpis\\Flat field images_new2020\\\\20201028 Vignetting\\\\flatfield\\\\'\n # # in_pan_ref_file = 'NHDPflat_3D_py.tif'\n # in_path = 'd:\\Projekti\\Satelit\\CO\\Razpis\\_POSNETKI\\Peking_PAN\\\\'\n # in_pan_ref_file = 'NHDPfoc_swp6_1D_py.tif'\n # in_ref = in_path + in_pan_ref_file\n #\n # inreffil = gdal.Open(in_ref)\n # image_ref = inreffil.ReadAsArray()\n # # size_ref = image_ref.shape\n # # pix_count = size_ref[0]*size_ref[1]\n #\n # image_ref = image_ref[800:930, 1420:1640]\n # size_ref = image_ref.shape\n # pix_count = size_ref[0] * size_ref[1]\n #\n # g1 = 0.\n # g2 = 0.\n # r1 = 0.\n # b1 = 0.\n #\n # for i in range(size_ref[0]):\n # for j in range(size_ref[1]):\n # if (i % 2) == 0 and (j % 2) == 0: g1 = g1 + image_ref[i, j]\n # if (i % 2) == 1 and (j % 2) == 1: g2 = g2 + image_ref[i, j]\n # if (i % 2) == 0 and (j % 2) == 1: r1 = r1 + image_ref[i, j]\n # if (i % 2) == 1 and (j % 2) == 0: b1 = b1 + image_ref[i, j]\n #\n # g1_avg = g1 / pix_count * 4\n # g2_avg = g2 / pix_count * 4\n # r1_avg = r1 / pix_count * 4\n # b1_avg = b1 / pix_count * 4\n #\n # raz_g1 = 1\n # raz_g2 = g1_avg/g2_avg\n # raz_r1 = g1_avg/r1_avg\n # raz_b1 = g1_avg/b1_avg\n #\n # avg = (g1+g2+r1+b1)/pix_count\n #\n # print(g1_avg, g2_avg, r1_avg, b1_avg, avg)\n\n raz_g1 = 1\n raz_g2 = 1.0245196396115988\n raz_r1 = 1.0131841989689434\n raz_b1 = 1.0517113199247086\n\n print('razmerje:', raz_g1, raz_g2, raz_r1, raz_b1)\n\n # in_path = 'd:\\Projekti\\Satelit\\CO\\Razpis\\_POSNETKI\\Peking_PAN\\\\'\n # in_pan_ref_file = 'NHDPfoc_swp6_4D_py.tif'\n # in_path = 'd:\\Projekti\\Satelit\\CO\\Razpis\\Flat field images_new2020\\\\20201028 Vignetting\\\\flatfield\\\\'\n # in_pan_ref_file = 'NHDPflat_3D_py.tif'\n\n # in_path = 'd:\\Projekti\\Satelit\\CO\\Razpis\\_POSNETKI\\Slo_PAN\\_26_30\\\\'\n # in_pan_ref_file = [filename for filename in os.listdir(in_path) if filename.lower().startswith(\"nhd\") and filename.lower().endswith(\"tif\")]\n\n \n\n \n\n # print('image', i)\n in_ref=file\n inreffil = gdal.Open(in_ref)\n image_ref = inreffil.ReadAsArray()\n size_ref = image_ref.shape\n # pix_count = size_ref[0] * size_ref[1]\n # pix_count = np.count_nonzero(image_ref)\n # pix_count = 3664*650\n\n # g1 = 0.\n # g2 = 0.\n # r1 = 0.\n # b1 = 
0.\n #\n # for i in range(size_ref[0]):\n # for j in range(size_ref[1]):\n # if (i % 2) == 0 and (j % 2) == 0: g1 = g1 + image_ref[i, j]\n # if (i % 2) == 1 and (j % 2) == 1: g2 = g2 + image_ref[i, j]\n # if (i % 2) == 0 and (j % 2) == 1: r1 = r1 + image_ref[i, j]\n # if (i % 2) == 1 and (j % 2) == 0: b1 = b1 + image_ref[i, j]\n #\n # g1_avg = g1 / pix_count * 4\n # g2_avg = g2 / pix_count * 4\n # r1_avg = r1 / pix_count * 4\n # b1_avg = b1 / pix_count * 4\n #\n # avg = (g1 + g2 + r1 + b1) / pix_count\n #\n # print(g1_avg, g2_avg, r1_avg, b1_avg, avg)\n\n # popravek\n im_p_pop = np.zeros((size_ref[0], size_ref[1]), np.uint16)\n\n\n for i in range(size_ref[0]):\n for j in range(size_ref[1]):\n if (i % 2) == 0 and (j % 2) == 0 and image_ref[i, j] != 0: im_p_pop[i, j] = image_ref[i, j] * raz_g1\n if (i % 2) == 1 and (j % 2) == 1 and image_ref[i, j] != 0: im_p_pop[i, j] = image_ref[i, j] * raz_g2\n if (i % 2) == 0 and (j % 2) == 1 and image_ref[i, j] != 0: im_p_pop[i, j] = image_ref[i, j] * raz_r1\n if (i % 2) == 1 and (j % 2) == 0 and image_ref[i, j] != 0: im_p_pop[i, j] = image_ref[i, j] * raz_b1\n \n _,_,_,_,P=return_flatfield_set_path(2)\n P_flat=gdal_array.LoadFile(P)\n \n # im_p_pop=simple_flatfield_corr(P_flat, im_p_pop, 2, 1) \n \n # outout\n \n im_p_pop=BLUE_simple_flatfield_corr(P_flat, im_p_pop)\n \n out=os.path.abspath(file)+\"/corr/\"+os.path.basename(file)[:-4] + \"_pop_flat_corr.tif\"\n\n \n # out = in_ref[:-4] + \"_pop_flat_corr.tif\"\n\n driver = gdal.GetDriverByName('GTiff')\n\n # outRaster = driver.Create(out, size[1], size[0], 3, gdal.GDT_UInt16)\n outRaster = driver.Create(out, size_ref[1], size_ref[0], 1, gdal.GDT_UInt16)\n\n outband = outRaster.GetRasterBand(1)\n outband.WriteArray(im_p_pop)\n outband.FlushCache()", "def sort(contours):\n\treturn sorted(contours, reverse=True, key=len)", "def find_cards(thresh_image):\n\n # Find contours and sort their indices by contour size\n cnts,hier = cv2.findContours(thresh_image,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n index_sort = sorted(range(len(cnts)), key=lambda i : cv2.contourArea(cnts[i]),reverse=True)\n\n print(\"Contour length\", len(cnts))\n\n # If there are no contours, do nothing\n if len(cnts) == 0:\n return [], []\n\n # Otherwise, initialize empty sorted contour and hierarchy lists\n cnts_sort = []\n hier_sort = []\n cnt_is_card = np.zeros(len(cnts),dtype=int)\n\n # Fill empty lists with sorted contour and sorted hierarchy. Now,\n # the indices of the contour list still correspond with those of\n # the hierarchy list. The hierarchy array can be used to check if\n # the contours have parents or not.\n for i in index_sort:\n cnts_sort.append(cnts[i])\n hier_sort.append(hier[0][i])\n\n # Determine which of the contours are cards by applying the\n # following criteria: 1) Smaller area than the maximum card size,\n # 2), bigger area than the minimum card size, 3) have no parents,\n # and 4) have four corners\n\n for i in range(len(cnts_sort)):\n size = cv2.contourArea(cnts_sort[i])\n peri = cv2.arcLength(cnts_sort[i],True)\n approx = cv2.approxPolyDP(cnts_sort[i],0.01*peri,True)\n\n # (size < CARD_MAX_AREA) and (size > CARD_MIN_AREA)\n # and\n # and (hier_sort[i][3] == -1)\n # and (len(approx) == 4)\n if ((size > CARD_MIN_AREA)):\n print('[inside loop]',size)\n cnt_is_card[i] = 1\n\n return cnts_sort, cnt_is_card" ]
[ "0.5472272", "0.5462623", "0.54448014", "0.5388763", "0.535019", "0.53095686", "0.52818936", "0.52364", "0.52262443", "0.51484215", "0.51208395", "0.5072665", "0.5054126", "0.49976072", "0.49777424", "0.4968893", "0.4947722", "0.49220458", "0.4889088", "0.4872785", "0.48708507", "0.4853", "0.48497406", "0.48386866", "0.48344848", "0.48324993", "0.4795366", "0.47942147", "0.47803882", "0.47800338" ]
0.6696489
0
Add more connection endpoints. Connection may have many endpoints, mixing protocols and types.
def addEndpoints(self, endpoints): self.endpoints.extend(endpoints) self._connectOrBind(endpoints)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def connections_endpoints(request: web.BaseRequest):\n context: AdminRequestContext = request[\"context\"]\n connection_id = request.match_info[\"conn_id\"]\n\n profile = context.profile\n connection_mgr = ConnectionManager(profile)\n try:\n endpoints = await connection_mgr.get_endpoints(connection_id)\n except StorageNotFoundError as err:\n raise web.HTTPNotFound(reason=err.roll_up) from err\n except (BaseModelError, StorageError, WalletError) as err:\n raise web.HTTPBadRequest(reason=err.roll_up) from err\n\n return web.json_response(dict(zip((\"my_endpoint\", \"their_endpoint\"), endpoints)))", "def endpoints(self, endpoints):\n\n self._endpoints = endpoints", "def _attach_endpoints(self):\n for name, endpoint in inspect.getmembers(self):\n is_class = inspect.isclass(endpoint)\n is_subclass = is_class and issubclass(endpoint, self.Endpoint)\n not_endpoint = endpoint is not self.Endpoint\n\n if is_subclass and not_endpoint:\n endpoint_instance = endpoint(self.session)\n setattr(self, name.lower(), endpoint_instance)", "def add_endpoints(self, hostip, username=None, password=None):\n if hostip in self.endpoints:\n log.info(\"%s is already added.\", hostip)\n return\n\n username = username or self._ep_username\n password = password or self._ep_password\n\n try:\n with LydianClient(hostip) as client:\n # fetch regular interfaces\n self._add_endpoints(client, hostip)\n\n self._ep_hosts[hostip] = hostip\n\n except Exception as err:\n log.error(\"Error in adding endpoint %s - %r\", hostip, err)", "def add_endpoint_set(self, other_endpoint_set):\n for fep in other_endpoint_set.get_flask_endpoints():\n other_endpoint = other_endpoint_set.get_endpoint(fep)\n self.add_endpoint(other_endpoint)\n return", "def _add_connections(self, connections):\r\n\r\n for node1, node2 in connections:\r\n self._add(node1, node2)", "def add_connections(self, connections):\r\n\r\n for node1, node2 in connections:\r\n self.add(node1, node2)", "def add_endpoint(self, endpoint):\n self._endpoints.append(endpoint)", "def add_connections(self, connections):\r\n\r\n for node1, node2, w in connections:\r\n self.add(node1, node2, w)", "def test_basic_add_multiple_endpoint(self):\n args = self.get_args()\n config = self.create_config_file_before()\n self.write_config_file(config, args)\n collector = execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac1 = '00:11:22:33:33:34'\n ip1 = '3.4.3.5'\n self.add_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg')\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry_before(mac1, ip1))\n self.assertTrue(self.verify_remote_site_has_entry_before(mac2, ip2))\n\n config = self.create_config_file_after()\n self.write_config_file(config, args)\n collector.reload_config()\n time.sleep(2)\n self.assertTrue(self.verify_remote_site_has_entry_after(mac1, ip1))\n self.assertTrue(self.verify_remote_site_has_entry_after(mac2, ip2))", "def serviceConnects(self):\n #log.debug(f\"{self.name}: servicing new connections for.\")\n for ca, ix in list(self.server.ixes.items()):\n if ix.cutoff:\n self.closeConnection(ca)\n continue\n\n if ca not in self.connections:\n log.debug(f\"Adding new connection for {ix}.\")\n self.connections[ca] = Requester(self.dhtdoer, remoter=ix, name=ca)\n\n if ix.timeout > 0.0 and ix.tymer.expired:\n self.closeConnection(ca)", "def add_endpoint(self, endpoint, **kwargs):\n endpoint.arguments = kwargs\n 
self.endpoints.append(endpoint)", "def add_plugin_to_endpoints(endpoints, plugin):\n for endpoint in endpoints:\n endpoint.update({\n 'plugin': plugin,\n })\n \n return endpoints", "def test_basic_add_multiple_endpoint(self):\n mac1, ip1 = self.setup_with_endpoint()\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))", "def test_basic_add_multiple_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac1 = '00:11:22:33:33:34'\n ip1 = '3.4.3.5'\n self.add_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg')\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry_with_provided_contract(mac1, ip1, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg', 'contract-1'))\n self.assertTrue(self.verify_remote_site_has_entry_with_provided_contract(mac2, ip2, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg', 'contract-1'))", "def test_basic_add_multiple_endpoint(self):\n mac1, ip1 = self.setup_with_endpoint()\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg2')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg2'))", "def _connectOrBind(self, endpoints):\n for endpoint in endpoints:\n if endpoint.type == ZmqEndpointType.connect:\n self.socket.connect(endpoint.address)\n elif endpoint.type == ZmqEndpointType.bind:\n self.socket.bind(endpoint.address)\n else:\n assert False, \"Unknown endpoint type %r\" % endpoint", "def _add_connection(self, con):\n # get connectors by the above specified labels\n start = self.connector_by_label(con[0])\n end = self.connector_by_label(con[1])\n if start.parent_type == 'box' and end.parent_type == 'box':\n # make sure, that not two inputs or two outputs are connected\n if start.connector_type == end.connector_type:\n raise ConnectorError(f\"Connection {con} connects \"\n f\"input to input or output to output.\")\n # make sure, that inputs are always first\n # and outputs are always second\n elif (start.connector_type == 'output'\n or end.connector_type == 'input'):\n start, end = end, start\n # make sure, that a switch does not connect to itself\n elif start.parent_type == 'switch' and end.parent_type == 'switch':\n if start.switch == end.switch:\n raise ConnectorError(f\"Connection {con} connects \"\n f\"a switch to itself.\")\n\n # create connection\n connection = ArduinoSwitchControlConnection(start, end)\n\n # add connection to attributes\n self.connections.append(connection)", "def _add_connections(top, matches, conn_type):\n for sorted_conn in matches:\n to_add_conn = CONNS[conn_type](\n connection_members=[top.sites[idx] for idx in sorted_conn]\n )\n top.add_connection(to_add_conn, update_types=False)", "def endpoints(self):\n return self[\"endpoints\"]", "def 
add_endpoint(self, ip: str, port: int = None, dnsNames: List[str] = None, port_list: PortList = None,\n range_list: PortRangeTupleList = None) -> None:\n\n self._is_reported = False;\n if self._is_valid_ip(ip):\n _single_port = self._validate_port(port)\n _port_list = self._validate_port_list(port_list)\n _port_range_list = self._validate_port_range_list(range_list)\n\n port_count = 0\n #calculate ports already added\n for (__ip, __single_port, __dnsNames, __port_list, __port_range_list) in self.endpoints:\n port_count += self._port_count(__single_port, __port_list, __port_range_list)\n\n #calucate current ports\n port_count += self._port_count(_single_port, _port_list, _port_range_list)\n if port_count > PORT_LIMIT:\n raise ConfigException(\"The number of ports in endpoint per device cannot exceed \" + str(PORT_LIMIT))\n self.endpoints.append((ip, _single_port, dnsNames, _port_list, _port_range_list))\n else:\n logger.info(\"add_endpoint rejected due to invalid IP address: \" + str(ip))", "def updateConnections(self, *connections):\n\n # Verify if ports are valid, otherwise do nothing.\n for connection in connections:\n for k1, v1 in connection.items():\n if v1 not in k1.ports:\n logger.error(\"Port '%s' is not in '%s: %s'\", v1, k1, k1.ports)\n raise RuntimeError(\"Port '{}' is not in '{}: {}'\".format(v1, k1, k1.ports))\n\n # Remove old conflicting connections\n def check_if_port_is_not_connected(connection, k1, v1):\n for k2, v2 in connection.items():\n if (k1, v1) == (k2, v2):\n logger.warning(\"Deleting existing connection %s.\", connection)\n return False\n return True\n for connection in connections:\n for k1, v1 in connection.items():\n connectioncheck2 = lambda connection: check_if_port_is_not_connected(\n connection, k1, v1)\n self.connections[:] = [x for x in self.connections if connectioncheck2(x)]\n\n # Add new connections\n for connection in connections:\n if connection not in self.connections:\n self.connections.append(connection)\n else:\n logger.warning(\"Connection already exists: %s\", connection)\n return True", "def endpoints(self) -> pulumi.Input[Sequence[pulumi.Input['EndpointDependencyArgs']]]:\n return pulumi.get(self, \"endpoints\")", "def test_basic_add_multiple_endpoint(self):\n args = self.get_args()\n config = self.create_config_file('l3out1')\n self.write_config_file(config, args)\n collector = execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac1 = '00:11:22:33:33:34'\n ip1 = '3.4.3.5'\n self.add_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg')\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',\n 'l3out1', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',\n 'l3out1', 'intersite-testsuite-app-epg'))\n\n config = self.create_config_file('l3out2')\n self.write_config_file(config, args)\n collector.reload_config()\n time.sleep(2)\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',\n 'l3out2', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',\n 'l3out2', 'intersite-testsuite-app-epg'))", "def add_conn(self, a1, a2):\n if self.use_pconn:\n raise ValueError(\"Can not add bonds to systems with pconn - well, we can fix this ;) \")\n self.conn[a1].append(a2)\n self.conn[a2].append(a1)\n d,v,imgi = 
self.get_distvec(a1,a2)\n self.pconn[a1].append(images[imgi])\n d,v,imgi = self.get_distvec(a2,a1)\n self.pconn[a2].append(images[imgi])\n logger.warning('pconn may not be properly updated!!!')\n return", "async def _track_and_propagate_available_endpoints(self) -> None:\n async for ev in self._endpoint.stream(EventBusConnected):\n self._available_endpoints = self._available_endpoints + (ev.connection_config,)\n self.logger.debug(\"New EventBus Endpoint connected %s\", ev.connection_config.name)\n # Broadcast available endpoints to all connected endpoints, giving them\n # a chance to cross connect\n await self._endpoint.broadcast(AvailableEndpointsUpdated(self._available_endpoints))\n self.logger.debug(\"Connected EventBus Endpoints %s\", self._available_endpoints)", "def addConnection(tagA, tagB): #@NoSelf", "def register_endpoints(api):\n api.add_resource(EventList, '/events')", "def __SetEndpoints(self,\n version):\n\n if version==2:\n endpoints = {\"heads\":'top-headlines?',\"search\":'everything?',\"source\":'sources?'}\n elif version==1:\n endpoints = {\"search\":'articles?',\"source\":'sources?'}\n\n return endpoints", "def add_endpoint(self, endpoint):\n name = endpoint.get_name()\n self._calls[name] = endpoint" ]
[ "0.6581431", "0.62893325", "0.6268082", "0.6254711", "0.6199637", "0.6168774", "0.6061122", "0.60520715", "0.6042876", "0.6022899", "0.5892089", "0.5878343", "0.5851649", "0.58505166", "0.58410054", "0.5814967", "0.57680947", "0.5753131", "0.572161", "0.57154876", "0.57008517", "0.5614917", "0.558262", "0.5581818", "0.5558196", "0.55265766", "0.54771996", "0.5465599", "0.5446631", "0.5444319" ]
0.7782683
0
Read multipart in nonblocking manner, returns with ready message or raising exception (in case of no more messages available).
def _readMultipart(self): while True: self.recv_parts.append(self.socket.recv(constants.NOBLOCK)) if not self.socket_get(constants.RCVMORE): result, self.recv_parts = self.recv_parts, [] return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def doRead(self):\n if self.read_scheduled is not None:\n if not self.read_scheduled.called:\n self.read_scheduled.cancel()\n self.read_scheduled = None\n\n while True:\n if self.factory is None: # disconnected\n return\n\n events = self.socket_get(constants.EVENTS)\n\n if (events & constants.POLLIN) != constants.POLLIN:\n return\n\n try:\n message = self._readMultipart()\n except error.ZMQError as e:\n if e.errno == constants.EAGAIN:\n continue\n\n raise e\n\n log.callWithLogger(self, self.messageReceived, message)", "async def Read(self) -> Optional[Message]:\n return await self._read_queue.Get()", "def read(self):\r\n assert self.status in (WAIT_LEN, WAIT_MESSAGE)\r\n if self.status == WAIT_LEN:\r\n self._read_len()\r\n # go back to the main loop here for simplicity instead of\r\n # falling through, even though there is a good chance that\r\n # the message is already available\r\n elif self.status == WAIT_MESSAGE:\r\n read = self.socket.recv(self.len - len(self.message))\r\n if len(read) == 0:\r\n logging.error(\"can't read frame from socket (get %d of %d bytes)\" %\r\n (len(self.message), self.len))\r\n self.close()\r\n return\r\n self.message += read\r\n if len(self.message) == self.len:\r\n self.status = WAIT_PROCESS", "def read(self):\n method_frame, header_frame, body = self._channel.basic_get(self.name)\n if method_frame:\n pika_message = PikaMessage(body,\n delivery_info=method_frame,\n properties=header_frame,\n )\n return pika_message.to_message()\n else:\n return None", "async def read_one_message(self):\n if not self.connected:\n return None\n\n try:\n header = await self.reader.readexactly(2)\n except SocketError as err:\n if err.errno == errno.ECONNRESET:\n self.log.error('Connection reset by peer')\n self.connected = False\n if err.errno == errno.EHOSTUNREACH:\n self.log.error('Spa unreachable')\n self.connected = False\n else:\n self.log.error('Spa socket error: {0}'.format(str(err)))\n return None\n except Exception as e:\n self.log.error('Spa read failed: {0}'.format(str(e)))\n return None\n\n if header[0] == M_START:\n # header[1] is size, + checksum + M_END (we already read 2 tho!)\n rlen = header[1]\n else:\n return None\n\n # now get the rest of the data\n try:\n data = await self.reader.readexactly(rlen)\n except Exception as e:\n self.log.errpr('Spa read failed: {0}'.format(str(e)))\n return None\n\n full_data = header + data\n # don't count M_START, M_END or CHKSUM (remember that rlen is 2 short)\n crc = messages.Message.crc(full_data[1:rlen - 1])\n if crc != full_data[-2]:\n self.log.error('Message had bad CRC, discarding')\n return None\n\n # self.log.error('got update: {}'.format(full_data.hex()))\n return full_data", "def read(self):\n assert self.status in (WAIT_LEN, WAIT_MESSAGE)\n\n if self.status == WAIT_LEN:\n self._read_len()\n # go back to the main loop here for simplicity instead of\n # falling through, even though there is a good chance that\n # the message is already available\n elif self.status == WAIT_MESSAGE:\n read = self.socket.recv(self.len - len(self.message))\n if len(read) == 0:\n logging.error(\"can't read frame from socket\" +\n \" (got %d of %d bytes)\" %\n (len(self.message), self.len))\n self.close()\n return\n self.message += read\n if len(self.message) == self.len:\n self._set_status(WAIT_PROCESS)", "def get(self, block=True, timeout=None):\n if block and timeout is None:\n self.message = self.handle.read(wait_time_seconds=20)\n while self.message is None:\n self.message = self.handle.read(wait_time_seconds=20)\n elif block and 1 
<= timeout <= 20:\n self.message = self.handle.read(wait_time_seconds=timeout)\n elif not block and timeout is None:\n self.message = self.handle.read(wait_time_seconds=0)\n else:\n raise Exception('invalid arguments')\n if self.message is None:\n raise Empty\n return self.message.get_body()", "async def fetch_next_part(self) -> Union[MultipartReader, BodyPartReader]:\n ...", "async def read(self) -> None:\n make_non_blocking(self.stream)\n\n while not self.stream.closed:\n message = None\n try:\n message = await self.read_one()\n\n if not message:\n await self.sleep()\n continue\n else:\n self.wake()\n\n IOLoop.current().add_callback(self.queue.put_nowait, message)\n except Exception as e: # pragma: no cover\n self.log.exception(\n \"%s couldn't enqueue message: %s (%s)\", self, message, e\n )\n await self.sleep()", "def read(self):\n if not self._consuming:\n yield from self._begin_consuming()\n return (yield from self._message_queue.get())", "def _read(self):\n if not self.connected:\n raise IOError(\"Not connected.\")\n\n try:\n with self._read_lock:\n if not self.connected:\n raise IOError(\"Not connected.\")\n data = self._reader.readline(self.MAX_MESSAGE_SIZE)\n # If there weren't a \"\\r\\n\" between the last message\n # and the EOF we would have a false positive here.\n # Luckily there is one.\n if len(data) > 0 and not data.endswith(b\"\\r\\n\"):\n logger.error(\n \"The client sent a message larger than %d bytes (that \"\n \"is MAX_MESSAGE_SIZE). Consider raising that value if \"\n \"the message seemed legit.\", self.MAX_MESSAGE_SIZE)\n self.finalize(\"Client misbehaving.\")\n raise IOError(\"Message too long.\")\n except socket.error as error:\n if self.connected:\n logger.warning(\"Failed reading from socket: %s.\", error)\n self.finalize(\"Read failed.\")\n raise error\n else:\n # The client was terminated willingly; its correct termination\n # is handled in disconnect(), so here we can just return.\n return b\"\"\n\n return data", "async def _read_content(\n self, length: int, max_parts=1000, max_empties=200\n ) -> Optional[bytes]:\n raw = None\n raw_parts: List[bytes] = []\n received_size = 0\n while received_size < length and len(raw_parts) < max_parts and max_empties > 0:\n part = None\n try:\n part = self.stream.read(length - received_size)\n except OSError: # pragma: no cover\n pass\n if part is None:\n max_empties -= 1\n await self.sleep()\n continue\n received_size += len(part)\n raw_parts.append(part)\n\n if raw_parts:\n raw = b\"\".join(raw_parts)\n if len(raw) != length: # pragma: no cover\n self.log.warning(\n f\"Readout and content-length mismatch: {len(raw)} vs {length};\"\n f\"remaining empties: {max_empties}; remaining parts: {max_parts}\"\n )\n\n return raw", "def read_message(self):\n\n # Read content length...\n content_length_binary = self.sck.recv(self.MESSAGE_LENGTH_SIZE)\n\n while len(content_length_binary) < self.MESSAGE_LENGTH_SIZE:\n content_length_binary += self.sck.recv(self.MESSAGE_LENGTH_SIZE - len(content_length_binary))\n\n content_length = struct.unpack('>HH', content_length_binary)[1]\n\n # Read content in full...\n content_binary = self.sck.recv(self.BUFFER_SIZE)\n\n while len(content_binary) < content_length:\n content_binary += self.sck.recv(self.BUFFER_SIZE)\n\n msg = json.loads(content_binary)\n logging.info(\"Receive: {0}\".format(msg))\n\n return msg", "async def read_one(self) -> Text:\n message = \"\"\n headers = HTTPHeaders()\n\n line = await convert_yielded(self._readline())\n\n if line:\n while line and line.strip():\n 
headers.parse_line(line)\n line = await convert_yielded(self._readline())\n\n content_length = int(headers.get(\"content-length\", \"0\"))\n\n if content_length:\n raw = await self._read_content(length=content_length)\n if raw is not None:\n message = raw.decode(\"utf-8\").strip()\n else: # pragma: no cover\n self.log.warning(\n \"%s failed to read message of length %s\",\n self,\n content_length,\n )\n\n return message", "def try_read(self):\r\n pos = self._fp.tell()\r\n try:\r\n return self.read()\r\n except RecordIO.PrematureEndOfStream as e:\r\n log.debug('Got premature end of stream [%s], skipping - %s' % (self._fp.name, e))\r\n self._fp.seek(pos)\r\n return None", "def test_fetchBodyPartOfNonMultipart(self):\n self.function = self.client.fetchSpecific\n self.messages = '1'\n parts = [1]\n outerBody = b'DA body'\n headers = OrderedDict()\n headers['from'] = 'sender@host'\n headers['to'] = 'recipient@domain'\n headers['subject'] = 'booga booga boo'\n headers['content-type'] = 'text/plain'\n self.msgObjs = [FakeyMessage(\n headers, (), None, outerBody, 123, None)]\n\n self.expected = {0: [['BODY', ['1'], 'DA body']]}\n\n def result(R):\n self.result = R\n\n self.connected.addCallback(\n lambda _: self.function(self.messages, headerNumber=parts))\n self.connected.addCallback(result)\n self.connected.addCallback(self._cbStopClient)\n self.connected.addErrback(self._ebGeneral)\n\n d = loopback.loopbackTCP(self.server, self.client, noisy=False)\n d.addCallback(lambda ign: self.assertEqual(self.result, self.expected))\n return d", "def recv(socket, flags=0, capture=(lambda msgs: None)):\n msgs = eintr_retry_zmq(socket.recv_multipart, flags)\n capture(msgs)\n return parse(msgs)", "async def next(self) -> Optional[Union[MultipartReader, BodyPartReader]]:\n ...", "async def next(self) -> Optional[Union[MultipartReader, BodyPartReader]]:\n ...", "def read_message(self):\n\n while True:\n try:\n return sirf.from_bytes(self._read_binary_sirf_msg())\n except sirf.UnrecognizedMessageException:\n pass", "def read_msg(self):\n if self.state == 'connected':\n if 0 == len(self.buf):\n self.buf = self.inout.recv(Mtcpfns.TCP_MAX_PACKET)\n if 0 == (self.buf):\n self.state = 'disconnected'\n raise EOFError\n pass\n self.buf, data = Mtcpfns.unpack_msg(self.buf)\n return data\n else:\n raise IOError(\"read_msg called in state: %s.\" % self.state)", "def recv_multipart(self, flags=0, copy=True, track=False):\n return self._add_recv_event('recv_multipart',\n dict(flags=flags, copy=copy, track=track)\n )", "async def read(self) -> bytes:\n\n # NOTE(kgriffs): Yield to other tasks to give them a chance to\n # send us more body chunks if any are available.\n #\n # https://bugs.python.org/issue34476\n #\n await asyncio.sleep(0)\n\n if self._chunk_pos >= len(self._chunks):\n return b''\n\n data = b''.join(self._chunks[self._chunk_pos :])\n self._chunk_pos = len(self._chunks)\n\n return data", "def read(self, n):\n assert self._read_future is None, \"Concurrent reads detected\"\n\n read_future = Future(self._loop)\n\n if self._unread_bytes or self._eof_recvd:\n read_future.set_result(self._unread_bytes)\n self._unread_bytes = b''\n else:\n self._read_future = read_future\n def read_future_done(_):\n self._read_future = None\n read_future.add_done_callback(read_future_done)\n\n return read_future", "def read(self, *args, **kwargs):\n return self.limitedstream.read(*args, **kwargs)", "def read_message(self):\n text_length_bytes = self.input_fh.read(4)\n logging.debug(\"raw 4: %s\", text_length_bytes)\n if not 
text_length_bytes:\n # this means exit\n shutdown()\n\n text_length = struct.unpack(\"i\", text_length_bytes)[0]\n logging.debug(\"reading message of length: %s\", text_length)\n msg = self.input_fh.read(text_length).decode()\n logging.debug(\"message is %s\", msg)\n return msg", "def try_read_message(self):\n return sirf.from_bytes(self._read_binary_sirf_msg())", "def read_message(self):\n def read_data(lnth):\n data = self.pipe_in.read(lnth)\n if len(data) < lnth:\n raise EofError\n return data\n \n data = read_data(struct.calcsize(\"i\"))\n msgLnth = struct.unpack(\"i\", data)[0]\n data = read_data(msgLnth)\n\n # Ack\n try: self.pipe_out.write('a')\n except IOError: pass\n\n import cPickle\n obj = cPickle.loads(data)\n return obj", "def read(self):\n while True:\n size_bytes = self.connection.read(Frame.SIZE_WIDTH)\n # Read will return zero bytes when the other side of the connection\n # closes.\n if not size_bytes:\n break\n\n message_length = read_number_string(size_bytes, Frame.SIZE_WIDTH)\n\n chunk = self.connection.read(message_length - Frame.SIZE_WIDTH)\n if not chunk:\n raise ProtocolException(\n 'Expected %d bytes available, got none' % message_length\n )\n\n if len(chunk) != message_length - Frame.SIZE_WIDTH:\n raise ProtocolException(\n 'Expected %d bytes, got %d' %\n (len(chunk), message_length - Frame.SIZE_WIDTH)\n )\n\n yield Frame.decode(BytesIO(chunk), message_length)", "def server_read(conn):\n message_complete = False\n message = []\n\n while not message_complete:\n part = conn.recv(BUFF_LENGTH)\n message.append(part)\n if len(part) < BUFF_LENGTH:\n break\n\n return b\"\".join(message)" ]
[ "0.7202878", "0.6593352", "0.6501937", "0.64703834", "0.63924545", "0.6380673", "0.6375113", "0.63678604", "0.6358247", "0.6302495", "0.6219391", "0.6152119", "0.6034115", "0.6021782", "0.6014445", "0.594972", "0.59420466", "0.5936738", "0.5936738", "0.59349895", "0.59290695", "0.5908291", "0.58996326", "0.58755857", "0.5868521", "0.58608013", "0.5845207", "0.58387035", "0.5815902", "0.5767752" ]
0.72168154
0
Connect and/or bind socket to endpoints.
def _connectOrBind(self, endpoints): for endpoint in endpoints: if endpoint.type == ZmqEndpointType.connect: self.socket.connect(endpoint.address) elif endpoint.type == ZmqEndpointType.bind: self.socket.bind(endpoint.address) else: assert False, "Unknown endpoint type %r" % endpoint
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bind(self):\n self._conn = socket.socket(socket.AF_INET, self.protocol.value)\n try:\n self._conn.bind((self.host, self.port))\n except OSError as e:\n self.close()\n raise BindError(str(e))\n self._conn.setblocking(False)\n self._conn.listen(100)\n self._selector.register(self._conn, selectors.EVENT_READ, self.accept)\n\n # Event callback.\n self.event_callback[ConnectionEvent.ON_BIND](self._conn)\n\n self._mainloop()", "def connect(self) -> None:\n self.s.connect((self.ip, self.port))", "def _connect_socket(self):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.connect((self.ip, self.port))\n print(\"Connected to %s at port %d\" % (self.ip, self.port))", "def connect(self):\n self.ipv4 = socket.gethostbyname(socket.gethostname())\n self.addr = (self.ipv4, HttpServer.PORT)\n self.server.bind(self.addr)\n print(\"[SETUP] server bound to IPv4 address\", self.ipv4, \"on port\", HttpServer.PORT)\n self.server.listen()\n print(\"[SETUP] server listening for connections\")", "def connect(self):\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.connect((self.host, PORT)) # probably throws errors\n self.connected = True", "def socket_bind(self):\n try:\n self.socket.bind((self.host, self.port))\n self.socket.listen(5)\n except socket.error as e:\n print(\"Socket binding error: \" + str(e))\n time.sleep(5)\n self.socket_bind()\n return", "def connect(self):\n self.socket.connect((\"localhost\",self.PORT_NUM))", "def connect(self):\n try:\n self.sock.connect((self.hostname, self.port))\n print 'connected to ' + self.hostname\n except socket.gaierror as e:\n print(\"Recieved error when connecting to \" + str((self.hostname, self.port)))\n raise e", "def connect(self) -> None:\n self.client_socket.connect((self.server_name, self.server_port))", "def _connect(self):\n try:\n #print(\"try to connect _connect\")\n sock = gevent.socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect(self.remote_address)\n except socket.error as error:\n logger.warning(\"Couldn't connect to %s: %s.\",\n self._repr_remote(), error)\n else:\n self.initialize(sock, self.remote_service_coord)", "def _connect(self):\n\n if self.connecting:\n rlog(10, self.name, 'already connecting')\n raise AlreadyConnecting()\n\n if self.connected:\n rlog(10, self.name, 'already connected')\n raise AlreadyConnected()\n\n self.stopped = 0\n self.connecting = True\n self.connectok.clear()\n self.connectlock.acquire()\n\n # create socket\n if self.ipv6:\n rlog(10, self.name, 'creating ipv6 socket')\n self.oldsock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)\n self.ipv6 = 1\n else:\n rlog(10, self.name, 'creating ipv4 socket')\n self.oldsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n assert(self.oldsock)\n\n # optional bind\n server = self.server\n elite = self.cfg['bindhost'] or config['bindhost']\n if elite:\n try:\n self.oldsock.bind((elite, 0))\n except socket.gaierror:\n rlog(10, self.name, \"can't bind to %s\" % elite)\n # resolve the IRC server and pick a random server\n if not server:\n # valid IPv6 ip?\n try: socket.inet_pton(socket.AF_INET6, self.server)\n except socket.error: pass\n else: server = self.server\n if not server: \n # valid IPv4 ip?\n try: socket.inet_pton(socket.AF_INET, self.server)\n except socket.error: pass\n else: server = self.server\n if not server:\n # valid hostname?\n ips = []\n try:\n for item in socket.getaddrinfo(self.server, None):\n if item[0] in [socket.AF_INET, socket.AF_INET6] and item[1] == 
socket.SOCK_STREAM:\n ip = item[4][0]\n if ip not in ips: ips.append(ip)\n except socket.error: pass\n else: server = random.choice(ips)\n\n # do the connect .. set timeout to 30 sec upon connecting\n rlog(10, self.name, 'connecting to %s (%s)' % (server, self.server))\n self.oldsock.settimeout(5)\n self.oldsock.connect((server, int(self.port)))\n\n # we are connected\n rlog(10, self.name, 'connection ok')\n time.sleep(1)\n self.connected = True\n\n # make file socket\n self.fsock = self.oldsock.makefile(\"r\")\n\n # set blocking\n self.oldsock.setblocking(self.blocking)\n self.fsock._sock.setblocking(self.blocking)\n\n # set socket time out\n if self.blocking:\n socktimeout = self.cfg['socktimeout']\n if not socktimeout:\n socktimeout = 301.0\n else:\n socktimeout = float(socktimeout)\n self.oldsock.settimeout(socktimeout)\n self.fsock._sock.settimeout(socktimeout)\n # enable ssl if set\n if self.ssl:\n rlog(10, self.name, 'ssl enabled')\n self.sock = socket.ssl(self.oldsock) \n else:\n self.sock = self.oldsock\n\n # try to release the outputlock\n try:\n self.outputlock.release()\n except thread.error:\n pass\n\n # start input and output loops\n start_new_thread(self._readloop, ())\n start_new_thread(self._outloop, ())\n\n # logon and start monitor\n self._logon()\n self.nickchanged = 0\n self.reconnectcount = 0\n saymonitor.start()\n return 1", "def bind_sockets(port, address=..., family=..., backlog=..., flags=..., reuse_port=...):\n ...", "def _socket_connect(endpoint: urllib.parse.ParseResult) -> typing.Union[ssl.SSLSocket, socket.socket]:\n address = endpoint.netloc.split(':')\n if endpoint.scheme == 'https':\n if len(address) == 1:\n address.append(443)\n context = ssl.SSLContext(ssl.PROTOCOL_TLS)\n context.verify_mode = ssl.CERT_REQUIRED\n context.check_hostname = True\n context.load_default_certs()\n sock = socket.socket()\n connection = context.wrap_socket(sock, server_hostname=address[0])\n else:\n if len(address) == 1:\n address.append(80)\n connection = socket.socket()\n if isinstance(address[1], str):\n address[1] = int(address[1])\n connection.connect((address[0], address[1]))\n return connection", "def setup_socket(self):\n self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.server_socket.bind((self.config['HOST_NAME'], self.config['BIND_PORT']))\n self.server_socket.listen(10)", "def __connect():\n # Create socket\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n # Connect socket to server\n sock.connect((SERVER_IP, SERVER_PORT))\n\n # Return connected socket\n return sock", "def bind_server(self):\n self.MAIN_CONNECTION.bind((self.HOST, self.PORT))", "def connect(self):\n if not self._socket:\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._socket.connect((self.host, self.port))\n self._socket.settimeout(0.0)", "def connect(self):\n \n try:\n self.__sock.connect((self.__host, self.__port))\n\n except socket.error,e:\n print 'Oops, unable to connect. 
Try again!',e\n sys.exit(1)", "def initialize_socket(self):\n try:\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.sock.bind((self.host, self.port))\n self.sock.listen(10)\n except socket.error, (value, message):\n if self.sock:\n self.sock.close()\n # TODO: LOG and provide means for graceful failure\n print \"Unable to open socket: \" + message\n print \"Error value: \" + str(value)", "def initialize_socket(self):\n try:\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.sock.bind((self._host, self._port))\n self.sock.listen(10)\n except socket.error, (value, message):\n if self.sock:\n self.sock.close()\n # TODO: LOG and provide means for graceful failure\n print \"Unable to open socket: \" + message\n print \"Error value: \" + str(value)", "def connect(self):\n self.sock = s.socket(s.AF_INET,s.SOCK_STREAM)\n self.sock.connect((self.remote_host,\n self.remote_port))", "def connect_socket(self):\n try:\n self.socket.connect((self.request.host, int(self.request.port)))\n except socket.gaierror:\n raise socket.gaierror(\"Socket connection could not be established\")\n except socket.timeout:\n raise socket.timeout(\"Socket connection timed out\")\n except InterruptedError:\n raise InterruptedError(\"Socket connection has been interrupted by a signal\")", "def connect(self) -> bool:\n if self.socket is None:\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.bind((self.host, self.port))\n self.socket.setblocking(False)\n self.socket.listen(1)\n if self.endpoint is None:\n if self.socket is not None:\n try:\n self.endpoint, _ = self.socket.accept()\n self.connected = True\n return True\n except (BlockingIOError, OSError):\n pass\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.bind((self.host, self.port))\n self.socket.setblocking(False)\n self.socket.listen(1)\n return self.connected", "def connect(self):\n # Standard implementation from HTTPSConnection, which is not\n # designed for extension, unfortunately\n sock = socket.create_connection((self.host, self.port),\n self.timeout, self.source_address)\n if getattr(self, '_tunnel_host', None):\n self.sock = sock\n self._tunnel()\n\n # This is the only difference; default wrap_socket uses SSLv23\n self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1_2)", "def connect(self):\n sock = socket.create_connection((self.host, self.port))\n try:\n self.sock = ssl.wrap_socket(sock, keyfile=self.key_file,\n certfile=self.cert_file,\n cert_reqs=self.cert_reqs,\n ca_certs=self.ca_certs)\n except ssl.SSLError, e:\n raise Error('Error validating SSL certificate for \"' + self.host +\n '\": ' + str(e))\n\n if self.cert_reqs == ssl.CERT_REQUIRED:\n self._VerifyHostName(self.host, self.sock.getpeercert())", "def init_connexion():\n connexion = socket(AF_INET, SOCK_STREAM)\n connexion.bind((hote, port))\n\n return connexion", "def run(self):\n HOST = 'localhost' # Symbolic name meaning all available interfaces\n PORT = 54123 # Arbitrary non-privileged port\n \n \n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind((HOST, PORT))\n while(self.running):\n s.listen(1)\n conn, addr = s.accept()\n self.listen_to_connection(conn)\n conn.close()\n s.close()", "def connect(self):\n try:\n self.sock = socket.create_connection((self.host, self.port), self.connect_timeout)\n except 
SocketTimeout:\n raise InnerConnectionTimeoutError()\n\n if self.timeout is socket._GLOBAL_DEFAULT_TIMEOUT:\n self.sock.settimeout(socket.getdefaulttimeout())\n else:\n self.sock.settimeout(self.timeout)", "def _bind_sockets(self):\n # Bind to test servers\n for r in self.scenario.ranges:\n for addr in r.addresses:\n family = socket.AF_INET6 if ':' in addr else socket.AF_INET\n self.start_srv((addr, 53), family)\n self.start_srv((addr, 53), family, proto=socket.IPPROTO_TCP)\n\n # Bind addresses in ad-hoc REPLYs\n for s in self.scenario.steps:\n if s.type == 'REPLY':\n reply = s.data[0].message\n for rr in itertools.chain(reply.answer,\n reply.additional,\n reply.question,\n reply.authority):\n for rd in rr:\n if rd.rdtype == dns.rdatatype.A:\n self.start_srv((rd.address, 53), socket.AF_INET)\n self.start_srv((rd.address, 53), socket.AF_INET,\n proto=socket.IPPROTO_TCP)\n elif rd.rdtype == dns.rdatatype.AAAA:\n self.start_srv((rd.address, 53), socket.AF_INET6)\n self.start_srv((rd.address, 53), socket.AF_INET6,\n proto=socket.IPPROTO_TCP)", "def connect(self):\n if self._sock is None:\n if not self.proxy_host:\n host = self.host\n port = self.port\n else:\n host = self.proxy_host\n port = self.proxy_port\n \n sock = socket.create_connection((host, port), 5)\n proto = None\n\n if self.secure:\n assert not self.proxy_host, \"Using a proxy with HTTPS not yet supported.\"\n sock, proto = wrap_socket(sock, host, self.ssl_context)\n\n log.debug(\"Selected protocol: %s\", proto)\n sock = BufferedSocket(sock, self.network_buffer_size)\n\n if proto not in ('http/1.1', None):\n raise TLSUpgrade(proto, sock)\n\n self._sock = sock\n\n return" ]
[ "0.7130075", "0.7112576", "0.6801621", "0.6775071", "0.6772604", "0.6653766", "0.6643562", "0.66249806", "0.66118973", "0.659476", "0.6579499", "0.6578894", "0.6578408", "0.6554303", "0.6552846", "0.654279", "0.6523097", "0.651956", "0.64632034", "0.6457388", "0.64059734", "0.63950956", "0.63913727", "0.638867", "0.6383634", "0.6379974", "0.6368037", "0.63407934", "0.6323593", "0.6323528" ]
0.7718711
0
get a single word's wordnet POS (PartofSpeech) tag.
def get_wordnet_pos(self, word): # token = word_tokenize(word) base_tag = pos_tag([word])[0][1][:2] return self.pos_tag_dict.get(base_tag, wordnet.NOUN)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_wordnet_pos(word):\n tag = nltk.pos_tag([word])[0][1][0].upper()\n tag_dict = {\"J\": wordnet.ADJ,\n \"N\": wordnet.NOUN,\n \"V\": wordnet.VERB,\n \"R\": wordnet.ADV}\n return tag_dict.get(tag, wordnet.NOUN)", "def get_wordnet_pos(word):\n tag = nltk.pos_tag([word])[0][1][0].upper()\n tag_dict = {\"J\": wordnet.ADJ,\n \"N\": wordnet.NOUN,\n \"V\": wordnet.VERB,\n \"R\": wordnet.ADV}\n\n return tag_dict.get(tag, wordnet.NOUN)", "def get_wordnet_pos(word):\n tag = nltk.pos_tag([word])[0][1][0].upper()\n tag_dict = {\"J\": wordnet.ADJ,\n \"N\": wordnet.NOUN,\n \"V\": wordnet.VERB,\n \"R\": wordnet.ADV}\n\n return tag_dict.get(tag, wordnet.NOUN)", "def get_wordnet_pos(word):\n tag = nltk.pos_tag([word])[0][1][0].upper()\n tag_dict = {\"J\": wordnet.ADJ,\n \"N\": wordnet.NOUN,\n \"V\": wordnet.VERB,\n \"R\": wordnet.ADV}\n\n return tag_dict.get(tag, wordnet.NOUN)", "def get_wordnet_pos(word):\n tag = nltk.pos_tag([word])[0][1][0].upper()\n tag_dict = {\"J\": wordnet.ADJ,\n \"N\": wordnet.NOUN,\n \"V\": wordnet.VERB,\n \"R\": wordnet.ADV}\n\n return tag_dict.get(tag, wordnet.NOUN)", "def get_wordnet_pos(word):\n tag = nltk.pos_tag([word])[0][1][0].upper()\n tag_dict = {\"J\": wordnet.ADJ,\n \"N\": wordnet.NOUN,\n \"V\": wordnet.VERB,\n \"R\": wordnet.ADV}\n\n return tag_dict.get(tag, wordnet.NOUN)", "def get_wordnet_pos(word):\n tag = nltk.pos_tag([word])[0][1][0].upper()\n tag_dict = {\"J\": wordnet.ADJ,\n \"N\": wordnet.NOUN,\n \"V\": wordnet.VERB,\n \"R\": wordnet.ADV}\n\n return tag_dict.get(tag, wordnet.NOUN)", "def nltk_get_wordnet_pos(word):\n tag = nltk.pos_tag([word])[0][1][0].upper()\n # Convert NOTK to wordnet POS notations\n tag_dict = {\"J\": wordnet.ADJ,\n \"N\": wordnet.NOUN,\n \"V\": wordnet.VERB,\n \"R\": wordnet.ADV}\n return tag_dict.get(tag, wordnet.NOUN) # Default to noun if not found", "def get_wordnet_pos(self, word):\r\n tag = nltk.pos_tag([word])[0][1][0].upper()\r\n tag_dict = {\r\n \"J\": wordnet.ADJ,\r\n \"N\": wordnet.NOUN,\r\n \"V\": wordnet.VERB,\r\n \"R\": wordnet.ADV,\r\n }\r\n\r\n return tag_dict.get(tag, wordnet.NOUN)", "def findPOS(word):\r\n\t\r\n lisPOS = list(wordtags[word])\r\n if \"ADJ\" in lisPOS:\r\n return \"ADJECTIVE\"\r\n if \"ADV\" in lisPOS:\r\n return \"ADVERB\"\r\n if \"NOUN\" in lisPOS:\r\n return \"NOUN\"", "def get_wordnet_pos(word: str) -> Dict[str, Any]:\n tag = nltk.pos_tag([word])[0][1][0].upper()\n tag_dict = {\"J\": wordnet.ADJ,\n \"N\": wordnet.NOUN,\n \"V\": wordnet.VERB,\n \"R\": wordnet.ADV}\n\n return tag_dict.get(tag, wordnet.NOUN)", "def get_wordnet_pos(pos):\n tag = pos.upper()\n tag_dict = {\"J\": wordnet.ADJ,\n \"N\": wordnet.NOUN,\n \"V\": wordnet.VERB,\n \"R\": wordnet.ADV}\n\n return tag_dict.get(tag, wordnet.NOUN)", "def get_wordnet_pos(tag):\n tag_dict = {\"J\": wordnet.ADJ,\n \"N\": wordnet.NOUN,\n \"V\": wordnet.VERB,\n \"R\": wordnet.ADV}\n\n return tag_dict.get(tag.upper(), wordnet.NOUN)", "def _get_wordnet_pos(self, tag):\n tag = tag[0].upper()\n \n if tag == \"J\":\n return wordnet.ADJ\n elif tag == \"N\":\n return wordnet.NOUN\n elif tag == \"V\":\n return wordnet.VERB\n elif tag == \"R\":\n return wordnet.ADV\n else:\n return wordnet.NOUN", "def get_wordnet_pos(treebank_tag):\n if treebank_tag.startswith('J'):\n return wordnet.ADJ\n elif treebank_tag.startswith('V'):\n return wordnet.VERB\n elif treebank_tag.startswith('N'):\n return wordnet.NOUN\n elif treebank_tag.startswith('R'):\n return wordnet.ADV\n else:\n # As default pos in lemmatization is Noun\n return wordnet.NOUN", "def 
get_wordnet_pos(treebank_tag):\n if(treebank_tag.startswith('J')):\n return wordnet.ADJ\n elif(treebank_tag.startswith('V')):\n return wordnet.VERB\n elif(treebank_tag.startswith('N')):\n return wordnet.NOUN\n elif(treebank_tag.startswith('R')):\n return wordnet.ADV\n else:\n return wordnet.NOUN", "def get_wordnet_pos(treebank_tag):\n if treebank_tag.startswith('J'):\n return wordnet.ADJ\n elif treebank_tag.startswith('V'):\n return wordnet.VERB\n elif treebank_tag.startswith('N'):\n return wordnet.NOUN\n elif treebank_tag.startswith('R'):\n return wordnet.ADV\n elif treebank_tag.startswith('S'):\n return wordnet.ADJ\n else:\n return wordnet.NOUN", "def get_tag_for_word(self, word: str):\n doc = self.model(word)\n for token in doc:\n return token.pos_", "def get_wordnet_pos(treebank_tag):\n\n if treebank_tag.startswith('J'):\n return wordnet.ADJ\n elif treebank_tag.startswith('V'):\n return wordnet.VERB\n elif treebank_tag.startswith('N'):\n return wordnet.NOUN\n elif treebank_tag.startswith('R'):\n return wordnet.ADV\n else:\n return wordnet.NOUN", "def get_wordnet_pos(wn, treebank_tag):\n\n if treebank_tag.startswith('J'):\n return wn.ADJ\n elif treebank_tag.startswith('V'):\n return wn.VERB\n elif treebank_tag.startswith('N'):\n return wn.NOUN\n elif treebank_tag.startswith('R'):\n return wn.ADV\n else:\n return wn.NOUN", "def get_wordnet_pos(treebank_tag):\n if treebank_tag.startswith('J'):\n return wordnet.ADJ\n elif treebank_tag.startswith('V'):\n return wordnet.VERB\n elif treebank_tag.startswith('N'):\n return wordnet.NOUN\n elif treebank_tag.startswith('R'):\n return wordnet.ADV\n else:\n return wordnet.NOUN # If unknown, return the default value", "def get_wordnet_pos(treebank_tag):\n\n if treebank_tag == 'NNP':\n return wordnet.NOUN, 'proper'\n\n # JJ-adjective\n # JJR-adjective, comparative\n # JJS-adjective, superlative\n elif treebank_tag.startswith('J'):\n return wordnet.ADJ, 'adj'\n\n # VB-verb, base form\n # VBD-verb, past tense\n # VBG-verb, gerund or present participle; VBN-verb, past participle\n # VBP-verb, non-3rd person singular present\n # VBZ-verb, 3rd person singular present\n elif treebank_tag.startswith('V'):\n return wordnet.VERB, 'verb'\n\n # RB-adverb\n # RBR-adverb, comparative\n # RBS-adverb, superlative\n # RP-particle\n elif treebank_tag.startswith('R'):\n return wordnet.ADV, 'adv'\n\n # NN-noun\n elif treebank_tag.startswith('N'):\n return wordnet.NOUN, 'noun'\n\n # default\n else:\n return wordnet.NOUN, ''", "def word_tag(self, word):\n if word[1] in (\"NN\", \"NNS\", \"NNP\", \"NNPS\"):\n return _wordnet.NOUN\n if word[1] in (\"JJ\", \"JJR\", \"JJS\"):\n return _wordnet.ADJ\n if word[1] in (\"VB\", \"VBD\", \"VBG\", \"VBN\", \"VBP\", \"VBZ\"):\n return _wordnet.VERB\n if word[1] in (\"RB\", \"RBR\", \"RBS\"):\n return _wordnet.ADV\n\n return None", "def getPosTagAt(self, pos):\n return self.sentence[pos].getPosTag()", "def get_wordnet_pos(pos):\n if pos[0] == 'J':\n return nltk.corpus.wordnet.ADJ\n elif pos[0] == 'V':\n return nltk.corpus.wordnet.VERB\n elif pos[0] == 'N':\n return nltk.corpus.wordnet.NOUN\n elif pos[0] == 'R':\n return nltk.corpus.wordnet.ADV\n else:\n # As default pos in lemmatization is Noun\n return nltk.corpus.wordnet.NOUN", "def convert_pos_tag(tag):\n # Source: https://www.programcreek.com/python/example/91610/nltk.corpus.wordnet.NOUN\n if tag in ['JJ', 'JJR', 'JJS']:\n return ADJ\n elif tag in ['RB', 'RBR', 'RBS']:\n return ADV\n elif tag in ['NN', 'NNS', 'NNP', 'NNPS']:\n return NOUN\n elif tag in ['VB', 'VBD', 'VBG', 'VBN', 
'VBP', 'VBZ']:\n return VERB\n return NOUN", "def find_pronoun(sent):\n pronoun = None\n\n for word, part_of_speech in sent.pos_tags:\n # Disambiguate pronouns\n if part_of_speech == 'PRP' and word.lower() == 'you':\n # pronoun = 'I' +++++++++ORIGINAL++++++++++++\n pronoun = 'I'\n elif part_of_speech == 'PRP' and word == 'I':\n # If the user mentioned themselves, then they will definitely be the pronoun\n # pronoun = 'You' +++++++++ORIGINAL++++++++++++\n pronoun = 'You'\n return pronoun", "def find_noun(sent):\n noun = None\n\n if not noun:\n for w, p in sent.pos_tags:\n if p == 'NN': # This is a noun\n noun = w\n break\n if noun:\n #logger.info(\"Found noun: %s\", noun)\n pprint(\"FOUND NOUN\")\n pprint(noun)\n\n return noun", "def getWordAt(self, pos):\n return self.sentence[pos].getWord()", "def part_of_speech(text):\n temp = nltk.pos_tag(text)\n return [word for word, tag in temp if \n (tag == \"NN\") or \n (tag == \"NNS\") or\n (tag == \"NNP\") or \n (tag == \"NNPS\")]" ]
[ "0.7593354", "0.7576574", "0.7576574", "0.7576574", "0.7576574", "0.7576574", "0.7576574", "0.75293577", "0.7503421", "0.7377293", "0.719029", "0.71326035", "0.71208805", "0.709108", "0.70407254", "0.7016819", "0.6993947", "0.69855785", "0.69850004", "0.6918058", "0.6874751", "0.68112504", "0.68024004", "0.65973026", "0.6525056", "0.6485229", "0.63990045", "0.6345473", "0.6334248", "0.6272026" ]
0.7755339
0
Cleans a single review (simplifies it as much as possible)
def clean_review(self, text): text = text.lower() # lowercase capital letters if self.remove_stopwords: text = self.remove_stopwords_f(text, keep_neg_words=True) text = re.sub('[^a-zA-Z]+', ' ', text) # select only alphabet characters (letters only) # text = re.sub('[^a-zA-Z0-9]+', ' ', text) # select only alphanumeric characters (letters & numbers) # text = re.sub(r'\W+', ' ', text) # Select only alphanumeric characters (including greek & underscore) text = re.sub(' +', ' ', text) # remove extra spaces if self.apply_normalization: text = self.normalize_text(text) return text
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sanitize(review):\n # c) Remove all punctuation, as well as the stop-words.\n # First replace punctuations with empty char then tokenize it\n # Replace punctuation with spaces using fast method\n clean = review.translate(review.maketrans(string.punctuation,\n ' ' * len(string.punctuation)))\n clean = re.sub(' +', ' ', clean) # remove more than 1 whitespaces\n words = nltk.word_tokenize(clean)\n # Remove stop-words\n removed_words = []\n for w in words:\n if w not in stop_words:\n removed_words.append(w)\n #removed_words = [w for w in words if w not in stop_words]\n # d) Apply a stemmer on the paragraphs, so that inflected\n # forms are mapped to the base form. For example,\n # for python the popular natural language toolkit nltk has\n # an easy-to-use stemmer.\n stemmer = nltk.stem.snowball.SnowballStemmer(\"english\")\n res = [stemmer.stem(w) for w in removed_words]\n # Final touch join the words\n return \" \".join(res)", "def preprocess(review):\n review=review.lower()\n \n review = re.sub(r'[^\\w\\s]', \"\", review)\n collection=[]\n for x in review.split(' '):\n if x not in stop_words:\n collection.append(x)\n #processed_review=' '.join(x for x in collection)\n processed_review=collection\n return processed_review", "def test_clean_review(self):\n review = \"This is a<br /> test.\"\n result = clean_reviews(review)\n self.assertEqual(result, \"This is a test.\")", "def clean(c):", "def clean(text):\r\n #clean structure\r\n text = re.sub(r\"(?<!\\|)Amount ?\\| ?Ingredient(?!\\|)\", \"|Amount|Ingredient|\", text)\r\n text = re.sub(r\"----\\|----\\n\\n\", r\"----|----\\n\", text)\r\n text = re.sub(r\"(?<!\\|)----\\|----(?!\\|)\", \"|----|----|\", text)\r\n text = re.sub(\"## Directions\", \"## Cooking Instructions\", text)\r\n\r\n #fractions \r\n for pat, rep in repls:\r\n text = re.sub(pat, rep, text, flags=re.IGNORECASE)\r\n\r\n #links\r\n def fix_link(match):\r\n return \"](../\"+re.sub(\" \", \"-\", fix_title(match.group(1)))+\")\"\r\n text = re.sub(r\"\\]\\((.*?)\\)\", fix_link, text)\r\n \r\n lines = text.split(\"\\n\")\r\n new_text = []\r\n #add spaces to the end of lines\r\n for line in lines:\r\n match = re.search(r\" $\", line)\r\n if match:\r\n new_text.append(line)\r\n else:\r\n new_text.append(line+\" \")\r\n #remove spaces from the end of lines\r\n # for line in lines:\r\n # match = re.search(r\" +$\", line)\r\n # if match:\r\n # new_text.append(line[:-len(match.group(0))])\r\n # else:\r\n # new_text.append(line)\r\n\r\n text = \"\\n\".join(new_text)\r\n\r\n return text", "def cleaning (data):", "def cleaning_pipeline(self, tree: dict):\n tree = self.remove_redundant(tree)\n tree[\"selftext\"] = self.clean_text(tree[\"selftext\"])\n tree[\"title\"] = self.clean_text(tree[\"title\"])\n\n empty_comments = []\n for id, comment in tree[\"comments\"].items():\n if \"body\" in comment and \"parent_id\" in comment:\n comment[\"body\"] = self.clean_text(comment[\"body\"])\n comment[\"parent_id\"] = comment[\"parent_id\"][3:]\n else:\n empty_comments.append(id)\n print(\"Skipping empty comment : \", id, tree[\"comments\"][id])\n\n tree = self.resolve_empty_comments(tree, empty_comments)\n\n tree = self.correct_replies(tree)\n\n return tree", "def clean(tweet):\n #Separates the contractions and the punctuation\n\n\n tweet = re.sub(\"[!#.,\\\"]\", \"\", tweet).replace(\"<user>\", \"\")\n tweet = re.sub(\"[!#.,\\\"]\", \"\", tweet).replace(\"<url>\", \"\")\n tweet = correct_spell(tweet)\n return tweet.strip().lower()", "def condensed_review(self, max_len=40):\n return 
truncatechars(self.review, max_len)", "def cleanup(self, body):\n body = re.sub(\"&gt;\", \">\", body) # Recode HTML codes\n body = re.sub(\"&lt;\", \"<\", body)\n body = re.sub(\"&amp;\", \"&\", body)\n body = re.sub(\"&nbsp;\", \" \", body)\n # body = re.sub(\"^\\[deleted\\]$\", \"\", body) # [deleted] and [removed]: delete entire row from dataframe\n body = re.sub(\"http\\S+\", \" \", body) # Remove URL\n body = re.sub(\"/r/\\S+|/u/\\S+\", \" \", body) # Remove /r/subreddit, /u/user\n # body = re.sub(\"(>.*?\\\\n\\\\n)+\", \" \", body) # Remove quoted comments\n # body = re.sub(\"[[:cntrl:]]\", \" \", body) # Remove control characters (\\n, \\b) doesn't work for unicode\n body = \"\".join(ch for ch in body if unicodedata.category(ch)[0]!=\"C\") # Remove control characters (\\n, \\b) etc.\n body = re.sub(\"'\", \"\", body) # Remove single quotation marks (contractions)\n # body = re.sub(\"[[:punct:]]\", \" \", body) # Remove punctuation\n body = \"\".join(ch for ch in body if unicodedata.category(ch)[0]!=\"P\") # Remove punctuation\n body = re.sub(\"\\\\s+\", \" \", body) # Replace multiple spaces with single space\n body = body.strip()\n body = body.lower() # Lower case\n return body # Return body (cleaned up text)", "def single_review_prep(text):\n clean_test = data_cleaner(text)\n dummy_dict= {'star': [clean_test]}\n clean_test_df = pd.DataFrame(dummy_dict)\n return clean_test_df", "def clean_all(self, tweet):\n tweet = self.clean_urls(tweet)\n tweet = self.clean_hashtags(tweet)\n tweet = self.clean_mentions(tweet)\n tweet = self.clean_emojis_and_smileys(tweet)\n tweet = self.clean_unnecessary_characters(tweet)\n tweet = self.clean_reserved_words(tweet)\n\n return tweet", "def clean(_context):", "def review_to_meaningful_words(review):\n\n from nltk.corpus import stopwords\n\n words = review.split()\n # convert stopwords to a set\n sw_set = set(stopwords.words('english'))\n meaninful_words = [word for word in words if word not in sw_set]\n\n return meaningful_words", "def _clean_data_on_db(self):\n self._review_obj.delete_many({\n 'entity_id': self._entity_id,\n })", "def clean_exam():\n data = Exam.objects.all()\n data.delete()", "def review_to_words( review ):\n #Remove any HTML tags and convert to lower case\n review_text = BeautifulSoup(review).get_text().lower()\n\n #Replace smiliey and frown faces, ! and ? 
with coded word SM{int} in case these are valuable\n review_text=re.sub(\"(:\\))\",r' SM1',review_text)\n review_text=re.sub(\"(:\\()\",r' SM2',review_text)\n review_text=re.sub(\"(!)\",r' SM3',review_text)\n review_text=re.sub(\"(\\?)\",r' SM4',review_text)\n\n #keep 'not' and the next word as negation may be important\n review_text=re.sub(r\"not\\s\\b(.*?)\\b\", r\"not_\\1\", review_text)\n\n #keep letters and the coded words above, replace the rest with whitespace\n nonnumbers_only=re.sub(\"[^a-zA-Z\\_(SM\\d)]\",\" \",review_text)\n\n #Split into individual words on whitespace\n words = nonnumbers_only.split()\n\n #Remove stop words\n words = [w for w in words]\n\n return (words)", "def clean_resume(self, text):\n text = text.lower() # lowercase capital letters\n\n text = re.sub(r'(http|www)\\S+\\s*', '', text) # remove URLs\n text = re.sub(r'\\S+@\\S+\\s*', '', text) # remove emails\n text = re.sub(r'@\\S+\\s*', '', text) # remove mentions\n text = re.sub(r'#\\S+\\s*', '', text) # remove hashtags\n\n if self.remove_stopwords:\n text = self.remove_stopwords_f(text)\n\n text = re.sub('[^a-zA-Z]+', ' ', text) # select only alphabet characters (letters only)\n # text = re.sub('[^a-zA-Z0-9]+', ' ', text) # select only alphanumeric characters (letters & numbers)\n # text = re.sub(r'\\W+', ' ', text) # Select only alphanumeric characters (including greek & underscore)\n\n # text = re.sub('[%s]' % re.escape(\"\"\"!\"#$%&'()*+,-./:;<=>?@[]^_`{|}~\"\"\"), '', text) # remove punctuation\n # text = re.sub(r'[^\\x00-\\x7f]', '', text) # remove non-ASCII characters\n # # # Replace non-ASCII characters with their most alike representation (doesn't work):\n # # text = unidecode(unicode(text, encoding=\"utf-8\"))\n\n text = re.sub(' +', ' ', text) # remove extra spaces\n\n if self.apply_normalization:\n text = self.normalize_text(text)\n\n return text", "def clean_line(line, normNum=True, normProf=True):\n\n # Remove square brackets, ceiling characters, question marks, other\n # questionable characters, and line breaks\n line = re.sub(r'(\\[|\\])', '', line)\n line = re.sub(r'(⌈|⌉)', '', line)\n line = re.sub(r'( / )', ' ', line)\n line = re.sub(r'/', '', line)\n line = re.sub(r'\\?', '', line)\n line = re.sub(r'([<]|[>])+', '', line)\n line = re.sub(r'!', '', line)\n line = re.sub(r'\"', '', line)\n\n # Remove researcher's notes, and multiple dashes or '='s\n line = re.sub(r'(\\(.*\\))', '', line)\n line = re.sub(r'(#[.]*)', '', line)\n line = re.sub(r'[-]{2}', '', line)\n line = re.sub(r'[=]{2}', '', line)\n\n # Replace numbers with 'number'\n if normNum is True:\n line = re.sub(r'\\b(?<!-)(\\d+)(?![\\w-])', 'number', line)\n line = re.sub(r'[-+]?\\b\\d+\\b', 'number', line)\n\n #line = re.sub(r'\\b([\\-\\.0-9]+)(?![\\w-])', 'number', line)\n\n # Replace professions with 'profession'\n if normProf is True:\n line = professions.replaceProfessions(line)\n\n # Remove blank character at end of line\n linelength = len(line)\n if (linelength > 0 and line[linelength-1] == \"\"):\n del line[0:linelength-2]\n\n return line", "def cleanTweet(text, appostrophes=True, emojis=True, html=True, url=True, misspellings=True, punctuation=True, lemming=True,\\\r\n stop=True):\r\n if appostrophes:\r\n #convert appostrophes\r\n filtered_string = decontracted(text)\r\n if emojis:\r\n #decoding, removing emojis\r\n filtered_string = filtered_string.encode(\"utf-8\").decode('ascii','ignore')\r\n if html:\r\n #cleaning of html tags\r\n htmltags = re.compile('<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});')\r\n 
filtered_string = re.sub(htmltags, '', filtered_string)\r\n if url:\r\n #cleaning of url\r\n url = re.compile(r'https?://\\S+|www\\.\\S+')\r\n filtered_string = re.sub(url, '', text)\r\n if misspellings:\r\n #cleaning of misspellings\r\n spell = SpellChecker()\r\n corrected_text = []\r\n misspelled_words = spell.unknown(filtered_string.split())\r\n for word in filtered_string.split():\r\n if word in misspelled_words:\r\n corrected_text.append(spell.correction(word))\r\n else:\r\n corrected_text.append(word)\r\n filtered_string = \" \".join(corrected_text)\r\n if punctuation:\r\n word_tokens = word_tokenize(filtered_string)\r\n #remove punctuations\r\n table=str.maketrans('','',string.punctuation)\r\n filtered_string.translate(table) \r\n filtered_string = [word.translate(table) for word in word_tokens]\r\n filtered_string = \" \".join(filtered_string)\r\n if lemming:\r\n #lemming of words\r\n word_tokens = word_tokenize(filtered_string)\r\n lemmatizer = WordNetLemmatizer() \r\n filtered_string = [lemmatizer.lemmatize(word) for word in word_tokens]\r\n if stop:\r\n # cleaning from stopwords\r\n stop_words=set(stopwords.words('english'))\r\n stop_word_drop = [] \r\n for word in filtered_string: \r\n if word not in stop_words: \r\n stop_word_drop.append(word) \r\n filtered_string = \" \".join(stop_word_drop)\r\n \r\n #toDos\r\n #cleaning of rare words\r\n # tokens is a list of all tokens in corpus\r\n # freq_dist = nltk.FreqDist(token)\r\n # rarewords = freq_dist.keys()[-50:]\r\n # after_rare_words = [ word for word in token not in rarewords]\r\n #cleaning of slang words\r\n #split attached words, not working and questionable because of all capital words\r\n # filtered_string = \" \".join(re.findall('[A-Z][^A-Z]*', filtered_string))\r\n return filtered_string", "def clean(self):", "def normalize_answer(s):\n\n def remove_articles(text):\n return re_art.sub(' ', text)\n\n def white_space_fix(text):\n return ' '.join(text.split())\n\n def remove_punc(text):\n return re_punc.sub(' ', text) # convert punctuation to spaces\n\n def lower(text):\n return text.lower()\n\n return white_space_fix(remove_articles(remove_punc(lower(s))))", "def normalize_answer(s):\n\n def remove_articles(text):\n return re_art.sub(' ', text)\n\n def white_space_fix(text):\n return ' '.join(text.split())\n\n def remove_punc(text):\n return re_punc.sub(' ', text) # convert punctuation to spaces\n\n def lower(text):\n return text.lower()\n\n return white_space_fix(remove_articles(remove_punc(lower(s))))", "def clean_content(self) -> str:", "def clean_text(input: str) -> str:\n # 1. REMOVE ARTIFACTS\n cleaned_text = remove_nested_parentheses(input)\n # Remove section headings\n cleaned_text = re.sub(r'={2,}.*?={2,}', '', cleaned_text)\n\n # 2. REFORMAT REMAINING TEXT\n # Remove duplicate white spaces\n cleaned_text = \" \".join(cleaned_text.split()).strip()\n # Remove white space before comma - left by removal of other content\n cleaned_text = cleaned_text.replace(' , ', ', ')\n # Separate joined sentences eg \"end of one.Start of another\"\n # Only perform this when a new sentence starts with a capitalized word\n # will not catch sentences starting with single letters.\n cleaned_text = re.sub(r'\\.([A-Z][a-z]+)', r'. 
\\1', cleaned_text)\n\n return cleaned_text", "def review_to_word(review):\n # Get text only\n review_text = BeautifulSoup(review).get_text()\n # Remove non-letters \n letters_only = re.sub(\"[^a-zA-Z]\", \" \", review_text)\n # Convert to lower case, split into individual words\n words = letters_only.lower().split()\n # searching in a set rather than a list is faster in python\n stops = set(stopwords.words(\"english\"))\n # Remove stop words\n meaningful_words = [w for w in words if not w in stops]\n # Join the words back into one string\n return( \" \".join( meaningful_words ))", "def clean(self):\n pass", "def clean_text(affil_text: str):\n affil_text = affil_text.strip()\n\n affil_text = re.sub(\"\\t\", \" \", affil_text)\n affil_text = re.sub(r\"\\*\", \" \", affil_text)\n affil_text = re.sub(\";\", \", \", affil_text)\n\n affil_text = re.sub(\"Univ. \", \"University \", affil_text)\n affil_text = re.sub(\"Dept. \", \"Department \", affil_text)\n affil_text = re.sub(\"Surg. \", \"Surgery \", affil_text)\n\n affil_text = re.sub(\", Inc.\", \" Inc.\", affil_text)\n\n affil_text = re.sub(\"E-mail:\", \"\", affil_text)\n affil_text = re.sub(\"email:\", \"\", affil_text)\n affil_text = re.sub(\"P.O. Box\", \"\", affil_text) # zip code\n\n affil_text = re.sub(\"\\s+\", \" \", affil_text)\n\n return affil_text.strip()", "def cleanText(markup,stripNonAlphaNumeric=False, stripNumbers=False):\n markupNew = markup.copy()\n if( stripNonAlphaNumeric ):\n txt = r1.sub(\" \",markupNew.getRawText() )\n else:\n txt = markupNew.getRawText()\n\n # clean up white spaces\n txt = r2.sub(\" \",txt)\n if( stripNumbers ):\n txt = r3.sub(\"\",txt)\n\n markupNew.graph[\"__txt\"] = txt\n if( markupNew.getVerbose() ):\n print u\"cleaned text is now\",markupNew.getText()\n return markupNew", "def prune_ratios(ratios, bad_words):\n for word in bad_words:\n ratios.pop(word, None)" ]
[ "0.7039309", "0.6684278", "0.64323676", "0.6006801", "0.5992633", "0.5959121", "0.5953015", "0.5914954", "0.58682597", "0.5802497", "0.5769421", "0.5647594", "0.56341743", "0.559752", "0.5589405", "0.55746025", "0.5561322", "0.5524036", "0.55173266", "0.5471399", "0.54688805", "0.54664946", "0.54664946", "0.5466182", "0.5460173", "0.54501504", "0.5425977", "0.541189", "0.54095656", "0.53950244" ]
0.71801054
0
Cleans a single resume (resume text)
def clean_resume(self, text): text = text.lower() # lowercase capital letters text = re.sub(r'(http|www)\S+\s*', '', text) # remove URLs text = re.sub(r'\S+@\S+\s*', '', text) # remove emails text = re.sub(r'@\S+\s*', '', text) # remove mentions text = re.sub(r'#\S+\s*', '', text) # remove hashtags if self.remove_stopwords: text = self.remove_stopwords_f(text) text = re.sub('[^a-zA-Z]+', ' ', text) # select only alphabet characters (letters only) # text = re.sub('[^a-zA-Z0-9]+', ' ', text) # select only alphanumeric characters (letters & numbers) # text = re.sub(r'\W+', ' ', text) # Select only alphanumeric characters (including greek & underscore) # text = re.sub('[%s]' % re.escape("""!"#$%&'()*+,-./:;<=>?@[]^_`{|}~"""), '', text) # remove punctuation # text = re.sub(r'[^\x00-\x7f]', '', text) # remove non-ASCII characters # # # Replace non-ASCII characters with their most alike representation (doesn't work): # # text = unidecode(unicode(text, encoding="utf-8")) text = re.sub(' +', ' ', text) # remove extra spaces if self.apply_normalization: text = self.normalize_text(text) return text
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cleaning(full_text):\n try:\n if open(RESULT_PATH):\n os.remove(RESULT_PATH)\n \n else:\n print(\"No output.mp3\")\n except Exception as e:\n print(str(e))\n\n text = full_text\n\n book = ''.join(text)\n\n\n book = book.replace('.', '.<eos>')\n book = book.replace('?', '?<eos>')\n book = book.replace('!', '!<eos>')\n\n sentences = book.split('<eos>')\n\n return sentences", "def cleanup(text):\n with open(text, 'r') as uncleaned_text:\n no_chapters = re.sub('[A-Z]{3,}', ' ', uncleaned_text.read())\n remove_periods = re.sub('(\\s\\.){4,}', '', no_chapters)\n new_text = re.sub('\\*', '', remove_periods)\n return new_text", "def clean_content(self) -> str:", "def clean_record(text, \n page_breaks=True,\n midline_returns=True,\n time_marks=True):\n clean_text = text\n if(page_breaks):\n clean_text = remove_page_breaks(clean_text)\n if(midline_returns):\n clean_text = remove_midline_returns(clean_text)\n if(time_marks):\n clean_text = remove_time_marks(clean_text)\n \n return clean_text", "def remove_extra_text(self, text):\n if text:\n parsed_text = text\n if parsed_text.find('== Referencias ==') > 0:\n parsed_text = parsed_text[:parsed_text.find('== Referencias ==\\n')]\n if parsed_text.find('== Fuentes ==') > 0:\n parsed_text = parsed_text[:parsed_text.find('== Fuentes ==\\n')]\n if parsed_text.find('== Fuente ==') > 0:\n parsed_text = parsed_text[:parsed_text.find('== Fuente ==\\n')]\n if parsed_text.find('== Ver también =='.decode('utf-8')) > 0:\n parsed_text = parsed_text[:parsed_text.find('== Ver también ==\\n'.decode('utf-8'))]\n if parsed_text.find(\"== Noticia relacionada ==\".decode(\"utf-8\")) > 0:\n parsed_text = parsed_text[:parsed_text.find(\"== Noticia relacionada ==\".decode('utf-8'))]\n if parsed_text.find(\"== Artículos relacionados ==\".decode(\"utf-8\")) > 0:\n parsed_text = parsed_text[:parsed_text.find(\"== Artículos relacionados ==\".decode('utf-8'))]\n if parsed_text.find(\"== Enlace externo ==\".decode(\"utf-8\")) > 0:\n parsed_text = parsed_text[:parsed_text.find(\"== Enlace externo ==\".decode('utf-8'))]\n if parsed_text.find(\"== Enlaces externos ==\".decode(\"utf-8\")) > 0:\n parsed_text = parsed_text[:parsed_text.find(\"== Enlaces externos ==\".decode('utf-8'))]\n parsed_text = parsed_text.replace('ABr)', '')\n return parsed_text", "def remove_info(text, journal_id, label, doc_type='inkomst'):\r\n sections = text.split('NEWPAR')\r\n cleaned_text = ''\r\n diagnose_detected = False\r\n for section in sections:\r\n if section:\r\n section_header =list(filter(None, section.split(' ')))[0]\r\n #print(section_header)\r\n if 'diagnose' in section_header.lower() or 'DIAGNOSE' in section or 'Diagnose :' in section or 'Problemstilling :' in section:\r\n diagnose_detected = True\r\n else:\r\n cleaned_text += section + ' '\r\n if not diagnose_detected :\r\n print('No DIAGNOSE in: ', journal_id)\r\n return cleaned_text", "def minimalTextCleaning(row, field):\n\n # force encoding\n encoded_text = row[field].encode(encoding = 'ascii',errors = 'replace')\n decoded_text = encoded_text.decode(encoding='ascii',errors='strict')\n remove_funky_chars = str(decoded_text).replace(\"?\", \" \")\n lower_case = str(remove_funky_chars).lower().strip()\n\n # strip redundant whitespace\n cleaned_text = re.sub(' +', ' ', lower_case)\n\n\n # strip signature lines\n cleaned_text = cleaned_text.replace(\"_\", \"\")\n\n return cleaned_text", "def clean_text(input: str) -> str:\n # 1. 
REMOVE ARTIFACTS\n cleaned_text = remove_nested_parentheses(input)\n # Remove section headings\n cleaned_text = re.sub(r'={2,}.*?={2,}', '', cleaned_text)\n\n # 2. REFORMAT REMAINING TEXT\n # Remove duplicate white spaces\n cleaned_text = \" \".join(cleaned_text.split()).strip()\n # Remove white space before comma - left by removal of other content\n cleaned_text = cleaned_text.replace(' , ', ', ')\n # Separate joined sentences eg \"end of one.Start of another\"\n # Only perform this when a new sentence starts with a capitalized word\n # will not catch sentences starting with single letters.\n cleaned_text = re.sub(r'\\.([A-Z][a-z]+)', r'. \\1', cleaned_text)\n\n return cleaned_text", "def _clean_seq_titles(self, element):\r\n return self.REMOVE_SPAN_TAG_RE.sub('', element.get_attribute('innerHTML')).strip().split('\\n')[0]", "def _text_clean(self):\n try:\n self.text = eval(self.text[0])[0]['node']['text']\n self.clean = True\n except IndexError:\n return", "def clean_text(self, text) -> Union[str, None]:\n if text and ''.join(text.split()):\n if type(text) == bytes: #Decoding byte strings\n text = text.decode('utf-8')\n #Removing emails + ***.com urls\n text = ' '.join([item for item in text.split() if '@' not in item and '.com' not in item])\n text = ' '.join(text.split()) #removing all multiple spaces\n if text: return text\n # UNCLEAN_TEXT.inc()\n return None", "def _text_remove_s(all_text):\n\t# on n'agit que s'il y a au moins un cara plein\n\t\t# => pas les elts vides, ni \\s dont saut de ligne\n\tif len(all_text) and search('[^\\s]', all_text, flags=MULTILINE):\n\t\tflat_alltext = sub(r'\\n', '¤', all_text, flags=MULTILINE)\n\t\tflat_alltext = sub(r'[¤\\s]+$', '', flat_alltext)\n\t\tflat_alltext = sub(r'^[¤\\s]+', '', flat_alltext)\n\telse:\n\t\tflat_alltext = ''\n\treturn flat_alltext", "def clean(apps, schema_editor):\n for campaign in apps.get_model(\"peacecorps\", \"Campaign\").objects.all():\n campaign.description = clean_description(campaign.description)\n campaign.description = re.sub(r\"(?<!\\\\)\\n\", r\"\\\\n\",\n campaign.description)\n campaign.save()\n\n for project in apps.get_model(\"peacecorps\", \"Project\").objects.all():\n project.description = clean_description(project.description)\n project.description = re.sub(r\"(?<!\\\\)\\n\", r\"\\\\n\", project.description)\n project.save()", "def cleanup(self, body):\n body = re.sub(\"&gt;\", \">\", body) # Recode HTML codes\n body = re.sub(\"&lt;\", \"<\", body)\n body = re.sub(\"&amp;\", \"&\", body)\n body = re.sub(\"&nbsp;\", \" \", body)\n # body = re.sub(\"^\\[deleted\\]$\", \"\", body) # [deleted] and [removed]: delete entire row from dataframe\n body = re.sub(\"http\\S+\", \" \", body) # Remove URL\n body = re.sub(\"/r/\\S+|/u/\\S+\", \" \", body) # Remove /r/subreddit, /u/user\n # body = re.sub(\"(>.*?\\\\n\\\\n)+\", \" \", body) # Remove quoted comments\n # body = re.sub(\"[[:cntrl:]]\", \" \", body) # Remove control characters (\\n, \\b) doesn't work for unicode\n body = \"\".join(ch for ch in body if unicodedata.category(ch)[0]!=\"C\") # Remove control characters (\\n, \\b) etc.\n body = re.sub(\"'\", \"\", body) # Remove single quotation marks (contractions)\n # body = re.sub(\"[[:punct:]]\", \" \", body) # Remove punctuation\n body = \"\".join(ch for ch in body if unicodedata.category(ch)[0]!=\"P\") # Remove punctuation\n body = re.sub(\"\\\\s+\", \" \", body) # Replace multiple spaces with single space\n body = body.strip()\n body = body.lower() # Lower case\n return body # Return body (cleaned up text)", 
"def desc_cleanser(self, txt):\n # New line issues\n txt = re.sub(r'\\\\n', r' ', txt)\n # Unicode cleanse\n txt = re.sub(r'\\\\u[\\d]{4}', r'', txt)\n # Remaining unicode cleanse\n txt = re.sub(r'\\\\{1,2}\\S+', r' ', txt)\n # Remove remaining non-alphanumeric and spaces\n txt = ''.join([i for i in txt if i.isalnum() or i.isspace() or i in ['.','?','!']])\n # Remove more than a single space\n txt = re.sub(r'\\s+', r' ', txt)\n\n return txt", "def _prepare_text(body):\n text = body.lower()\n text = text.replace('\\n', ' ')\n regex = re.compile('[^a-z ]')\n return regex.sub('', text)", "def text_cleaning(self, text): # pylint: disable=no-self-use\n text = text.encode(\"ascii\", \"ignore\").decode(\"ascii\", \"ignore\")\n text = re.sub(r'[^\\x00-\\x7F]', '', text)\n text = text.replace(\"\\n\", \"\")\n text = text.replace(\"\\'\", \"'\")\n text = text.replace(\"\\\\\\\"\", '\\\"')\n text = text.replace(\"&amp;\", \"&\")\n text = text.replace(\"&quot;\", '\\\"')\n text = text.replace(\"&nbsp;\", ' ')\n text = text.strip().lstrip().rstrip()\n desc_text = ' '.join(text.split())\n return desc_text", "def clean_text(affil_text: str):\n affil_text = affil_text.strip()\n\n affil_text = re.sub(\"\\t\", \" \", affil_text)\n affil_text = re.sub(r\"\\*\", \" \", affil_text)\n affil_text = re.sub(\";\", \", \", affil_text)\n\n affil_text = re.sub(\"Univ. \", \"University \", affil_text)\n affil_text = re.sub(\"Dept. \", \"Department \", affil_text)\n affil_text = re.sub(\"Surg. \", \"Surgery \", affil_text)\n\n affil_text = re.sub(\", Inc.\", \" Inc.\", affil_text)\n\n affil_text = re.sub(\"E-mail:\", \"\", affil_text)\n affil_text = re.sub(\"email:\", \"\", affil_text)\n affil_text = re.sub(\"P.O. Box\", \"\", affil_text) # zip code\n\n affil_text = re.sub(\"\\s+\", \" \", affil_text)\n\n return affil_text.strip()", "def clean_message_md(self):\n message_md = self.cleaned_data[\"message_md\"]\n lines = filter(None, message_md.splitlines())\n message_md = \" \".join(lines)\n return message_md", "def _strip_excerpt(self, raw_html):\n clean_regex = re.compile(\"<.*?>\")\n clean_text = re.sub(clean_regex, \"\", raw_html)\n return html.unescape(clean_text).replace(\"\\n\", \"\")", "def _clean(sentence, subword_option, lower_case):\n sentence = sentence.strip()\n\n # BPE\n if subword_option == \"bpe\":\n sentence = re.sub(\"@@ \", \"\", sentence)\n\n # SPM\n elif subword_option == \"spm\":\n sentence = u\"\".join(sentence.split()).replace(u\"\\u2581\", u\" \").lstrip()\n\n return sentence.lower() if lower_case else sentence", "def clean_plaintext_article(text, extract, end_indicators_file):\n f = open(end_indicators_file, 'r')\n lines = f.readlines()\n edit = lines[0].strip()\n for line in lines[1:]:\n if line.split()[0] == 'exact':\n text = re.sub([a.decode('utf-8') for a in line.split()][1] + r'.*', '', text, flags=re.DOTALL)\n else:\n text = re.sub(r'&lt;h[23]((?!&gt;).)*&gt;\\s*' + ' '.join([a.decode('utf-8') for a in line.split()]) + r'\\s*(\\[.*?\\])?\\s*&lt;/h[23]&gt;.*', '', text, flags=re.DOTALL)\n \n text = re.sub(r'(\\[\\s*?' + edit + r'\\s*?\\])', '', text) # remove all [edit] markersa\n text = re.sub(r'&amp;amp;', '&', text) # display ampersands properly\n if extract:\n return text\n text = re.sub(r'&lt;.*?&gt;', '', text) # remove all html tags\n text = re.sub(r'&[^;\\s]*?;', '', text) # remove all other markings, e.g. 
&quot;\n\n return text", "def clean_paragraphs(paragraphs, artist_name, song_name):\n clean_paragraphs = []\n\n for paragraph in paragraphs:\n for extraneous_pattern in EXTRANEOUS_TEXT:\n extraneous_pattern = extraneous_pattern.replace(\"$BAND$\", re.escape(artist_name))\n extraneous_pattern = extraneous_pattern.replace(\"$SONG$\", re.escape(song_name))\n\n paragraph = re.sub(extraneous_pattern, \"\", paragraph, flags=re.IGNORECASE)\n\n clean_paragraphs.append(paragraph)\n\n return clean_paragraphs", "def clean_text(text2, project_key):\n\n text = text2\n text = return_text_without_headlines(text)\n # remove text written between double curly braces\n text = re.sub(r\"{{code}}.*{{code}}\", \"code.\", text)\n text = re.sub(r\"{code.*{code}\", \"code.\", text)\n text = re.sub(r\"{code:java}.*{code:java}\", \"code.\", text)\n text = re.sub(r\"{noformat}.*{noformat}\", \"code.\", text)\n text = re.sub(r\"{{monospaced}}.*{{monospaced}}\", \"code.\", text)\n text = re.sub(r'<script type=\"text/javascript\">.*</noscript>', 'code.', text)\n text = re.sub(r\"'''.*'''\", \"code\", text)\n text = text.replace('<p>&nbsp;</p>', \"\")\n text = text.replace('<div>&nbsp;</div>', \"\")\n text = text.replace('&nbsp;', \" \")\n # remove URLs link\n text = re.sub(r\"<a href=.*</a>\", \"url. \", text)\n text = re.sub(r\"http\\S+\", \"url. \", text)\n text = re.sub(r\"hdfs://\\S+\", \"url. \", text)\n text = re.sub(r\"tcp://\\S+\", \"url. \", text)\n text = re.sub(r\"webhdfs://\\S+\", \"url. \", text)\n text = re.sub(r\":/\\S+\", \"url. \", text)\n text = re.sub(r\"\\S+.com \", \"url. \", text)\n text = re.sub(r\"N/A]\", \" \", text)\n text = \" \".join(x for x in text.split() if not x.endswith('.com'))\n text = \" \".join(x for x in text.split() if not x.endswith('.com*'))\n text = \" \".join(x for x in text.split() if not x.endswith('.org'))\n text = \" \".join(x for x in text.split() if not x.endswith('.xml'))\n text = \" \".join(x for x in text.split() if not x.startswith('*javax.xml.'))\n text = \" \".join(x for x in text.split() if not x.startswith('javax.xml.'))\n # remove Image attachments\n text = re.sub(r\"<p><img alt=.></p>\", \"image.\", text)\n text = re.sub(r\"{}-\\d+\".format(project_key), \"issue\", text)\n # remove date\n text = re.sub(r'(\\w{4})-(\\d{1,2})-(\\d{1,2}) ', 'date.', text)\n text = re.sub(r'(\\w{3,4,5})-(\\d{1,2})-(\\d{4})', 'date.', text)\n text = re.sub(r'(\\d{1,2})/(\\d{1,2})/(\\d{4})', 'date.', text)\n text = re.sub(r'(\\w{3}). (\\d{1,2}), (\\d{4})', 'date.', text)\n text = re.sub(r'(\\w{3}). (\\d{1,2}) (\\d{4})', 'date.', text)\n text = re.sub(r'&lt;= Today’s Date AND', 'date.', text)\n text = re.sub(r'yyyy-mm-dd', 'date', text)\n # remove text written between small braces\n text = re.sub(r'<.+?>', \"\", text)\n text = text.replace(\"e.g.,\", \" \")\n text = text.replace(\"e.g.\", \" \")\n text = text.replace(\"i.e.,\", \" \")\n text = text.replace(\"i.e.\", \" \")\n # replace non-breaking space with regular space\n text = text.replace(u'\\xa0', u' ')\n # replace all punctuations with space\n text = text.replace('-->', \" \")\n text = text.replace('--', \" \")\n text = text.replace('-', \" \")\n text = text.replace('/', \" \")\n text = text.replace('&amp;', \" \")\n text = text.replace(' * ', \". 
\")\n text = re.sub(r\"\\\"|\\#|\\“|\\*|\\'|\\]|\\^|\\`|\\(|\\)|\\~\", \"\", text)\n text = re.sub(r\"\\\"|\\$|\\%|\\&|\\/|\\|\\=|\\>|\\<|\\@|\\[|\\\\|\\]|\\{|\\||\\}\", \" \", text)\n text = text.replace('$', \"\")\n text = text.replace('?', \".\")\n text = text.replace('+', \" \")\n text = re.sub(r\" \\d\\.\\d\\.N \", \" \", text)\n text = re.sub(r\" \\d\\.\\d\\.b.\", \" \", text)\n text = re.sub(r\" \\d\\.\\d\\.b \", \" \", text)\n text = re.sub(r\"\\d\\.\\d\\.N\", \" \", text)\n text = re.sub(r\"\\d\\.\\d\\.X\", \" \", text)\n text = re.sub(r\"v\\d\\.\\d\\.\\d+\", \" \", text)\n text = re.sub(r\"V\\d\\.\\d\\.\\d+\", \" \", text)\n text = re.sub(r\"v\\d\\.\\d+\", \" \", text)\n text = re.sub(r\"V\\d\\.\\d+\", \" \", text)\n text = re.sub(r\"\\d\\.\\d+\", \" \", text)\n text = re.sub(r\"\\d\\.\\d\\.\\d+\", \" \", text)\n text = text.replace(\"V1\", \" \")\n text = text.replace(\"v1\", \" \")\n # remove digits from text\n text = re.sub(r\"\\d+\", \"\", text)\n text = text.replace('lt;=', \" \")\n text = text.replace('.!', \".\")\n text = text.replace('!.', \".\")\n text = text.replace('!', \".\")\n text = text.replace('... ', \". \")\n text = text.replace('.. ', \". \")\n text = text.replace('..', \".\")\n text = text.replace('. . . ', \". \")\n text = text.replace('. . ', \". \")\n text = text.replace('. . ', \". \")\n text = text.replace(' .', \".\")\n text = text.replace('. . ', \". \")\n text = text.replace('. . ', \". \")\n text = text.replace(':.', \".\")\n text = text.replace(' :', \" \")\n text = text.lower()\n text = text.replace('..', \".\")\n text = ' '.join(text.split())\n\n return text", "def _clean(self, texts, no_punc=False):\n result = ''\n sw = self._sw_no_punc_dict if no_punc else self._sw_dict\n for t in texts:\n if t not in sw:\n result += t\n return result", "def preprocess(text):\r\n\r\n #Regex to remove URL and @ symbol\r\n regex = '@\\S*|http\\S*|www\\S*'\r\n preprocessed_text = re.sub(regex, '', text)\r\n preprocessed_text = deEmojify(preprocessed_text)\r\n preprocessed_text = strip_html(preprocessed_text)\r\n\r\n return preprocessed_text", "def clean(c):", "def prepseq(self, seq):\n\n wtf = re.sub(r'\\*$', '', seq)\n return wtf", "def clean_sentences(sentences_raw):\n out = []\n for sentence in sentences_raw:\n if sentence.split() != []:\n out.append(sentence)\n return out", "def clean_pdf_page(page): # Cleans a pdftotext page\n return [re.sub(\"\\s+\", \" \", i.strip()) for i in page.split(\"\\n\")]" ]
[ "0.65953624", "0.59690464", "0.5925192", "0.57667595", "0.5754863", "0.572727", "0.56936944", "0.5628845", "0.5601159", "0.55797684", "0.55733097", "0.55511904", "0.554485", "0.552263", "0.55109733", "0.5505896", "0.5464441", "0.5445689", "0.54276085", "0.5422343", "0.5397728", "0.537581", "0.53684264", "0.5360577", "0.5351742", "0.53511286", "0.5338936", "0.53250295", "0.5292173", "0.5288661" ]
0.7648608
0
Euclidean distance. Squared Euclidean distance more frequently used
def euc_dist(self, squared=True):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getEuclideanDistance():\r\n global euclideanDistance\r\n return euclideanDistance", "def euclidean_distance(x1, x2):\n return np.sqrt(np.sum(np.square(np.subtract(x1, x2))))", "def euclidean_distance(s1,s2): \n tmpsum = 0\n \n for index,value in enumerate(s1):\n tmpsum += (s1[index]-s2[index])**2\n \n return math.sqrt(tmpsum)", "def euclidean_distance(x1: np.ndarray, x2: np.ndarray) -> float:\n return np.sqrt(np.square(x1 - x2).sum())", "def euclidean_distance(x1, x2):\n return np.sqrt(np.sum(np.power(x1 - x2, 2)))", "def euclidean_distance(a, b):\n return np.linalg.norm(a - b)", "def _euclidian_distance(self, x1, x2):\n a= x1-x2\n a2 = a**2\n b = np.sum(a2, axis=1)\n c = np.sqrt(b)\n return c", "def calculate_euclidean_distance(self, matrix, input, output_neuron):\n result = 0\n\n # Loop over all input data.\n diff = input - matrix[output_neuron]\n return np.sqrt(sum(diff*diff))", "def euclidean(x, y):\n ed = np.sqrt(np.sum((x-y)**2))\n # print ed\n return ed", "def euclidean_metric(x, y):\n if len(x) != len(y):\n raise ValueError(\"Incompatible dimensions.\")\n return np.linalg.norm(x - y)\n \n # Or a slightly longer way:\n return np.sqrt(np.sum(np.subtract(x, y)**2))\n # Or the longest/worst way:\n total = 0\n for i in xrange(len(x)):\n term = x[i] - y[i]\n term = term**2\n total += term\n total = np.sqrt(total)\n return total", "def euclidean_distance(x1, x2):\n return (x2[0] - x1[0])**2 + (x2[1] - x1[1])**2", "def EuclideanDistance( self, a, b ):\n return sqrt( self.EuclideanDistanceSq(a,b) )", "def euclidean_distance(x1, x2):\n\tdistance = 0\n\t# Squared distance between each coordinate\n\tfor i in range(len(x1)):\n\t\tdistance += pow((x1[i], x2[i]), 2)\n\treturn math.sqrt(distance)", "def euclidean(x,y): \n\treturn np.sqrt(np.sum((x-y)**2))", "def calcEuclideanDistance(d1, d2):\n #initiate empty list\n result = []\n #for each index in the list, each position in both list minus each other\n #and to the power of two. 
Add this in the result list\n for idx in range(len(d1)):\n result.append((d1[idx]-d2[idx])**2)\n\n #Return the square of the sum of all values in the result list\n return math.sqrt(sum(result))", "def euclidean_dist(X, y):\n return np.sqrt(np.sum((X - y) ** 2, 1)) # broadcasted calculations", "def euclidean_dist(self):\r\n\r\n real_cat, synth_cat = self.to_cat(self.origdst, self.synthdst)\r\n\r\n real_cat_dem = self.get_demographics(real_cat)\r\n synth_cat_dem = self.get_demographics(synth_cat)\r\n\r\n corr_real_obj = associations(real_cat_dem, theil_u=True, bias_correction=False, plot=False)\r\n corr_synth_obj = associations(synth_cat_dem, theil_u=True, bias_correction=False, plot=False)\r\n\r\n corr_real = corr_real_obj['corr']\r\n corr_rand = corr_synth_obj['corr']\r\n\r\n eucl_matr = distance.cdist(corr_real, corr_rand, 'euclidean')\r\n\r\n eucl = LA.norm(eucl_matr)\r\n\r\n return eucl, eucl_matr", "def _calc_distance(r1, r2):\n return np.linalg.norm(r1 - r2)", "def euclideanDistance(a, b):\n vec = [pow(a[i] - b[i], 2) for i in range(len(a)) if None not in [a[i],b[i]]]\n return (sum(vec) / len(vec)) if len(vec) > 0 else NaN", "def euclidean(x, y):\n return np.sqrt(np.sum((x - y) ** 2))", "def _nn_euclidean_distance(x, y):\n distances = _pdist(x, y)\n return np.maximum(0.0, distances.min(axis=0))", "def _nn_euclidean_distance(x, y):\n distances = _pdist(x, y)\n return np.maximum(0.0, distances.min(axis=0))", "def euclidean(p1, p2):\n return p1.distance(p2)", "def _distance_last_evaluations(self):\n if self.X.shape[0] < 2:\n # less than 2 evaluations\n return np.inf\n return np.sqrt(np.sum((self.X[-1, :] - self.X[-2, :]) ** 2))", "def euclidean_distance(point_one, point_two):\n return np.linalg.norm(point_one-point_two)", "def euclidean_distance(x: np.ndarray, y: np.ndarray) -> float:\n\n distance = np.linalg.norm(x - y)\n\n return distance", "def euclidean_distance(x, y):\n return sqrt(sum(pow(a - b, 2) for a, b in zip(x, y)))", "def euclidean_distance(x, y):\n x1, y1 = x\n x2, y2 = y\n return sqrt((x1 - x2)**2 + (y1 - y2)**2)", "def calculate_euclidean_dist(self):\n x_dist = self._current_loc.get_column() - self._goal_loc.get_column()\n y_dist = self._current_loc.get_row() - self._goal_loc.get_row()\n # Note ** is power operator in Python\n return self._current_cost + sqrt(x_dist**2 + y_dist**2)", "def _get_distance(a, b):\n return np.sqrt(np.sum((a - b) ** 2))" ]
[ "0.73311925", "0.7217947", "0.72092783", "0.71997476", "0.71303356", "0.70067096", "0.6991867", "0.6981154", "0.69637036", "0.6960269", "0.6941405", "0.69364357", "0.6935467", "0.69090146", "0.68860257", "0.68558615", "0.68158317", "0.6806122", "0.6798799", "0.6798116", "0.6768414", "0.6768414", "0.67563295", "0.67387956", "0.67314196", "0.67301726", "0.67196244", "0.67028296", "0.6699426", "0.6698442" ]
0.7476734
0
Test elementwise for fill values and return result as a boolean array.
def isfillvalue(a): a = numpy.asarray(a) if a.dtype.kind == 'i': mask = a == -999999999 elif a.dtype.kind == 'f': mask = numpy.isnan(a) elif a.dtype.kind == 'S': mask = a == '' else: raise ValueError('Fill value not known for dtype %s' % a.dtype) return mask
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_array_booleans(n: int = 1024, random_seed: int = None) -> TYPE_ARRAY:\n return _RNG.randint(0, 2, n).astype(bool)", "def __call__(self, size=1):\n\n # A completely empty numpy array\n results = numpy.zeros(self.shape, dtype=bool)\n\n # Gets a set of random indices that need to be non-zero\n indices = tuple([\n numpy.random.randint(0, each_dim, size) for each_dim in self.shape\n ])\n\n # Makes them non-zero\n results[indices] = True\n\n return(results)", "def to_array(self, fill_value: Optional[Any] = None) -> np.ndarray:\n if fill_value is None:\n fill_value = infer_nan(self.dtype)\n\n tmp = self.astype(float) if is_float(fill_value) else self\n return tmp.to_masked().filled(fill_value=fill_value)", "def isAny(self,test):\n for x in np.nditer(self.t, op_flags=['readonly']):\n if op(x):\n return True\n return False", "def filled(self, fill_value):\n sdata = self.data\n new_data = numpy.ma.filled(sdata, fill_value=fill_value)\n if new_data == sdata:\n return self\n else:\n return type(self)(new_data, self.bset)", "def __check_flat_array__(self):\n if self.flat_array is not None:\n return True\n else:\n return False", "def _nodata_mask(self):\n if self.nodata_value is None:\n return np.ones_like(self.array, dtype=np.bool)\n return self.array != self.nodata_value", "def test_is_filled(self):\n n = 10\n matrix = g.np.random.uniform(size=(n + 1,) * 3) > 0.5\n not_matrix = g.np.logical_not(matrix)\n pitch = 1. / n\n origin = g.np.random.uniform(size=(3,))\n vox = g.trimesh.voxel.VoxelGrid(matrix)\n vox = vox.apply_scale(pitch).apply_translation(origin)\n not_vox = g.trimesh.voxel.VoxelGrid(not_matrix)\n not_vox = not_vox.apply_scale(pitch).apply_translation(origin)\n for a, b in ((vox, not_vox), (not_vox, vox)):\n points = a.points\n # slight jitter - shouldn't change indices\n points += (\n g.np.random.uniform(size=points.shape) - 1) * 0.4 * pitch\n g.np.random.shuffle(points)\n\n # all points are filled, and no empty points are filled\n assert g.np.all(a.is_filled(points))\n assert not g.np.any(b.is_filled(points))\n\n # test different number of dimensions\n points = g.np.stack([points, points[-1::-1]], axis=1)\n assert g.np.all(a.is_filled(points))\n assert not g.np.any(b.is_filled(points))", "def _maybe_fill(arr, fill_value=np.nan):\n if _is_na_compat(arr, fill_value):\n arr.fill(fill_value)\n return arr", "def flag(self, arr):\n return arr", "def getBooleanArray2D(self) -> typing.List[typing.List[bool]]:\n ...", "def setStructureTrueOrFalse(a):\n inputIma = np.copy(a)\n inputImb = np.copy(a)\n ndims = np.ndim(a)\n if ndims == 2:\n inputImb[1][1] = 1\n inputIma[1][1] = 0\n deletableTemp = countObjects(inputImb, inputIma)\n else:\n inputImb[1][1][1] = 1\n inputIma[1][1][1] = 0\n deletableTemp = countObjects(inputImb, inputIma)\n return deletableTemp", "def is_full(self) -> bool:\n return self._array[0].all()", "def all(x) -> bool:\n pass", "def testTicketDM4063(self):\n a = np.array([0, 1, 0, 23], dtype=np.uint16)\n b = np.array([True, True, False, False], dtype=bool)\n acast = np.array(a != 0, dtype=bool)\n orArr = acast | b\n desOrArr = np.array([True, True, False, True], dtype=bool)\n # Note: assertEqual(bool arr, bool arr) fails with:\n # ValueError: The truth value of an array with more than one element is\n # ambiguous\n try:\n self.assertTrue(np.all(orArr == desOrArr))\n except Exception as e:\n print(f\"Failed: {orArr!r} != {desOrArr!r}: {e}\")\n raise", "def all(self, axis=None, keepdims=False, out=None):\n return np.logical_and.reduce(self, out=out, axis=axis, 
keepdims=keepdims)", "def __bool__(self):\n return not(self.outcome != 0 or self.filled)", "def _get_one_free_mask(self, param_name):\n bounds = self._get_one_bound(param_name)\n return array([not(b == False) for b in bounds])", "def PLCTYPE_ARR_BOOL(n: int) -> Type[Array]:\n return c_bool * n", "def generate_boolean_vector(f,q,r,DIMS):\n b = None\n for i in range(DIMS):\n if b is None:\n b = (f[:,i]<q[i]+r[i]) & (f[:,i]>q[i])\n else :\n b = b & (f[:,i]<q[i]+r[i]) & (f[:,i]>q[i])\n return b", "def local_fill_sink(node):\r\n if not isinstance(node.op, T.Elemwise) or node.op == T.fill:\r\n return False\r\n models = []\r\n inputs = []\r\n for input in node.inputs:\r\n if input.owner and input.owner.op == T.fill:\r\n models.append(input.owner.inputs[0])\r\n inputs.append(input.owner.inputs[1])\r\n else:\r\n inputs.append(input)\r\n if not models:\r\n return False\r\n c = node.op(*inputs)\r\n for model in models:\r\n c = T.fill(model, c)\r\n return [c]", "def local_useless_fill(node):\r\n if node.op == T.fill:\r\n r, v = node.inputs\r\n if v.type == node.outputs[0].type:\r\n # this is a useless fill, erase it.\r\n return [v]", "def all_equal(value: Any, array: Any):\n\n if value is None:\n return False\n if not value:\n # if `value` is falsey, then just 1 truthy value in `array`\n # is sufficient to return False. We assume here that np.any is\n # optimized to return on the first truthy value in `array`.\n try:\n return not np.any(array)\n except (TypeError, ValueError): # pragma: no cover\n pass\n if np.issubdtype(array.dtype, np.object_):\n # we have to flatten the result of np.equal to handle outputs like\n # [np.array([True,True]), True, True]\n return all(flatten(np.equal(value, array, dtype=array.dtype)))\n else:\n # Numpy errors if you call np.isnan on custom dtypes, so ensure\n # we are working with floats before calling isnan\n if np.issubdtype(array.dtype, np.floating) and np.isnan(value):\n return np.all(np.isnan(array))\n else:\n # using == raises warnings from numpy deprecated pattern, but\n # using np.equal() raises type errors for structured dtypes...\n return np.all(value == array)", "def assertAllFinite(self, a):\n is_finite = np.isfinite(self._GetNdArray(a))\n all_true = np.ones_like(is_finite, dtype=np.bool)\n self.assertAllEqual(all_true, is_finite)", "def ret(x):\n color = true_color if x else false_color\n return np.tile(color, (SIZE, SIZE, 1)).astype(np.uint8)", "def mask_nodata(self, fill_value=np.nan):\n _da = self._obj\n if self.nodata is not None and self.nodata != fill_value:\n mask = _da.notnull() if np.isnan(self.nodata) else _da != self.nodata\n _da = _da.where(mask, fill_value)\n _da.raster.set_nodata(fill_value)\n return _da", "def assertAllNan(self, a):\n is_nan = np.isnan(self._GetNdArray(a))\n all_true = np.ones_like(is_nan, dtype=np.bool)\n self.assertAllEqual(all_true, is_nan)", "def full(self):\n for x in range(0,3):\n for y in range(0,3):\n if self[x,y] is None:\n return False\n return True", "def thresholded_arr(arr, threshold=0.0, threshold2=None, fill_val=np.nan):\r\n a2 = np.empty_like(arr)\r\n a2.fill(fill_val)\r\n mth = threshold_arr(arr, threshold, threshold2)\r\n idx, vals = mth[:-1], mth[-1]\r\n a2[idx] = vals\r\n\r\n return a2", "def create_mask(shape):\n return np.zeros(shape).astype(bool)" ]
[ "0.60772234", "0.59158", "0.5880896", "0.5725829", "0.5717447", "0.57084936", "0.56891406", "0.5651735", "0.5627813", "0.56104505", "0.5576027", "0.5572044", "0.55715203", "0.5566624", "0.55537015", "0.5498187", "0.5478841", "0.54585373", "0.5433937", "0.5429966", "0.5427257", "0.5415509", "0.5415443", "0.53991354", "0.53744674", "0.53455204", "0.5342473", "0.53391397", "0.53261834", "0.5322758" ]
0.5984257
1
Return the start/stop times in milliseconds since 1-1-1970
def as_millis(self): return int(ntplib.ntp_to_system_time(self.start) * 1000), int(ntplib.ntp_to_system_time(self.stop) * 1000)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def runtime_cal(start,end) :\n run_time = end - start\n mm = int(run_time/60)\n ss = round(run_time%60)\n return mm, ss", "def get_time_ms():\n return int(round(time.time() * 1000))", "def getTimes():", "def getTimes():", "def getTimes():", "def elapsed_micros(start: int, /) -> int:", "def start_time(self) -> float:\r\n ...", "def time_ms():\n return int(1000 * time.time())", "def _STEPS2TIME(step):\n return step/1000.", "def start_time():\n t = [time.clock(), time.time()]\n return t", "def elapsed_millis(start: int, /) -> int:", "def __get_times(self):\n data = self.simulate_file.readlines()\n data = list(map(str.strip, data))\n data = list(map(float, data))\n start = data[0]\n times = data[1:]\n return (start, times)", "def millis(start_time):\n dt = datetime.now() - start_time\n ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0\n return ms", "def millis() -> int:", "def getTime(self):\n return self.step / (self.max_step + int(self.include))", "def getTime():\n\n return float(time.perf_counter()*1000)", "def calculate_time(start_time):\r\n return round(time() - start_time, 2)", "def _TIME2STEPS(time):\n return int(time*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def startTime(self) -> float:\n try: return self.times[0]\n except IndexError: return 0.0", "def time(self):\n return self._clock() - self._starttime", "def curTimeMs():\n\treturn int((datetime.utcnow() - datetime(1970,1,1)).total_seconds() * 1000)", "def millis():\n return int(round(time() * 1000))", "def _nowms():\n return int(time.time() * 1000)", "def _unit_ms(self):\n return (self.time_base / 1000.0) / 60.0" ]
[ "0.6986291", "0.6958346", "0.69455504", "0.69455504", "0.69455504", "0.69351584", "0.6922508", "0.6904034", "0.69000614", "0.6889413", "0.6834017", "0.6818947", "0.6816358", "0.67783904", "0.67711294", "0.67618895", "0.67332286", "0.6714469", "0.6713183", "0.6713183", "0.6713183", "0.6713183", "0.6713183", "0.6713183", "0.6673511", "0.6670532", "0.6664828", "0.6660712", "0.66366655", "0.663023" ]
0.73437476
0
Function to recursively check if two dicts are equal
def dict_equal(d1, d2): if isinstance(d1, dict) and isinstance(d2, dict): # check keysets if set(d1) != set(d2): return False # otherwise loop through all the keys and check if the dicts and items are equal return all((dict_equal(d1[key], d2[key]) for key in d1)) # check equality on other objects else: return d1 == d2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dict_equal(d1: Dict, d2: Dict) -> bool:\n\n # iterate over the dict with more keys\n # di is the dictionary to iterate over\n # dj is the one to compare to\n if len(d2) > len(d1):\n di = d2\n dj = d1\n else:\n di = d1\n dj = d2\n for key, value in di.items():\n # check if key is also in d2 and if the value is the same\n if key not in dj.keys():\n return False\n else:\n value_j = dj[key]\n if type(value) is dict and type(value_j) is dict:\n # if its again a dictionary -> recursion\n if not dict_equal(value, value_j):\n return False\n\n elif type(value) is np.ndarray and type(value_j) is np.ndarray:\n if not np.array_equal(value, value_j):\n return False\n\n # check if both are the same type of object\n elif type(value) is not type(value_j):\n return False\n\n elif value != value_j:\n return False\n\n return True", "def compare_nested_dicts(dict1,dict2):\n\n if sorted(dict1.keys()) != sorted(dict2.keys()):\n return False\n\n for key in dict1:\n if isinstance(dict1[key],dict):\n res = compare_nested_dicts(dict1[key],dict2[key])\n if not res:\n return False\n else:\n continue\n if not isinstance(dict1[key],(six.string_types,list,NoneType)) and not np.allclose(dict1[key],dict2[key]):\n return False\n elif isinstance(dict1[key],(six.string_types,list,NoneType)) and not dict1[key] == dict2[key]:\n return False\n\n return True", "def equivalent_dicts(_a, _b):\n for _key in _a.keys():\n if _a[_key] != _b[_key]:\n return False\n return True", "def dicts_equal(lhs, rhs):\n if len(lhs.keys()) != len(rhs.keys()):\n return False\n\n for key, val in rhs.items():\n val_ref = lhs.get(key, None)\n if val != val_ref:\n return False\n\n return True", "def compare_dict(a: dict, b: dict):\n\n for key, value in a.items():\n assert key in b\n\n if key not in b:\n continue\n\n if isinstance(value, dict):\n compare_dict(value, b[key])\n else:\n assert value == b[key]", "def test_01_is_equal_true(self):\n\n dict1 = {\"a\": \"1\", \"b\": \"2\"}\n dict2 = {\"a\": \"1\", \"b\": \"2\"}\n items_equal = utils.is_equal(dict1, dict2)\n self.assertTrue(items_equal)", "def compare_dicts(dict1, dict2, dict1_name=\"d1\", dict2_name=\"d2\", path=\"\"):\n # Setup paths to track key exploration. The path parameter is used to allow\n # recursive comparisions and track what's being compared.\n result = True\n for key in dict1.keys():\n dict1_path = \"{}{}[{}]\".format(dict1_name, path, key)\n dict2_path = \"{}{}[{}]\".format(dict2_name, path, key)\n if key not in dict2.keys():\n log.debug(\"%s not a valid key in %s.\", dict1_path, dict2_path)\n result = False\n elif isinstance(dict1[key], dict) and isinstance(dict2[key], dict):\n log.debug(\n \"%s and %s contain dictionary. Evaluating.\", dict1_path,\n dict2_path\n )\n result = compare_dicts(\n dict1[key], dict2[key], dict1_name, dict2_name,\n path=\"[{}]\".format(key)\n )\n elif isinstance(dict1[key], list) and isinstance(dict2[key], list):\n log.debug(\n \"%s and %s key '%s' contains list. 
Validating dict1 items \"\n \"exist in dict2.\", dict1_path, dict2_path, key\n )\n if not all([bool(item in dict2[key]) for item in dict1[key]]):\n log.debug(\n \"Mismatch: %s value is '%s' while %s value is '%s'.\",\n dict1_path, dict1[key], dict2_path, dict2[key]\n )\n result = False\n # Hack for NetBox v2.6.7 requiring integers for some values\n elif key in [\"status\", \"type\"]:\n if dict1[key] != dict2[key][\"value\"]:\n log.debug(\n \"Mismatch: %s value is '%s' while %s value is '%s'.\",\n dict1_path, dict1[key], dict2_path, dict2[key][\"value\"]\n )\n result = False\n elif dict1[key] != dict2[key]:\n log.debug(\n \"Mismatch: %s value is '%s' while %s value is '%s'.\",\n dict1_path, dict1[key], dict2_path, dict2[key]\n )\n # Allow the modification of device sites by ignoring the value\n if \"site\" in path and key == \"name\":\n log.debug(\"Site mismatch is allowed. Moving on.\")\n else:\n result = False\n if result:\n log.debug(\"%s and %s values match.\", dict1_path, dict2_path)\n else:\n log.debug(\"%s and %s values do not match.\", dict1_path, dict2_path)\n return result\n log.debug(\"Final dictionary compare result: %s\", result)\n return result", "def check_data_dict_identical(data_dict_1, data_dict_2):\n result = True # assume True, unless proven otherwise\n if data_dict_1.keys() != data_dict_2.keys():\n result = False\n for key in data_dict_1.keys():\n if data_dict_1[key].identical(data_dict_2[key]) is not True:\n result = False\n return result", "def compare_dicts(dict1, dict2):\n for k,v in dict2.items():\n if v != dict1[k]:\n return False\n return True", "def test_match_ordered(self):\n first = dict(\n a=1,\n b=2,\n )\n\n second = OrderedDict(\n b=2,\n a=1,\n )\n\n check_keys_match_recursive(first, second, [])", "def test_02_is_equal_false(self):\n\n dict1 = {\"a\": \"1\", \"b\": \"2\"}\n dict2 = {\"a\": \"1\", \"b\": \"3\"}\n items_equal = utils.is_equal(dict1, dict2)\n self.assertFalse(items_equal)", "def dict_match(left, right, res=None):\n if res is None:\n res = [True, ]\n if res[0] == False:\n return False\n for k in right.keys():\n if (k in left):\n if (isinstance(left[k], dict) and isinstance(right[k], dict)):\n dict_match(left[k], right[k], res=res)\n else:\n res[0] = res[0] and left[k] == right[k]\n if res[0] == False:\n break\n return res[0]", "def compare_dictionaries(dict_1, dict_2, dict_1_name, dict_2_name, path=\"\"):\n import numpy as np\n\n err = \"\"\n key_err = \"\"\n value_err = \"\"\n old_path = path\n for k in dict_1.keys():\n path = old_path + \"[%s]\" % k\n if k not in dict_2:\n key_err += f\"Key {dict_2_name}{path} not in {dict_2_name}\\n\"\n else:\n if isinstance(dict_1[k], dict) and isinstance(dict_2[k], dict):\n err += compare_dictionaries(dict_1[k], dict_2[k], \"d1\", \"d2\", path)\n else:\n o1 = dict_1[k]\n o2 = dict_2[k]\n try:\n if o1 != o2:\n value_err += (\n \"Value of {}{} ({}) not same as {}{} ({})\\n\".format(\n dict_1_name,\n path,\n dict_1[k],\n dict_2_name,\n path,\n dict_2[k],\n )\n )\n except ValueError:\n if not np.array_equal(np.asarray(o1), np.asarray(o1)):\n value_err += (\n \"Value of {}{} ({}) not same as {}{} ({})\\n\".format(\n dict_1_name,\n path,\n dict_1[k],\n dict_2_name,\n path,\n dict_2[k],\n )\n )\n\n for k in dict_2.keys():\n path = old_path + \"[%s]\" % k\n if k not in dict_1:\n key_err += f\"Key {dict_2_name}{path} not in {dict_1_name}\\n\"\n\n return key_err + value_err + err", "def assertOrderedDictEqual(self, first, second):\n self.assertEqual(list(first.keys()), list(second.keys()))\n first_iter = 
first.items().__iter__()\n second_iter = second.items().__iter__()\n i = 0\n while True:\n try:\n first_k, first_v = next(first_iter)\n second_k, second_v = next(second_iter)\n with self.subTest(key=first_k, i=i):\n self.assertEqual(first_k, second_k)\n self.assertEqual(first_v, second_v)\n except StopIteration:\n break\n i += 1", "def dictionaries_should_be_equal(self,dict1,dict2,msg=None,values=True):\r\n keys = self._keys_should_be_equal(dict1,dict2,msg,values)\r\n self._key_values_should_be_equal(keys,dict1,dict2,msg,values)", "def test_04_is_equal_with_ignore_default(self):\n\n dict1 = {\"a\": \"1\", \"created\": \"2\"}\n dict2 = {\"a\": \"1\", \"created\": \"3\"}\n items_equal = utils.is_equal(dict1, dict2)\n self.assertTrue(items_equal)", "def test_has_same_keys_as(self):\n\n # This is a.\n origin = {\"a\": 1, \"b\": 1}\n\n # This is b.\n target = {\"a\": 1, \"b\": 2, \"c\": {\"a\": 1, \"b\": 3, \"c\": {\"x\": \"x\"}}}\n\n # We want to test that all keys of a are into b.\n self.assertEqual(True, Dict(target).has_same_keys_as(origin))\n # We want to test that all keys of b are into a.\n self.assertEqual(False, Dict(origin).has_same_keys_as(target))\n\n origin[\"c\"] = {\"a\": 1, \"b\": 3, \"c\": {\"x\": \"x\"}}\n\n # We want to test that all keys of a are in b.\n self.assertEqual(True, Dict(target).has_same_keys_as(origin))\n # We want to test that all keys of b are in a.\n self.assertEqual(True, Dict(origin).has_same_keys_as(target))\n\n del origin[\"c\"][\"c\"]\n # We want to test that all keys of b are in a.\n self.assertEqual(False, Dict(origin).has_same_keys_as(target))", "def equal_but_different_dicts(self):\r\n d1 = {k:1 for k in \"abcdefghijklmnopqrstuvwxyz\"}\r\n d2 = dict(d1)\r\n for i in xrange(10000):\r\n d2[i] = 1\r\n for i in xrange(10000):\r\n del d2[i]\r\n\r\n # Check that our dicts are equal, but with different key order.\r\n self.assertEqual(d1, d2)\r\n self.assertNotEqual(d1.keys(), d2.keys())\r\n\r\n return d1, d2", "def diff_dicts(dict_1, dict_2):\n differ = deepdiff.DeepDiff(dict_1, dict_2)\n return len(differ) > 0, differ", "def equality_check(a, b):\n\n def check_item(x, y, attr):\n if isinstance(x, hoomd.operation._HOOMDGetSetAttrBase):\n equality_check(x, y)\n return\n if isinstance(x, Mapping):\n for k, v in x.items():\n assert k in y, f\"For attr {attr}, key difference {k}\"\n check_item(v, y[k], \".\".join((attr, str(k))))\n return\n if not isinstance(x, str) and hasattr(x, \"__len__\"):\n assert len(x) == len(y)\n for i, (v_x, v_y) in enumerate(zip(x, y)):\n check_item(v_x, v_y, attr + f\"[{i}]\")\n return\n if isinstance(x, float):\n assert numpy.isclose(x, y), f\"attr '{attr}' not equal:\"\n return\n assert x == y, f\"attr '{attr}' not equal:\"\n\n if not isinstance(a, hoomd.operation._HOOMDGetSetAttrBase):\n return a == b\n assert type(a) == type(b)\n\n _check_obj_attr_compatibility(a, b)\n\n for attr in a.__dict__:\n if attr in a._skip_for_equality:\n continue\n\n if attr == \"_param_dict\":\n param_keys = a._param_dict.keys()\n b_param_keys = b._param_dict.keys()\n # Check key equality\n assert param_keys == b_param_keys, \"Incompatible param_dict keys:\"\n # Check item equality\n for key in param_keys:\n check_item(a._param_dict[key], b._param_dict[key], key)\n continue\n\n if attr == \"_typeparam_dict\":\n keys = a._typeparam_dict.keys()\n b_keys = b._typeparam_dict.keys()\n # Check key equality\n assert keys == b_keys, \"Incompatible _typeparam_dict:\"\n # Check item equality\n for key in keys:\n for type_, value in 
a._typeparam_dict[key].items():\n check_item(value, b._typeparam_dict[key][type_], \".\".join(\n (key, str(type_))))\n continue\n\n check_item(a.__dict__[attr], b.__dict__[attr], attr)", "def match_two_dicts(small_dict, big_dict):\n big_dict_keys = big_dict.keys()\n for key in small_dict.keys():\n if key not in big_dict_keys:\n raise KeyError(\"Wrong argument name '%s'\" % key)\n return True", "def same_keys(a, b):\n for ak in a.keys():\n if ak not in b:\n return False\n for bk in b.keys():\n if bk not in a:\n return False\n return True", "def _assert_equal(d_0, d_1):\n # Compare arrays.\n if _is_array_like(d_0):\n try:\n ae(d_0, d_1)\n except AssertionError:\n ac(d_0, d_1)\n # Compare dicts recursively.\n elif isinstance(d_0, dict):\n assert set(d_0) == set(d_1)\n for k_0 in d_0:\n _assert_equal(d_0[k_0], d_1[k_0])\n else:\n # General comparison.\n assert d_0 == d_1", "def dict_arr_eq(d1: DataDict, d2: DataDict):\n return (\n d1.keys() == d2.keys()\n and all(d1[k].shape == d2[k].shape for k in d1.keys())\n and all(np.allclose(d1[k], d2[k]) for k in d1.keys())\n )", "def check_dict_almost_equal(dict_a: Dict[Any, float],\n dict_b: Dict[Any, float],\n decimal: int = 7) -> bool:\n if set(dict_a.keys()) != set(dict_b.keys()):\n return False\n for key in dict_a.keys():\n # Same test as np.testing.assert_almost_equal\n if abs(dict_a[key] - dict_b[key]) >= (1.5 * 10**(-decimal)):\n return False\n return True", "def assertDictAlmostEqual(self, dict1, dict2):\n self.assertListEqual(dict1.keys(), dict2.keys())\n for i, j in zip(dict1.keys(), dict2.keys()):\n self.assertListAlmostEqual(list(dict1[i]), list(dict2[j]))", "def compare_dicts(dict1, dict2):\n\n if dict1 is None and dict2 is None:\n return True\n if dict1 is None or dict2 is None:\n return False\n\n both_equal = True\n for dict1_item, dict2_item in zip(dict1.items(), dict2.items()):\n if dict1_item != dict2_item:\n msg = (_(\"%(label1)s: %(item1)s \\n is not equal to \\n:\"\n \"%(label2)s: %(item2)s\")\n % {'label1': CompareUtils.MISMATCH_VALUE2_LABEL,\n 'item1': dict1_item,\n 'label2': CompareUtils.MISMATCH_VALUE1_LABEL,\n 'item2': dict2_item})\n log.warning(msg)\n both_equal = False\n break\n return both_equal", "def recursiveEquals(obj1, obj2, **kwargs):\n if isIndexable(obj1) != isIndexable(obj2):\n return False\n if isIndexable(obj1):\n for entry1, entry2 in zip(obj1, obj2):\n if not recursiveEquals(entry1, entry2, **kwargs):\n return False\n return True\n # Do the numeric evaluation\n num1 = expressionToNumber(evaluate(obj1, **kwargs))\n num2 = expressionToNumber(evaluate(obj2, **kwargs))\n return np.isclose(num1, num2)", "def test_single_level(self):\n dict_1 = {\n 'key_1': 'original_value_1',\n 'key_2': 'original_value_2'\n }\n dict_2 = {\n 'key_2': 'new_value_2',\n 'key_3': 'new_value_3'\n }\n\n result = deep_dict_merge(dict_1, dict_2)\n\n assert dict_1 == {\n 'key_1': 'original_value_1',\n 'key_2': 'original_value_2'\n }\n assert dict_2 == {\n 'key_2': 'new_value_2',\n 'key_3': 'new_value_3'\n }\n assert result == {\n 'key_1': 'original_value_1',\n 'key_2': 'new_value_2',\n 'key_3': 'new_value_3',\n }", "def equals_dict(self, other: GenoDistribDict):\n return self.to_json_dict() == other" ]
[ "0.82100755", "0.7669566", "0.7605134", "0.7587031", "0.7573984", "0.7369974", "0.735764", "0.72046685", "0.71447515", "0.70782727", "0.70460093", "0.69822705", "0.6968151", "0.69324183", "0.69310194", "0.69286764", "0.6905533", "0.6891169", "0.6882714", "0.6858757", "0.6847502", "0.6845294", "0.68202007", "0.6736955", "0.67144245", "0.6702859", "0.6685637", "0.6685586", "0.6669425", "0.6668534" ]
0.7747674
1
Quantify misfit with some example data
def test_default_quantify_misfit(tmpdir): preprocess = Default(syn_data_format="ascii", obs_data_format="ascii", unit_output="disp", misfit="waveform", adjoint="waveform", path_preprocess=tmpdir, path_solver=TEST_SOLVER, source_prefix="SOURCE", ntask=2, ) preprocess.setup() preprocess.quantify_misfit( source_name="001", save_residuals=os.path.join(tmpdir, "residuals_ascii"), save_adjsrcs=tmpdir ) # !!! throws a segy error because data are not in the right format # preprocess.syn_data_format = "SU" # preprocess.obs_data_format = "SU" # preprocess.quantify_misfit( # source_name="001", # save_residuals=os.path.join(tmpdir, "residuals_su"), # save_adjsrcs=tmpdir # ) assert(len(glob(os.path.join(tmpdir, "*"))) == 3) residuals = open(os.path.join(tmpdir, "residuals_ascii")).readlines() assert(len(residuals) == 2) assert(float(residuals[0]) == pytest.approx(0.0269, 3))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_genextreme_fit(self):\n p = generic.fit(self.genextreme, \"genextreme\")\n np.testing.assert_allclose(p, (0.20949, 297.954091, 75.7911863), 1e-5)", "def fit(self, X):", "def test_fit(self):\n X = np.zeros((2, 3), dtype=np.float64)\n snv = SNV(q=50)\n try:\n _ = snv.fit(X)\n except:\n self.assertTrue(False)", "def test_valid_calculation_of_quantile(alpha: Any) -> None:\n n = 30\n check_alpha_and_n_samples(alpha, n)", "def misfit(self):\n residuals = self.x.fun[0 : self.surveys[0].b.shape[0]] - self.surveys[0].b\n val = np.linalg.norm(self.surveys[0].obs_w * residuals)\n misfit = val * self.wr\n\n return misfit", "def fit_test(self):", "def fit(self, x):\n pass", "def misfit(a,b):\n n = a.numel()\n return torch.norm(a-b)/n", "def fit(self):\n self.eval_chisq([1, 1, 1, 1])", "def test_invalid_calculation_of_quantile(alpha: Any) -> None:\n n = 10\n with pytest.raises(\n ValueError, match=r\".*Number of samples of the score is too low*\"\n ):\n check_alpha_and_n_samples(alpha, n)", "def fit(self, x):\n raise NotImplementedError()", "def test_test_fit_equal_values(self):\n instance = GammaUnivariate()\n instance.fit(np.array([5, 5, 5, 5, 5, 5]))\n assert instance.constant_value == 5", "def quantization_error(self, data):\n self._check_input_len(data)\n return norm(data-self.quantization(data), axis=1).mean()", "def fit(self, X, y):", "def fit(self, X, y):", "def fit(self, X, y):", "def test_robust_detrend_transform(self):\n snv = SNV(q=50, robust=True, detrend=True)\n X_t = snv.fit_transform(self.X)\n self.assertTrue(np.allclose(X_t, 0.0))\n\n snv = SNV(q=51, robust=True, detrend=True)\n X_t = snv.fit_transform(self.X)\n self.assertTrue(np.allclose(X_t, 0.0))\n\n snv = SNV(q=49, robust=True, detrend=True)\n X_t = snv.fit_transform(self.X)\n self.assertTrue(np.allclose(X_t, 0.0))\n\n snv = SNV(q=50, robust=True, detrend=True)\n X_t = snv.fit_transform(self.Xq)\n self.assertTrue(np.allclose(X_t, 0.0))", "def partial_fit(self, X, y=...):\n ...", "def partial_fit(self, X, y=...):\n ...", "def Qc_fit(x, a, b, c, d, e, f, g, h, i, k):\n x1 = x[0] # I\n x2 = x[1] # dT\n m = (i * x1 ** 4 + a * x1 ** 3 + b * x1 ** 2 + c * x1 + d)\n b = (k * x1 ** 4 + e * x1 ** 3 + f * x1 ** 2 + g * x1 + h)\n return m * x2 + b", "def test_undesirable_deviations():\n f = np.asarray([\n [0.99, 1.0, 0.5],\n [0.69, 0.6, 0.6]])\n R = common_metrics.undesirable_deviations(f, maximise=True)\n expected = np.asarray(\n [-0.245, 0.0])\n assert np.allclose(R, expected)\n R = common_metrics.undesirable_deviations(f, maximise=False)\n expected = np.asarray(\n [-0.005, -0.045])\n assert np.allclose(R, expected)", "def fit_test_data():\n cur_a, cur_b = \"sin\", \"cos\"\n a = [sin(x/7.) 
for x in range(1000)]\n b = [cos(x/11.)/3 for x in range(1000)]\n # \"\"\"\n\n parameters = {\"overlap\": True, \"normalized\": True, \"derivative\": False, \"diag_factor\": .5}\n error, a_range, b_range = get_fit(a, cur_a, b, cur_b, **parameters)\n\n msg = \"{:s} range: {:d}-{:d}, {:s} range: {:d}-{:d}, deviation {:.4f}\"\n print(msg.format(cur_a, *a_range, cur_b, *b_range, error))", "def fit():\n pass", "def test_detrend_transform(self):\n snv = SNV(q=50, robust=False, detrend=True)\n X_t = snv.fit_transform(self.X)\n self.assertTrue(np.allclose(X_t, 0.0))", "def testQDA(self):\n qdaObj = qda.QDA(self.data, self.classes)\n qdaObj.fit_model()\n correctAns = np.array([5.01, 3.42, 1.46, .24])\n npTest = np.testing.assert_array_almost_equal(qdaObj.fitted_model.means_[0], correctAns, decimal=2)\n self.assertEqual(npTest, None)", "def theoretical_effective(dataset):\n return float(sum(dataset))/len(dataset)", "def analyse_goodness_of_fit(x_data, y_data, poly_fit, fit_name):\n\n # useful display and computational data\n y_fitted = poly_fit(x_data)\n min_x_display = min(x_data) - abs(max(x_data) - min(x_data)) * 0.1\n max_x_display = max(x_data) + abs(max(x_data) - min(x_data)) * 0.1\n x_fitted_display = np.linspace(min_x_display, max_x_display)\n y_fitted_display = poly_fit(x_fitted_display)\n\n # goodness of fit indicators\n dof = len(y_data) - (poly_fit.order+1) # degrees of freedom\n SSE = np.sum((y_data - y_fitted) ** 2) # Sum of Squared Errors\n SST = np.sum((y_data - np.mean(y_data)) ** 2) # Total Sum of Squares (about the mean)\n R2 = 1.0 - SSE/SST # R squared\n RMSE = math.sqrt( SSE / dof ) # Root Mean Squared Error\n\n fig = plt.figure()\n ax = fig.add_subplot(1, 2, 1)\n\n # plot of the fitted polynomial itself\n ax.plot(x_fitted_display, y_fitted_display, color='C6')\n ax.scatter(x_data, y_data, color='C7')\n ax.set(title=\"Fitted polynomial (deg{}) for \\'{}\\'\".format(poly_fit.order, fit_name), xlabel=\"x\", ylabel=\"y\")\n\n # histogram of residuals\n ax2 = fig.add_subplot(1, 2, 2)\n sns.distplot(y_data - y_fitted, kde=True, ax=ax2)\n ax2.set(title=\"Histogram of residuals\", xlabel=\"Residual value $y_k - \\\\widehat{y_k}$\", ylabel=\"Count\")\n\n # display of fit indicators\n fig.text(0.02, 0.02, '$SSE = {0:.6f}$'.format(SSE), fontsize='10')\n fig.text(0.27, 0.02, '$R^2 = {0:.6f}$'.format(R2), fontsize='10')\n fig.text(0.52, 0.02, '$RMSE = {0:.6f}$'.format(RMSE), fontsize='10')\n\n fig.tight_layout()\n fig.subplots_adjust(bottom=0.2)\n\n figurefiles.save_in_perfs_fits_folder(fig, \"Polyfit_{}_order_{}.pdf\".format(fit_name, poly_fit.order))", "def test_self_consistency_no_noise(self):\n popt, pcov = sine_fit(self.data, self.periods)\n print(popt)\n assert_allclose(*fixed_signs(self.p_gt, popt), 1e-4)", "def rls_fit0(xdata: np.ndarray,\n ydata: np.ndarray | ma.MaskedArray) -> tuple:\n if xdata.size < 2:\n raise RuntimeError('too few points for a fit')\n if xdata.size != ydata.shape[-1]:\n raise RuntimeError('number of samples not equal for xdata, ydata')\n\n # perform all computations on 2 dimensional arrays\n img_shape = ydata.shape[:-1]\n yy1 = ydata.reshape(-1, xdata.size)\n\n # calculate weights\n if ma.isMaskedArray(ydata):\n wghts = calc_ma_weights(xdata, ma.getmaskarray(yy1))\n else:\n buff = np.concatenate(([2 * (xdata[1] - xdata[0])],\n xdata[2:] - xdata[0:-2],\n [2 * (xdata[-1] - xdata[-2])]))\n wghts = np.repeat([buff], yy1.shape[0], axis=0)\n wx1 = wghts / xdata\n wx2 = wghts / xdata ** 2\n\n # calculate the Q elements\n q00 = wghts.sum(axis=1)\n q11 = (wx1 * 
yy1).sum(axis=1)\n q22 = (wx2 * yy1 ** 2).sum(axis=1)\n\n # calculate fit parameter and its variance\n num = yy1.count(axis=1) if ma.isMaskedArray(ydata) else len(xdata)\n cc1 = q11 / q00\n if ma.isMaskedArray(ydata):\n cc1[num < 1] = ma.masked\n chi2 = ma.abs(q22 - q00 * cc1 ** 2) / np.clip(num - 1, 1, None)\n chi2[num <= 1] = ma.masked\n sc1 = ma.sqrt(chi2 / q00)\n return (cc1.reshape(img_shape).filled(np.nan),\n sc1.reshape(img_shape).filled(np.nan))\n\n # using only non-MaskedArray functions\n cc1[num < 1] = np.nan\n chi2 = np.abs(q22 - q00 * cc1 ** 2) / np.clip(num - 1, 1, None)\n chi2[num <= 1] = np.nan\n sc1 = np.sqrt(chi2 / q00)\n return cc1.reshape(img_shape), sc1.reshape(img_shape)", "def fit(self, x, y=None):\n x = pd.DataFrame(x)\n q1 = x.quantile(0.25)\n q3 = x.quantile(0.75)\n iqr = q3 - q1\n self.lower_bound = q1 - (1.5 * iqr)\n self.upper_bound = q3 + (1.5 * iqr)\n self.imputer.fit(\n x.where(~((x < self.lower_bound) | (x > self.upper_bound)), np.nan)\n )\n return self" ]
[ "0.62091", "0.6153025", "0.61098146", "0.6083619", "0.6002775", "0.5955667", "0.59443253", "0.5894886", "0.58396775", "0.57995504", "0.5769619", "0.57624996", "0.5748213", "0.5731888", "0.5731888", "0.5731888", "0.57293093", "0.57270885", "0.57270885", "0.56752056", "0.5674761", "0.5673386", "0.56264853", "0.56199473", "0.55836236", "0.5570329", "0.55686116", "0.5564481", "0.55506474", "0.5548756" ]
0.6377925
0
Test that the Pyaflowa preprocess class can quantify misfit over the course of a few evaluations (a line search) and run its finalization task Essentially an integration test testing the entire preprocessing module works as a whole
def test_pyaflowa_line_search(tmpdir): pyaflowa = Pyaflowa( workdir=tmpdir, path_specfem_data=os.path.join(TEST_SOLVER, "mainsolver", "DATA"), path_output=os.path.join(tmpdir, "output"), path_solver=TEST_SOLVER, source_prefix="SOURCE", ntask=2, data_case="synthetic", components="Y", fix_windows="ITER", export_datasets=True, export_figures=True, export_log_files=True, ) pyaflowa.setup() unix.mkdir(pyaflowa.path.output) # usually done by other modules setup save_residuals = os.path.join(tmpdir, "residuals.txt") for source_name in pyaflowa._source_names: for step_count in range(3): # Ignore any outputs, just want to run misfit quantification # misfit will not be reducing but thats okay pyaflowa.quantify_misfit(source_name=source_name, iteration=1, step_count=step_count, save_residuals=save_residuals, save_adjsrcs=tmpdir) pyaflowa.finalize() # Check that final residuals file is the same residuals = np.loadtxt(save_residuals) assert(pytest.approx(pyaflowa.sum_residuals(residuals), .01) == .7463) # Check that atleast one adjoint sources are not zero adjsrcs = glob(os.path.join(tmpdir, "*.adj")) data = np.loadtxt(adjsrcs[0]) assert(data[:, 1].any()) # assert that adjoint sourcse are not zero # Just check file count to see that finalize did what it's supposed to do # since finalize just moves and collects files assert(len(glob(os.path.join(pyaflowa.path.output, "pyaflowa", "figures", "*"))) == 1) assert(len(glob(os.path.join(pyaflowa.path.output, "pyaflowa", "logs", "*"))) == 6) assert(len(glob(os.path.join(pyaflowa.path.output, "pyaflowa", "datasets", "*.csv"))) == 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_predictor():", "def test_active_inference_SPM_1b(self):", "def test_preprocess(self):\r\n\r\n # Should discard all reads due to sequence length being too short\r\n\r\n fasta_files = [self.sample_fasta_file]\r\n qual_files = [self.sample_qual_file]\r\n mapping_file = self.sample_mapping_file\r\n barcode_type = \"golay_12\"\r\n min_seq_len = 200\r\n max_seq_len = 1000\r\n min_qual_score = 25\r\n starting_ix = 1\r\n keep_primer = False\r\n max_ambig = 0\r\n max_primer_mm = 1\r\n trim_seq_len = True\r\n dir_prefix = self.output_dir\r\n max_bc_errors = 2\r\n max_homopolymer = 4\r\n retain_unassigned_reads = False\r\n keep_barcode = False\r\n attempt_bc_correction = True\r\n qual_score_window = 0\r\n disable_primer_check = False\r\n reverse_primers = 'disable'\r\n record_qual_scores = False\r\n discard_bad_windows = False\r\n median_length_filtering = None\r\n added_demultiplex_field = None\r\n\r\n preprocess(fasta_files,\r\n qual_files,\r\n mapping_file,\r\n barcode_type,\r\n min_seq_len,\r\n max_seq_len,\r\n min_qual_score,\r\n starting_ix,\r\n keep_primer,\r\n max_ambig,\r\n max_primer_mm,\r\n trim_seq_len,\r\n dir_prefix,\r\n max_bc_errors,\r\n max_homopolymer,\r\n retain_unassigned_reads,\r\n keep_barcode,\r\n attempt_bc_correction,\r\n qual_score_window,\r\n disable_primer_check,\r\n reverse_primers,\r\n record_qual_scores,\r\n discard_bad_windows,\r\n median_length_filtering,\r\n added_demultiplex_field)\r\n\r\n output_seqs = open(dir_prefix + \"seqs.fna\", \"U\")\r\n output_log = open(dir_prefix + \"split_library_log.txt\", \"U\")\r\n output_histograms = open(dir_prefix + \"histograms.txt\", \"U\")\r\n\r\n actual_seqs = [line for line in output_seqs]\r\n actual_log = [line for line in output_log]\r\n actual_histograms = [line for line in output_histograms]\r\n\r\n expected_seqs = []\r\n expected_log = [\r\n 'Number raw input seqs\\t6\\n',\r\n '\\n',\r\n 'Length outside bounds of 200 and 1000\\t6\\n',\r\n 'Num ambiguous bases exceeds limit of 0\\t0\\n',\r\n 'Missing Qual Score\\t0\\n',\r\n 'Mean qual score below minimum of 25\\t0\\n',\r\n 'Max homopolymer run exceeds limit of 4\\t0\\n',\r\n 'Num mismatches in primer exceeds limit of 1: 0\\n',\r\n '\\n',\r\n 'Sequence length details for all sequences passing quality filters:\\n',\r\n 'No sequences passed quality filters for writing.\\n',\r\n '\\n',\r\n 'Barcodes corrected/not\\t0/0\\n',\r\n 'Uncorrected barcodes will not be written to the output fasta file.\\n',\r\n 'Corrected barcodes will be written with the appropriate barcode category.\\n',\r\n 'Corrected but unassigned sequences will not be written unless --retain_unassigned_reads is enabled.\\n',\r\n '\\n',\r\n 'Total valid barcodes that are not in mapping file\\t0\\n',\r\n 'Sequences associated with valid barcodes that are not in the mapping file will not be written.\\n',\r\n '\\n',\r\n 'Barcodes in mapping file\\n',\r\n 'Sample\\tSequence Count\\tBarcode\\n',\r\n 's2\\t0\\tAGAGTCCTGAGC\\n',\r\n 's1\\t0\\tACACATGTCTAC\\n',\r\n 's3\\t0\\tAACTGTGCGTAC\\n',\r\n '\\n',\r\n 'Total number seqs written\\t0']\r\n expected_histograms = [\r\n '# bins raw sequence lengths, length of sequences that pass quality filters before processing, and lengths of sequences that pass quality filters post processing.\\n',\r\n 'Length\\tRaw\\tBefore\\tAfter\\n',\r\n '20\\t2\\t0\\t0\\n',\r\n '30\\t4\\t0\\t0']\r\n\r\n self.assertEqual(actual_seqs, expected_seqs)\r\n self.assertEqual(actual_log, expected_log)\r\n self.assertEqual(actual_histograms, expected_histograms)\r\n\r\n # With minimal length 
at 5, should retain 4 sequences\r\n\r\n fasta_files = [self.sample_fasta_file]\r\n qual_files = [self.sample_qual_file]\r\n mapping_file = self.sample_mapping_file\r\n barcode_type = \"golay_12\"\r\n min_seq_len = 5\r\n max_seq_len = 1000\r\n min_qual_score = 25\r\n starting_ix = 1\r\n keep_primer = False\r\n max_ambig = 0\r\n max_primer_mm = 0\r\n trim_seq_len = False\r\n dir_prefix = self.output_dir\r\n max_bc_errors = 2\r\n max_homopolymer = 4\r\n retain_unassigned_reads = False\r\n keep_barcode = False\r\n attempt_bc_correction = True\r\n qual_score_window = 0\r\n disable_primer_check = False\r\n reverse_primers = 'disable'\r\n record_qual_scores = False\r\n discard_bad_windows = False\r\n median_length_filtering = None\r\n added_demultiplex_field = None\r\n\r\n preprocess(fasta_files,\r\n qual_files,\r\n mapping_file,\r\n barcode_type,\r\n min_seq_len,\r\n max_seq_len,\r\n min_qual_score,\r\n starting_ix,\r\n keep_primer,\r\n max_ambig,\r\n max_primer_mm,\r\n trim_seq_len,\r\n dir_prefix,\r\n max_bc_errors,\r\n max_homopolymer,\r\n retain_unassigned_reads,\r\n keep_barcode,\r\n attempt_bc_correction,\r\n qual_score_window,\r\n disable_primer_check,\r\n reverse_primers,\r\n record_qual_scores,\r\n discard_bad_windows,\r\n median_length_filtering,\r\n added_demultiplex_field)\r\n\r\n output_seqs = open(dir_prefix + \"seqs.fna\", \"U\")\r\n output_log = open(dir_prefix + \"split_library_log.txt\", \"U\")\r\n output_histograms = open(dir_prefix + \"histograms.txt\", \"U\")\r\n\r\n actual_seqs = [line for line in output_seqs]\r\n actual_log = [line for line in output_log]\r\n actual_histograms = [line for line in output_histograms]\r\n\r\n expected_seqs = [\r\n '>s1_1 a orig_bc=ACACATGTCTAC new_bc=ACACATGTCTAC bc_diffs=0\\n',\r\n 'CCCTTATATATATAT\\n',\r\n '>s2_2 b orig_bc=AGAGTCCTGAGC new_bc=AGAGTCCTGAGC bc_diffs=0\\n',\r\n 'CCCTTTCCA\\n',\r\n '>s3_3 c orig_bc=AACTGTGCGTAC new_bc=AACTGTGCGTAC bc_diffs=0\\n',\r\n 'AACCGGCCGGTT\\n',\r\n '>s1_4 d orig_bc=ACTCATGTCTAC new_bc=ACACATGTCTAC bc_diffs=1\\n',\r\n 'CCCTTACTATATAT\\n']\r\n expected_log = [\r\n 'Number raw input seqs\\t6\\n',\r\n '\\n',\r\n 'Length outside bounds of 5 and 1000\\t0\\n',\r\n 'Num ambiguous bases exceeds limit of 0\\t0\\n',\r\n 'Missing Qual Score\\t0\\n',\r\n 'Mean qual score below minimum of 25\\t0\\n',\r\n 'Max homopolymer run exceeds limit of 4\\t0\\n',\r\n 'Num mismatches in primer exceeds limit of 0: 2\\n',\r\n '\\n',\r\n 'Sequence length details for all sequences passing quality filters:\\n',\r\n 'Raw len min/max/avg\\t29.0/35.0/32.5\\n',\r\n 'Wrote len min/max/avg\\t9.0/15.0/12.5\\n',\r\n '\\n',\r\n 'Barcodes corrected/not\\t1/0\\n',\r\n 'Uncorrected barcodes will not be written to the output fasta file.\\n',\r\n 'Corrected barcodes will be written with the appropriate barcode category.\\n',\r\n 'Corrected but unassigned sequences will not be written unless --retain_unassigned_reads is enabled.\\n',\r\n '\\n',\r\n 'Total valid barcodes that are not in mapping file\\t0\\n',\r\n 'Sequences associated with valid barcodes that are not in the mapping file will not be written.\\n',\r\n '\\n',\r\n 'Barcodes in mapping file\\n',\r\n 'Num Samples\\t3\\n',\r\n 'Sample ct min/max/mean: 1 / 2 / 1.33\\n',\r\n 'Sample\\tSequence Count\\tBarcode\\n',\r\n 's1\\t2\\tACACATGTCTAC\\n',\r\n 's2\\t1\\tAGAGTCCTGAGC\\n',\r\n 's3\\t1\\tAACTGTGCGTAC\\n',\r\n '\\n',\r\n 'Total number seqs written\\t4']\r\n expected_histograms = [\r\n '# bins raw sequence lengths, length of sequences that pass quality filters before processing, and lengths 
of sequences that pass quality filters post processing.\\n',\r\n 'Length\\tRaw\\tBefore\\tAfter\\n',\r\n '0\\t0\\t0\\t1\\n',\r\n '10\\t0\\t0\\t3\\n',\r\n '20\\t2\\t1\\t0\\n',\r\n '30\\t4\\t3\\t0']\r\n\r\n self.assertEqual(actual_seqs, expected_seqs)\r\n self.assertEqual(actual_log, expected_log)\r\n self.assertEqual(actual_histograms, expected_histograms)\r\n\r\n # Added sliding window should discard read \"b\"\r\n\r\n fasta_files = [self.sample_fasta_file]\r\n qual_files = [self.sample_qual_file]\r\n mapping_file = self.sample_mapping_file\r\n barcode_type = \"golay_12\"\r\n min_seq_len = 5\r\n max_seq_len = 1000\r\n min_qual_score = 22\r\n starting_ix = 1\r\n keep_primer = False\r\n max_ambig = 0\r\n max_primer_mm = 0\r\n trim_seq_len = False\r\n dir_prefix = self.output_dir\r\n max_bc_errors = 2\r\n max_homopolymer = 4\r\n retain_unassigned_reads = False\r\n keep_barcode = False\r\n attempt_bc_correction = True\r\n qual_score_window = 3\r\n disable_primer_check = False\r\n reverse_primers = 'disable'\r\n record_qual_scores = False\r\n discard_bad_windows = True\r\n median_length_filtering = None\r\n added_demultiplex_field = None\r\n reverse_primer_mismatches = 0\r\n\r\n preprocess(fasta_files,\r\n qual_files,\r\n mapping_file,\r\n barcode_type,\r\n min_seq_len,\r\n max_seq_len,\r\n min_qual_score,\r\n starting_ix,\r\n keep_primer,\r\n max_ambig,\r\n max_primer_mm,\r\n trim_seq_len,\r\n dir_prefix,\r\n max_bc_errors,\r\n max_homopolymer,\r\n retain_unassigned_reads,\r\n keep_barcode,\r\n attempt_bc_correction,\r\n qual_score_window,\r\n disable_primer_check,\r\n reverse_primers,\r\n reverse_primer_mismatches,\r\n record_qual_scores,\r\n discard_bad_windows,\r\n median_length_filtering,\r\n added_demultiplex_field)\r\n\r\n output_seqs = open(dir_prefix + \"seqs.fna\", \"U\")\r\n output_log = open(dir_prefix + \"split_library_log.txt\", \"U\")\r\n output_histograms = open(dir_prefix + \"histograms.txt\", \"U\")\r\n\r\n actual_seqs = [line for line in output_seqs]\r\n actual_log = [line for line in output_log]\r\n actual_histograms = [line for line in output_histograms]\r\n\r\n expected_seqs = [\r\n '>s1_1 a orig_bc=ACACATGTCTAC new_bc=ACACATGTCTAC bc_diffs=0\\n',\r\n 'CCCTTATATATATAT\\n',\r\n '>s3_2 c orig_bc=AACTGTGCGTAC new_bc=AACTGTGCGTAC bc_diffs=0\\n',\r\n 'AACCGGCCGGTT\\n',\r\n '>s1_3 d orig_bc=ACTCATGTCTAC new_bc=ACACATGTCTAC bc_diffs=1\\n',\r\n 'CCCTTACTATATAT\\n']\r\n expected_log = [\r\n 'Number raw input seqs\\t6\\n',\r\n '\\n',\r\n 'Length outside bounds of 5 and 1000\\t0\\n',\r\n 'Num ambiguous bases exceeds limit of 0\\t0\\n',\r\n 'Missing Qual Score\\t0\\n',\r\n 'Mean qual score below minimum of 22\\t0\\n',\r\n 'Max homopolymer run exceeds limit of 4\\t0\\n',\r\n 'Num mismatches in primer exceeds limit of 0: 2\\n',\r\n '\\n',\r\n 'Size of quality score window, in base pairs: 3\\n',\r\n 'Number of sequences where a low quality score window was detected: 1\\n',\r\n 'Sequences with a low quality score were not written, -g option enabled.\\n',\r\n '\\n',\r\n 'Sequence length details for all sequences passing quality filters:\\n',\r\n 'Raw len min/max/avg\\t32.0/35.0/33.7\\n',\r\n 'Wrote len min/max/avg\\t12.0/15.0/13.7\\n',\r\n '\\n',\r\n 'Barcodes corrected/not\\t1/0\\n',\r\n 'Uncorrected barcodes will not be written to the output fasta file.\\n',\r\n 'Corrected barcodes will be written with the appropriate barcode category.\\n',\r\n 'Corrected but unassigned sequences will not be written unless --retain_unassigned_reads is enabled.\\n',\r\n '\\n',\r\n 'Total valid barcodes 
that are not in mapping file\\t0\\n',\r\n 'Sequences associated with valid barcodes that are not in the mapping file will not be written.\\n',\r\n '\\n',\r\n 'Barcodes in mapping file\\n',\r\n 'Num Samples\\t2\\n',\r\n 'Sample ct min/max/mean: 1 / 2 / 1.50\\n',\r\n 'Sample\\tSequence Count\\tBarcode\\n',\r\n 's1\\t2\\tACACATGTCTAC\\n',\r\n 's3\\t1\\tAACTGTGCGTAC\\n',\r\n 's2\\t0\\tAGAGTCCTGAGC\\n',\r\n '\\n',\r\n 'Total number seqs written\\t3']\r\n expected_histograms = [\r\n '# bins raw sequence lengths, length of sequences that pass quality filters before processing, and lengths of sequences that pass quality filters post processing.\\n',\r\n 'Length\\tRaw\\tBefore\\tAfter\\n',\r\n '10\\t0\\t0\\t3\\n',\r\n '20\\t2\\t0\\t0\\n',\r\n '30\\t4\\t3\\t0']\r\n\r\n self.assertEqual(actual_seqs, expected_seqs)\r\n self.assertEqual(actual_log, expected_log)\r\n self.assertEqual(actual_histograms, expected_histograms)", "def _evaluate_during_fit(self, test_loader, epoch):", "def main():\n\t# \"\"\"\n\t# \tMain function of test python module\n\t# \"\"\"\n\t# random.seed(os.urandom(345634)) # initialize random generator\n\t# t = np.linspace(0.0, 24.0, 96.0) # define the time axis of a day, here we use 96 values every quarter of an hour\n\t# # standard load profile -- input\n\t# q = extra.read_slp(t,\n\t# 'Profielen-Elektriciteit-2015-versie-1.00 Folder/profielen Elektriciteit 2015 versie 1.00.csv') # read the sample standard load profile, can be any length, can be resized given a low/high resolution time axis\n\t# q = q / np.sum(q) # normalization of standard load profile\n\t# # process duration\n\t# duration_axis = np.linspace(0.0, 24.0, 96.0)\n\t# (p_d, E_p) = extra.app_time(duration_axis, 10, 2, 0.0,\n\t# 24.0) # function that define the pdf of duration of a process\n\t# # process consumption\n\t# consumption_axis = np.linspace(0.0, 3.5, 96.0)\n\t# (p_k, E_k) = extra.app_consumption(consumption_axis, 10, 2, 0.0,\n\t# 3.5) # function that define the pdf of duration of a process\n\t# # pdf of starting time\n\t# p_t_0 = lpd.infer_t_0(q, p_d, E_k) # computes the pdf of starting time of processes\n\t# p_t_0 = p_t_0 / np.sum(p_t_0) # normalization of the pdf to sum up to zero\n #\n\t# \"\"\"\n\t# 1st Approach, starting time of processes is a discrete propapibility density function\n\t# \"\"\"\n\t# # synthetic profile of D processes\n\t# D = 2000\n\t# synthetic_profile = lpd.synthetic_profile(D, t, p_d, consumption_axis, p_k, p_t_0)\n\t# synthetic_profile_1 = lpd.synthetic_profile(D, t, p_d, consumption_axis, p_k, p_t_0)\n\t# # expected value of D processes\n\t# q_e_e = lpd.infer_q_e(t, p_t_0, p_d, E_k, D)\n\t# # plot\n\t# plt.step(t, synthetic_profile, \"g-\")\n\t# plt.step(t, q_e_e, \"b--\")\n #\n\t# \"\"\"\n\t# 2nd Approach, starting time of processes is a continuous propapibility density function\n\t# \"\"\"\n\t# # synthetic profile of D processes\n\t# ts, cs = lpd.continous_synthetic_profile(D, t, p_d, consumption_axis, p_k, p_t_0)\n\t# plt.step(ts / len(t) * t[-1], cs, where='post', c='r')\n\t# plt.xlim(0, 24.0)\n\t# plt.legend([\"synthetic\", \"expected\", \"continuous\"], loc=0)\n\t# plt.show()\n #\n\t# \"\"\"\n\t# Time discretization\n\t# \"\"\"\n\t# n_intervals = 24 * 1 # discretized in minutes\n\t# discrete_timeaxis = np.linspace(0.0, 24.0, n_intervals + 1)\n\t# discrete_consumption = lpd.signal_discretization(discrete_timeaxis, t, ts, cs)\n\t# plt.step(ts / len(t) * t[-1], cs, where='post', c='r')\n\t# plt.step(discrete_timeaxis, discrete_consumption, where='post', c='k', ls='--', lw=2)\n\t# 
plt.legend([\"continuous\", \"discretized\"], loc=0)\n\t# plt.show()\n #\n #\n\t# \"\"\"\n\t# Repeated day synthetic profile creation\n\t# \"\"\"\n\t# # synthetic profile of D processes\n\t# D = 2000\n\t# n = 10\n\t# slp = lpd.synthetic_profile_repeated(D, t, p_d, consumption_axis, p_k, p_t_0, n)\n\t# plt.step(range(len(slp)), slp, \"g-\")\n\t# plt.show()\n\tt = np.linspace(0.0, 24.0, 96.0)\n\tload_profile = extra.read_slp(t, 'Profielen-Elektriciteit-2015-versie-1.00 Folder/profielen Elektriciteit 2015 versie 1.00.csv')\n\tslp = synthetic.create_synthetic_load(load_profile, 5.0, 5)\n\tplt.step(range(len(slp)), slp)\n\tplt.show()", "def unit_test():\n # Fixed filename\n ROOT = '/home/knmac/projects/vid_time_model/data/EDTCN_results/50Salads/'\\\n 'mid/mid_motionconstraint_nomotion_g0/nepoch_200'\n RUN = 'run_11'\n SPLIT = 'Split_1'\n FNAME = os.path.join(ROOT, RUN, SPLIT+'.mat')\n\n # Load computed results\n content = open(os.path.join(ROOT, RUN, 'trials.txt')).read().splitlines()\n for line in content:\n if SPLIT in line:\n break\n tokens = line.split(' ')\n acc_rec = tokens[2].replace('accuracy:', '').replace(',', '')\n edit_rec = tokens[3].replace('edit_score:', '').replace(',', '')\n f1_rec = tokens[4].replace('overlap_f1:', '').replace(',', '')\n\n # Load data\n data = scipy.io.loadmat(FNAME)\n P, S, Y = data['P'].squeeze(), data['S'].squeeze(), data['Y'].squeeze()\n P = [x.squeeze() for x in P]\n S = S.tolist()\n Y = [x.squeeze() for x in Y]\n\n # Compute metrics\n acc = accuracy(P, Y)\n edit = edit_score(P, Y, norm=True, bg_class=0)\n f1 = overlap_f1(P, Y, n_classes=18, bg_class=0)\n _, mAP = mid_mAP(P, Y, S, bg_class=0)\n\n # Print out\n print('Testing metrics...')\n print(' Acc: computed={:.02f} - recorded={}'.format(acc, acc_rec))\n print(' Edit: computed={:.02f} - recorded={}'.format(edit, edit_rec))\n print(' F1@10: computed={:.02f} - recorded={}'.format(f1, f1_rec))\n print(' mAP: computed={:.02f}'.format(mAP))\n return 0", "def preprocess_main():", "def main():\n logging.info(\"Executing data quality module\")\n\n calculate_quality()", "def testAreaFunction(self):\n commands = StringIO('''\nfrom math import pi\ndef area(r):\n return r ** 2 * pi\n\narea(2.0)\n''')\n out = StringIO()\n pl = Pipeline(loadInitFile=False, outfp=out)\n Batch(pl).run(commands)\n self.assertTrue(out.getvalue().startswith('12.56637'))", "def preprocess_test_data(self):\r\n print(\"* Preprocessing test data.\", flush=True)\r\n prep.create_HDF_file(self.C.test_set)\r\n\r\n self.print_time_elapsed()", "def test_inference():\n\n input_path = os.path.join(MAIN_PATH, \"data/test_cases/\")\n output_path = os.path.join(MAIN_PATH, \"data/test_results/\")\n\n nifti_files = len(os.listdir(input_path))\n\n # remove existing files in the output path\n for file in os.listdir(output_path):\n filepath = output_path + file\n if \"test_results\" in filepath:\n os.remove(filepath)\n\n # run\n bpreg_inference(input_path, output_path, plot=True)\n\n # Test creation of json files\n json_output_files = [f for f in os.listdir(output_path) if f.endswith(\".json\")]\n assert len(json_output_files) == nifti_files\n\n # Test tags\n with open(output_path + json_output_files[0]) as f:\n x = json.load(f)\n\n assert \"settings\" in x.keys()\n assert \"cleaned slice scores\" in x.keys()\n assert \"unprocessed slice scores\" in x.keys()\n assert \"body part examined\" in x.keys()\n assert \"body part examined tag\" in x.keys()\n\n # Check if readme file is saved.\n assert \"README.md\" in os.listdir(output_path)", "def setUp(self):\n 
domain_fname = '../domains/baxter_domain/baxter.domain'\n d_c = main.parse_file_to_dict(domain_fname)\n domain = parse_domain_config.ParseDomainConfig.parse(d_c)\n hls = hl_solver.FFSolver(d_c)\n def get_plan(p_fname, plan_str=None):\n p_c = main.parse_file_to_dict(p_fname)\n problem = parse_problem_config.ParseProblemConfig.parse(p_c, domain)\n abs_problem = hls.translate_problem(problem)\n if plan_str is not None:\n return hls.get_plan(plan_str, domain, problem)\n return hls.solve(abs_problem, domain, problem)\n self.get_plan = get_plan\n\n # Successful Problem\n # self.move_arm_prob = get_plan('../domains/baxter_domain/baxter_probs/baxter_move_arm.prob')\n # self.grab_prob = get_plan('../domains/baxter_domain/baxter_probs/baxter_grasp.prob', ['0: GRASP BAXTER CAN0 TARGET0 PDP_TARGET0 EE_TARGET0 ROBOT_END_POSE'])\n # self.move_hold_prob = get_plan('../domains/baxter_domain/baxter_probs/baxter_move_holding.prob', ['0: MOVETOHOLDING BAXTER ROBOT_INIT_POSE ROBOT_END_POSE CAN0'])\n # self.complex_grab_prob = get_plan('../domains/baxter_domain/baxter_probs/baxter_complex_grasp.prob', ['0: GRASP BAXTER CAN0 TARGET0 PDP_TARGET0 EE_TARGET0 ROBOT_END_POSE'])\n\n # Problem for testing\n # self.putdown_prob = get_plan('../domains/baxter_domain/baxter_probs/putdown_1234_0.prob', ['0: PUTDOWN BAXTER CAN0 TARGET2 ROBOT_INIT_POSE EE_TARGET2 ROBOT_END_POSE'])\n\n # Problem for test_free_attrs test\n # self.test_free_attrs_prob = get_plan('../domains/baxter_domain/baxter_probs/baxter_complex_grasp.prob', ['0: GRASP BAXTER CAN0 TARGET0 PDP_TARGET0 EE_TARGET0 ROBOT_END_POSE'])", "def test_variational():\n # iris\n #pres = \"Test pour le data set Iris (facile, classique)\"\n #test_from_func_variational(pres, 15, 10, 3, True, Iris)\n\n # breast cancer\n pres = \"Test pour le data set Breast Cancer (facile, classique)\"\n test_from_func_variational(pres, 15, 10, 3, True, Breast_cancer)\n\n # digits\n # pres = \"Test pour le data set Digits (difficile, classique)\"\n # test_from_func(pres, 10, 10, 10, True, Digits, quantum_instance)\n\n # wine\n # pres = \"Test pour le data set Wine (moyen, classique)\"\n # test_from_func(pres, 15, 10, 5, True, Wine, quantum_instance)\n\n # gaussian\n pres = \"Test pour des données gaussiennes (moyen, classique)\"\n for _ in range(1):\n print(\"\\n\")\n print(\"New iteration\")\n test_from_func_variational(pres, 25, 10, 2, True, Gaussian)\n print(\"\\n\")\n\n # small adn strings\n pres = \"Test pour des séquences ADN courtes (difficile, classique)\"\n test_from_func_variational(pres, 10, 15, 14, True, Sequence)\n\n #Quantum data\n pres = \"Test pour des données générées par ordinateur quantique (facile, quantique)\"\n print(pres)\n _, samp_train, samp_test, labels = ad_hoc_data(15, 10, 2, 0.3, True)\n sample_m, sample_p = stock_get(20, 0.3)\n\n labels_me = [-1, 1]\n samp_train_me = {-1: np.array(sample_m[:15]), 1: np.array(sample_p[:15])}\n samp_test_me = {-1: np.array(sample_m[15:]), 1: np.array(sample_p[15:])}\n print(samp_train)\n print(samp_train_me)\n print(samp_test)\n print(samp_test_me)\n\n my_impl_variational(samp_train, samp_test, labels)\n print(\"Pour autres données quantiques\")\n my_impl_variational(samp_train_me, samp_test_me, labels_me)", "def test_cbma_workflow_smoke(\n tmp_path_factory,\n testdata_cbma_full,\n estimator,\n corrector,\n diagnostics,\n):\n tmpdir = tmp_path_factory.mktemp(\"test_cbma_workflow_function_smoke\")\n\n if estimator == MKDAChi2:\n with pytest.raises(AttributeError):\n CBMAWorkflow(estimator=estimator, corrector=corrector, 
diagnostics=diagnostics)\n elif estimator == Fishers:\n with pytest.raises((AttributeError, ValueError)):\n CBMAWorkflow(estimator=estimator, corrector=corrector, diagnostics=diagnostics)\n elif estimator == \"ales\":\n with pytest.raises(ValueError):\n CBMAWorkflow(estimator=estimator, corrector=corrector, diagnostics=diagnostics)\n else:\n workflow = CBMAWorkflow(\n estimator=estimator,\n corrector=corrector,\n diagnostics=diagnostics,\n output_dir=tmpdir,\n )\n cres = workflow.fit(testdata_cbma_full)\n\n assert isinstance(cres, nimare.results.MetaResult)\n assert op.isfile(op.join(tmpdir, \"boilerplate.txt\"))\n assert op.isfile(op.join(tmpdir, \"references.bib\"))\n\n for imgtype in cres.maps.keys():\n filename = f\"{imgtype}.nii.gz\"\n outpath = op.join(tmpdir, filename)\n # For ALE maps are None\n if not cres.maps[imgtype] is None:\n assert op.isfile(outpath)\n\n for tabletype in cres.tables.keys():\n filename = f\"{tabletype}.tsv\"\n outpath = op.join(tmpdir, filename)\n # For ALE tables are None\n if not cres.tables[tabletype] is None:\n assert op.isfile(outpath)", "def runTest(self):\n self.setUp()\n self.test_FiducialTransform1()", "def test_stage_0():\n\tra_1 = readImage(TRAIN_RAW_IMAGE_1)\n\tre_1 = readImage(TRAIN_RESULT_IMAGE_1)\n\n\tra_2 = readImage(TRAIN_RAW_IMAGE_2)\n\tre_2 = readImage(TRAIN_RESULT_IMAGE_2)\n\n\t# Uncomment below if more examples are required.\n\t# ra_3 = readImage(TRAIN_RAW_IMAGE_3)\n\t# re_3 = readImage(TRAIN_RESULT_IMAGE_3)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_1 += (\n\t# \tlaplace_operator(TRAIN_RAW_IMAGE_1),\\\n\t# \t# k_means(TRAIN_RAW_IMAGE_1)[0],\\\n\t# \t)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_2 += (\n\t# \tlaplace_operator(TRAIN_RAW_IMAGE_2),\\\n\t# \t# k_means(TRAIN_RAW_IMAGE_2)[0],\\\n\t# \t)\n\n\t# The prediction model is obtained and trained.\n\tengine = get_model((ra_1, ra_2,), (re_1, re_2,), model_type=SVM, percentage=0.1)\n\n\ttest_percentage = float(1) # how many tests\n\n\tra_1 = readImage(TEST_RAW_IMAGE_1)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_1 += (\n\t# \tlaplace_operator(TEST_RAW_IMAGE_1),\\\n\t# \t# k_means(TEST_RAW_IMAGE_1)[0],\\\n\t# \t)\n\n\tre_1 = readImage(TEST_RESULT_IMAGE_1)\n\n\t# ra_2 = readImage(TEST_RAW_IMAGE_2)\n\t# re_2 = readImage(TEST_RESULT_IMAGE_2)\n\n\tinput_vec = []\n\t# The features are extracted.\n\tinput_vec += buildFeatureArray_2(ra_1[0], ra_1[1], ra_1[2],\\\n\t\tRADIUS_ARRAY,\\\n\t\tadditional_feats=([] if len(ra_1) == 3 else ra_1[3:]))\n\n\tex_no = int(test_percentage * len(input_vec)) # actual number of the test sample\n\n\toutput_vec = []\n\toutput_vec += matrixToArray(re_1[0], lambda el: 1 if el == 255 else 0)\n\n\tprint('Will start predicting...')\n\n\tpredicted_vec = engine.predict(input_vec[:ex_no])\n\n\tcounter = float(0)\n\tfor y, p in zip(output_vec[:ex_no], predicted_vec[:ex_no]):\n\t\tif y == p: counter += 1\n\n\tprint('Accuracy: ' + str(counter/ex_no))\n\n\tpredicted_mat = arrayToMatrix( predicted_vec, len(re_1[0]), len(re_1[0][0]),\\\n\t\tlambda el: 255 if el == 1 else 0)\n\n\t# The predicted segmentation is saved.\n\tsave_rgb_img(\\\n\t np.array(predicted_mat).transpose(),\\\n\t np.array(predicted_mat).transpose(),\\\n\t np.array(predicted_mat).transpose(),\\\n\t 'pred.bmp',\\\n\t)", "def testBasics(self):\n for imageClass in (afwImage.ImageF, afwImage.ImageD):\n inImage = makeRampImage(bbox=self.bbox, start=-5, stop=2500, imageClass=imageClass)\n\n measImage = inImage.Factory(inImage, True)\n linSq 
= LinearizeSquared()\n linRes = linSq(image=measImage, detector=self.detector)\n desNumLinearized = np.sum(self.sqCoeffs.flatten() > 0)\n self.assertEqual(linRes.numLinearized, desNumLinearized)\n self.assertEqual(linRes.numAmps, len(self.detector.getAmpInfoCatalog()))\n\n refImage = inImage.Factory(inImage, True)\n refLinearizeSquared(image=refImage, detector=self.detector)\n\n self.assertImagesAlmostEqual(refImage, measImage)\n\n # make sure logging is accepted\n log = Log.getLogger(\"ip.isr.LinearizeSquared\")\n linRes = linSq(image=measImage, detector=self.detector, log=log)", "def main():\n run_test_summary1a()\n run_test_summary1c()\n run_test_summary1c()", "def test_default_quantify_misfit(tmpdir):\n preprocess = Default(syn_data_format=\"ascii\", obs_data_format=\"ascii\",\n unit_output=\"disp\", misfit=\"waveform\",\n adjoint=\"waveform\", path_preprocess=tmpdir,\n path_solver=TEST_SOLVER, source_prefix=\"SOURCE\",\n ntask=2,\n )\n preprocess.setup()\n\n preprocess.quantify_misfit(\n source_name=\"001\",\n save_residuals=os.path.join(tmpdir, \"residuals_ascii\"),\n save_adjsrcs=tmpdir\n )\n\n # !!! throws a segy error because data are not in the right format\n # preprocess.syn_data_format = \"SU\"\n # preprocess.obs_data_format = \"SU\"\n # preprocess.quantify_misfit(\n # source_name=\"001\",\n # save_residuals=os.path.join(tmpdir, \"residuals_su\"),\n # save_adjsrcs=tmpdir\n # )\n\n assert(len(glob(os.path.join(tmpdir, \"*\"))) == 3)\n residuals = open(os.path.join(tmpdir, \"residuals_ascii\")).readlines()\n assert(len(residuals) == 2)\n assert(float(residuals[0]) == pytest.approx(0.0269, 3))", "def test_qa_train_effectiveness():\n # use a non-fine-tuned model so we DEFINITELY get an improvement\n happy = HappyQuestionAnswering(\"BERT\", \"bert-base-uncased\")\n args = QATrainArgs(num_train_epochs=3)\n before_loss = happy.eval(\"../data/qa/train-eval.csv\").loss\n happy.train(\"../data/qa/train-eval.csv\", args=args)\n after_loss = happy.eval(\"../data/qa/train-eval.csv\").loss\n\n assert after_loss < before_loss", "def exe_tests(self):\n self.rank = mpicom.rank()\n self.size = mpicom.size()\n if mpicom.parallel():\n self.test(\"libname\",os.path.split(mpicom.__file__)[1],\"mpicom.so\")\n else:\n self.test(\"libname\",os.path.split(mpicom.__file__)[1],\"mpistub.pyc\")\n self.test_broadcast()\n self.test_reduce()\n self.test_p2p()\n self.test_gather()\n self.test_scatter()\n #self.test_alltoall()", "def _prepare_evaluate(self):\n labels = list()\n labels += ['num_procs', 'num_periods', 'is_debug', 'seed_emax', 'seed_sim']\n labels += ['num_draws_emax', 'num_agents_sim', 'num_types', 'edu_spec', 'version']\n labels += ['num_draws_prob', 'seed_prob']\n num_procs, num_periods, is_debug, seed_emax, seed_sim, num_draws_emax, num_agents_sim, \\\n num_types, edu_spec, version, num_draws_prob, seed_prob = \\\n dist_class_attributes(self.respy_base, *labels)\n\n periods_draws_emax = create_draws(num_periods, num_draws_emax, seed_emax, is_debug)\n periods_draws_sims = create_draws(num_periods, num_agents_sim, seed_sim, is_debug)\n\n disturbances = (periods_draws_emax, periods_draws_sims)\n\n # We want to maintain a pure PYTHON version for testing purposes.\n args = list()\n args += [num_periods, num_types, edu_spec['start'], edu_spec['max'], edu_spec['max'] + 1]\n state_space_info = respy_f2py.wrapper_create_state_space(*args)\n if self.mpi_setup == MISSING_INT:\n slavecomm = self.mpi_setup\n else:\n slavecomm = self.mpi_setup.py2f()\n self.set_up_baseline(periods_draws_emax, None)\n\n 
initial_conditions = get_initial_conditions(self.respy_base)\n\n args = (smm_sample_f2py, state_space_info, initial_conditions, disturbances, slavecomm)\n self.simulate_sample = partial(*args)", "def test_evaluate():\n X_train, X_test, y_train, y_test = src.load()\n clf, score = src.train(X_train, y_train)\n test_score = src.evaluate(clf, X_test, y_test)\n assert isinstance(test_score, float)", "def test_process_data(self):\n pass", "def test_post_k2_clean():\n # Read in data file and process it:\n\n C = 'tiny'\n df = pd.read_csv('catalog_matching/tests/exfiles/select_min_dist_union_k2.csv')\n\n # This is created in the actual procedure as an intermediate step\n # This file is matched with the K2 catalog before further processing\n\n racols = list(filter(lambda k: ra in k, df.columns.values))\n deccols = list(filter(lambda k: dec in k, df.columns.values))\n df[\"{}_mean\".format(ra)] = df[racols].mean(axis=1)\n df[\"{}_mean\".format(dec)] = df[deccols].mean(axis=1)\n df[[\"{}_mean\".format(ra),\"{}_mean\".format(dec)]].to_csv('catalog_matching/tests/exfiles/{}_radec.csv'.format(C),\n index=False, header=False)\n\n # Read in data from K2 search:\n folder = 'catalog_matching/tests/exfiles/'\n k2 = pd.read_csv('{}{}_k2_search.txt'.format(folder,C), skiprows=[1], header=[0])\n\n # Call function\n k2mem = post_k2_clean(df, k2, \"mean\")\n k2mem = k2mem.dropna(how=\"all\", axis=1).dropna(how=\"all\", axis=0).T\n\n # ---------------------------------------------------------------------------\n # Validate some results:\n\n\n assert df.iloc[0].Pmem_a == k2mem['0'].Pmem_a\n for i in [0,1,2]:\n assert df.iloc[i].RAJ2000_a == pytest.approx(k2mem[str(i)].RAJ2000_mean, rel=1e-7)\n assert df.iloc[i].DEJ2000_a == pytest.approx(k2mem[str(i)].DEJ2000_mean, rel=1e-7)\n for i in [3,4,5,6]:\n assert k2mem[str(i)].RAJ2000_d == pytest.approx(k2mem[str(i)].RAJ2000_mean, rel=1e-7)\n assert k2mem[str(i)].DEJ2000_d == pytest.approx(k2mem[str(i)].DEJ2000_mean, rel=1e-7)\n assert df.shape[0]==k2mem.shape[1]\n\n assert k2mem.shape[1] == 7", "def test():\n np.random.seed(42)\n shape = (7,3)\n feat = np.random.randint(-100, 100, shape)\n expected_z_score = stats.zscore(feat, axis=0)\n argmax = feat.argmax(axis=1)\n expected_z_score = np.hstack((argmax.reshape(argmax.shape[0],1), expected_z_score))\n max_feat = feat.max(axis=1)\n mean_values = np.take(feat.mean(axis=0), argmax)\n abs_diff = np.abs(max_feat - mean_values)\n expected_z_score = np.insert(expected_z_score, [1], abs_diff.reshape((abs_diff.shape[0],1)),axis=1)\n\n feat_list = feat.tolist()\n feat_list_str = list(map(lambda x: ','.join(map(str, x)), feat_list))\n feat_list_str = list(map(lambda x: ','.join(['2', x]),feat_list_str))\n test_df = pd.DataFrame({'id_job':np.arange(shape[0]), 'features':feat_list_str})\n\n class CustomConfig(Config):\n INPUT_FILE_PATH = 'simple_test.tsv'\n OUTPUT_FILE_PATH = 'simple_proc.tsv'\n CHUNK_SIZE = 30\n \n my_config = CustomConfig()\n\n try:\n os.remove(my_config.INPUT_FILE_PATH)\n except OSError:\n pass \n try:\n os.remove(my_config.OUTPUT_FILE_PATH)\n except OSError:\n pass \n\n test_df.to_csv(my_config.INPUT_FILE_PATH, index=False, sep='\\t') \n process = Process(my_config)\n process.process_data()\n\n proc_test = pd.read_csv(my_config.OUTPUT_FILE_PATH, sep='\\t')\n actual_z_score = proc_test.loc[:, proc_test.columns != 'id_job']\n print('-'*10 + 'expected' + '-'*10)\n print(expected_z_score)\n print('-'*10 + 'actual' + '-'*10)\n print(actual_z_score.values)\n\n assert np.allclose(expected_z_score, actual_z_score.values) 
== True", "def test_sales_forecasting_multiprocessing(self):\n with TemporaryDirectory() as temp_dir:\n self.run_command(\n (\n \"python3 main.py --use-synthetic-data --epochs 1\"\n f\" --mov-mean-window 0 --log-dir {temp_dir}\"\n \" --multiprocessing\"\n ),\n working_path,\n [\n \"Begin training loop\", \"Training:\", r\"epoch:\\s+1\",\n \"Validation:\", \"Best RMSPE|no valid RMSPE results\"\n ]\n )", "def test_fit_prep_proba():\n args = get_layer('fit', 'manual', 'temporal', True, True, window=2, step_size=3)\n run_layer(*args)", "def test_fleur_relax_continue_converged(self, run_with_cache, mock_code_factory):\n assert False", "def fit_test(self):" ]
[ "0.629093", "0.62761736", "0.620866", "0.6105234", "0.6059339", "0.6007366", "0.5925567", "0.59123164", "0.5886541", "0.58703196", "0.58566153", "0.5794662", "0.57678074", "0.5767124", "0.5744852", "0.5740845", "0.57300746", "0.5707568", "0.5697885", "0.5695476", "0.56930786", "0.56889385", "0.5672495", "0.56605244", "0.56518304", "0.5646701", "0.5646008", "0.5644359", "0.5639603", "0.56340265" ]
0.6344366
0
dataList item renderer for Posts on the Bulletin Board.
def cms_post_list_layout(list_id, item_id, resource, rfields, record): record_id = record["cms_post.id"] #item_class = "thumbnail" T = current.T db = current.db s3db = current.s3db settings = current.deployment_settings permit = current.auth.s3_has_permission raw = record._row date = record["cms_post.date"] title = record["cms_post.title"] body = record["cms_post.body"] #series_id = raw["cms_post.series_id"] # Allow records to be truncated # (not yet working for HTML) body = DIV(body, _class = "s3-truncate", ) #if series_id: # series = record["cms_post.series_id"] # translate = settings.get_L10n_translate_cms_series() # if translate: # series_title = T(series) # else: # series_title = series #else: # series_title = series = "" #status = record["cms_post.status_id"] author_id = raw["cms_post.created_by"] person = record["cms_post.created_by"] # @ToDo: Bulk lookup ltable = s3db.pr_person_user ptable = db.pr_person query = (ltable.user_id == author_id) & \ (ltable.pe_id == ptable.pe_id) row = db(query).select(ptable.id, limitby = (0, 1) ).first() if row: person_id = row.id else: person_id = None if person: if person_id: # @ToDo: deployment_setting for controller to use? person_url = URL(c="hrm", f="person", args = [person_id], ) else: person_url = "#" person = A(person, _href = person_url, ) table = db.cms_post # Toolbar if permit("update", table, record_id=record_id): edit_btn = A(ICON("edit"), SPAN("edit", _class = "show-for-sr", ), _href = URL(c="cms", f="post", args = [record_id, "update.popup"], vars = {"refresh": list_id, "record": record_id, } ), _class = "s3_modal", #_title = T("Edit %(type)s") % {"type": series_title}, _title = T("Edit"), ) else: edit_btn = "" if permit("delete", table, record_id=record_id): delete_btn = A(ICON("delete"), SPAN("delete", _class = "show-for-sr", ), _class = "dl-item-delete", _title = T("Delete"), ) else: delete_btn = "" # Bookmarks auth = current.auth user = auth.user if user: #and settings.get_cms_bookmarks(): # @ToDo: Bulk lookup (via list_fields?) 
ltable = s3db.cms_post_user query = (ltable.post_id == record_id) & \ (ltable.user_id == user.id) exists = db(query).select(ltable.id, limitby = (0, 1) ).first() if exists: bookmark = A(ICON("bookmark"), SPAN("remove bookmark", _class = "show-for-sr", ), _class = "bookmark", _title = T("Remove Bookmark"), ) else: bookmark = A(ICON("bookmark-empty"), SPAN("bookmark", _class = "show-for-sr", ), _class = "bookmark", _title = T("Add Bookmark"), ) bookmark["_data-c"] = "cms" bookmark["_data-f"] = "post" bookmark["_data-i"] = record_id else: bookmark = "" # Dropdown of available documents documents = raw["doc_document.file"] if documents: if not isinstance(documents, list): documents = (documents,) doc_list = UL(_class = "dropdown-menu", _role = "menu", ) retrieve = db.doc_document.file.retrieve for doc in documents: try: doc_name = retrieve(doc)[0] except (IOError, TypeError): doc_name = current.messages["NONE"] doc_url = URL(c="default", f="download", args = [doc]) doc_item = LI(A(ICON("file"), " ", doc_name, _href = doc_url, ), _role = "menuitem", ) doc_list.append(doc_item) docs = DIV(A(ICON("paper-clip"), SPAN(_class = "caret"), _class = "btn dropdown-toggle", _href = "#", **{"_data-toggle": "dropdown"} ), doc_list, _class = "btn-group attachments dropdown pull-right", ) else: docs = "" #divider = LI("|") #divider["_aria-hidden"] = "true" toolbar = UL(#LI(share_btn, # _class = "item", # ), #LI(A(ICON("flag"), # @ToDo: Use flag-alt if not flagged & flag if already flagged (like for bookmarks) # SPAN("flag this", # _class = "show-for-sr", # ), # _href = "#", # _title = T("Flag"), # ), # _class = "item", # ), LI(bookmark, _class = "item", ), #LI(A(I(_class = "fa fa-users", # ), # SPAN("make public", # _class = "show-for-sr", # ), # _href = "#", # _title = T("Make Public"), # ), # _class = "item", # ), LI(edit_btn, _class = "item", ), LI(delete_btn, _class = "item", ), _class = "controls", ) # Tags #if settings.get_cms_show_tags(): tag_list = UL(_class = "left inline-list s3-tags", ) tag_list["_data-post_id"] = record_id tags = raw["cms_tag.name"] if tags: if not isinstance(tags, list): tags = [tags] for tag in tags: tag_list.append(LI(A(tag, _href = "#", ), )) # Comments comment_list = UL(_class = "card-post-comments", ) cappend = comment_list.append #if settings.get_cms_comments(): # Add existing comments (oldest 1st) # - should sort by default by ID which is equivalent to oldest first, # however they seem to come in in a random order (even if orderby set on the component) so need to be sorted manually here comments = raw["cms_comment.json_dump"] ncomments = 0 if comments: if not isinstance(comments, list): comments = [comments] comments = [json.loads(comment) for comment in comments] comments.sort(key=lambda c: c["created_on"]) user_ids = [comment["created_by"] for comment in comments] comment_authors = s3db.auth_UserRepresent(show_link = False).bulk(user_ids) comment_authors_get = comment_authors.get for comment in comments: author = comment_authors_get(comment["created_by"]) cdate = dateutil.parser.parse(comment["created_on"]) ctime = cdate.time().strftime("%H:%M") cdate = cdate.date().strftime("%b %d, %Y") comment = LI(TAG["ASIDE"](P(T("Updated %(date)s @ %(time)s by %(author)s") % \ {"date": cdate, "time": ctime, "author": author, }, _class = "meta", ), DIV(comment["body"], _class = "desc", ), # @ToDo: Show this if more than x chars? 
#TAG["FOOTER"](P(A(T("More Info"), # _class="more", # ) # ), # _class = "footer", # ), _class = "card-post-comment", )) cappend(comment) ncomments += 1 if ncomments == 1: num_comments = "1 Comment" else: num_comments = T("%(num)s Comments") % {"num": ncomments} if user: add_comment = A(T("Add Comment"), _class = "add-comment", ) add_comment["_data-l"] = list_id add_comment["_data-i"] = record_id add_comment = P(add_comment) comment_input = LI(TAG["ASIDE"](TEXTAREA(_class="desc", _placeholder = T("comment here"), ), TAG["FOOTER"](P(A("Submit Comment", _class = "submit", ), ), ), _class = "card-post-comment", ), _class = "comment-form hide", ) cappend(comment_input) else: add_comment = "" item = TAG["ASIDE"](TAG["HEADER"](UL(# post priority icon LI(_class = "item icon", ), # post type title #LI(series_title, # _class = "item primary", # ), # post status #LI(status, # _class = "item secondary border status", # ), # post visibility # @ToDo: Read the visibility #LI(T("Public"), # _class = "item secondary border visibility", # ), _class = "status-bar-left" ), toolbar, _class = "status-bar", ), DIV(DIV(SPAN("Updated ", # @ToDo: i18n TAG["TIME"](date), " by ", person, _class = "meta-update", ), SPAN(num_comments, _class = "meta-comments", ), _class = "meta", ), H4(title, _class = "title", ), DIV(body, _class = "desc", ), _class = "body", ), docs, TAG["FOOTER"](DIV(tag_list, _class = "tags clearfix", # @ToDo: remove clearfix and style via CSS ), comment_list, add_comment, _class = "footer", ), _class = "card-post", _id = item_id, ) return item
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def serializePostsData(influencer, posts, length_limit=30, highlight=False):\n from debra import serializers\n\n posts_data = []\n urls = set()\n posts = list(posts)\n dated = []\n undated = []\n for post in posts:\n if post.create_date:\n dated.append(post)\n else:\n undated.append(post)\n\n posts = sorted(dated, key=lambda x: x.create_date)\n posts.reverse()\n posts.extend(undated)\n\n if length_limit:\n length_limit = length_limit\n\n for post in posts:\n if post.url in urls:\n continue\n urls.add(post.url)\n post_data = {}\n post_data[\"post_image\"] = post.post_image\n stripped_content, images = tagStripper(\n post.content, length_limit=length_limit)\n post_data[\"content\"] = stripped_content\n post_data[\"content_images\"] = images\n post_data[\"url\"] = post.url\n post_data[\"blog_name\"] = serializers.unescape(influencer.blogname if influencer else\\\n post.influencer.blogname)\n post_data[\"title\"] = post.title\n post_data[\"platform\"] = get_post_platform(post)\n if highlight:\n post_data[\"highlight\"] = True\n if post.create_date:\n post_data[\"create_date\"] = post.create_date.strftime(\"%b. %e, %Y\")\n if not influencer:\n post_data['user'] = post.influencer.feed_stamp\n if post.products_json:\n post_data[\"products\"] = post.get_product_json()\n else:\n post_data[\"products\"] = []\n posts_data.append(post_data)\n return posts_data", "def cms_post_list_layout(list_id, item_id, resource, rfields, record):\n\n record_id = record[\"cms_post.id\"]\n item_class = \"thumbnail\"\n\n db = current.db\n s3db = current.s3db\n settings = current.deployment_settings\n NONE = current.messages[\"NONE\"]\n\n org_field = settings.get_cms_organisation()\n # Convert to the right format for this context\n if org_field == \"created_by$organisation_id\":\n org_field = \"auth_user.organisation_id\"\n elif org_field == \"post_organisation.organisation_id\":\n org_field = \"cms_post_organisation.organisation_id\"\n\n org_group_field = settings.get_cms_organisation_group()\n # Convert to the right format for this context\n if org_group_field == \"created_by$org_group_id\":\n org_group_field = \"auth_user.org_group_id\"\n elif org_group_field == \"post_organisation_group.group_id\":\n org_group_field = \"cms_post_organisation_group.group_id\"\n\n raw = record._row\n body = record[\"cms_post.body\"]\n series_id = raw[\"cms_post.series_id\"]\n\n title = record[\"cms_post.title\"]\n if title and title != NONE:\n subtitle = [DIV(title,\n _class=\"card-subtitle\"\n )\n ]\n else:\n subtitle = []\n\n #for event_resource in [\"event\", \"incident\"]:\n # label = record[\"event_post.%s_id\" % event_resource]\n # if label and label != NONE:\n # link=URL(c=\"event\", f=event_resource,\n # args=[raw[\"event_post.%s_id\" % event_resource],\n # \"profile\"]\n # )\n # subtitle.append(DIV(A(ICON(event_resource),\n # label,\n # _href=link,\n # _target=\"_blank\",\n # ),\n # _class=\"card-subtitle\"\n # ))\n if subtitle:\n subtitle.append(body)\n body = TAG[\"\"](*subtitle)\n\n # Allow records to be truncated\n # (not yet working for HTML)\n body = DIV(body,\n _class=\"s3-truncate\",\n )\n\n date = record[\"cms_post.date\"] or \"\"\n date = SPAN(date,\n _class=\"date-title\",\n )\n\n location_id = raw[\"cms_post.location_id\"]\n if location_id:\n location = record[\"cms_post.location_id\"]\n if settings.get_cms_location_click_filters():\n # Which levels of Hierarchy are we using?\n levels = current.gis.get_relevant_hierarchy_levels()\n\n data = {}\n for level in levels:\n data[level[1:]] = raw[\"gis_location.%s\" 
% level]\n onclick = '''S3.filter_location(%s)''' % json.dumps(data, separators=SEPARATORS)\n location = SPAN(A(location,\n _href=\"#\",\n _onclick=onclick,\n ),\n _class=\"location-title\",\n )\n else:\n location_url = URL(c=\"gis\", f=\"location\", args=[location_id, \"profile\"])\n location = SPAN(A(location,\n _href=location_url,\n ),\n _class=\"location-title\",\n )\n else:\n location = \"\"\n\n person = \"\"\n contact_field = settings.get_cms_person()\n if contact_field == \"created_by\":\n author_id = raw[\"cms_post.created_by\"]\n person = record[\"cms_post.created_by\"]\n\n # @ToDo: Bulk lookup\n ltable = s3db.pr_person_user\n ptable = db.pr_person\n query = (ltable.user_id == author_id) & \\\n (ltable.pe_id == ptable.pe_id)\n row = db(query).select(ptable.id,\n limitby=(0, 1)\n ).first()\n if row:\n person_id = row.id\n else:\n person_id = None\n elif contact_field == \"person_id\":\n person_id = raw[\"cms_post.person_id\"]\n if person_id:\n person = record[\"cms_post.person_id\"]\n else:\n person_id = None\n\n if person:\n if person_id:\n # @ToDo: deployment_setting for controller to use?\n person_url = URL(c=\"pr\", f=\"person\", args=[person_id])\n else:\n person_url = \"#\"\n person = A(person,\n _href=person_url,\n )\n\n avatar = \"\"\n\n organisation = \"\"\n if org_field:\n organisation_id = raw[org_field]\n if organisation_id:\n organisation = record[org_field]\n org_url = URL(c=\"org\", f=\"organisation\", args=[organisation_id, \"profile\"])\n organisation = A(organisation,\n _href=org_url,\n _class=\"card-organisation\",\n )\n\n # Avatar\n # Try Organisation Logo\n otable = db.org_organisation\n row = db(otable.id == organisation_id).select(otable.logo,\n limitby=(0, 1)\n ).first()\n if row and row.logo:\n logo = URL(c=\"default\", f=\"download\", args=[row.logo])\n avatar = IMG(_src=logo,\n _height=50,\n _width=50,\n _style=\"padding-right:5px\",\n _class=\"media-object\")\n else:\n avatar = organisation\n avatar = A(avatar,\n _href=org_url,\n _class=\"pull-left\",\n )\n\n org_group = \"\"\n if org_group_field:\n org_group_id = raw[org_group_field]\n if org_group_id:\n org_group = record[org_group_field]\n org_group_url = URL(c=\"org\", f=\"group\", args=[org_group_id, \"profile\"])\n org_group = A(org_group,\n _href=org_group_url,\n _class=\"card-org-group\",\n )\n\n if not avatar and person_id:\n # Personal Avatar\n avatar = s3_avatar_represent(person_id,\n tablename=\"pr_person\",\n _class=\"media-object\")\n\n avatar = A(avatar,\n _href=person_url,\n _class=\"pull-left\",\n )\n\n if person and organisation:\n card_person = DIV(person,\n \" - \",\n organisation,\n _class=\"card-person\",\n )\n elif person and org_group:\n card_person = DIV(person,\n \" - \",\n org_group,\n _class=\"card-person\",\n )\n elif person:\n card_person = DIV(person,\n _class=\"card-person\",\n )\n #elif organisation:\n # card_person = DIV(organisation,\n # _class=\"card-person\",\n # )\n elif org_group:\n card_person = DIV(org_group,\n _class=\"card-person\",\n )\n else:\n card_person = DIV(_class=\"card-person\",\n )\n\n permit = current.auth.s3_has_permission\n table = db.cms_post\n updateable = permit(\"update\", table, record_id=record_id)\n\n if settings.get_cms_show_tags():\n tags = raw[\"cms_tag.name\"]\n if tags or updateable:\n tag_list = UL(_class=\"s3-tags\",\n )\n tag_list[\"_data-post_id\"] = record_id\n else:\n tag_list = \"\"\n if tags:\n if not isinstance(tags, list):\n tags = [tags]#.split(\", \")\n for tag in tags:\n tag_item = LI(tag)\n tag_list.append(tag_item)\n tags 
= tag_list\n else:\n tags = \"\"\n\n T = current.T\n if series_id:\n series = record[\"cms_post.series_id\"]\n translate = settings.get_L10n_translate_cms_series()\n if translate:\n series_title = T(series)\n else:\n series_title = series\n else:\n series_title = series = \"\"\n\n request = current.request\n\n # Tool box\n if updateable:\n if request.function == \"newsfeed\":\n fn = \"newsfeed\"\n else:\n fn = \"post\"\n edit_btn = A(ICON(\"edit\"),\n _href=URL(c=\"cms\", f=fn,\n args=[record_id, \"update.popup\"],\n vars={\"refresh\": list_id,\n \"record\": record_id}\n ),\n _class=\"s3_modal\",\n _title=T(\"Edit %(type)s\") % dict(type=series_title),\n )\n else:\n edit_btn = \"\"\n if permit(\"delete\", table, record_id=record_id):\n delete_btn = A(ICON(\"delete\"),\n _class=\"dl-item-delete\",\n )\n else:\n delete_btn = \"\"\n user = current.auth.user\n if user and settings.get_cms_bookmarks():\n ltable = s3db.cms_post_user\n query = (ltable.post_id == record_id) & \\\n (ltable.user_id == user.id)\n exists = db(query).select(ltable.id,\n limitby=(0, 1)\n ).first()\n if exists:\n bookmark_btn = A(ICON(\"bookmark\"),\n _onclick=\"$.getS3('%s',function(){$('#%s').datalist('ajaxReloadItem',%s)})\" %\n (URL(c=\"cms\", f=\"post\",\n args=[record_id, \"remove_bookmark\"]),\n list_id,\n record_id),\n _title=T(\"Remove Bookmark\"),\n )\n else:\n bookmark_btn = A(ICON(\"bookmark-empty\"),\n _onclick=\"$.getS3('%s',function(){$('#%s').datalist('ajaxReloadItem',%s)})\" %\n (URL(c=\"cms\", f=\"post\",\n args=[record_id, \"add_bookmark\"]),\n list_id,\n record_id),\n _title=T(\"Add Bookmark\"),\n )\n else:\n bookmark_btn = \"\"\n toolbox = DIV(bookmark_btn,\n edit_btn,\n delete_btn,\n _class=\"edit-bar fright\",\n )\n\n # Dropdown of available documents\n documents = raw[\"doc_document.file\"]\n if documents:\n if not isinstance(documents, list):\n documents = [documents]\n doc_list_id = \"attachments-%s\" % item_id\n doc_list = UL(_class=\"f-dropdown dropdown-menu\",\n _role=\"menu\",\n _id=doc_list_id,\n # Foundation:\n data={\"dropdown-content\": \"\"},\n )\n retrieve = db.doc_document.file.retrieve\n for doc in documents:\n try:\n doc_name = retrieve(doc)[0]\n except (IOError, TypeError):\n doc_name = NONE\n doc_url = URL(c=\"default\", f=\"download\",\n args=[doc])\n doc_item = LI(A(ICON(\"file\"),\n \" \",\n doc_name,\n _href=doc_url,\n ),\n _role=\"menuitem\",\n )\n doc_list.append(doc_item)\n docs = DIV(A(ICON(\"attachment\"),\n SPAN(_class=\"caret\"),\n _class=\"btn dropdown-toggle dropdown\",\n _href=\"#\",\n data={# Both Bootstrap & Foundation:\n \"dropdown\": doc_list_id,\n # Foundation:\n \"options\": \"is_hover:true; hover_timeout:5000\",\n # Bootstrap:\n \"toggle\": \"dropdown\",\n },\n ),\n doc_list,\n _class=\"btn-group attachments dropdown pull-right\",\n )\n else:\n docs = \"\"\n\n links = raw[\"doc_document.url\"]\n if links:\n if not isinstance(links, list):\n links = [links]\n link_list = DIV(_class=\"media card-links\")\n for link in links:\n link_item = A(ICON(\"link\"),\n \" \",\n link,\n _href=link,\n _target=\"_blank\",\n _class=\"card-link\",\n )\n link_list.append(link_item)\n else:\n link_list = \"\"\n\n if \"profile\" in request.args:\n # Single resource list\n # - don't show series_title\n if settings.get_cms_show_titles():\n title = raw[\"cms_post.title\"] or \"\"\n else:\n title = \"\"\n card_label = SPAN(\" %s\" % title,\n _class=\"card-title\")\n else:\n # Mixed resource lists (Home, News Feed)\n icon = series.lower().replace(\" \", \"_\")\n series_title = SPAN(\" 
%s\" % series_title,\n _class=\"card-title\")\n raw_title = raw[\"cms_post.title\"]\n if settings.get_cms_show_titles() and raw_title:\n title = SPAN(s3_truncate(raw_title), _class=\"card-title2\")\n card_label = TAG[\"\"](ICON(icon),\n series_title,\n title,\n )\n else:\n card_label = TAG[\"\"](ICON(icon),\n series_title,\n )\n # Type cards\n #if series == \"Alert\":\n # # Apply additional highlighting for Alerts\n # item_class = \"%s disaster\" % item_class\n\n # Render the item\n if series == \"Event\" and \"newsfeed\" not in request.args: # and request.function != \"newsfeed\"\n # Events on Homepage have a different header\n date.add_class(\"event\")\n header = DIV(date,\n location,\n toolbox,\n _class=\"card-header\",\n )\n else:\n header = DIV(card_label,\n location,\n date,\n toolbox,\n _class=\"card-header\",\n )\n\n item = DIV(header,\n DIV(avatar,\n DIV(DIV(body,\n card_person,\n _class=\"media\",\n ),\n _class=\"media-body\",\n ),\n _class=\"media\",\n ),\n tags,\n docs,\n link_list,\n _class=item_class,\n _id=item_id,\n )\n\n return item", "def __repr__(self):\n\t\treturn \"<Post #{}: {}>\".format(self.id, self.content)", "def process_postlist(app, doctree, docname):\n blog = Blog(app)\n if not blog:\n register_posts(app)\n for node in doctree.findall(PostList):\n colls = []\n for cat in [\"tags\", \"author\", \"category\", \"location\", \"language\"]:\n for coll in node[cat]:\n if coll in blog.catalogs[cat].collections:\n colls.append(blog.catalogs[cat].collections[coll])\n if colls:\n posts = set(blog.posts)\n for coll in colls:\n posts = posts & set(coll)\n posts = list(posts)\n posts.sort(reverse=True)\n posts = posts[: node.attributes[\"length\"]]\n else:\n posts = list(blog.recent(node.attributes[\"length\"], docname, **node.attributes))\n if node.attributes[\"sort\"]:\n posts.sort() # in reverse chronological order, so no reverse=True\n fmts = list(Formatter().parse(node.attributes[\"format\"]))\n not_in = {\"date\", \"title\", \"author\", \"location\", \"language\", \"category\", \"tags\", None}\n for text, key, __, __ in fmts:\n if key not in not_in:\n raise KeyError(f\"{key} is not recognized in postlist format\")\n excerpts = node.attributes[\"excerpts\"]\n expand = node.attributes[\"expand\"]\n date_format = node.attributes[\"date\"] or _(blog.post_date_format_short)\n bl = nodes.bullet_list()\n bl.attributes[\"classes\"].append(\"postlist-style-\" + node[\"list-style\"])\n bl.attributes[\"classes\"].append(\"postlist\")\n for post in posts:\n bli = nodes.list_item()\n bli.attributes[\"classes\"].append(\"ablog-post\")\n bl.append(bli)\n par = nodes.paragraph()\n bli.append(par)\n for text, key, __, __ in fmts:\n if text:\n par.append(nodes.Text(text))\n if key is None:\n continue\n if key == \"date\":\n par.append(nodes.Text(post.date.strftime(date_format)))\n else:\n if key == \"title\":\n items = [post]\n else:\n items = getattr(post, key)\n\n for i, item in enumerate(items, start=1):\n if key == \"title\":\n ref = nodes.reference()\n if item.options.get(\"external_link\"):\n ref[\"refuri\"] = post.options.get(\"external_link\")\n else:\n ref[\"refuri\"] = app.builder.get_relative_uri(docname, item.docname)\n ref[\"internal\"] = True\n ref[\"ids\"] = []\n ref[\"backrefs\"] = []\n ref[\"dupnames\"] = []\n ref[\"classes\"] = []\n ref[\"names\"] = []\n ref.append(nodes.Text(str(item)))\n par.attributes[\"classes\"].append(\"ablog-post-title\")\n else:\n ref = _missing_reference(app, item.xref, docname)\n par.append(ref)\n if i < len(items):\n 
par.append(nodes.Text(\", \"))\n if excerpts and post.excerpt:\n for enode in post.excerpt:\n enode = enode.deepcopy()\n enode.attributes[\"classes\"].append(\"ablog-post-excerpt\")\n revise_pending_xrefs(enode, docname)\n app.env.resolve_references(enode, docname, app.builder)\n enode.parent = bli.parent\n bli.append(enode)\n if expand:\n ref = app.builder.get_relative_uri(docname, post.docname)\n enode = nodes.paragraph()\n enode.attributes[\"classes\"].append(\"ablog-post-expand\")\n refnode = nodes.reference(\"\", \"\", internal=True, refuri=ref)\n innernode = nodes.emphasis(text=expand)\n refnode.append(innernode)\n enode.append(refnode)\n bli.append(enode)\n node.replace_self(bl)", "def format_posts(posts):\n formatted_posts = []\n\n for post in posts:\n post_data = post['data']\n formatted_post = {\n \"title\": post_data['title'],\n \"post_id\": post_data['id'],\n \"subreddit\": post_data['subreddit'],\n \"score\": post_data['score'],\n \"url\": post_data['url'],\n \"author\": post_data['author'],\n \"permalink\": format_post_permalink(post_data['permalink']),\n \"num_comments\": post_data['num_comments'],\n \"created\": post_data['created'],\n \"body\": post_data['selftext']\n }\n\n formatted_posts.append(formatted_post)\n\n return formatted_posts", "def render(self, data):\n return []", "def serialize_posts_data_v2(influencer, posts, length_limit=30, highlighted_ids=[], **kw):\n from debra import serializers\n from debra import feeds_helpers\n from debra import constants\n\n request = kw.get('request')\n brand = request.visitor[\"base_brand\"] if request else None\n\n posts_data = []\n urls = set()\n posts = list(posts)\n\n for post in posts:\n if post.url in urls:\n continue\n urls.add(post.url)\n\n feed_json = feeds_helpers.get_feed_handler_for_platform(\n get_post_platform(post))\n\n post_data = feed_json(None,\n for_single_post=post,\n length_limit=length_limit\n )\n\n if post_data is None:\n continue\n\n post_data[\"blog_name\"] = serializers.unescape(influencer.blogname if influencer else post.influencer.blogname)\n post_data[\"title\"] = post.title\n post_data[\"platform\"] = get_post_platform(post)\n\n if brand and brand.flag_show_dummy_data:\n post_data['url'] = constants.FAKE_POST_DATA['url']\n post_data['title'] = constants.FAKE_POST_DATA['title']\n\n if post.id in highlighted_ids:\n post_data[\"highlight\"] = True\n if post.create_date:\n post_data[\"create_date\"] = post.create_date.strftime(\"%b. 
%e, %Y\")\n if influencer:\n post_data['user'] = influencer.feed_stamp\n else:\n post_data['user'] = post.influencer.feed_stamp\n posts_data.append(post_data)\n return posts_data", "def __repr__(self):\n p = self\n return f\"<Post id={p.id}, title={p.title}, content={p.content}, created_at={p.created_at}, user_id={p.user_id}>\"", "def OnCustomRender(self, dc, item, rect):\r\n \r\n pass", "def marshal_posts(shard, post_list):\n out = []\n for post in post_list:\n post_dict = dict(\n shardId=shard,\n archiveType=models.Post.ARCHIVE_REVERSE_MAPPING[post.archive_type],\n nickname=post.nickname,\n title=post.title,\n body=post.body,\n postTimeMs=models.datetime_to_stamp_ms(post.post_time),\n sequenceId=getattr(post, 'sequence', None),\n newTopicId=post.new_topic,\n postId=post.post_id)\n out.append(post_dict)\n return out", "def __repr__(self):\n return f'<Post post_id={self.post_id} user_id={self.user_id}'", "def render_many(self, data):\n return {\n \"type\": \"FeatureCollection\",\n \"features\": [self.render_single(item) for item in data]\n }", "def render_posts(self, **params):\n\n if \"user_posts\" in params:\n posts = params['user_posts']\n else:\n posts = Post.get_all()\n\n rendered_posts = \"\"\n for post in posts:\n rendered_posts += self.render_post(post, **params)\n\n self.render(\"blog/blog.html\", rendered_posts=rendered_posts)", "def render_postcards(self):\n postcards = []\n for postcard_image in self.view.postcard_images:\n postcard = (\n ' <div class=\"%s\" style=\"background-color:black;\">\\n'\n ' <a href=\"%s\" >\\n'\n ' <img src=\"%s\" class=\"thumbnail img-responsive center-block\">\\n'\n ' </a>\\n'\n ' </div>') % (self.div_class,\n postcard_image['href'],\n postcard_image['img_src'])\n postcards.append(postcard)\n text = \"\\n\".join(postcards)\n return text", "def render(self):\n if self.can_render():\n output = '<ul>'\n for item in self.items:\n output += \"<li>{0}</li>\".format(item)\n return output + '</ul>'\n return ''", "def make_list(posts, dst, list_layout, item_layout, limit=None, **params):\n items = []\n for k, post in enumerate(posts):\n item_params = dict(params, **post)\n\n # Get title and summary\n title, summary = get_title_and_summary(item_params['dest_path'])\n item_params['title'] = title\n item_params['summary'] = summary\n\n item = render(item_layout, **item_params)\n items.append(item)\n\n # Limit to `limit` items\n if limit is not None and k + 1 >= limit:\n break\n\n params['content'] = ''.join(items)\n dst_path = render(dst, **params)\n output = render(list_layout, **params)\n\n log('Rendering list => {} ...', dst_path)\n fwrite(dst_path, output)", "def add(self, posts):\n li_html = []\n for post in posts:\n li_html.append(\n u'<li><a href=\"{route}\">{title}</a></li>'.format(\n route=post.route, title=post.title))\n self._blog_list = u'\\n'.join(li_html)\n self._posts = posts", "def get_data(self):\n return [{'title': th['title'], 'img_link': th['img_link'], 'id': j, 'url': th['url'], 'text': th.get('text', '0')}\n for j, th in enumerate(self)]", "def _render_row(self, alist):\n return [self._render_thing(item) for item in alist]", "def print_posts(posts):\n\n click.echo()\n for post in posts:\n click.secho('%d. 
' % post['rank'], nl=False)\n click.secho('%s\\t' % post['title'], bold=True, fg=\"red\", nl=False)\n click.secho('%s' % post['tagline'], fg=\"yellow\")\n click.echo()", "def __repr__(self):\n\n b = self\n return f\"<Blog {b.id} {b.title} {b.created_at} {b.user_id}>\"", "def render(self, data, *args, **kwargs):\n if not isinstance(data, list):\n data = data.get(self.results_field, [])\n return super().render(data, *args, **kwargs)", "def render(self, data, *args, **kwargs):\n if not isinstance(data, list):\n data = data.get(self.results_field, [])\n return super().render(data, *args, **kwargs)", "def __repr__(self):\n return f\"Item=(id={self.id},item_name={self.item_name},item_slug={self.item_slug})\"", "def show_blog_list():\r\n\tblog_list = Page.objects.filter(page_type=3).order_by('-created')[:4]\r\n\treturn {'blog_list': blog_list}", "def serializeItemsData(items, highlight=False):\n from debra.models import ProductModelShelfMap\n #items = items.filter(added_datetime__gte=datetime.date.today()-datetime.timedelta(days=30))\n # unordered_pair = list(items.values_list('added_datetime', 'id'))\n unordered_pair = []\n\n for item in items:\n unordered_pair.append((item.added_datetime, item.id))\n\n unordered_pair.sort()\n unordered_pair.reverse()\n ids = [x[1] for x in unordered_pair[:60]]\n items = ProductModelShelfMap.objects.select_related(\n 'product_model__brand').filter(id__in=ids)\n items_data = []\n prod_model_existing = set()\n for item in items:\n if item.product_model.name in prod_model_existing:\n continue\n prod_model_existing.add(item.product_model.name)\n item_data = {\n \"name\": item.product_model.name,\n \"img_url_feed_view\": item.product_model.img_url,\n \"img_url_panel_view\": item.img_url_panel_view,\n }\n if highlight:\n item_data[\"highlight\"] = True\n if item.product_model.brand:\n item_data[\"brand\"] = item.product_model.brand.name\n items_data.append(item_data)\n return items_data", "def get_posts(self): #return list of posts that are associated with this blog_id\n return Post.find_posts_for_blog_id(self.blog_id) #this will return a list of posts objects", "def OnGetItem(self, n):\n try:\n return self.blog.get_blog(n).html()\n except IndexError, err:\n display_error(_('Could not get blog.'), error=err)\n return \"<p>Corrupted Blog</p>\"", "def add_list_data(self, data):\n\n # TODO: I'd like to use, say, a QListWidget or something, but controlling the widget\n # height on those was annoying, and I wanted the items to be easily copy+pasteable.\n # In the end I'm just going with a multiline QLabel inside a QScrollArea\n\n if len(data) == 0:\n return None\n\n scroll = QtWidgets.QScrollArea(self)\n scroll.setFrameShadow(QtWidgets.QFrame.Sunken)\n scroll.setFrameShape(QtWidgets.QFrame.Panel)\n w = QtWidgets.QLabel('<tt>{}</tt>'.format('<br/>'.join(data)), self)\n w.setTextInteractionFlags(QtCore.Qt.TextSelectableByMouse)\n scroll.setWidget(w)\n self.grid.addWidget(scroll, self.cur_row, 1)\n return w", "def format(self, item):\n raise NotImplementedError()" ]
[ "0.5797765", "0.5777556", "0.569845", "0.5545204", "0.5486257", "0.54002476", "0.5336475", "0.5334056", "0.5324463", "0.5311351", "0.5307505", "0.53025407", "0.52784413", "0.5277002", "0.52619624", "0.52491385", "0.5233054", "0.52293384", "0.5213067", "0.5183145", "0.51277083", "0.5109417", "0.5109417", "0.50679576", "0.5029813", "0.5010366", "0.49993074", "0.49917564", "0.4972692", "0.49680358" ]
0.5974937
0
Count need lines per district and status (top 5 districts) for all open Events
def needs_by_district(cls):

    T = current.T

    db = current.db
    s3db = current.s3db

    table = s3db.need_line
    ntable = s3db.need_need
    etable = s3db.event_event
    ltable = s3db.event_event_need

    status = table.status
    number = table.id.count()
    location = ntable.location_id

    base_query = (etable.closed == False) & \
                 (etable.id == ltable.event_id) & \
                 (ltable.need_id == ntable.id) & \
                 (ntable.id == table.need_id) & \
                 (table.deleted == False)

    # Get the top-5 locations by number of need lines
    query = base_query & (location != None)
    rows = db(query).select(location,
                            number,
                            groupby = location,
                            orderby = ~(number),
                            limitby = (0, 5),
                            )
    locations = [row[location] for row in rows]

    data = []
    if locations:
        # Get labels for locations
        location_represent = S3Represent(lookup = "gis_location",
                                         fields = ["L2"],
                                         )
        location_labels = location_represent.bulk(locations)

        # Count need lines per status and location
        query = base_query & (location.belongs(locations))
        rows = db(query).select(location,
                                status,
                                number,
                                groupby = (status, location),
                                )

        # Group results as {status: {location: number}}
        per_status = {}
        for row in rows:
            row_status = row[status]
            if row_status in per_status:
                per_status[row_status][row[location]] = row[number]
            else:
                per_status[row_status] = {row[location]: row[number]}

        # Build data structure for chart renderer
        # - every status gives a series
        # - every district gives a series entry
        for code, label, color in cls.REQ_STATUS:
            series = {"key": s3_str(T(label)),
                      "color": color,
                      "filterKey": code,
                      }
            values = []
            per_location = per_status.get(code)
            for location_id in locations:
                if per_location:
                    value = per_location.get(location_id)
                else:
                    value = None
                location_label = location_labels.get(location_id)
                item = {"label": location_label,
                        "value": value if value else 0,
                        "filterKey": location_label,
                        }
                values.append(item)
            series["values"] = values
            data.append(series)

    return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def needs_by_district(cls):\n\n T = current.T\n\n db = current.db\n s3db = current.s3db\n\n table = s3db.req_need_line\n ntable = s3db.req_need\n\n left = ntable.on(ntable.id == table.need_id)\n\n status = table.status\n number = table.id.count()\n location = ntable.location_id\n\n # Get the top-5 locations by number of need lines\n query = (table.deleted == False) & \\\n (location != None)\n rows = db(query).select(location,\n number,\n left = left,\n groupby = location,\n orderby = ~(number),\n limitby = (0, 5),\n )\n locations = [row[location] for row in rows]\n\n data = []\n if locations:\n # Get labels for locations\n location_represent = S3Represent(lookup=\"gis_location\", fields=[\"L2\"])\n location_labels = location_represent.bulk(locations)\n\n # Count need lines per status and location\n query = (table.deleted == False) & \\\n (location.belongs(locations))\n rows = db(query).select(location,\n status,\n number,\n left = left,\n groupby = (status, location),\n )\n\n # Group results as {status: {location: number}}\n per_status = {}\n for row in rows:\n row_status = row[status]\n if row_status in per_status:\n per_status[row_status][row[location]] = row[number]\n else:\n per_status[row_status] = {row[location]: row[number]}\n\n # Build data structure for chart renderer\n # - every status gives a series\n # - every district gives a series entry\n for code, label, color in cls.REQ_STATUS:\n series = {\"key\": s3_str(T(label)),\n \"color\": color,\n \"filterKey\": code,\n }\n values = []\n per_location = per_status.get(code)\n for location_id in locations:\n if per_location:\n value = per_location.get(location_id)\n else:\n value = None\n location_label = location_labels.get(location_id)\n item = {\"label\": location_label,\n \"value\": value if value else 0,\n \"filterKey\": location_label,\n }\n values.append(item)\n series[\"values\"] = values\n data.append(series)\n\n return data", "def getNumberOfEvents(self):\n whereClause = \"ecc_id = 1 and r_power = 0 and n = 2\"\n Nevent = self.db.selectFromTable(\"eccentricities\", \"count()\", whereClause)\n return Nevent[0][0]", "def getEventsCounters (attack_df, events):\n n_11 = 0 \n n_12 = 0 \n n_21 = 0 \n n_22 = 0 \n event_type = 0\n for event in events:\n is_attack = False\n for ind in range (len(attack_df.index)):\n # attack happened\n if (event.start >= (attack_df[\"start_time\"][ind] + 0.5)\n and event.start < (attack_df[\"end_time\"][ind]) - 0.5) or (\n event.end > (attack_df[\"start_time\"][ind] + 0.5)) and (\n event.end <= (attack_df[\"end_time\"][ind] - 0.5)):\n if event.choise == True:\n n_11 += 1\n event_type = 1\n is_attack = True\n break\n else:\n event_type = 3\n n_21 += 1\n is_attack = True\n break\n if is_attack:\n continue\n else:\n if event.choise == True:\n event_type = 2\n n_12 += 1\n else:\n event_type = 4\n n_22 += 1\n\n return ([n_11, n_12, n_21, n_22], event_type)", "def errdump_statistics(errdump_aggregated_df, raslog_message_details_df, raslog_message_id_details_df):\n \n # filter log messages based on date, message tag and merge devices behind same port for single event\n errdump_filtered_df = errdump_filter(errdump_aggregated_df)\n\n # set message date and time as index\n errdump_filtered_df.set_index(errdump_filtered_df['Message_date'], inplace=True)\n errdump_filtered_df.drop(columns=['Message_date'], inplace=True)\n \n # grouping and message counting\n errdump_grp_columns = ['configname', 'chassis_name', 'chassis_wwn',\n 'switchName', 'switchWwn',\n 'Fabric_name', 'Fabric_label', 
'config_collection_date',\n 'Message_ID', 'Severity', 'Message_portIndex', 'Message_portType','slot', 'port',\n 'Condition', 'Dashboard_category', 'obj', 'Message_status',\n 'portIndex', 'Index_slot_port', 'portType', 'portState', 'speed',\n 'tx_port', 'rx_port', 'sid', 'did', 'wwn', 'IP_Address',\n 'Connected_portId', 'Connected_portWwn', 'Device_Host_Name_Port', 'alias', 'deviceType']\n \n # group log messages by month, device and log message\n errdump_grouper = errdump_filtered_df.groupby([pd.Grouper(freq='M', kind='period'), *errdump_grp_columns])\n # count events in each group\n raslog_counter_sr = errdump_grouper['Condition'].count()\n raslog_counter_df = pd.DataFrame(raslog_counter_sr)\n # rename column Condition to avoid duplication with one of Indexies\n raslog_counter_df.rename(columns={'Condition': 'Quantity'}, inplace=True)\n raslog_counter_df.reset_index(inplace=True)\n # replace na_cell\n raslog_counter_df.replace({'na_cell': np.nan}, inplace=True)\n if raslog_counter_df['alias'].notna().any():\n raslog_counter_df['alias'].replace('na_cell(?:, )?', value='', regex=True, inplace=True)\n raslog_counter_df['alias'] = raslog_counter_df['alias'].str.rstrip(', ')\n\n # apply date format to remove day and time\n raslog_counter_df['config_collection_date'] = pd.to_datetime(raslog_counter_df['config_collection_date']).dt.date\n raslog_counter_df['Message_date'] = raslog_counter_df['Message_date'].dt.strftime('%Y-%m')\n # sort values\n errdump_sort_columns = ['chassis_name', 'Fabric_label', 'Fabric_name', 'switchName', 'Message_date', 'Quantity']\n raslog_counter_df.sort_values(by=errdump_sort_columns, ascending=[*[True]*5, False], inplace=True)\n raslog_counter_df.reset_index(drop=True, inplace=True) \n # drop columns with all empty empty cells\n raslog_counter_df.dropna(axis=1, how='all', inplace=True)\n\n raslog_counter_df = dfop.dataframe_fillna(raslog_counter_df, raslog_message_details_df, join_lst=['Condition'], \n filled_lst=['Details', 'Recommended_action']) \n\n raslog_counter_df = dfop.dataframe_fillna(raslog_counter_df, raslog_message_id_details_df, join_lst=['Message_ID'], \n filled_lst=['Details', 'Recommended_action']) \n \n # find log messages which appear more then three times a month\n mask_frequent = raslog_counter_df['Quantity'] > 3\n raslog_frequent_df = raslog_counter_df.loc[mask_frequent].copy()\n \n # remove INFO Messages for report DataFrame except securitu violations messages\n mask_not_info = raslog_frequent_df['Severity'] != 'INFO'\n mask_sec_violation_condition = raslog_frequent_df['Condition'].str.contains('security violation', case=False, na=False)\n mask_sec_violation_dashboard = raslog_frequent_df['Dashboard_category'].str.contains('security violation', case=False, na=False)\n raslog_frequent_df = raslog_frequent_df.loc[mask_not_info | mask_sec_violation_condition | mask_sec_violation_dashboard].copy()\n\n raslog_frequent_df.reset_index(drop=True, inplace=True) \n return raslog_counter_df, raslog_frequent_df", "def fetch_counts(datestruct):\n response = call_responder('elasticsearch', 'query/daily_proofreader_hits')\n for rec in response['result']['hits']['hits']:\n data = rec['_source']\n if data['user'] not in datestruct:\n datestruct[data['user']] = {\"cleave\": 0, \"merge\": 0,\n \"split-supervoxel\": 0}\n if '/cleave/' in data['uri']:\n datestruct[data['user']]['cleave'] += 1\n elif '/merge' in data['uri']:\n datestruct[data['user']]['merge'] += 1\n elif '/split-supervoxel' in data['uri']:\n datestruct[data['user']]['split-supervoxel'] += 1", "def 
cnts_by_airline_dow(flights):\n\n return ...", "def obstacle_count(self):\n self.wide_scan()\n found_something = False\n counter = 0\n for distance in self.scan:\n if distance and distance < 200 and not found_something:\n found_something = True\n counter += 1\n print(\"Object # %d found, I think\" % counter)\n if distance and distance > 200 and found_something:\n found_something = False\n print(\"\\n----I SEE %d OBJECTS----\\n\" % counter)", "def numEvents(self):\n offsets = self.baxH5._offsetsByHole[self.holeNumber]\n return offsets[1] - offsets[0]", "def getNumEvents(dbsApi, dset):\n summary = getDsetSummary(dbsApi, dset)\n # it means the dataset was not produced\n if summary[0]['num_file'] == 0:\n return -1\n return summary[0]['num_event']", "def load_fact_traffic_violations_count_agg(cur,code):\n cur.execute(code)", "def __len__(self):\n return self.total_events", "def count():", "def getCount(self, event):\n # Attempt 2: Still too slow\n count = 0\n \n for mEvent in self:\n if event.__st__(mEvent):\n count += 1\n \n return count\n \n # Attempt 1: Too slow\n #return reduce((lambda x, y: x+y),\n # map((lambda i: itemset <= i), self))", "def count_segments(markers) -> int:\n cnt = Counter()\n for row in markers:\n cnt.update(row)\n n_cnt = dict(takewhile(lambda x: x[1] >= 10, cnt.most_common()))\n del n_cnt[1]\n del n_cnt[-1]\n return len(n_cnt.keys())", "def test_data_source_soaps_count_get(self):\n pass", "def dcount(ev):\n profData = getProfilingData(ev)\n if profData is not None:\n a = profData.Descendants().AsArray()\n if len(a) > 0:\n return profData.DescendantCount(a[0])\n return \"\"", "def num_events_sponsored(self, status=None):\n num_events = 0\n if status == SponsorshipStatus.CURRENT:\n for sponsorship in self.sponsorships:\n if sponsorship.is_current():\n num_events += 1\n elif status == SponsorshipStatus.PAST:\n for sponsorship in self.sponsorships:\n if sponsorship.is_past():\n num_events += 1\n else: # all\n for sponsorship in self.sponsorships:\n num_events += 1\n return num_events", "def aggregate_state_estimates_by_district(self, state):\n data = {}\n for division in tqdm(\n Division.objects.filter(level=self.DISTRICT_LEVEL, parent=state)\n ):\n fips = division.code\n id = division.id\n aggregated_labels = [] # Keep track of already agg'ed variables\n for estimate in division.census_estimates.all():\n series = estimate.variable.table.series\n year = estimate.variable.table.year\n table = estimate.variable.table.code\n\n label = None\n if estimate.variable.label:\n label = estimate.variable.label.label\n table_label = \"{}{}\".format(table, label)\n\n code = estimate.variable.code\n if series not in data:\n data[series] = {}\n if year not in data[series]:\n data[series][year] = {}\n if table not in data[series][year]:\n data[series][year][table] = {}\n if fips not in data[series][year][table]:\n data[series][year][table][fips] = {}\n if label is not None:\n if table_label not in aggregated_labels:\n aggregated_labels.append(table_label)\n data[series][year][table][fips][\n label\n ] = self.aggregate_variable(estimate, id)\n else:\n data[series][year][table][division.code][\n code\n ] = estimate.estimate\n return data", "def _count_level_events(count_list):\r\n if not len(count_list):\r\n return 0, 0, None\r\n number_events = 0\r\n number_multiple = 0\r\n max_multiple = count_list[0]\r\n for index, count in enumerate(count_list):\r\n if count_list[index] > 0:\r\n number_events = number_events + 1\r\n if count_list[index] > 1:\r\n number_multiple = number_multiple + 1\r\n 
if count_list[index] > max_multiple:\r\n max_multiple = count_list[index]\r\n return number_events, number_multiple, max_multiple", "def get_diagnose_count(visit):\r\n return visit.diagnose.all().count()", "def testViewOccData(self):\n try:\n entryD = self.__mU.doImport(self.__instanceSavePath, fmt=\"pickle\")\n segmentCountList = []\n segmentLengthList = []\n entryCountD = {}\n for entryId in entryD:\n for _, eD in entryD[entryId][\"selected_polymer_entities\"].items():\n\n analD = eD[\"anal_instances\"] if \"anal_instances\" in eD else {}\n\n for _, aD in analD.items():\n entryCountD[entryId] = True\n segmentCount = len(aD[\"owabRegiond\"])\n segmentLengths = [d[\"length\"] for sId, d in aD[\"owabRegiond\"].items()]\n\n segmentCountList.append(segmentCount)\n segmentLengthList.extend(segmentLengths)\n #\n logger.info(\"gaps %d gap lengths %d\", len(segmentCountList), len(segmentLengthList))\n #\n cu = DisorderChartUtils()\n cu.doIntegerBarChart(\n segmentCountList,\n plotPath=self.__plotOwabSegmentCount,\n yPlotScale=\"log\",\n yPlotMax=6,\n xPlotMax=100,\n xPlotLabel=\"Segment Count\",\n yPlotLabel=\"Protein Instances (log)\",\n plotTitle=\"Segment counts (OWAB > 2 * mean OWAB)\",\n )\n self.__writeLegend(\n self.__plotOwabSegmentCount,\n \"Segment counts for all (%d) protein sequences (OWAB > 2 * mean OWAB and X-ray resolution limit < 3.5 Angstoms (entries=%d)) \"\n % (len(segmentCountList), len(entryCountD)),\n )\n cu.doIntegerBarChart(\n segmentLengthList,\n plotPath=self.__plotOwabSegmentLength,\n yPlotScale=\"log\",\n yPlotMax=6,\n xPlotMax=100,\n xPlotLabel=\"Segment width (residues)\",\n yPlotLabel=\"Segment Instances (log)\",\n plotTitle=\"Segment widths (OWAB > 2 * mean OWAB)\",\n )\n self.__writeLegend(\n self.__plotOwabSegmentLength,\n \"Segment widths for all (%d) protein sequences (OWAB > 2 * mean OWAB and X-ray resolution limit < 3.5 Angstoms (entries=%d)) \"\n % (len(segmentLengthList), len(entryCountD)),\n )\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def melon_count_summary(day_number, path):\n print(\"Day\", day_number)\n the_file = open(path)\n for line in the_file:\n line = line.rstrip()\n words = line.split('|')\n print(\"Delivered {} {}s for total of ${}\".format(words[1], words[0], words[2]))\n the_file.close()", "def _count_parties(data_set): #DEMOCRATS, THEN REPUBLICANS\r\n reps = 0\r\n dems = 0\r\n for data_point in data_set:\r\n if data_point.dat_party == \"R\": reps+=1\r\n if data_point.dat_party == \"D\": dems+=1\r\n\r\n return (dems, reps)", "def numberOfEvents(self):\n raise NotImplementedError", "def print_number_of_entities(self, entity_col):\n for df in self:\n print(\"# of entities: \", len(df[entity_col].unique()))", "def _num_of_consolidated(self, observation):\n a = set(observation)\n b = set(np.arange(self.num_of_servers))\n intersect = b - a\n return len(intersect)", "def _num_of_consolidated(self, observation):\n a = set(observation)\n b = set(np.arange(self.num_of_servers))\n intersect = b - a\n return len(intersect)", "def get_actnum_ingroup(cntry_density, t0_flag, t1_flag, cntry, time):\n changed_df = cntry_density[(cntry_density.st0==t0_flag) & (cntry_density.st1==t1_flag)]\n changed_df = changed_df.groupby([\"DIS_GROUP\"]).size()\n changed_df = changed_df.reindex(['NM','NE','SHM'])\n changed_df = changed_df.fillna(0)\n changed_df = pd.DataFrame(changed_df, columns=['NUM_CHANGE']).reset_index()\n changed_df['COUNTRY'] = cntry\n changed_df['PERIOD'] = time\n changed_df['SOURCE'] = 'actual'\n return 
changed_df", "def pullGateCount(start_date, end_date):\n headers = getSenSourceHeaders()\n url = \"https://vea.sensourceinc.com/api/data/traffic?dateGroupings=hour(1)&endDate={1}T00:00:00.000Z&entityType=zone&excludeClosedHours=false&include=zone,sensor,site,location&meta=&metrics=ins,outs&relativeDate=custom&startDate={0}T00:00:00.000Z\"\n url = url.format(start_date, end_date)\n req = requests.get(url, headers=headers)\n return req", "def edgecount(self):\n\n raise NotImplementedError" ]
[ "0.5754938", "0.551308", "0.54789215", "0.5473855", "0.5299601", "0.5225415", "0.519377", "0.5175258", "0.50648946", "0.50605243", "0.50548464", "0.5018038", "0.5015188", "0.5013341", "0.50113225", "0.5009056", "0.49946627", "0.49912676", "0.49895993", "0.49847665", "0.4979868", "0.49765205", "0.49626046", "0.4959752", "0.49476838", "0.49424028", "0.49424028", "0.4926905", "0.4893687", "0.48812014" ]
0.63438565
0
Write the design to the Specctra format
def write(self, design, filename):
    self._convert(design)
    with open(filename, "w") as f:
        f.write(self._to_string(self.pcb.compose()))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_to_fits(self, filename, comment=None, overwrite = False):\n\n\n hdu = fits.PrimaryHDU(self.flux)\n hdu.header = self.header\n\n # Update header information\n crval = self.dispersion[0]\n cd = self.dispersion[1]-self.dispersion[0]\n crpix = 1\n\n hdu.header['CRVAL1'] = crval\n hdu.header['CD1_1'] = cd\n hdu.header['CDELT1'] = cd\n hdu.header['CRPIX1'] = crpix\n\n hdu.header['HISTORY'] = '1D spectrum generated with SpecOneD'\n\n if comment:\n hdu.header['HISTORY'] = comment\n\n hdul = fits.HDUList([hdu])\n\n try:\n hdul.writeto(filename, overwrite = overwrite)\n except:\n raise ValueError(\"Spectrum could not be saved. Maybe a file with the same name already exists and overwrite is False\")", "def write(self, filename=None):\n if filename == None:\n filename = self.ofilename\n\n ofile = open(filename, 'w')\n\n ofile.write('# Susceptibility: %E d(susc): %E Coercivity: %E d(coer): %E\\n' % (self.susceptibility_mean, self.susceptibility_std, self.coercivity_mean, self.coercivity_std) )\n ofile.write('# H[] M[] Mfit[]\\n')\n\n #for i in range(len(self.h)):\n # ofile.write(\" %12.10f %12.10f %12.10f\\n\" % ( self.h[i], self.m[i], self.m_fit[i] ) )\n\n ofile.close()", "def write(self, design, filename):\n writer = Worker(design, self)\n writer.save(filename)", "def output(self):\n to_write = 'S '\n \"\"\"\n print self.def_field\n for key in self.def_field:\n print key,\"=\", self.def_field[key]\n \"\"\"\n to_write += str(self.offset[0] + self.def_field['x1'])+' '\n to_write += str(self.offset[1] + self.def_field['y1'])+' '\n to_write += str(self.offset[0] + self.def_field['x2'])+' '\n to_write += str(self.offset[1] + self.def_field['y2'])+' '\n to_write += str(self.def_field['part'])+' '\n to_write += str(self.def_field['dmg'])+' '\n to_write += str(self.def_field['pen'])+' '\n to_write += self.def_field['fill']+'\\n'\n return to_write", "def export(self, buffer: IO[str], ind: str = '', disp_multiblend: bool = True) -> None:\n buffer.write(ind + 'side\\n')\n buffer.write(ind + '{\\n')\n buffer.write(\n f'{ind}\\t\"id\" \"{self.id}\"\\n'\n f'{ind}\\t\"plane\" \"({self.planes[0]}) ({self.planes[1]}) ({self.planes[2]})\"\\n'\n f'{ind}\\t\"material\" \"{self.mat}\"\\n'\n f'{ind}\\t\"uaxis\" \"{self.uaxis}\"\\n'\n f'{ind}\\t\"vaxis\" \"{self.vaxis}\"\\n'\n f'{ind}\\t\"rotation\" \"{self.ham_rot:g}\\\"\\n'\n f'{ind}\\t\"lightmapscale\" \"{self.lightmap}\"\\n'\n f'{ind}\\t\"smoothing_groups\" \"{self.smooth}\"\\n'\n )\n if self.disp_power > 0:\n assert self._disp_verts is not None\n assert self.disp_allowed_vert is not None\n buffer.write(\n f'{ind}\\tdispinfo\\n'\n f'{ind}\\t{{\\n'\n f'{ind}\\t\\t\"power\" \"{self.disp_power}\"\\n'\n f'{ind}\\t\\t\"startposition\" \"[{self.disp_pos}]\"\\n'\n f'{ind}\\t\\t\"flags\" \"{_DISP_COLL_TO_FLAG[self.disp_flags & DispFlag.COLL_ALL]}\"\\n'\n f'{ind}\\t\\t\"elevation\" \"{self.disp_elevation}\"\\n'\n f'{ind}\\t\\t\"subdiv\" \"{\"1\" if DispFlag.SUBDIV in self.disp_flags else \"0\"}\"\\n'\n )\n\n size = self.disp_size\n self._export_disp_rowset('normals', 'normal', buffer, ind, size)\n self._export_disp_rowset('distances', 'distance', buffer, ind, size)\n self._export_disp_rowset('offsets', 'offset', buffer, ind, size)\n self._export_disp_rowset('offset_normals', 'offset_norm', buffer, ind, size)\n self._export_disp_rowset('alphas', 'alpha', buffer, ind, size)\n\n buffer.write(f'{ind}\\t\\ttriangle_tags\\n{ind}\\t\\t{{\\n')\n for y in range(size):\n row = [\n f'{vert.triangle_a.value} {vert.triangle_b.value}'\n for vert in self._disp_verts[size * y:size 
* (y+1)]\n ]\n buffer.write(f'{ind}\\t\\t\"row{y}\" \"{\" \".join(row)}\"\\n')\n buffer.write(ind + '\\t\\t}\\n')\n\n buffer.write(ind + '\\t\\tallowed_verts\\n')\n buffer.write(ind + '\\t\\t{\\n')\n assert len(self.disp_allowed_vert) == 10, self.disp_allowed_vert\n buffer.write(f'{ind}\\t\\t\"10\" \"{\" \".join(map(str, self.disp_allowed_vert))}\"\\n')\n buffer.write(f'{ind}\\t\\t}}\\n{ind}\\t}}\\n')\n\n if disp_multiblend and any(vert.multi_blend for vert in self._disp_verts):\n self._export_disp_rowset('multiblend', 'multi_blend', buffer, ind, size)\n self._export_disp_rowset('alphablend', 'multi_alpha', buffer, ind, size)\n for i in range(4):\n buffer.write(f'{ind}\\t\\tmultiblend_color_{i}\\n{ind}\\t\\t{{\\n')\n for y in range(size):\n row = [\n str(vert.multi_colors[i]) if vert.multi_colors is not None else '1'\n for vert in self._disp_verts[size * y:size * (y+1)]\n ]\n buffer.write(f'{ind}\\t\\t\"row{y}\" \"{\" \".join(row)}\"\\n')\n buffer.write(ind + '\\t\\t}\\n')\n\n buffer.write(ind + '}\\n')", "def write(self, file):\n\n # Initialize output buffer\n out = ''\n\n # Print specification\n for key, value in self.specification.items():\n out += f'{key} : {value}\\n'\n\n # Print the tour\n if self.tour:\n out += 'TOUR_SECTION\\n'\n for s in self.tour:\n out += str(s) + '\\n'\n out += '-1\\n'\n\n # Append EOF\n out += 'EOF\\n'\n\n # Write to file\n with open(file, 'w') as f:\n f.write(out)", "def WriteStructuralMaterialsjson(save_path,dic_in_json_format):\n complete_name=os.path.join(save_path,\"StructuralMaterials.json\") \n with open(complete_name, \"w\") as save_file:\n save_file.write(dic_in_json_format)\n if(DEBUG):\n print(\"StructuralMaterials.json written\")", "def write(self,structure,tar,write_ft_soap_npy=True,write_ft_soap_png=True,write_ft_soap_full_npy=True,write_geo=True,op_id=0,format_geometry='aims'):\n \n if not is_descriptor_consistent(structure, self):\n raise Exception('Descriptor not consistent. 
Aborting.') \n \n desc_folder = self.configs['io']['desc_folder']\n descriptor_info = structure.info['descriptor']['descriptor_info']\n \n ft_soap_descriptor=structure.info['descriptor']['FT_SOAP_harmonics']\n \n \n \n if write_ft_soap_npy:\n \n ft_soap_filename_npy = os.path.abspath(os.path.normpath(os.path.join(desc_folder,\n structure.info['label'] +\n self.desc_metadata.ix[\n 'FT_SOAP_harmonics'][\n 'file_ending'])))\n only_file=structure.info['label'] + self.desc_metadata.ix['FT_SOAP_harmonics']['file_ending']\n \n np.save(ft_soap_filename_npy, ft_soap_descriptor)\n structure.info['FT_SOAP_harmonics_filename_npy'] = ft_soap_filename_npy\n tar.add(structure.info['FT_SOAP_harmonics_filename_npy'],arcname=only_file) \n \n if write_ft_soap_png:\n\n image_ft_soap_filename_png = os.path.abspath(os.path.normpath(os.path.join(desc_folder,\n structure.info['label'] +\n self.desc_metadata.ix[\n 'FT_SOAP_harmonics_image'][\n 'file_ending'])))\n only_file=structure.info['label'] + self.desc_metadata.ix['FT_SOAP_harmonics_image']['file_ending']\n \n plt.title(structure.info['label']+' FT SOAP descriptor ')\n plt.xlabel('FT SOAP component')\n plt.ylabel('FT SOAP value')\n plt.plot(ft_soap_descriptor)\n plt.savefig(image_ft_soap_filename_png)\n plt.close()\n structure.info['FT_SOAP_harmonics_filename_png'] = image_ft_soap_filename_png\n tar.add(structure.info['FT_SOAP_harmonics_filename_png'],arcname=only_file) \n \n if write_ft_soap_full_npy:\n \n full_fft=structure.info['descriptor']['FT_SOAP_full']\n \n ft_soap_full_filename_npy = os.path.abspath(os.path.normpath(os.path.join(desc_folder,\n structure.info['label'] +\n self.desc_metadata.ix[\n 'FT_SOAP_full_fft'][\n 'file_ending'])))\n only_file=structure.info['label'] + self.desc_metadata.ix['FT_SOAP_full_fft']['file_ending']\n \n np.save(ft_soap_full_filename_npy, full_fft)\n structure.info['FT_SOAP_full_filename_npy'] = ft_soap_full_filename_npy\n tar.add(structure.info['FT_SOAP_full_filename_npy'],arcname=only_file) \n \n if write_geo:\n \n coord_filename_in = os.path.abspath(os.path.normpath(os.path.join(desc_folder, structure.info['label'] +\n self.desc_metadata.ix['FT_SOAP_harmonics_coordinates'][\n 'file_ending'])))\n \n only_file=structure.info['label']+self.desc_metadata.ix['FT_SOAP_harmonics_coordinates']['file_ending']\n \n structure.write(coord_filename_in, format=format_geometry)\n structure.info['FT_SOAP_harmonics_coord_filename_in'] = coord_filename_in\n tar.add(structure.info['FT_SOAP_harmonics_coord_filename_in'],arcname=only_file)", "def write(self):", "def write(self):", "def print_design(x, D):\n\n N = round(x[0])\n ds = x[1]\n ws = x[2]\n wc = x[3]\n lc = x[4]\n g = x[5]\n\n # compute mass\n M = 2.0*(2.0*wc+ws+ds)*lc*wc*D.rowmc + \\\n (2*lc+2*wc+np.pi*ds)*ds*ws*D.kpf*D.rowwc\n # compute loss at rated current\n Prt = (2*lc+2*wc+np.pi*ds)*(N*D.irt) ** 2/(ds*ws*D.kpf*D.sigmawc)\n # compute inductance\n L = D.mu0*lc*wc*N ** 2/(2*g)\n # compute the flux density\n Brt = D.mu0*N*D.irt/(2*g)\n # current density\n Jrt = N*D.irt/(ws*ds*D.kpf)\n print('Design Data')\n print(f'Turns = {N}')\n print(f'Slot depth (m) = {ds}')\n print(f'Slot width (m) = {ws}')\n print(f'Core width (m) = {wc}')\n print(f'Core length (m) = {lc}')\n print(f'Air gap (m) = {g}')\n print(' ')\n print('Design Metrics')\n print(f'Mass (kg) = {M}')\n print(f'Loss at rated current (W) = {Prt}')\n print(' ')\n print('Constrained Quantities')\n print(f'Inductance (H) = {L}')\n print(f'Flux Density at Rated Current (T) = {Brt}')\n print(f'Current Density Rated Current 
(A/m**2) = {Jrt}')", "def output(self):\n\n to_write = []\n\n\n try:\n test_field = self.fields[0]['id']\n except IndexError:\n test_field = None\n\n\n if test_field == None or test_field != 0:\n # missing fields\n return to_write\n\n\n\n to_write += ['#\\n# '+self.name+'\\n#\\n']\n to_write += ['DEF '+\n self.name+' '+\n self.ref+' '+\n '0 '+ # 0\n '1 '+ # off\n self.pin_numbers_visible + ' '+\n self.pin_names_visible + ' '+\n '1 '+\n 'F '+\n self.powerobject + '\\n'\n ]\n\n to_write += ['$FPLIST\\n']\n to_write += ['$ENDFPLIST\\n']\n\n for field in self.fields:\n line = 'F'\n for key in self._F_KEYS:\n line += str(field[key]) + ' '\n to_write += [line.rstrip() + '\\n']\n\n if self.alias != '':\n to_write += ['ALIAS '+self.alias+'\\n']\n\n to_write += ['DRAW\\n']\n\n for draw in self.draws:\n #print \"==================>\",draw.output()\n to_write += [draw.output()]\n\n for connection in self.connections:\n to_write += [connection.output()]\n\n to_write += ['ENDDRAW\\n']\n\n to_write += ['ENDDEF\\n']\n\n\n\n return to_write", "def draw_design(self):\n if len(self.design.trace_segments) > 0:\n self.draw_layout()\n else:\n self.draw_schematic()", "def output(self):\n to_write = 'X '\n to_write += str(self.def_field['name'])+' '\n to_write += str(self.def_field['pin_number'])+' '\n to_write += str(self.def_field['x'])+' '\n to_write += str(self.def_field['y'])+' '\n to_write += str(self.def_field['length'])+' '\n to_write += self.def_field['direction']+' '\n to_write += str(self.def_field['size_num'])+' '\n to_write += str(self.def_field['size_name'])+' '\n #to_write += str(self.def_field['part'])+' '\n to_write += str(self.def_field['dmg'])+' '\n to_write += str(self.def_field['type'])+' '\n to_write += self.def_field['shape']\n to_write += '\\n'\n return to_write", "def write(self, fp):\n if self._defaults:\n fp.write(\"[%s]\\n\" % DEFAULTSECT)\n for (key, value) in self._defaults.items():\n fp.write(\"%s = %s\\n\" % (key, str(value).replace('\\n', '\\n\\t')))\n fp.write(\"\\n\")\n for section in self._sections:\n fp.write(\"[%s]\\n\" % section)\n for (key, value) in self._sections[section].items():\n if key == \"__name__\":\n continue\n if (value is not None) or (self._optcre == self.OPTCRE):\n key = \" = \".join((key, str(value).replace('\\n', '\\n\\t')))\n fp.write(\"%s\\n\" % (key))\n fp.write(\"\\n\")", "def _write_model(self, file, specs, experiment = None):\n self.require_complete()\n for pragma in self._pragmas:\n file.write(pragma)\n file.write('\\n')\n if len(self._pragmas) > 0:\n file.write('\\n')\n file.write('-- NOTE: This file was auto-generated by aivika-modeler 1.0\\n')\n file.write('\\n')\n for module_import in self._module_imports:\n file.write(module_import)\n file.write('\\n')\n if len(self._module_imports) > 0:\n file.write('\\n')\n file.write('specs =\\n')\n specs.write(file, ' ')\n file.write('\\n')\n self._write_transact_types(file)\n self._write_model_def(file)\n file.write('\\n')\n if experiment is None:\n file.write('main =\\n')\n file.write(' printSimulationResultsInStopTime\\n')\n file.write(' printResultSourceInEnglish\\n')\n file.write(' model specs\\n')\n file.write('\\n')\n else:\n experiment.write(file)\n file.write('\\n')", "def save(self):\n \n fileName=self.characterName+\"_\"+self.race+\"_\"+self.classType+\"_lvl_\"+str(self.level)\n new_file = open(str(fileName)+\".txt\",\"w\")\n new_file.write(\"~~~~~~~~~~~ \"+self.characterName+\" the \"+self.race+\" \"+self.classType+\" ~~~~~~~~~~~\\n\\n\")\n new_file.write(\"Level: \"+str(self.level)+\" HP: 
\"+str(self.hp)+\" XP: \"+str(self.xp)+\" Hit Dice: \"+str(self.level)+str(self.hit_dice[self.classType])+\"\\n\")\n new_file.write(str(self.abilityScores()))\n new_file.write(\"\\n\\n~~~~~~~~~ Skills ~~~~~~~~~\\n\")\n for i in self.skills:\n new_file.write(\"\\n\"+i+\" \"+\"(\"+skills[i.lower()].upper()+\")\")\n new_file.write(\"\\n\\n~~~~~~~~~ Traits ~~~~~~~~~\\n\")\n for i in self.traits:\n new_file.write(\"\\n ~~\"+i+\"~~\\n \"+str(self.traits[i])+\"\\n\")\n new_file.write(\"\\n\\n~~~~~~~~~ Specialty: \"+self.specialty+\" ~~~~~~~~\\n\")\n new_file.write(\"\\n \"+self.specialtyStory+\"\\n\")\n new_file.write(\"\\n ~~~~ Feats ~~~~\\n\")\n for i in range(1,self.level+1):\n if i == 1 or i%3 == 0:\n new_file.write(\"\\n Level \"+str(i)+\": \"+self.feats[i]['name']+' '\\\n \"(\"+self.feats[i]['type']+\")\\n\"\\\n ' \"'+self.feats[i]['description']+'\"\\n\\n')\n if 'prereq' in self.feats[i]:\n new_file.write(\" Prerequisite: \"+self.feats[i]['prereq']+\"\\n\")\n if 'benefit' in self.feats[i]:\n new_file.write(\" Benefit: \"+self.feats[i]['benefit']+\"\\n\")\n if 'effect' in self.feats[i]:\n new_file(\" Effect: \"+self.feats[i]['effect']+\"\\n\")\n \n new_file.write(\"\\n\\n~~~~~~~~~ Background: \"+self.background+\" ~~~~~~~~\\n\")\n if self.backgroundProfession == '':\n pass\n else:\n new_file.write(\"Profession: \"+self.backgroundProfession)\n new_file.write(\"\\n \"+self.backgroundStory)\n \n new_file.close()\n print \"File \"+str(fileName)+\".txt saved.\"", "def write_data_card(spec, data_card, channels, path):\n with open(path, \"w\") as f:\n f.write(f\"imax {str(size(data_card.bins))}\" + \"\\n\")\n f.write(\n \"jmax \"\n + str(size(data_card.processes) - size(data_card.isSignal.keys()))\n + \"\\n\"\n )\n f.write(f\"kmax {str(size(data_card.systs, 0))}\" + \"\\n\")\n\n if data_card.hasShapes:\n for channel in data_card.shapeMap.keys():\n for sample in data_card.shapeMap[channel].keys():\n f.write(\n f\"shapes {sample} {channel} {data_card.shapeMap[channel][sample][0]} {data_card.shapeMap[channel][sample][1]}\"\n )\n if size(data_card.shapeMap[channel][sample]) > 2:\n f.write(f\" {data_card.shapeMap[channel][sample][2]}\" + \"\\n\")\n else:\n f.write(\"\\n\")\n\n f.write(\"\\n---------------------------------\\n\")\n f.write(\"bin \")\n for bin in data_card.obs.keys():\n f.write(f\"{bin} \")\n f.write(\"\\n\")\n f.write(\"observation \")\n for channel in data_card.obs.keys():\n f.write(f\"{str(data_card.obs[channel])} \")\n f.write(\"\\n---------------------------------\\n\")\n f.write(\"bin \")\n for channel in data_card.obs.keys():\n for sample in data_card.exp[channel].keys():\n f.write(f\"{channel} \")\n f.write(\"\\n\")\n f.write(\"process \")\n for channel in data_card.bins:\n for sample in data_card.exp[channel].keys():\n f.write(f\"{sample} \")\n f.write(\"\\n\")\n f.write(\"process \")\n for channel in data_card.bins:\n for sample in data_card.exp[channel].keys():\n if sample in data_card.signals:\n f.write(f\"{str(-1 * data_card.processes.index(sample))} \")\n else:\n f.write(f\"{str(data_card.processes.index(sample) + 1)} \")\n f.write(\"\\n\")\n f.write(\"rate \")\n for channel in data_card.bins:\n for sample in data_card.exp[channel].keys():\n\n f.write(f\"{str(data_card.exp[channel][sample])} \")\n f.write(\"\\n---------------------------------\\n\")\n for syst in data_card.systs:\n f.write(f\"{syst[0]} {syst[2]} \")\n for bin in syst[4].keys():\n for sample in data_card.exp[bin].keys():\n if syst[4][bin][sample] != 0:\n f.write(f\"{str(syst[4][bin][sample])} \")\n else:\n 
f.write(\"- \")\n\n f.write(\"\\n\")\n f.write(\"\\n---------------------------------\\n\")\n for cAp in data_card.rateParams.keys():\n _dir = cAp.split(\"AND\")\n for i in range(size(data_card.rateParams[cAp], 0)):\n if size(data_card.rateParams[cAp][i][0]) > 3:\n f.write(\n f\"{str(data_card.rateParams[cAp][i][0][0])} rateParam {_dir[0]} {_dir[1]} {str(data_card.rateParams[cAp][i][0][1])} {data_card.rateParams[cAp][i][0][3]}\"\n )\n else:\n f.write(\n f\"{str(data_card.rateParams[cAp][i][0][0])} rateParam {_dir[0]} {_dir[1]} {str(data_card.rateParams[cAp][i][0][1])}\"\n )\n f.write(\"\\n\")\n f.write(\"\\n---------------------------------\\n\")\n for idxc, channel in enumerate(channels):\n if (\n channel in data_card.binParFlags.keys()\n and data_card.binParFlags[channel] == True\n ):\n # double check to be safe\n shapesys = False\n staterror = False\n for sample in spec[\"channels\"][idxc][\"samples\"]:\n mod_types = [mod[\"type\"] for mod in sample[\"modifiers\"]]\n if \"shapesys\" in mod_types:\n shapesys = True\n elif \"staterror\" in mod_types:\n staterror = True\n\n if shapesys:\n f.write(f\"{channel} autoMCStats 100000 0 2\" + \"\\n\")\n if staterror:\n f.write(f\"{channel} autoMCStats 0 0 2\" + \"\\n\")", "def xephemFormat(self):\n line = []\n #Field 1: names\n names = [self.getName()]\n identifiers = self.getIdentifiers()\n if identifiers[0] is not None:\n names.append(identifiers[0])\n for i in range(1,4):\n if identifiers[i] is not None:\n names.extend(identifiers[i])\n line.append(\"|\".join(names))\n\n #Field 2: type designation\n objType = self.getType()\n if objType in (\"Galaxy Pair\", \"Galaxy Triplet\", \"Group of galaxies\"):\n line.append(\"f|A\")\n elif objType == \"Globular Cluster\":\n line.append(\"f|C\")\n elif objType == \"Double star\":\n line.append(\"f|D\")\n elif objType in (\"HII Ionized region\", \"Nebula\"):\n line.append(\"f|F\")\n elif objType == \"Galaxy\":\n if self.getHubble().startswith(\"S\"):\n line.append(\"f|G\")\n else:\n line.append(\"f|H\")\n elif objType == \"Dark Nebula\":\n line.append(\"f|K\")\n elif objType in (\"Emission Nebula\", \"Reflection Nebula\"):\n line.append(\"f|N\")\n elif objType in (\"Association of stars\", \"Open Cluster\"):\n line.append(\"f|O\")\n elif objType == \"Planetary Nebula\":\n line.append(\"f|P\")\n elif objType == \"Supernova remnant\":\n line.append(\"f|R\")\n elif objType == \"Star\":\n line.append(\"f|S\")\n elif objType == \"Star cluster + Nebula\":\n line.append(\"f|U\")\n else:\n line.append(\"f\")\n\n #Field 3: Right Ascension\n line.append(self.getRA())\n\n #Field 4: Declination\n line.append(self.getDec())\n\n #Field 5: Magnitude\n #We use the first available magnitude in the sequence b,v,j,h,k\n for mag in self.getMagnitudes():\n if mag is not None:\n line.append(str(mag))\n break\n\n #Field 6: optional Epoch, we let it empty\n line.append(\"\")\n\n #Field 7: Dimensions\n dimensions = []\n #Xephem format wants axes espressed in arcsec, we have arcmin\n for value in (self.getDimensions()[0],self.getDimensions()[1]):\n if value is not None:\n dimensions.append(str(value*60))\n else:\n dimensions.append(\"\")\n if self.getDimensions()[2] is not None:\n dimensions.append(str(value))\n else:\n dimensions.append(\"\")\n line.append(\"|\".join(dimensions))\n\n return \",\".join(line)", "def writeSpec(self,dir=\"\"):\n for codestruct in self.codestructures:\n codestruct.writeSpec(dir)", "def WindingDesign(main):\n oEditor = main['ANSYS']['oEditor']\n\n # Slots number\n Slots = 
main['ANSYS']['FixedVariables']['Slots']\n\n # SlotType\n SlotType = main['ANSYS']['FixedVariables']['SlotType']\n\n # Geimetric parameters\n g = main['ANSYS']['DesignProperties']['Stator']['g']\n\n Hs0 = main['ANSYS']['DesignProperties']['Slot']['Hs0']\n Hs1 = main['ANSYS']['DesignProperties']['Slot']['Hs1']\n Hs2 = main['ANSYS']['DesignProperties']['Slot']['Hs2']\n Bs1 = main['ANSYS']['DesignProperties']['Slot']['Bs1']\n Bs2 = main['ANSYS']['DesignProperties']['Slot']['Bs2']\n\n DiaGap = main['ANSYS']['DesignProperties']['Rotor']['DiaGap']\n\n # Coils Arrange ABC\n PhasesABC = main['ANSYS']['Winding']['ABC']\n\n # Color used for phases\n Color = main['ANSYS']['Winding']['Color']\n\n oEditor.CreateUserDefinedPart(\n [\n \"NAME:UserDefinedPrimitiveParameters\",\n \"DllName:=\"\t\t, \"RMxprt/LapCoil.dll\",\n \"Version:=\"\t\t, \"16.0\",\n \"NoOfParameters:=\"\t, 22,\n \"Library:=\"\t\t, \"syslib\",\n [\n \"NAME:ParamVector\",\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"DiaGap\",\n \"Value:=\"\t\t, \"DiaGap+g*2\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"DiaYoke\",\n \"Value:=\"\t\t, \"DiaYoke\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"Length\",\n \"Value:=\"\t\t, \"0mm\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"Skew\",\n \"Value:=\"\t\t, \"0deg\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"Slots\",\n \"Value:=\"\t\t, str(int(Slots))\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"SlotType\",\n \"Value:=\"\t\t, str(int(SlotType))\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"Hs0\",\n \"Value:=\"\t\t, \"Hs0\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"Hs1\",\n \"Value:=\"\t\t, \"Hs1\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"Hs2\",\n \"Value:=\"\t\t, \"Hs2\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"Bs0\",\n \"Value:=\"\t\t, \"Bs0\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"Bs1\",\n \"Value:=\"\t\t, \"Bs1\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"Bs2\",\n \"Value:=\"\t\t, \"Bs2\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"Rs\",\n \"Value:=\"\t\t, \"Rs\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"FilletType\",\n \"Value:=\"\t\t, \"0\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"Layers\",\n \"Value:=\"\t\t, \"2\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"CoilPitch\",\n \"Value:=\"\t\t, \"1\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"EndExt\",\n \"Value:=\"\t\t, \"5mm\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"SpanExt\",\n \"Value:=\"\t\t, \"25mm\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"BendAngle\",\n \"Value:=\"\t\t, \"0deg\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"SegAngle\",\n \"Value:=\"\t\t, \"10deg\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"LenRegion\",\n \"Value:=\"\t\t, \"200mm\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"InfoCoil\",\n \"Value:=\"\t\t, \"0\"\n ]\n ]\n ],\n [\n \"NAME:Attributes\",\n \"Name:=\"\t\t, \"LapCoil1\",\n \"Flags:=\"\t\t, \"\",\n \"Color:=\"\t\t, \"(143 175 143)\",\n \"Transparency:=\"\t, 0,\n \"PartCoordinateSystem:=\", \"Global\",\n \"UDMId:=\"\t\t, \"\",\n \"MaterialValue:=\"\t, \"\\\"copper\\\"\",\n \"SurfaceMaterialValue:=\", \"\\\"\\\"\",\n \"SolveInside:=\"\t\t, True,\n \"ShellElement:=\"\t, False,\n \"ShellElementThickness:=\", \"0mm\",\n \"IsMaterialEditable:=\"\t, True,\n \"UseMaterialAppearance:=\", False,\n \"IsLightweight:=\"\t, False\n ]\n )\n\n # Body Separation\n oEditor.SeparateBody(\n [\n \"NAME:Selections\",\n \"Selections:=\"\t\t, \"LapCoil1\",\n \"NewPartsModelFlag:=\"\t, \"Model\"\n ],\n [\n \"CreateGroupsForNewObjects:=\", False\n ]\n )\n\n # Average Slot Width\n 
AverWidth = (Bs2 + Bs1)/2\n\n # Average Radius\n AverRadius = DiaGap/2 + g + Hs0 + Hs1 + Hs2*0.75\n\n # Angle to shift and find the kth tooth\n ShiftSlot = 1/Slots*np.pi\n\n # Angle to fond the corrent layer\n ShiftLayer = np.arctan(AverWidth/4/AverRadius)\n\n # List to save the coils sides names\n WindingNames = [[], [], []]\n\n # Phases name to employed\n PhaseNames = ['A', 'B', 'C']\n\n for phase, row in enumerate(PhasesABC):\n\n PhaseName = [[], []]\n\n for coil, slot in enumerate(row):\n\n SlotAngle = np.abs(slot)/Slots*2*np.pi - ShiftSlot\n\n if coil % 2 == 1:\n SlotAngle = SlotAngle - ShiftLayer\n\n else:\n SlotAngle = SlotAngle + ShiftLayer\n\n x = np.cos(SlotAngle)*AverRadius\n y = np.sin(SlotAngle)*AverRadius\n\n Name0 = oEditor.GetBodyNamesByPosition(\n [\n \"NAME:Parameters\",\n \"XPosition:=\", str(x)+\"mm\",\n \"YPosition:=\", str(y)+\"mm\",\n \"ZPosition:=\", \"0mm\"\n ]\n )\n\n C = Color[phase]\n\n if np.sign(slot) == 1:\n\n CoilSideName = PhaseNames[phase]+\"In\"+str(np.abs(coil))\n\n PhaseName[0] += [CoilSideName]\n\n oEditor.ChangeProperty(\n [\n \"NAME:AllTabs\",\n [\n \"NAME:Geometry3DAttributeTab\",\n [\n \"NAME:PropServers\",\n Name0[0]\n ],\n [\n \"NAME:ChangedProps\",\n [\n \"NAME:Name\",\n \"Value:=\"\t\t,\n CoilSideName\n ],\n [\n \"NAME:Color\",\n \"R:=\"\t\t\t, C[0],\n \"G:=\"\t\t\t, C[1],\n \"B:=\"\t\t\t, C[2]\n ],\n\n ]\n ]\n ]\n )\n else:\n\n CoilSideName = PhaseNames[phase]+\"Out\"+str(np.abs(coil))\n\n PhaseName[1] += [CoilSideName]\n\n oEditor.ChangeProperty(\n [\n \"NAME:AllTabs\",\n [\n \"NAME:Geometry3DAttributeTab\",\n [\n \"NAME:PropServers\",\n Name0[0]\n ],\n [\n \"NAME:ChangedProps\",\n [\n \"NAME:Name\",\n \"Value:=\"\t\t,\n CoilSideName\n ],\n [\n \"NAME:Color\",\n \"R:=\"\t\t\t, C[0],\n \"G:=\"\t\t\t, C[1],\n \"B:=\"\t\t\t, C[2],\n ],\n\n ]\n ]\n ]\n )\n\n WindingNames[phase] += PhaseName\n\n main['ANSYS']['Winding']['CoilNames'] = WindingNames\n\n return main", "def save(self):\n if os.path.isfile(self.filename): os.remove(self.filename)\n fits.HDUList([self.primary_hdu, self.energs_hdu, self.params_hdu, self.spectra_hdu]).writeto(self.filename)", "def write_file(self, f=None):\n # get model information\n nlay = self.parent.nlay\n dis = self.parent.get_package(\"DIS\")\n if dis is None:\n dis = self.parent.get_package(\"DISU\")\n\n # Open file for writing\n if f is None:\n f_obj = open(self.fn_path, \"w\")\n\n # Item 1: ipakcb, HDRY, IWDFLG, WETFCT, IWETIT, IHDWET, IKVFLAG, IKCFLAG\n f_obj.write(\n f\" {self.ipakcb:9d} {self.hdry:9.3G} {self.iwdflg:9d}\"\n f\" {self.wetfct:9.3G} {self.iwetit:9d} {self.ihdwet:9d}\"\n f\" {self.ikvflag:9d} {self.ikcflag:9d}\\n\"\n )\n\n # LAYCON array\n for layer in range(nlay):\n if self.intercellt[layer] > 0:\n f_obj.write(\n f\"{self.intercellt[layer]:1d} {self.laycon[layer]:1d} \"\n )\n else:\n f_obj.write(f\"0{self.laycon[layer]:1d} \")\n f_obj.write(\"\\n\")\n\n # TRPY, <ANGLEX>\n f_obj.write(self.trpy.get_file_entry())\n transient = not dis.steady.all()\n structured = self.parent.structured\n anis = any(t != 1 for t in self.trpy)\n if (not structured) and anis:\n f_obj.write(self.anglex.get_file_entry())\n\n # <SF1>, <TRAN>, <HY>, <VCONT>, <KV>, <SF2>, <WETDRY>\n for layer in range(nlay):\n if transient:\n f_obj.write(self.sf1[layer].get_file_entry())\n\n if self.ikcflag == 0:\n self._write_hy_tran_vcont_kv(f_obj, layer)\n\n if transient and (self.laycon[layer] in [2, 3, 4]):\n f_obj.write(self.sf2[layer].get_file_entry())\n\n if (self.iwdflg != 0) and (self.laycon[layer] in [1, 3]):\n 
f_obj.write(self.wetdry[layer].get_file_entry())\n\n # <KSAT> (if ikcflag==1)\n if abs(self.ikcflag == 1):\n f_obj.write(self.ksat.get_file_entry())\n\n f_obj.close()", "def to_figure(self, structure):\n if not self.bypass:\n if self.format is \"show\":\n plt.show()\n elif self.format is \"png\":\n plt.savefig(self.path + self.filename + \".png\", bbox_inches=\"tight\")\n elif self.format is \"fits\":\n if structure is not None:\n structure.output_to_fits(\n file_path=self.path + self.filename + \".fits\", overwrite=True\n )", "def save_to_poscar(self, filename,direct=False,species_line=False): \n with open( filename, 'w' ) as F:\n F.write( self.name )\n F.write( \" 1.0\\n\" )\n F.write( mat2str( self.unit_cell, \"%16.10f\" ) )\n if species_line:\n pos = 0\n for n in self.num_per_type: \n F.write('%s '%self.species[pos])\n pos += n\n F.write('\\n')\n F.write(' '.join([str(n) for n in self.num_per_type]) )\n F.write('\\n')\n if not direct:\n F.write(\"Cart\\n\")\n F.write( mat2str( self.atoms, \"%16.10f\" ) )\n else:\n F.write(\"Direct\\n\")\n F.write( mat2str( dot(self.atoms,self.recip_cell), \"%16.10f\" ) )", "def save(self, filename, format = \"text\"):\n #\n # preparation of data\n #\n Data = []\n Data.append(['Name','Spectrum','Select','MDV','Std'])\n for fragment in self.mdv.keys():\n for isotope_number in self.mdv[fragment].keys():\n if self.mdv[fragment][isotope_number]['use'] == 'use':\n use_or_no = 1\n else:\n use_or_no = 0\n Data.append([fragment,\n str(isotope_number),\n str(use_or_no),\n str(self.mdv[fragment][isotope_number]['ratio']),\n str(self.mdv[fragment][isotope_number]['std'])])\n try:\n with open(filename, 'w', newline='') as f:\n import csv\n if format == \"text\":\n writer = csv.writer(f, delimiter='\\t')\n writer.writerows(Data)\n if format == \"csv\":\n writer = csv.writer(f, dialect='excel')\n writer.writerows(Data)\n except:\n return False\n\n return True", "def generate(self):\n self._open_file()\n # copied from GenerateCSPEC.py\n self._write_header_and_defaults()\n self._write_source()\n self._write_sample()\n\n self._write_all_components()\n self._write_mantle_module()\n self._write_segment()\n self._write_all_ids()\n self._write_footer()\n self._close_file()", "def draw_design(self, dxfversion=None):\n\n if self.file == None:\n raise Exception(\"No file name given. 
Use design.file to set name.\")\n \n if dxfversion is None:\n self.drawing = ezdxf.new()\n else:\n self.drawing = ezdxf.new(dxfversion=dxfversion)\n self.msp = self.drawing.modelspace()\n \n for x in self.layers:\n self.drawing.layers.add(self.layers[x]['name'], color=self.layers[x]['color'])\n\n for x in self.features:\n self.add_polyline(self.layers[self.features[x].layer],self.features[x].coord,\n self.features[x].open)\n \n self.drawing.saveas(self.file)", "def output(self):\n to_write = 'C '\n \"\"\"\n print self.def_field\n for key in self.def_field:\n print key,\"=\", self.def_field[key]\n \"\"\"\n to_write += str(self.offset[0] + self.def_field['x'])+' '\n to_write += str(self.offset[1] + self.def_field['y'])+' '\n to_write += str(self.def_field['radius'])+' '\n to_write += str(self.def_field['part'])+' '\n to_write += str(self.def_field['dmg'])+' '\n to_write += str(self.def_field['pen'])+' '\n to_write += self.def_field['fill']+'\\n'\n return to_write", "def setMyDesign(self, designObject):\n self.myDesign = designObject\n self.designID = designObject.id\n self.componentdata = designObject.componentdata\n self.weapondata = designObject.weapondata\n self.myShipHull = designObject.myShipHull\n designAttrDict = designObject.getMyInfoAsDict()\n self.currentISP = self.myShipHull.maxISP\n self.currentPower = designAttrDict['maxPower']\n self.maxBattery = designAttrDict['maxBattery']\n self.thrust = designAttrDict['thrust']\n self.rotation = designAttrDict['rotation']\n self.radar = designAttrDict['radar']\n self.jamming = designAttrDict['jamming']\n self.repair = designAttrDict['repair']\n self.maxAssault = designAttrDict['maxAssault']\n self.mass = self.myShipHull.mass\n\n for position, dQuad in designObject.quads.iteritems():\n newQuad = quad.Quad(dQuad.getMyInfoAsDict())\n newQuad.setMyParent(self)\n # weapons have to be created first\n for id, weap in dQuad.weapons.iteritems():\n newWeap = weapon.Weapon(weap.getMyInfoAsDict())\n newWeap.setMyQuad(newQuad)\n for id, comp in dQuad.components.iteritems():\n newComp = component.Component(comp.getMyInfoAsDict())\n newComp.setMyQuad(newQuad)\n newQuad.setMyStatus()\n self.quads[newQuad.position] = newQuad\n newQuad.resetDefences()\n newQuad.reloadAmmo()\n\n self.name = designObject.name + '-' + self.id\n self.setMyStrength()\n if 'AS' in self.myShipHull.abr:\n self.isAssault = 1\n else:\n self.isAssault = 0" ]
[ "0.6008693", "0.5994822", "0.59387493", "0.5817487", "0.5813526", "0.58061814", "0.5734599", "0.5709072", "0.56894326", "0.56894326", "0.5635465", "0.56147325", "0.56106025", "0.5595291", "0.55891746", "0.55859977", "0.5578043", "0.55598426", "0.55499566", "0.55450016", "0.5532548", "0.55310994", "0.5530842", "0.55182815", "0.54954016", "0.5492114", "0.54908884", "0.5487201", "0.54865825", "0.5481765" ]
0.6873917
0
Convert a pin into an outline
def _convert_pin_to_outline(self, pin): pcbshape = specctraobj.Path() pcbshape.layer_id = 'Front' pcbshape.aperture_width = self._from_pixels(1) pcbshape.vertex.append(self._from_pixels((pin.p1.x, pin.p1.y))) pcbshape.vertex.append(self._from_pixels((pin.p2.x, pin.p2.y))) outline = specctraobj.Outline() outline.shape = pcbshape return outline
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_pin(self, pin, xform):\n # TODO special pin characteristics (inverted, clock)?\n line = [xform.chain(p) for p in (pin.p1, pin.p2)]\n self.canvas.line([(p.x, p.y) for p in line],\n fill=self.options.style['part'])", "def draw_pins():\n\n pass", "def add_outline(component, layer=LAYER.DEVREC):\n c = component\n points = [\n [c.xmin, c.ymin],\n [c.xmax, c.ymin],\n [c.xmax, c.ymax],\n [c.xmin, c.ymax],\n ]\n c.add_polygon(points, layer=layer)", "def outline(color=\"white\", linewidth=3, **kwargs):\n return [pe.withStroke(linewidth=linewidth, foreground=color, **kwargs)]", "def convert_track_to_shape_pin(self, track):\n # to scale coordinates to tracks\n x = track[0]*self.track_width - 0.5*self.track_width\n y = track[1]*self.track_width - 0.5*self.track_width\n # offset lowest corner object to to (-track halo,-track halo)\n ll = snap_to_grid(vector(x,y))\n ur = snap_to_grid(ll + vector(self.track_width,self.track_width))\n\n p = pin_layout(\"\", [ll, ur], self.get_layer(track[2]))\n return p", "def outline_geoids(sf, df, geoids, include_labels=True):\n# df = read_shapefile(sf)\n# df['tract_geoid'] = df.GEOID.str[:11]\n bg_id = []\n for i in geoids:\n bg_id.append(df[df.GEOID==i].index[0])\n\n itr = 0\n for shape in sf.shapeRecords():\n if itr in bg_id:\n x = [i[0] for i in shape.shape.points[:]]\n y = [i[1] for i in shape.shape.points[:]]\n plt.plot(x, y, 'k')\n \n \n if include_labels:\n x0 = np.mean(x)\n y0 = np.mean(y)\n label = df.iloc[itr].density_label\n\n plt.text(x0, y0, label, fontsize=8)\n \n itr = itr+1", "def createCornerPin():\n i = b.createNode('CornerPinMI')\n i['tile_color'].setValue(int('%02x%02x%02x%02x' % (232.05, 145.095, 0, 255), 16))\n if cc:\n i = gU(i)\n return i", "def annotate_pin_and_tracks(self, pin, tracks):\n debug.info(0,\"Annotating\\n pin {0}\\n tracks {1}\".format(pin,tracks))\n for coord in tracks:\n (ll,ur) = self.convert_track_to_shape(coord)\n self.cell.add_rect(layer=\"text\",\n offset=ll,\n width=ur[0]-ll[0],\n height=ur[1]-ll[1])\n (ll,ur) = self.convert_track_to_pin(coord).rect\n self.cell.add_rect(layer=\"boundary\",\n offset=ll,\n width=ur[0]-ll[0],\n height=ur[1]-ll[1])\n (ll,ur) = pin.rect\n self.cell.add_rect(layer=\"text\",\n offset=ll,\n width=ur[0]-ll[0],\n height=ur[1]-ll[1])", "def decode_outline_line(blob_info, index):\n return decode_outline(\n blob_info['contour_start'][index],\n blob_info['contour_encode_len'][index],\n blob_info['contour_encoded'][index]\n )", "def _draw_outline(self) -> None:\n stroke = self.border_thickness\n\n # draw outline rectangle\n for _w in range(self.widget_width):\n for line in range(stroke):\n self._bitmap[_w, line] = 1\n self._bitmap[_w, self.widget_height - 1 - line] = 1\n for _h in range(self.widget_height):\n for line in range(stroke):\n self._bitmap[line, _h] = 1\n self._bitmap[self.widget_width - 1 - line, _h] = 1", "def generate_outlines(self):\n morphed_atlas = bio.load_nii(self.registered_atlas_img_path, as_array=False)\n atlas_scale = morphed_atlas.header.get_zooms()\n morphed_atlas = morphed_atlas.get_data()\n boundaries_mask = sk_segmentation.find_boundaries(morphed_atlas, mode='inner')\n boundaries = morphed_atlas * boundaries_mask\n bio.to_nii(boundaries, self.outlines_file_path, scale=atlas_scale)", "def compute_pin_enclosure(self, ll, ur, zindex, name=\"\"):\n layer = self.get_layer(zindex)\n \n # This finds the pin shape enclosed by the track with DRC spacing on the sides\n pin = self.convert_track_to_pin(ll)\n (abs_ll,unused) = pin.rect\n pin = self.convert_track_to_pin(ur)\n 
(unused,abs_ur) = pin.rect\n \n pin = pin_layout(name, [abs_ll, abs_ur], layer)\n \n return pin", "def create_mark(xa,ya,col):\n disque = canvas.create_oval(xa-2,ya-2,xa+2,ya+2,fill=col,outline=col)\n return disque", "def draw_pin_highlight(self, img, color, pin_number):\n bounds = self._slot_bounds[pin_number - 1]\n img.draw_circle(bounds, color, thickness=int(bounds.radius() * 0.2))\n img.draw_text(str(pin_number), bounds.center(), color, centered=True)", "def convert_track_to_pin(self, track):\n # calculate lower left \n x = track.x*self.track_width - 0.5*self.track_width + 0.5*self.track_space\n y = track.y*self.track_width - 0.5*self.track_width + 0.5*self.track_space\n ll = snap_to_grid(vector(x,y))\n \n # calculate upper right\n x = track.x*self.track_width + 0.5*self.track_width - 0.5*self.track_space\n y = track.y*self.track_width + 0.5*self.track_width - 0.5*self.track_space\n ur = snap_to_grid(vector(x,y))\n\n p = pin_layout(\"\", [ll, ur], self.get_layer(track[2]))\n return p", "def outline_to_mask(line, x, y):\n mpath = mplp.Path(line)\n X, Y = np.meshgrid(x, y)\n points = np.array((X.flatten(), Y.flatten())).T\n mask = mpath.contains_points(points).reshape(X.shape)\n return mask", "def draw_parabola_outline(start_x, start_y, end_x, height, color,\n border_width=1, tilt_angle=0):\n center_x = (start_x + end_x) / 2\n center_y = start_y + height\n start_angle = 0\n end_angle = 180\n width = (start_x - end_x)\n draw_arc_outline(center_x, center_y, width, height, color,\n start_angle, end_angle, border_width, tilt_angle)", "def GetPolyline(polyline):\r\n pass", "def draw_pins(self, data):\n # Here we set defaults (with 'or' keyword ...)\n ax = self.ax\n plot_pins = self.plot_pins\n plot_pins_values = self.plot_pins_values\n #plot_pins_method = self.plot_pins_method or \"highlight\"\n plot_pins_colors = self.plot_pins_colors\n\n # Here we do checks and stop drawing pins if something is unset\n if ax is None: return\n if plot_pins is None: return\n \n verbose=self.verbose\n\n no_of_pins = len(self.plot_pins)\n\n if self.plot_pins_method == \"highlight\":\n\n for pin_idx in range(no_of_pins): # For every pin number (0,1,2,3)\n\n if plot_pins[pin_idx] == True: # If we want them plotted\n \n hold_times = self.hold_times_obj.identify_hold_times(pin_idx, plot_pins_values[pin_idx], data.gpio)\n\n if hold_times is not None:\n for ht in hold_times:\n axvsp = ax.axvspan(ht[0], ht[1], color=plot_pins_colors[pin_idx], alpha=0.25)\n self.axvspans[pin_idx].append(axvsp)\n\n x_halfway = (ht[1] - ht[0]) / 4 + ht[0]\n y_halfway = (self.plot_ymax - self.plot_ymin) / 2 + self.plot_ymin\n annon = ax.annotate(str(self.iterations[pin_idx] + 1), xy=(x_halfway, y_halfway))\n self.annotations[pin_idx].append(annon)\n \n self.iterations[pin_idx] += 1\n\n # TODO: The start and stop indexes of the data points that are area of interest\n # might be more useful for an averaging function, but currently the plot uses\n # the coordinates of the X axis(the start/stop timestamps) in order to highlight\n # the areas of interest.\n self.preprocessed_averages_data[pin_idx].append((self.iterations[pin_idx], ht, 0, None))\n \n # This should be in update_plot()\n self.ax.set_title(\n f\"Logging. 
Collected {len(data.power)} power samples and {len(data.gpio)} gpio samples.\")\n\n elif self.plot_pins_method == \"line\":\n extend_gpio = data.gpio.timestamps[-1] < data.power.timestamps[-1]\n for pin, plot_pin in enumerate(self.plot_pins):\n if plot_pin:\n self.ln_pins[pin].set_xdata(\n data.gpio.timestamps + extend_gpio * [data.power.timestamps[-1]])\n self.ln_pins[pin].set_ydata(\n data.gpio.get_select_in_value(pin) + extend_gpio * [data.gpio.values[-1][pin]])\n self.ax.set_title(f\"Logging. Collected {len(data.power)} power samples and {len(data.gpio)} gpio samples.\")\n self.fig.show()\n else:\n raise ValueError(f\"Unrecognized plot_pins_method: {self.plot_pins_method}\")", "def add_pin_square_inside(\n component, port, port_length=0.1, layer=LAYER.PORT, label_layer=LAYER.TEXT\n):\n p = port\n a = p.orientation\n ca = np.cos(a * np.pi / 180)\n sa = np.sin(a * np.pi / 180)\n rot_mat = np.array([[ca, -sa], [sa, ca]])\n\n d = p.width / 2\n dx = port_length\n\n dbot = np.array([0, -d])\n dtop = np.array([0, d])\n dbotin = np.array([-dx, -d])\n dtopin = np.array([-dx, +d])\n\n p0 = p.position + _rotate(dbot, rot_mat)\n p1 = p.position + _rotate(dtop, rot_mat)\n ptopin = p.position + _rotate(dtopin, rot_mat)\n pbotin = p.position + _rotate(dbotin, rot_mat)\n polygon = [p0, p1, ptopin, pbotin]\n component.add_polygon(polygon, layer=layer)", "def convert_track_to_inflated_pin(self, track):\n # calculate lower left \n x = track.x*self.track_width - 0.5*self.track_width - 0.5*self.track_space\n y = track.y*self.track_width - 0.5*self.track_width - 0.5*self.track_space\n ll = snap_to_grid(vector(x,y))\n \n # calculate upper right\n x = track.x*self.track_width + 0.5*self.track_width + 0.5*self.track_space\n y = track.y*self.track_width + 0.5*self.track_width + 0.5*self.track_space\n ur = snap_to_grid(vector(x,y))\n\n p = pin_layout(\"\", [ll, ur], self.get_layer(track[2]))\n return p", "def route_vertical_side_pin(self, name, side, offset_multiple=1):\n if side == \"left\":\n bot_loc = vector(-offset_multiple * self.vertical_pitch, 0)\n top_loc = vector(-offset_multiple * self.vertical_pitch, self.height)\n elif side == \"right\":\n bot_loc = vector(self.width + offset_multiple * self.vertical_pitch, 0)\n top_loc = vector(self.width + offset_multiple * self.vertical_pitch, self.height)\n\n layer = self.supply_stack[2]\n top_via = contact(layer_stack=self.supply_stack,\n directions=(\"H\", \"H\"))\n\n\n# self.add_layout_pin_rect_ends(text=name,\n# layer=layer,\n# start=bot_loc,\n# end=top_loc)\n self.add_layout_pin_segment_center(text=name,\n layer=layer,\n start=bot_loc,\n end=top_loc,\n width=top_via.second_layer_width)\n\n return (bot_loc, top_loc)", "def aline(p, width, dash, grayamount):\r\n if grayamount > 0:\r\n w(\"%f setgray\" %grayamount)\r\n ap = []\r\n for i in range(len(p)):\r\n ap.append(apoint(p[i]))\r\n if dash > 0:\r\n w(\"[%d %d] 0 setdash\" % (dash,dash))\r\n\r\n w(\"%d %d moveto\" % (ap[0][0],ap[0][1]))\r\n for j in range(1,len(p)):\r\n w(\"%d %d lineto\" % (ap[j][0],ap[j][1]))\r\n width*= gv[\"globalscale\"]\r\n w(\"%f setlinewidth\" % width)\r\n w(\"stroke\")\r\n w(\"[ ] 0 setdash\")\r\n if grayamount > 0:\r\n w(\"0 setgray\")", "def decorate_scene():\n make_polygon( (100,100),(120,140),(270,70) )\n make_polygon( (300,10), (300,550), (340,452),(380,300), (330,50))\n make_polygon( (200,450), (100,450), (100,500), (200,500) )\n make_polygon( (130,320), (150,300), (140,280) )\n return", "def house ():\n\n poly (3,300,\"red\")\n penup()\n setposition(0,-300)\n pendown()\n poly 
(4,300,\"brown\")\n penup()\n setposition(100,-300)\n pendown()\n poly(4,100,\"green\") \n\n return None", "def polyline(out, p, color):\n\n points = \" \".join(\"%g,%g\" % (v.x, v.y) for v in p)\n out.write(' <polyline fill=\"none\" stroke=\"%s\" stroke-width=\"1\" points=\"%s\"/>\\n' %\n (color, points))", "def add_pin(x, y):\n\n pass", "def draw_equitriangle(t,sz):\r\n\r\n\tdraw_poly(t, 3, sz)", "def annotate(self, ax):\n annotation = ax.annotate(self.template, xy=(0, 0), ha='right',\n xytext=self.offsets, textcoords='offset points', va='bottom',\n bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),\n arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0')\n )\n annotation.set_visible(False)\n return annotation", "def annotate(self, ax):\n annotation = ax.annotate(self.template, xy=(0, 0), ha='right',\n xytext=self.offsets, textcoords='offset points', va='bottom',\n bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),\n arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0')\n )\n annotation.set_visible(False)\n return annotation" ]
[ "0.6245729", "0.6115153", "0.5688149", "0.54557323", "0.5455196", "0.5401273", "0.53704077", "0.5337081", "0.53153765", "0.53109276", "0.52516943", "0.5228548", "0.5222575", "0.51996744", "0.5138629", "0.5120291", "0.5064127", "0.5062142", "0.5055162", "0.504735", "0.5032255", "0.49867585", "0.4973086", "0.4971022", "0.49680898", "0.49575353", "0.4932671", "0.4887238", "0.48701644", "0.48701644" ]
0.84876585
0
Convert points to paths
def _points_to_paths(self, points): prev = points[0] result = [] for point in points[1:]: path = specctraobj.Path() path.aperture_width = self._from_pixels(1) path.vertex.append(prev) path.vertex.append(point) result.append(path) prev = point return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def full_path_to_points(path):\n\n points_x = [path[0][0]]\n points_y = [path[1][0]]\n\n new_path = path\n prev_turn, new_path = path_to_command_thymio(new_path)\n\n for i in range(len(new_path[0]) - 1):\n\n new_turn, new_path = path_to_command_thymio(new_path)\n\n if new_turn != prev_turn:\n points_x.append(path[0][i + 1])\n points_y.append(path[1][i + 1])\n\n prev_turn = new_turn\n\n points_x.append(path[0][-1])\n points_y.append(path[1][-1])\n points = [points_x, points_y]\n\n return points", "def path_convert(self):\n pub_path = Exp_msg()\n for i in self.path:\n epoint = Cordi()\n (epoint.x, epoint.y) = i\n pub_path.bliss.append(epoint)\n return(pub_path)", "def _generate_subpaths(self):\n\n scale = self.SCALE\n\n for point in self._points:\n x_base = point[0] * scale + self.border * scale + self.line_size\n y_base = point[1] * scale + self.border * scale + self.line_size\n\n yield 'M {x0} {y0} L {x0} {y1} L {x1} {y1} L {x1} {y0} z'.format(\n x0=x_base,\n y0=y_base,\n x1=x_base + scale,\n y1=y_base + scale\n )", "def _build_path(self):\n for point_3d in self.path_coordinates:\n self.connect_point_with_neighbors(point_3d)", "def convert_paths(self):\n # convert to node sequences, dropping s'\n self.nodeseq_paths = []\n for path in self.paths:\n node_seq = [] # don't include s'\n for arc in path:\n node_seq.append(self.arc_info[arc]['destin'])\n self.nodeseq_paths.append(node_seq)\n # convert to og graph\n self.converted_paths = []\n for path in self.nodeseq_paths:\n this_path = []\n add_next_node = True\n for i in range(len(path) - 1):\n print(\"This path is\", this_path)\n node1 = path[i]\n node2 = path[i + 1]\n print(\"node1={}, node2={}\".format(node1, node2))\n if (node1, node2) in self.mapping:\n sc = self.mapping[(node1, node2)]\n print(\"uses sc edge for {}\".format(sc))\n print(\"should add {}, but also need to check for overlaps\".\n format(sc[1:-1]))\n if sc[1] in this_path:\n # we have an overlap\n start = len(this_path) - this_path.index(sc[1])\n this_path.extend(sc[start:-1])\n else:\n this_path.extend(sc[1:-1])\n add_next_node = False # next node is second of sc edge\n elif add_next_node:\n this_path.append(node1)\n else:\n add_next_node = True\n this_path.append(path[-1])\n self.converted_paths.append(this_path)", "def convert_points(pointsIN,epsgIN,epsgOUT):\n \n if(epsgIN != epsgOUT):\n \n coords_in = osr.SpatialReference()\n coords_in.ImportFromEPSG(epsgIN)\n coords_out = osr.SpatialReference() \n coords_out.ImportFromEPSG(epsgOUT) \n numPts = len(pointsIN)\n dimension = len(pointsIN[0])\n pointsOUT = []\n n=0\n while n<numPts:\n point = ogr.Geometry(type=ogr.wkbPoint)\n point.SetPoint(0, float(pointsIN[n][0]), float(pointsIN[n][1]))\n point.AssignSpatialReference(coords_in)\n point.TransformTo(coords_out)\n if dimension < 3:\n pointsOUT.append([float(point.GetX()),float(point.GetY())])\n else:\n pointsOUT.append([float(point.GetX()),float(point.GetY()),float(pointsIN[n][2])])\n \n n+=1\n \n return pointsOUT\n \n else:\n return pointsIN", "def to_list(self):\n path = []\n for point in self.points:\n path.append(point.to_dict())\n\n return path", "def array_to_path(np_array):\n path = Path()\n\n for point in np_array:\n path_point = PathPoint()\n path_point.point = Point(point[0,0], point[0,1], point[0,2])\n path_point.orientation = Vector3(point[1,0], point[1,1], point[1,2])\n\n path.points.append(path_point)\n\n return path", "def segments_svg_path(self):\n verts = self.vertices.split(',') # leave as string\n segs = [int(v) for v in self.segments.split(',')]\n data 
= []\n for i in xrange(0, len(segs), 2):\n v0 = 2 * segs[i]\n v1 = 2 * segs[i + 1]\n data.append(u\"M%s,%sL%s,%s\" % (\n verts[v0], verts[v0 + 1],\n verts[v1], verts[v1 + 1],\n ))\n return u\"\".join(data)", "def polygon2pathd(polyline_d):\n return polyline2pathd(polyline_d, True)", "def calculate_paths(shape: Tuple[int, int], point: Tuple[int, int]) -> int:\n\tn, m = map(int, input().split())\n\tf = [[0] * (m+1) for i in range(n+1)]\n\tf[1][1] = 1\n\tfor i in range(2, n+1):\n\t\tfor j in range(2, m + 1):\n\t\t\tf[i][j] = f[i-2][j-2] + f[i-2][j-1]\n\treturn n + m", "def get_markovian_path(points):\n\n def dist(x, y):\n return math.hypot(y[0] - x[0], y[1] - x[1])\n\n paths = [p for p in it.permutations(points)]\n path_distances = [\n sum(map(lambda x: dist(x[0], x[1]), zip(p[:-1], p[1:])))\n for p in paths]\n min_index = np.argmin(path_distances)\n\n return paths[min_index]", "def paths(p, q):\n if (p, q) == (0, 0):\n return [((0, 0),)]\n answer = list()\n if p > 0:\n west = paths(p - 1, q)\n for path in west:\n answer.append(path + ((p, q),))\n if q > 0:\n south = paths(p, q - 1)\n for path in south:\n answer.append(path + ((p, q),))\n return answer", "def extract_paths(measures: List['UserMeasure']) -> List['GwPoint']:\n\n path: List['GwPoint'] = []\n measures = sorted(measures, key = lambda k: k.timestamp)\n (src, dest) = find_endpoints(measures)\n dest_index = 0\n while 'D' not in (src, dest): # Loop until the end of the file is reached\n for m in measures[dest_index:]:\n dest_index += 1\n if m.zone == dest:\n break\n src_index = dest_index\n for m in reversed(measures[:dest_index]):\n src_index -= 1\n if m.zone == src:\n break\n dag = to_DAG(measures[src_index:dest_index])\n for d in dag.list:\n path.append(GwPoint(\n d.id,\n d.lac,\n d.find_gw_match().azimuth,\n d.find_gw_match().longitude,\n d.find_gw_match().latitude,\n d.zone,\n d.timestamp\n ))\n src_index = dest_index\n (src, dest) = find_endpoints(measures[src_index:])\n return path", "def Reconstruct_Path(self, dir_map):\n path = ''\n first_time = True\n x = self.xFinish\n y = self.yFinish\n while not (x == self.xStart and y == self.yStart):\n j = dir_map[y][x]\n c = str((self.num_directions-j-1) % self.num_directions)\n if first_time:\n path=c+path\n first_time=False\n else:\n path = c + ','+ path\n x += self.dx[j]\n y += self.dy[j]\n return path", "def node2path(node, lowx, lowy, highx, highy, polygons, lines, points):\n if node.items:\n ll = lowx, lowy\n lr = highx, lowy\n ur = highx, highy\n ul = lowx, highy\n polygons.append((ll, lr, ur, ul))\n for pt in node.items:\n points.append(pt)\n return\n else:\n if (node.cutdim % 2 == 0):\n items.append( ((node.cutval, lowy), (node.cutval, highy)) )\n node2path(node.left, lowx, lowy, node.cutval, highy, items, points)\n node2path(node.right, node.cutval, lowy, highx, highy, items, points)\n else:\n items.append((( lowx, node.cutval),( highx, node.cutval)))\n node2path(node.left, lowx, lowy, highx, node.cutval, items, points)\n node2path(node.right, lowx, node.cutval, highx, highy, items, points)\n return", "def drawPaths(points, lines, height, lineWidth, pointRadius):\r\n\r\n\tlineArraySize = len(lines)\r\n\tpointArraySize = len(points)\r\n\tlineArrayItems = lineArraySize / 4\r\n\tpointArrayItems = pointArraySize / 2\r\n\r\n\r\n\tglLineWidth(lineWidth)\r\n\tglPointSize(pointRadius)\r\n\r\n\tglColor4f(0.0, 0.0, 1.0, 1.0)\r\n\tglNormal3f(0.0, 0.0, 1.0)\r\n\r\n\tglDisable(GL_TEXTURE_2D)\r\n\r\n\tglBegin(GL_LINES)\r\n\r\n#\tglLoadIdentity()\r\n\r\n\tfor i in 
range(lineArrayItems):\r\n\t\tglVertex3f(lines[i * 4], height - lines[i * 4 + 1], 0.1)\r\n\t\tglVertex3f(lines[i * 4 + 2], height - lines[i * 4 + 3], 0.1)\r\n\r\n\tglEnd()\r\n\r\n\tglBegin(GL_POINTS)\r\n\r\n#\tglLoadIdentity()\r\n\r\n\tfor i in range(pointArrayItems):\r\n\t\tglVertex3f(points[i * 2], height - points[i * 2 + 1], 0.11)\r\n\r\n\tglEnd()\r\n\r\n\tglEnable(GL_TEXTURE_2D)", "def polyline2pathd(polyline_d):\n points = polyline_d.replace(', ', ',')\n points = points.replace(' ,', ',')\n points = points.split()\n\n closed = points[0] == points[-1]\n\n d = 'M' + points.pop(0).replace(',', ' ')\n for p in points:\n d += 'L' + p.replace(',', ' ')\n if closed:\n d += 'z'\n return d", "def decompose_paths(self):\n if self.child_nodes == {}:\n return []\n\n import numpy as np\n\n def decompose_paths_rec(node_inner, path):\n \"\"\"\n This function does the recursive create_path of the decomposition\n :param node_inner:\n :param path:\n \"\"\"\n if node_inner.is_leaf():\n path = np.append(path, str(node_inner.value))\n return path[None]\n else:\n paths = np.array([])\n for edge_name in node_inner.child_nodes:\n new_path = np.append(path, str(edge_name))\n paths = np.append(paths, decompose_paths_rec(node_inner.child_nodes[edge_name], new_path))\n return paths\n\n decomposition = decompose_paths_rec(self, np.array([]))\n return decomposition.reshape((decomposition.shape[0]/(self.d+1), self.d+1))", "def path_to_poses(mapdata, path):\n p_array = []\n rospy.loginfo(\"Converting path to poses.\")\n last_ori = quaternion_from_euler(0, 0, 0)\n last_ori = Quaternion(last_ori[0], last_ori[1],last_ori[2],last_ori[3])\n for i in range(len(path) - 1):\n msg = PoseStamped()\n msg.pose.position = PathPlanner.grid_to_world(mapdata, path[i][0], path[i][1])\n last_ori = quaternion_from_euler(0, 0, PathPlanner.get_orientation(path[i], path[i+1]))\n last_ori = Quaternion(last_ori[0], last_ori[1],last_ori[2],last_ori[3])\n msg.pose.orientation = last_ori\n p_array.append(msg)\n\n last = PoseStamped()\n last.pose.position = PathPlanner.grid_to_world(mapdata, path[-1][0], path[-1][1])\n last.pose.orientation = last_ori\n p_array.append(last)\n return p_array", "def points (p, line: str) -> list:\n direction = line [0]\n steps = list (range (1, 1 + int (F.tail (line))))\n return F.map (point (p, direction)) (steps)", "def svg2paths(svg_file_location,\n return_svg_attributes=False,\n convert_circles_to_paths=True,\n convert_ellipses_to_paths=True,\n convert_lines_to_paths=True,\n convert_polylines_to_paths=True,\n convert_polygons_to_paths=True,\n convert_rectangles_to_paths=True):\n if os_path.dirname(svg_file_location) == '':\n svg_file_location = os_path.join(getcwd(), svg_file_location)\n\n doc = parse(svg_file_location)\n\n def dom2dict(element):\n \"\"\"Converts DOM elements to dictionaries of attributes.\"\"\"\n keys = list(element.attributes.keys())\n values = [val.value for val in list(element.attributes.values())]\n return dict(list(zip(keys, values)))\n\n def parse_trafo(trafo_str):\n \"\"\"Returns six matrix elements for a matrix transformation for any \n valid SVG transformation string.\"\"\"\n trafos = trafo_str.split(')')[:-1]\n trafo_matrix = np.array([1., 0., 0., 0., 1., 0., 0., 0., 1.]).reshape(\n (3, 3)) # Start with neutral matrix\n\n for trafo_sub_str in trafos:\n trafo_sub_str = trafo_sub_str.lstrip(', ')\n value_str = trafo_sub_str.split('(')[1]\n values = list(map(float, value_str.split(',')))\n if 'translate' in trafo_sub_str:\n x = values[0]\n y = values[1] if (len(values) > 1) else 0.\n 
trafo_matrix = np.dot(trafo_matrix, np.array(\n [1., 0., x, 0., 1., y, 0., 0., 1.]).reshape((3, 3)))\n elif 'scale' in trafo_sub_str:\n x = values[0]\n y = values[1] if (len(values) > 1) else 0.\n trafo_matrix = np.dot(trafo_matrix,\n np.array([x, 0., 0., 0., y, 0., 0., 0.,\n 1.]).reshape((3, 3)))\n elif 'rotate' in trafo_sub_str:\n a = values[0] * np.pi / 180.\n x = values[1] if (len(values) > 1) else 0.\n y = values[2] if (len(values) > 2) else 0.\n am = np.dot(np.array(\n [np.cos(a), -np.sin(a), 0., np.sin(a), np.cos(a), 0., 0.,\n 0., 1.]).reshape((3, 3)),\n np.array(\n [1., 0., -x, 0., 1., -y, 0., 0., 1.]).reshape(\n (3, 3)))\n am = np.dot(\n np.array([1., 0., x, 0., 1., y, 0., 0., 1.]).reshape(\n (3, 3)), am)\n trafo_matrix = np.dot(trafo_matrix, am)\n elif 'skewX' in trafo_sub_str:\n a = values[0] * np.pi / 180.\n trafo_matrix = np.dot(trafo_matrix,\n np.array(\n [1., np.tan(a), 0., 0., 1., 0., 0.,\n 0., 1.]).reshape((3, 3)))\n elif 'skewY' in trafo_sub_str:\n a = values[0] * np.pi / 180.\n trafo_matrix = np.dot(trafo_matrix,\n np.array(\n [1., 0., 0., np.tan(a), 1., 0., 0.,\n 0., 1.]).reshape((3, 3)))\n else: # Assume matrix transformation\n while len(values) < 6:\n values += [0.]\n trafo_matrix = np.dot(trafo_matrix,\n np.array([values[::2], values[1::2],\n [0., 0., 1.]]))\n\n trafo_list = list(trafo_matrix.reshape((9,))[:6])\n return trafo_list[::3] + trafo_list[1::3] + trafo_list[2::3]\n\n def parse_node(node):\n \"\"\"Recursively iterate over nodes. Parse the groups individually to \n apply group transformations.\"\"\"\n # Get everything in this tag\n data = [parse_node(child) for child in node.childNodes]\n if len(data) == 0:\n ret_list = []\n attribute_dictionary_list_int = []\n else:\n # Flatten the lists\n ret_list = []\n attribute_dictionary_list_int = []\n for item in data:\n if type(item) == tuple:\n if len(item[0]) > 0:\n ret_list += item[0]\n attribute_dictionary_list_int += item[1]\n\n if node.nodeName == 'g':\n # Group found\n # Analyse group properties\n group = dom2dict(node)\n if 'transform' in group.keys():\n trafo = group['transform']\n\n # Convert all transformations into a matrix operation\n am = parse_trafo(trafo)\n am = np.array([am[::2], am[1::2], [0., 0., 1.]])\n\n # Apply transformation to all elements of the paths\n def xy(p):\n return np.array([p.real, p.imag, 1.])\n\n def z(coords):\n return coords[0] + 1j * coords[1]\n\n ret_list = [Path(*[bpoints2bezier([z(np.dot(am, xy(pt)))\n for pt in seg.bpoints()])\n for seg in path])\n for path in ret_list]\n return ret_list, attribute_dictionary_list_int\n elif node.nodeName == 'path':\n # Path found; parsing it\n path = dom2dict(node)\n d_string = path['d']\n return [parse_path(d_string)] + ret_list, [\n path] + attribute_dictionary_list_int\n elif convert_polylines_to_paths and node.nodeName == 'polyline':\n attrs = dom2dict(node)\n path = parse_path(polyline2pathd(node['points']))\n return [path] + ret_list, [attrs] + attribute_dictionary_list_int\n elif convert_polygons_to_paths and node.nodeName == 'polygon':\n attrs = dom2dict(node)\n path = parse_path(polygon2pathd(attrs['points']))\n return [path] + ret_list, [attrs] + attribute_dictionary_list_int\n elif convert_lines_to_paths and node.nodeName == 'line':\n line = dom2dict(node)\n d_string = ('M' + line['x1'] + ' ' + line['y1'] +\n 'L' + line['x2'] + ' ' + line['y2'])\n path = parse_path(d_string)\n return [path] + ret_list, [line] + attribute_dictionary_list_int\n elif convert_ellipses_to_paths and node.nodeName == 'ellipse':\n attrs = 
dom2dict(node)\n path = parse_path(ellipse2pathd(attrs))\n return [path] + ret_list, [attrs] + attribute_dictionary_list_int\n\t\telif convert_circles_to_paths and node.nodeName == 'circle':\n\t\t\tattrs = dom2dict(node)\n path = parse_path(ellipse2pathd(attrs))\n return [path] + ret_list, [attrs] + attribute_dictionary_list_int\n\t\telif convert_rectangles_to_paths and node.nodeName == 'rect':\n attrs = dom2dict(node)\n path = parse_path(rect2pathd(attrs))\n return [path] + ret_list, [attrs] + attribute_dictionary_list_int\n else:\n return ret_list, attribute_dictionary_list_int", "def reconstruct_path(current):\r\n path = [current.coord]\r\n parent = current.parent\r\n while parent:\r\n path = [parent.coord] + path\r\n parent = parent.parent\r\n path = path[1:]\r\n return path", "def polygon_path(x, y=None):\n\n if y is None:\n y = x\n\n return np.vstack([\n np.vstack([x, np.full_like(x, y[0])]).T,\n np.vstack([np.full_like(y, x[-1]), y]).T[1:],\n np.vstack([x, np.full_like(x, y[-1])]).T[::-1][1:],\n np.vstack([np.full_like(y, x[0]), y]).T[::-1][1:]]).T", "def create_straight_path(self, spacing, num_points, row=1):\n\t\tx_array, y_array = [], []\n\t\tfor i in range(1, num_points, spacing):\n\t\t\tx_array.append(row) # NOTE: straight line at x=1m\n\t\t\ty_array.append(i)\n\t\treturn x_array, y_array", "def convert(points):\n distance = []\n for i in points:\n x = int(i[0])\n y = int(i[1])\n distance.append([x,y])\n return distance", "def get_paths(self, depth=None):\n if not isinstance(self.ref_cell, Cell):\n return []\n if self.origin is not None:\n trans = numpy.array(self.origin)\n else:\n trans = None\n if self.rotation is not None:\n rot = self.rotation * numpy.pi / 180.0\n else:\n rot = None\n return [\n p.transform(trans, rot, self.magnification, self.x_reflection)\n for p in self.ref_cell.get_paths(depth=depth)\n ]", "def points2contour(points):\n return points.reshape(-1, 1, 2)", "def merge_portals_to_path(self):\n letter_coordinates = np.argwhere(self.maze.isalpha())\n for coord in letter_coordinates:\n coord = tuple(coord)\n if point_3d(\"above\", coord) in letter_coordinates:\n if self.maze[point_3d(\"below\", coord)] == PATH:\n self.maze[point_3d(\"below\", coord)] = (self.maze[point_3d(\"above\", coord)] +\n self.maze[coord])\n self.maze[coord] = EMPTY\n self.maze[point_3d(\"above\", coord)] = EMPTY\n continue\n\n if point_3d(\"below\", coord) in letter_coordinates:\n if self.maze[point_3d(\"above\", coord)] == PATH:\n self.maze[point_3d(\"above\", coord)] = (self.maze[coord] +\n self.maze[point_3d(\n \"below\", coord)]\n )\n self.maze[coord] = EMPTY\n self.maze[point_3d(\"below\", coord)] = EMPTY\n continue\n\n if point_3d(\"right_of\", coord) in letter_coordinates:\n if self.maze[point_3d(\"left_of\", coord)] == PATH:\n self.maze[point_3d(\"left_of\", coord)] = (self.maze[coord] +\n self.maze[point_3d(\n \"right_of\", coord)]\n )\n self.maze[coord] = EMPTY\n self.maze[point_3d(\"right_of\", coord)] = EMPTY\n continue\n\n if point_3d(\"left_of\", coord) in letter_coordinates:\n if self.maze[point_3d(\"right_of\", coord)] == PATH:\n self.maze[point_3d(\"right_of\", coord)] = (self.maze[point_3d(\"left_of\", coord)] +\n self.maze[coord])\n self.maze[coord] = EMPTY\n self.maze[point_3d(\"left_of\", coord)] = EMPTY\n continue", "def plot_path_points(ax, points=[], paths=[], path_labels=[]):\n cols = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']\n \n # if points:\n # ax.scatter(*points, alpha=0.1, c='k')\n\n# add 
functionality to ignore labels\n for path, label, col in zip(paths, path_labels, cols):\n ax.plot(*path, alpha=.8, c=col, label=label)\n ax.scatter(*path, alpha=.6, c=col)\n \n ax.set(xlim=[0, 1], ylim=[0, 1])\n ax.set_axis_off()\n \n if path_labels:\n ax.legend()\n\n return ax" ]
[ "0.66994214", "0.65940034", "0.6466729", "0.6385552", "0.6291015", "0.61070466", "0.6079898", "0.6022747", "0.59725976", "0.59154207", "0.59085953", "0.58816546", "0.57742137", "0.5762992", "0.5753632", "0.57504135", "0.57386243", "0.57256794", "0.5717574", "0.5676891", "0.56718785", "0.56569505", "0.562711", "0.559445", "0.55770016", "0.55652076", "0.5523925", "0.5518702", "0.5517927", "0.5496569" ]
0.83508885
0
Returns the metric used in the search
def metric(self): return self.__metric
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def metric(self):\n return self._metric", "def metric(self) -> str:\r\n return self._metric", "def metric(self):\n\n if not self._metric_cache:\n # Select an appropriate statistic\n cls = utils.import_class_or_module(self._metric)\n self._metric_cache = cls(self.additional)\n\n return self._metric_cache", "def get_metrics(self):\n return None", "def get_metrics(self, metric_name: str):\n if metric_name == \"rmse\":\n return self._rmse\n elif metric_name == \"mase\":\n return self._mase\n elif metric_name == \"mae\":\n return self._mae\n elif metric_name == \"mape\":\n return self._mape\n elif metric_name == \"f1\":\n return self._f1\n elif metric_name == \"accuracy\":\n return self._accuracy", "def get_metrics(self) -> Dict[str, base.Number]:\n return self._metrics", "def get_evaluation_metric(self):\n\t\treturn self.metric", "def get_metric(self) -> mt.Metric:\n return mt.BinaryAccuracy()", "def get_unit_by_metric(metric):\n for item in PROMETHEUS_METRICS_LIST:\n if item['name'] == metric:\n return item['unit']\n return \"\"", "def _getMetrics(self):\n metric = None\n if self.metrics is not None:\n metric = self.metrics(self._currentRecordIndex+1)\n elif self.metricValue is not None:\n metric = self.metricValue\n else:\n raise RuntimeError('No metrics or metric value specified for dummy model')\n\n return {self._optimizeKeyPattern:metric}", "def metric_name(self) -> str:\n return self._values.get('metric_name')", "def metric_name(self) -> str:\n return self._values.get('metric_name')", "def best_metric(self) -> float:\n return self._best_metric", "def get_metric(self) -> mt.Metric:\n return mt.CategoricalAccuracy()", "def get_metric(self) -> mt.Metric:\n return mt.CategoricalAccuracy()", "def compute_metrics(self):\n pass", "def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")", "def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")", "def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")", "def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")", "def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")", "def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")", "def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")", "def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")", "def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")", "def get_metric(self):\n assert self._metric in self._metrics, 'Unsupported metric! 
Check the _metrics attribute for a list of supported metrics.'\n if self._metric == 'Euclidean':\n metric = torch.eye(self.parameters.shape[0])\n elif self._metric == 'Banana':\n n = self.dataset.shape[0]\n fish = torch.zeros(2,2)\n fish[0,0] = n/self.prior_var + 1\n fish[0,1] = n*2*self.parameters[1]/self.prior_var\n fish[1,0] = n*2*self.parameters[1]/self.prior_var\n fish[1,1] = n*4*self.parameters[1]**2/self.prior_var + 1\n metric = fish\n elif self._metric == 'Hessian':\n metric = self.get_hessian()\n elif self._metric == 'Softabs':\n hessian = self.get_hessian()\n if self._potential == 'funnel':\n hessian += torch.diag(self.jitters)\n eigs, vects = hessian.symeig(eigenvectors = True)\n softabs = (1./torch.tanh(self.softabs * eigs)) * eigs\n metric = vects @ softabs.diag() @ vects.t()\n elif self._metric == 'Fisher':\n metric = torch.zeros(self.parameters.shape[0],self.parameters.shape[0])\n grads = torch.zeros(self.parameters.shape[0])\n grads[0] = 0.5*torch.sum(self.parameters[1:]**2)*torch.exp(self.parameters[0]) + self.parameters[0]/9.\n grads[1:] = self.parameters[1:]*torch.exp(self.parameters[0])\n metric = torch.ger(grads,grads) + torch.eye(self.parameters.shape[0])/self.softabs\n return metric", "def metric_name(self) -> str:\n return pulumi.get(self, \"metric_name\")", "def metric_name(self) -> str:\n return pulumi.get(self, \"metric_name\")", "def get_metric(name):\n return metric_name_to_function_mapping[name.lower()]", "def getMetricName(self):\n return self.getOrDefault(self.metricName)" ]
[ "0.73544544", "0.7343437", "0.7094367", "0.6804628", "0.67208123", "0.6674424", "0.65164226", "0.6417011", "0.6387913", "0.63811266", "0.6376277", "0.6376277", "0.6359819", "0.63471746", "0.63471746", "0.63350976", "0.63268703", "0.63268703", "0.63268703", "0.63268703", "0.63268703", "0.63268703", "0.63268703", "0.63268703", "0.63268703", "0.6321826", "0.63127846", "0.63127846", "0.6296842", "0.62902844" ]
0.74399334
0
Select an account and set it as the current 'working' account Calling this method also cleares the Batch Queue, if it isn't empty
def SelectAccount(self, nickname): self.ClearBatchQueue() if nickname in self.accounts: self.current_account = self.accounts[nickname] self.client = self.current_account.client return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def account(self, account):\n\n self._account = account", "def account(self, account):\n\n self._account = account", "def account(self, account):\n\n self._account = account", "def account(self, account):\n\n self._account = account", "def account(self, account: str):\n self._account = account", "def set_bill_account_from_single_selection_kendo_dropdown(self, bill_account):\n self.single_selection_from_kendo_dropdown(self.bill_account_single_selection_kendo_dropdown_locator, bill_account)", "def flush_account(self):\n if self.data_channel:\n if not self.data_channel.transfer_in_progress():\n self.data_channel.close()\n self.data_channel = None\n if self.data_server:\n self.data_server.close()\n self.data_server = None\n\n self.fs.rnfr = None\n self.authenticated = False\n self.username = \"\"\n self.attempted_logins = 0\n self.current_type = 'a'\n self.restart_position = 0\n self.quit_pending = False\n self.in_dtp_queue = None\n self.out_dtp_queue = None\n\n\n # --- connection", "def set_account(self, account_list):\n self.multiple_items_selection_from_kendo_dropdown(self.account_kendo_dropdown_locator, account_list)\n self.wait_for_ajax_spinner_load()", "def change_account(self, account):\r\n check_account = Account(account, steem_instance=self.steem)\r\n self.account = check_account[\"name\"]\r\n self.refresh()", "def set_account(self):\n return self.__Account", "def set_account(self, account: str):\n ret = self._call_txtrader_api('set_account', {'account': account})\n if ret:\n self.account = account\n return ret", "def save_account(self):\n Credential.account_list.append(self)", "def set_audit_account(self, audit_account):\n self.single_selection_from_kendo_dropdown(self.statement_entry_audit_account_locator, audit_account)\n self.wait_for_ajax_spinner_load()", "def put_account(self, account):\n \n pass", "def set_bill_account(self, bill_account_list):\n self.multiple_items_selection_from_kendo_dropdown(self.bill_account_kendo_dropdown_locator, bill_account_list)\n page_header_element = self.wait().until(EC.element_to_be_clickable(self.page_header_locator), 'page header locator not found before specified time out')\n page_header_element.click()", "def set_ixtools_account(self, item_name):\n self.single_selection_from_kendo_dropdown(self.ixtools_account_kendo_dropdown_locator, item_name)\n self.wait_for_ajax_spinner_load()", "def setaccount(self, vergeaddress, account):\n return self.proxy.setaccount(vergeaddress, account)", "def set_default_account(web3):\n web3.eth.defaultAccount = web3.eth.accounts[0]", "def save_account(self):\n Credentials.credentials_list.append(self)", "def get_account(self, account):\n \n pass", "def account(self, acct):\n aMgr = self.acctManager\n if len(aMgr.accounts) <= acct:\n raise Exception(\"requested unknown account number %i\" % acct)\n return aMgr.account(acct)", "def on_UseDefaultA1Account_clicked(self):\n # TODO: not implemented yet\n raise NotImplementedError", "def select_fedcm_account(self, account_index):\n pass", "def choose_account (self):\n\t\traise ae.AccountError(\"Fixing the config file hasn't been overloaded\")", "def put(self, account=None, user=None, account_id=None):\n return super().put()", "def accounts(self, accounts):\n\n self._accounts = accounts", "def sync(self):\n for subscription in self.getSubscriptionList():\n #user_id = subscription.getZopeUser()\n #uf = self.getPortalObject().acl_users\n #user = uf.getUserById(user_id).__of__(uf)\n #newSecurityManager(None, user)\n subscription.activate(activity='SQLQueue',\n 
tag=subscription.getId(),\n priority=ACTIVITY_PRIORITY\n ).SubSync(subscription.getPath())", "def setAccount(self, account_id):\n self.data_struct['_setAccount'] = account_id", "def set_buy_sell_deal_account(self, account_list):\n self.multiple_items_selection_from_kendo_dropdown(self.buy_sell_deal_account_dropdown_locator, account_list)\n self.wait_for_ajax_spinner_load()", "def create_account(\n account_name,\n account_email,\n account_role,\n access_to_billing,\n organization_unit_id,\n scp):\n\n client = session.client('organizations')\n\n try:\n create_account_response = client.create_account(Email=account_email, AccountName=account_name,\n RoleName=account_role,\n IamUserAccessToBilling=access_to_billing)\n except botocore.exceptions.ClientError as e:\n print(e)\n sys.exit(1)\n\n time.sleep(10)\n\n account_status = 'IN_PROGRESS'\n while account_status == 'IN_PROGRESS':\n create_account_status_response = client.describe_create_account_status(\n CreateAccountRequestId=create_account_response.get('CreateAccountStatus').get('Id'))\n print(\"Create account status \"+str(create_account_status_response))\n account_status = create_account_status_response.get('CreateAccountStatus').get('State')\n if account_status == 'SUCCEEDED':\n accountid = create_account_status_response.get('CreateAccountStatus').get('AccountId')\n elif account_status == 'FAILED':\n print(\"Account creation failed: \" + create_account_status_response.get('CreateAccountStatus').get('FailureReason'))\n sys.exit(1)\n root_id = client.list_roots().get('Roots')[0].get('Id')\n\n # Move account to the org\n if organization_unit_id is not None:\n try:\n describe_organization_response = client.describe_organizational_unit(\n OrganizationalUnitId=organization_unit_id)\n move_account_response = client.move_account(AccountId=accountid, SourceParentId=root_id,\n DestinationParentId=organization_unit_id)\n except Exception as ex:\n template = \"An exception of type {0} occurred. Arguments:\\n{1!r} \"\n message = template.format(type(ex).__name__, ex.args)\n # create_organizational_unit(organization_unit_id)\n print(message)\n\n # Attach policy to account if exists\n if scp is not None:\n attach_policy_response = client.attach_policy(PolicyId=scp, TargetId=accountid)\n print(\"Attach policy response \"+str(attach_policy_response))\n\n return accountid" ]
[ "0.5898519", "0.5898519", "0.5898519", "0.5898519", "0.58418894", "0.5787817", "0.5739551", "0.57178605", "0.5570972", "0.55681026", "0.55255353", "0.54933035", "0.5438046", "0.54226345", "0.5285505", "0.52594006", "0.52074546", "0.5121794", "0.5118399", "0.50960463", "0.50945896", "0.50919914", "0.505194", "0.5038863", "0.50200766", "0.49970993", "0.49925402", "0.49776524", "0.4966959", "0.4905131" ]
0.6025916
0
Clear the batch queue
def ClearBatchQueue(self): self.batch_queue = gdata.contacts.data.ContactsFeed()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clearQueueAll():", "def clear_queue(self):\n self.queue = deque()", "def clear(self):\n self.queue.clear()", "def clear_queue(self):\n while not self.queue.empty():\n self.queue.get()", "def clear(self):\n self.queue = Queue()", "def clear_batch(self):\n self._batch_idx = 0\n self.variant_states = None\n self.object_specs = None\n self.object_attribute_values = None", "def clearQueue(targets):", "def reset(self):\n # Because it's a queue no need for reset..\n pass", "def reset(self):\n # Because it's a queue no need for reset..\n pass", "def reset(self):\n # Because it's a queue no need for reset..\n pass", "def reset_queueing(self):\n self._num_queued = 0", "def __clear_message_queue(self):\r\n self.__lib.CC_ClearMessageQueue(self.__serno)", "def clear(self):\r\n try:\r\n while not self._queue.empty():\r\n self._queue.get().close()\r\n except:\r\n pass", "def flushMsgs(self):\n\n self.queue = self.pre_queue[:]\n self.pre_queue = []", "def reset(self):\n self.stop()\n self._queue = Queue.Queue()", "def _drain_queue(self):\n while self.queue:\n self._export_batch()", "def clearDownloadQueue(self):\n #print(\"CLEAR DOWNLOAD QUEUE\")\n self.downloadQueue = []\n self.clearEvents()", "def clean_queue(self):\n self._stdin_queue.put_nowait(None) # Release thread", "def clear_jobs(self):\n with self._mutex:\n self._jobs = []", "def clear_queue(self):\n\t\t\tself.message_queue.clear()\n\t\t\treturn self.message_queue", "def _queue_delete(self, queue):\n\n queue.delete()", "def _flush_queue():\n try:\n # Multiple queue flushes may be scheduled simultaneously on different threads\n # (e.g., if the queue is at its flush threshold and several more items\n # are added before a flush occurs). For correctness and efficiency, only one such\n # flush operation should proceed; all others are redundant and should be dropped\n acquired_lock = _metric_queue_lock.acquire(blocking=False)\n if acquired_lock:\n client = mlflow.tracking.MlflowClient()\n # For thread safety and to avoid modifying a list while iterating over it, we record a\n # separate list of the items being flushed and remove each one from the metric queue,\n # rather than clearing the metric queue or reassigning it (clearing / reassigning is\n # dangerous because we don't block threads from adding to the queue while a flush is\n # in progress)\n snapshot = _metric_queue[:]\n for item in snapshot:\n _metric_queue.remove(item)\n\n metrics_by_run = _assoc_list_to_map(snapshot)\n for run_id, metrics in metrics_by_run.items():\n client.log_batch(run_id, metrics=metrics, params=[], tags=[])\n finally:\n if acquired_lock:\n _metric_queue_lock.release()", "def clear(self):\n with self._not_full:\n with self._not_empty:\n with self._mutex:\n self.close()\n self._queue.clear()\n self._cur_size = 0", "def queueOff() -> None:\n\t\tLogging.enableQueue = False", "def reset_queue(self, db_session):\n for player in self.player_queue.queue:\n self.command_queue.appendleft(('_delete_last_row', {}))\n self.player_queue = PlayerQueue.PlayerQueue()\n db_session.execute(sqlalchemy.update(db.User.__table__, values={db.User.__table__.c.times_played: 0}))\n self._add_to_chat_queue('The queue has been emptied and all players start fresh.')", "def on_queue_clear_command(self, event):\n self.pre_check(event)\n self.same_channel_check(event)\n if self.get_player(event.guild.id).queue:\n self.get_player(event.guild.id).queue.clear()\n api_loop(event.channel.send_message, \"The queue has been cleared.\")\n else:\n api_loop(event.channel.send_message, \"The queue is 
already empty.\")", "async def clear(self):", "def purge(self):\n self._rpc(specification.Queue.Purge())", "def drain(queue):\n while not queue.is_empty():\n queue.remove()", "def discart(self):\n self.queue.clear()\n self.fetchable = 0" ]
[ "0.8027347", "0.79099566", "0.7851463", "0.780406", "0.7732012", "0.75668514", "0.7311712", "0.721093", "0.721093", "0.721093", "0.7194719", "0.7069694", "0.70656955", "0.69665104", "0.6955623", "0.6941233", "0.68873274", "0.6882734", "0.68424505", "0.68145674", "0.6807092", "0.6802429", "0.6666744", "0.6660617", "0.6658293", "0.6646039", "0.66038364", "0.6601882", "0.659246", "0.6585563" ]
0.8334762
0
Lazily get the first contact group's Atom Id
def GetFirstGroupId(self): return self.client.GetGroups().entry[0].id.text
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def get_contact_group(dbcon: DBConnection, id: int) -> Any: # Use any because optional returns suck.\n q = \"\"\"select id, name, active from contact_groups where id=%s\"\"\"\n row = await dbcon.fetch_row(q, (id,))\n contact = None\n if row:\n contact = object_models.ContactGroup(*row)\n return contact", "def id(self):\n return self._group", "def customer_group_get_one(group_id):\n return customer_group_get(group_id)", "def get_group_id(self, group_name):\n uri_vars = {\"q\": group_name, \"start\": 0, \"count\": \"Infinity\"}\n group_uri = urllib.parse.urlencode(uri_vars)\n full_url = self.base_url + f\"/group/group/findgroup.html?{group_uri}\"\n result_str = self.fetch(full_url)\n result = json.loads(result_str)\n group_id = result[\"items\"][0][\"id\"] # Choose ID of first result\n logger.debug(f\"Found {group_id} for group {group_name}\")\n return group_id", "def min_group_id(self) -> int:\n\n group_ids = np.asarray(self.group_ids, dtype=np.float)\n if np.all(np.isnan(group_ids)):\n group_id = None\n else:\n group_id = int(np.nanmin(group_ids))\n\n return group_id", "def identifier(self):\n return self._group.identifier", "def pull_one_contact(self, name):\n contact = []\n for x in self.contacts:\n if x[0] == name:\n contact_name = x[0]\n number = x[1]\n email = x[2]\n zipcode = x[3]\n contact = [contact_name, number, email, zipcode]\n print(contact)\n return contact, self.contacts.index(x)", "def getId(self):\n return _libsbml.Group_getId(self)", "def GroupId(self):\n\t\treturn self._get_attribute('groupId')", "def getGroup(self, resname, atomname):\n group = \"\"\n if resname in self.map:\n resid = self.map[resname]\n if resid.hasAtom(atomname):\n atom = resid.atoms[atomname]\n group = atom.group\n return group", "def get_group_id():\n url = f\"{BASE_URL}/api/v2/tickets/api/v2/groups\"\n headers = {\"AUTHorization\": f\"Basic {AUTH}\"}\n r = requests.get(url, headers=headers)\n if r.ok:\n print('Got group ID for \"Onboarding\" Group')\n else:\n logging.debug(f\"Error - {r.status_code} - {r.content}\")\n groups = r.json()\n for group in groups:\n if group[\"name\"] == \"Onboarding\":\n group_id = group[\"id\"]\n\n return group_id", "def buGroup(self):\n return self.xsID[1]", "def getID():", "def selectAtomid(self):\n\n\t\tif len(self.atomid) == 0:\n\t\t\treturn\n\n\t\ttmplist = []\n\t\tfor atom in self.atomlist:\n\t\t\tfound = False\n\t\t\tfor id in self.atomid:\n\t\t\t\tif int(id) == int(atom.file_id):\n\t\t\t\t\tfound = True\n\t\t\t\t\tbreak\n\n\n\t\t\tif found and not self.invatomid:\n\t\t\t\ttmplist.append(atom)\n\t\t\tif not found and self.invatomid:\n\t\t\t\ttmplist.append(atom)\n\n\t\tself.atomlist = tmplist", "def _getNextGroupId(self):\n groupId = self._nextGroupId\n self._nextGroupId += 1\n return str(groupId)", "def group_id(self):\n return self._id", "def msid(self):\n return self.msids[0]", "def what_is(self, _id):\n for g in self.groups:\n if _id in self.h_group_ids[g]:\n return g\n return None", "def test_groups_group_id_get(self):\n pass", "def next_identity(self) -> OrganisationId:\n ...", "def entry_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"entry_group_id\")", "def group_serial(self):\n return self.structure.group_serial[self.mask]", "def get_contact(self, guid):\n for contact in self._contacts:\n if contact.guid == guid:\n return contact\n return None", "def get_primary_id(self):", "def get_project_id(group, name):\n response = urllib.request.urlopen(\"https://abf.io/api/v1/search.json?type=projects&query=\" + name)\n decode = 
response.read().decode(\"UTF-8\")\n projects = json.loads(decode)['results']['projects']\n projects = [x for x in projects if \"fullname\" in x and x['fullname'] == group + \"/\" + name]\n if len(projects) > 0:\n project = projects[0]\n return project['id'], project['git_url']\n else:\n return None", "def mtz_get_group(mtz_filename):\n\n xdata = mtz_get_xdata(mtz_filename)\n\n try:\n group = int(xdata[-2])\n except Exception:\n raise Exception(\"Could not extract group number from xdata\")\n\n try:\n assert group == xdata[-2]\n except Exception:\n # print(\"Exception\")\n raise Exception(f\"Expected integer group number, got {group} from {xdata[-2]}\")\n\n return int(group)", "def get(self):\n self._group = self._client.get(\n url=self._client.get_full_url(\n self.get_path(\n 'single', realm=self._realm_name, group_id=self._group_id\n )\n )\n )\n self._group_id = self._group[\"id\"]\n return self._group", "def test_first_id(self):\r\n ids = \\\r\n \"R27DLI_4812 R27DLI_600 R27DLI_727 U1PLI_403 U1PLI_8969\".split(\r\n )\r\n self.assertEqual(first_id(ids, {}), 'R27DLI_4812')", "def test_get_first_id(self):\r\n lines = \"\"\">S74_1 E86FECS01CEVAV orig_bc=ACATGTCACGTG new_bc=ACATGTCACGTG bc_diffs=0\r\nCTCCTC\r\n>Unassigned_2 E86FECS01EKKMF orig_bc=AGCGCTGATGTA new_bc=None bc_diffs=1\r\nGGTGCCTCCCTCGC\r\n>S80_3 E86FECS01EKKMF orig_bc=AGCGCTGATGTA new_bc=None bc_diffs=1\r\nGGTGCCTCCCTCGC\r\n>S80_4 E86FECS01CW66X orig_bc=AGTCCATAGCTG new_bc=AGTCCATAGCTG bc_diffs=0\r\nGTCCTGGCAG\"\"\".splitlines()\r\n self.assertEqual(\r\n get_first_id(lines),\r\n set(['S74_1',\r\n 'Unassigned_2',\r\n 'S80_3',\r\n 'S80_4']))", "def getMemberByIdRef(self, *args):\n return _libsbml.Group_getMemberByIdRef(self, *args)" ]
[ "0.6018908", "0.5888721", "0.5825256", "0.57383466", "0.56857145", "0.54320073", "0.54071623", "0.538237", "0.5367063", "0.5323985", "0.52931386", "0.5278044", "0.52581567", "0.52283746", "0.52128196", "0.51881367", "0.51880187", "0.51782674", "0.51699287", "0.5155815", "0.5137151", "0.51330835", "0.5114152", "0.51037765", "0.5092764", "0.5070506", "0.5052677", "0.5040736", "0.5039554", "0.5031945" ]
0.6422962
0
Remove a contact from the selected account
def RemoveContact(self, contact): self.client.Delete(contact)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_contact(self):\n contact_mob_num = input(\"-=\" * 30 + \"\\n\" + \"Please enter contact's mobile number to be removed: \")\n contact = self.auth.get_users_by_MobNum(contact_mob_num)\n if (not contact) or contact not in self._user.contacts:\n print('This user not in your contact list')\n return self.homepage()\n \n self._user.remove_contact(contact)\n print('Contact removed successfully')\n return self.homepage()", "def remove_contact(self, contact):\n\t\tclient_log.debug(f'Удаление контакта {contact}')\n\t\treq = {\n\t\t\tACTION: REMOVE_CONTACT,\n\t\t\tTIME: time.time(),\n\t\t\tUSER: self.username,\n\t\t\tACCOUNT_NAME: contact\n\t\t}\n\t\twith socket_lock:\n\t\t\tsend_message(self.transport, req)\n\t\t\tself.process_server_ans(get_message(self.transport))", "def remove_contact(self, contact):\n try:\n self._contacts.remove(contact)\n except ValueError:\n pass", "def del_contact(contact):\n db = get_db()\n \n if contact.get_hash_name() in db:\n db.pop(contact.get_hash_name())\n write_db(db)\n sys.exit(logger.ok('success: contact ' + '\"%s\"' % contact.get_name() + ' deleted'))\n else:\n sys.exit(logger.fail('fatal: contact does not exist'))", "def delete_contact(self, contact):\n self._delete('contacts', self._build_params(uuid=contact))", "def delete_contact(self):\n delete_first_name = input(\"Enter first name that you want to delete\\n\")\n for contact in self.contact_list:\n if contact.first_name == delete_first_name:\n #print(str(contact))\n self.contact_list.remove(contact)\n else:\n print(f\"No contact is present with first name {delete_first_name} \")", "def do_deluser(self, line):\n\t\tif isinstance(self.cl, Book):\n\t\t\ttry:\n\t\t\t\tself.cl.del_contact(line)\n\t\t\texcept ValueError:\n\t\t\t\tprint(\"Wrong syntax! Type 'help delete'\")\n\t\telse:\n\t\t\tprint(\"To delete contacts you need to open or create a book.\")", "def remove_contact(request, ck, contact_name):\n\n refresh_template = request.session[constants.ACTUAL_TEMPLATE]\n\n contacts = request.session[constants.ADD_CONTACTS]\n contact = next(el for el in contacts if el.contact == contact_name)\n\n if ck != \"0\":\n coding = get_object_or_404(CodingProject, id=ck)\n\n # TODO: Review this \n us = get_user(request)\n user = us\n\n # Project must have been created by the current user and\n # User must have permission to add new CodeRequest\n if coding.coder != user.id:\n raise Http404\n\n if coding.contacts.filter(contact=contact_name):\n cache_list = request.session[constants.REM_CONTACTS]\n cache_list.append(contact)\n\n contacts.remove(contact)\n request.session[constants.ADD_CONTACTS] = contacts\n\n # TODO: Centralize this?\n return HttpResponseRedirect(refresh_template)", "def do_delContact(self, line):\n\t\tif not(self.db is None):\n\t\t\ttry:\n\t\t\t\tself.db.contact.delete_one({'_id': ObjectId(line)})\n\t\t\texcept Exception:\n\t\t\t\tprint(\"This id doesn't exist!\")\n\t\telse:\n\t\t\tprint(\"You must open the existing database or create new one.\")", "def delete(self):\n self.skype.conn(\"DELETE\", \"{0}/users/{1}/contacts/8:{2}\"\n .format(SkypeConnection.API_CONTACTS, self.skype.userId, self.id),\n auth=SkypeConnection.Auth.SkypeToken)\n self.skype.conn(\"DELETE\", \"{0}/users/ME/contacts/8:{1}\".format(self.skype.conn.msgsHost, self.id),\n auth=SkypeConnection.Auth.RegToken)", "def delcontact(id):\n delid = str(id)\n\n try:\n r.srem(\"contacts\", delid, 1)\n\n r.delete(\"uid:\" + delid + \":name\")\n r.delete(\"uid:\" + delid + \":address\")\n r.delete(\"uid:\" + delid + \":phone\")\n 
r.delete(\"uid:\" + delid + \":email\")\n\n return {}\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise", "def delete_contact_in_db(self):\n self.init_db(self._testing)\n\n # making sure that the object is in the db\n assert not self.uid == \"\"\n\n self._delete_row_in_db(Contact.table_name, (self.uid,))", "def rm_contact_from_addressbook(database, name, surname, database_counter,\n database_ids):\n\n from addressbook.verify_contact import check_if_contact_exists\n\n if check_if_contact_exists(database, name, surname, database_counter,\n database_ids)[0] == 'Yes':\n print('The following contact will be removed:')\n id = check_if_contact_exists(database, name, surname, database_counter,\n database_ids)[1]\n print(str(id), '|', database[f'{id}']['first name'], '|',\n database[f'{id}']['last name'],\n '|', database[f'{id}']['address'], '|',\n database[f'{id}']['mobile phone'])\n del database[f'{id}']\n print('\\n')\n return id\n else:\n print('There is no such contact for deletion!')\n print('\\n')\n return 0", "def delete_account(self):\n Credential.account_list.remove(self)", "def removeContact(self, LibraryID, ListID, RecipientID, **kwargs):\n if not self.request(\"removeContact\",\n Product='TA',\n LibraryID=LibraryID,\n ListID=ListID,\n RecipientID=RecipientID,\n **kwargs):\n print(self.last_error_message)\n return None\n return self.json_response", "async def delete_contact(dbcon: DBConnection, contact_id: int) -> None:\n if not await contact_exists(dbcon, contact_id):\n raise errors.InvalidArguments('contact does not exist')\n q = \"\"\"delete from contacts where id=%s\"\"\"\n await dbcon.operation(q, (contact_id,))", "def remove_from_contact_list(self, contacts_to_remove_list):\n if self.contact_list is None:\n return\n for id in contacts_to_remove_list:\n if id in range(0, len(self.contact_list) + 1):\n self.contact_list[id - 1] = None\n self.contact_list = [contact for contact in self.contact_list if contact is not None]", "def remove_from_group(self, org, contact, group):\n pass", "def delete_account(self, account):\n \n pass", "def delete_user(self):\n raise NotImplementedError(\"Function not yet implemented contact package creator\")", "def remove_account(self, account, remove_ms_from_account=True):\r\n # check for accounts by name per Q2 bonus below\r\n if account.name in [account.name for account in self._accounts]:\r\n self._accounts.remove(account)\r\n if remove_ms_from_account:\r\n account.remove_from_market_segment(self)\r\n else:\r\n # nothing to do, the account wasn't part of the market\r\n # segment so we're done\r\n pass", "def delete_contact_from_personal_addressbook(self, contact_id, give_json=False):\n\n url = Constants.BASE_URL + 'users/addressbooks/personal'\n response = requests.delete(url=url, params={'key': self.user_access_token, 'contact_id': contact_id})\n\n if give_json:\n return response.json()\n else:\n return response.text", "def remove_contact(self, contact):\n super(CachingKBucket, self).remove_contact(contact)\n self.fill_from_cache()", "def remove_contacts(self, contacts, group=None, group_uuid=None):\n payload = self._build_params(contacts=contacts, action='remove', group=group, group_uuid=group_uuid)\n self._post('contact_actions', None, payload)", "def delete_contacts(self):\n self.db.delete_all_contacts()\n return self.update_contacts()", "def remove_contact_reference(self):\n self.reference_contact_datetime = None\n self.save()", "async def delete_contact_from_contact_group(dbcon: DBConnection, contact_group_id: int, contact_id: 
int) -> None:\n q = \"\"\"delete from contact_group_contacts where contact_group_id=%s and contact_id=%s\"\"\"\n q_args = (contact_group_id, contact_id)\n await dbcon.operation(q, q_args)", "def deleteAccountContact(self,accountId, contactId):\r\n\r\n\t\turl = MozuUrl(\"/api/commerce/customer/accounts/{accountId}/contacts/{contactId}\", \"DELETE\", UrlLocation.TenantPod, False);\r\n\t\turl.formatUrl(\"accountId\", accountId);\r\n\t\turl.formatUrl(\"contactId\", contactId);\r\n\t\tself.client.withResourceUrl(url).execute();", "def __ui_remove_person(self):\n remove_person_id = int(input(\"Introduce the ID of the person you want to remove: \"))\n self.__person_service.service_remove_person(remove_person_id)\n print(\"Person successfully removed from your agenda!\\n\")", "def mailman_remove(contact, listname=None, userack=None, admin_notify=None):\n\n\n mm, listname = _get_maillist(listname)\n print('mailman removing %s from %s' % (contact.email, listname), file=sys.stderr)\n if mm.isMember(contact.email):\n try:\n mm.Lock()\n mm.ApprovedDeleteMember(contact.email, 'satchmo_ext.newsletter', admin_notify, userack)\n mm.Save()\n finally:\n mm.Unlock()" ]
[ "0.7927598", "0.78937054", "0.76196754", "0.760548", "0.7403511", "0.7339263", "0.7177775", "0.71389616", "0.69924855", "0.6943864", "0.68841195", "0.681138", "0.6772057", "0.67245716", "0.66446775", "0.6630713", "0.66240466", "0.65467685", "0.653159", "0.64608634", "0.6421187", "0.6417622", "0.6339092", "0.6310416", "0.62944585", "0.62787396", "0.6250602", "0.62475985", "0.6226276", "0.6204473" ]
0.82682854
0
Remove all contacts from the selected account
def RemoveAll(self): contacts = self.GetContactList() for contact in contacts: self.BatchEnqueue('delete', contact) self.ExecuteBatchQueue()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_contacts(self):\n self.db.delete_all_contacts()\n return self.update_contacts()", "def del_contact_all(self):\n\n send_key(KEY_MENU)\n delstr = contact.get_value('contact_delete')\n if search_text(delstr):\n click_textview_by_text(delstr)\n click_checkbox_by_id('select_all_check')\n click_button_by_id('btn_ok')\n click_button_by_index(1)\n else:\n goback()\n\n sleep(2) #take a rest to wait view ...", "def clearContactsFromPhone():\n\tprint \"Deleting any contacts from phone...\"\n\tcmd =r\"adb shell pm clear com.android.providers.contacts\"\n\tos.system(cmd)\n\tprint \"Finished deleting contacts from phone.\"", "def delete_contact(self):\n delete_first_name = input(\"Enter first name that you want to delete\\n\")\n for contact in self.contact_list:\n if contact.first_name == delete_first_name:\n #print(str(contact))\n self.contact_list.remove(contact)\n else:\n print(f\"No contact is present with first name {delete_first_name} \")", "def remove_from_contact_list(self, contacts_to_remove_list):\n if self.contact_list is None:\n return\n for id in contacts_to_remove_list:\n if id in range(0, len(self.contact_list) + 1):\n self.contact_list[id - 1] = None\n self.contact_list = [contact for contact in self.contact_list if contact is not None]", "def delete_contacts(self, contacts):\n self._post('contact_actions', None, self._build_params(contacts=contacts, action='delete'))", "def remove_accounts(self):\n current_creds = self._accounts.copy()\n for creds in current_creds:\n self.remove_account(current_creds[creds].credentials.token,\n current_creds[creds].credentials.url)", "def delete(self):\n self.skype.conn(\"DELETE\", \"{0}/users/{1}/contacts/8:{2}\"\n .format(SkypeConnection.API_CONTACTS, self.skype.userId, self.id),\n auth=SkypeConnection.Auth.SkypeToken)\n self.skype.conn(\"DELETE\", \"{0}/users/ME/contacts/8:{1}\".format(self.skype.conn.msgsHost, self.id),\n auth=SkypeConnection.Auth.RegToken)", "def remove_contact(self):\n contact_mob_num = input(\"-=\" * 30 + \"\\n\" + \"Please enter contact's mobile number to be removed: \")\n contact = self.auth.get_users_by_MobNum(contact_mob_num)\n if (not contact) or contact not in self._user.contacts:\n print('This user not in your contact list')\n return self.homepage()\n \n self._user.remove_contact(contact)\n print('Contact removed successfully')\n return self.homepage()", "def remove_contact(self, date_limit):\n for provider in ServiceProvider.objects.filter(end_at__lt=date_limit, history=None):\n # Check for history versions\n for sp in ServiceProvider.objects.filter(history=provider.pk):\n for contact in Contact.objects.filter(sp=sp):\n self.output(\n sp.entity_id + \": Removing contact (history): \" + contact.firstname + \" \" + contact.lastname\n )\n if not self.list_only:\n contact.delete()\n for contact in Contact.objects.filter(sp=provider):\n self.output(provider.entity_id + \": Removing contact: \" + contact.firstname + \" \" + contact.lastname)\n if not self.list_only:\n contact.delete()", "def delete_account(self):\n Credential.account_list.remove(self)", "async def delete_contacts(self, **kwargs) -> List[CertificateContact]:\n contacts = await self._client.delete_certificate_contacts(\n vault_base_url=self.vault_url, **kwargs\n )\n return [CertificateContact._from_certificate_contacts_item(contact_item=item) for item in contacts.contact_list]", "def remove_contacts(self, contacts, group=None, group_uuid=None):\n payload = self._build_params(contacts=contacts, action='remove', group=group, group_uuid=group_uuid)\n 
self._post('contact_actions', None, payload)", "def RemoveContact(self, contact):\n\t\tself.client.Delete(contact)", "def clear_recipients(self):\n self._to = []\n self._cc = []\n self._bcc = []", "def unlink(self):\n analytic_accounts_to_delete = self.env['account.analytic.account']\n for project in self:\n if project.analytic_account_id and not project.analytic_account_id.line_ids:\n analytic_accounts_to_delete |= project.analytic_account_id\n result = super(Project, self).unlink()\n analytic_accounts_to_delete.unlink()\n return result", "def delete_all(cls):\n with sqlite3.connect(cls.dbpath) as connection:\n connection.row_factory = sqlite3.Row\n cursor = connection.cursor()\n SQL = \"DELETE FROM accounts;\"\n cursor.execute(SQL)", "def remove(self, contacts):\n if not contacts:\n return\n\n new_heap = []\n\n for ell in self._heap:\n if ell not in contacts: \n distance = distance_to(self._node_id, ell.getId())\n heapq.heappush(new_heap, (distance, ell))\n\n self._heap = new_heap", "def unblock_contacts(self, contacts):\n self._post('contact_actions', None, self._build_params(contacts=contacts, action='unblock'))", "def removeAll(self, addr: ghidra.program.model.address.Address) -> None:\n ...", "def remove_all(self):\n if self._processed:\n res, messages = self._mailconn.search(None, 'ALL')\n if res == 'OK':\n for msg in messages[0].split():\n res, data = self._mailconn.store(msg.decode('utf-8'), '+FLAGS', '\\\\Deleted')\n print(res)", "def delcontact(id):\n delid = str(id)\n\n try:\n r.srem(\"contacts\", delid, 1)\n\n r.delete(\"uid:\" + delid + \":name\")\n r.delete(\"uid:\" + delid + \":address\")\n r.delete(\"uid:\" + delid + \":phone\")\n r.delete(\"uid:\" + delid + \":email\")\n\n return {}\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise", "def remove_contact(self, contact):\n\t\tclient_log.debug(f'Удаление контакта {contact}')\n\t\treq = {\n\t\t\tACTION: REMOVE_CONTACT,\n\t\t\tTIME: time.time(),\n\t\t\tUSER: self.username,\n\t\t\tACCOUNT_NAME: contact\n\t\t}\n\t\twith socket_lock:\n\t\t\tsend_message(self.transport, req)\n\t\t\tself.process_server_ans(get_message(self.transport))", "def delete_account(self, account):\n \n pass", "def delete_selected(self, button):\n lines = self.get_lines(lambda tup: tup[4] and tup[5])\n if not lines:\n return\n if config['autoremove']:\n ips_to_delete = {line['ip'] for line in lines}\n accounts = self.get_lines(lambda tup: tup[4] in ips_to_delete)\n else:\n accounts = lines\n to_delete = {sleekxmpp.JID(account['jid']).bare for account in accounts}\n if len(to_delete) == 1:\n message = 'Are you sure you want to delete this account?'\n else:\n message = 'Are you sure you want to delete the %s selected accounts?' % len(to_delete)\n dialog = Gtk.MessageDialog(self,\n Gtk.DialogFlags.MODAL,\n Gtk.MessageType.QUESTION,\n Gtk.ButtonsType.YES_NO,\n message)\n response = dialog.run()\n dialog.destroy()\n if response == Gtk.ResponseType.NO:\n return\n self.admin.delete_users(list(to_delete))\n self.store.clear()\n self.admin.get_online_users()", "def do_deluser(self, line):\n\t\tif isinstance(self.cl, Book):\n\t\t\ttry:\n\t\t\t\tself.cl.del_contact(line)\n\t\t\texcept ValueError:\n\t\t\t\tprint(\"Wrong syntax! 
Type 'help delete'\")\n\t\telse:\n\t\t\tprint(\"To delete contacts you need to open or create a book.\")", "def MultiWaySync(self, accounts):\n\t\tcleaned_contacts = []\n\t\tcontacts = []\n\t\t\n\t\tfor account in accounts:\n\t\t\tself.SelectAccount(account)\n\t\t\tcontacts.extend(self.GetContactList())\n\t\t\n\t\tduplicates, originals = ceFindDuplicates(contacts)\n\t\tmerged, todelete = ceMergeDuplicates(duplicates)\n\t\t\n\t\tcleaned_contacts.extend(originals)\n\t\tcleaned_contacts.extend(merged)\n\t\t\n\t\tfor account in accounts:\n\t\t\tself.SelectAccount(account)\n\t\t\tself.RemoveAll()\n\t\t\n\t\tfor account in accounts:\n\t\t\tself.SelectAccount(account)\n\t\t\tfor contact in cleaned_contacts:\n\t\t\t\tself.BatchEnqueue('create', contact)\n\t\t\tself.ExecuteBatchQueue()", "def rm_contact_from_addressbook(database, name, surname, database_counter,\n database_ids):\n\n from addressbook.verify_contact import check_if_contact_exists\n\n if check_if_contact_exists(database, name, surname, database_counter,\n database_ids)[0] == 'Yes':\n print('The following contact will be removed:')\n id = check_if_contact_exists(database, name, surname, database_counter,\n database_ids)[1]\n print(str(id), '|', database[f'{id}']['first name'], '|',\n database[f'{id}']['last name'],\n '|', database[f'{id}']['address'], '|',\n database[f'{id}']['mobile phone'])\n del database[f'{id}']\n print('\\n')\n return id\n else:\n print('There is no such contact for deletion!')\n print('\\n')\n return 0", "def delete_contact_in_db(self):\n self.init_db(self._testing)\n\n # making sure that the object is in the db\n assert not self.uid == \"\"\n\n self._delete_row_in_db(Contact.table_name, (self.uid,))", "def ListAllContacts(self):\n feed = self.gd_client.GetContacts()\n self.contacts = self.CleanPhoneNumbers(self.GetContactsInfo(feed))\n return self.contacts" ]
[ "0.7838508", "0.73882663", "0.71244544", "0.6743833", "0.6735509", "0.66522604", "0.6644226", "0.65938866", "0.6535013", "0.6475446", "0.63914645", "0.62559044", "0.6243891", "0.6176765", "0.610713", "0.6046226", "0.60396665", "0.60009325", "0.598013", "0.5964348", "0.5959389", "0.5933774", "0.59215635", "0.58954096", "0.5889426", "0.5839892", "0.5835882", "0.5826297", "0.58233565", "0.5816048" ]
0.7585864
1
Copy all contacts from one account to another This method does not check for duplicates
def CopyContacts(self, from_nickname, to_nickname): self.SelectAccount(from_nickname) contacts = self.GetContactList() self.SelectAccount(to_nickname) for contact in contacts: self.BatchEnqueue('create', contact) self.ExecuteBatchQueue()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def MultiWaySync(self, accounts):\n\t\tcleaned_contacts = []\n\t\tcontacts = []\n\t\t\n\t\tfor account in accounts:\n\t\t\tself.SelectAccount(account)\n\t\t\tcontacts.extend(self.GetContactList())\n\t\t\n\t\tduplicates, originals = ceFindDuplicates(contacts)\n\t\tmerged, todelete = ceMergeDuplicates(duplicates)\n\t\t\n\t\tcleaned_contacts.extend(originals)\n\t\tcleaned_contacts.extend(merged)\n\t\t\n\t\tfor account in accounts:\n\t\t\tself.SelectAccount(account)\n\t\t\tself.RemoveAll()\n\t\t\n\t\tfor account in accounts:\n\t\t\tself.SelectAccount(account)\n\t\t\tfor contact in cleaned_contacts:\n\t\t\t\tself.BatchEnqueue('create', contact)\n\t\t\tself.ExecuteBatchQueue()", "def MoveContacts(self, from_nickname, to_nickname):\n\t\tself.SelectAccount(from_nickname)\n\t\tcontacts = self.GetContactList()\n\t\t\n\t\t# Copy contacts -before- deleting\n\t\tself.SelectAccount(to_nickname)\n\t\tfor contact in contacts:\n\t\t\tself.BatchEnqueue('create', contact)\n\t\tself.ExecuteBatchQueue()\n\t\t\n\t\t# Then delete\n\t\tself.SelectAccount(from_nickname)\n\t\tfor contact in contacts:\n\t\t\tself.BatchEnqueue('delete', contact)\n\t\tself.ExecuteBatchQueue()", "def update_contacts(self):\n self.contacts = self.db.list_contacts()\n return self.list_contacts()", "def contacts(self, contacts):\n\n self._contacts = contacts", "def contacts(self, contacts):\n\n self._contacts = contacts", "def push_all(self, contacts):\n for ell in contacts:\n self.push(ell)", "def copy_from_teamusercopy(apps, schema_editor):\n TeamUser = apps.get_model('status', 'TeamUser')\n TeamUserCopy = apps.get_model('status', 'TeamUserCopy')\n\n for teamusercopy in TeamUserCopy.objects.all():\n if TeamUser.objects.filter(team_id=teamusercopy.team_id, user_id=teamusercopy.user_id).count() == 0:\n TeamUser.objects.create(team_id=teamusercopy.team_id, user_id=teamusercopy.user_id)\n print('Created %s %s' % (teamusercopy.team_id, teamusercopy.user_id))\n else:\n print('Already exists... skipping')", "def pull_contacts(self, org, modified_after, modified_before, progress_callback=None):\n pass", "def copy_contents(self, trello, destination_list):\n\n for card in self._cards:\n card.copy(trello, destination_list)", "def duplicates_existing_address(self, new_address):\n\n addresses = Address.objects.filter(customer=self.user)\n\n for address in addresses:\n match = True\n for field in self.cleaned_data:\n value = self.cleaned_data[field]\n address_value = getattr(address, field)\n if value != address_value:\n match = False\n break\n if match:\n self.set_most_current_address(address)\n return False\n\n else:\n return True", "def mergeContacts(self):\n self.mergeDialog = MergeDialog(self.db, self.userList.getSelectedItems())\n self.mergeDialog.accepted.connect(self.refreshLists)\n self.mergeDialog.show()", "def copy_to_teamusercopy(apps, schema_editor):\n TeamUser = apps.get_model('status', 'TeamUser')\n TeamUserCopy = apps.get_model('status', 'TeamUserCopy')\n\n for teamuser in TeamUser.objects.all():\n if TeamUserCopy.objects.filter(team_id=teamuser.team_id, user_id=teamuser.user_id).count() == 0:\n TeamUserCopy.objects.create(team_id=teamuser.team_id, user_id=teamuser.user_id)\n print('Created %s %s' % (teamuser.team_id, teamuser.user_id))\n else:\n print('Already exists... 
skipping')", "def contacts_list_update(self):\n\t\tself.database.contacts_clear()\n\t\tclient_log.debug(f'Запрос контакт листа для пользователся {self.name}')\n\t\treq = {\n\t\t\tACTION: GET_CONTACTS,\n\t\t\tTIME: time.time(),\n\t\t\tUSER: self.username\n\t\t}\n\t\tclient_log.debug(f'Сформирован запрос {req}')\n\t\twith socket_lock:\n\t\t\tsend_message(self.transport, req)\n\t\t\tans = get_message(self.transport)\n\t\tclient_log.debug(f'Получен ответ {ans}')\n\t\tif RESPONSE in ans and ans[RESPONSE] == 202:\n\t\t\tfor contact in ans[LIST_INFO]:\n\t\t\t\tself.database.add_contact(contact)\n\t\telse:\n\t\t\tclient_log.error('Не удалось обновить список контактов.')", "def testCopyCampaigns(self):\n if self.__class__.campaign1 is None:\n self.testSaveCampaign()\n requests = [{\n 'campaignId': self.__class__.campaign1['id']\n }]\n self.assert_(isinstance(self.__class__.service.CopyCampaigns(requests),\n tuple))", "def test_save_multiple_contacts(self):\n self.new_contact.save_contact() # saving the new contact\n test_contact = Contact(\"Test\", \"User\", 254712345678, \"[email protected]\") # new user\n test_contact.save_contact() # saving the new contact\n self.assertEqual(len(Contact.contact_list), 2)", "def refreshContacts(self):\n contact_ids = self._getAllContactIds()\n contacts = self._getContacts(contact_ids)\n\n self.contacts = [LineContact(self, contact) for contact in contacts]\n\n self.contacts.sort()", "async def set_contacts(self, contacts: List[CertificateContact], **kwargs) -> List[CertificateContact]:\n new_contacts = await self._client.set_certificate_contacts(\n vault_base_url=self.vault_url,\n contacts=self._models.Contacts(contact_list=[c._to_certificate_contacts_item() for c in contacts]),\n **kwargs\n )\n return [\n CertificateContact._from_certificate_contacts_item(contact_item=item) for item in new_contacts.contact_list\n ]", "def test_copy_email(self):\n self.new_contact.save_contact()\n Contact.copy_email(254719702373)\n\n self.assertEqual(self.new_contact.email, pyperclip.paste())", "def test_save_multiple_contact(self):\n self.new_contact.save_contact()\n # new contact\n test_contact = Contact(\"Test\", \"user\", \"0798765432\", \"[email protected]\")\n test_contact.save_contact()\n self.assertEqual(len(Contact.contact_list), 2)", "def update_contacts(self, contact_list):\n updated_contacts = 0\n request_list = list()\n\n # stale_contacts contains all old contacts at first, all current\n # contacts get then removed so that the remaining can get deleted\n stale_contacts = set(self.contacts)\n\n for contact in contact_list:\n c = Persona.query.get(contact[\"id\"])\n\n if c is None:\n c = Persona(id=contact[\"id\"], _stub=True)\n\n if c._stub is True:\n request_list.append(contact[\"id\"])\n\n try:\n # Old and new contact; remove from stale list\n stale_contacts.remove(c)\n except KeyError:\n # New contact\n self.contacts.append(c)\n updated_contacts += 1\n\n # Remove old contacts that are not new contacts\n for contact in stale_contacts:\n self.contacts.remove(contact)\n\n app.logger.info(\"Updated {}'s contacts: {} added, {} removed, {} requested\".format(\n self.username, updated_contacts, len(stale_contacts), len(request_list)))\n\n return request_list", "def archive_contact_messages(self, org, contact):\n pass", "def archive_contacts(self, contacts):\n self._post('contact_actions', None, self._build_params(contacts=contacts, action='archive'))", "def merge_accounts(self, secret, account, destination, async=None):\n\n\t\tif not account:\n\t\t\taccount = 
signer.account_from_seed(secret)\n\n\t\tdef on_success(seq_fee):\n\t\t\treturn Transaction.account_merge(\n\t\t\t\taccount,\n\t\t\t\tdestination,\n\t\t\t\t*seq_fee\n\t\t\t)\n\n\t\treturn self.__transaction(secret, account, on_success, async)", "def ListAllContacts(self):\n feed = self.gd_client.GetContacts()\n self.contacts = self.CleanPhoneNumbers(self.GetContactsInfo(feed))\n return self.contacts", "def fill_from_cache(self):\n move_count = min(\n len(self._replacement_cache),\n constants.K - len(self._contacts)\n )\n\n for _ in range(move_count):\n self.add_contact(self._replacement_cache.pop())", "def add_contact(self):\n contact = Contact.create_contact()\n self.contact_list.append(contact)\n\n df = pd.read_csv('address_book.csv')\n #print(df)\n adf = pd.DataFrame({'FIRST NAME': [contact.first_name],\n 'LAST NAME': [contact.last_name],\n 'ADDRESS': [contact.address],\n 'CITY': [contact.city],\n 'STATE': [contact.state],\n 'ZIP CODE': [contact.zip],\n 'PHONE NUMBER': [contact.phone_number],\n 'EMAIL': [contact.email]})\n adf.to_csv('address_book.csv',mode='a', header=False, index=None)\n #storing all contacts in address_book.csv file\n \"\"\"with open(\"address_book.csv\", \"w\") as f:\n for contact in self.contact_list:\n f.write(f\"FIRST NAME -> {contact.first_name}\\n\"\n f\"LAST NAME -> {contact.last_name}\\n\"\n f\"ADDRESS -> {contact.address}\\n\"\n f\"CITY -> {contact.city}\\n\"\n f\"STATE -> {contact.state}\\n\"\n f\"ZIP CODE -> {contact.zip}\\n\"\n f\"PHONE NUMBER -> {contact.phone_number}\\n\"\n f\"EMAIL -> {contact.email}\\n\\n\")\"\"\"", "def test_sync_from_sugar_contact(self):\n LOG.debug('test_sync_from_sugar_contact')\n business = Business.objects.get(id=114)\n advertiser = Advertiser.objects.get(id=114)\n email = advertiser.email\n module = \"Contacts\"\n query = build_recent_entry_query(module=module, test_mode=True, \n get_modified=False, start=None)\n sugar_list = self.sugar.get_entry_list(module, query)\n sugar_dict = sugar_list[0]\n sugar_dict['advertiser_id_c'] = ''\n self.sugar.set_entry(module, dict_to_name_value(sugar_dict))\n billing_record = BillingRecord.objects.get(id=114)\n order = billing_record.orders.all()[0]\n order.delete()\n billing_record.delete()\n business.delete()\n consumer = Consumer.objects.get(email=email)\n consumer.delete()\n advertiser.delete()\n sync_business_from_sugar(test_mode=True, sugar=self.sugar)\n # business is not created since Sugar record modified by 10Coupons user\n try:\n business = Business.objects.get(advertiser=advertiser)\n self.assertTrue(False)\n except business.DoesNotExist:\n self.assertTrue(True)", "def action_merge(self, src_lists, archive):\n # Explation of the SQL query with an example. 
There are the following lists\n # A (id=4): [email protected]; [email protected]\n # B (id=5): [email protected]; [email protected]\n # C (id=6): nothing\n # To merge the mailing lists A and B into C, we build the view st that looks\n # like this with our example:\n #\n # contact_id | email | row_number | list_id |\n # ------------+---------------------------+------------------------\n # 4 | [email protected] | 1 | 4 |\n # 6 | [email protected] | 2 | 5 |\n # 5 | [email protected] | 1 | 4 |\n # 7 | [email protected] | 1 | 5 |\n #\n # The row_column is kind of an occurence counter for the email address.\n # Then we create the Many2many relation between the destination list and the contacts\n # while avoiding to insert an existing email address (if the destination is in the source\n # for example)\n self.ensure_one()\n # Put destination is sources lists if not already the case\n src_lists |= self\n self.env['mailing.contact'].flush(['email', 'email_normalized'])\n self.env['mailing.contact.subscription'].flush(['contact_id', 'opt_out', 'list_id'])\n self.env.cr.execute(\"\"\"\n INSERT INTO mailing_contact_list_rel (contact_id, list_id)\n SELECT st.contact_id AS contact_id, %s AS list_id\n FROM\n (\n SELECT\n contact.id AS contact_id,\n contact.email AS email,\n list.id AS list_id,\n row_number() OVER (PARTITION BY email ORDER BY email) AS rn\n FROM\n mailing_contact contact,\n mailing_contact_list_rel contact_list_rel,\n mailing_list list\n WHERE contact.id=contact_list_rel.contact_id\n AND COALESCE(contact_list_rel.opt_out,FALSE) = FALSE\n AND contact.email_normalized NOT IN (select email from mail_blacklist where active = TRUE)\n AND list.id=contact_list_rel.list_id\n AND list.id IN %s\n AND NOT EXISTS\n (\n SELECT 1\n FROM\n mailing_contact contact2,\n mailing_contact_list_rel contact_list_rel2\n WHERE contact2.email = contact.email\n AND contact_list_rel2.contact_id = contact2.id\n AND contact_list_rel2.list_id = %s\n )\n ) st\n WHERE st.rn = 1;\"\"\", (self.id, tuple(src_lists.ids), self.id))\n self.flush()\n self.invalidate_cache()\n if archive:\n (src_lists - self).action_archive()", "def add_contact_to_google_account(self, i):\n\n self.add_contact_to_phone(i)", "def writecontactstocsv(self , contact_entries):\n rx = re.compile('\\W+')\n allcontacts = []\n for entry in contact_entries:\n if entry.name is not None and len(entry.phone_number) > 0 and len(entry.group_membership_info) > 0:\n\n # Clean up characters in contact name; replace all non-alphanumerics with spaces\n fullname = entry.name.full_name.text\n fullname = rx.sub(' ', fullname).strip()\n for rawPhoneNumber in entry.phone_number:\n # Remove non-numeric characters from the phone number\n phone_number = re.sub(\"[^0-9]\", \"\", rawPhoneNumber.text)\n # Save contact for later insert\n allcontacts.append((fullname, phone_number))\n\n allcontacts = tuple(set(allcontacts))\n\n csvfilename = \"Downloads/ContactExport\"+time.strftime(\"%Y%m%d-%H%M%S\")+\".csv\"\n csvfile = open(csvfilename, \"w\")\n for csvFullName, csvPhoneNumber in allcontacts:\n line = \"\\\"%s\\\",%s\\n\" % (csvFullName, csvPhoneNumber)\n csvfile.write(line)\n\n csvfile.close()" ]
[ "0.6812184", "0.6329817", "0.5751867", "0.5707562", "0.5707562", "0.5586134", "0.5383878", "0.5352327", "0.5340893", "0.53407866", "0.53282636", "0.53140664", "0.5285255", "0.5284708", "0.52719766", "0.52107036", "0.52066034", "0.51984245", "0.51651853", "0.5159195", "0.5151738", "0.51386243", "0.50983644", "0.5080244", "0.5077525", "0.5073873", "0.50716347", "0.50254536", "0.50166845", "0.49841234" ]
0.756032
0
Move all contacts from one account to another This method does not check for duplicates
def MoveContacts(self, from_nickname, to_nickname): self.SelectAccount(from_nickname) contacts = self.GetContactList() # Copy contacts -before- deleting self.SelectAccount(to_nickname) for contact in contacts: self.BatchEnqueue('create', contact) self.ExecuteBatchQueue() # Then delete self.SelectAccount(from_nickname) for contact in contacts: self.BatchEnqueue('delete', contact) self.ExecuteBatchQueue()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def MultiWaySync(self, accounts):\n\t\tcleaned_contacts = []\n\t\tcontacts = []\n\t\t\n\t\tfor account in accounts:\n\t\t\tself.SelectAccount(account)\n\t\t\tcontacts.extend(self.GetContactList())\n\t\t\n\t\tduplicates, originals = ceFindDuplicates(contacts)\n\t\tmerged, todelete = ceMergeDuplicates(duplicates)\n\t\t\n\t\tcleaned_contacts.extend(originals)\n\t\tcleaned_contacts.extend(merged)\n\t\t\n\t\tfor account in accounts:\n\t\t\tself.SelectAccount(account)\n\t\t\tself.RemoveAll()\n\t\t\n\t\tfor account in accounts:\n\t\t\tself.SelectAccount(account)\n\t\t\tfor contact in cleaned_contacts:\n\t\t\t\tself.BatchEnqueue('create', contact)\n\t\t\tself.ExecuteBatchQueue()", "def CopyContacts(self, from_nickname, to_nickname):\n\t\tself.SelectAccount(from_nickname)\n\t\tcontacts = self.GetContactList()\n\t\t\n\t\tself.SelectAccount(to_nickname)\n\t\tfor contact in contacts:\n\t\t\tself.BatchEnqueue('create', contact)\n\t\tself.ExecuteBatchQueue()", "def update_contacts(self):\n self.contacts = self.db.list_contacts()\n return self.list_contacts()", "def contacts(self, contacts):\n\n self._contacts = contacts", "def contacts(self, contacts):\n\n self._contacts = contacts", "def update_contacts(self, contact_list):\n updated_contacts = 0\n request_list = list()\n\n # stale_contacts contains all old contacts at first, all current\n # contacts get then removed so that the remaining can get deleted\n stale_contacts = set(self.contacts)\n\n for contact in contact_list:\n c = Persona.query.get(contact[\"id\"])\n\n if c is None:\n c = Persona(id=contact[\"id\"], _stub=True)\n\n if c._stub is True:\n request_list.append(contact[\"id\"])\n\n try:\n # Old and new contact; remove from stale list\n stale_contacts.remove(c)\n except KeyError:\n # New contact\n self.contacts.append(c)\n updated_contacts += 1\n\n # Remove old contacts that are not new contacts\n for contact in stale_contacts:\n self.contacts.remove(contact)\n\n app.logger.info(\"Updated {}'s contacts: {} added, {} removed, {} requested\".format(\n self.username, updated_contacts, len(stale_contacts), len(request_list)))\n\n return request_list", "def push_all(self, contacts):\n for ell in contacts:\n self.push(ell)", "def refreshContacts(self):\n contact_ids = self._getAllContactIds()\n contacts = self._getContacts(contact_ids)\n\n self.contacts = [LineContact(self, contact) for contact in contacts]\n\n self.contacts.sort()", "def archive_contact_messages(self, org, contact):\n pass", "def remove_from_contact_list(self, contacts_to_remove_list):\n if self.contact_list is None:\n return\n for id in contacts_to_remove_list:\n if id in range(0, len(self.contact_list) + 1):\n self.contact_list[id - 1] = None\n self.contact_list = [contact for contact in self.contact_list if contact is not None]", "def archive_contacts(self, contacts):\n self._post('contact_actions', None, self._build_params(contacts=contacts, action='archive'))", "def remove_existing_customers(self):\n # remove the customers which are not active (.is_active )\n self.to_move = False\n #for cust in self.customers:\n # print(cust.state)\n self.customers = [cust for cust in self.customers if cust.state != 'checkout']\n #if cust.to_move():\n # self.to_move = True", "def delete_contacts(self):\n self.db.delete_all_contacts()\n return self.update_contacts()", "def move_aliases(apps, schema_editor):\n OldAlias = apps.get_model(\"modoboa_postfix_autoreply\", \"Alias\")\n Alias = apps.get_model(\"admin\", \"Alias\")\n AliasRecipient = apps.get_model(\"admin\", \"AliasRecipient\")\n 
try:\n ObjectDates = apps.get_model(\"admin\", \"ObjectDates\")\n except LookupError:\n ObjectDates = None\n to_create = []\n for old_alias in OldAlias.objects.all():\n values = {\"address\": old_alias.full_address, \"internal\": True}\n try:\n alias = Alias.objects.get(**values)\n except Alias.DoesNotExist:\n if ObjectDates:\n values[\"dates\"] = ObjectDates.objects.create()\n alias = Alias.objects.create(**values)\n to_create.append(AliasRecipient(\n address=old_alias.autoreply_address, alias=alias))\n AliasRecipient.objects.bulk_create(to_create)", "def contacts_list_update(self):\n\t\tself.database.contacts_clear()\n\t\tclient_log.debug(f'Запрос контакт листа для пользователся {self.name}')\n\t\treq = {\n\t\t\tACTION: GET_CONTACTS,\n\t\t\tTIME: time.time(),\n\t\t\tUSER: self.username\n\t\t}\n\t\tclient_log.debug(f'Сформирован запрос {req}')\n\t\twith socket_lock:\n\t\t\tsend_message(self.transport, req)\n\t\t\tans = get_message(self.transport)\n\t\tclient_log.debug(f'Получен ответ {ans}')\n\t\tif RESPONSE in ans and ans[RESPONSE] == 202:\n\t\t\tfor contact in ans[LIST_INFO]:\n\t\t\t\tself.database.add_contact(contact)\n\t\telse:\n\t\t\tclient_log.error('Не удалось обновить список контактов.')", "def fill_from_cache(self):\n move_count = min(\n len(self._replacement_cache),\n constants.K - len(self._contacts)\n )\n\n for _ in range(move_count):\n self.add_contact(self._replacement_cache.pop())", "def fix_account(self, account):\r\n if type(account) is not Account:\r\n return False\r\n for acc in self.account:\r\n if acc == account:\r\n return acc.fix()\r\n return False", "def delete_contact(self):\n delete_first_name = input(\"Enter first name that you want to delete\\n\")\n for contact in self.contact_list:\n if contact.first_name == delete_first_name:\n #print(str(contact))\n self.contact_list.remove(contact)\n else:\n print(f\"No contact is present with first name {delete_first_name} \")", "def pull_contacts(self, org, modified_after, modified_before, progress_callback=None):\n pass", "def test_sync_from_sugar_contact(self):\n LOG.debug('test_sync_from_sugar_contact')\n business = Business.objects.get(id=114)\n advertiser = Advertiser.objects.get(id=114)\n email = advertiser.email\n module = \"Contacts\"\n query = build_recent_entry_query(module=module, test_mode=True, \n get_modified=False, start=None)\n sugar_list = self.sugar.get_entry_list(module, query)\n sugar_dict = sugar_list[0]\n sugar_dict['advertiser_id_c'] = ''\n self.sugar.set_entry(module, dict_to_name_value(sugar_dict))\n billing_record = BillingRecord.objects.get(id=114)\n order = billing_record.orders.all()[0]\n order.delete()\n billing_record.delete()\n business.delete()\n consumer = Consumer.objects.get(email=email)\n consumer.delete()\n advertiser.delete()\n sync_business_from_sugar(test_mode=True, sugar=self.sugar)\n # business is not created since Sugar record modified by 10Coupons user\n try:\n business = Business.objects.get(advertiser=advertiser)\n self.assertTrue(False)\n except business.DoesNotExist:\n self.assertTrue(True)", "def remove(self, contacts):\n if not contacts:\n return\n\n new_heap = []\n\n for ell in self._heap:\n if ell not in contacts: \n distance = distance_to(self._node_id, ell.getId())\n heapq.heappush(new_heap, (distance, ell))\n\n self._heap = new_heap", "def fix_account(self, account):\n corrupted = 0\n zip_check = 0\n addr_check = 0\n for elem in self.account:\n if account in elem.__dict__.values():\n corrupted = elem\n if corrupted == 0:\n print(\"Couldn't find account.\")\n return 
False\n keys = list(corrupted.__dict__.keys())\n if 'name' not in keys:\n corrupted.__dict__['name'] = 'Restored account'\n if 'id' not in keys:\n corrupted.__dict__['id'] = Account.ID_COUNT\n Account.ID_COUNT += 1\n if 'value' not in keys:\n corrupted.__dict__['value'] = 0\n for key in keys:\n if key.startswith('zip'):\n zip_check = 1\n if key.startswith('addr'):\n addr_check = 1\n if key.startswith('b'):\n corrupted.__dict__.pop(key)\n if zip_check == 0:\n corrupted.__dict__['zip'] = '00000'\n if addr_check == 0:\n corrupted.__dict__['addr'] = '42 rue des Corruptions'\n if len(corrupted.__dict__) % 2 == 0:\n for key in corrupted.__dict__.keys():\n if key == 'name' or key == 'id' or key == 'value':\n pass\n elif key.startswith('zip') or key.startswith('addr'):\n pass\n else:\n corrupted.__dict__.pop(key)\n break\n if self.corrupted(corrupted):\n print(\"Couldn't fix account.\")\n return False\n else:\n print(\"Successfully fixed account !\")\n return True", "def duplicates_existing_address(self, new_address):\n\n addresses = Address.objects.filter(customer=self.user)\n\n for address in addresses:\n match = True\n for field in self.cleaned_data:\n value = self.cleaned_data[field]\n address_value = getattr(address, field)\n if value != address_value:\n match = False\n break\n if match:\n self.set_most_current_address(address)\n return False\n\n else:\n return True", "def test_save_multiple_contacts(self):\n self.new_contact.save_contact() # saving the new contact\n test_contact = Contact(\"Test\", \"User\", 254712345678, \"[email protected]\") # new user\n test_contact.save_contact() # saving the new contact\n self.assertEqual(len(Contact.contact_list), 2)", "def remove_contact(self, date_limit):\n for provider in ServiceProvider.objects.filter(end_at__lt=date_limit, history=None):\n # Check for history versions\n for sp in ServiceProvider.objects.filter(history=provider.pk):\n for contact in Contact.objects.filter(sp=sp):\n self.output(\n sp.entity_id + \": Removing contact (history): \" + contact.firstname + \" \" + contact.lastname\n )\n if not self.list_only:\n contact.delete()\n for contact in Contact.objects.filter(sp=provider):\n self.output(provider.entity_id + \": Removing contact: \" + contact.firstname + \" \" + contact.lastname)\n if not self.list_only:\n contact.delete()", "def RemoveAll(self):\n\t\tcontacts = self.GetContactList()\n\t\t\n\t\tfor contact in contacts:\n\t\t\tself.BatchEnqueue('delete', contact)\n\t\tself.ExecuteBatchQueue()", "def remove_contact(self):\n contact_mob_num = input(\"-=\" * 30 + \"\\n\" + \"Please enter contact's mobile number to be removed: \")\n contact = self.auth.get_users_by_MobNum(contact_mob_num)\n if (not contact) or contact not in self._user.contacts:\n print('This user not in your contact list')\n return self.homepage()\n \n self._user.remove_contact(contact)\n print('Contact removed successfully')\n return self.homepage()", "def truncate_contact_list(self, LibraryID, ListID):\n list_of_contacts = self.getListContacts(LibraryID=LibraryID, ListID=ListID)\n failures = []\n if list_of_contacts:\n for contact in list_of_contacts:\n RecipientID = contact['RecipientID']\n try:\n self.removeContact(LibraryID=LibraryID, ListID=ListID, RecipientID=RecipientID)\n except Exception as e:\n # print e\n failures.append(RecipientID)\n if failures:\n return True, []\n else:\n return False, failures", "def mergeContacts(self):\n self.mergeDialog = MergeDialog(self.db, self.userList.getSelectedItems())\n 
self.mergeDialog.accepted.connect(self.refreshLists)\n self.mergeDialog.show()", "def update_contacts(self, contacts):\n\n if contacts.time.size != 1:\n raise IndexError(\"Contacts should be from one frame only\")\n if contacts.channel.size != self.contacts.channel.size:\n self.new_contact_set(contacts)\n return # Prevent calling update_contacts recursively\n self.contacts = contacts\n contacts = np.array(contacts)\n\n for i, actor in enumerate(self.contacts_actors):\n # mapper = actors.GetNextActor().GetMapper()\n mapper = actor.GetMapper()\n self.contacts_actors[i].GetProperty().SetColor(self.contacts_color)\n self.contacts_actors[i].GetProperty().SetOpacity(self.contacts_opacity)\n source = vtkSphereSource()\n source.SetCenter(contacts[0:3, i])\n source.SetRadius(self.contacts_size)\n mapper.SetInputConnection(source.GetOutputPort())" ]
[ "0.69824326", "0.6433265", "0.57518244", "0.56264514", "0.56264514", "0.5607597", "0.5603125", "0.55658954", "0.5536112", "0.5534724", "0.54958487", "0.5490326", "0.5480002", "0.5392368", "0.53907484", "0.5382409", "0.53588146", "0.5324467", "0.5263877", "0.52340406", "0.52107525", "0.5201146", "0.5178486", "0.5129056", "0.5116332", "0.5109608", "0.5108341", "0.5080843", "0.50784147", "0.5076118" ]
0.72994787
0
Perform a multiway sync between given accounts
def MultiWaySync(self, accounts): cleaned_contacts = [] contacts = [] for account in accounts: self.SelectAccount(account) contacts.extend(self.GetContactList()) duplicates, originals = ceFindDuplicates(contacts) merged, todelete = ceMergeDuplicates(duplicates) cleaned_contacts.extend(originals) cleaned_contacts.extend(merged) for account in accounts: self.SelectAccount(account) self.RemoveAll() for account in accounts: self.SelectAccount(account) for contact in cleaned_contacts: self.BatchEnqueue('create', contact) self.ExecuteBatchQueue()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sync_nas(self, users_from_db: Iterator):", "def synch_all(cls, account, type, filter=None, *args):\n for repo_data in repositories(account, type, filter):\n repo = cls(repo_data)\n repo.synch(*args)", "def sync(self, sync_from, sync_to, **kwargs):\n return self.exec_command('sync %s %s' % (sync_from, sync_to), **kwargs)", "def sync(type, all):\n print(\"Syncing\")", "def test_sync_account(self):\n\n runner = CliRunner()\n LOG.info(\"Testing 'calm sync account {}\".format(ACCOUNT_NAME))\n result = runner.invoke(\n cli,\n [\"sync\", \"account\", ACCOUNT_NAME],\n )\n\n if result.exit_code:\n cli_res_dict = {\"Output\": result.output, \"Exception\": str(result.exception)}\n LOG.debug(\n \"Cli Response: {}\".format(\n json.dumps(cli_res_dict, indent=4, separators=(\",\", \": \"))\n )\n )\n LOG.debug(\n \"Traceback: \\n{}\".format(\n \"\".join(traceback.format_tb(result.exc_info[2]))\n )\n )\n pytest.fail(\"Account sync failed\")\n\n LOG.info(\"Success\")", "def update_synchronization():\n logger.debug(\"Update synchronizations started\")\n for sa in SocialAttributes.objects.filter(start_page_token__isnull=False):\n if should_sync(sa.user, 'google-oauth2', 'tasks.gdrive'):\n if sa.user.social_auth.filter(provider='google-oauth2').first():\n access_token, refresh_token = get_google_tokens(sa.user)\n subtask(sync_gdrive_changes).delay(sa.user, access_token, refresh_token, sa.start_page_token)\n else:\n logger.info(\"Gdrive oauth token for user '%s' already in use, skipping sync ...\", sa.user.username)", "def unison_sync(paths_to_sync):\n log('Synchronizing CA (%s) to all peers.' % (', '.join(paths_to_sync)),\n level=INFO)\n keystone_gid = grp.getgrnam('keystone').gr_gid\n\n # NOTE(dosaboy): This will sync to all peers who have already provided\n # their ssh keys. 
If any existing peers have not provided their keys yet,\n # they will be silently ignored.\n unison.sync_to_peers(peer_interface='cluster', paths=paths_to_sync,\n user=SSH_USER, verbose=True, gid=keystone_gid,\n fatal=True)\n\n synced_units = peer_units()\n if len(unison.collect_authed_hosts('cluster')) != len(synced_units):\n log(\"Not all peer units synced due to missing public keys\", level=INFO)\n return None\n else:\n return synced_units", "async def sync_pairs(self):\n\n self.watch_only_pairs = []\n\n await self._handle_trader_watch_pairs()\n await self._handle_balancer_watch_pairs()\n\n for pair in self.market.pairs + self.market.extra_base_pairs:\n await self.prepare_trades(pair)\n await self.prepare_states(pair)\n await self.prepare_last_trades(pair)\n\n await self.prepare_all_trade_stats()\n await self.balancer.sync_pairs()", "def sync_account(account):\n stripe_account = stripe.Account.retrieve(id=account.stripe_id)\n return sync_account_from_stripe_data(stripe_account)", "def sync(self):\n acctManager = self.acctManager\n acct = acctManager.account(0)\n gapPolicy = 5\n acct.generateGapAddresses(gapPolicy)\n watchAddresses = set()\n\n # send the initial balance\n self.signals.balance(acct.balance)\n addresses = acct.allAddresses()\n \n # Update the account with known UTXOs.\n chain = self.blockchain\n blockchainUTXOs = chain.UTXOs(addresses)\n acct.resolveUTXOs(blockchainUTXOs)\n\n # Subscribe to block and address updates.\n chain.subscribeBlocks(self.blockSignal)\n watchAddresses = acct.addressesOfInterest()\n if watchAddresses:\n chain.subscribeAddresses(watchAddresses, self.addressSignal)\n # Signal the new balance.\n b = acct.calcBalance(self.blockchain.tip[\"height\"])\n self.signals.balance(b)\n self.save()\n return True", "def half_sync(self,delay):\n self.count = 1\n while not self.shutdown and self.loggedin.autosync:\n time.sleep(delay)\n self.count += 1\n self.filelist = self.loggedin.list()\n print \"Pinged server for changes\"\n self.synced = []\n if self.filelist:\n for f in self.filelist:\n path = self.loggedin.sanitize_path(f['path'])\n path = os.path.join(self.onedirrectory, path)\n if not os.path.exists(path):\n os.makedirs(path)\n if f['name'] and not self.loggedin.exists(f):\n exists, data = self.loggedin.getfile(f)\n if exists:\n with open(self.loggedin.make_path(f), 'a') as new_file:\n new_file.write(data)\n new_file.close()\n elif f['name'] and str(self.loggedin.hash_file(f)) != str(f['hash']):\n self.loggedin.sendfile(f['name'], f['path'])\n if self.loggedin.make_path(f) not in self.synced:\n self.synced.append(self.loggedin.make_path(f))\n os_walk = os.walk(self.loggedin.onedirrectory)\n for directory in os_walk:\n for f in directory[2]:\n if f.startswith('.'):\n continue\n path = os.path.join(directory[0], f)\n if path not in self.synced:\n try:\n os.remove(path)\n except OSError, e:\n print (\"Error: %s - %s.\" % (e.filename,e.strerror))", "def sync(self):\n for subscription in self.getSubscriptionList():\n #user_id = subscription.getZopeUser()\n #uf = self.getPortalObject().acl_users\n #user = uf.getUserById(user_id).__of__(uf)\n #newSecurityManager(None, user)\n subscription.activate(activity='SQLQueue',\n tag=subscription.getId(),\n priority=ACTIVITY_PRIORITY\n ).SubSync(subscription.getPath())", "def test_synchronize_one_network(self):\n\n # Store two tenants in a db and a single tenant in EOS.\n # The sync should send details of the second tenant to EOS\n tenant_1_id = 'tenant-1'\n tenant_1_net_1_id = 'ten-1-net-1'\n tenant_1_net_1_seg_id = 11\n 
db_lib.remember_tenant(self.admin_ctx, tenant_1_id)\n db_lib.remember_network_segment(self.admin_ctx, tenant_1_id,\n tenant_1_net_1_id,\n tenant_1_net_1_seg_id, 'segment_id_11')\n\n tenant_2_id = 'tenant-2'\n tenant_2_net_1_id = 'ten-2-net-1'\n tenant_2_net_1_seg_id = 21\n db_lib.remember_tenant(self.admin_ctx, tenant_2_id)\n db_lib.remember_network_segment(self.admin_ctx, tenant_2_id,\n tenant_2_net_1_id,\n tenant_2_net_1_seg_id, 'segment_id_21')\n\n self.rpc.get_tenants.return_value = {\n tenant_1_id: {\n 'tenantVmInstances': {},\n 'tenantBaremetalInstances': {},\n 'tenantNetworks': {\n tenant_1_net_1_id: {\n 'networkId': tenant_1_net_1_id,\n 'shared': False,\n 'networkName': 'Net1',\n 'segmenationType': 'vlan',\n 'segmentationTypeId': tenant_1_net_1_seg_id,\n }\n }\n }\n }\n\n self.rpc.sync_start.return_value = True\n self.rpc.sync_end.return_value = True\n self.rpc.check_cvx_availability.return_value = True\n\n self.rpc._baremetal_supported.return_value = False\n self.rpc.get_all_baremetal_hosts.return_value = {}\n\n self.sync_service.do_synchronize()\n\n expected_calls = [\n mock.call.perform_sync_of_sg(self.sync_service._context),\n mock.call.check_cvx_availability(),\n mock.call.get_region_updated_time(),\n mock.call.get_region_updated_time().__bool__(),\n mock.call.sync_start(),\n mock.call.register_with_eos(sync=True),\n mock.call.check_supported_features(),\n mock.call.get_tenants(),\n\n mock.call.create_network_bulk(\n tenant_2_id,\n [{'network_id': tenant_2_net_1_id,\n 'segments': [],\n 'network_name': '',\n 'shared': False}],\n sync=True),\n\n mock.call.sync_end(),\n mock.call.get_region_updated_time()\n ]\n\n self.assertTrue(self.rpc.mock_calls == expected_calls,\n \"Seen: %s\\nExpected: %s\" % (\n self.rpc.mock_calls,\n expected_calls,\n )\n )\n\n db_lib.forget_network_segment(self.admin_ctx, tenant_1_id,\n tenant_1_net_1_id)\n db_lib.forget_network_segment(self.admin_ctx, tenant_2_id,\n tenant_2_net_1_id)\n db_lib.forget_tenant(self.admin_ctx, tenant_1_id)\n db_lib.forget_tenant(self.admin_ctx, tenant_2_id)", "def sync(directory_1, directory_2, key_address, user, server):\n\n import os\n # Creates a list of files in the working directory\n files = os.listdir()\n\n\n # If the bash file already exists, it deletes the bash file before making progress\n if 'sync.sh' in files: \n os.remove('sync.sh')\n else:\n pass\n\n\n with open('sync.sh', 'w') as f:\n f.write('#!/bin/bash\\n')\n f.write('zip -r my_files.zip ' + str(directory_1) + '\\n')\n f.write('scp -i ' + str(key_address) + ' my_files.zip ' + str(user) + \\\n '@' + str(server) + ':' + str(directory_2))", "def sync_auth(self, vault_client, resources):\n for auth in self.auths():\n auth.sync(vault_client)\n\n auth_resources = [x for x in resources\n if isinstance(x, (LDAP, UserPass))]\n for resource in auth_resources:\n resource.sync(vault_client)\n\n return [x for x in resources\n if not isinstance(x, (LDAP, UserPass, AuditLog))]", "def test_synchronize_all_networks(self):\n\n # Store two tenants in a db and none on EOS.\n # The sync should send details of all tenants to EOS\n tenant_1_id = 'tenant-1'\n tenant_1_net_1_id = 'ten-1-net-1'\n tenant_1_net_1_seg_id = 11\n db_lib.remember_tenant(self.admin_ctx, tenant_1_id)\n db_lib.remember_network_segment(self.admin_ctx, tenant_1_id,\n tenant_1_net_1_id,\n tenant_1_net_1_seg_id, 'segment_id_11')\n\n tenant_2_id = 'tenant-2'\n tenant_2_net_1_id = 'ten-2-net-1'\n tenant_2_net_1_seg_id = 21\n db_lib.remember_tenant(self.admin_ctx, tenant_2_id)\n 
db_lib.remember_network_segment(self.admin_ctx, tenant_2_id,\n tenant_2_net_1_id,\n tenant_2_net_1_seg_id, 'segment_id_21')\n\n self.rpc.get_tenants.return_value = {}\n\n self.rpc.sync_start.return_value = True\n self.rpc.sync_end.return_value = True\n self.rpc.check_cvx_availability.return_value = True\n\n self.rpc._baremetal_supported.return_value = False\n self.rpc.get_all_baremetal_hosts.return_value = {}\n\n self.sync_service.do_synchronize()\n\n expected_calls = [\n mock.call.perform_sync_of_sg(self.sync_service._context),\n mock.call.check_cvx_availability(),\n mock.call.get_region_updated_time(),\n mock.call.get_region_updated_time().__bool__(),\n mock.call.sync_start(),\n mock.call.register_with_eos(sync=True),\n mock.call.check_supported_features(),\n mock.call.get_tenants(),\n\n mock.call.create_network_bulk(\n tenant_1_id,\n [{'network_id': tenant_1_net_1_id,\n 'segments': [],\n 'network_name': '',\n 'shared': False}],\n sync=True),\n\n mock.call.create_network_bulk(\n tenant_2_id,\n [{'network_id': tenant_2_net_1_id,\n 'segments': [],\n 'network_name': '',\n 'shared': False}],\n sync=True),\n mock.call.sync_end(),\n mock.call.get_region_updated_time()\n ]\n\n # The create_network_bulk() can be called in different order. So split\n # it up. The first part checks if the initial set of methods are\n # invoked.\n idx = expected_calls.index(mock.call.get_tenants()) + 1\n self.assertTrue(self.rpc.mock_calls[:idx] == expected_calls[:idx],\n \"Seen: %s\\nExpected: %s\" % (\n self.rpc.mock_calls,\n expected_calls,\n )\n )\n # Check if tenant 1 networks are created. It must be one of the two\n # methods.\n self.assertTrue(self.rpc.mock_calls[idx] in\n expected_calls[idx:idx + 2],\n \"Seen: %s\\nExpected: %s\" % (\n self.rpc.mock_calls,\n expected_calls,\n )\n )\n # Check if tenant 2 networks are created. 
It must be one of the two\n # methods.\n self.assertTrue(self.rpc.mock_calls[idx + 1] in\n expected_calls[idx:idx + 2],\n \"Seen: %s\\nExpected: %s\" % (\n self.rpc.mock_calls,\n expected_calls,\n )\n )\n # Check if the sync end methods are invoked.\n self.assertTrue(self.rpc.mock_calls[idx + 2:] ==\n expected_calls[idx + 2:],\n \"Seen: %s\\nExpected: %s\" % (\n self.rpc.mock_calls,\n expected_calls,\n )\n )\n\n db_lib.forget_network_segment(self.admin_ctx, tenant_1_id,\n tenant_1_net_1_id)\n db_lib.forget_network_segment(self.admin_ctx, tenant_2_id,\n tenant_2_net_1_id)\n db_lib.forget_tenant(self.admin_ctx, tenant_1_id)\n db_lib.forget_tenant(self.admin_ctx, tenant_2_id)", "def sync(config, group, accounts=(), dryrun=False, region=None):\n config = validate.callback(config)\n destination = config.get('destination')\n client = boto3.Session().client('s3')\n\n for account in config.get('accounts', ()):\n if accounts and account['name'] not in accounts:\n continue\n\n session = get_session(account['role'], region)\n account_id = session.client('sts').get_caller_identity()['Account']\n prefix = destination.get('prefix', '').rstrip('/') + '/%s' % account_id\n prefix = \"%s/%s\" % (prefix, group)\n\n exports = get_exports(client, destination['bucket'], prefix + \"/\")\n\n role = account.pop('role')\n if isinstance(role, str):\n account['account_id'] = role.split(':')[4]\n else:\n account['account_id'] = role[-1].split(':')[4]\n account.pop('groups')\n\n if exports:\n last_export = exports.pop()\n account['export'] = last_export\n else:\n account['export'] = 'missing'\n last_export = None\n try:\n tag_set = client.get_object_tagging(\n Bucket=destination['bucket'], Key=prefix).get('TagSet', [])\n except ClientError:\n tag_set = []\n\n tags = {t['Key']: t['Value'] for t in tag_set}\n tagged_last_export = None\n\n if 'LastExport' in tags:\n le = parse(tags['LastExport'])\n tagged_last_export = (le.year, le.month, le.day)\n account['sync'] = tagged_last_export\n else:\n account['sync'] = account['export'] != 'missing' and 'sync' or 'missing'\n\n if last_export is None:\n continue\n\n if tagged_last_export == last_export or account['export'] == 'missing':\n continue\n\n if dryrun:\n continue\n\n client.put_object(\n Bucket=destination['bucket'],\n Key=prefix,\n Body=json.dumps({}),\n ACL=\"bucket-owner-full-control\",\n ServerSideEncryption=\"AES256\")\n\n export_time = datetime.now().replace(tzinfo=tzlocal()).astimezone(tzutc())\n export_time = export_time.replace(\n year=last_export[0], month=last_export[1], day=last_export[2],\n minute=0, second=0, microsecond=0, hour=0)\n client.put_object_tagging(\n Bucket=destination['bucket'], Key=prefix,\n Tagging={\n 'TagSet': [{\n 'Key': 'LastExport',\n 'Value': export_time.isoformat()}]})\n\n accounts_report = []\n for a in config.get('accounts'):\n if accounts and a['name'] not in accounts:\n continue\n if isinstance(a['sync'], tuple):\n a['sync'] = \"%s/%s/%s\" % (a['sync'])\n if isinstance(a['export'], tuple):\n a['export'] = \"%s/%s/%s\" % (a['export'])\n accounts_report.append(a)\n\n accounts_report.sort(key=operator.itemgetter('export'), reverse=True)\n print(tabulate(accounts_report, headers='keys'))", "def sync(self, options=None):\n return self._call_account_method(\n 'syncAccount', {\n 'options': options,\n }\n )", "def _sync_databases(self):\n host, port = self._src.client().address\n log.info('sync databases from %s:%d' % (host, port))\n for dbname in self._src.client().database_names():\n if dbname in self._ignore_dbs:\n log.info(\"skip 
database '%s'\" % dbname)\n continue\n if not self._conf.data_filter.valid_db(dbname):\n log.info(\"skip database '%s'\" % dbname)\n continue\n self._sync_database(dbname)\n log.info('all databases done')", "def sync_dirs(self, *dirs, force_hash=False):\n roots = [SyncRoot(d) for d in dirs]\n if self._reverse_sync_order:\n roots = reversed(roots)\n synchronizer = Synchronizer(*roots, force_hash=force_hash)\n synchronizer.sync()", "def sync_remote(self, other):\n pass # TODO", "def syncusers(bot, event, *args):\n if not bot.get_config_option('syncing_enabled'):\n return\n\n combined = True\n\n tokens = list(args)\n if \"rooms\" in args:\n tokens.remove(\"rooms\")\n combined = False\n if \"rooms\" in args:\n tokens.remove(\"room\")\n combined = False\n\n if len(args) == 0:\n filter_convs = [ event.conv_id ]\n else:\n filter_convs = tokens\n\n target_conv = filter_convs.pop(0)\n\n user_lists = _syncout_users(bot, target_conv)\n if not user_lists:\n yield from bot.coro_send_message(event.conv_id, \"no users were returned\")\n return\n\n _lines = []\n\n for room_id in user_lists:\n if combined and room_id != \"*\":\n # list everything, only use wildcard\n continue\n elif not combined and room_id == \"*\":\n # list room-by-room, skip wildcard\n continue\n\n if filter_convs and room_id not in filter_conv and room_id != target_conv:\n # if >1 conv id provided, filter by only supplied conv ids\n continue\n\n if room_id == \"*\":\n _lines.append(\"**all syncout rooms**\")\n else:\n _lines.append(\"**{} ({})**\".format( bot.conversations.get_name(room_id),\n room_id ))\n\n user_list = user_lists[room_id]\n for chat_id in user_list:\n _lines.append(\"* {}\".format(user_list[chat_id].full_name))\n\n yield from bot.coro_send_message(event.conv_id, \"\\n\".join(_lines))\n\n \"\"\"\n # are we in a sync room?\n sync_room_list = None\n for _rooms in syncouts:\n if conversation_id in _rooms:\n sync_room_list = _rooms\n _lines.append(_(\"<b>Sync Rooms: {}</b>\").format(len(sync_room_list)))\n break\n if sync_room_list is None:\n sync_room_list = [conversation_id]\n _lines.append(_(\"<b>Standard Room</b>\"))\n\n all_users = {}\n try:\n if combined or len(sync_room_list) == 1:\n all_users[\"_ALL_\"] = bot.get_users_in_conversation(sync_room_list)\n else:\n for room_id in sync_room_list:\n all_users[room_id] = bot.get_users_in_conversation(room_id)\n except KeyError as e:\n # most likely raised if user provides invalid room list\n yield from bot.coro_send_message(event.conv, _('<b>failed to retrieve user list</b>'))\n return\n\n unique_users = []\n\n for room_id in all_users:\n if room_id is not \"_ALL_\":\n _line_room = '<i>{}</i>'.format(room_id)\n _line_room = '<b>{}</b> {}'.format(\n bot.conversations.get_name(room_id),\n _line_room)\n _lines.append(_line_room)\n list_users = all_users[room_id]\n for User in list_users:\n _line_user = '{}'.format(User.full_name);\n if User.emails:\n _line_user = _line_user + ' ({})'.format(User.emails[0])\n _lines.append(_line_user)\n unique_users.append(User)\n\n unique_users = list(set(unique_users))\n _lines.append(_(\"<b>Total Unique: {}</b>\").format(len(unique_users)))\n\n yield from bot.coro_send_message(event.conv, '<br />'.join(_lines))\n \"\"\"", "def run_sync(self, username=None, password=None, channels=[]):\n self.loop = asyncio.get_event_loop()\n self.loop.run_until_complete(self.run(username, password, channels))", "def sync_org(config, orgs):\n\n logger = logging.getLogger(\"sync-org\")\n\n for org in orgs:\n logger.info(\"Syncing {} 
organization\".format(org))\n config.get_manager().sync_org(org)", "def sync_devices(\n hostnames: Optional[List[str]] = None,\n device_type: Optional[str] = None,\n group: Optional[str] = None,\n dry_run: bool = True,\n force: bool = False,\n auto_push: bool = False,\n job_id: Optional[int] = None,\n scheduled_by: Optional[str] = None,\n resync: bool = False,\n confirm_mode_override: Optional[int] = None,\n) -> NornirJobResult:\n logger = get_logger()\n nr = cnaas_init()\n nr_filtered, dev_count, skipped_hostnames = select_devices(nr, hostnames, device_type, group, resync)\n\n device_list = list(nr_filtered.inventory.hosts.keys())\n logger.info(\"Device(s) selected for synchronization ({}): {}\".format(dev_count, \", \".join(device_list)))\n\n try:\n nrresult = nr_filtered.run(task=sync_check_hash, force=force, job_id=job_id)\n except Exception as e:\n logger.exception(\"Exception while checking config hash: {}\".format(str(e)))\n raise e\n else:\n if nrresult.failed:\n # Mark devices as unsynchronized if config hash check failed\n with sqla_session() as session:\n session.query(Device).filter(Device.hostname.in_(nrresult.failed_hosts.keys())).update(\n {Device.synchronized: False}, synchronize_session=False\n )\n raise Exception(\"Configuration hash check failed for {}\".format(\" \".join(nrresult.failed_hosts.keys())))\n\n if not dry_run:\n with sqla_session() as session:\n logger.info(\"Trying to acquire lock for devices to run syncto job: {}\".format(job_id))\n max_attempts = 5\n lock_ok: bool = False\n for i in range(max_attempts):\n lock_ok = Joblock.acquire_lock(session, name=\"devices\", job_id=job_id)\n if lock_ok:\n break\n else:\n time.sleep(2)\n if not lock_ok:\n raise JoblockError(\"Unable to acquire lock for configuring devices\")\n\n try:\n nrresult = nr_filtered.run(\n task=push_sync_device,\n dry_run=dry_run,\n job_id=job_id,\n confirm_mode_override=get_confirm_mode(confirm_mode_override),\n )\n except Exception as e:\n logger.exception(\"Exception while synchronizing devices: {}\".format(str(e)))\n try:\n if not dry_run:\n with sqla_session() as session:\n logger.info(\"Releasing lock for devices from syncto job: {}\".format(job_id))\n Joblock.release_lock(session, job_id=job_id)\n except Exception:\n logger.error(\"Unable to release devices lock after syncto job\")\n return NornirJobResult(nrresult=nrresult)\n\n failed_hosts = list(nrresult.failed_hosts.keys())\n for hostname in failed_hosts:\n logger.error(\"Synchronization of device '{}' failed\".format(hostname))\n\n if nrresult.failed:\n logger.error(\"Not all devices were successfully synchronized\")\n\n total_change_score = 1\n change_scores = []\n changed_hosts = []\n unchanged_hosts = []\n # calculate change impact score\n for host, results in nrresult.items():\n if host in failed_hosts or len(results) != 3:\n logger.debug(\"Unable to calculate change score for failed device {}\".format(host))\n elif results[2].diff:\n changed_hosts.append(host)\n if \"change_score\" in results[0].host:\n change_scores.append(results[0].host[\"change_score\"])\n logger.debug(\"Change score for host {}: {:.1f}\".format(host, results[0].host[\"change_score\"]))\n else:\n unchanged_hosts.append(host)\n change_scores.append(0)\n logger.debug(\"Empty diff for host {}, 0 change score\".format(host))\n\n if get_confirm_mode(confirm_mode_override) != 2:\n post_sync_update_cofighash(\n dry_run=dry_run,\n force=force,\n nr_filtered=nr_filtered,\n unchanged_hosts=unchanged_hosts,\n failed_hosts=failed_hosts,\n )\n\n # set devices as 
synchronized if needed\n with sqla_session() as session:\n for hostname in changed_hosts:\n if dry_run:\n dev: Device = session.query(Device).filter(Device.hostname == hostname).one()\n dev.synchronized = False\n dev.last_seen = datetime.datetime.utcnow()\n # if next job will commit, that job will mark synchronized on success\n elif get_confirm_mode(confirm_mode_override) != 2:\n dev: Device = session.query(Device).filter(Device.hostname == hostname).one()\n dev.synchronized = True\n dev.last_seen = datetime.datetime.utcnow()\n for hostname in unchanged_hosts:\n dev: Device = session.query(Device).filter(Device.hostname == hostname).one()\n dev.synchronized = True\n dev.last_seen = datetime.datetime.utcnow()\n if not dry_run and get_confirm_mode(confirm_mode_override) != 2:\n logger.info(\"Releasing lock for devices from syncto job: {}\".format(job_id))\n Joblock.release_lock(session, job_id=job_id)\n\n if len(device_list) == 0:\n total_change_score = 0\n elif not change_scores or total_change_score >= 100 or failed_hosts:\n total_change_score = 100\n else:\n # use individual max as total_change_score, range 1-100\n total_change_score = max(min(int(max(change_scores) + 0.5), 100), 1)\n logger.info(\n \"Change impact score: {:.1f} (dry_run: {}, selected devices: {}, changed devices: {})\".format(\n total_change_score, dry_run, len(device_list), len(changed_hosts)\n )\n )\n\n next_job_id = None\n if auto_push and len(device_list) == 1 and hostnames and dry_run:\n if not changed_hosts:\n logger.info(\"None of the selected host has any changes (diff), skipping auto-push\")\n elif total_change_score < AUTOPUSH_MAX_SCORE:\n scheduler = Scheduler()\n next_job_id = scheduler.add_onetime_job(\n \"cnaas_nms.devicehandler.sync_devices:sync_devices\",\n when=0,\n scheduled_by=scheduled_by,\n kwargs={\"hostnames\": hostnames, \"dry_run\": False, \"force\": force},\n )\n logger.info(f\"Auto-push scheduled live-run of commit as job id {next_job_id}\")\n else:\n logger.info(\n f\"Auto-push of config to device {hostnames} failed because change score of \"\n f\"{total_change_score} is higher than auto-push limit {AUTOPUSH_MAX_SCORE}\"\n )\n elif get_confirm_mode(confirm_mode_override) == 2 and not dry_run:\n if not changed_hosts:\n logger.info(\"None of the selected host has any changes (diff), skipping commit-confirm\")\n logger.info(\"Releasing lock for devices from syncto job: {}\".format(job_id))\n Joblock.release_lock(session, job_id=job_id)\n elif len(failed_hosts) > 0:\n logger.error(\n \"No confirm job scheduled since one or more devices failed in commitmode 2\"\n \", all devices will rollback in {}s\".format(api_settings.COMMIT_CONFIRMED_TIMEOUT)\n )\n time.sleep(api_settings.COMMIT_CONFIRMED_TIMEOUT)\n logger.info(\"Releasing lock for devices from syncto job: {}\".format(job_id))\n Joblock.release_lock(session, job_id=job_id)\n else:\n scheduler = Scheduler()\n next_job_id = scheduler.add_onetime_job(\n \"cnaas_nms.devicehandler.sync_devices:confirm_devices\",\n when=0,\n scheduled_by=scheduled_by,\n kwargs={\"prev_job_id\": job_id, \"hostnames\": changed_hosts},\n )\n logger.info(f\"Commit-confirm for job id {job_id} scheduled as job id {next_job_id}\")\n\n return NornirJobResult(nrresult=nrresult, next_job_id=next_job_id, change_score=total_change_score)", "def sync() -> None:", "def getSyncFor (self, conn) :\r\n for pw, _conn in self.clients :\r\n if _conn and _conn.getSyncInfo(conn) :\r\n self.ongoing_sync_count += 1\r\n return True\r\n \r\n return False", "def test_sync_1(self):\n 
expected_vals = {\n 'is_virtual': True,\n 'is_container': False,\n # Expecting that only amount_storage_capacity and amount_ram\n # will be changed.\n 'amount_storage_capacity': 200.0,\n 'amount_ram': 5,\n 'cpu_id': 1,\n 'os_id': 2,\n }\n self.machine_template_1.write(\n {\n 'amount_ram': 5,\n 'amount_storage_capacity': 200,\n 'name': 'Production 2'}\n )\n self.assertEqual(self.machine_template_1.name, 'Production 2')\n self._test_sync(\n self.machine_template_1 | self.mit_1_1 | self.mit_1_2,\n expected_vals)\n self.assertEqual(self.mit_1_1.name, 'Wood Corner Production')\n self.assertEqual(self.mit_1_2.name, 'Deco Addict Production')\n # Update expected values, because we do not expect mit_1_3 to\n # be synchronized.\n expected_vals.update(amount_storage_capacity=30, amount_ram=8)\n self._test_sync(self.mit_1_3, expected_vals)\n self.assertEqual(self.mit_1_3.name, 'Gemini Furniture Production')", "def final_sync(target, s_key, migrate_user):\n\tlog.debug(\"Starting final sync of home directories\")\n\trsync = 'rsync -avP --bwlimit=3200 -e \"ssh -o StrictHostKeyChecking=no -i ' + s_key + ' -p ' + SSH_PORT + '\" ' + migrate_user.home_dir + '/ ' + target.destserver_ip + ':' + migrate_user.dest_home_dir + '/'\n\n\t# rsync = [\n\t# \t\t\t'rsync',\n\t# \t\t\t'-avP',\n\t# \t\t\t'--bwlimit=3200',\n\t# \t\t\t'-e', '\"ssh -o StrictHostKeyChecking=no -i ' + s_key + ' -p ' + SSH_PORT + '\"',\n\t# \t\t\tmigrate_user.home_dir + '/',\n\t# \t\t\ttarget.destserver_ip + ':' + migrate_user.dest_home_dir + '/'\n\t# \t\t]\n\t# r = Popen([rsync], shell=True, stdout=PIPE, stderr=PIPE)\n\t# return r.stdout.read(), r.stderr.read()\n\treturn run_command([rsync], True)", "def sync_datasets_acls(self):\n future_response = self.client._perform_json(\n \"POST\", \"/admin/connections/%s/sync\" % self.name,\n body = {'root':True})\n return DSSFuture(self.client, future_response.get('jobId', None), future_response)" ]
[ "0.6455885", "0.6311536", "0.6180077", "0.61477345", "0.6121937", "0.6006474", "0.58930635", "0.5883944", "0.5810142", "0.5777375", "0.5727696", "0.57057714", "0.56830674", "0.5662616", "0.5652426", "0.564118", "0.5630871", "0.56220114", "0.5602681", "0.55613834", "0.55493385", "0.5523551", "0.5513364", "0.5502699", "0.54861224", "0.5469788", "0.5426231", "0.5417628", "0.54175836", "0.5405009" ]
0.7644102
0