query (string, 9–9.05k chars) | document (string, 10–222k chars) | metadata (dict) | negatives (sequence, length 30) | negative_scores (sequence, length 30) | document_score (string, 4–10 chars) | document_rank (string, 2 classes)
---|---|---|---|---|---|---
Force a computation of the current portfolio state. | def update_portfolio(self):
if not self._dirty_portfolio:
return
portfolio = self._portfolio
pt = self.position_tracker
portfolio.positions = pt.get_positions()
position_stats = pt.stats
portfolio.positions_value = position_value = (
position_stats.net_value
)
portfolio.positions_exposure = position_stats.net_exposure
self._cash_flow(self._get_payout_total(pt.positions))
start_value = portfolio.portfolio_value
# update the new starting value
portfolio.portfolio_value = end_value = portfolio.cash + position_value
pnl = end_value - start_value
if start_value != 0:
returns = pnl / start_value
else:
returns = 0.0
portfolio.pnl += pnl
portfolio.returns = (
(1 + portfolio.returns) *
(1 + returns) -
1
)
# the portfolio has been fully synced
self._dirty_portfolio = False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_portfolio(self, portfolio: PortfolioController):\n now = portfolio.get_history(seconds_back=0)\n future = portfolio.get_history(seconds_back=-self.update_interval)\n\n for fund in portfolio.funds:\n best_currency = max(portfolio.currencies, key=lambda currency: future_value(fund, currency, now, future))\n if best_currency != fund.currency:\n portfolio.request_transfer(fund, best_currency)",
"def _initalize_portfolio_with_cash(self):\n self.cash = copy.copy(self.starting_cash)\n\n if self.starting_cash > 0.0:\n self.history.append(\n PortfolioEvent.create_subscription(\n self.current_dt, self.starting_cash, self.starting_cash\n )\n )\n\n self.logger.info(\n '(%s) Funds subscribed to portfolio \"%s\" '\n '- Credit: %0.2f, Balance: %0.2f' % (\n self.current_dt.strftime(settings.LOGGING[\"DATE_FORMAT\"]),\n self.portfolio_id,\n round(self.starting_cash, 2),\n round(self.starting_cash, 2)\n )\n )",
"def backtest_portfolio(self):\n raise NotImplementedError(\"Should implement backtest_portfolio()!\")",
"def updateState(self):\n self.state = self.microgridPolicy.computeState();",
"def portfolio(self):\n self.update_portfolio()\n return self._immutable_portfolio",
"def after_run(self):\n # Calculate the performance of the strategy and portfolio\n self.portfolio.calc_stats()\n self.calc_performance()\n\n return self",
"def reset(self): \n ptf_asset_vals, bmk_asset_vals = self._generate_initial_portfolios()\n process_params = self._generate_initial_process()\n self.state_vars = StateVariables( timestamp=0,\n ptf_asset_vals=ptf_asset_vals, \n bmk_asset_vals=bmk_asset_vals, \n process_params=process_params )\n return self._get_observation_from_state_vars()",
"def track_portfolio(self, p):\n\n global st_refresh_thread\n\n if self.terminate:\n return\n\n p.refresh()\n\n self.lock.acquire()\n self.active_portfolio = p\n self.display_portfolio(p)\n self.lock.release()\n\n if not self.refresh_thread:\n thr_args = list()\n thr_args.append(self)\n self.refresh_thread = threading.Thread(target=st_refresh_thread,\n args=thr_args)\n self.refresh_thread.start()",
"def _ensure_calculated(self):\n # return immediately if already done\n if self._state > State.UNCALCULATED:\n return\n\n # do the actual calculation, which must be implemented by the subclass\n # but first, set default state that we expect unless the implementing class overrides\n self._state = State.QUALIFIED \n self._calculate()",
"def on_iteration_start(self):\n\n self.Xfprv = self.Xf.copy()\n if (not self.opt['FastSolve'] or isinstance(self.backtrack,\n BacktrackRobust)):\n self.Yfprv = self.Yf.copy()\n\n if self.opt['Monotone']:\n if self.k == 0:\n self.objfn = self.eval_objfn()\n self.objfn_prev = self.objfn",
"def set_cash(self, cash):\n portfolio = self.get_portfolio_object()\n if portfolio is not None:\n portfolio.cash += cash\n portfolio.initial_cash += cash",
"def compute(self):\n if self._computed:\n return\n\n self._compute()\n self._computed = True",
"def __calculate_portfolio_returns(self):\n\n p_bar = tqdm(range(1), desc=\" Calculating returns\", leave=False)\n\n trade_data = self.historical_trade_data\n\n # Helper functions to calculate cash inflow and outflow\n def f_min(x):\n return x.apply(lambda x: min(x, 0))\n\n def f_max(x):\n return x.apply(lambda x: max(x, 0))\n\n # Calculate cash inflow and outflow\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Period cash inflow\"], self.tickers_list + [\"Total\"]]\n )\n ] = -1 * trade_data[\"Investment delta\"][:].apply(lambda x: f_min(x), axis=0)\n\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Period cash outflow\"], self.tickers_list + [\"Total\"]]\n )\n ] = trade_data[\"Investment delta\"][:].apply(lambda x: f_max(x), axis=1)\n\n # Calculate period return\n\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Period absolute return\"], self.tickers_list + [\"Total\"]]\n )\n ] = (trade_data[\"End Value\"] + trade_data[\"Period cash inflow\"]) - (\n trade_data[\"End Value\"].shift(1).fillna(0)\n + trade_data[\"Period cash outflow\"]\n )\n\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Period percentage return\"], self.tickers_list + [\"Total\"]]\n )\n ] = (trade_data[\"End Value\"] + trade_data[\"Period cash inflow\"]) / (\n trade_data[\"End Value\"].shift(1).fillna(0)\n + trade_data[\"Period cash outflow\"]\n ) - 1\n\n trade_data[\"Period percentage return\"].fillna(0, inplace=True)\n\n self.historical_trade_data = trade_data\n\n self.portfolio_returns = self.historical_trade_data[\"Period percentage return\"][\n \"Total\"\n ]\n\n p_bar.n += 1\n p_bar.refresh()",
"def update_portfolio_on_market(self, market: MarketEvent):\n self._portfolio.update_market_value(market)",
"def __update_portfolio_handler(self, msg):\n pass",
"def on_iteration_start(self):\n\n self.Xprv = self.X.copy()\n if (not self.opt['FastSolve'] or isinstance(self.backtrack,\n BacktrackRobust)):\n self.Yprv = self.Y.copy()\n\n if self.opt['Monotone']:\n if self.k == 0:\n self.objfn = self.eval_objfn()\n self.objfn_prev = self.objfn",
"def rebalance(context, data):\n logger.debug('rebalancing on: %s', algo.get_datetime())\n\n context.trend_filter = False\n\n # new_portfolio = algo.pipeline_output('pipeline').dropna(subset=['overall_rank']).sort_values('momentum', ascending=False)\n\n new_portfolio = algo.pipeline_output('pipeline').dropna(subset=['overall_rank']).sort_values('momentum', ascending=False)\n\n for equity, row in new_portfolio.iterrows():\n logger.debug('new portfolio (before filtering) - equity: %s', equity)\n\n # print(new_portfolio)\n\n # new_portfolio = new_portfolio[new_portfolio['overall_rank'].notna() & new_portfolio['momentum'] > 40][:20]\n \n # new_portfolio = new_portfolio[(new_portfolio['momentum_decile'] > 8)][:20]\n\n new_portfolio = new_portfolio.nlargest(20, ['overall_rank', 'momentum']) #<- $600K PL in 10 years\n\n # new_portfolio = new_portfolio.nlargest(20, ['momentum', 'overall_rank']) #<- 1M PL in 10 years\n\n if logger.level is logging.DEBUG:\n for equity, row in new_portfolio.iterrows():\n logger.debug('new portfolio - (after filtering) equity: %s', equity)\n \n\n # print(len(new_portfolio.index))\n\n # volatility driven weights\n # new_portfolio['inverse_volatility'] = new_portfolio['volatility'].apply(lambda x: 1 / x)\n # inv_vola_sum = new_portfolio['inverse_volatility'].sum()\n # new_portfolio['target_weight'] = new_portfolio['inverse_volatility'].apply(lambda x: x / inv_vola_sum)\n\n # portfolio size driven weights\n # num_equities = len(new_portfolio.index)\n # new_portfolio['target_weight'] = 1 / num_equities\\\n\n # logger.info('len existing portfolio: %s', len(context.portfolio.positions))\n\n if logger.level is logging.DEBUG:\n for equity, values in context.portfolio.positions.items():\n logger.debug('context.portfolio.positions - equity: %s, amount: %s, cost_basis: %s, sold_on: %s, sold_at_price: %s', equity, values.amount, values.cost_basis, values.last_sale_date, values.last_sale_price)\n\n \n order_target(algo.sid('FIBBG000NTFYM5'), 0)\n logger.debug('selling all bonds')\n\n for equity in context.portfolio.positions:\n if equity is algo.sid('FIBBG000NTFYM5'): \n continue\n if equity not in set(new_portfolio.index.tolist()):\n # logger.info('selling %s', equity)\n order_target_percent(equity, 0)\n\n stock_weights = 1.0 / max(len(context.portfolio.positions), len(new_portfolio.index))\n\n logger.debug('len existing portfolio (afer ejection): %s', len(context.portfolio.positions))\n logger.debug('len new portfolio: %s', len(new_portfolio.index))\n logger.debug('stock_weights: %s', stock_weights)\n\n # print(context.portfolio.positions.get(algo.sid('FIBBG000NTFYM5')))\n\n # spy = context.portfolio.positions.get(algo.sid('FIBBG000NTFYM5'))\n\n # if (spy is not None) and (spy.amount > 0):\n # order_target_percent(algo.sid('FIBBG000NTFYM5'), 0)\n\n for equity, row in new_portfolio.iterrows():\n if row.trend_filter is True:\n # logger.info('buying %s', equity)\n context.trend_filter = True\n order_target_percent(equity, stock_weights)\n else:\n context.trend_filter = False\n \n logger.debug('cash: %s', context.portfolio.cash)\n logger.debug('portfolio_value: %s', context.portfolio.portfolio_value)\n logger.debug('num_positions: %s', len(context.portfolio.positions))\n logger.debug('positions: %s', context.portfolio.positions)",
"def recalculate() -> None:\n NotImplemented",
"def _on_state_update(self) -> None:\n super()._on_state_update()\n self._set_futures(True)",
"def precalculate():\n pass",
"def precalculate():\n pass",
"def final(self, state):\n \"*** YOUR CODE HERE ***\"\n return\n util.raiseNotDefined()",
"def execute(self) -> None:\n self.state()",
"def _run_computation(self):\n with self.swap(stats_jobs_continuous.StatisticsAggregator,\n 'get_statistics', self._mock_get_statistics):\n ModifiedUserImpactAggregator.start_computation()\n self.process_and_flush_pending_tasks()",
"def before_trading_start(context, data):\n factors = pipeline_output('ff_example')\n\n # get the data we're going to use\n returns = factors['returns']\n mkt_cap = factors.sort_values(['market_cap'], ascending=True)\n be_me = factors.sort_values(['be_me'], ascending=True)\n\n # to compose the six portfolios, split our universe into portions\n half = int(len(mkt_cap)*0.5)\n small_caps = mkt_cap[:half]\n big_caps = mkt_cap[half:]\n \n thirty = int(len(be_me)*0.3)\n seventy = int(len(be_me)*0.7)\n growth = be_me[:thirty]\n neutral = be_me[thirty:seventy]\n value = be_me[seventy:]\n\n # now use the portions to construct the portfolios.\n # note: these portfolios are just lists (indices) of equities\n small_value = small_caps.index.intersection(value.index)\n small_neutral = small_caps.index.intersection(neutral.index)\n small_growth = small_caps.index.intersection(growth.index)\n \n big_value = big_caps.index.intersection(value.index)\n big_neutral = big_caps.index.intersection(neutral.index)\n big_growth = big_caps.index.intersection(growth.index)\n\n # take the mean to get the portfolio return, assuming uniform\n # allocation to its constituent equities.\n sv = returns[small_value].mean()\n sn = returns[small_neutral].mean()\n sg = returns[small_growth].mean()\n \n bv = returns[big_value].mean()\n bn = returns[big_neutral].mean()\n bg = returns[big_growth].mean()\n\n # computing SMB\n context.smb = (sv + sn + sg)/3 - (bv + bn + bg)/3\n\n # computing HML\n context.hml = (sv + bv)/2 - (sg + bg)/2",
"def reset(self):\n for key in self.portfolio.keys():\n self.portfolio[key] = {'holdings': 0}\n self.buys[key] = 0\n self.portfolio['balance'] = 2500000.0",
"def apply(self, gameState):\n pass",
"def liquidate(self) -> None:\n if self.position.is_close:\n return\n\n if self.position.pnl > 0:\n self.take_profit = self.position.qty, self.price\n else:\n self.stop_loss = self.position.qty, self.price",
"def make_uncurrent(self):\n pass",
"def calculate_portfolio(self, request, pk=None, **kwargs):\n goal = self.get_object()\n\n check_state(Goal.State(goal.state), Goal.State.ACTIVE)\n\n setting_str = request.query_params.get('setting', None)\n if not setting_str:\n raise ValidationError(\"Query parameter 'setting' must be specified and a valid JSON string\")\n try:\n setting = ujson.loads(setting_str)\n except ValueError:\n raise ValidationError(\"Query parameter 'setting' must be a valid json string\")\n\n # Create the settings object from the dict\n serializer = serializers.GoalSettingStatelessSerializer(data=setting)\n serializer.is_valid(raise_exception=True)\n settings = serializer.create_stateless(serializer.validated_data, goal)\n\n try:\n data = self.build_portfolio_data(calculate_portfolio(settings=settings,\n data_provider=DataProviderDjango(),\n execution_provider=ExecutionProviderDjango()))\n return Response(data)\n except Unsatisfiable as e:\n rdata = {'reason': \"No portfolio could be found: {}\".format(e)}\n if e.req_funds is not None:\n rdata['req_funds'] = e.req_funds\n\n return Response({'error': rdata}, status=status.HTTP_400_BAD_REQUEST)"
] | [
"0.60446954",
"0.5951977",
"0.5695806",
"0.5687665",
"0.56560236",
"0.5653465",
"0.56520754",
"0.5633276",
"0.559409",
"0.5562694",
"0.54696906",
"0.54538536",
"0.5440695",
"0.5416885",
"0.5380987",
"0.5366457",
"0.5364525",
"0.5331849",
"0.5323494",
"0.53195906",
"0.53195906",
"0.53085035",
"0.53000546",
"0.5282828",
"0.52782464",
"0.52712125",
"0.52664864",
"0.52351665",
"0.52097136",
"0.52049536"
] | 0.66357535 | 0 |
Compute the current portfolio. Notes This is cached, repeated access will not recompute the portfolio until the portfolio may have changed. | def portfolio(self):
self.update_portfolio()
return self._immutable_portfolio | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_portfolio(self):\n if not self._dirty_portfolio:\n return\n\n portfolio = self._portfolio\n pt = self.position_tracker\n\n portfolio.positions = pt.get_positions()\n position_stats = pt.stats\n\n portfolio.positions_value = position_value = (\n position_stats.net_value\n )\n portfolio.positions_exposure = position_stats.net_exposure\n self._cash_flow(self._get_payout_total(pt.positions))\n\n start_value = portfolio.portfolio_value\n\n # update the new starting value\n portfolio.portfolio_value = end_value = portfolio.cash + position_value\n\n pnl = end_value - start_value\n if start_value != 0:\n returns = pnl / start_value\n else:\n returns = 0.0\n\n portfolio.pnl += pnl\n portfolio.returns = (\n (1 + portfolio.returns) *\n (1 + returns) -\n 1\n )\n\n # the portfolio has been fully synced\n self._dirty_portfolio = False",
"def get_portfolio_object(self):\n return self.__get_portfolio_object(self.portfolio_name, self.portfolio_user)",
"def update_portfolio(self, portfolio: PortfolioController):\n now = portfolio.get_history(seconds_back=0)\n future = portfolio.get_history(seconds_back=-self.update_interval)\n\n for fund in portfolio.funds:\n best_currency = max(portfolio.currencies, key=lambda currency: future_value(fund, currency, now, future))\n if best_currency != fund.currency:\n portfolio.request_transfer(fund, best_currency)",
"def get_portfolio_pnl(self):\n\n return self._portfolio",
"def portfolio():\n #Query transactions by user id\n trans = Transactions.query.filter_by(owner=session['user_id']).all()\n \n #Create list of comanies user owns stock in\n companies = []\n for t in trans:\n if t.symbol not in companies:\n companies.append(t.symbol)\n\n #Create list of current stock dictionaries and total their values\n total = 0\n stocks = []\n for company in companies:\n trans = Transactions.query.filter_by(owner=session['user_id'], symbol=company).all()\n stock = {}\n stock['shares'] = 0\n for t in trans:\n stock['shares'] += t.shares\n if stock['shares'] > 0:\n stock['symbol'] = company\n stock['name'] = lookup(company)['name']\n stock['price'] = lookup(company)['price']\n stock['total'] = stock['shares'] * stock['price']\n stock['price'] = usd(stock['price'])\n stock['total'] = usd(stock['total'])\n total += float(stock['total'][1:].replace(',', ''))\n stocks.append(stock)\n\n #Set user cash and total values\n value = {}\n value['cash'] = usd(Users.query.filter_by(id=session['user_id']).first().cash)\n value['total'] = usd(total + float(value['cash'][1:].replace(',', '')))\n\n #Add values to list\n stocks.append(value)\n\n #Return list of dictionaries\n return stocks",
"def current_portfolio_weights(self) -> 'pd.Series[float]':\n position_values = pd.Series({\n asset: (\n position.last_sale_price *\n position.amount *\n asset.price_multiplier\n )\n for asset, position in self.positions.items()\n }, dtype=\"float64\")\n return position_values / self.portfolio_value",
"def generate_portfolio_data(self):\n self.__load_portfolio_historical_prices()\n self.__populate_historical_trade_data()\n self.__calculate_portfolio_returns()\n self.__calculate_portfolio_performance()",
"def __calculate_portfolio_returns(self):\n\n p_bar = tqdm(range(1), desc=\" Calculating returns\", leave=False)\n\n trade_data = self.historical_trade_data\n\n # Helper functions to calculate cash inflow and outflow\n def f_min(x):\n return x.apply(lambda x: min(x, 0))\n\n def f_max(x):\n return x.apply(lambda x: max(x, 0))\n\n # Calculate cash inflow and outflow\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Period cash inflow\"], self.tickers_list + [\"Total\"]]\n )\n ] = -1 * trade_data[\"Investment delta\"][:].apply(lambda x: f_min(x), axis=0)\n\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Period cash outflow\"], self.tickers_list + [\"Total\"]]\n )\n ] = trade_data[\"Investment delta\"][:].apply(lambda x: f_max(x), axis=1)\n\n # Calculate period return\n\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Period absolute return\"], self.tickers_list + [\"Total\"]]\n )\n ] = (trade_data[\"End Value\"] + trade_data[\"Period cash inflow\"]) - (\n trade_data[\"End Value\"].shift(1).fillna(0)\n + trade_data[\"Period cash outflow\"]\n )\n\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Period percentage return\"], self.tickers_list + [\"Total\"]]\n )\n ] = (trade_data[\"End Value\"] + trade_data[\"Period cash inflow\"]) / (\n trade_data[\"End Value\"].shift(1).fillna(0)\n + trade_data[\"Period cash outflow\"]\n ) - 1\n\n trade_data[\"Period percentage return\"].fillna(0, inplace=True)\n\n self.historical_trade_data = trade_data\n\n self.portfolio_returns = self.historical_trade_data[\"Period percentage return\"][\n \"Total\"\n ]\n\n p_bar.n += 1\n p_bar.refresh()",
"def __get_portfolio_object(self, name, user):\n portfolio = self.__get_object_portfolio_bulk(name, user)\n if portfolio is None:\n portfolio = self.__get_object_portfolio_bulk(name, user, \"portfolio_update\")\n if portfolio is None:\n portfolio = self.db_tool.session.query(Portfolio) \\\n .outerjoin(Orders)\\\n .join(Stock)\\\n .filter(name == Portfolio.name) \\\n .filter(user == Portfolio.user).first()\n self.bulk_data[\"portfolio_update\"].append(portfolio)\n return portfolio",
"def backtest_portfolio(self):\n self.rank=dict()\n self.accuracy=dict()\n portfolio = dict()\n \n for algo in self.algos:\n portfolio[algo]=pd.DataFrame(index=self.positions.index)\n self.pos_diff=dict()\n self.pos_diff[algo] = self.positions[algo].diff()\n \n portfolio[algo]['price_diff'] = self.bars['Close']-self.bars['Open']\n #portfolio['price_diff'][0:5] = 0.0\n portfolio[algo]['profit'] = self.positions[algo] * portfolio[algo]['price_diff']\n portfolio[algo]['total'] = self.initial_capital + portfolio[algo]['profit'].cumsum()\n portfolio[algo]['returns'] = portfolio[algo]['total'].pct_change()\n d=np.array(portfolio[algo]['profit']).copy()\n d[d>0]=1\n d[d<0]=0\n d[np.array(self.positions[algo])==0]=1\n for i in np.arange(1,len(d)+1):\n c=float(sum(d[0:i]))/(i)\n d[i-1]=c\n portfolio[algo]['accuracy']=d\n self.rank[algo]=float(portfolio[algo]['total'][-1] - portfolio[algo]['total'][0])\n self.returns=portfolio\n c=np.array(self.returns[algo]['profit'])\n c[c>0]=1\n c[c<0]=0\n c[np.array(self.positions[algo])==0]=1\n accuracy=round(float(c.sum())/len(c),2)*self.rank[algo]\n self.accuracy[algo]=accuracy\n #self.ranking= sorted(self.rank.items(), key=operator.itemgetter(1), reverse=True)\n self.ranking= sorted(self.accuracy.items(), key=operator.itemgetter(1))\n self.ready=True\n return (portfolio, self.rank, self.ranking)",
"def backtest_portfolio(self):\n raise NotImplementedError(\"Should implement backtest_portfolio()!\")",
"def compute_portvals(start_date, end_date, orders_file, start_val):\n \n #Read order file\n orders = pd.read_csv( orders_file, parse_dates = [0])\n \n #Get symbols making up the portfolio\n stock_symbols = list( set( orders[\"Symbol\"] ) )\n dates = pd.date_range(start_date, end_date)\n \n #Read stock prices\n stock_prices = get_data(stock_symbols, dates)\n \n #Create a portfolio keeping track of positions, \n #_CASH column indicates cash position, _VALUE total portfolio value\n #_LEVERAGE the leverage of portfolio when we allow for short selling\n symbols = stock_symbols[:] #Shallow copy of the list\n symbols.append(\"_CASH\")\n symbols.append(\"_VALUE\")\n symbols.append(\"_LEVERAGE\")\n \n #Index contains only business days, same dates as stock prices\n portfolio = pd.DataFrame(index=stock_prices.index, columns = symbols )\n portfolio.fillna(0) \n portfolio[\"_CASH\"][0] = start_val\n portfolio[\"_VALUE\"][0] = start_val\n \n #Snapshot of a portfolio at any time. To avoid using numerical indexes\n portfolio_snapshot = dict.fromkeys ( symbols, 0 )\n portfolio_snapshot[\"_CASH\"] = start_val\n portfolio[\"_VALUE\"] = start_val\n \n #Now calcualte portfolio day by day\n for date in portfolio.index:\n #Check transactions for the day\n day_orders = orders[ orders[\"Date\"] == date ] \n \n for ord in day_orders.iterrows():\n symbol = ord[1][ \"Symbol\"] \n stock_price = stock_prices[ symbol ][ date ]\n shares = ord[1][\"Shares\" ]\n side = ord[1][\"Order\"]\n \n if side == \"BUY\":\n portfolio_snapshot[ \"_CASH\" ] -= stock_price * shares\n portfolio_snapshot[ symbol ] += shares \n elif side == \"SELL\":\n portfolio_snapshot[ \"_CASH\" ] += stock_price * shares\n portfolio_snapshot[ symbol ] -= shares\n else:\n raise \"Order not recognized.\"\n \n #Compute portfolio value\n portfolio_snapshot[ \"_VALUE\" ] = portfolio_snapshot[ \"_CASH\" ]\n shorts = longs = 0\n for symbol in stock_symbols: \n stock_price = stock_prices[ symbol ][ date ]\n shares = portfolio_snapshot[ symbol ]\n notional = stock_price*shares\n if shares > 0:\n longs += notional\n else:\n shorts += notional\n \n portfolio_snapshot[ \"_VALUE\" ] += notional\n \n #Compute leverage\n leverage = (longs+shorts)/(longs-shorts + portfolio_snapshot[ \"_CASH\" ] )\n portfolio_snapshot[ \"_LEVERAGE\" ] = leverage\n \n #Assert we never achieve a leverage > 2.0\n if leverage > 2:\n raise \"Leverage > 2.0 achieved\"\n \n #Update portfolio from the daily snapshot\n #TODO: Is this causing performance issues?\n for symbol in portfolio.keys():\n portfolio[ symbol ][ date ] = portfolio_snapshot[ symbol ]\n \n return portfolio",
"def getPortfolioValue(self, start_t, t):\n sum_tmp=0\n for item in self.portfolio.keys():\n if \"DJI_\" in item:\n t_tmp=datetime.strftime(pd.date_range(end=t,periods=1,freq='B')[0],'%Y-%m-%d')\n price=universe.get_price_in_currency(item,t_tmp,'CAD')\n elif 'rf_rate' in item:\n price=universe.get_security(item).get_cc_return(start_t,t) \n else:\n price=universe.get_price_in_currency(item,t,'CAD')\n #price=universe.get_security(item).price[t]\n amount=self.portfolio[item]\n sum_tmp=sum_tmp+price*amount\n \n return sum_tmp",
"def get_portfolio_pnl_tsd(self):\n\n return self._tsd_portfolio",
"def portfolio_performance(returns,weights):\r\n print('Calculating Portfolio Performance')\r\n # returns=target_asset_port_data_attributes['component_returns']\r\n # weights =target_asset_port_data_attributes['effective_weights']\r\n\r\n component_returns= returns\r\n compnent_weights = pd.DataFrame(data=np.nan,index= component_returns.index,columns=component_returns.columns)\r\n compnent_weights.loc[weights.index,:] = weights\r\n\r\n portfolio_dates = component_returns.index\r\n components = component_returns.columns\r\n\r\n # pre-allocate\r\n BoP_df = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=components)\r\n EoP_df = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=components)\r\n PnL_df = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=components)\r\n portfolio_BoP = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=['Portfolio BoP'])\r\n portfolio_EoP = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=['Portfolio EoP'])\r\n portfolio_PnL = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=['Portfolio PnL'])\r\n \r\n portfolio_index = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=['Index'])\r\n previous_index_value = np.int64(1)\r\n\r\n pre_date = portfolio_dates[0]\r\n # set BoP to start weights\r\n for date,row in component_returns.iterrows():\r\n # print(date)\r\n # 1st date\r\n if date == portfolio_dates[0]:\r\n BoP_df.loc[date] = compnent_weights.iloc[0,:]\r\n EoP_df.loc[date] = BoP_df.loc[date] * (1+component_returns.loc[date])\r\n PnL_df.loc[date] = EoP_df.loc[date].subtract(BoP_df.loc[date])\r\n\r\n portfolio_BoP.loc[date] = BoP_df.loc[date].sum()\r\n portfolio_EoP.loc[date] = EoP_df.loc[date].sum()\r\n portfolio_PnL.loc[date] = PnL_df.loc[date].sum()\r\n\r\n portfolio_index.loc[date] = np.nansum([previous_index_value,portfolio_PnL.loc[date].values])\r\n previous_index_value = portfolio_index.loc[date]\r\n pre_date = date\r\n\r\n # after first date\r\n else:\r\n BoP_df.loc[date] = EoP_df.loc[pre_date]\r\n # weights override\r\n if date in compnent_weights.index:\r\n none_NaN_index = ~compnent_weights.loc[date].isnull()\r\n if not compnent_weights.loc[date][none_NaN_index].empty:\r\n tmp_sum = BoP_df.loc[date].sum()\r\n BoP_df.loc[date][none_NaN_index.values] = (compnent_weights.loc[date][none_NaN_index.values].values)*tmp_sum\r\n\r\n \r\n EoP_df.loc[date] = BoP_df.loc[date] * (1+component_returns.loc[date])\r\n PnL_df.loc[date] = EoP_df.loc[date].subtract(BoP_df.loc[date])\r\n\r\n portfolio_BoP.loc[date] = BoP_df.loc[date].sum()\r\n portfolio_EoP.loc[date] = EoP_df.loc[date].sum()\r\n portfolio_PnL.loc[date] = PnL_df.loc[date].sum()\r\n \r\n portfolio_index.loc[date] = np.nansum([previous_index_value,portfolio_PnL.loc[date].values])\r\n previous_index_value = portfolio_index.loc[date]\r\n pre_date = date\r\n\r\n\r\n portfolio_returns = portfolio_index.pct_change(1) \r\n portfolio_returns.columns = ['Returns']\r\n\r\n portfolio_index\r\n perf = portfolio_index.calc_stats()\r\n \r\n output = pd.Series(data = [perf,PnL_df,portfolio_index,portfolio_BoP,portfolio_EoP,BoP_df], index=['Portfolio Perf','Component PnL','portfolio_index','portfolio_BoP','portfolio_EoP','BoP_df'])\r\n return output",
"def calculate_portfolio(self, request, pk=None, **kwargs):\n goal = self.get_object()\n\n check_state(Goal.State(goal.state), Goal.State.ACTIVE)\n\n setting_str = request.query_params.get('setting', None)\n if not setting_str:\n raise ValidationError(\"Query parameter 'setting' must be specified and a valid JSON string\")\n try:\n setting = ujson.loads(setting_str)\n except ValueError:\n raise ValidationError(\"Query parameter 'setting' must be a valid json string\")\n\n # Create the settings object from the dict\n serializer = serializers.GoalSettingStatelessSerializer(data=setting)\n serializer.is_valid(raise_exception=True)\n settings = serializer.create_stateless(serializer.validated_data, goal)\n\n try:\n data = self.build_portfolio_data(calculate_portfolio(settings=settings,\n data_provider=DataProviderDjango(),\n execution_provider=ExecutionProviderDjango()))\n return Response(data)\n except Unsatisfiable as e:\n rdata = {'reason': \"No portfolio could be found: {}\".format(e)}\n if e.req_funds is not None:\n rdata['req_funds'] = e.req_funds\n\n return Response({'error': rdata}, status=status.HTTP_400_BAD_REQUEST)",
"def initialize_portfolio(self):\n\n raise NotImplementedError('''\n Must implement initialize_portfolio. Call help() for details.\n ''')",
"def rebalance(context, data):\n logger.debug('rebalancing on: %s', algo.get_datetime())\n\n context.trend_filter = False\n\n # new_portfolio = algo.pipeline_output('pipeline').dropna(subset=['overall_rank']).sort_values('momentum', ascending=False)\n\n new_portfolio = algo.pipeline_output('pipeline').dropna(subset=['overall_rank']).sort_values('momentum', ascending=False)\n\n for equity, row in new_portfolio.iterrows():\n logger.debug('new portfolio (before filtering) - equity: %s', equity)\n\n # print(new_portfolio)\n\n # new_portfolio = new_portfolio[new_portfolio['overall_rank'].notna() & new_portfolio['momentum'] > 40][:20]\n \n # new_portfolio = new_portfolio[(new_portfolio['momentum_decile'] > 8)][:20]\n\n new_portfolio = new_portfolio.nlargest(20, ['overall_rank', 'momentum']) #<- $600K PL in 10 years\n\n # new_portfolio = new_portfolio.nlargest(20, ['momentum', 'overall_rank']) #<- 1M PL in 10 years\n\n if logger.level is logging.DEBUG:\n for equity, row in new_portfolio.iterrows():\n logger.debug('new portfolio - (after filtering) equity: %s', equity)\n \n\n # print(len(new_portfolio.index))\n\n # volatility driven weights\n # new_portfolio['inverse_volatility'] = new_portfolio['volatility'].apply(lambda x: 1 / x)\n # inv_vola_sum = new_portfolio['inverse_volatility'].sum()\n # new_portfolio['target_weight'] = new_portfolio['inverse_volatility'].apply(lambda x: x / inv_vola_sum)\n\n # portfolio size driven weights\n # num_equities = len(new_portfolio.index)\n # new_portfolio['target_weight'] = 1 / num_equities\\\n\n # logger.info('len existing portfolio: %s', len(context.portfolio.positions))\n\n if logger.level is logging.DEBUG:\n for equity, values in context.portfolio.positions.items():\n logger.debug('context.portfolio.positions - equity: %s, amount: %s, cost_basis: %s, sold_on: %s, sold_at_price: %s', equity, values.amount, values.cost_basis, values.last_sale_date, values.last_sale_price)\n\n \n order_target(algo.sid('FIBBG000NTFYM5'), 0)\n logger.debug('selling all bonds')\n\n for equity in context.portfolio.positions:\n if equity is algo.sid('FIBBG000NTFYM5'): \n continue\n if equity not in set(new_portfolio.index.tolist()):\n # logger.info('selling %s', equity)\n order_target_percent(equity, 0)\n\n stock_weights = 1.0 / max(len(context.portfolio.positions), len(new_portfolio.index))\n\n logger.debug('len existing portfolio (afer ejection): %s', len(context.portfolio.positions))\n logger.debug('len new portfolio: %s', len(new_portfolio.index))\n logger.debug('stock_weights: %s', stock_weights)\n\n # print(context.portfolio.positions.get(algo.sid('FIBBG000NTFYM5')))\n\n # spy = context.portfolio.positions.get(algo.sid('FIBBG000NTFYM5'))\n\n # if (spy is not None) and (spy.amount > 0):\n # order_target_percent(algo.sid('FIBBG000NTFYM5'), 0)\n\n for equity, row in new_portfolio.iterrows():\n if row.trend_filter is True:\n # logger.info('buying %s', equity)\n context.trend_filter = True\n order_target_percent(equity, stock_weights)\n else:\n context.trend_filter = False\n \n logger.debug('cash: %s', context.portfolio.cash)\n logger.debug('portfolio_value: %s', context.portfolio.portfolio_value)\n logger.debug('num_positions: %s', len(context.portfolio.positions))\n logger.debug('positions: %s', context.portfolio.positions)",
"def index():\n def getListOfCompanies(username, symbolOrPriceOrNumber):\n if symbolOrPriceOrNumber == \"symbol\" or symbolOrPriceOrNumber == \"price\" or symbolOrPriceOrNumber == \"number\":\n rows = db.execute(\"SELECT {0} FROM portfolio WHERE username=:username\".format(symbolOrPriceOrNumber), username=username)\n if symbolOrPriceOrNumber == \"symbol\" and len(rows) >= 1:\n namesList = []\n for row in rows:\n namesList.append(lookup(row[symbolOrPriceOrNumber])[\"name\"])\n return namesList\n elif symbolOrPriceOrNumber == \"price\" and len(rows) >= 1:\n pricseList = []\n for row in rows:\n pricseList.append(row[symbolOrPriceOrNumber])\n return pricseList\n elif symbolOrPriceOrNumber == \"number\" and len(rows) >= 1:\n numbersList = []\n for row in rows:\n numbersList.append(row[symbolOrPriceOrNumber])\n return numbersList\n else:\n return None\n else:\n return None\n\n def getTotalValueHolding(username):\n priceRow = db.execute(\"SELECT price FROM portfolio WHERE username=:username\", username=username)\n numberRow = db.execute(\"SELECT number FROM portfolio WHERE username=:username\", username=username)\n\n if len(priceRow) >= 1 and len(numberRow) >= 1 and len(priceRow) == len(numberRow):\n totalList = []\n for i in range(len(priceRow)):\n totalList.append(float(priceRow[i][\"price\"]) * float(numberRow[i][\"number\"]))\n\n return totalList\n\n username = db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"]\n companiesNames = getListOfCompanies(username, \"symbol\")\n numberOfShares = getListOfCompanies(username, \"number\")\n prices = getListOfCompanies(username, \"price\")\n totalValueHolding = getTotalValueHolding(username)\n\n currentCashBalance = db.execute(\"SELECT cash FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"cash\"]\n total = 0\n if totalValueHolding:\n for totalValue in totalValueHolding:\n total = total + totalValue\n\n cashAndStocksTotalValue = float(currentCashBalance) + total\n\n return render_template(\"index.html\", username=username, companiesNames=companiesNames, numberOfShares=numberOfShares,\n prices=prices, totalValueHolding=totalValueHolding, currentCashBalance=currentCashBalance, cashAndStocksTotalValue=cashAndStocksTotalValue)",
"def portfolio_table(self):\n idx = set(name.split('-')[0].split('.')[0] for name, etf in self.etfs.items() if not etf.sold())\n table = pd.DataFrame({'Invested': 0, 'Shares':0, 'Share Price':0, 'Present Value':0, 'P/L':0, 'P/L%':0},index=idx)\n for name, etf in self.etfs.items():\n if not etf.sold():\n table.loc[name.split('-')[0].split('.')[0], 'Invested'] += etf.initial_investment()\n table.loc[name.split('-')[0].split('.')[0], 'Shares'] += etf.n_shares\n table.loc[name.split('-')[0].split('.')[0], 'Share Price'] = etf.stock_price()\n table.loc[name.split('-')[0].split('.')[0], 'Present Value'] += etf.present_value()\n table.loc[name.split('-')[0].split('.')[0], 'P/L'] += etf.profit_loss()\n table.insert(1, 'PMA', round(table['Invested'] / table['Shares'], 2))\n table.insert(3, 'Initial Weight', round(table['Invested'] / table['Invested'].sum() * 100, 2))\n table.insert(4, 'Present Weight', round(table['Present Value'] / table['Present Value'].sum() * 100, 2))\n table['P/L%'] = round(table['P/L'] / table['Invested'] * 100, 2)\n table['P/L'] = round(table['P/L'], 2)\n table['Present Value'] = round(table['Present Value'], 2)\n return table.sort_values('Invested', 0, ascending=False)",
"def index():\n\n #select user's portfolio\n rows = db.execute(\"SELECT * FROM portfolio WHERE userid=:id\", id=session[\"user_id\"])\n\n #set temporary holding place for cash to zero\n tcash = 0\n\n #update the stock information in user's portfolio\n for row in rows:\n stock = row[\"stock\"]\n number = row[\"number\"]\n quote = lookup(stock)\n total = float(number) * float(quote[\"price\"])\n tcash += total\n db.execute(\"UPDATE portfolio SET price=:price, total=:total WHERE userid=:id AND stock=:stock AND number=:number\", price=usd(quote[\"price\"]), total=total, id=session[\"user_id\"], stock=stock, number=number)\n\n #select user's cash and updated portfolio\n updated_cash = db.execute(\"SELECT cash FROM users WHERE id=:id\", id=session[\"user_id\"])\n tcash += updated_cash[0][\"cash\"]\n updated_stock = db.execute(\"SELECT stock, SUM(number) AS number, price, SUM(total) AS stock_total FROM portfolio WHERE userid=:id GROUP BY stock HAVING SUM(number) > 0\", id=session[\"user_id\"])\n\n return render_template(\"index.html\", stocks=updated_stock, cash=usd(updated_cash[0][\"cash\"]), all_total=usd(tcash))",
"def update_portfolio_on_market(self, market: MarketEvent):\n self._portfolio.update_market_value(market)",
"def portfolio_analytics(port_returns, market_returns):\n\n # add the intercept to the model\n x2 = sm.add_constant(market_returns)\n\n # train the model\n estimator = sm.OLS(port_returns, x2)\n model = estimator.fit()\n\n # get portfolio analytics\n alpha, beta = model.params\n r_squared = model.rsquared\n regression = model.predict()\n\n return alpha, beta, r_squared, regression",
"def portfolio_analytics(port_returns, market_returns):\n\n # add the intercept to the model\n x2 = sm.add_constant(market_returns)\n\n # train the model\n estimator = sm.OLS(port_returns, x2)\n model = estimator.fit()\n\n # get portfolio analytics\n alpha, beta = model.params\n r_squared = model.rsquared\n regression = model.predict()\n\n return alpha, beta, r_squared, regression",
"def __init__(\n self,\n portfolio,\n market=None,\n commission_min=5.00,\n commission_pct=0.0,\n buy_percent=1.0,\n sell_percent=1.0,\n pm_threshold=0.0,\n pm_order=1.0,\n risk_free_return=1.0,\n name=None\n ):\n\n # Assumptions\n self.name = name if name else portfolio.name\n self.commission_min = commission_min\n self.commission_pct = commission_pct\n self.buy_percent = buy_percent\n self.sell_percent = sell_percent\n self.pm_threshold = pm_threshold\n self.pm_order = pm_order\n self.risk_free_return = risk_free_return\n self.performance = {}\n\n # Inputs\n self.portfolio = portfolio\n self.market = copy.deepcopy(market) if market else Asset(np.ones(len(self.portfolio.dates)))\n\n # Trading states\n self.long_open = {symbol:False for symbol in portfolio.assets.keys()}\n self.short_open = {symbol:False for symbol in portfolio.assets.keys()}\n\n # Keep track of intermidiate results for performance\n self.trade_data = []\n recordings = [\n 'buy price', 'buy shares', 'buy fees', 'buy date',\n 'sell price', 'sell shares', 'sell fees', 'sell date',\n 'gain', 'profit', 'loss', 'return', 'win/loose',\n 'min balance', 'min date', 'max balance', 'max date',\n 'drawdown', 'drawdown days',\n 'volatility', 'expected_return', 'beta', 'lpm', 'hpm',\n 'max', 'mean', 'min'\n ]\n self.record = {symbol:pd.DataFrame(columns=recordings) for symbol in portfolio.assets.keys()}\n self.max = {symbol:[portfolio.assets[symbol].c.iloc[0], None] for symbol in portfolio.assets.keys()}\n self.min = {symbol:[999999999999999, None] for symbol in portfolio.assets.keys()}\n self.drawdown = {symbol:[999999999999999, None] for symbol in portfolio.assets.keys()}",
"def get_stock(self, investor):\n\n # Find out the stock details \n sym, qty, price = investor.portfolios[0].portfolios[0]\n # p = investor.portfolios[0]\n \n # Check if broker has a portfolio\n if self.portfolios[0]:\n self.portfolios[0].add_stock(sym, qty, price)\n else:\n # Broker doesn't have a portfolio\n p = Portfolio()\n #logging.info(\"p is: %s\" % p)\n p.add_stock(sym, qty, price)\n self.add_portfolio(p)\n logging.info(\"Broker's portfolios AFTER addition: %s\" % self)\n # logging.info(\"WHAT ARE YOU\")\n logging.info(\"Investor portfolio BEFORE removal: %s\" % investor.portfolios[0].portfolios)\n investor.portfolios[0].remove_stock(sym, qty)\n logging.info(\"Investor portfolio AFTER removal: %s\" % investor.portfolios[0])\n # investor.portfolios[0].portfolios.remove( (sym, qty, price) )\n \n # investor.portfolios[0].remove(sym, qty, price)\n total_price = qty * price\n investor.portfolios[0].value -= total_price\n investor.cash += qty * float(price)",
"def _initalize_portfolio_with_cash(self):\n self.cash = copy.copy(self.starting_cash)\n\n if self.starting_cash > 0.0:\n self.history.append(\n PortfolioEvent.create_subscription(\n self.current_dt, self.starting_cash, self.starting_cash\n )\n )\n\n self.logger.info(\n '(%s) Funds subscribed to portfolio \"%s\" '\n '- Credit: %0.2f, Balance: %0.2f' % (\n self.current_dt.strftime(settings.LOGGING[\"DATE_FORMAT\"]),\n self.portfolio_id,\n round(self.starting_cash, 2),\n round(self.starting_cash, 2)\n )\n )",
"def before_trading_start(context, data):\n factors = pipeline_output('ff_example')\n\n # get the data we're going to use\n returns = factors['returns']\n mkt_cap = factors.sort_values(['market_cap'], ascending=True)\n be_me = factors.sort_values(['be_me'], ascending=True)\n\n # to compose the six portfolios, split our universe into portions\n half = int(len(mkt_cap)*0.5)\n small_caps = mkt_cap[:half]\n big_caps = mkt_cap[half:]\n \n thirty = int(len(be_me)*0.3)\n seventy = int(len(be_me)*0.7)\n growth = be_me[:thirty]\n neutral = be_me[thirty:seventy]\n value = be_me[seventy:]\n\n # now use the portions to construct the portfolios.\n # note: these portfolios are just lists (indices) of equities\n small_value = small_caps.index.intersection(value.index)\n small_neutral = small_caps.index.intersection(neutral.index)\n small_growth = small_caps.index.intersection(growth.index)\n \n big_value = big_caps.index.intersection(value.index)\n big_neutral = big_caps.index.intersection(neutral.index)\n big_growth = big_caps.index.intersection(growth.index)\n\n # take the mean to get the portfolio return, assuming uniform\n # allocation to its constituent equities.\n sv = returns[small_value].mean()\n sn = returns[small_neutral].mean()\n sg = returns[small_growth].mean()\n \n bv = returns[big_value].mean()\n bn = returns[big_neutral].mean()\n bg = returns[big_growth].mean()\n\n # computing SMB\n context.smb = (sv + sn + sg)/3 - (bv + bn + bg)/3\n\n # computing HML\n context.hml = (sv + bv)/2 - (sg + bg)/2",
"def calc_portfolio_risk(\n context,\n data,\n risk_func,\n hist_days=180,\n **kwargs):\n\n \n positions = context.portfolio.positions\n positions_index = pd.Index(positions)\n share_counts = pd.Series( \n index=positions_index, \n data=[positions[asset].amount for asset in positions] \n )\n\n current_prices = data.current(positions_index, 'price') \n current_weights = (\n share_counts * current_prices / context.portfolio.portfolio_value\n )\n \n prices = data.history(\n current_weights.index.tolist(),\n 'price',\n hist_days,\n '1d'\n )\n\n daily_rets = prices.pct_change()\n daily_rets = daily_rets - daily_rets.mean(skipna=True)\n daily_rets = daily_rets.fillna(0.0)\n\n risk = risk_func(current_weights.values, daily_rets, **kwargs)\n return risk",
"def __display_portfolio(self, p, w):\n\n global st_sort_key\n global st_reverse_sort\n\n line = 1\n total_assets = 0\n total_change = 0\n\n p.assets.sort(key=st_sort_key, reverse=st_reverse_sort)\n\n for s in p.assets:\n # Make sure we have space to write the portfolio totals.\n if line >= (curses.LINES - 3):\n break\n\n total_assets += (p.asset_counts[s.symb()] * s.price())\n total_change += (p.asset_counts[s.symb()] * s.change())\n\n # Color red/green for stocks going up/down.\n change_color = curses.color_pair(0)\n if s.change() > 0:\n change_color = curses.color_pair(1)\n elif s.change() < 0:\n change_color = curses.color_pair(2)\n\n direction = ''\n if s.change() > 0:\n direction = u'\\u25b2'\n elif s.change() < 0:\n direction = u'\\u25bc'\n\n w.addstr(line, 0, '%-15s' % s.name()[0:14])\n w.addstr(line, 16, '%-5s' % s.symb(), curses.A_BOLD)\n w.addstr(line, 22, '%9.2f' % s.price())\n w.addstr(line, 32, direction.encode('utf-8'), change_color)\n w.addstr(line, 33, '%6.2f %5.2f%%' % (abs(s.change()),\n abs(s.change_percent()) *\n 100),\n change_color)\n w.addstr(line, 47, '|')\n w.addstr(line, 49, '%-6d' % p.asset_counts[s.symb()])\n w.addstr(line, 56, '%11.2f' % (p.asset_counts[s.symb()] *\n s.price()))\n w.addstr(line, 68, '%10.2f' % (p.asset_counts[s.symb()] *\n s.change()),\n change_color)\n\n line += 1\n\n line += 1\n\n # Get overall change (of assets) for the portfolio.\n overall_change = total_assets - p.cost_basis()\n overall_color = curses.color_pair(0)\n if overall_change > 0:\n overall_color = curses.color_pair(1)\n elif overall_change < 0:\n overall_color = curses.color_pair(2)\n\n # Color red/green for assets changing.\n change_color = curses.color_pair(0)\n if total_change > 0:\n change_color = curses.color_pair(1)\n elif total_change < 0:\n change_color = curses.color_pair(2)\n\n # Print accumulated stats for the portfolio.\n w.addstr(line, 0, 'Daily:')\n w.addstr(line, 8, '$%.2f' % total_change,\n curses.A_BOLD | change_color)\n w.addstr(line, 23, 'Total:')\n w.addstr(line, 30, '$%.2f' % overall_change,\n curses.A_BOLD | overall_color)\n w.addstr(line + 1, 0, 'Assets:')\n w.addstr(line + 1, 8, '$%.2f' % total_assets)\n w.addstr(line + 1, 23, 'Cash: $%.2f' % p.cash)\n w.addstr(line + 1, 44, 'Total value:')\n w.addstr(line + 1, 58, '$%.2f' % (p.cash + total_assets),\n curses.A_BOLD)"
] | [
"0.7362575",
"0.6996456",
"0.6729907",
"0.6721263",
"0.6675463",
"0.66379255",
"0.6499",
"0.63661253",
"0.63091534",
"0.62968546",
"0.6240645",
"0.6196709",
"0.6144556",
"0.60243994",
"0.5963545",
"0.5962163",
"0.59054154",
"0.5858664",
"0.5812279",
"0.5781019",
"0.5715677",
"0.5695099",
"0.56832755",
"0.56832755",
"0.56502557",
"0.56142795",
"0.5601788",
"0.5591255",
"0.5557412",
"0.5552806"
] | 0.81566226 | 0 |
Override fields on ``self.account``. | def override_account_fields(self,
settled_cash=not_overridden,
accrued_interest=not_overridden,
buying_power=not_overridden,
equity_with_loan=not_overridden,
total_positions_value=not_overridden,
total_positions_exposure=not_overridden,
regt_equity=not_overridden,
regt_margin=not_overridden,
initial_margin_requirement=not_overridden,
maintenance_margin_requirement=not_overridden,
available_funds=not_overridden,
excess_liquidity=not_overridden,
cushion=not_overridden,
day_trades_remaining=not_overridden,
leverage=not_overridden,
net_leverage=not_overridden,
net_liquidation=not_overridden):
# mark that the portfolio is dirty to override the fields again
self._dirty_account = True
self._account_overrides = kwargs = {
k: v for k, v in locals().items() if v is not not_overridden
}
del kwargs['self'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def account(self, account):\n\n self._account = account",
"def account(self, account):\n\n self._account = account",
"def account(self, account):\n\n self._account = account",
"def account(self, account):\n\n self._account = account",
"def patch(self, account=None, user=None, account_id=None):\n return super().patch()",
"def account(self, account: str):\n self._account = account",
"def get_account(self, account):\n \n pass",
"def set_account(self):\n return self.__Account",
"def onAccountUpdate(self, data):\n pass",
"def account(self):\r\n return Account(self)",
"def put(self, account=None, user=None, account_id=None):\n return super().put()",
"def set_specific_fields(self):\n raise NotImplementedError(\"Must be defined by subclass!\")",
"def change_account(self, account):\r\n check_account = Account(account, steem_instance=self.steem)\r\n self.account = check_account[\"name\"]\r\n self.refresh()",
"def get_account_details(self):\n pass",
"def put_account(self, account):\n \n pass",
"def update_account_data(self):\n self.ensure_one()\n getattr(self, '%s_update_account_data' % self.provider, lambda: None)()",
"def account(self):\n return Account(self)",
"def account_information(self) -> MetatraderAccountInformation:\n return self._accountInformation",
"def __init__(self, client, account_id):\n\n super(AccountsMixin, self).__init__(client)\n self._account_id = account_id",
"def account_id(self, account_id):\n self._account_id = account_id",
"def account(self, account_code):\r\n return acc.Account(self, account_code)",
"def extended_account_data(self,accountinfo):\n\n data = {\n 'username' : accountinfo['username'],\n 'password' : accountinfo['password'],\n 'passwordconfirm' : accountinfo['password'],\n 'firstname' : accountinfo['firstname'],\n 'middlename' : accountinfo['middlename'],\n 'lastname' : accountinfo['lastname'],\n 'email' : accountinfo['email'],\n 'emailconfirm' : accountinfo['email'],\n 'gender' : 'Refused',\n 'disability' : 'Refused',\n 'hispanic' : 'Refused',\n 'race_refused' : 'Yes',\n 'captcha' : False,\n 'usageagreement' : True,\n }\n return data",
"def to_representation(self, instance):\n data = super(AccountSerializer, self).to_representation(instance)\n data[\"display_name\"] = instance.name\n return data",
"def _account(self) -> Account:\n if isinstance(self._node_cached_account, Account):\n return self._node_cached_account\n account = Account.retrieve(\n session=self.entity.session,\n entity=self.entity,\n account_id=self.account_id\n )\n self._node_cached_account = account\n return account",
"def account_amount(self, account_amount):\n\n self._account_amount = account_amount",
"def get_account(self):\n return self._account",
"def get_account(self):\n return self._account",
"def account_id(self, account_id):\n\n self._account_id = account_id",
"def account_id(self, account_id):\n\n self._account_id = account_id",
"def account_id(self, account_id):\n\n self._account_id = account_id"
] | [
"0.6413226",
"0.6413226",
"0.6413226",
"0.6413226",
"0.6343206",
"0.6329128",
"0.6322581",
"0.63135093",
"0.61441493",
"0.60770935",
"0.60361296",
"0.6022793",
"0.60135746",
"0.60068023",
"0.59981",
"0.59467715",
"0.58997744",
"0.5875439",
"0.5858766",
"0.5787134",
"0.5765871",
"0.5693723",
"0.5661822",
"0.56605464",
"0.5647849",
"0.5623891",
"0.5623891",
"0.56091964",
"0.56091964",
"0.56091964"
] | 0.67399603 | 0 |
Called when the partition's reference count reaches zero. If the partition contains a temporary file which is not referenced by any other partition then the temporary file is removed from disk. If the partition contains a nontemporary file which is not referenced by any other partition then the file is closed. | def __del__(self):
# subarray = getattr(self, '_subarray', None)
subarray = self._subarray
# If the subarray is unique it will have 2 references to
# it plus 1 within this method, making 3. If it has more
# than 3 references to it then it is not unique.
if getrefcount is not None:
self._decrement_file_counter()
if subarray is None or getrefcount(subarray) > 3:
return
else:
# getrefcount has itself been deleted or is in the process
# of being torn down
return
_partition_file = getattr(subarray, "_partition_file", None)
if _partition_file is not None:
# This partition contains a temporary file which is not
# referenced by any other partition on this process, so if
# there are no lock files present remove the file from
# disk.
_remove_temporary_files(_partition_file)
else:
try:
if FileArray is not None and isinstance(subarray, FileArray):
try:
filename = subarray.get_filename()
except Exception:
filename = None
if self.file_counter.get(filename, 999) <= 0:
# This partition contains a non-temporary file
# which is not referenced by any other
# partitions, so close the file.
subarray.close()
except Exception:
# If we're here then it is likely that FileArray has been
# torn down, so just do nothing.
pass
# --- End: if | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_file_deleted(self):\n try:\n with get_temp_file() as (fd, name):\n os.unlink(name)\n except Exception as err:\n self.fail('Failed with exception \"{}\"'.format(err))",
"def _Close(self):\n self._fsfat_volume = None\n self._file_object = None",
"def __del__(self):\n if (\n self._fpointer is not None and not self._fpointer.closed\n ): # pragma: no mutate\n self._fpointer.close()",
"def clean(self):\n if os.path.exists(self.initial):\n if os.path.exists(self.path) and os.stat(self.path).st_size == os.stat(\n self.initial).st_size:\n os.remove(self.initial)\n else:\n # if it doesn't match, something probably crashed; rename the temporary file and\n # it'll get uploaded at some point\n self.auto_filename()\n self.rename()\n self.connect()\n os.remove(self.initial)\n if os.path.exists(self.path):\n os.remove(self.path)\n self.filename_set = False",
"def _cleanup(self, fnum):\n while os.path.exists('%s.%s' % (self.name, fnum)):\n try:\n fname = '%s.%s' % (self.name, fnum)\n os.unlink(fname)\n # self.log.debug(\"Cleaned up file: %s\", fname)\n except:\n pass\n fnum -= 1",
"def test_file_unused(self):\n try:\n with get_temp_file() as (fd, name):\n pass\n except Exception as err:\n self.fail('Failed with exception \"{}\"'.format(err))\n else:\n file_exists = os.access(name, os.F_OK)\n self.assertFalse(file_exists)",
"def __purge_old_files(self):\n\n chkpts = self.checkpointer.sorted_checkpoints()\n p_chkpts = []\n e_chkpts = []\n for c in chkpts:\n if c.startswith(self.checkpointer.prefix + CheckpointingCallback.PERIODIC_PREFIX):\n p_chkpts.append(c)\n\n if c.startswith(self.checkpointer.prefix + CheckpointingCallback.EPOCH_PREFIX):\n e_chkpts.append(c)\n\n # Delete periodic checkpoints\n if self.max_files is not None and len(p_chkpts) > self.max_files:\n for c in p_chkpts[self.max_files:]:\n log.debug(\"CheckpointingCallback deleting {}\".format(c))\n self.checkpointer.delete(c)\n\n # Delete older epochs\n if self.max_epochs is not None and len(e_chkpts) > self.max_epochs:\n for c in e_chkpts[self.max_epochs:]:\n log.debug(\"CheckpointingCallback deleting (epoch) {}\".format(c))\n self.checkpointer.delete(c)",
"def __del__(self):\n if self.file is None:\n return\n try:\n self.file.close()\n del self.file\n self.file = None\n except:\n getLogger(__name__).warning('Error on file close', exc_info=True)",
"def __del__(self):\n self.close_files()",
"def __del__(self):\r\n self.filename.close()",
"def flow_file_chunk_delete(sender, instance, **kwargs):\n instance.file.delete(False)",
"def _recover_disk_space(self):\n while self.used_disk_space > self.cache_size:\n space_to_recover = self.used_disk_space - self.cache_size\n logger.info('Recovering disk space %s', space_to_recover)\n lru_file = self.touch_list.pop(0)\n file_path = self._path_to_file(lru_file)\n logger.info('Deleting %s', file_path)\n os.remove(file_path)\n del self.index[lru_file]",
"def __del__(self):\n\t\tif self.temp_dir:\n\t\t\tself.temp_dir.cleanup()",
"def tearDown(self):\n try:\n os.remove(self.junk_file)\n except OSError as doh:\n if doh.errno == 2:\n # No such File, ignore\n pass\n else:\n raise",
"def __del__(self) -> None:\n try:\n shutil.rmtree(self.temp_path)\n except FileNotFoundError:\n pass",
"def Close(self):\n super(CPIOArchiveFile, self).Close()\n self._file_entries = None",
"def dispose(self):\n rmtree(self._temp_path)",
"def unlink(self,):\n self._wait()\n self.fd.close()\n self.fd = None\n os.unlink(self.fname)",
"def __del__(self):\n self.file.close()",
"def __del__(self):\r\n self.chunk = None",
"def endWrite(self, withErrors):\r\n #if withErrors or self._file_obj.get_seek()>0:\r\n self.provider.cache_fs.remove(self.path)",
"def release(self, path, fh, *args, **pargs):\n with(self.rwlock):\n # If we're closing a FLACCue file...\n if(path in self._open_subtracks):\n # Delete the file handle from the stored list.\n del self._open_subtracks[path]['Positions'][fh]\n # Close the OS reference to the file.\n return os.close(fh)",
"def WriteAbort(self):\n if self._file_object:\n self._file_object.close()\n self._file_object = None\n\n if os.path.exists(self.name):\n os.remove(self.name)",
"def __del__(self):\n for handle in self._filehandles:\n handle.close()",
"def __del__(self):\n self.file_out.close()",
"def __del__(self):\n self.f.close()",
"def unique_files(self):\n self._tempfiles[-1].ctr = -1",
"def test_ClearOldFile(self):\n q = Queue(self.path, chunksize=10)\n for i in range(15):\n q.put('var1')\n\n for i in range(11):\n q.get()\n\n q = Queue(self.path, chunksize=10)\n self.assertEqual(q.qsize(), 15)\n\n for i in range(11):\n q.get()\n q.task_done()\n self.assertEqual(q.qsize(), 4)",
"def release(self):\n #关闭文件,删除文件\n if self.fd is not None:\n os.close(self.fd)\n os.unlink(self.lockfile)\n self.is_locked = False\n self.fd = None",
"def releaseFile(self, fid):\n if fid in self.files:\n del self.files[fid]"
] | [
"0.637165",
"0.6141179",
"0.6079966",
"0.60165036",
"0.5893113",
"0.5771018",
"0.5769767",
"0.5752214",
"0.5731687",
"0.5730452",
"0.57243013",
"0.5715975",
"0.5715163",
"0.5711628",
"0.5696097",
"0.56944114",
"0.56878215",
"0.56815344",
"0.5681272",
"0.5633728",
"0.5630855",
"0.56236386",
"0.5620949",
"0.5599183",
"0.5533983",
"0.5530115",
"0.552735",
"0.5516672",
"0.55129987",
"0.5509583"
] | 0.61570215 | 1 |
Add i to the count of subarrays referencing the file of this partition's subarray. Only do this if self._subarray is an instance of FileArray, but not a temporary FileArray. | def _add_to_file_counter(self, i):
# subarray = getattr(self, '_subarray', None)
subarray = self._subarray
if subarray is None:
return
try:
if isinstance(subarray, FileArray) and not isinstance(
subarray, CachedArray
):
try:
filename = subarray.get_filename()
except Exception:
filename = None
if filename is None:
return
file_counter = self.file_counter
# count = file_counter.get(filename, 0)
# file_counter[filename] = count + i
# if file_counter[filename] <= 0:
count = file_counter.get(filename, 0) + i
if count <= 0:
# Remove the file from the dictionary if its count has
# dropped to zero
file_counter.pop(filename, None)
else:
file_counter[filename] = count
except Exception:
# If we're here then it is likely that FileArray has been
# torn down, so just do nothing.
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def n_subfile(self):\n self.assert_is_dir_and_exists()\n n = 0\n for _ in self.select_file(recursive=False):\n n += 1\n return n",
"def update(self, i, v):\n # index in BTree is 1 more than index in arr[]\n i += 1\n\n # Traverse to ancestors of BITree[i]\n while i <= self.size:\n self.BITree[i] += v\n\n # Update index to next set bit in binary representation\n i += i & (-i)",
"def __del__(self):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n # If the subarray is unique it will have 2 references to\n # it plus 1 within this method, making 3. If it has more\n # than 3 references to it then it is not unique.\n if getrefcount is not None:\n self._decrement_file_counter()\n if subarray is None or getrefcount(subarray) > 3:\n return\n else:\n # getrefcount has itself been deleted or is in the process\n # of being torn down\n return\n\n _partition_file = getattr(subarray, \"_partition_file\", None)\n if _partition_file is not None:\n # This partition contains a temporary file which is not\n # referenced by any other partition on this process, so if\n # there are no lock files present remove the file from\n # disk.\n _remove_temporary_files(_partition_file)\n\n else:\n try:\n if FileArray is not None and isinstance(subarray, FileArray):\n try:\n filename = subarray.get_filename()\n except Exception:\n filename = None\n\n if self.file_counter.get(filename, 999) <= 0:\n # This partition contains a non-temporary file\n # which is not referenced by any other\n # partitions, so close the file.\n subarray.close()\n except Exception:\n # If we're here then it is likely that FileArray has been\n # torn down, so just do nothing.\n pass\n # --- End: if",
"def write_sub_index(self):\n for sie in self.subIndex:\n self.db_file.write(sie.get_representation())",
"def add(self, i: int, v: int) -> None:\n while i < self.size:\n self.tree[i] += v\n i += self._lsb(i)",
"def __setitem__(self, filenr, data_arr):\n cvcfile = self.filenames[filenr]\n cvcpath = os.path.join(self.filefolder, cvcfile)\n data_arr.tofile(cvcpath)",
"def _increment_file_counter(self):\n self._add_to_file_counter(1)",
"def add_index(self, idx, subproblem_shape):\n self.indices.append(int(idx))\n self.subproblem_shapes.append(subproblem_shape)",
"def add_photo(self, new_photo, i):\r\n self.__photos[i] = new_photo",
"def append(self, i):\n \n self.ret.append(i)",
"def array(self):\n config = self.config\n\n unique_array = config[\"unique_subarray\"]\n\n p_axes = self.axes\n p_flip = self.flip\n p_part = self.part\n p_units = self.Units\n p_shape = self.shape\n p_location = self.location\n subarray = self._subarray\n\n len_p_axes = len(p_axes)\n\n if not self.in_memory:\n # --------------------------------------------------------\n # The subarray is not in memory.\n #\n # It could be in a file on disk or implied by a FileArray\n # object, etc.\n # --------------------------------------------------------\n self._original = self.copy()\n\n unique_array = True\n update = True\n copy = False\n\n if not p_part:\n indices = Ellipsis\n else:\n indices = tuple(p_part)\n\n # Read from a file into a numpy array\n p_data = subarray[indices]\n\n # We've just copied p_data from disk, so in place changes\n # are not possible\n in_place_changes = False\n else:\n # --------------------------------------------------------\n # The subarray is in memory\n # --------------------------------------------------------\n update = config[\"update\"]\n\n if p_part:\n p_data = get_subspace(subarray, p_part)\n elif not unique_array:\n p_data = subarray.view()\n else:\n p_data = subarray\n\n copy = config[\"extra_memory\"]\n\n # In place changes to p_data might be possible if we're not\n # copying the data\n in_place_changes = not copy\n\n if not p_data.ndim and isinstance(p_data, (numpy_number, numpy_bool_)):\n # --------------------------------------------------------\n # p_data is a numpy number (like numpy.int64) which does\n # not support assignment, so convert it to a numpy array.\n # --------------------------------------------------------\n p_data = numpy_array(p_data)\n # We've just copied p_data, so in place changes are\n # not possible\n copy = False\n in_place_changes = False\n\n masked = numpy_ma_isMA(p_data)\n if masked:\n # The p_data is a masked array\n if p_data.mask is numpy_ma_nomask or not numpy_ma_is_masked(\n p_data\n ):\n # There are no missing data points so recast as an\n # unmasked numpy array\n p_data = p_data.data\n masked = False\n # --- End: if\n\n if masked:\n # Set the hardness of the mask\n if config[\"hardmask\"]:\n p_data.harden_mask()\n else:\n p_data.soften_mask()\n # --- End: if\n\n self.masked = masked\n\n # ------------------------------------------------------------\n # Make sure that the data array has the correct units. This\n # process will deep copy the data array if required (e.g. 
if\n # another partition is referencing this numpy array), even if\n # the units are already correct.\n # ------------------------------------------------------------\n func = config.get(\"func\")\n units = config[\"units\"]\n if func is None:\n if not p_units.equals(units) and bool(p_units) is bool(units):\n func = Units.conform\n\n if func is not None:\n inplace = not copy\n p_data = func(p_data, p_units, units, inplace)\n p_units = units\n\n if not inplace:\n # We've just copied p_data, so in place changes are\n # not possible\n copy = False\n in_place_changes = False\n # --- End: if\n\n flip = config.get(\"flip\", None)\n if flip or p_flip:\n flip_axes = set(p_flip).symmetric_difference(flip)\n else:\n flip_axes = None\n\n axes = config[\"axes\"]\n\n if p_data.size > 1:\n # --------------------------------------------------------\n # Flip axes\n # --------------------------------------------------------\n if flip_axes:\n indices = [\n (\n slice(None, None, -1)\n if axis in flip_axes\n else slice(None)\n )\n for axis in p_axes\n ]\n p_data = p_data[tuple(indices)]\n\n # --------------------------------------------------------\n # Transpose axes\n # --------------------------------------------------------\n if p_axes != axes:\n iaxes = [p_axes.index(axis) for axis in axes if axis in p_axes]\n\n if len_p_axes > len(iaxes):\n for i in range(len_p_axes):\n if i not in iaxes:\n # iaxes.append(i)\n iaxes.insert(i, i)\n # --- End: if\n\n p_data = numpy_transpose(p_data, iaxes)\n # --- End: if\n\n # ------------------------------------------------------------\n # Remove excessive/insert missing size 1 axes\n # ------------------------------------------------------------\n if p_shape != p_data.shape:\n # if len_p_axes != len(p_shape):\n p_data = p_data.reshape(p_shape)\n\n # ------------------------------------------------------------\n # Apply the auxiliary mask\n # ------------------------------------------------------------\n auxiliary_mask = config[\"auxiliary_mask\"]\n if auxiliary_mask:\n for mask in auxiliary_mask:\n if mask.any():\n if not masked:\n p_data = p_data.view(numpy_ma_MaskedArray)\n masked = True\n\n p_data.mask = (mask | p_data.mask).array\n # --- End: for\n\n self.masked = True\n\n # ------------------------------------------------------------\n # Convert the array's data type\n # ------------------------------------------------------------\n p_dtype = p_data.dtype\n dtype = config.get(\"dtype\", None)\n if dtype is not None and dtype != p_dtype:\n try:\n p_data = p_data.astype(dtype) # Note: returns a copy\n except ValueError:\n raise ValueError(\n \"Can't recast partition array from {} to {}\".format(\n p_dtype.name, dtype.name\n )\n )\n else:\n # We've just copied p_data, so in place changes are\n # not possible\n copy = False\n in_place_changes = False\n # --- End: if\n\n # ------------------------------------------------------------\n # Copy the array\n # -----------------------------------------------------------\n if copy:\n if p_dtype.char != \"O\":\n if not masked or p_data.ndim > 0:\n p_data = p_data.copy()\n else:\n # This is because numpy.ma.copy doesn't work for\n # scalar arrays (at the moment, at least)\n p_data = numpy_ma_masked_all((), p_data.dtype)\n\n # We've just copied p_data, so in place changes are\n # not possible\n in_place_changes = False\n else:\n # whilst netCDF4.netcdftime.datetime is mucking bout,\n # don't copy!!!!\n # p_data = _copy(p_data)\n pass\n # --- End: if\n\n # ------------------------------------------------------------\n # Update the 
partition\n # ------------------------------------------------------------\n if update:\n self.subarray = p_data # ?? DCH CHECK\n self.Units = p_units\n self.part = []\n self.axes = axes\n self.flip = flip\n self.flatten = []\n self.shape = p_shape\n self.location = p_location\n\n self._in_place_changes = in_place_changes\n\n # ------------------------------------------------------------\n # Return the numpy array\n # ------------------------------------------------------------\n return p_data",
"def __len__(self):\n return len(self.files[self.split])",
"def _extend_contiguous_traj_field(self, run_idx, traj_idx, field_path, field_data):\n\n traj_grp = self.h5['{}/{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES, traj_idx)]\n field = traj_grp[field_path]\n\n # make sure this is a feature vector\n assert len(field_data.shape) > 1, \\\n \"field_data must be a feature vector with the same number of dimensions as the number\"\n\n # of datase new frames\n n_new_frames = field_data.shape[0]\n\n # check the field to make sure it is not empty\n if all([i == 0 for i in field.shape]):\n\n # check the feature shape against the maxshape which gives\n # the feature dimensions for an empty dataset\n assert field_data.shape[1:] == field.maxshape[1:], \\\n \"field feature dimensions must be the same, i.e. all but the first dimension\"\n\n # if it is empty resize it to make an array the size of\n # the new field_data with the maxshape for the feature\n # dimensions\n feature_dims = field.maxshape[1:]\n field.resize( (n_new_frames, *feature_dims) )\n\n # set the new data to this\n field[0:, ...] = field_data\n\n else:\n # make sure the new data has the right dimensions against\n # the shape it already has\n assert field_data.shape[1:] == field.shape[1:], \\\n \"field feature dimensions must be the same, i.e. all but the first dimension\"\n\n\n # append to the dataset on the first dimension, keeping the\n # others the same, these must be feature vectors and therefore\n # must exist\n field.resize( (field.shape[0] + n_new_frames, *field.shape[1:]) )\n # add the new data\n field[-n_new_frames:, ...] = field_data",
"def append_filepath(self, filepath):\n idx = len(self.t_sect['filepaths'])\n self.t_sect['filepaths'].append(filepath)\n return idx",
"def write_sub_4(self):\n self.subIndex[constants.sub_4_genre_albums].offset = (\n self.db_file.tell())\n self.subIndex[constants.sub_4_genre_albums].size = 8\n self.subIndex[constants.sub_4_genre_albums].count = (\n len(self.genreIndex) - 1)\n\n entry_offset = 0\n for giEntry in self.genreIndex[1:]:\n self.db_file.write(\n struct.pack(\n \"<HHHH\",\n giEntry.number,\n entry_offset,\n giEntry.number_of_albums,\n 0x0000))\n entry_offset += giEntry.number_of_albums",
"def load(self, i: int) -> np.ndarray:\n raise NotImplementedError(\"Do not call load from BaseLoader\")",
"def write_all_sub_indices(self):\n\n # remember where we are.\n temp_offset_1 = self.db_file.tell()\n\n # Write a filler for the relative offset to the first table\n self.db_file.write(struct.pack(\"<I\", 0x00000000))\n\n # Write the sub index entries (blank at this stage)\n self.write_sub_index()\n\n # self.subIndex[constants.sub_0_genre_performers].offset = \\\n # self.db_file.tell()\n self.write_sub_0()\n\n # self.subIndex[constants.sub_1_genre_performer_albums].offset = \\\n # self.db_file.tell()\n self.write_sub_1()\n\n # self.subIndex[constants.sub_2_genre_performer_album_titles].offset = \\\n # self.db_file.tell()\n self.write_sub_2()\n\n # self.subIndex[constants.sub_3_genre_ordered_titles].offset = \\\n # self.db_file.tell()\n self.write_sub_3()\n\n # self.subIndex[constants.sub_4_genre_albums].offset = \\\n # self.db_file.tell()\n self.write_sub_4()\n\n # self.subIndex[constants.sub_5_genre_album_titles].offset = \\\n # self.db_file.tell()\n self.write_sub_5()\n\n # self.subIndex[constants.sub_6_genre_titles].offset = \\\n # self.db_file.tell()\n self.write_sub_6()\n\n # self.subIndex[constants.sub_7_performer_albums].offset = \\\n # self.db_file.tell()\n self.write_sub_7()\n\n # self.subIndex[constants.sub_8_performer_album_titles].offset = \\\n # self.db_file.tell()\n self.write_sub_8()\n\n # self.subIndex[constants.sub_9_performer_titles].offset = \\\n # self.db_file.tell()\n self.write_sub_9()\n\n # self.subIndex[constants.sub_10_genre_performers].offset = \\\n # self.db_file.tell()\n self.write_sub_10()\n\n # self.subIndex[constants.sub_11_genre_performer_titles].offset = \\\n # self.db_file.tell()\n self.write_sub_11()\n\n # self.subIndex[constants.sub_12_genre_ordered_titles].offset = \\\n # self.db_file.tell()\n self.write_sub_12()\n\n # Remeber where we are\n temp_offset_2 = self.db_file.tell()\n\n # Go back to the start\n self.db_file.seek(temp_offset_1)\n\n # Write the offset to the first table\n self.db_file.write(\n struct.pack(\n \"<I\",\n self.subIndex[constants.sub_0_genre_performers].offset -\n temp_offset_1))\n\n # Write the real data now\n self.write_sub_index()\n\n # Go to the end\n self.db_file.seek(temp_offset_2)",
"def extend(self, i):\n for x in i:\n self.add(x)",
"def update_subvarga(self, subvarga):\n\t\tself.subvarga = subvarga\n\t\tself.subvargaNum += 1",
"def _write_array_on_file(self, pa_array):\n pa_batch = pa.RecordBatch.from_struct_array(pa_array)\n self._num_bytes += pa_array.nbytes\n self.pa_writer.write_batch(pa_batch)",
"def add(perm, i):\n for j in self.increasing_children(i):\n add(perm, j)\n perm.append(i)",
"def enqueue(self, i):\n if len(self) == self.capacity:\n self._resize(self.capacity*2)\n\n if self.tail == self.capacity:\n self.tail = 0\n\n self.lst[self.tail] = i\n self.tail += 1\n self.n += 1",
"def _update_subfiles(self) -> None:\n\t\t# Clear list of subfiles\n\t\tself.subfiles.clear()\n\t\t# Iterate over Nodes\n\t\tfor node in self.nodes:\n\t\t\tfor file in node.get_subfiles():\n\t\t\t\tself.subfiles.add(\"{}/{}\".format(self.xml_dir, file))\n\t\t# Iterate over SubNodes\n\t\tfor subnode in self.subnodes:\n\t\t\tfor file in subnode.filenames:\n\t\t\t\tself.subfiles.add(\"{}/{}\".format(self.xml_dir, file))",
"def sequential_files(self, ctr=0):\n self._tempfiles[-1].ctr = ctr",
"def add(perm, i):\n for j in self.decreasing_children(i):\n add(perm, j)\n perm.append(i)",
"def extra_memory(self):\n if not self.in_memory:\n # --------------------------------------------------------\n # The subarray is on disk so getting the partition's data\n # array will require extra memory\n # --------------------------------------------------------\n extra_memory = True\n else:\n # --------------------------------------------------------\n # The subarray is already in memory\n # --------------------------------------------------------\n config = self.config\n\n p_part = self.part\n if p_part:\n extra_memory = True\n elif not config[\"unique_subarray\"]:\n extra_memory = True\n else:\n p_data = self._subarray\n\n if not numpy_ma_isMA(p_data):\n # The p_data is not a masked array\n extra_memory = isinstance(p_data.base, numpy_ndarray)\n else:\n # The p_data is a masked array\n memory_overlap = isinstance(\n p_data.data.base, numpy_ndarray\n )\n if not (\n p_data.mask is numpy_ma_nomask\n or not numpy_ma_is_masked(p_data)\n ):\n # There is at least one missing data point\n memory_overlap |= isinstance(\n p_data.mask.base, numpy_ndarray\n )\n\n extra_memory = memory_overlap\n # --- End: if\n\n p_dtype = p_data.dtype\n\n if not extra_memory:\n if config[\"func\"] is not None:\n extra_memory = True\n else:\n p_units = self.Units\n units = config[\"units\"]\n if (\n not p_units.equals(units)\n and bool(p_units) is bool(units)\n and not (\n p_data.flags[\"C_CONTIGUOUS\"]\n and p_dtype.kind == \"f\"\n )\n ):\n extra_memory = True\n\n # ------------------------------------------------------------\n # Extra memory is required if the dtype needs changing\n # ------------------------------------------------------------\n if not extra_memory:\n dtype = config[\"dtype\"]\n if dtype is not None and dtype != p_data.dtype:\n extra_memory = True\n # --- End: if\n\n # ------------------------------------------------------------\n # Amount of extra memory (in bytes) required to access the\n # array\n # ------------------------------------------------------------\n return self.nbytes if extra_memory else 0",
"def _dfs(self, i):\n self.tracks[i] = self.cnt\n for j in self.edges[i]:\n if self.tracks[j] == -1:\n self._dfs(j)",
"def get_sum(self, i):\n s = 0\n\n # index in BITree is 1 more than index in arr[]\n i += 1\n\n # Traverse to leaves of BITree[i]:\n while i > 0:\n s += self.BITree[i]\n\n # Move index to parent node (next set bit in binary representation)\n i -= i & (-i)\n\n return s",
"def nbytes_at(self, device_id:int):\n if self._slices:\n if isinstance(self._coherence._local_states[device_id], dict): # there are subarrays no this device\n if self._slices_hash in self._coherence._local_states[device_id].keys(): # this subarray is already there\n return self._array.nbytes_at(device_id)\n else: # the subarray will be moved to there\n return self._array.nbytes_at(device_id) + self.subarray_nbytes # add the incoming subarray size\n else: # there is a complete copy on this device, no need to prepare subarray\n return self.nbytes\n else:\n return self.nbytes",
"def inc_size(self):\r\n self.__length += 1"
] | [
"0.5323221",
"0.5309046",
"0.5237938",
"0.52028364",
"0.51664454",
"0.5117871",
"0.51150346",
"0.5107957",
"0.50491905",
"0.50423014",
"0.5001557",
"0.499577",
"0.49840355",
"0.4932267",
"0.4909338",
"0.48659304",
"0.48641643",
"0.48607743",
"0.48591715",
"0.4854085",
"0.48242405",
"0.4823937",
"0.481012",
"0.48052314",
"0.479596",
"0.47512335",
"0.47405446",
"0.47338173",
"0.4703466",
"0.4691803"
] | 0.8270168 | 0 |
Add 1 to the Partition.file_counter if self._subarray is an instance of FileArray and not a temporary FileArray. | def _increment_file_counter(self):
self._add_to_file_counter(1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _add_to_file_counter(self, i):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n if subarray is None:\n return\n\n try:\n if isinstance(subarray, FileArray) and not isinstance(\n subarray, CachedArray\n ):\n try:\n filename = subarray.get_filename()\n except Exception:\n filename = None\n\n if filename is None:\n return\n\n file_counter = self.file_counter\n # count = file_counter.get(filename, 0)\n # file_counter[filename] = count + i\n # if file_counter[filename] <= 0:\n count = file_counter.get(filename, 0) + i\n if count <= 0:\n # Remove the file from the dictionary if its count has\n # dropped to zero\n file_counter.pop(filename, None)\n else:\n file_counter[filename] = count\n except Exception:\n # If we're here then it is likely that FileArray has been\n # torn down, so just do nothing.\n pass",
"def __len__(self):\n return len(self.files[self.split])",
"def _decrement_file_counter(self):\n self._add_to_file_counter(-1)",
"def fileCount(self):\n pass",
"def getFileCount(self) -> int:\n ...",
"def numberFiles(self):\n with open(self.inputfile) as fin:\n for n, _ in enumerate(fin, start=1): pass\n self.n = n\n return self.n",
"def numberFiles(self):\n return self.n",
"def count_files(self):\n self.file_count = 0\n self.count_files_loop(self.dirpath)\n return",
"def unique_files(self):\n self._tempfiles[-1].ctr = -1",
"def __len__(self) -> int:\n return len(self.files)",
"def stat_beg_file(self, filename):\n\n self.batchvals['numfiles'] += 1\n self.filevals['filename'] = filename\n self.filevals['start_time'] = time.time()\n\n return -1",
"def __len__(self):\n return len(self.files)",
"def file_num_increment(full_fpath):\r\n while os.path.isfile(full_fpath) == True:\r\n \r\n fpath, fext = os.path.splitext(full_fpath) #['C:\\Users\\Desktop\\file(1)', '.ext']\r\n\r\n if re.findall(\"[(]\\d+[)]\", fpath) != []: #Check if there is (x) in the path.\r\n for counter in range(1000): #Loop 1000 times\r\n if fpath.endswith(f\"({counter})\"): \r\n fpath = replace_last(fpath, f\"({counter})\", f\"({counter+1})\") #Replace the last occurence of (counter) in the string.\r\n full_fpath = fpath + fext\r\n break\r\n else: #here we pass for cases where (counter) is in the file/folder name itself. We skip them.\r\n continue\r\n else: #If there is no (counter), we create (1)\r\n counter = 1\r\n full_fpath = fpath + '(' + str(counter) + ')' + fext\r\n\r\n return full_fpath",
"def __len__(self):\n\n return len(self._file_list)",
"def sequential_files(self, ctr=0):\n self._tempfiles[-1].ctr = ctr",
"def __del__(self):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n # If the subarray is unique it will have 2 references to\n # it plus 1 within this method, making 3. If it has more\n # than 3 references to it then it is not unique.\n if getrefcount is not None:\n self._decrement_file_counter()\n if subarray is None or getrefcount(subarray) > 3:\n return\n else:\n # getrefcount has itself been deleted or is in the process\n # of being torn down\n return\n\n _partition_file = getattr(subarray, \"_partition_file\", None)\n if _partition_file is not None:\n # This partition contains a temporary file which is not\n # referenced by any other partition on this process, so if\n # there are no lock files present remove the file from\n # disk.\n _remove_temporary_files(_partition_file)\n\n else:\n try:\n if FileArray is not None and isinstance(subarray, FileArray):\n try:\n filename = subarray.get_filename()\n except Exception:\n filename = None\n\n if self.file_counter.get(filename, 999) <= 0:\n # This partition contains a non-temporary file\n # which is not referenced by any other\n # partitions, so close the file.\n subarray.close()\n except Exception:\n # If we're here then it is likely that FileArray has been\n # torn down, so just do nothing.\n pass\n # --- End: if",
"def _write_array_on_file(self, pa_array):\n pa_batch = pa.RecordBatch.from_struct_array(pa_array)\n self._num_bytes += pa_array.nbytes\n self.pa_writer.write_batch(pa_batch)",
"def on_disk(self):\n return isinstance(self._subarray, FileArray)",
"def n_subfile(self):\n self.assert_is_dir_and_exists()\n n = 0\n for _ in self.select_file(recursive=False):\n n += 1\n return n",
"def __number_of_files(self):\n self.__get_files()\n return len(self.files)",
"def fileCounter(directory):",
"def n_total_files(self):\n return len(self.fileinfo)",
"def setNextFile(self):\n\n if (self.nReadBlocks >= self.processingHeaderObj.dataBlocksPerFile):\n self.nReadFiles=self.nReadFiles+1\n if self.nReadFiles > self.nTotalReadFiles:\n self.flagNoMoreFiles=1\n raise schainpy.admin.SchainWarning('No more files to read')\n\n print('------------------- [Opening file] ------------------------------',self.nReadFiles)\n self.nReadBlocks = 0\n #if self.nReadBlocks==0:\n # self.readFirstHeader()",
"def number_idx(self, filename):\n with open(filename) as fh:\n firstline = fh.readline()\n parts = firstline.split('\\t')\n # only add if there are 4 parts\n if len(parts) != 4:\n return\n\n count = 1\n def writeline(fho, line, count):\n fho.write(line.rstrip() + '\\t' + str(count) + '\\n')\n\n with open(filename + '.tmp', 'w+b') as fho:\n writeline(fho, firstline, count)\n count += 1\n for line in fh:\n writeline(fho, line, count)\n count += 1\n\n shutil.move(filename + '.tmp', filename)",
"def to_disk(self, reopen=True):\n # try:\n tfa = CachedArray(self.array)\n # except Exception:\n # return False\n\n fd, _lock_file = mkstemp(\n prefix=tfa._partition_file + \"_\", dir=tfa._partition_dir\n )\n close(fd)\n\n self.subarray = tfa\n _temporary_files[tfa._partition_file] = (\n tfa._partition_dir,\n _lock_file,\n set(),\n )\n\n if reopen:\n # Re-open the partition\n self.open(self.config)\n\n return True",
"def __len__(self):\n return len(self.file_paths)",
"def increment_count(self):\n self.image_count +=1\n if self.image_count > self.max_count:\n self.image_count = self.count_start # overflow",
"def totalfiles(self):\n return len([sz for sz in self.iterate()])",
"def append_filepath(self, filepath):\n idx = len(self.t_sect['filepaths'])\n self.t_sect['filepaths'].append(filepath)\n return idx",
"def incr_counter(self, path):\n res = self.read_counter(path)\n # print 'incr_counter:', path, res, '->', res + 1\n res += 1\n self.cursor.execute('REPLACE INTO counter(fullpath, count) VALUES(?, ?)', (path, res))\n self.conn.commit()\n pass"
] | [
"0.83802605",
"0.6079216",
"0.60073787",
"0.5972287",
"0.5871502",
"0.58649766",
"0.58356875",
"0.5813897",
"0.5656929",
"0.5650024",
"0.5628978",
"0.5588129",
"0.5575472",
"0.55536973",
"0.55466735",
"0.55383646",
"0.5529106",
"0.5502226",
"0.54975855",
"0.5481056",
"0.54359984",
"0.5412819",
"0.5331949",
"0.52709615",
"0.5260593",
"0.52554363",
"0.5216349",
"0.5193865",
"0.51864076",
"0.51768965"
] | 0.71174276 | 1 |
Subtract 1 from the Partition.file_counter if self._subarray is an instance of FileArray and not a temporary FileArray. | def _decrement_file_counter(self):
self._add_to_file_counter(-1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _add_to_file_counter(self, i):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n if subarray is None:\n return\n\n try:\n if isinstance(subarray, FileArray) and not isinstance(\n subarray, CachedArray\n ):\n try:\n filename = subarray.get_filename()\n except Exception:\n filename = None\n\n if filename is None:\n return\n\n file_counter = self.file_counter\n # count = file_counter.get(filename, 0)\n # file_counter[filename] = count + i\n # if file_counter[filename] <= 0:\n count = file_counter.get(filename, 0) + i\n if count <= 0:\n # Remove the file from the dictionary if its count has\n # dropped to zero\n file_counter.pop(filename, None)\n else:\n file_counter[filename] = count\n except Exception:\n # If we're here then it is likely that FileArray has been\n # torn down, so just do nothing.\n pass",
"def __del__(self):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n # If the subarray is unique it will have 2 references to\n # it plus 1 within this method, making 3. If it has more\n # than 3 references to it then it is not unique.\n if getrefcount is not None:\n self._decrement_file_counter()\n if subarray is None or getrefcount(subarray) > 3:\n return\n else:\n # getrefcount has itself been deleted or is in the process\n # of being torn down\n return\n\n _partition_file = getattr(subarray, \"_partition_file\", None)\n if _partition_file is not None:\n # This partition contains a temporary file which is not\n # referenced by any other partition on this process, so if\n # there are no lock files present remove the file from\n # disk.\n _remove_temporary_files(_partition_file)\n\n else:\n try:\n if FileArray is not None and isinstance(subarray, FileArray):\n try:\n filename = subarray.get_filename()\n except Exception:\n filename = None\n\n if self.file_counter.get(filename, 999) <= 0:\n # This partition contains a non-temporary file\n # which is not referenced by any other\n # partitions, so close the file.\n subarray.close()\n except Exception:\n # If we're here then it is likely that FileArray has been\n # torn down, so just do nothing.\n pass\n # --- End: if",
"def _increment_file_counter(self):\n self._add_to_file_counter(1)",
"def __len__(self):\n return len(self.files[self.split])",
"def unique_files(self):\n self._tempfiles[-1].ctr = -1",
"def numberFiles(self):\n return self.n",
"def getFileCount(self) -> int:\n ...",
"def fileCount(self):\n pass",
"def __len__(self) -> int:\n return len(self.files)",
"def numberFiles(self):\n with open(self.inputfile) as fin:\n for n, _ in enumerate(fin, start=1): pass\n self.n = n\n return self.n",
"def __len__(self):\n return len(self.files)",
"def stat_beg_file(self, filename):\n\n self.batchvals['numfiles'] += 1\n self.filevals['filename'] = filename\n self.filevals['start_time'] = time.time()\n\n return -1",
"def file_close(self):\n if self.on_disk:\n self._subarray.close()",
"def count_files(self):\n self.file_count = 0\n self.count_files_loop(self.dirpath)\n return",
"def __len__(self):\n\n return len(self._file_list)",
"def on_disk(self):\n return isinstance(self._subarray, FileArray)",
"def n_subfile(self):\n self.assert_is_dir_and_exists()\n n = 0\n for _ in self.select_file(recursive=False):\n n += 1\n return n",
"def sequential_files(self, ctr=0):\n self._tempfiles[-1].ctr = ctr",
"def getFileCount(self):\n\n if self.filecount == -1:\n self.filecount = self.db.filecount()\n\n return self.filecount",
"def __number_of_files(self):\n self.__get_files()\n return len(self.files)",
"def __len__(self):\n return len(self.frame1_files)",
"def file_count(self) -> int:\n if self.dataset is None:\n raise ValueError('No known dataset found!')\n return self._max_file_count",
"def get_nrof_aux(self):\n aux = 0\n for l in self.aux_array:\n if l:\n aux += 1\n return aux",
"def fix_index(self):\n if self.record_size <= self.size:\n self.record_size += 1\n if self.index % self.size == 0:\n self.isFull = True if len(self._storage) == self.size else False\n if self.replace_flag:\n self.index = 1\n return self.index\n else:\n sys.stderr.write('Experience replay buff is full and replace is set to FALSE!\\n')\n return -1\n else:\n self.index += 1\n return self.index",
"def count_deleted_bytes(self): # FileObj.count_deleted_bytes\n if self.deleted:\n return self.bytes \n else:\n return 0",
"def getnrfiles(self):\n return len(self.filenames)",
"def reduce(self, array, index):\n\n return 0",
"def n_total_files(self):\n return len(self.fileinfo)",
"def __len__(self):\n return len(self.file_paths)",
"def close(self, **kwargs):\n config = getattr(self, \"config\", None)\n\n if config is None:\n return\n\n if kwargs:\n config.update(kwargs)\n\n original = getattr(self, \"_original\", None)\n logger.partitioning(\"Partition.close: original = {}\".format(original))\n\n if not original:\n originally_on_disk = False\n original_subarray = None\n else:\n originally_on_disk = not original.in_memory\n original_subarray = original._subarray\n\n config = self.config\n logger.partitioning(\" config = {}\".format(config))\n\n if config[\"serial\"]:\n # --------------------------------------------------------\n # SERIAL\n # --------------------------------------------------------\n logger.partitioning(\" serial\")\n\n if config[\"readonly\"]:\n logger.partitioning(\" readonly=True\")\n\n if originally_on_disk:\n logger.partitioning(\" subarray originally on disk\")\n\n if config.get(\"to_disk\", False):\n # 1.1.1.1 The original subarray was on disk,\n # we don't want to keep the current\n # subarray in memory, and we are happy\n # to discard any changes that may have\n # been made to the subarray.\n logger.partitioning(\" 1.1.1.1 revert\")\n self.revert()\n elif free_memory() <= cf_fm_threshold():\n # 1.1.1.2 The original subarray was on disk,\n # we are happy to keep the current\n # subarray in memory, but there is not\n # enough free memory to do so.\n logger.partitioning(\n \" 1.1.1.2 revert ({} <= {})\".format(\n free_memory(), cf_fm_threshold()\n )\n )\n self.revert()\n else:\n # 1.1.1.3 The original subarray was on disk\n # and there is enough memory to keep\n # the current subarray in memory\n if config[\"unique_subarray\"] and isinstance(\n original_subarray, CachedArray\n ):\n # The original subarray was a temporary\n # file which is not referenced by any\n # other partitions\n _remove_temporary_files(\n original_subarray._partition_file\n )\n\n del self.masked\n logger.partitioning(\n \" 1.1.1.3 del masked ({} > {})\".format(\n free_memory(), cf_fm_threshold()\n )\n )\n\n else:\n logger.partitioning(\" subarray originally in memory\")\n if config.get(\"to_disk\", False):\n # 1.1.2.1 Original subarray was in memory and\n # we don't want to keep the current\n # subarray in memory\n logger.partitioning(\" 1.1.2.1 to_disk\")\n self.to_disk(reopen=False)\n elif free_memory() <= cf_fm_threshold():\n # 1.1.2.2 Original subarray was in memory and\n # unique but there is not enough\n # memory to keep the current subarray\n logger.partitioning(\" 1.1.2.2 to_disk\")\n self.to_disk(reopen=False)\n else:\n # 1.1.2.3 Original subarray was in memory and\n # unique and there is enough memory to\n # keep the current subarray in memory\n logger.partitioning(\" 1.1.2.3 pass\")\n pass\n else:\n # config['readonly'] is False\n if originally_on_disk:\n if config.get(\"to_disk\", False):\n # 1.2.1.1 Original subarray was on disk and\n # there and we don't want to keep the\n # array\n if config[\"unique_subarray\"] and isinstance(\n original_subarray, CachedArray\n ):\n # Original subarray was a temporary file\n # on disk which is not referenced by any\n # other partitions\n _remove_temporary_files(\n original_subarray._partition_file\n )\n\n logger.partitioning(\" 1.2.1.1 to_disk\")\n self.to_disk(reopen=False)\n elif free_memory() <= cf_fm_threshold():\n # 1.2.1.2 Original subarray was on disk but\n # there is not enough memory to keep\n # it\n if config[\"unique_subarray\"] and isinstance(\n original_subarray, CachedArray\n ):\n # Original subarray was a temporary file\n # on disk which is not referenced by 
any\n # other partitions\n _remove_temporary_files(\n original_subarray._partition_file\n )\n\n logger.partitioning(\" 1.2.1.2 to_disk\")\n self.to_disk(reopen=False)\n else:\n # 1.2.1.3 Original subarray was on disk and\n # there is enough memory to keep it\n logger.partitioning(\" 1.2.1.3 pass\")\n del self.masked\n else:\n if config.get(\"to_disk\", False):\n # 1.2.2.1 Original subarray was in memory but\n # we don't want to keep it\n logger.partitioning(\" 1.2.2.1 to_disk\")\n self.to_disk(reopen=False)\n elif free_memory() <= cf_fm_threshold():\n # 1.2.2.2 Original subarray was an in memory\n # but there is not enough memory to\n # keep it\n logger.partitioning(\" 1.2.2.2 to_disk\")\n self.to_disk(reopen=False)\n else:\n # 1.2.2.3 Original subarray was in memory and\n # there is enough memory to keep it\n logger.partitioning(\" 1.2.2.3 del masked\")\n del self.masked\n else:\n logger.partitioning(\"Partition.close: parallel\")\n # --------------------------------------------------------\n # PARALLEL\n # --------------------------------------------------------\n pass\n\n # if hasattr(self, '_original'):\n # del self._original\n\n # print(hasattr(self, 'config')),\n try:\n del self.config\n except AttributeError:\n pass"
] | [
"0.7747842",
"0.63648546",
"0.6072333",
"0.60351396",
"0.5918244",
"0.57059807",
"0.56674904",
"0.5664642",
"0.56031275",
"0.5573989",
"0.5520654",
"0.5453089",
"0.5448838",
"0.54281026",
"0.5422772",
"0.5378375",
"0.5307265",
"0.5244931",
"0.52217174",
"0.5220032",
"0.5216291",
"0.51331407",
"0.5115629",
"0.5106185",
"0.50907373",
"0.5089161",
"0.5085614",
"0.507725",
"0.5066728",
"0.50590724"
] | 0.6648334 | 1 |
Add the auxiliary mask to the config dictionary. Assumes that ``self.config`` already exists. | def _configure_auxiliary_mask(self, auxiliary_mask):
indices = self.indices
new = [
mask[
tuple(
[
(slice(None) if n == 1 else index)
for n, index in zip(mask.shape, indices)
]
)
]
for mask in auxiliary_mask
]
# # If the partition is to be parallelised then get rid of mask
# # components which are all False so the mask component does
# # not get copied to the child process
# if not config['serial']:
# new = [mask for mask in new if not mask.any()]
self.config["auxiliary_mask"] = new | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def configure_masking(self, masks):\n self.masks = masks",
"def add_config(self):\n\n config = {\n 'invert_byte': InvertByte,\n 'invert_word': InvertWord,\n 'invert_double_word': InvertDoubleWord,\n 'and_byte': AndByte,\n 'and_word': AndWord,\n 'and_double_word': AndDoubleWord,\n 'or_byte': OrByte,\n 'or_word': OrWord,\n 'or_double_word': OrDoubleWord,\n 'exclusive_or_byte': ExclusiveOrByte,\n 'exclusive_or_word': ExclusiveOrWord,\n 'exclusive_or_double_word': ExclusiveOrDoubleWord\n }\n\n return config",
"def _updateMaskedValueSet():\n global masked_value_set\n for confName in controller.CONF:\n # Add all needed values to masked_value_set\n if (controller.getParamKeyValue(confName, \"MASK_INPUT\") == True):\n masked_value_set.add(controller.CONF[confName])",
"def update_mask(self, mask):\n\n # Get general mask\n general_mask = self.general_mask\n\n # Complete with the input mask\n new_mask = (general_mask | mask)\n\n # Update attribute\n self.mask = new_mask\n\n # Correct i_bounds if it was not specified\n # self.update_i_bnds()\n\n # Re-compute weights\n self.weights, self.weights_k_idx = self.compute_weights()\n\n return",
"def add(self, files, mask):\n pass",
"def update_mask(self):\r\n \r\n # Binary mask from ML detection\r\n if len(self.selected_ML_Index) > 0:\r\n # Delete items in dictionary that are not roi items\r\n roi_dict = self.selected_cells_infor_dict.copy()\r\n del_key_list=[]\r\n for key in roi_dict:\r\n print(key)\r\n if 'ROIitem' not in key:\r\n del_key_list.append(key)\r\n for key in del_key_list:\r\n del roi_dict[key]\r\n \r\n self.MLmask = ProcessImage.ROIitem2Mask(roi_dict, mask_resolution = (self.MLtargetedImg.shape[0], self.MLtargetedImg.shape[1]))\r\n # Binary mask of added rois\r\n self.addedROIitemMask = ProcessImage.ROIitem2Mask(self.roi_list_freehandl_added, mask_resolution = (self.MLtargetedImg.shape[0], self.MLtargetedImg.shape[1]))\r\n \r\n self.intergrate_into_final_mask()",
"def add_mask_layer(self):\n return Masking(mask_value=self.mask_value, input_shape=(self.max_sequence_size, 1))",
"def open(self, config):\n unique_subarray = getrefcount(self._subarray) <= 2\n\n config = config.copy()\n config[\"unique_subarray\"] = unique_subarray\n\n self.config = config\n\n if config.get(\"auxiliary_mask\"):\n self._configure_auxiliary_mask(config[\"auxiliary_mask\"])\n\n self.config[\"extra_memory\"] = self.extra_memory()\n\n self._in_place_changes = True\n self.masked = True\n\n if hasattr(self, \"output\"):\n del self.output\n\n return config",
"def __set_special_config_values(cfg: __Config, config: dict) -> \"__Config\":\n cfg.file_name_plane_masks = lambda i: str(i) + config['file_name_plane_mask_suf']\n cfg.file_name_planercnn_image = lambda i: str(i) + config['file_name_planercnn_image_suf']\n cfg.dir_results = f\"{cfg.edge_detection_type}\" # will be the output folder, create in data dir\n cfg.image_size = tuple(int(x) for x in config['image_size'].split(\" \"))\n return cfg",
"def _buildMaskImage(self,maskname, mask_array):\n # If an old version of the maskfile was present,\n # remove it and rebuild it.\n if fileutil.findFile(maskname):\n fileutil.removeFile(maskname)\n\n _file = pyfits.open(maskname,mode='append')\n _phdu = pyfits.PrimaryHDU(data=mask_array)\n\n _file.append(_phdu)\n _file.close()\n del _file, _phdu",
"def setup_mask(self, d25scale): \n\n logger = logging.getLogger(name=\"ShotSensitivity\")\n \n # see if this is a bad shot\n #print(\"Bad shot from \", self.conf.badshot)\n badshot = loadtxt(self.conf.badshot, dtype=int)\n badtpshots = loadtxt(self.conf.lowtpshots, dtype=int)\n if (self.shotid in badshot) or (self.shotid in badtpshots):\n logger.warn(\"Shot is in bad. Making mask zero everywhere\")\n self.badshot = True\n else:\n self.badshot = False\n \n # set up bad amps\n logger.info(\"Bad amps from {:s}\".format(self.conf.badamp))\n self.bad_amps = Table.read(self.conf.badamp)\n sel_shot = (self.bad_amps[\"shotid\"] == self.shotid)\n self.bad_amps = self.bad_amps[sel_shot]\n \n # set up galaxy mask\n logger.info(\"Galaxy mask from {:s}\".format(self.conf.rc3cat))\n galaxy_cat = Table.read(self.conf.rc3cat, format='ascii')\n gal_coords = SkyCoord(galaxy_cat['Coords'], frame='icrs')\n shot_coords = SkyCoord(ra=self.shot_ra, dec=self.shot_dec,\n unit=\"deg\")\n sel_reg = where(shot_coords.separation(gal_coords) < 1.*u.deg)[0]\n\n self.gal_regions = []\n if len(sel_reg) > 0:\n for idx in sel_reg:\n self.gal_regions.append(create_gal_ellipse(galaxy_cat, \n row_index=idx, \n d25scale=d25scale))\n \n # set up meteor mask\n # check if there are any meteors in the shot:\n logger.info(\"Meteors from {:s}\".format(self.conf.meteor))\n self.met_tab = Table.read(self.conf.meteor, format=\"ascii\")\n self.met_tab = self.met_tab[self.shotid == self.met_tab[\"shotid\"]]",
"def add_mask(self):\n return xr.open_dataset(f'/{test.dlfile_directory}/{test.climate}_mask_{test.mask_str}_dldata_traintest.nc')",
"def apply_mask(self, mask_band=None, mask_val=None):\n pass",
"def prepareMask(self, mask):\n\n # Make sure that the mask has the same\n # number of voxels as the atlas image.\n # Use nearest neighbour interpolation\n # for resampling, as it is most likely\n # that the mask is binary.\n try:\n mask, xform = resample.resample(\n mask, self.shape[:3], dtype=np.float32, order=0)\n\n except ValueError:\n raise MaskError('Mask has wrong number of dimensions')\n\n # TODO allow non-aligned mask - as long as it overlaps\n # in world coordinates, it should be allowed\n if not fslimage.Image(mask, xform=xform).sameSpace(self):\n raise MaskError('Mask is not in the same space as atlas')\n\n return mask",
"def customize_experiment_config(self, config):\n # TODO: use ConfigList from Coach launcher, and share customization code.\n hyperparams_dict = json.loads(os.environ.get(\"SM_HPS\", \"{}\"))\n\n # Set output dir to intermediate\n # TODO: move this to before customer-specified so they can override\n hyperparams_dict[\"rl.training.local_dir\"] = \"/opt/ml/output/intermediate\"\n\n self.hyperparameters = ConfigurationList() # TODO: move to shared\n for name, value in hyperparams_dict.items():\n # self.map_hyperparameter(name, val) #TODO\n if name.startswith(\"rl.\"):\n # self.apply_hyperparameter(name, value) #TODO\n self.hyperparameters.store(name, value)\n # else:\n # raise ValueError(\"Unknown hyperparameter %s\" % name)\n\n self.hyperparameters.apply_subset(config, \"rl.\")\n return config",
"def mask_custom(self, custom_mask):\n\t\t## combine the list and remove the duplicates\n\t\tself.mask = list(set().union(self.mask, custom_mask))\n\n\t\tself.wave = np.delete(self.oriWave, list(self.mask))\n\t\tself.flux = np.delete(self.oriFlux, list(self.mask))\n\t\tself.noise = np.delete(self.oriNoise, list(self.mask))\n\n\t\treturn self",
"def add_extra(self, entry, value):\n\n config_spec = vim.vm.ConfigSpec()\n self.logger.info(\"Adding/Updating extra config: {0} = {1}\".format(entry, value))\n opt = vim.option.OptionValue()\n opt.key = entry\n opt.value = value\n config_spec.extraConfig = [opt]\n return self.vm_obj.ReconfigVM_Task(spec=config_spec)",
"def attention_mask(model, x):\n config = model.config\n input_mask = model.inputs[\"input_mask\"]\n final_mask = model.builder.customOp(opName=\"AttentionMask\",\n opVersion=1,\n domain=\"ai.graphcore\",\n inputs=[input_mask, x],\n attributes={\"dataType\": model.config.popart_dtype})[0]\n final_mask = model.detach(final_mask)\n return final_mask",
"def merge_config(self_config, indict):\n\n self_config.merge(indict)\n patch_config(self_config, indict)",
"def set_measurement_mask(self, program_name, mask_name, begins, lengths) -> Tuple[numpy.ndarray, numpy.ndarray]:",
"def add_config(self, conf_map):\n if self.active.isChecked():\n self.add_feat_conf(conf_map)",
"def add_config(self, config):\n clean=lambda n: n.strip().strip('\"').lower()\n for line in config.split('\\n'):\n items=line.strip().split()\n if items and len(items) >= 3:\n cmd, evt, hnd=items[:3]\n \"\"\" NOTE\n - just 'bind' command expected right now\n - '+' prepended ti the handler means REPEAT (make sense just for keyboard keys actually)\n \"\"\"\n cmd=clean(cmd)\n if cmd in ['bind']:\n evt,hnd=(clean(evt), clean(hnd))\n if not cmd in self.config: self.config[cmd]={}\n repeat=hnd.startswith('+')\n if repeat: hnd=hnd[1:]\n self.config[cmd].update([[evt, [hnd, repeat]]])",
"def _draw_mask_on_image(self, mask):\n mask = self.STANDARD_COLORS_ARRAY[mask]\n cv2.addWeighted(mask,self.config.ALPHA,self.image,1.0,0,self.image)",
"def _update_mask_type(configs, mask_type):\n configs[\"train_input_config\"].mask_type = mask_type\n _update_all_eval_input_configs(configs, \"mask_type\", mask_type)",
"def createMaskDictionary(self):\n try:\n self.maskMap = dict(list(zip(self.inds,list(range(len(self.inds))))))\n self.maskSet = set(self.inds)\n except Exception as error:\n print(\"failed in createMaskDictionary\", error)",
"def add_mask(self, bg, mask):\n # if mask is to tall for the background image, decrease the size by 50%\n if bg.shape[0] < mask.shape[0]:\n mask = cv2.resize(mask, (int(0.5*mask.shape[0]), int(0.5*mask.shape[1])), interpolation=cv2.INTER_AREA)\n h_mask, w_mask = mask.shape[:2]\n h, w = bg.shape[:2]\n \n # select random location for mask\n h_rand = np.random.rand() * 0.9\n h_rand = np.clip(h_rand, 0, 1.0 - h_mask/h)\n h_update = int(h_rand * h)\n w_rand = np.random.rand() * 0.9\n w_rand = np.clip(w_rand, 0, 1.0 - w_mask/w)\n w_update = int(w_rand * w)\n \n # define filter for a mask\n filt = (mask == 0)\n \n # place the mask in the bg img\n mod = bg.copy()\n mod[h_update:h_update+h_mask, w_update:w_update+w_mask, :] *= filt\n mod[h_update:h_update+h_mask, w_update:w_update+w_mask, :] += mask\n \n # yolo dim for mask\n locy = (h_update+h_update+h_mask)/2/h\n locx = (w_update+w_update+w_mask)/2/w\n sizey = (h_mask/h)\n sizex = (w_mask/w)\n \n dim = [locx, locy, sizex, sizey]\n \n return mod, dim",
"def _GetChangesForMask(config_sed_input):\n config_sed = config_sed_input\n config_sed += [(r'WALLTIME_MINUTES=100',\n (r'WALLTIME_MINUTES=100\\n'\n r'export CONT=mlperf-nvidia:object_detection\\n'\n r'export DATADIR=\\/data\\n'\n r'export PKLDIR=\\/data\\/coco2017\\/pkl_coco\\n'\n r'export NEXP=1'))]\n if MASKRCNN_BATCH_SIZE.value:\n config_sed.append(\n (r'BATCHSIZE=.*', fr'BATCHSIZE={MASKRCNN_BATCH_SIZE.value}'))\n return config_sed",
"def apply_mask(self, mask, parameters=None):\n if parameters is None:\n self.dates = self.dates[mask]\n for key in self.data.keys():\n self.data[key] = self.data[key][mask]\n\n self.manufacturer = self.manufacturer[mask]\n self.data_file = self.data_file[mask]\n self.serial_number = self.serial_number[mask]\n else:\n for parameter in parameters:\n self.data[parameter][~mask] = np.nan",
"def add_dimension(self, name, bit_index=None, default=False):\n if not self.flag_masks.get(name) is None:\n raise ValueError(\"the name %s is already in this flag space\" % name)\n bit_nums = list(self.flag_bits.values())\n if bit_index is None:\n bit_index = 0\n #assign the lowest currently unused bit number\n while bit_index in bit_nums:\n bit_index += 1\n if bit_index in bit_nums:\n raise ValueError(\"bit_index %d is already taken\" % bit_index)\n self.flag_bits[name] = bit_index\n self.flag_masks[name] = 2**bit_index\n self.default_dict[name] = default",
"def _configure(self) -> None:\n reg_data = self.configuration\n conf_data = reg_data & ~0xC0 | 0x80\n # check if already in the right configuration, do not re-configure on and on again\n if reg_data != conf_data:\n self.configuration = conf_data"
] | [
"0.5756575",
"0.5562516",
"0.54862016",
"0.5367435",
"0.5347834",
"0.53455114",
"0.5306387",
"0.52775955",
"0.522134",
"0.5194576",
"0.51778084",
"0.5152401",
"0.51056355",
"0.51011837",
"0.50790006",
"0.5067728",
"0.5065551",
"0.4962702",
"0.4946801",
"0.4946075",
"0.49183005",
"0.49159494",
"0.49100894",
"0.49084634",
"0.49076694",
"0.48849526",
"0.48177648",
"0.48108754",
"0.48106894",
"0.47964016"
] | 0.71955097 | 0 |
True if and only if the partition's subarray is in memory as opposed to on disk. | def in_memory(self):
return hasattr(self._subarray, "__array_interface__") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def in_memory(self) -> bool:\n return all(isinstance(x, np.ndarray) for x in self.chunks.values())",
"def extra_memory(self):\n if not self.in_memory:\n # --------------------------------------------------------\n # The subarray is on disk so getting the partition's data\n # array will require extra memory\n # --------------------------------------------------------\n extra_memory = True\n else:\n # --------------------------------------------------------\n # The subarray is already in memory\n # --------------------------------------------------------\n config = self.config\n\n p_part = self.part\n if p_part:\n extra_memory = True\n elif not config[\"unique_subarray\"]:\n extra_memory = True\n else:\n p_data = self._subarray\n\n if not numpy_ma_isMA(p_data):\n # The p_data is not a masked array\n extra_memory = isinstance(p_data.base, numpy_ndarray)\n else:\n # The p_data is a masked array\n memory_overlap = isinstance(\n p_data.data.base, numpy_ndarray\n )\n if not (\n p_data.mask is numpy_ma_nomask\n or not numpy_ma_is_masked(p_data)\n ):\n # There is at least one missing data point\n memory_overlap |= isinstance(\n p_data.mask.base, numpy_ndarray\n )\n\n extra_memory = memory_overlap\n # --- End: if\n\n p_dtype = p_data.dtype\n\n if not extra_memory:\n if config[\"func\"] is not None:\n extra_memory = True\n else:\n p_units = self.Units\n units = config[\"units\"]\n if (\n not p_units.equals(units)\n and bool(p_units) is bool(units)\n and not (\n p_data.flags[\"C_CONTIGUOUS\"]\n and p_dtype.kind == \"f\"\n )\n ):\n extra_memory = True\n\n # ------------------------------------------------------------\n # Extra memory is required if the dtype needs changing\n # ------------------------------------------------------------\n if not extra_memory:\n dtype = config[\"dtype\"]\n if dtype is not None and dtype != p_data.dtype:\n extra_memory = True\n # --- End: if\n\n # ------------------------------------------------------------\n # Amount of extra memory (in bytes) required to access the\n # array\n # ------------------------------------------------------------\n return self.nbytes if extra_memory else 0",
"def on_disk(self):\n return isinstance(self._subarray, FileArray)",
"def in_cached_file(self):\n return isinstance(self._subarray, CachedArray)",
"def has_shareable_memory(a):\r\n return _get_backing_memmap(a) is not None",
"def is_full(self):\n if len(self._page_map) >= self.memory_size:\n return True\n return False",
"def IsAllocated(self):\n return self._fsntfs_file_entry.is_allocated()",
"def is_full(self):\n elements_in_sects = sum(\n map(opr.attrgetter(\"size\"), self.sects.values())\n )\n elements_in_total = fct.reduce(\n opr.mul, type(self).flatten_shape(self.shape), 1\n )\n res = elements_in_sects >= elements_in_total\n return res",
"def is_array(self):\n return len(self.descriptor) > 1",
"def is_full(self) -> bool:\n return self._array[0].all()",
"def pageable(self):\n return maxSRAM(self.mem) <= self.dev.SRAM_PAGE_LEN",
"def is_space_available(partition, size):\n available_space = psutil.disk_usage(partition).free\n return False if available_space < size else True",
"def is_full(self) -> bool:\r\n return self.size == self.capacity",
"def is_full(self):\r\n if self.size == self.capacity:\r\n return True\r\n return False",
"def full(self):\n return self.size >= self.maxsize",
"def isFull(self) -> bool:\n return self._elems == self._k",
"def full(self):\n return self._current_size == self._size",
"def is_full(self):\n return self.heap_size >= self.capacity",
"def isSetSize(self):\n return _libsbml.Compartment_isSetSize(self)",
"def has_next(self):\n while self._row < self._n and not self._arr[self._row]: # current sub-array is empty\n self._row += 1 # move to next sub-array\n self._col = 0\n if self._row >= self._n: # end of master-array already\n return False\n return True",
"def isFull(self):\n\t\treturn self.size == self.capacity",
"def full(self) -> bool:\n return self.maxsize and self.qsize() >= self.maxsize",
"def is_full(self):\n\n return self.count == len(self.array)",
"def has_full_batch(self) -> bool:",
"def is_in_heap(self, address):\n return self.is_address_of_type(address, MemoryType.MajorHeap, MemoryType.MinorHeap)",
"def _is_full(self):\n if self.allocated_spaces == self.capacity:\n return True\n elif self.allocated_spaces < self.capacity:\n return False",
"def is_free(self):\n return self._size > 0",
"def is_partition(disk): #TODO: Could change to use \"Whole\" attrib. Good idea?\n\n return \"s\" in disk.split(\"disk\")[1]",
"def full(self):\r\n if self._maxsize <= 0:\r\n return False\r\n else:\r\n return self.qsize() >= self._maxsize",
"def check_free(self, arr):\n cell_location = self.cartesian_to_cell(arr)\n cell = self.occ_matrix[cell_location[0], cell_location[1]]\n return cell == 0"
] | [
"0.7677807",
"0.75874203",
"0.73676527",
"0.6567502",
"0.6373806",
"0.62520576",
"0.6168975",
"0.6139325",
"0.61339194",
"0.6126576",
"0.611511",
"0.608777",
"0.6025861",
"0.6025105",
"0.60162103",
"0.5953649",
"0.59421575",
"0.59196216",
"0.5899218",
"0.5882104",
"0.5880964",
"0.5842848",
"0.584197",
"0.583599",
"0.5819761",
"0.5816238",
"0.5800492",
"0.57884943",
"0.57869345",
"0.5768234"
] | 0.7635218 | 1 |
True if and only if the partition's subarray is on disk as opposed to in memory. | def on_disk(self):
return isinstance(self._subarray, FileArray) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def in_memory(self):\n return hasattr(self._subarray, \"__array_interface__\")",
"def in_memory(self) -> bool:\n return all(isinstance(x, np.ndarray) for x in self.chunks.values())",
"def extra_memory(self):\n if not self.in_memory:\n # --------------------------------------------------------\n # The subarray is on disk so getting the partition's data\n # array will require extra memory\n # --------------------------------------------------------\n extra_memory = True\n else:\n # --------------------------------------------------------\n # The subarray is already in memory\n # --------------------------------------------------------\n config = self.config\n\n p_part = self.part\n if p_part:\n extra_memory = True\n elif not config[\"unique_subarray\"]:\n extra_memory = True\n else:\n p_data = self._subarray\n\n if not numpy_ma_isMA(p_data):\n # The p_data is not a masked array\n extra_memory = isinstance(p_data.base, numpy_ndarray)\n else:\n # The p_data is a masked array\n memory_overlap = isinstance(\n p_data.data.base, numpy_ndarray\n )\n if not (\n p_data.mask is numpy_ma_nomask\n or not numpy_ma_is_masked(p_data)\n ):\n # There is at least one missing data point\n memory_overlap |= isinstance(\n p_data.mask.base, numpy_ndarray\n )\n\n extra_memory = memory_overlap\n # --- End: if\n\n p_dtype = p_data.dtype\n\n if not extra_memory:\n if config[\"func\"] is not None:\n extra_memory = True\n else:\n p_units = self.Units\n units = config[\"units\"]\n if (\n not p_units.equals(units)\n and bool(p_units) is bool(units)\n and not (\n p_data.flags[\"C_CONTIGUOUS\"]\n and p_dtype.kind == \"f\"\n )\n ):\n extra_memory = True\n\n # ------------------------------------------------------------\n # Extra memory is required if the dtype needs changing\n # ------------------------------------------------------------\n if not extra_memory:\n dtype = config[\"dtype\"]\n if dtype is not None and dtype != p_data.dtype:\n extra_memory = True\n # --- End: if\n\n # ------------------------------------------------------------\n # Amount of extra memory (in bytes) required to access the\n # array\n # ------------------------------------------------------------\n return self.nbytes if extra_memory else 0",
"def is_partition(disk): #TODO: Could change to use \"Whole\" attrib. Good idea?\n\n return \"s\" in disk.split(\"disk\")[1]",
"def in_cached_file(self):\n return isinstance(self._subarray, CachedArray)",
"def has_subfile(self) -> bool:\n\t\tself._update_subfiles()\n\t\treturn bool(len(self.subfiles))",
"def _is_partitioned(self):\n ## check if the table are partitioned, need the split because of a change in the type of partitions in pydantic\n partitions = self.table_config[\"partitions\"]\n if partitions is None or len(partitions) == 0:\n return False\n\n if isinstance(partitions, list):\n # check if any None inside list.\n # False if it is the case Ex: [None, 'partition']\n # True otherwise Ex: ['partition1', 'partition2']\n return all(item is not None for item in partitions)\n\n raise ValueError(\"Partitions must be a list or None\")",
"def IsAllocated(self):\n return self._fsntfs_file_entry.is_allocated()",
"def isleaf(self):\n no_kids = super(PartitionDevice, self).isleaf\n # it is possible that the disk that originally contained this partition\n # no longer contains a disklabel, in which case we can assume that this\n # device is a leaf\n if self.disk and self.partedPartition and \\\n self.disk.format.type == \"disklabel\" and \\\n self.partedPartition in self.disk.format.partitions:\n disklabel = self.disk.format\n else:\n disklabel = None\n\n extended_has_logical = (self.isExtended and\n (disklabel and disklabel.logicalPartitions))\n return (no_kids and not extended_has_logical)",
"def is_array(self):\n return len(self.descriptor) > 1",
"def is_space_available(partition, size):\n available_space = psutil.disk_usage(partition).free\n return False if available_space < size else True",
"def is_full(self) -> bool:\n return self._array[0].all()",
"def in_file(self):\n return self.on_disk and not self.in_cached_file",
"def is_partition_the_last(dbapi, partition):\n idisk_uuid = partition.get('idisk_uuid')\n onidisk_parts = dbapi.partition_get_by_idisk(idisk_uuid)\n part_number = get_part_number(partition.get('device_path'))\n\n if int(part_number) != len(onidisk_parts):\n return False\n\n return True",
"def _are_features_already_extracted(self, output_path: str, subset: str) -> bool:\n file_path = join(output_path, subset + '.npy')\n return os.path.exists(file_path)",
"def _is_size_bound(self, path):\n return path.suffix == \".bin\"",
"def _array_name_implies_ND_slice(self, array_name):\n for v in self._split_arrays.values():\n if array_name in v:\n return True\n\n generic_match = re.findall(\"^(.+)_[xyz]$\", array_name)\n loadable_keys = self.loadable_keys()\n keys = list(self.keys())\n if len(generic_match) == 1 and generic_match[0] not in self._split_arrays:\n return generic_match[0] in loadable_keys or generic_match[0] in keys\n return False",
"def has_shareable_memory(a):\r\n return _get_backing_memmap(a) is not None",
"def is_partition(dev):\n dev = os.path.realpath(dev)\n if not stat.S_ISBLK(os.lstat(dev).st_mode):\n raise Error('not a block device', dev)\n\n name = get_dev_name(dev)\n if os.path.exists(os.path.join('/sys/block', name)):\n return False\n\n # make sure it is a partition of something else\n for basename in os.listdir('/sys/block'):\n if os.path.exists(os.path.join('/sys/block', basename, name)):\n return True\n\n raise Error('not a disk or partition', dev)",
"def has_full_batch(self) -> bool:",
"def is_part_of_disk(part_device_path, disk_device_path):\n is_part_of_disk = False\n\n if disk_device_path in part_device_path:\n is_part_of_disk = True\n elif constants.DEVICE_NAME_MPATH in disk_device_path:\n path_split = disk_device_path.split(constants.DEVICE_NAME_MPATH)\n if (path_split[0] in part_device_path and\n path_split[1] in part_device_path):\n is_part_of_disk = True\n\n return is_part_of_disk",
"def has_next(self):\n while self._row < self._n and not self._arr[self._row]: # current sub-array is empty\n self._row += 1 # move to next sub-array\n self._col = 0\n if self._row >= self._n: # end of master-array already\n return False\n return True",
"def exist_partition(self, partition_spec):\n return partition_spec in self.partitions",
"def isFull(self) -> bool:\n return self._elems == self._k",
"def _loaded_data(self):\n try:\n dsize = [int(d) for d\n in self.run('fits size', via='get').split()]\n except (ValueError, TypeError, AttributeError) as err:\n log.debug(f' FITS size error: {err}')\n return False\n else:\n if 0 in dsize:\n return False\n else:\n return True",
"def __check_flat_array__(self):\n if self.flat_array is not None:\n return True\n else:\n return False",
"def is_slice(self) -> bool:\n return self._is_slice",
"def contains_offset(self, offset):\n return (offset >= self.offset) and (offset < self.offset + self.filesize)",
"def is_mounted(device):\n\n partitions = psutil.disk_partitions()\n device_path = \"/dev/\" + device\n for i in partitions:\n if i.device == device_path:\n return True\n return False",
"def full(self):\n return self.size >= self.maxsize"
] | [
"0.69458973",
"0.67629313",
"0.6696292",
"0.656141",
"0.63517463",
"0.6290223",
"0.61257756",
"0.59961635",
"0.59486187",
"0.59276515",
"0.59175736",
"0.5881189",
"0.5803704",
"0.577553",
"0.5774722",
"0.5755425",
"0.5737828",
"0.57311875",
"0.5723483",
"0.5720749",
"0.5682879",
"0.5678204",
"0.56532705",
"0.5638273",
"0.56376",
"0.5626054",
"0.5618182",
"0.5615703",
"0.56024516",
"0.5587035"
] | 0.80375713 | 0 |
The partition's subarray of data. | def subarray(self):
return self._subarray | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def subarray(self) -> Subarray:\n return Subarray.from_pybind11(self._ctx, self._subarray)",
"def partition(self, sep):\n return asarray(partition(self, sep))",
"def array(self) -> ndarray:\n if self._slices: # so this is a sub-parray object\n # index into origin array by saved slices\n ret = self._array.get_by_global_slices(self._current_device_index, self._slices[0])\n for s in self._slices[1:]:\n ret = ret[s]\n return ret\n else: # this is a complete copy\n ret = self._array.get(self._current_device_index)\n\n if isinstance(ret, list): # get a subarray instead\n raise IndexError(\"Current device doesn't have a complete copy of this array\")\n return ret",
"def array(self):\n config = self.config\n\n unique_array = config[\"unique_subarray\"]\n\n p_axes = self.axes\n p_flip = self.flip\n p_part = self.part\n p_units = self.Units\n p_shape = self.shape\n p_location = self.location\n subarray = self._subarray\n\n len_p_axes = len(p_axes)\n\n if not self.in_memory:\n # --------------------------------------------------------\n # The subarray is not in memory.\n #\n # It could be in a file on disk or implied by a FileArray\n # object, etc.\n # --------------------------------------------------------\n self._original = self.copy()\n\n unique_array = True\n update = True\n copy = False\n\n if not p_part:\n indices = Ellipsis\n else:\n indices = tuple(p_part)\n\n # Read from a file into a numpy array\n p_data = subarray[indices]\n\n # We've just copied p_data from disk, so in place changes\n # are not possible\n in_place_changes = False\n else:\n # --------------------------------------------------------\n # The subarray is in memory\n # --------------------------------------------------------\n update = config[\"update\"]\n\n if p_part:\n p_data = get_subspace(subarray, p_part)\n elif not unique_array:\n p_data = subarray.view()\n else:\n p_data = subarray\n\n copy = config[\"extra_memory\"]\n\n # In place changes to p_data might be possible if we're not\n # copying the data\n in_place_changes = not copy\n\n if not p_data.ndim and isinstance(p_data, (numpy_number, numpy_bool_)):\n # --------------------------------------------------------\n # p_data is a numpy number (like numpy.int64) which does\n # not support assignment, so convert it to a numpy array.\n # --------------------------------------------------------\n p_data = numpy_array(p_data)\n # We've just copied p_data, so in place changes are\n # not possible\n copy = False\n in_place_changes = False\n\n masked = numpy_ma_isMA(p_data)\n if masked:\n # The p_data is a masked array\n if p_data.mask is numpy_ma_nomask or not numpy_ma_is_masked(\n p_data\n ):\n # There are no missing data points so recast as an\n # unmasked numpy array\n p_data = p_data.data\n masked = False\n # --- End: if\n\n if masked:\n # Set the hardness of the mask\n if config[\"hardmask\"]:\n p_data.harden_mask()\n else:\n p_data.soften_mask()\n # --- End: if\n\n self.masked = masked\n\n # ------------------------------------------------------------\n # Make sure that the data array has the correct units. This\n # process will deep copy the data array if required (e.g. 
if\n # another partition is referencing this numpy array), even if\n # the units are already correct.\n # ------------------------------------------------------------\n func = config.get(\"func\")\n units = config[\"units\"]\n if func is None:\n if not p_units.equals(units) and bool(p_units) is bool(units):\n func = Units.conform\n\n if func is not None:\n inplace = not copy\n p_data = func(p_data, p_units, units, inplace)\n p_units = units\n\n if not inplace:\n # We've just copied p_data, so in place changes are\n # not possible\n copy = False\n in_place_changes = False\n # --- End: if\n\n flip = config.get(\"flip\", None)\n if flip or p_flip:\n flip_axes = set(p_flip).symmetric_difference(flip)\n else:\n flip_axes = None\n\n axes = config[\"axes\"]\n\n if p_data.size > 1:\n # --------------------------------------------------------\n # Flip axes\n # --------------------------------------------------------\n if flip_axes:\n indices = [\n (\n slice(None, None, -1)\n if axis in flip_axes\n else slice(None)\n )\n for axis in p_axes\n ]\n p_data = p_data[tuple(indices)]\n\n # --------------------------------------------------------\n # Transpose axes\n # --------------------------------------------------------\n if p_axes != axes:\n iaxes = [p_axes.index(axis) for axis in axes if axis in p_axes]\n\n if len_p_axes > len(iaxes):\n for i in range(len_p_axes):\n if i not in iaxes:\n # iaxes.append(i)\n iaxes.insert(i, i)\n # --- End: if\n\n p_data = numpy_transpose(p_data, iaxes)\n # --- End: if\n\n # ------------------------------------------------------------\n # Remove excessive/insert missing size 1 axes\n # ------------------------------------------------------------\n if p_shape != p_data.shape:\n # if len_p_axes != len(p_shape):\n p_data = p_data.reshape(p_shape)\n\n # ------------------------------------------------------------\n # Apply the auxiliary mask\n # ------------------------------------------------------------\n auxiliary_mask = config[\"auxiliary_mask\"]\n if auxiliary_mask:\n for mask in auxiliary_mask:\n if mask.any():\n if not masked:\n p_data = p_data.view(numpy_ma_MaskedArray)\n masked = True\n\n p_data.mask = (mask | p_data.mask).array\n # --- End: for\n\n self.masked = True\n\n # ------------------------------------------------------------\n # Convert the array's data type\n # ------------------------------------------------------------\n p_dtype = p_data.dtype\n dtype = config.get(\"dtype\", None)\n if dtype is not None and dtype != p_dtype:\n try:\n p_data = p_data.astype(dtype) # Note: returns a copy\n except ValueError:\n raise ValueError(\n \"Can't recast partition array from {} to {}\".format(\n p_dtype.name, dtype.name\n )\n )\n else:\n # We've just copied p_data, so in place changes are\n # not possible\n copy = False\n in_place_changes = False\n # --- End: if\n\n # ------------------------------------------------------------\n # Copy the array\n # -----------------------------------------------------------\n if copy:\n if p_dtype.char != \"O\":\n if not masked or p_data.ndim > 0:\n p_data = p_data.copy()\n else:\n # This is because numpy.ma.copy doesn't work for\n # scalar arrays (at the moment, at least)\n p_data = numpy_ma_masked_all((), p_data.dtype)\n\n # We've just copied p_data, so in place changes are\n # not possible\n in_place_changes = False\n else:\n # whilst netCDF4.netcdftime.datetime is mucking bout,\n # don't copy!!!!\n # p_data = _copy(p_data)\n pass\n # --- End: if\n\n # ------------------------------------------------------------\n # Update the 
partition\n # ------------------------------------------------------------\n if update:\n self.subarray = p_data # ?? DCH CHECK\n self.Units = p_units\n self.part = []\n self.axes = axes\n self.flip = flip\n self.flatten = []\n self.shape = p_shape\n self.location = p_location\n\n self._in_place_changes = in_place_changes\n\n # ------------------------------------------------------------\n # Return the numpy array\n # ------------------------------------------------------------\n return p_data",
"def partitionData(data, labels, partition):\n\treturn [s[partition] for s in data], labels[partition]",
"def GetPartitioningArray(self):\n return _hypre.HypreParVector_GetPartitioningArray(self)",
"def as_slice(self):\n # slice for accessing arrays of values\n return slice(self._lo_atom, self._lo_atom + self._n_atoms)",
"def partition(data, indecies):\n\tsplitdata = [data[:indecies[0]]]\n\tsplitdata += [data[indecies[i-1]:indecies[i]] for i in range(1,len(indecies))]\n\tsplitdata.append(data[indecies[-1]:])\n\treturn splitdata",
"def get_partions(self) -> Union[ndarray, Tuple[ndarray, ndarray]]:\n if self.fragmented:\n return (self[self._begin:], self[:self._end])\n else:\n return self[self._begin:self._end]",
"def subset(arr, start, end):\n return [[row_data for row_data in row[start[1]:end[1]]] for row in arr[start[0]:end[0]]]",
"def rpartition(self, sep):\n return asarray(rpartition(self, sep))",
"def subset(self, data, subset_size):\n subset_size_q = int((subset_size - 1) / 2)\n subset_image = []\n\n for i in range(-subset_size_q, subset_size_q + 1):\n for j in range(-subset_size_q, subset_size_q + 1):\n subset_roll = np.roll(data, i, axis=0)\n subset_roll = np.roll(subset_roll, j, axis=1)\n subset_image.append(subset_roll)\n\n return np.sum(np.asarray(subset_image), axis=0)",
"def __getslice__(self, i, j):\n return self.dtrs[i:j]",
"def __getslice__(self,i,j):\n return self.x[i:j]",
"def _fprop_slice_np(h, stride, H, roi_offset):\n hstart = int(np.floor(float(h) * stride))\n hend = int(np.ceil(float(h + 1) * stride))\n\n hstart = min(max(hstart + roi_offset, 0), H)\n hend = min(max(hend + roi_offset, 0), H)\n\n return slice(hstart, hend), hend - hstart",
"def subdataset(self):\n return self._clip_metadata.get(\"subdataset\")",
"def getData(self, slice=None):\n\t\traise NotImplementedError",
"def slice(data, size):\n\treturn dice(data, size).T",
"def __getslice__( self, *args):\n return array.array.__getslice__(self, *args).tostring()",
"def get_subset(self, tile, band=0):\r\n # access window bounds\r\n bounds = rasterio.windows.bounds(tile, self.dataset.transform)\r\n return (\r\n self.__arr[(band,) + tile.toslices()],\r\n bounds,\r\n ) # Shape of array is announced with (bands, height, width)\r",
"def row_slice(self, xt, nproc):\n if nproc is None: nproc = self.nproc\n cs = xt.shape[0]//nproc #chuncksize\n tmp = [xt[i*cs:cs*i+cs,:] for i in range(nproc)]\n if nproc*cs != xt.shape[0]:\n tmp[-1] = np.concatenate((tmp[-1],xt[nproc*cs:xt.shape[0],:]),axis=0)\n return tmp",
"def slices(self):\n return self._slices",
"def _slice(self, start, stop, step=None):\n\n slices = [slice(None)] * self.data.ndim\n slices[self.axis] = slice(start, stop, step)\n return tuple(slices)",
"def __getslice__(self, start, stop):\n return self.__getitem__(slice(start, stop, None))",
"def get_part_array(array, part, additional_area, offset={'x': 0, 'y': 0}):\n\tresult = []\n\n\toffset_before = {\n\t\t'x': offset['x'] - additional_area['x'],\n\t\t'y': offset['y'] - additional_area['y']\n\t}\n\tif offset_before['x'] < 0:\n\t\tprint('set x')\n\t\toffset_before['x'] = 0\n\tif offset_before['y'] < 0:\n\t\tprint('set y')\n\t\toffset_before['y'] = 0\n\n\tfor i in array[offset_before['y'] : offset['y']+part['y']+additional_area['y']]:\n\t\tresult.append(i[offset_before['x'] : offset['x']+part['x']+additional_area['x']])\n\treturn np.array(result)",
"def subimage(image_as_array, step):\r\n\tsubimage_2d_array = image_as_array[200-int(step):200+int(step)]\r\n\treturn subimage_2d_array",
"def _extract_data_sub_stack(self, startRow, endRow):\n # Grab the shape of the image stack\n nz, ny, nx = self.shape\n\n # Compute the number of rows in this sub stack\n numberOfRows = endRow - startRow\n\n # Build an array for storing output\n outData = np.zeros((nz, numberOfRows, nx))\n\n # Loop through each image and extract its data\n for zInd, img in enumerate(self.imageList):\n outData[zInd, :, :] = img.data[startRow:endRow, :]\n\n return np.ma.array(outData)",
"def get_slice(self):\n return self.locs[tuple(self.indices), :]",
"def __getslice__(self, i, j):\n return self.__getitem__(slice(i,j))",
"def getPartition(self):\n\t\treturn self.partition"
] | [
"0.68206084",
"0.6535615",
"0.6457086",
"0.6347438",
"0.62131214",
"0.61877143",
"0.61062545",
"0.6101903",
"0.60762966",
"0.6068932",
"0.6029079",
"0.6012183",
"0.59961045",
"0.59653914",
"0.5928066",
"0.58841175",
"0.58761495",
"0.58514863",
"0.5848823",
"0.5791028",
"0.5779958",
"0.57720095",
"0.5763268",
"0.5713823",
"0.5713595",
"0.56957734",
"0.5685574",
"0.5683506",
"0.56661355",
"0.5665764"
] | 0.7297865 | 0 |
Change the axis names. The axis names are arbitrary, so mapping them to another arbitrary collection does not change the data array values, units, nor axis order. | def change_axis_names(self, axis_map):
axes = self.axes
# Partition axes
self.axes = [axis_map[axis] for axis in axes]
# Flipped axes
flip = self.flip
if flip:
self.flip = [axis_map[axis] for axis in flip] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setAxisName(name, axes='XYZ'):\n dislin.name(name, axes)",
"def setAxesNames(self):\n \n labels = ['T', 'Z', 'Y', 'X'] + [chr(ord('S')-i) for i in xrange(18)]\n if (len(self.axisList) >= 4):\n i = 0\n else:\n i = 4 - len(self.axisList)\n \n for axis in self.axisList:\n self.axesNames.append(labels[i] + ' - ' + axis.id)\n i += 1",
"def set_index_names(self, names, axis=0):\n self.get_axis(axis).names = names",
"def axesNames(self, data, info):\n return []",
"def setaxesnames(self):\n if not self._axesnames or self.prop['skipsai']:\n return\n debug('ControllerStartup.setaxesnames()')\n oldaxes = self.pidevice.qSAI_ALL()\n for i, newaxis in enumerate(self.axesnames):\n if newaxis != oldaxes[i] or self.prop['forcesai']:\n setstage = False\n if self.pidevice.HasqCST():\n if self.pidevice.qCST()[oldaxes[i]] == 'NOSTAGE':\n try:\n debug('try rename NOSTAGE to TEMP (0x3C)')\n self.pidevice.SPA(oldaxes[i], 0x3c, 'TEMP')\n setstage = True\n except GCSError:\n pass\n self.pidevice.SAI(oldaxes[i], newaxis)\n if setstage:\n self.pidevice.SPA(newaxis, 0x3c, 'NOSTAGE')\n debug('restore NOSTAGE (0x3C)')",
"def _default_axis_names(n_dims):\n _DEFAULT_NAMES = (\"z\", \"y\", \"x\")\n return _DEFAULT_NAMES[-n_dims:]",
"def customAxisNames(self):\n return []",
"def setAllAxisUnits(self,units): \n self.__axis_units__ = units",
"def setAxisUnits(self, dim, units): \n try:\n self.__axis_units__[dim] = units\n except IndexError:\n self.__axis_units__.append(units)",
"def axesnames(self, axesnames):\n if axesnames is None:\n self._axesnames = None\n else:\n assert isinstance(axesnames, list), 'axesnames must be list'\n self._axesnames = axesnames\n debug('ControllerStartup.axesnames = %s', itemstostr(self._axesnames))",
"def set_index_name(self, name, axis=0):\n self.get_axis(axis).name = name",
"def setAllAxisLabels(self, labels):\n self.__axis_labels__ = labels",
"def _update_axes(self):\n data_shape = self.data.shape\n if len(self.axes) < self.data.ndim + 1:\n self._axes.append(Axis())\n for index in range(self.data.ndim):\n if len(self.axes[index].values) != data_shape[index]:\n self.axes[index].values = np.arange(data_shape[index],\n dtype=np.float64)",
"def process_custom_axes(axis_names):\n return axis_names.strip().strip(\"'\").strip('\"').split(',')",
"def axis_name(self):\n return self._axis_name",
"def setAxisNameColor(idx=-1, axes='XYZ'):\n dislin.axclrs(idx, 'Name', axes)",
"def setAxisNameJustification(jus, axes='XYZ'):\n dislin.namjus(justdict[jus],axes)",
"def axesnames(self):\n return self._axesnames",
"def _handle_setup_axis(self, axis_args):\n axis_name = axis_args['name']\n axes_dict = self.server.axes\n\n if axis_name not in [name for name, _ in axes_dict.items()]:\n print \"Adding a new axis:\", axis_name\n axis_count = len(axes_dict)\n newaxis = self.server.figure.add_subplot(axis_count+1, 1, axis_count+1)\n axes_dict[axis_name] = newaxis\n axes_dict[axis_name].grid(True)\n axes_dict[axis_name].set_xlabel(axis_args['x_label'])\n axes_dict[axis_name].set_ylabel(axis_args['y_label'])\n # TODO: support *.set_title(\"Title\")\n if FLAGS.logy:\n axes_dict[axis_name].set_yscale('log', nonposy='clip')\n\n if axis_count != 0:\n # Resize other axes if the above wasn't the first.\n axis_count = len(axes_dict)\n for row,(name, _) in enumerate(axes_dict.items(), 1):\n print name, axis_count, row\n axes_dict[name].change_geometry(axis_count, 1, row)",
"def setAxisLabel(self, dim, label): \n try:\n self.__axis_labels__[dim] = label\n except IndexError:\n self.__axis_labels__.append(label)",
"def convert_axis( mv, axisold, axisindnew ):\n (axisnew, indexina3) = axisindnew\n axes = allAxes(mv)\n kold = None\n for k in range(len(axes)):\n if axes[k]==axisold: kold=k\n if kold==None:\n print \"ERROR. convert_axis cannot find axis\",axisold,\" in variable\",mv\n if len(axisold)==len(axisnew):\n mv.setAxis( kold, axisnew )\n return\n # Here's what we would do in 1-D:\n # newdata = ma.ones(len(axisnew))*mv.missing_value # Note that a FileVariable's missing_value is a tuple.\n # for i in range(len(axisold)):\n # newdata[ indexina3[i] ] = ma[i]\n # newmv = cdms2.createVariable( newdata, id=mv.id )\n # >1-D is the same idea, but more dimensions are coming along for the ride,\n # making it more complicated...\n shape0 = mv.shape\n shape0[kold] = len(axisnew)\n newdata = ma.ones(shape0)*mv.missing_value # Note that a FileVariable's missing_value is a tuple.\n # We want to copy ma to newdata - except that we need indirect indexing for the kold-th axis.\n # There seems to be nothing in numpy for treating one axis differently from the rest\n # (except for ellipsis, but it makes sense to use only one ellipsis and we would need two here).\n # The following will do the job. It would be very slow for an array with many big dimensions,\n # but the arrays here have already been reduced for graphics; the index sets will be small or\n # empty...\n ranges = map( range, shape0[0:kold] )\n for i in range(len(axisold)):\n for idx in apply(itertools.product,ranges):\n idx = idx + [indexina3(i)] + [Ellipsis]\n idxo = idx + [i] + [Ellipsis]\n newdata[ tuple(idx) ] = mv[idxo]\n newmv = cdms2.createVariable( newdata, id=mv.id )",
"def swapaxes(self, a1, a2):\n an = self.axes_names[:]\n ia1, ia2 = self.get_axis_id(a1), self.get_axis_id(a2)\n an[ia2], an[ia1] = an[ia1], an[ia2]\n return xndarray(np.swapaxes(self.data, ia1, ia2), an, self.axes_domains,\n self.value_label, self.meta_data)",
"def set_label_names(self, x: Union[np.ndarray, Dict[int, str]]) -> None:\n if isinstance(x, np.ndarray):\n label_names = x\n elif isinstance(x, dict):\n label_names = np.full(max(x.keys()) + 1, \"\", dtype=\"object\")\n label_names[list(x.keys())] = list(x.values())\n else:\n raise ValueError(f\"Unsupported {type(x)=}\")\n self._label_names_array = label_names",
"def _set_axis(axis):\n\n def axis_setter(self, labels):\n new_qc = DataFrameDefault.register(pandas.DataFrame.set_axis)(\n self, axis=axis, labels=labels\n )\n self.__dict__.update(new_qc.__dict__)\n\n return axis_setter",
"def _update_axislabels(self, x='x', **kwargs):\n if x not in 'xy':\n return\n # Update label on this axes\n axis = getattr(self, x + 'axis')\n axis.label.update(kwargs)\n kwargs.pop('color', None)\n\n # Defer to parent (main) axes if possible, then get the axes\n # shared by that parent\n ax = self._panel_parent or self\n ax = getattr(ax, '_share' + x) or ax\n\n # Apply to spanning axes and their panels\n axs = [ax]\n if getattr(ax.figure, '_span' + x):\n s = axis.get_label_position()[0]\n if s in 'lb':\n axs = ax._get_side_axes(s)\n for ax in axs:\n getattr(ax, x + 'axis').label.update(kwargs) # apply to main axes\n pax = getattr(ax, '_share' + x)\n if pax is not None: # apply to panel?\n getattr(pax, x + 'axis').label.update(kwargs)",
"def setAxisNameDistance(dist,axes='XYZ'):\n dislin.namdis(dist, axes)",
"def setIndexNames(self):\n self.xi = self.i1\n self.yi = self.i2",
"def set_axis_label(self, label, axis):\n if axis == 'x':\n self.axplot.set_xlabel(label)\n elif axis == 'y':\n self.axplot.set_ylabel(label)\n else:\n errmsg = 'Valid axis names are x and y.'\n raise ValueError(errmsg)",
"def _set_plot_axes_labels(self, data, viewer_id):\n viewer = self._viewer_by_id(viewer_id)\n\n # Get the units of the data to be loaded.\n spectral_axis_unit_type = data.spectral_axis.unit.physical_type.title()\n flux_unit_type = data.flux.unit.physical_type.title()\n\n if data.spectral_axis.unit.is_equivalent(u.m):\n spectral_axis_unit_type = \"Wavelength\"\n elif data.spectral_axis.unit.is_equivalent(u.pixel):\n spectral_axis_unit_type = \"pixel\"\n\n viewer.figure.axes[0].label = f\"{spectral_axis_unit_type} [{data.spectral_axis.unit.to_string()}]\"\n viewer.figure.axes[1].label = f\"{flux_unit_type} [{data.flux.unit.to_string()}]\"\n\n # Make it so y axis label is not covering tick numbers.\n viewer.figure.axes[1].label_offset = \"-50\"",
"def setnames(self, *args, **kwargs):\n return _coordsys.coordsys_setnames(self, *args, **kwargs)"
] | [
"0.7356323",
"0.7326029",
"0.6893155",
"0.6677015",
"0.64332557",
"0.6372005",
"0.63517636",
"0.62905",
"0.628417",
"0.62775946",
"0.6271645",
"0.6077482",
"0.6047013",
"0.6024936",
"0.5993588",
"0.59712166",
"0.5967535",
"0.596201",
"0.5952742",
"0.58923167",
"0.589169",
"0.5884654",
"0.578875",
"0.57817596",
"0.5780179",
"0.5779107",
"0.57761216",
"0.57437253",
"0.5734455",
"0.5707204"
] | 0.75589085 | 0 |
Close the partition after it has been conformed. The partition should usually be closed after its `array` method has been called to prevent memory leaks. Closing the partition does one of the following, depending on the values of the partition's `!_original` attribute and on the | def close(self, **kwargs):
config = getattr(self, "config", None)
if config is None:
return
if kwargs:
config.update(kwargs)
original = getattr(self, "_original", None)
logger.partitioning("Partition.close: original = {}".format(original))
if not original:
originally_on_disk = False
original_subarray = None
else:
originally_on_disk = not original.in_memory
original_subarray = original._subarray
config = self.config
logger.partitioning(" config = {}".format(config))
if config["serial"]:
# --------------------------------------------------------
# SERIAL
# --------------------------------------------------------
logger.partitioning(" serial")
if config["readonly"]:
logger.partitioning(" readonly=True")
if originally_on_disk:
logger.partitioning(" subarray originally on disk")
if config.get("to_disk", False):
# 1.1.1.1 The original subarray was on disk,
# we don't want to keep the current
# subarray in memory, and we are happy
# to discard any changes that may have
# been made to the subarray.
logger.partitioning(" 1.1.1.1 revert")
self.revert()
elif free_memory() <= cf_fm_threshold():
# 1.1.1.2 The original subarray was on disk,
# we are happy to keep the current
# subarray in memory, but there is not
# enough free memory to do so.
logger.partitioning(
" 1.1.1.2 revert ({} <= {})".format(
free_memory(), cf_fm_threshold()
)
)
self.revert()
else:
# 1.1.1.3 The original subarray was on disk
# and there is enough memory to keep
# the current subarray in memory
if config["unique_subarray"] and isinstance(
original_subarray, CachedArray
):
# The original subarray was a temporary
# file which is not referenced by any
# other partitions
_remove_temporary_files(
original_subarray._partition_file
)
del self.masked
logger.partitioning(
" 1.1.1.3 del masked ({} > {})".format(
free_memory(), cf_fm_threshold()
)
)
else:
logger.partitioning(" subarray originally in memory")
if config.get("to_disk", False):
# 1.1.2.1 Original subarray was in memory and
# we don't want to keep the current
# subarray in memory
logger.partitioning(" 1.1.2.1 to_disk")
self.to_disk(reopen=False)
elif free_memory() <= cf_fm_threshold():
# 1.1.2.2 Original subarray was in memory and
# unique but there is not enough
# memory to keep the current subarray
logger.partitioning(" 1.1.2.2 to_disk")
self.to_disk(reopen=False)
else:
# 1.1.2.3 Original subarray was in memory and
# unique and there is enough memory to
# keep the current subarray in memory
logger.partitioning(" 1.1.2.3 pass")
pass
else:
# config['readonly'] is False
if originally_on_disk:
if config.get("to_disk", False):
# 1.2.1.1 Original subarray was on disk and
# there and we don't want to keep the
# array
if config["unique_subarray"] and isinstance(
original_subarray, CachedArray
):
# Original subarray was a temporary file
# on disk which is not referenced by any
# other partitions
_remove_temporary_files(
original_subarray._partition_file
)
logger.partitioning(" 1.2.1.1 to_disk")
self.to_disk(reopen=False)
elif free_memory() <= cf_fm_threshold():
# 1.2.1.2 Original subarray was on disk but
# there is not enough memory to keep
# it
if config["unique_subarray"] and isinstance(
original_subarray, CachedArray
):
# Original subarray was a temporary file
# on disk which is not referenced by any
# other partitions
_remove_temporary_files(
original_subarray._partition_file
)
logger.partitioning(" 1.2.1.2 to_disk")
self.to_disk(reopen=False)
else:
# 1.2.1.3 Original subarray was on disk and
# there is enough memory to keep it
logger.partitioning(" 1.2.1.3 pass")
del self.masked
else:
if config.get("to_disk", False):
# 1.2.2.1 Original subarray was in memory but
# we don't want to keep it
logger.partitioning(" 1.2.2.1 to_disk")
self.to_disk(reopen=False)
elif free_memory() <= cf_fm_threshold():
# 1.2.2.2 Original subarray was an in memory
# but there is not enough memory to
# keep it
logger.partitioning(" 1.2.2.2 to_disk")
self.to_disk(reopen=False)
else:
# 1.2.2.3 Original subarray was in memory and
# there is enough memory to keep it
logger.partitioning(" 1.2.2.3 del masked")
del self.masked
else:
logger.partitioning("Partition.close: parallel")
# --------------------------------------------------------
# PARALLEL
# --------------------------------------------------------
pass
# if hasattr(self, '_original'):
# del self._original
# print(hasattr(self, 'config')),
try:
del self.config
except AttributeError:
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def close(self):\n return self.close_array",
"def file_close(self):\n if self.on_disk:\n self._subarray.close()",
"def close(self):\n self.ix.close()",
"def close (self):\n pass\n #TODO: implement more realistic closing semantics",
"def close(self):\n self.data.close()",
"def close(self):\n self.drill = None",
"def _close( self ):\n for sji in self._sji_data:\n sji.close()",
"def close(self) -> None:",
"def close(self) -> None:",
"def close(self) -> None:",
"def close(self) -> None:",
"def close(self) -> None:",
"def close(self) -> None:",
"def close(self) -> None:",
"def close(self) -> None:",
"def close():",
"def close(self) -> None:\n ...",
"def close(self) -> None:\n ...",
"def _close(self):\n self.write_data(self.write_queue)\n self.write_compound(self.write_compound_queue)",
"def close(self):\n\t\tself.filep.close()",
"def close(self):",
"def close(self):",
"def close(self):",
"def close(self):",
"def close(self):",
"def close(self):",
"def close(self):",
"def close(self):",
"def close(self):",
"def close(self):"
] | [
"0.6764165",
"0.6518303",
"0.60208786",
"0.5811839",
"0.5795013",
"0.579095",
"0.5784808",
"0.57648385",
"0.57648385",
"0.57648385",
"0.57648385",
"0.57648385",
"0.57648385",
"0.57648385",
"0.57648385",
"0.5713722",
"0.5699953",
"0.5699953",
"0.5665161",
"0.5664806",
"0.56545746",
"0.56545746",
"0.56545746",
"0.56545746",
"0.56545746",
"0.56545746",
"0.56545746",
"0.56545746",
"0.56545746",
"0.56545746"
] | 0.70555735 | 0 |
True if the subarray contains datetime objects. | def isdt(self):
return self.Units.isreftime and self._subarray.dtype == _dtype_object | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def has_datetime_type(obj: _std_typing.Any) -> bool:\n return obj.dtype == sc.DType.datetime64",
"def _uses_datetimeblock(dtype: Union[np.dtype, ExtensionDtype]) -> bool:\n vtype = dtype.type\n return issubclass(vtype, np.datetime64)",
"def is_datetime(self) -> bool:\n return False",
"def are_all_datetimes(values: List[Union[str, int, float]]):\n for value in values:\n if not is_datetime(value):\n return False\n return True",
"def has_time(self):\n return isinstance(self._start, datetime.datetime)",
"def is_datetime(self):\n answer = self._call('is_datetime')\n return answer.yes",
"def is_datetime_type(val):\n return (\n pd.api.types.is_datetime64_any_dtype(val)\n or isinstance(val, pd.Timestamp)\n or isinstance(val, datetime)\n )",
"def is_all_dates(self) -> bool:\n return False",
"def is_normalized(self) -> bool:\n return is_date_array_normalized(self.asi8, self.tz, reso=self._creso)",
"def has_start_stop_acqtamps(self):\n try:\n if not all([isinstance(x, datetime) for x in self.start_acq]):\n raise Exception(\"Invalid value encountered in start_acq\")\n if not all([isinstance(x, datetime) for x in self.stop_acq]):\n raise Exception(\"Invalid value encountered in stop_acq\")\n if not all([len(self) == len(x) for x in [self.start_acq,\\\n self.stop_acq]]):\n raise Exception(\"Lengths of arrays do not match...\")\n return True\n except Exception as e:\n print((repr(e)))\n return False",
"def has_t(self):\n return any(map(lambda s: s.is_temporal, self))",
"def time_series(self) -> bool:\n return self._time_series",
"def check_dataset_dates(self):\n # TODO: graph traverse and date checking\n pass",
"def __contains__(self, ts):\n if not isinstance(ts, datetime.datetime):\n return False\n base_key = self.floor_time(key)\n return self.first_timestamp <= base_key <= self.last_timestamp",
"def is_date_dtype(df, col_name):\n dtype = df.dtypes[col_name]\n return np.issubdtype(dtype, np.datetime64) or np.issubdtype(dtype, np.timedelta64)",
"def check_dates(dates):\n for date in dates:\n if type(date) != datetime.datetime:\n raise TypeError('Input date, %s, not datetime object' % date)",
"def test_validate_datetime(self):\n self.datatrue = pd.read_csv(pkg_resources.resource_filename(resource_package, 'tests/testing_data/report_2_counts.csv'))\n\n self.datafalse = pd.read_csv(pkg_resources.resource_filename(resource_package, 'tests/testing_data/random_date1.csv'))\n\n self.test1 = utils.validate_datetime(self.datatrue)\n\n self.test2 = utils.validate_datetime(self.datafalse)\n\n self.assertTrue(isinstance(self.test1, pd.DataFrame))\n\n self.assertTrue(np.dtype('datetime64[ns]') in self.test1.dtypes.tolist())\n\n self.assertFalse(np.dtype('datetime64[ns]') in self.test2.dtypes.tolist())",
"def has_timestamp(self):\n return (self.data_type() & 0x100 == 0x100) and (self.raw_data_length() >= 8)",
"def _checkData(data: Sequence[HistoryElement]):\r\n if not all(x.timeStamp for x in data):\r\n raise ValueError(\"At least one element in data doesn't have a TimeStamp\")",
"def could_be_datetime(val, fmt):\n\n if val == None or fmt == None:\n return False\n\n if isinstance(val, datetime):\n return True\n\n if isinstance(val, (str, unicode)):\n if Record.is_empty_str(val) or Record.is_empty_str(fmt):\n return False\n\n try:\n d = datetime.strptime(val, fmt)\n if not isinstance(d, datetime):\n raise ValueError\n else:\n return True\n except Exception as e:\n logging.error(e)\n return False\n\n #otherwise\n return False",
"def test_14_digit_datetime_detection(self):\n obj = awstats_reader.awstats_datetime('20091130165230')\n self.assertTrue(isinstance(obj, awstats_reader.AwstatsDateTime))",
"def _is_key_value_array(self, data):\n for d in data:\n if not self._is_key_value(d):\n return False\n return True",
"def __eq__(self, t):\n if not isinstance(t, DateTime):\n return False\n return (self._micros, self._tz) == (t._micros, t._tz)",
"def is_date(dt):\n return isinstance(dt, datetime.date) and not isinstance(dt, datetime.datetime)",
"def has_data(self, fit_id, species_id, start=None, stop=None):\n if not (fit_id in self.raw_results and species_id in\\\n self.raw_results[fit_id]):\n return False\n if all([isinstance(x, datetime) for x in [start, stop]]):\n ts = self.raw_results[fit_id][\"start\"]\n if not any([start < x < stop for x in ts]):\n return False\n return True",
"def _is_tc_entity_array(self, data):\n for d in data:\n if not self._is_tc_entity(d):\n return False\n return True",
"def _check_dates_tareas(self, cr, uid, ids, context=None):\n for leave in self.read(cr, uid, ids, ['date_start_tarea', 'date_end_tarea'], context=context):\n if leave['date_start_tarea'] and leave['date_end_tarea']:\n if leave['date_start_tarea'] > leave['date_end_tarea']:\n return False\n return True",
"def check_consistency(object) -> bool:\n time = np.array(list(object.keys()))\n time_diff = time[1:] - time[0:-1]\n return np.all(time_diff == 1)",
"def __len__(self):\n return len(self.dates)",
"def table_has_any_timestamp_fields(table_object) -> bool:\n mapper = sqlalchemy.inspect(table_object)\n for column in mapper.all_orm_descriptors:\n try:\n if isinstance(column.type, PSQL_TIMESTAMP) or isinstance(column.type, SQLITE_TIMESTAMP):\n return True\n except Exception:\n pass\n return False"
] | [
"0.72152364",
"0.68011504",
"0.67946845",
"0.6540459",
"0.6300369",
"0.62526584",
"0.6159265",
"0.612599",
"0.612436",
"0.60484034",
"0.60277694",
"0.6001856",
"0.5956571",
"0.5938945",
"0.58849597",
"0.58664906",
"0.58624506",
"0.5751764",
"0.56482595",
"0.56456316",
"0.5640433",
"0.5638849",
"0.5635289",
"0.55743665",
"0.55646634",
"0.5558836",
"0.55414283",
"0.5526302",
"0.5519423",
"0.55170965"
] | 0.71551335 | 1 |
Close the file containing the subarray, if there is one. | def file_close(self):
if self.on_disk:
self._subarray.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def close_file(self):\n self.root_group.close()",
"def __del__(self):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n # If the subarray is unique it will have 2 references to\n # it plus 1 within this method, making 3. If it has more\n # than 3 references to it then it is not unique.\n if getrefcount is not None:\n self._decrement_file_counter()\n if subarray is None or getrefcount(subarray) > 3:\n return\n else:\n # getrefcount has itself been deleted or is in the process\n # of being torn down\n return\n\n _partition_file = getattr(subarray, \"_partition_file\", None)\n if _partition_file is not None:\n # This partition contains a temporary file which is not\n # referenced by any other partition on this process, so if\n # there are no lock files present remove the file from\n # disk.\n _remove_temporary_files(_partition_file)\n\n else:\n try:\n if FileArray is not None and isinstance(subarray, FileArray):\n try:\n filename = subarray.get_filename()\n except Exception:\n filename = None\n\n if self.file_counter.get(filename, 999) <= 0:\n # This partition contains a non-temporary file\n # which is not referenced by any other\n # partitions, so close the file.\n subarray.close()\n except Exception:\n # If we're here then it is likely that FileArray has been\n # torn down, so just do nothing.\n pass\n # --- End: if",
"def close(self):\n return self.close_array",
"def close_raster_file(self):\n try:\n if self.dataset:\n del self.dataset\n self.dataset = None\n except AttributeError:\n pass",
"def close_datafile(fs):\r\n fs.close() # fs is the output from init_datafile\r",
"def close_file(self):\r\n self.file.close()",
"def close(self):\n self.fileobj.close()",
"def close(self):\n self.fileobj.close()",
"def close(self):\n\t\tself.filep.close()",
"def close(self):\n if self.current_file_number is not None:\n self.fh_raw.close()\n self.current_file_number = None",
"def release(self, path, fh, *args, **pargs):\n with(self.rwlock):\n # If we're closing a FLACCue file...\n if(path in self._open_subtracks):\n # Delete the file handle from the stored list.\n del self._open_subtracks[path]['Positions'][fh]\n # Close the OS reference to the file.\n return os.close(fh)",
"def close(self):\n self.fout.close()",
"def close_file(self, data_set):\n if hasattr(data_set, '_h5_base_group'):\n data_set._h5_base_group.close()\n # Removes reference to closed file\n del data_set._h5_base_group\n else:\n logging.warning(\n 'Cannot close file, data_set has no open hdf5 file')",
"def _close( self ):\n for raster in self._raster_data:\n if raster != []:\n raster.close()",
"def close_file(self):\n self.file.close()",
"def close_file(self):\n self.file.close()",
"def Close(self):\n super(CPIOArchiveFile, self).Close()\n self._file_entries = None",
"def test_fileobj_not_closed(self):\n\n f = open(self.data(\"test0.fits\"), \"rb\")\n _ = fits.getdata(f)\n assert not f.closed\n\n f.seek(0)\n _ = fits.getheader(f)\n assert not f.closed\n\n f.close() # Close it now",
"def _close(self):\n self.fh.close()",
"def close_csv_file(self):\n if self.file is not None:\n self.file.close()",
"def close(self):\n self.file.close()",
"def close(self):\n self.file.close()",
"def close(self):\n if self.mode == \"w\":\n # Write the content index\n self.cnt.write(self.file)\n\n self.file.close()",
"def close_file(file):\n file.close()",
"def close(self):\n self.__file.close()",
"def close_file_serializer(self):\n if self.file_writer:\n self.file_writer.close()\n self._output_file = None",
"def close(self):\n self.file_out.close()",
"def closeJson(f):\n f.write(']')\n f.close()",
"def close(self):\n if not self.file.closed:\n self.file.close()",
"def close(self):\n self.__file_object.close()"
] | [
"0.61085176",
"0.6024418",
"0.59545076",
"0.5824396",
"0.58042437",
"0.5736725",
"0.57022864",
"0.57022864",
"0.56994104",
"0.56970567",
"0.56860274",
"0.564539",
"0.56289333",
"0.56183493",
"0.5591367",
"0.5591367",
"0.55517685",
"0.5509945",
"0.5487693",
"0.54802656",
"0.54625976",
"0.54625976",
"0.5456624",
"0.54523706",
"0.5448531",
"0.5446381",
"0.5442482",
"0.54284096",
"0.54231614",
"0.54186165"
] | 0.7828047 | 0 |
Return an iterator over indices of the master array which are spanned by the data array. | def master_ndindex(self): # itermaster_indices(self):
return itertools_product(
*[range(*r) for r in self.location]
) # TODO check | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_subset_inds(self, adata_parent):\r\n subset_inds = np.ones(len(adata_parent), dtype=bool)\r\n for condition, values in self.subset_cond.items():\r\n subset_inds *= adata_parent.obs[condition].isin(values)\r\n return subset_inds",
"def enumerate(self):\n # go through the container and tile in sync\n for index, value in zip(self.tile, self.data):\n # hand the {index} and the corresponding value to the caller\n yield index, value\n # all done\n return",
"def indices(self):\n return range(len(self))",
"def mainIndices(self):\n return self.i1, self.i2",
"def __iter__(self):\n indices = []\n for i, size in enumerate(self.group_sizes):\n if size == 0:\n continue\n indice = np.where(self.flag == i)[0]\n if not len(indice) == size:\n raise ValueError('the length of the indice should be equal to the size')\n np.random.shuffle(indice)\n num_extra = int(np.ceil(size / self.samples_per_gpu)\n ) * self.samples_per_gpu - len(indice)\n indice = np.concatenate([indice, indice[:num_extra]])\n indices.append(indice)\n indices = np.concatenate(indices)\n indices = [\n indices[i * self.samples_per_gpu:(i + 1) * self.samples_per_gpu]\n for i in np.random.permutation(\n range(len(indices) // self.samples_per_gpu))\n ]\n indices = np.concatenate(indices)\n indices = torch.from_numpy(indices).long()\n if not len(indices) == self.num_samples:\n raise ValueError(\"the length of the indices should be equal to num_samples\")\n return iter(indices)",
"def indices(self):\n return tuple([slice(*r) for r in self.location])",
"def indices(self) -> np.ndarray:\n return self.impl.indices",
"def get_index_array(self):\n return self.region_pairs",
"def __iter__(self):\n\n starts = range(0, self.data.shape[self.axis], self.chunksize)\n\n for t in zip_longest(starts, starts[1:], fillvalue=None):\n yield self.data[self._slice(*t)]",
"def __iter__(self):\n while True:\n if self.batches is None:\n for indexed_sentence in self.indexed_sentences:\n yield indexed_sentence\n else:\n for batch in self.batches:\n yield batch[:-1, :], batch[1:, :] # Return batch and target indices\n\n if not self.repeat:\n return",
"def get_data_idx(self)->list:\n return self.__data_idx",
"def get_indices(self):\r\n return self._indices",
"def getind(self,start,end,blk):\n\n if blk is None:\n # Return all blocks\n blk = np.arange(self.ind[start].size)\n\n ind=np.array([])\n for k,val in enumerate(blk):\n ind=np.append(ind,np.arange(self.ind[start][val],self.ind[end][val]))\n return ind.astype(int)",
"def inidices(self):\n return [Index(name, index) for name, index in self._data['indices'].iteritems()]",
"def index_iterator((x_min, x_max, y_min, y_max)):\n for row in xrange(y_min, y_max):\n for col in xrange(x_min, x_max):\n yield (row, col)",
"def _get_split_indices(self):\n\n cumsum = np.cumsum(\n np.concatenate((np.array([0], dtype=np.int8), self.split_sizes)))\n \n fold_inds = np.array(\n [(cumsum[n], cumsum[n + 1]) for n in range(self.n_splits)])\n\n return fold_inds",
"def get_indexes(self, dataset: BaseDataset) -> int:\n\n index = [np.random.randint(0, len(dataset)) for _ in range(1)]\n\n return index",
"def __iter__(self):\n # deterministically shuffle based on epoch\n if self.shuffle:\n g = torch.Generator()\n g.manual_seed(self.epoch)\n indices = torch.randperm(len(self.dataset), generator=g).tolist()\n else:\n indices = torch.arange(len(self.dataset)).tolist()\n\n # add extra samples to make it evenly divisible\n indices += indices[:(self.total_size - len(indices))]\n if not len(indices) == self.total_size:\n raise ValueError('the length of the indices should be equal to total_size')\n\n # subsample\n indices = indices[self.rank:self.total_size:self.num_replicas]\n if not len(indices) == self.num_samples:\n raise ValueError(\"the length of the indices should be equal to num_samples in subsample\")\n\n return iter(indices)",
"def _get_chunk_indexer(self, array):\n if self.data.num_chunks == 1:\n return np.broadcast_to(0, len(array))\n return np.digitize(array, self.offsets[1:])",
"def get_intra_sample_indices(self):\n intra_sample_indices = []\n for group_index in range(self.num_groups):\n num_images_in_group = self.num_images_per_group[group_index]\n if self.intra_group_option == \"forward\":\n for i in range(num_images_in_group):\n for j in range(i):\n # j < i\n intra_sample_indices.append(\n ((group_index, j), (group_index, i))\n )\n elif self.intra_group_option == \"backward\":\n for i in range(num_images_in_group):\n for j in range(i):\n # i > j\n intra_sample_indices.append(\n ((group_index, i), (group_index, j))\n )\n elif self.intra_group_option == \"unconstrained\":\n for i in range(num_images_in_group):\n for j in range(i):\n # j < i, i > j\n intra_sample_indices.append(\n ((group_index, j), (group_index, i))\n )\n intra_sample_indices.append(\n ((group_index, i), (group_index, j))\n )\n else:\n raise ValueError(\n \"Unknown intra_group_option, must be forward/backward/unconstrained\"\n )\n return intra_sample_indices",
"def create_repeated_indexes(data):\n from numpy import arange\n\n index_range = arange(0, len(data))\n return (index_range for i in index_range)",
"def get_indices_entire_sequence(data: pd.Dataframe, window_size: int, step_size: int) -> list:\n stop_position = len(data)-1 # 1- because of 0 indexing\n\n # Start the first sub-sequence at index position 0\n subseq_first_idx = 0\n\n subseq_last_idx = subseq_first_idx + window_size\n\n indices = []\n\n while subseq_last_idx <= stop_position:\n indices.append((subseq_first_idx, subseq_last_idx))\n subseq_first_idx += step_size\n subseq_last_idx += step_size\n return indices",
"def batch_indices(self):\n b = self.batch_size\n return [np.arange(i*b, i*b+b) for i in range(self.num_batches)]",
"def atom_idxs(self):\n\n return np.array([atom.atom_idxs for atom in self])",
"def indices(self):\n _indices = []\n for h in self.miller.indices():\n _indices.append(self.indices_hkl(*h)[0])\n return _indices",
"def getLandmarkindices(self):\n return self.subsetindices",
"def __iter__(self):\n batch = []\n for i_batch in range(self.episode_num):\n classes = torch.randperm(len(self.idx_list))[: self.way_num]\n for c in classes:\n idxes = self.idx_list[c.item()]\n pos = torch.randperm(idxes.size(0))[: self.image_num]\n batch.append(idxes[pos])\n if len(batch) == self.episode_size * self.way_num:\n batch = torch.stack(batch).reshape(-1)\n yield batch\n batch = []",
"def _iterCoordsets(self):\n\n for i in range(self._n_csets):\n yield self._coords[i]",
"def iterCoordsets(self):\n\n for i in range(self._n_csets):\n yield self._coords[i].copy()",
"def __iter__(self):\n worker_info = torch.utils.data.get_worker_info()\n num_batches = len(self.coords_batcher)\n if worker_info is None:\n # In single-processing mode\n start, end = 0, num_batches\n else:\n worker_id = worker_info.id\n num_workers = worker_info.num_workers\n shard_size = int(np.ceil(num_batches / num_workers))\n start = shard_size * worker_id\n end = min(start + shard_size, num_batches)\n return (self.get_batch(i) for i in range(start, end))"
] | [
"0.61020994",
"0.6072502",
"0.60690475",
"0.60687184",
"0.60543483",
"0.60275173",
"0.5924552",
"0.5918545",
"0.59088135",
"0.58821535",
"0.58567846",
"0.58429295",
"0.5828838",
"0.58083194",
"0.5794418",
"0.5793355",
"0.57828283",
"0.57748425",
"0.5756645",
"0.57550335",
"0.5752208",
"0.5744148",
"0.57440686",
"0.5740753",
"0.5721396",
"0.5708537",
"0.5681665",
"0.5668879",
"0.566354",
"0.5635181"
] | 0.6835397 | 0 |
Update the `!part` attribute inplace for new indices of the master array. | def new_part(self, indices, master_axis_to_position, master_flip):
shape = self.shape
if indices == [slice(0, stop, 1) for stop in shape]:
return
# ------------------------------------------------------------
# If a dimension runs in the wrong direction then change its
# index to account for this.
#
# For example, if a dimension with the wrong direction has
# size 10 and its index is slice(3,8,2) then after the
# direction is set correctly, the index needs to changed to
# slice(6,0,-2):
#
# >>> a = [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
# >>> a[slice(3, 8, 2)]
# [6, 4, 2]
# >>> a.reverse()
# >>> print(a)
# >>> a = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
# >>> a[slice(6, 0, -2)]
# [6, 4, 2]
# ------------------------------------------------------------
if self._subarray.size > 1:
indices = indices[:]
p_flip = self.flip
for axis, i in master_axis_to_position.items():
if (axis not in p_flip and axis not in master_flip) or (
axis in p_flip and axis in master_flip
):
# This axis runs in the correct direction
continue
# Still here? Then this axis runs in the wrong
# direction.
# Reset the direction
p_flip = p_flip[:]
if axis in self.flip:
p_flip.remove(axis)
else:
p_flip.append(axis)
# Modify the index to account for the changed
# direction
size = shape[i]
if isinstance(indices[i], slice):
start, stop, step = indices[i].indices(size)
# Note that step is assumed to be always +ve here
div, mod = divmod(stop - start - 1, step)
start = size - 1 - start
stop = start - div * step - 1
if stop < 0:
stop = None
indices[i] = slice(start, stop, -step)
else:
size -= 1
indices[i] = [size - j for j in indices[i]]
# --- End: for
self.flip = p_flip
# --- End: if
slice_None = slice(None)
# Reorder the new indices
indices = [
(
indices[master_axis_to_position[axis]]
if axis in master_axis_to_position
else slice_None
)
for axis in self.axes
]
part = self.part
if not part:
self.part = indices
return
# Still here? update an existing part
p_part = []
for part_index, index, size in zip(
part, indices, self._subarray.shape
):
if index == slice_None:
p_part.append(part_index)
continue
if isinstance(part_index, slice):
if isinstance(index, slice):
start, stop, step = part_index.indices(size)
size1, mod = divmod(stop - start - 1, step)
start1, stop1, step1 = index.indices(size1 + 1)
size2, mod = divmod(stop1 - start1, step1)
if mod != 0:
size2 += 1
start += start1 * step
step *= step1
stop = start + (size2 - 1) * step
if step > 0:
stop += 1
else:
stop -= 1
if stop < 0:
stop = None
p_part.append(slice(start, stop, step))
continue
else:
new_part = list(range(*part_index.indices(size)))
new_part = [new_part[i] for i in index]
else:
if isinstance(index, slice):
new_part = part_index[index]
else:
new_part = [part_index[i] for i in index]
# --- End: if
# Still here? Then the new element of p_part is a list of
# integers, so let's see if we can convert it to a slice
# before appending it.
new_part0 = new_part[0]
if len(new_part) == 1:
# Convert a single element list to a slice object
new_part = slice(new_part0, new_part0 + 1, 1)
else:
step = new_part[1] - new_part0
if step:
if step > 0:
start, stop = new_part0, new_part[-1] + 1
else:
start, stop = new_part0, new_part[-1] - 1
if new_part == list(range(start, stop, step)):
if stop < 0:
stop = None
new_part = slice(start, stop, step)
# --- End: if
p_part.append(new_part)
# --- End: for
self.part = p_part | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save(self, patch):\n internalSlices = self._get_internal_slices(patch.slices)\n self.array[internalSlices] = patch.array",
"def _idx_changed(self, idx):\n self.refresh_memory()",
"def _loadpart(self, part):\n new_partidx = util.Partname(part.partname).idx\n for idx, seq_part in enumerate(self._values):\n partidx = util.Partname(seq_part.partname).idx\n if partidx > new_partidx:\n self._values.insert(idx, part)\n return\n self._values.append(part)",
"def _idxs_postformat_array(self):\n self.idxs = np.array(self.idxs)",
"def __update(self, idx):\n parent = (idx - 1) // 2\n while parent >= 0:\n left, right = 2 * parent + 1, 2 * parent + 2\n self.__tree[parent] = self.__tree[left] + self.__tree[right]\n parent = (parent - 1) // 2",
"def part_ids(self, part_ids):\n\n self._part_ids = part_ids",
"def update_chunk(self):\n for key, value in self.piece_coordinates.items():\n # Why is the key a numpy.int type ???\n self.chunk[value] = key",
"def update_idx(self):\n self.idx = (self.F * self.FMUL +\n self.E * self.EMUL +\n self.Z * self.ZMUL +\n self.A * self.AMUL +\n self.B * self.BMUL )",
"def setInternalIndex(self,ind):\n\t\tself.trMtrxNode_ind = ind",
"def reindex_subcomponent_taxa(self):\n ti_mutable = self.taxon_set._is_mutable\n self.taxon_set._is_mutable = True\n new_map = CharacterDataMap()\n for taxon, seq in self.taxon_seq_map.items():\n taxon = self.taxon_set.require_taxon(label=taxon.label)\n new_map[taxon] = seq\n self.taxon_set._is_mutable = ti_mutable\n self.taxon_seq_map = new_map",
"def set(self, idx_in, vals, fill=False):\n o = np.broadcast_arrays(vals, *idx_in)\n vals = np.ravel(o[0])\n\n # TODO: Determine whether new vs. existing indices are being\n # addressed, in the latter case we only need to update data\n # array\n\n vals = np.array(vals, ndmin=1)\n idx_flat_in, msk_in = self._to_flat_index(idx_in)\n vals = np.asanyarray(vals, dtype=self.data.dtype)\n idx, data = merge_sparse_arrays(\n idx_flat_in, vals, self.idx, self.data, fill=fill\n )\n\n # Remove elements equal to fill value\n msk = data != self._fill_value\n idx = idx[msk]\n data = data[msk]\n self._idx = idx\n self._data = data\n # idx, msk = find_in_array(idx_flat_in, self.idx)\n # self._data[idx[msk]] = vals[msk]",
"def _setActiveChildWellIndices(self,activeWellIndices):\n if activeWellIndices is None:\n activeWellIndices=list(range(0,len(self.childWellIndices())))\n\n if len(activeWellIndices) > 0:\n includedIndices=set()\n for localdataidx in activeWellIndices:\n if localdataidx < 0 or localdataidx >= len(self.childWellIndices()):\n raise RuntimeError(\"local index \"+str(localdataidx)+\" out of range\")\n if localdataidx in includedIndices:\n raise RuntimeError(\"local index \"+str(localdataidx)+\" given multiple times\")\n includedIndices.add(localdataidx)\n\n self._activeWellIndices=activeWellIndices",
"def __setitem__(self, key, value):\n mixed_positions, vindex_positions = _advanced_indexer_subspaces(key)\n self._array[key] = np.moveaxis(value, vindex_positions, mixed_positions)",
"def reindex(self):\n self._index = {w: i for i, w in enumerate(self._words)}\n self.n, self.d = self._vecs.shape\n assert self.n == len(self._words) == len(self._index)\n self._neighbors = None",
"def update(self, idx, add):\n idx += 1\n while idx < len(self.array):\n self.array[idx] += add\n idx += idx & -idx #Adding the last bit",
"def propagateDirty(self, slot, subindex, roi):\n totalIndex = (self._subSlots.index(slot),) + subindex\n self.operator.propagateDirty(self, totalIndex, roi)",
"def set_index_ub(self, param, length):\n if tik.Dprofile().get_product_name() in (MINI, CLOUD, HISI_ES):\n sum_mask_ub = self.instance.Tensor(self.dtype, (16,),\n name=\"sum_mask_ub\",\n scope=tik.scope_ubuf)\n work_tensor_ub = self.instance.Tensor(self.dtype, (16,),\n name=\"work_tensor_ub\",\n scope=tik.scope_ubuf)\n self.instance.vec_reduce_add(self.mask, sum_mask_ub, param['reduce_mask_ub'], work_tensor_ub, 1, 8)\n\n mask_scalar = self.instance.Scalar(\"uint16\", name=\"mask_scalar\")\n mask_scalar.set_as(sum_mask_ub[0])\n with self.instance.if_scope(mask_scalar != 0):\n with self.instance.if_scope(param['count'] < PRE_NMS_TOPN):\n with self.instance.for_range(0, length) as mask_index:\n param['index_offset'].set_as(param['index_offset'] + 1)\n with self.instance.if_scope(param['count'] < PRE_NMS_TOPN):\n mask_scalar.set_as(param['reduce_mask_ub'][mask_index])\n\n # 1 fp16 == 15360 uint16\n with self.instance.if_scope(mask_scalar == 15360):\n param['index_ub'][param['count']].set_as(\n param['index_offset'])\n param['count'].set_as(param['count'] + 1)\n with self.instance.else_scope():\n param['index_offset'].set_as(param['index_offset'] + length)",
"def update_rec(self):\n import copy\n \n self.leftrec, self.rightrec = copy.copy(self.rec), copy.copy(self.rec)\n self.leftrec[2*self.dim + 1], self.rightrec[2*self.dim] = self.node.dimension[self.dim], self.node.dimension[self.dim]",
"def update(self, idx, value):\n idx = self.__capacity - 1 + idx\n self.__tree[idx] = value\n self.__update(idx)",
"def reindex(self):",
"def reindex(self):",
"def __setitem__(self, index, value):\n if isinstance(index, types.SliceType):\n keys = self._main._sequence[index]\n if len(keys) != len(value):\n raise ValueError('attempt to assign sequence of size %s '\n 'to slice of size %s' % (len(name), len(keys)))\n # FIXME: efficiency? Would be better to calculate the indexes\n # directly from the slice object\n # NOTE: the new keys can collide with existing keys (or even\n # contain duplicates) - these will overwrite\n for key, val in zip(keys, value):\n self._main[key] = val\n else:\n self._main[self._main._sequence[index]] = value",
"def preCommitFixup(self):\n log_method_call(self, self.name)\n if not self.exists or not self.disklabelSupported:\n return\n\n # find the correct partition on the original parted.Disk since the\n # name/number we're now using may no longer match\n _disklabel = self.disk.originalFormat\n\n if self.isExtended:\n # getPartitionBySector doesn't work on extended partitions\n _partition = _disklabel.extendedPartition\n log.debug(\"extended lookup found partition %s\",\n devicePathToName(getattr(_partition, \"path\", None) or \"(none)\"))\n else:\n # lookup the partition by sector to avoid the renumbering\n # nonsense entirely\n _sector = self.partedPartition.geometry.start\n _partition = _disklabel.partedDisk.getPartitionBySector(_sector)\n log.debug(\"sector-based lookup found partition %s\",\n devicePathToName(getattr(_partition, \"path\", None) or \"(none)\"))\n\n self.partedPartition = _partition",
"def untie_everything(self):\r\n self.tied_indices = []",
"def get_local_indices(self, part, ctx):\n return self.map_to_global(\n F.arange(0, self.local_size(part), ctx=ctx), part\n )",
"def setActiveChildWellIndices(self,activeWellIndices):\n if not self.isReplicateGroup():\n raise RuntimeError(self.fullId()+\": cannot change active child well indices, this is not a replicate group.\")\n self._setActiveChildWellIndices(activeWellIndices)\n self.clsParent.modified=True",
"def setReference(self, updatedIndices):\n # self.colors[:] = [self.colors[i] for i in updatedIndices]\n self.cellData[:] = [self.cellData[i] for i in updatedIndices]",
"def set_position(self, idx, pos):\n if self.EMULATOR_MODE:\n return\n if idx >= self.nleaflets or idx < 0:\n raise IndexError('index specified is out of bounds')\n self._fserial.write(self.MAGIC_BYTES + bytes([idx]) + pos.to_bytes(2, byteorder='big', signed=False) )\n self._fserial.reset_input_buffer()",
"def _setPartedPartition(self, partition):\n log_method_call(self, self.name)\n\n if partition is not None and not isinstance(partition, parted.Partition):\n raise ValueError(\"partition must be None or a parted.Partition instance\")\n\n log.debug(\"device %s new partedPartition %s\", self.name, partition)\n self._partedPartition = partition\n self.updateName()",
"def undo_scan(self, sub_array_id: int):"
] | [
"0.5455975",
"0.5450583",
"0.5443926",
"0.53721666",
"0.5359862",
"0.5275372",
"0.52543634",
"0.52315736",
"0.5228334",
"0.51149035",
"0.50902224",
"0.50782204",
"0.5062653",
"0.50265366",
"0.50074023",
"0.49909484",
"0.49905938",
"0.49756426",
"0.4970326",
"0.496184",
"0.496184",
"0.49535567",
"0.4943507",
"0.49215022",
"0.49166226",
"0.49110836",
"0.49097493",
"0.4902477",
"0.4890401",
"0.48781407"
] | 0.5784396 | 0 |
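The list-to-slice conversion in the record above can be read on its own: a uniformly spaced list of indices is replaced by an equivalent slice object, with a negative stop mapped to None so that reverse slices do not wrap around. A minimal standalone sketch of the same idea (the helper name and the example values are illustrative, not taken from the library):

def list_to_slice(indices):
    # Return an equivalent slice for a uniformly spaced list of ints,
    # or the list unchanged if no such slice exists.
    if len(indices) == 1:
        start = indices[0]
        return slice(start, start + 1, 1)

    step = indices[1] - indices[0]
    if step > 0:
        start, stop = indices[0], indices[-1] + 1
    elif step < 0:
        start, stop = indices[0], indices[-1] - 1
    else:
        return indices  # zero step: not expressible as a slice

    if indices == list(range(start, stop, step)):
        if stop < 0:
            stop = None  # a negative stop would wrap around when indexing
        return slice(start, stop, step)

    return indices  # non-uniform spacing: keep the list

print(list_to_slice([2, 3, 4]))  # slice(2, 5, 1)
print(list_to_slice([4, 2, 0]))  # slice(4, None, -2)
print(list_to_slice([0, 1, 3]))  # [0, 1, 3]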
The extra memory required to access the array. | def extra_memory(self):
if not self.in_memory:
# --------------------------------------------------------
# The subarray is on disk so getting the partition's data
# array will require extra memory
# --------------------------------------------------------
extra_memory = True
else:
# --------------------------------------------------------
# The subarray is already in memory
# --------------------------------------------------------
config = self.config
p_part = self.part
if p_part:
extra_memory = True
elif not config["unique_subarray"]:
extra_memory = True
else:
p_data = self._subarray
if not numpy_ma_isMA(p_data):
# The p_data is not a masked array
extra_memory = isinstance(p_data.base, numpy_ndarray)
else:
# The p_data is a masked array
memory_overlap = isinstance(
p_data.data.base, numpy_ndarray
)
if not (
p_data.mask is numpy_ma_nomask
or not numpy_ma_is_masked(p_data)
):
# There is at least one missing data point
memory_overlap |= isinstance(
p_data.mask.base, numpy_ndarray
)
extra_memory = memory_overlap
# --- End: if
p_dtype = p_data.dtype
if not extra_memory:
if config["func"] is not None:
extra_memory = True
else:
p_units = self.Units
units = config["units"]
if (
not p_units.equals(units)
and bool(p_units) is bool(units)
and not (
p_data.flags["C_CONTIGUOUS"]
and p_dtype.kind == "f"
)
):
extra_memory = True
# ------------------------------------------------------------
# Extra memory is required if the dtype needs changing
# ------------------------------------------------------------
if not extra_memory:
dtype = config["dtype"]
if dtype is not None and dtype != p_data.dtype:
extra_memory = True
# --- End: if
# ------------------------------------------------------------
# Amount of extra memory (in bytes) required to access the
# array
# ------------------------------------------------------------
return self.nbytes if extra_memory else 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def allocated_memory(self):\n return self._allocated_memory",
"def memory(self):\r\n return self._memory",
"def __len__(self):\n\t\treturn len(self.memory)",
"def __len__(self):\r\n return len(self.memory)",
"def __len__(self):\n return len(self.memory)",
"def __len__(self):\n return len(self.memory)",
"def __len__(self):\n return len(self.memory)",
"def __len__(self):\n return len(self.memory)",
"def __len__(self):\n return len(self.memory)",
"def __len__(self):\n return len(self.memory)",
"def __len__(self):\n return len(self.memory)",
"def __len__(self):\n return len(self.memory)",
"def __len__(self):\n return len(self.memory)",
"def __len__(self):\n return len(self.memory)",
"def __len__(self):\n return len(self.memory)",
"def __len__(self):\n return len(self.memory)",
"def __len__(self):\n return len(self.memory)",
"def __len__(self):\n return len(self.memory)",
"def __len__(self):\n return len(self.memory)",
"def __len__(self):\n return len(self.memory)",
"def __len__(self):\n return len(self.memory)",
"def __len__(self):\n return len(self.memory)",
"def __len__(self):\n return len(self.memory)",
"def allocatememory(self):\n pass",
"def get_array_size(self):\r\n return conf.lib.clang_getArraySize(self)",
"def MAXMEM(self):",
"def getMemory():\n return tracemalloc.take_snapshot()",
"def getMemory():\n return tracemalloc.take_snapshot()",
"def getMemory():\n return tracemalloc.take_snapshot()",
"def getMemory():\n return tracemalloc.take_snapshot()"
] | [
"0.6821264",
"0.6811845",
"0.66922146",
"0.66735834",
"0.6612898",
"0.6612898",
"0.6612898",
"0.6612898",
"0.6612898",
"0.6612898",
"0.6612898",
"0.6612898",
"0.6612898",
"0.6612898",
"0.6612898",
"0.6612898",
"0.6612898",
"0.6612898",
"0.6612898",
"0.6612898",
"0.6612898",
"0.6612898",
"0.6612898",
"0.65476745",
"0.6454627",
"0.6434698",
"0.64256907",
"0.64256907",
"0.64256907",
"0.64256907"
] | 0.73394907 | 0 |
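The memory-overlap test in extra_memory hinges on NumPy's .base attribute: an array that is a view onto another ndarray keeps a reference to it in .base, so using it independently implies extra memory for a copy. A small self-contained illustration of that check (the helper name is made up for this sketch; the masked branch mirrors the logic shown above):

import numpy as np

def shares_memory_with_base(a):
    # True if `a` (or its data/mask, for a masked array) is a view onto
    # another ndarray, i.e. accessing it standalone would need a copy.
    if np.ma.isMA(a):
        overlap = isinstance(a.data.base, np.ndarray)
        if not (a.mask is np.ma.nomask or not np.ma.is_masked(a)):
            overlap |= isinstance(a.mask.base, np.ndarray)
        return overlap
    return isinstance(a.base, np.ndarray)

x = np.zeros((3, 4))
print(shares_memory_with_base(x))         # False: x owns its data
print(shares_memory_with_base(x[1:, :]))  # True: a view onto x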
Move the partition's subarray to a temporary file on disk. | def to_disk(self, reopen=True):
# try:
tfa = CachedArray(self.array)
# except Exception:
# return False
fd, _lock_file = mkstemp(
prefix=tfa._partition_file + "_", dir=tfa._partition_dir
)
close(fd)
self.subarray = tfa
_temporary_files[tfa._partition_file] = (
tfa._partition_dir,
_lock_file,
set(),
)
if reopen:
# Re-open the partition
self.open(self.config)
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def close(self, **kwargs):\n config = getattr(self, \"config\", None)\n\n if config is None:\n return\n\n if kwargs:\n config.update(kwargs)\n\n original = getattr(self, \"_original\", None)\n logger.partitioning(\"Partition.close: original = {}\".format(original))\n\n if not original:\n originally_on_disk = False\n original_subarray = None\n else:\n originally_on_disk = not original.in_memory\n original_subarray = original._subarray\n\n config = self.config\n logger.partitioning(\" config = {}\".format(config))\n\n if config[\"serial\"]:\n # --------------------------------------------------------\n # SERIAL\n # --------------------------------------------------------\n logger.partitioning(\" serial\")\n\n if config[\"readonly\"]:\n logger.partitioning(\" readonly=True\")\n\n if originally_on_disk:\n logger.partitioning(\" subarray originally on disk\")\n\n if config.get(\"to_disk\", False):\n # 1.1.1.1 The original subarray was on disk,\n # we don't want to keep the current\n # subarray in memory, and we are happy\n # to discard any changes that may have\n # been made to the subarray.\n logger.partitioning(\" 1.1.1.1 revert\")\n self.revert()\n elif free_memory() <= cf_fm_threshold():\n # 1.1.1.2 The original subarray was on disk,\n # we are happy to keep the current\n # subarray in memory, but there is not\n # enough free memory to do so.\n logger.partitioning(\n \" 1.1.1.2 revert ({} <= {})\".format(\n free_memory(), cf_fm_threshold()\n )\n )\n self.revert()\n else:\n # 1.1.1.3 The original subarray was on disk\n # and there is enough memory to keep\n # the current subarray in memory\n if config[\"unique_subarray\"] and isinstance(\n original_subarray, CachedArray\n ):\n # The original subarray was a temporary\n # file which is not referenced by any\n # other partitions\n _remove_temporary_files(\n original_subarray._partition_file\n )\n\n del self.masked\n logger.partitioning(\n \" 1.1.1.3 del masked ({} > {})\".format(\n free_memory(), cf_fm_threshold()\n )\n )\n\n else:\n logger.partitioning(\" subarray originally in memory\")\n if config.get(\"to_disk\", False):\n # 1.1.2.1 Original subarray was in memory and\n # we don't want to keep the current\n # subarray in memory\n logger.partitioning(\" 1.1.2.1 to_disk\")\n self.to_disk(reopen=False)\n elif free_memory() <= cf_fm_threshold():\n # 1.1.2.2 Original subarray was in memory and\n # unique but there is not enough\n # memory to keep the current subarray\n logger.partitioning(\" 1.1.2.2 to_disk\")\n self.to_disk(reopen=False)\n else:\n # 1.1.2.3 Original subarray was in memory and\n # unique and there is enough memory to\n # keep the current subarray in memory\n logger.partitioning(\" 1.1.2.3 pass\")\n pass\n else:\n # config['readonly'] is False\n if originally_on_disk:\n if config.get(\"to_disk\", False):\n # 1.2.1.1 Original subarray was on disk and\n # there and we don't want to keep the\n # array\n if config[\"unique_subarray\"] and isinstance(\n original_subarray, CachedArray\n ):\n # Original subarray was a temporary file\n # on disk which is not referenced by any\n # other partitions\n _remove_temporary_files(\n original_subarray._partition_file\n )\n\n logger.partitioning(\" 1.2.1.1 to_disk\")\n self.to_disk(reopen=False)\n elif free_memory() <= cf_fm_threshold():\n # 1.2.1.2 Original subarray was on disk but\n # there is not enough memory to keep\n # it\n if config[\"unique_subarray\"] and isinstance(\n original_subarray, CachedArray\n ):\n # Original subarray was a temporary file\n # on disk which is not referenced by 
any\n # other partitions\n _remove_temporary_files(\n original_subarray._partition_file\n )\n\n logger.partitioning(\" 1.2.1.2 to_disk\")\n self.to_disk(reopen=False)\n else:\n # 1.2.1.3 Original subarray was on disk and\n # there is enough memory to keep it\n logger.partitioning(\" 1.2.1.3 pass\")\n del self.masked\n else:\n if config.get(\"to_disk\", False):\n # 1.2.2.1 Original subarray was in memory but\n # we don't want to keep it\n logger.partitioning(\" 1.2.2.1 to_disk\")\n self.to_disk(reopen=False)\n elif free_memory() <= cf_fm_threshold():\n # 1.2.2.2 Original subarray was an in memory\n # but there is not enough memory to\n # keep it\n logger.partitioning(\" 1.2.2.2 to_disk\")\n self.to_disk(reopen=False)\n else:\n # 1.2.2.3 Original subarray was in memory and\n # there is enough memory to keep it\n logger.partitioning(\" 1.2.2.3 del masked\")\n del self.masked\n else:\n logger.partitioning(\"Partition.close: parallel\")\n # --------------------------------------------------------\n # PARALLEL\n # --------------------------------------------------------\n pass\n\n # if hasattr(self, '_original'):\n # del self._original\n\n # print(hasattr(self, 'config')),\n try:\n del self.config\n except AttributeError:\n pass",
"def cleanup(job, tempOutputFileStoreID, outputFile, cores=1, memory=sortMemory, disk=\"3G\"):\n fileName = job.fileStore.readGlobalFile(tempOutputFileStoreID)\n shutil.copyfile(fileName, outputFile)\n job.fileStore.logToMaster(\"Finished copying sorted file to output: %s\" % outputFile)",
"def _register_temporary_file(self):\n _partition_file = self._subarray._partition_file\n _partition_dir = self._subarray._partition_dir\n if _partition_file not in _temporary_files:\n fd, _lock_file = mkstemp(\n prefix=_partition_file + \"_\", dir=_partition_dir\n )\n close(fd)\n _temporary_files[_partition_file] = (\n _partition_dir,\n _lock_file,\n set(),\n )\n else:\n _, _lock_file, _ = _temporary_files[_partition_file]\n\n return _lock_file",
"def save_partition(self, partition):\n raise NotImplementedError('save_file')",
"def _write_array_on_file(self, pa_array):\n pa_batch = pa.RecordBatch.from_struct_array(pa_array)\n self._num_bytes += pa_array.nbytes\n self.pa_writer.write_batch(pa_batch)",
"def quick_save_array(data, file_name, delimiter=',', ):\n data.tofile(file_name, sep=delimiter)",
"def save_tmp_file(self, data):\n with open(self.tmp_file, 'wb') as f:\n f.write(data)",
"def move_from_temp_directory(self):",
"def test_deleting_local_file_using_file_io_output_file() -> None:\n with tempfile.TemporaryDirectory() as tmpdirname:\n # Write to the temporary file\n file_location = os.path.join(tmpdirname, \"foo.txt\")\n with open(file_location, \"wb\") as f:\n f.write(b\"foo\")\n\n # Instantiate the file-io\n file_io = PyArrowFileIO()\n\n # Confirm that the file initially exists\n assert os.path.exists(file_location)\n\n # Instantiate the custom OutputFile\n output_file = PyArrowFileIO().new_output(location=f\"{file_location}\")\n\n # Delete the file using the file-io implementations delete method\n file_io.delete(output_file)\n\n # Confirm that the file no longer exists\n assert not os.path.exists(file_location)",
"def test_deleting_local_file_using_file_io() -> None:\n with tempfile.TemporaryDirectory() as tmpdirname:\n # Write to the temporary file\n output_file_location = os.path.join(tmpdirname, \"foo.txt\")\n with open(output_file_location, \"wb\") as f:\n f.write(b\"foo\")\n\n # Instantiate the file-io\n file_io = PyArrowFileIO()\n\n # Confirm that the file initially exists\n assert os.path.exists(output_file_location)\n\n # Delete the file using the file-io implementations delete method\n file_io.delete(output_file_location)\n\n # Confirm that the file no longer exists\n assert not os.path.exists(output_file_location)",
"def write_prep_arr(self, arr, index=None):\n if index is None:\n prep_data_dir = os.path.join(self.experiment_dir, 'prep')\n else:\n prep_data_dir = os.path.join(self.experiment_dir, *('scan_' + str(index), 'prep'))\n data_file = os.path.join(prep_data_dir, 'prep_data.tif')\n if not os.path.exists(prep_data_dir):\n os.makedirs(prep_data_dir)\n arr = self.detector.clear_seam(arr, self.roi)\n ut.save_tif(arr, data_file)",
"def test_save_npy(temp_dir):\n data = np.array([[1, 2, 3], [4, 5, 6]])\n save_npy(temp_dir, data, step=1)\n\n assert os.path.exists(os.path.join(temp_dir, \"npy\", \"1.npy\"))",
"def test_to_file(self):\n with TemporaryDirectory() as tmp:\n df_test = make_simple_dataframe()\n Base = BaseDataClass.from_object(df_test)\n fp_save = os.path.join(tmp, \"test_save.csv\")\n Base.to_file(fp_save)\n assert os.path.exists(fp_save)",
"def ArraytoFile(_array):\n\tfile = open('sort1.txt', 'w')\n\tfor line in _array:\n\t\tfile.write(line+\"\\n\")\n\tfile.close()",
"def write_to_disk(self):\n\n\t\t# print \"--------------------------------------------------------WRITING PIECE %r TO DISK\" %self.index\n\t\ttry:\n\t\t\tos.makedirs(PATH)\n\t\texcept:\n\t\t\tpass\n\t\tself.piece_file_name = os.path.join(PATH, self.torrent.name+'.'+'00'+str(self.index))\n\t\t# print \"Saving piece to file name: \", self.piece_file_name\n\t\tpiece_file = open(self.piece_file_name, 'w')\n\t\tpiece_file.write(self.data)\n\t\tpiece_file.close()",
"def write_part_to_file(self, uid, part):\n filename = part.get_filename()\n filename = os.path.join(self.tmp_dir, os.path.basename(filename))\n try:\n open(filename, 'wb').write(part.get_payload(decode=True))\n except Exception as e:\n raise Exception(\n \"Error writing to filename %s with exception %s\" %\n (filename, str(e)))\n else:\n self.helper.log_debug(\n 'write_part_to_file: saved file %s from uid %s' %\n (filename, uid))\n return filename",
"def toFile(self,fid):\n stack = []\n for w,b in self.stack:\n w.copy_to_host()\n b.copy_to_host()\n stack.append([w.numpy_array,b.numpy_array])\n\tpickle.dump(stack,fid)",
"def write_to_outfile(involume, outvolume, data, outfiles_partition, outdir_path, O, file_manager, addition, tracker):\n lowcorner, upcorner = get_overlap_subarray(involume, outvolume) # find subarray crossing both files in the basis of the original image\n overlap_vol = get_overlap_volume(involume, outvolume)\n overlap_shape = overlap_vol.get_shape()\n if DONT_WRITE:\n tracker.add_volume(overlap_vol)\n\n nb_outfile_seeks_tmp = 0\n s = overlap_shape\n if s[2] != O[2]:\n nb_outfile_seeks_tmp += s[0]*s[1]\n elif s[1] != O[1]:\n nb_outfile_seeks_tmp += s[0]\n elif s[0] != O[0]:\n nb_outfile_seeks_tmp += 1\n else:\n pass\n\n if DONT_WRITE:\n print(f\"Overlap shape: {overlap_shape}\")\n print(f\"Outfile shape: {O}\")\n print(f\"Number seeks: {nb_outfile_seeks_tmp}\")\n return overlap_shape, 0, nb_outfile_seeks_tmp\n\n slices = [(lowcorner[0], upcorner[0]), (lowcorner[1], upcorner[1]), (lowcorner[2], upcorner[2])]\n offset_in = involume.get_corners()[0] # lower corner\n offset_out = outvolume.get_corners()[0]\n\n slices_in_infile = [ # convert corners in the basis of input file\n (lowcorner[0]-offset_in[0], upcorner[0]-offset_in[0]), \n (lowcorner[1]-offset_in[1], upcorner[1]-offset_in[1]), \n (lowcorner[2]-offset_in[2], upcorner[2]-offset_in[2])]\n \n slices_in_outfile = [ # convert corners in the basis of output file\n (lowcorner[0]-offset_out[0], upcorner[0]-offset_out[0]), \n (lowcorner[1]-offset_out[1], upcorner[1]-offset_out[1]), \n (lowcorner[2]-offset_out[2], upcorner[2]-offset_out[2])]\n\n if DEBUG_LOCAL:\n logger.debug(f\"[debug] extracting {s[0][0]}:{s[0][1]}, {s[1][0]}:{s[1][1]}, {s[2][0]}:{s[2][1]} from input file\")\n logger.debug(f\"[debug] inserting {s2[0][0]}:{s2[0][1]}, {s2[1][0]}:{s2[1][1]}, {s2[2][0]}:{s2[2][1]} into output file {out_filename}\")\n\n s = slices_in_infile\n subarr_data = data[s[0][0]:s[0][1],s[1][0]:s[1][1],s[2][0]:s[2][1]] # extract subarr from input file's data \n\n _3d_pos = numeric_to_3d_pos(outvolume.index, outfiles_partition, order='C')\n i, j, k = _3d_pos\n\n if addition:\n subarr_data = subarr_data + 1\n\n global outdirs_dict, outdir_index\n\n if (i, j, k) in outdirs_dict.keys():\n outdir_path = outdirs_dict[(i, j, k)]\n print(f\"Writing at: {outdir_path}\")\n else:\n outdir_path = '/disk' + str(outdir_index) + '/gtimothee/output'\n outdirs_dict[(i, j, k)] = outdir_path\n outdir_index += 1\n if outdir_index == 6:\n outdir_index = 0\n\n print(f\"Writing at: {outdir_path}\")\n print(f\"Increasing writing index: {outdir_index}\")\n\n t2 = time.time()\n if not DONT_WRITE:\n file_manager.write_data(i, j, k, outdir_path, subarr_data, slices_in_outfile, O)\n t2 = time.time() - t2\n \n if DEBUG_LOCAL: \n file_manager.test_write(outfile_path, slices_in_outfile, subarr_data)\n\n return overlap_shape, t2, nb_outfile_seeks_tmp",
"def save_data_to_disk(self):\n Omega_M = self.theta_fid[0]\n for key in self.data.keys():\n np.save(f'./preloaded_data/{Omega_M}_{self.delta_theta[0]}_{key}.npy', self.data[key])",
"def save(self):\n if self.hasChanged:\n filePath = self.path\n tempPath = filePath+'.tmp'\n fileDir = os.path.split(filePath)[0]\n if not os.path.exists(fileDir): os.makedirs(fileDir)\n cPickle.dump(self.data,open(tempPath,'w'))\n renameFile(tempPath,filePath,True)\n self.hasChanged = False",
"def temp_split(filename):\n filename, ext = filename.rsplit('.')\n data = np.load(filename + \".\" + ext)\n # define basic constants from parent\n A = data['a']\n A_SIZE = A.shape[0]\n A_SHAPE = A.shape\n ORIGINAL_SIZE = data['original_size']\n B = data['b']\n # basics\n ki, kj, m = np.sum(A, 1), np.sum(A, 0), np.sum(np.sum(A, 1))\n # eval & evec\n eval, evec = linalg.eigh(B)\n # split\n g1_order, g1_arrays, g2_order, g2_arrays = create_g(A, evec)\n g1, g2 = create_g_matrix(g1_order, g1_arrays), create_g_matrix(g2_order, g2_arrays)\n # threshold (q)\n q1 = create_q(A_SIZE, B, g1_order, m)\n q2 = create_q(A_SIZE, B, g2_order, m)\n # B of G\n b1 = create_b_of_g(B, g1_order)\n b2 = create_b_of_g(B, g2_order)\n # a_elems\n a1_elems = []\n a2_elems = []\n original_elems = data['a_elems']\n for i in g1_order:\n a1_elems.append(original_elems[i])\n for i in g2_order:\n a2_elems.append(original_elems[i])\n return Part(filename + ',1', ext, q1, g1.shape[0], ','.join([str(x) for x in a1_elems])), \\\n Part(filename + ',2', ext, q2, g2.shape[0], ','.join([str(x) for x in a2_elems]))",
"def update_flat_file(array, flat_file):\n\n logger.info('Updating filespace flat files') \n\n pg_system_fs_entries = GetFilespaceEntriesDict(GetFilespaceEntries(array, PG_SYSTEM_FILESPACE).run()).run() \n \n flat_file_location = os.path.join(pg_system_fs_entries[1][2], flat_file) \n \n if not os.path.exists(flat_file_location):\n return\n\n logger.debug('flat file location for transaction files = %s' % flat_file_location)\n #Copy over the updated flat file to the standby\n with open(flat_file_location) as read_file:\n lines_to_write = ''\n for line in read_file:\n tokens = line.split()\n if len(tokens) != 2:\n lines_to_write += line\n elif tokens[0] == '1':\n lines_to_write += line\n\n temp_flat_file = os.path.join(flat_file_location + '.tmp')\n \n try:\n with open(temp_flat_file, 'w') as write_file:\n write_file.write(lines_to_write)\n \n #Rewrite the master flat file to include the standby information \n shutil.move(temp_flat_file, flat_file_location)\n except Exception, e:\n raise Exception('Failed to update flat file')",
"def writePosFilesStep(self): \n \n writeSetOfCoordinates(self._getExtraPath(), self.inputCoordinatesTiltedPairs.get().getUntilted())\n \n writeSetOfCoordinates(self._getExtraPath(), self.inputCoordinatesTiltedPairs.get().getTilted())",
"def rotate_file(cls, main_dir, temp_dir):\n\t\tif(os.path.isfile(main_dir)):\n\t\t\tos.remove(main_dir)\n\t\tcopyfile(temp_dir, main_dir)\n\t\tos.remove(temp_dir)",
"def save_data(self, f): \n if not self.sampling:\n self.convert_to_array()\n np.save(f, self.reads)",
"def dump(self, step=None):\n if self._compressed:\n np.savez_compressed(self._file_path, **self._data)\n else:\n np.savez(self._file_path, **self._data)",
"def __del__(self):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n # If the subarray is unique it will have 2 references to\n # it plus 1 within this method, making 3. If it has more\n # than 3 references to it then it is not unique.\n if getrefcount is not None:\n self._decrement_file_counter()\n if subarray is None or getrefcount(subarray) > 3:\n return\n else:\n # getrefcount has itself been deleted or is in the process\n # of being torn down\n return\n\n _partition_file = getattr(subarray, \"_partition_file\", None)\n if _partition_file is not None:\n # This partition contains a temporary file which is not\n # referenced by any other partition on this process, so if\n # there are no lock files present remove the file from\n # disk.\n _remove_temporary_files(_partition_file)\n\n else:\n try:\n if FileArray is not None and isinstance(subarray, FileArray):\n try:\n filename = subarray.get_filename()\n except Exception:\n filename = None\n\n if self.file_counter.get(filename, 999) <= 0:\n # This partition contains a non-temporary file\n # which is not referenced by any other\n # partitions, so close the file.\n subarray.close()\n except Exception:\n # If we're here then it is likely that FileArray has been\n # torn down, so just do nothing.\n pass\n # --- End: if",
"def dump_trjqueue(self,replica):\n\t\n\t# write coords and enes to the workspace, by_temp and by_replica\n rep = replica.repnum\n\t\n\t### WORKSPACE FILES ###\n\tfor pt in range(0,len(self.trjqueue[rep])):\n\n\t self.repfiles_trj[rep].write(repr(self.trjqueue[rep][pt]))\n\t self.repfiles_trj[rep].write('\\n')\n\n\t### BY_TEMP and BY_REPLICA FILES ###\n realrep = replica.mc.tempfromrep\n\t\n\tself.byreplica_temp[rep].write(str(rep))\n\tself.byreplica_temp[rep].write('\\n')\n\n\tself.bytemp_replica[rep].write(str(realrep))\n\tself.bytemp_replica[rep].write('\\n')\n\n\tfor pt in range(0,len(self.trjqueue[realrep])):\n\t self.bytemp_trj[rep].write(repr(self.trjqueue[realrep][pt]))\n\t self.bytemp_trj[rep].write('\\n')\n\n\tfor pt in range(0,len(self.trjqueue[rep])):\n\t self.byreplica_trj[rep].write(repr(self.trjqueue[rep][pt]))\n\t self.byreplica_trj[rep].write('\\n')\n\n ### clear the trj and ene queues\n self.trjqueue[rep] = []",
"def take_snapshot(self):\r\n self.snapshot = self.name, self.size, copy.copy(self.cells)\r\n self.bucket_array.take_snapshot()",
"def save_array(array, filename):\n np.save(filename, array)"
] | [
"0.58671135",
"0.56434155",
"0.5580373",
"0.5417111",
"0.5385665",
"0.5372569",
"0.53123814",
"0.52139175",
"0.5193834",
"0.51137465",
"0.5101339",
"0.5099379",
"0.50423723",
"0.5027432",
"0.50095487",
"0.5004623",
"0.49991313",
"0.4995337",
"0.49318588",
"0.49247873",
"0.4896562",
"0.48937136",
"0.48899746",
"0.48796624",
"0.48620802",
"0.48261485",
"0.48148912",
"0.47867736",
"0.4782684",
"0.477437"
] | 0.6427523 | 0 |
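The essence of moving a subarray to disk is writing it to a temporary file and reading it back lazily. A rough sketch of that idea using plain NumPy (cf-python's CachedArray and its lock-file bookkeeping are not reproduced here; the helper name and paths are illustrative only):

import os
import tempfile
import numpy as np

def array_to_disk(arr):
    # Write `arr` to a temporary .npy file and hand back a read-only
    # memory map of it, so the data no longer has to live in RAM.
    fd, path = tempfile.mkstemp(suffix=".npy")
    os.close(fd)  # only the path is needed; np.save reopens the file
    np.save(path, arr)
    return np.load(path, mmap_mode="r"), path

subarray = np.arange(12.0).reshape(3, 4)
on_disk, path = array_to_disk(subarray)
print(on_disk[1, 2], path)  # elements are read back lazily from disk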
Register a temporary file on this rank that has been created on another rank. | def _register_temporary_file(self):
_partition_file = self._subarray._partition_file
_partition_dir = self._subarray._partition_dir
if _partition_file not in _temporary_files:
fd, _lock_file = mkstemp(
prefix=_partition_file + "_", dir=_partition_dir
)
close(fd)
_temporary_files[_partition_file] = (
_partition_dir,
_lock_file,
set(),
)
else:
_, _lock_file, _ = _temporary_files[_partition_file]
return _lock_file | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def register_tmp_file(self, tmp_file: str):\n self.temp_files.add(pathlib.Path(tmp_file))",
"def set_temp_file(self):\n\n index = self.filename.rfind('/') + 1\n self.temp_filename = self.filename[:index] + \"tmp_\" + self.filename[index:]",
"def add_tempfile(self, filename, exists=True):\n tmp = os.path.abspath(filename)\n if exists and not os.path.exists(tmp):\n raise IOError(\"Temporary file does not exist: \" + tmp)\n self._tempfiles[-1].append(tmp)",
"def TemporaryFile(mode='w+b',bufsize=_1,suffix='',prefix='tmp',dir=None):\n\tpass",
"def _create_file(self, rel_path, text):\n # FIXME: There are better/more secure APIs for creating tmp file paths.\n file_path = self.filesystem.join(self._temp_dir, rel_path)\n self.filesystem.write_text_file(file_path, text)\n return file_path",
"def get_temp_file(self, delete: bool = False, close: bool = False):\n prefix = str(self._tmp_folder / f\"pysimt_{os.getpid()}\")\n t = tempfile.NamedTemporaryFile(\n mode='w', prefix=prefix, delete=delete)\n self.register_tmp_file(t.name)\n if close:\n t.close()\n return t",
"def test_write_file():\n with tempfile.NamedTemporaryFile(delete=False) as t:\n temp_fn = t.name\n try:\n z = XPIManager(temp_fn, mode='w')\n f, d = 'install.rdf', '注目のコレクション'.decode('utf-8')\n z.write(f, d)\n assert z.read(f) == d.encode('utf-8')\n finally:\n os.unlink(temp_fn)",
"def _tempfile(self):\n fd, path = tempfile.mkstemp(dir = os.path.join(self.root, \"temporary\"))\n try:\n return os.fdopen(fd, \"wb\"), path\n except:\n os.unlink(path)\n os.close(fd)\n raise",
"def _temp_file(self, val):\n fd, fn = tempfile.mkstemp()\n fp = os.fdopen(fd, \"wb\")\n if val:\n if not isinstance(val, bytes):\n fp.write(val.encode(\"utf-8\", \"surrogateescape\"))\n else:\n fp.write(val)\n fp.close()\n return fn",
"def _create_unique_file(self):\n with open(self.uniquefile, 'w') as f:\n f.write(self._uniquename)\n self._uniquefile_created = True\n self._extend_expiration_time()\n self._p(\"Unique file created: %s\" % self.uniquefile)",
"def test_create1(self):\n fname = TempfileManager.create_tempfile()\n OUTPUT = open(fname, 'w')\n OUTPUT.write('tempfile\\n')\n OUTPUT.close()\n self.assertEqual(len(list(glob.glob(tempdir + '*'))), 1)\n fname = os.path.basename(fname)\n self.assertTrue(fname.startswith('tmp'))",
"def test_add3(self):\n OUTPUT = open(tempdir + 'add3', 'w')\n OUTPUT.write('tempfile\\n')\n OUTPUT.close()\n TempfileManager.add_tempfile(tempdir + 'add3')",
"def mktmp(self, src, ext='.py'):\n fname = temp_pyfile(src, ext)\n if not hasattr(self, 'tmps'):\n self.tmps=[]\n self.tmps.append(fname)\n self.fname = fname",
"def _tmpfile(self,filename=None):\n\t\tif self._tmpdir is None:\n\t\t\tself._tmpdir = TemporaryDirectory(prefix=\"jitcxde_\")\n\t\t\n\t\tif filename is None:\n\t\t\treturn self._tmpdir.name\n\t\telse:\n\t\t\treturn path.join(self._tmpdir.name, filename)",
"def _tempfile(filename):\n return tempfile.NamedTemporaryFile(mode='w',\n dir=os.path.dirname(filename),\n prefix=os.path.basename(filename),\n suffix=os.fsencode('.tmp'),\n delete=False)",
"def _tmpfile(*args, **kwargs):\n with NamedTemporaryFile(prefix='test_parser', suffix='.tmp', delete=False) as tmp:\n fpath = tmp.name\n fh = open(fpath, *args, **kwargs)\n file_handles.append(fh)\n return fh",
"def new_temp_file(prefix, suffix):\n f = tempfile.NamedTemporaryFile(prefix=prefix, suffix=suffix, delete=False)\n f.close()\n return f.name",
"def new_file(self, *args, **kwargs):\n super().new_file(*args, **kwargs)\n self.file = TemporaryUploadedFile(self.file_name, self.content_type, 0, self.charset, self.content_type_extra)",
"def make_temp_file():\n global TEST_DATA_PATH\n TEST_DATA_PATH = tempfile.mkstemp()",
"def create_temporary_file():\n f = NamedTemporaryFile(delete=False)\n return f.name",
"def _temp_path(self, uri_like):\n handle, filename = tempfile.mkstemp(suffix=uri_like.split(\"/\")[-1])\n os.close(handle)\n return filename",
"def make_temp_file():\n with tempfile.NamedTemporaryFile() as f:\n return f.name",
"def save_tmp_file(self, data):\n with open(self.tmp_file, 'wb') as f:\n f.write(data)",
"def makeFilePointer(self, key, tmppath):\n fp = open(tmppath, 'w')\n self.getFile(key, fp)\n fp.close()",
"def test_unable_to_create_tmp_file(self, mocker):\n mocker.patch(\n 'tempfile.NamedTemporaryFile', side_effect=IOError('Fail')\n )\n\n payload = dict(id=\"stub_id\", data={\"some\": \"data\"})\n resp = self.client.post(self.url, json=payload)\n\n assert resp.status_code == 500\n assert resp.get_json() == {\n 'status': 'Error',\n 'type': 'OSError',\n 'message': 'Error during TAR.GZ creation: Fail'\n }",
"def save_to_tmp(form):\n file = request.files.get('file')\n suffix = os.path.splitext(secure_filename(file.filename))[-1]\n tf = tempfile.NamedTemporaryFile(dir='/tmp', delete=False, suffix=suffix, prefix='lpm_tmp_')\n filepath = tf.name\n tf.close()\n file.save(filepath)\n form.tmpname.data = os.path.basename(filepath)\n return filepath",
"def _create_temp_batch_file(self):\n return tempfile.NamedTemporaryFile(delete=False)",
"def get_temp_file(self, prefix=template, suffix=\"\"):\n ret = NamedTemporaryFile(delete=False, prefix=prefix, suffix=suffix)\n self._tempfiles.append(ret)\n if is_win():\n ret.close()\n return ret",
"def test_create2(self):\n fname = TempfileManager.create_tempfile(prefix='foo')\n OUTPUT = open(fname, 'w')\n OUTPUT.write('tempfile\\n')\n OUTPUT.close()\n self.assertEqual(len(list(glob.glob(tempdir + '*'))), 1)\n fname = os.path.basename(fname)\n self.assertTrue(fname.startswith('foo'))",
"def test_write(self):\n temp_file = tempfile.mkstemp()[1]\n try:\n with open(temp_file, \"w+\") as fh:\n self.new_manifest.write(fh)\n tools.eq_(self.new_manifest, load_manifest(temp_file))\n finally:\n os.unlink(temp_file)"
] | [
"0.7100669",
"0.6645065",
"0.6373811",
"0.60678446",
"0.59707797",
"0.5943573",
"0.5882913",
"0.5867154",
"0.5833076",
"0.57806534",
"0.5745454",
"0.5718152",
"0.5706169",
"0.5704879",
"0.56752044",
"0.56738895",
"0.56596625",
"0.56512666",
"0.56443375",
"0.56338364",
"0.56253743",
"0.56225497",
"0.5620132",
"0.5610537",
"0.5586419",
"0.5562855",
"0.55558413",
"0.55427134",
"0.5532904",
"0.5532109"
] | 0.66906744 | 1 |
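The registration step above boils down to "create a lock file for this partition file once per process, otherwise reuse the one already recorded". A simplified sketch of that pattern with a toy registry (the registry layout and names are assumptions for illustration, not the library's internals):

import os
import tempfile

# Toy registry: {partition_file: (partition_dir, own_lock_file, other_ranks_lock_files)}
_temporary_files = {}

def register_temporary_file(partition_file, partition_dir):
    # Ensure this process holds exactly one lock file for `partition_file`,
    # creating it on first sight and reusing it afterwards.
    if partition_file not in _temporary_files:
        fd, lock_file = tempfile.mkstemp(
            prefix=os.path.basename(partition_file) + "_", dir=partition_dir)
        os.close(fd)
        _temporary_files[partition_file] = (partition_dir, lock_file, set())
    else:
        _, lock_file, _ = _temporary_files[partition_file]
    return lock_file

demo_dir = tempfile.mkdtemp()  # stand-in for the partition directory
demo_file = os.path.join(demo_dir, "subarray.npy")
first = register_temporary_file(demo_file, demo_dir)
print(first == register_temporary_file(demo_file, demo_dir))  # True: reused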
Add the lock files listed in lock_files to the list of lock files managed by other ranks. | def _update_lock_files(self, lock_files):
_, _lock_file, _other_lock_files = _temporary_files[
self._subarray._partition_file
]
_other_lock_files.update(set(lock_files))
if _lock_file in _other_lock_files:
# If the lock file managed by this rank is in the list of
# lock files managed by other ranks, remove it from there
_other_lock_files.remove(_lock_file) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def LockFiles(self, entries):\n self._model.lock(entries)",
"def add_mock_files(self, file_list):\n self._mock_file_list.extend(file_list)",
"def thread_file_list(self):\n # Establish connection for this thread\n connection = self.connect()\n\n # Set working directory on server\n connection.chdir(self.settings.server_dir)\n\n while len(self.files) > 0:\n self.lock.acquire()\n file = self.files.pop()\n self.lock.release()\n\n # Pass popped file to function\n try:\n self.upload_file(connection, file)\n except EOFError as error:\n self._logger.log(logging.CRITICAL, \"Connection lost during \"\n \"file transfer\")\n self._logger.log(logging.CRITICAL, str(error))\n\n # Establish connection for this thread\n connection = self.connect()\n\n # Set working directory on server\n connection.chdir(self.settings.server_dir)\n\n # Lock and append filename to list to retry\n self.lock.acquire()\n self.files.append(file)\n self.lock.release()\n\n except FileNotFoundError as error:\n self._logger.log(logging.CRITICAL, \"File \" + file + \" not \"\n \"found\")\n self._logger.log(logging.CRITICAL, str(error))\n\n except IOError:\n self.lock.acquire()\n self.files.append(file)\n self.lock.release()\n\n connection.close()",
"def get_file_list(self):\n try:\n for filename in os.listdir(SHARED_DIR):\n self.file_list.append(filename)\n except Exception as e:\n print \"Error: retriving file list, %s\" % e",
"def _distribute_files(self, distribution='one'):\n for k, files in self.file_lists.items():\n self.idle[k] = False\n if distribution.lower() == 'single':\n self.distribution_comms[k] = None\n if self.comm.rank >= 1:\n self.local_file_lists[k] = None\n self.idle[k] = True\n else:\n self.local_file_lists[k] = files\n elif distribution.lower() == 'even':\n if len(files) <= self.comm.size:\n if self.comm.rank >= len(files):\n self.local_file_lists[k] = None\n self.distribution_comms[k] = None\n self.idle[k] = True\n else:\n self.local_file_lists[k] = [files[self.comm.rank],]\n self.distribution_comms[k] = self.comm.Create(self.comm.Get_group().Incl(np.arange(len(files))))\n else:\n files_per = int(np.floor(len(files) / self.comm.size))\n excess_files = int(len(files) % self.comm.size)\n if self.comm.rank >= excess_files:\n self.local_file_lists[k] = list(files[int(self.comm.rank*files_per+excess_files):int((self.comm.rank+1)*files_per+excess_files)])\n else:\n self.local_file_lists[k] = list(files[int(self.comm.rank*(files_per+1)):int((self.comm.rank+1)*(files_per+1))])\n self.distribution_comms[k] = self.comm",
"def removeLocks():\n global lockFnames\n for lockFname in lockFnames:\n if isfile(lockFname):\n logging.debug('Removing lockfile %s' % lockFname)\n os.remove(lockFname)\n\n lockFnames = []",
"def pipfile_lock_names(self):\n return ext_split(self.pipfile_locks, \"Pipfile.lock\")",
"def add_filelist_to_cache(self, file_list=None):\n if file_list is None:\n return False\n for fileinfo in file_list:\n fn_ = fileinfo.filename\n self.cache_file_list_dict[fn_] = fileinfo\n return True",
"def add(self, files, mask):\n pass",
"def _assignUIDs(self):\n for messagePath in self.maildir:\n\n messageFile = os.path.basename(messagePath)\n\n if not messageFile in self.metadata['uids']:\n\n self.metadata['uids'][messageFile] = self.metadata['uidnext']\n\n self.metadata['uidnext'] += 1\n\n self.saveMetadata()",
"def add_mock_files_after_load(self, file_list):\n self._mock_file_list_after.extend(file_list)",
"def add_files(self, files, commit_msg):\n paths = []\n for rpath in files:\n path = os.path.join(self.repodir, rpath)\n paths.append(path)\n with open(path, 'w') as f:\n f.write(files[rpath])\n if paths:\n self.git_cmd(['add'] + paths)\n self.commit(commit_msg)",
"def lock(self):\n logging.debug(\"Locking %s (and subdirectories)\" % self.directory)\n LOCK_ACL.append(target=self.directory)\n for subdirectory in self._subdirectories():\n LOCK_ACL.append(target=subdirectory)",
"def create_files(self):\n self._do_action_under_lock(self._create_files)",
"def _save_sync_list(self):\n\t\tfp = open(self.sync_file, 'w')\n\t\tself.sync_list.write(fp)\n\t\tfp.close()",
"def _find_locked_by(self):\n fstat_flags = NTR('otherLock | otherOpen0 & headType=*+l')\n any_locked_files = {} # depot_path : user\n for branch_chunk in self.ctx.iter_writable_branch_chunks():\n # Skip any newly defined branches: they're new, won't contain any\n # files yet, and won't get a view definition until later at per-\n # commit preflight time.\n bvl = [b for b in branch_chunk if b.view_lines]\n if not bvl:\n continue\n with self.ctx.switched_to_union(bvl):\n r = self.ctx.p4run('fstat', '-F', fstat_flags, '-m1',\n '//{}/...'.format(self.ctx.p4.client),\n log_warnings=logging.DEBUG)\n # Collect a dictionary of the locked files from the writable union of branch views\n for lf in r:\n user = lf['otherOpen'][0] if 'otherOpen' in lf else NTR('<unknown>')\n any_locked_files[lf['depotFile']] = user\n return any_locked_files",
"def add_list(self, files):\n if files:\n if not list:\n self.set_list(files)\n else:\n self.playlist.extend(files)",
"def cmd_sync(self, args, list_only=False):\n if not list_only:\n log.info('synchronizing repository files...')\n for curdir, dirs, files in os.walk(self.files_path):\n for f in files:\n ignore_file = False\n repo_path = os.path.join(curdir, f).replace(self.files_path, '')\n for ignored in self.ignored_files:\n if ignored.startswith('/'):\n f = os.path.join(repo_path, f)\n if fnmatch(f, ignored):\n log.debug('ignored file ({}): {}'.format(ignored, repo_path[1:]))\n ignore_file = True\n break\n if ignore_file:\n continue\n fpath = os.path.join(curdir, f)\n linkpath = fpath.replace(self.files_path, self.homedir)\n if not os.path.exists(linkpath) and not os.path.islink(linkpath):\n log.info('synced: {}'.format(linkpath))\n if not list_only:\n log.debug('creating link: {}'.format(linkpath))\n os.symlink(fpath, linkpath)\n else:\n if os.path.islink(linkpath):\n # target path already exists\n frealpath = os.path.realpath(linkpath)\n if frealpath != fpath:\n log.warning('conflict (wrong link): {} -> {}'.format(linkpath, frealpath))\n if not list_only:\n if not args.force:\n if not log.ask_yesno('overwrite existing link?', default='n'):\n continue\n log.debug('installing link in place of existing link: {}'.format(linkpath))\n os.unlink(linkpath)\n os.symlink(fpath, linkpath)\n else:\n log.info('OK: {}'.format(linkpath))\n else: # linkpath is a regular file\n log.warning('conflict (file already exists): {}'.format(linkpath))\n if not list_only:\n if not args.force:\n if not log.ask_yesno('overwrite existing file?', default='n'):\n continue\n log.debug('installing link in place of existing file: {}'.format(linkpath))\n os.unlink(linkpath)\n os.symlink(fpath, linkpath)\n log.info('done')",
"def loadFileList(self):\r\n try:\r\n data = open(self.filelist_file, 'rb')\r\n except IOError:\r\n '''print \"No SRTM cached file list. Creating new one!\"'''\r\n if self.offline == 0:\r\n self.createFileList()\r\n return\r\n try:\r\n self.filelist = pickle.load(data)\r\n except:\r\n '''print \"Unknown error loading cached SRTM file list. Creating new one!\"'''\r\n if self.offline == 0:\r\n self.createFileList()",
"def add_files(self, filenames):\n for filename in filenames:\n self.add_file(filename)",
"def half_sync(self,delay):\n self.count = 1\n while not self.shutdown and self.loggedin.autosync:\n time.sleep(delay)\n self.count += 1\n self.filelist = self.loggedin.list()\n print \"Pinged server for changes\"\n self.synced = []\n if self.filelist:\n for f in self.filelist:\n path = self.loggedin.sanitize_path(f['path'])\n path = os.path.join(self.onedirrectory, path)\n if not os.path.exists(path):\n os.makedirs(path)\n if f['name'] and not self.loggedin.exists(f):\n exists, data = self.loggedin.getfile(f)\n if exists:\n with open(self.loggedin.make_path(f), 'a') as new_file:\n new_file.write(data)\n new_file.close()\n elif f['name'] and str(self.loggedin.hash_file(f)) != str(f['hash']):\n self.loggedin.sendfile(f['name'], f['path'])\n if self.loggedin.make_path(f) not in self.synced:\n self.synced.append(self.loggedin.make_path(f))\n os_walk = os.walk(self.loggedin.onedirrectory)\n for directory in os_walk:\n for f in directory[2]:\n if f.startswith('.'):\n continue\n path = os.path.join(directory[0], f)\n if path not in self.synced:\n try:\n os.remove(path)\n except OSError, e:\n print (\"Error: %s - %s.\" % (e.filename,e.strerror))",
"def ingest(self, files):\n for file in files:\n self.files.add(file)",
"def updateFileList(self, fileList):\n\n if fileList == self.fileList:\n return 0\n\n self.mutex.acquire()\n # init = time.time()\n # \n # while(self.bussy):\n # sleep(0.1)\n # if time.time() - init > 2*self.period:\n # return 0\n \n self.fileList = fileList\n self.mutex.release()\n return 1",
"def update_list(self):\n\t\ttry:\n\t\t\tassert(not self.master.TransactionInProgress)\n\t\t\tself.master.Vacuum()\n\n\t\t\tself.fetch_repo_file(\"/torrent\", self.config[\"daemon\"][\"rootdir\"] + \"/torrent\", \"wb\")\n\t\t\tself.master.master = json.loads(self.fetch_repo_file(\"/package-index.json\", True).decode('utf-8'))\n\t\t\tself.torrent_info = lt.torrent_info(self.config[\"daemon\"][\"rootdir\"] + \"/torrent\")\n\n\t\t\t\"\"\" Find pre-downloaded files \"\"\"\n\t\t\tpre_downloaded = {}\n\t\t\ti = 0\n\t\t\tfor f in self.torrent_info.files():\n\t\t\t\tif self.valid_tpkg_file(f.path):\n\t\t\t\t\tpre_downloaded[i] = f\n\t\t\t\ti += 1\n\n\n\t\t\t\"\"\" Default torrent params \"\"\"\n\t\t\tparams = {\n\t\t\t\t\"save_path\": self.config[\"daemon\"][\"rootdir\"],\n\t\t\t\t\"ti\": self.torrent_info\n\t\t\t}\n\t\t\t\n\t\t\t\"\"\" Set torrent handler \"\"\"\n\t\t\tself.handler = self.ses.add_torrent(params)\n\n\t\t\t\"\"\" Set chunk priority to 0 (don't download) \"\"\"\n\t\t\tfor p in range(self.torrent_info.num_pieces()):\n\t\t\t\tself.handler.piece_priority(p, 0)\n\n\t\t\tfor i in self.torrent_info.files():\n\t\t\t\tif i in pre_downloaded:\n\t\t\t\t\tpr = self.torrent_info.map_file(i, 0, pre_downloaded[i].size)\n\t\t\t\t\tn_pieces = pr.length / self.torrent_info.piece_length() + 1\n\n\t\t\t\t\tfor p in range(self.torrent_info.num_pieces()):\n\t\t\t\t\t\tif p in range(pr.piece, pr.piece + n_pieces):\n\t\t\t\t\t\t\tself.handler.piece_priority(p, 7)\n\n\t\texcept Exception as e:\n\t\t\tsys.stderr.write(\"Failed to update package list: {0}\\n\".format(e))\n\t\t\ttraceback.print_exc()\n\t\t\tself.write_line(\"Error: XXX - Failed to update package list.\")",
"def _add_files(self, index_key, media_key,\n new_list, fundamentals):\n _index=fundamentals.get(index_key, {})\n _media=fundamentals.get(media_key, {})\n for _file in new_list:\n _data=self._item_from_index(_file, 'data', _media)\n if not _data:\n self.log('Failed to write file %s due to no data'%_file)\n continue\n if self._item_from_index(_file, None, _index) is None:\n _origin=self._item_from_index(_file, 'origin', _media)\n if _origin=='ringers':\n _path=self.protocolclass.RT_PATH\n elif _origin=='sounds':\n _path=self.protocolclass.SND_PATH\n elif _origin=='images':\n _path=self.protocolclass.PIC_PATH\n else:\n selg.log('File %s has unknown origin, skip!'%_file)\n continue\n _file_name=_path+'/'+_file\n try:\n self.writefile(_file_name, _data)\n except:\n self.log('Failed to write file '+_file_name)\n if __debug__:\n raise",
"def __add_files(self, snapshot):\n\n # Why partition()?\n # Don't delete a parent after adding its child:\n # M 100644 deba01f cookbooks/apt/README\n # D cookbooks/apt <== BUG, would also delete/omit README\n\n partitioned = p4gf_util.partition(lambda x:x.is_delete(), snapshot)\n for p4file in partitioned:\n path = self.__relative_path(p4file)\n if not path:\n continue\n if path == p4gf_const.P4GF_EMPTY_CHANGELIST_PLACEHOLDER:\n # Perforce-only artifact. Never copy this into Git.\n continue\n if p4file.is_delete():\n self.__append(\"D {0}\\n\".format(path))\n else:\n if p4file.sha1 == \"\":\n LOG.debug(\"skipping missing revision {}#{}\".format(path, p4file.revision))\n continue\n if p4file.is_x_type():\n mode = \"100755\"\n elif p4file.is_symlink():\n mode = \"120000\"\n else:\n mode = \"100644\"\n self.__append(\"M {0} {1} {2}\\n\".\n format(mode, p4file.sha1, path))",
"def addFiles(self, filePaths): \n \n for filePath in filePaths: \n self.addFile(filePath)",
"def add(self,filelist):\n\n self.ws.execute('svn add %s' % (' '.join(filelist)))",
"def add_files(self,count=None):\n message_buffer =[]\n if count is None:\n count = len(self.files)\n while count:\n count -= 1\n message_buffer.append((count,base64.b64encode(self.files.pop()),0)) # required to maintain compatibility with\n if len(message_buffer) > 9:\n self.queue.write_batch(message_buffer)\n message_buffer = []\n self.queue.write_batch(message_buffer)",
"def write_manifests( file_lists, target_dir, output_dir ):\n for i, lst in enumerate( file_lists ):\n with open( os.path.join( output_dir, \"manifest-{}.txt\".format( i ) ), \"w\" ) as fout:\n for r in lst:\n fout.write( insert_rsync_marker( r, target_dir ) + \"\\n\" )"
] | [
"0.5858453",
"0.5857905",
"0.5442941",
"0.5427439",
"0.5369485",
"0.534254",
"0.5242692",
"0.5233011",
"0.52133423",
"0.51889026",
"0.51700175",
"0.51625514",
"0.5118121",
"0.5110031",
"0.50916535",
"0.5077471",
"0.5056525",
"0.5047517",
"0.50415224",
"0.50407803",
"0.50310415",
"0.50032467",
"0.49873388",
"0.49867135",
"0.49853197",
"0.49367034",
"0.49278733",
"0.49048927",
"0.48945892",
"0.48774955"
] | 0.785469 | 0 |
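The bookkeeping above merges lock files reported by other ranks into the shared registry entry while making sure this rank's own lock file is never counted among them. A toy version of that set arithmetic (the registry layout and names are illustrative; set.discard is used in place of the check-then-remove in the original, with the same effect):

def update_lock_files(registry, partition_file, incoming_lock_files):
    # Fold lock files reported by other ranks into the registry entry,
    # dropping this rank's own lock file if it slipped into the list.
    _dir, own_lock, other_locks = registry[partition_file]
    other_locks.update(incoming_lock_files)
    other_locks.discard(own_lock)  # no error if it was not there

registry = {"subarray.npy": ("/tmp", "subarray.npy_lockA", set())}
update_lock_files(registry, "subarray.npy",
                  {"subarray.npy_lockA", "subarray.npy_lockB"})
print(registry["subarray.npy"][2])  # {'subarray.npy_lockB'}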
r""" Samples a 2d function f over specified intervals and returns two arrays (X, Y) suitable for plotting with matlab (matplotlib) syntax. See examples\mplot2d.py. f is a function of one variable, such as x2. x_args is an interval given in the form (var, min, max, n) | def sample2d(f, x_args):
try:
f = sympify(f)
except SympifyError:
raise ValueError("f could not be interpreted as a SymPy function")
try:
x, x_min, x_max, x_n = x_args
except (TypeError, IndexError):
raise ValueError("x_args must be a tuple of the form (var, min, max, n)")
x_l = float(x_max - x_min)
x_d = x_l/float(x_n)
X = np.arange(float(x_min), float(x_max) + x_d, x_d)
Y = np.empty(len(X))
for i in range(len(X)):
try:
Y[i] = float(f.subs(x, X[i]))
except TypeError:
Y[i] = None
return X, Y | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sample3d(f, x_args, y_args):\n x, x_min, x_max, x_n = None, None, None, None\n y, y_min, y_max, y_n = None, None, None, None\n try:\n f = sympify(f)\n except SympifyError:\n raise ValueError(\"f could not be interpreted as a SymPy function\")\n try:\n x, x_min, x_max, x_n = x_args\n y, y_min, y_max, y_n = y_args\n except (TypeError, IndexError):\n raise ValueError(\"x_args and y_args must be tuples of the form (var, min, max, intervals)\")\n\n x_l = float(x_max - x_min)\n x_d = x_l/float(x_n)\n x_a = np.arange(float(x_min), float(x_max) + x_d, x_d)\n\n y_l = float(y_max - y_min)\n y_d = y_l/float(y_n)\n y_a = np.arange(float(y_min), float(y_max) + y_d, y_d)\n\n def meshgrid(x, y):\n \"\"\"\n Taken from matplotlib.mlab.meshgrid.\n \"\"\"\n x = np.array(x)\n y = np.array(y)\n numRows, numCols = len(y), len(x)\n x.shape = 1, numCols\n X = np.repeat(x, numRows, 0)\n\n y.shape = numRows, 1\n Y = np.repeat(y, numCols, 1)\n return X, Y\n\n X, Y = np.meshgrid(x_a, y_a)\n\n Z = np.ndarray((len(X), len(X[0])))\n for j in range(len(X)):\n for k in range(len(X[0])):\n try:\n Z[j][k] = float(f.subs(x, X[j][k]).subs(y, Y[j][k]))\n except (TypeError, NotImplementedError):\n Z[j][k] = 0\n return X, Y, Z",
"def fcontourf(f, x1range, x2range, yrange, **kwargs):\n x1s = np.linspace(x1range[0], x1range[1])\n x2s = np.linspace(x2range[0], x2range[1])\n ys = np.linspace(yrange[0], yrange[1], 20)\n fs = [[f(np.array([x1,x2])) for x1 in x1s] for x2 in x2s]\n plt.contourf(x1s, x2s, fs, ys, **kwargs)\n plt.axis('scaled')",
"def show_trace_2d(f, results):\n plt.close()\n # draw input points\n plt.plot(*zip(*results), '-o', color='#ff7f0e')\n # get the field of figure\n x1, x2 = np.meshgrid(np.arange(-5.5, 1.0, 0.1), np.arange(-3.0, 1.0, 0.1))\n # draw the contour of function using x1,x2 as step\n plt.contour(x1, x2, f(x1, x2), colors='#1f77b4')\n plt.xlabel('x1')\n plt.ylabel('x2')\n plt.show()",
"def show_trace_2d(f, results): #@save\n set_figsize()\n plt.plot(*zip(*results), '-o', color='#ff7f0e')\n x1, x2 = torch.meshgrid(torch.arange(-5.5, 1.0, 0.1),torch.arange(-3.0, 1.0, 0.1))\n plt.contour(x1, x2, f(x1, x2), colors='#1f77b4')\n plt.xlabel('x1')",
"def test_exercise_1():\n a, b = 5, 0\n fvals = []\n grid = np.linspace(-3, 4)\n for value in grid:\n fvals.append(get_test_function(value, a, b))\n plt.plot(grid, fvals)",
"def show_trace_2d(f, results): #@save\n d2l.set_figsize()\n d2l.plt.plot(*zip(*results), '-o', color='#ff7f0e')\n x1, x2 = d2l.meshgrid(d2l.arange(-5.5, 1.0, 0.1),\n d2l.arange(-3.0, 1.0, 0.1))\n d2l.plt.contour(x1, x2, f(x1, x2), colors='#1f77b4')\n d2l.plt.xlabel('x1')\n d2l.plt.ylabel('x2')",
"def display(f, x_min, x_max, delta=0.001):\n x = list(drange(x_min, x_max,delta))\n y = [f(i) for i in x]\n plt.title(f.__name__)\n plt.grid(True)\n plt.xlabel('X')\n plt.ylabel('Y= '+f.__name__ + '(X)')\n plt.plot(x,y, 'r')\n plt.show()",
"def sampleFunction2(x2: int, y2: float) -> float:\n return x2 * y2",
"def create_data(f, x_vals):\n y_vals = []\n for i in x_vals:\n y_vals.append(f(x_vals[i]))\n return np.array(y_vals)",
"def sample(f, *var_args):\n if len(var_args) == 1:\n return sample2d(f, var_args[0])\n elif len(var_args) == 2:\n return sample3d(f, var_args[0], var_args[1])\n else:\n raise ValueError(\"Only 2d and 3d sampling are supported at this time.\")",
"def simps_2d_py(fxy, x, y):\n\n I_simps = 0.0\n\n N = x.size\n\n h_x = (x.max() - x.min())/(N - 1)\n h_y = (y.max() - y.min())/(N - 1)\n\n S = S_simps(N)\n\n for i in range(x.size):\n\n for j in range(y.size):\n\n I_simps += 1.0/9 * h_x * h_y * S[i, j] * fxy[i, j]\n\n return I_simps",
"def sp_integrate_2D ( func ,\n xmin , xmax ,\n ymin , ymax , *args , **kwargs ) :\n from scipy import integrate\n ##\n result = integrate.dblquad ( func ,\n ymin ,\n ymax ,\n lambda x : xmin ,\n lambda x : xmax , \n *args , **kwargs )\n return result[0]",
"def get_extremum(f,\n dx=0.05, dy=0.05,\n ylims=[-2, 2], xlims=[-2, 2]):\n # generate 2 2d grids for the x & y bounds\n ymin, ymax = ylims\n xmin, xmax = xlims\n y, x = np.mgrid[slice(ymin, ymax + dy, dy),\n slice(xmin, xmax + dx, dx)]\n\n # create f(x, y)\n z = np.zeros(x.shape)\n for i in range(x.shape[0]):\n for j in range(y.shape[0]):\n xy = np.array([x[i, j], y[i, j]]).reshape(1, -1)\n z[i, j] = f(xy)\n\n return np.max(z), np.min(z)",
"def makeCrossPlotX(f,g):\n x = zerofloat(n1,n2)\n y = zerofloat(n1,n2)\n class Loop(Parallel.LoopInt):\n def compute(self,i2):\n for i1 in range(1,n1-1):\n x[i2][i1] = 0.5*(f[i2][i1+1]-f[i2][i1-1])\n y[i2][i1] = g[i2][i1]-f[i2][i1]\n Parallel.loop(n2,Loop())\n return x,y",
"def sample(self,f,N,p=100):\n return [f(x) for x in np.linspace(0,N,p)]",
"def evaluate_1darray_function_on_2d_array(function, samples, opts=None):\n num_args = get_num_args(function)\n assert samples.ndim == 2\n num_samples = samples.shape[1]\n if num_args == 2:\n values_0 = function(samples[:, 0], opts)\n else:\n values_0 = function(samples[:, 0])\n values_0 = np.atleast_1d(values_0)\n assert values_0.ndim == 1\n num_qoi = values_0.shape[0]\n values = np.empty((num_samples, num_qoi), float)\n values[0, :] = values_0\n for i in range(1, num_samples):\n if num_args == 2:\n values[i, :] = function(samples[:, i], opts)\n else:\n values[i, :] = function(samples[:, i])\n\n return values",
"def draw2 ( self ,\n dataset = None ,\n nbins = 100 ,\n silent = True ,\n in_range = None ,\n args = () , **kwargs ) :\n if in_range and isinstance ( in_range , tuple ) and 2 == len ( in_range ) :\n range_name = 'aux2_rng1_%s' % self.name \n with rooSilent ( 3 ) : \n self.xvar.setRange ( range_name , in_range[0] , in_range[1] )\n if dataset:\n dataset.get_var(self.xvar.GetName()).setRange ( range_name , in_range[0] , in_range[1] )\n\n in_range = range_name\n\n return self.draw ( drawvar = self.yvar ,\n dataset = dataset ,\n nbins = nbins ,\n ybins = 20 , ## fake\n silent = silent ,\n in_range = in_range ,\n args = args , **kwargs )",
"def interpolate_2d(x, y, z):\n X = np.linspace(min(x), max(x))\n Y = np.linspace(min(y), max(y))\n X, Y = np.meshgrid(X, Y)\n #f = interpolate.interp2d(x, y, z)\n #Z = f(X[0, :], Y[:, 0])\n f = interpolate.LinearNDInterpolator(zip(x, y), z)\n Z = f(X, Y)\n return X, Y, Z",
"def function2D(self, t):\n if t.ndim == 1:\n nX = int(self.getAttributeValue('nX'))\n nY = int(self.getAttributeValue('nY'))\n pos = t.reshape(nX, nY, 2)\n elif t.ndim == 3:\n pos = t\n X = pos[...,0]\n Y = pos[...,1]\n A = self.getParamValue(0)\n muX = self.getParamValue(1)\n muY = self.getParamValue(2)\n sigX = self.getParamValue(3)\n sigY = self.getParamValue(4)\n sigP = self.getParamValue(5)\n bg = self.getParamValue(6)\n\n sigXY = sigX*sigY*sigP\n Z = A*bivariate_normal(X,Y, sigmax=sigX, sigmay=sigY,\n mux=muX,muy=muY,sigmaxy=sigXY)\n Z += bg\n return Z",
"def plot(\n self,\n function: Callable[[float], float],\n x_range: Sequence[float] | None = None,\n use_vectorized: bool = False,\n **kwargs,\n ):\n\n t_range = np.array(self.x_range, dtype=float)\n if x_range is not None:\n t_range[: len(x_range)] = x_range\n\n if x_range is None or len(x_range) < 3:\n # if t_range has a defined step size, increase the number of sample points per tick\n t_range[2] /= self.num_sampled_graph_points_per_tick\n # For axes, the third coordinate of x_range indicates\n # tick frequency. But for functions, it indicates a\n # sample frequency\n\n graph = ParametricFunction(\n lambda t: self.coords_to_point(t, function(t)),\n t_range=t_range,\n scaling=self.x_axis.scaling,\n use_vectorized=use_vectorized,\n **kwargs,\n )\n graph.underlying_function = function\n return graph",
"def function_to_XYs(func, fpars,\n Egrid=equal_bins(100),\n domainUnit='eV', domainName='energy_in', rangeUnit='b', rangeName='crossSection',\n accuracy=upperEps):\n return XYs1dModule.XYs1d.createFromFunction(\n XYs1d.defaultAxes(labelsUnits={\n XYs1dModule.yAxisIndex: (rangeName, rangeUnit),\n XYs1dModule.xAxisIndex: (domainName, domainUnit)}),\n Xs=Egrid,\n func=func,\n parameters=fpars,\n accuracy=accuracy,\n biSectionMax=20,\n checkForRoots=False,\n infill=1,\n safeDivide=1)",
"def eval_2d_mesh(xmin, ymin, xmax, ymax, nx, ny, eval_fun):\n if xmin > xmax:\n raise ValueError(\"xmin (%.2f) was greater than\"\n \"xmax (%.2f)\" % (xmin, xmax))\n if ymin > ymax:\n raise ValueError(\"ymin (%.2f) was greater than\"\n \"ymax (%.2f)\" % (xmin, xmax))\n if nx < 1 or ny < 1:\n raise ValueError(\"nx (%.2f) or ny (%.2f) was less than 1\" % (nx, ny))\n X = np.linspace(xmin, xmax, nx)\n lenx = len(X)\n Y = np.linspace(ymin, ymax, ny)\n leny = len(Y)\n X, Y = np.meshgrid(X, Y)\n Z = np.zeros((leny, lenx))\n for i in range(leny):\n for j in range(lenx):\n Z[i][j] = eval_fun(np.array([X[i][j], Y[i][j]]))\n return (X, Y, Z)",
"def fillax(x, y, *args, **kw):\n xx = np.concatenate((x, np.array([x[-1], x[0]], x.dtype)))\n yy = np.concatenate((y, np.zeros(2, y.dtype)))\n return pylab.fill(xx, yy, *args, **kw)",
"def draw_f():\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n x_matrix = np.arange(-10, 11, 0.1)\n y_matrix = np.arange(-10, 11, 0.1)\n x_matrix, y_matrix = np.meshgrid(x_matrix, y_matrix)\n # print(x_matrix)\n u_matrix = x_matrix.copy()\n for i in range(x_matrix.shape[0]):\n for j in range(x_matrix.shape[0]):\n u_matrix[i][j] = f(x_matrix[i][j], y_matrix[i][j])\n surf = ax.plot_surface(x_matrix, y_matrix, u_matrix)\n\n plt.show()\n return surf",
"def make_plot(x,y):",
"def plot_response_surface(f, p, dims=[0,1]):\n import pylab\n if len(dims) == 1:\n xi = dims[0]\n x = pylab.linspace(-10,10,40) - p[xi]\n def value(v):\n p[xi] = v\n return f(p)\n z = [value(v) for v in x]\n pylab.plot(x,z)\n else:\n xi,yi = dims\n x = pylab.linspace(-10,10,40) - p[xi]\n y = pylab.linspace(-10,10,40) - p[yi]\n def value(pt):\n p[xi] = pt[0]\n p[yi] = pt[1]\n return f(p)\n z = np.array([[value((v,w)) for v in x] for w in y])\n pylab.pcolor(x,y,z)",
"def interp_2d(_x, _y, _x_min, _x_step, _nx, _y_min, _y_step, _ny, _ar_f, _ord=3, _ix_per=1, _ix_ofst=0):\r\n if(_ord == 1): #bi-linear interpolation based on 4 points\r\n ix0 = int(trunc((_x - _x_min)/_x_step + 1.e-09))\r\n if(ix0 < 0):\r\n ix0 = 0\r\n elif(ix0 >= _nx - 1):\r\n ix0 = _nx - 2\r\n ix1 = ix0 + 1\r\n tx = (_x - (_x_min + _x_step*ix0))/_x_step\r\n \r\n iy0 = int(trunc((_y - _y_min)/_y_step + 1.e-09))\r\n if(iy0 < 0):\r\n iy0 = 0\r\n elif(iy0 >= _ny - 1):\r\n iy0 = _ny - 2\r\n iy1 = iy0 + 1\r\n ty = (_y - (_y_min + _y_step*iy0))/_y_step\r\n\r\n nx_ix_per = _nx*_ix_per\r\n iy0_nx_ix_per = iy0*nx_ix_per\r\n iy1_nx_ix_per = iy1*nx_ix_per\r\n ix0_ix_per_p_ix_ofst = ix0*_ix_per + _ix_ofst\r\n ix1_ix_per_p_ix_ofst = ix1*_ix_per + _ix_ofst\r\n a00 = _ar_f[iy0_nx_ix_per + ix0_ix_per_p_ix_ofst]\r\n f10 = _ar_f[iy0_nx_ix_per + ix1_ix_per_p_ix_ofst]\r\n f01 = _ar_f[iy1_nx_ix_per + ix0_ix_per_p_ix_ofst]\r\n f11 = _ar_f[iy1_nx_ix_per + ix1_ix_per_p_ix_ofst]\r\n a10 = f10 - a00\r\n a01 = f01 - a00\r\n a11 = a00 - f01 - f10 + f11\r\n return a00 + tx*(a10 + ty*a11) + ty*a01\r\n\r\n elif(_ord == 2): #bi-quadratic interpolation based on 6 points\r\n ix0 = int(round((_x - _x_min)/_x_step))\r\n if(ix0 < 1):\r\n ix0 = 1\r\n elif(ix0 >= _nx - 1):\r\n ix0 = _nx - 2\r\n ixm1 = ix0 - 1\r\n ix1 = ix0 + 1\r\n tx = (_x - (_x_min + _x_step*ix0))/_x_step\r\n\r\n iy0 = int(round((_y - _y_min)/_y_step))\r\n if(iy0 < 1):\r\n iy0 = 1\r\n elif(iy0 >= _ny - 1):\r\n iy0 = _ny - 2\r\n iym1 = iy0 - 1\r\n iy1 = iy0 + 1\r\n ty = (_y - (_y_min + _y_step*iy0))/_y_step\r\n\r\n nx_ix_per = _nx*_ix_per\r\n iym1_nx_ix_per = iym1*nx_ix_per\r\n iy0_nx_ix_per = iy0*nx_ix_per\r\n iy1_nx_ix_per = iy1*nx_ix_per\r\n ixm1_ix_per_p_ix_ofst = ixm1*_ix_per + _ix_ofst\r\n ix0_ix_per_p_ix_ofst = ix0*_ix_per + _ix_ofst\r\n ix1_ix_per_p_ix_ofst = ix1*_ix_per + _ix_ofst\r\n fm10 = _ar_f[iy0_nx_ix_per + ixm1_ix_per_p_ix_ofst]\r\n a00 = _ar_f[iy0_nx_ix_per + ix0_ix_per_p_ix_ofst]\r\n f10 = _ar_f[iy0_nx_ix_per + ix1_ix_per_p_ix_ofst]\r\n f0m1 = _ar_f[iym1_nx_ix_per + ix0_ix_per_p_ix_ofst]\r\n f01 = _ar_f[iy1_nx_ix_per + ix0_ix_per_p_ix_ofst]\r\n f11 = _ar_f[iy1_nx_ix_per + ix1_ix_per_p_ix_ofst]\r\n a10 = 0.5*(f10 - fm10)\r\n a01 = 0.5*(f01 - f0m1)\r\n a11 = a00 - f01 - f10 + f11\r\n a20 = 0.5*(f10 + fm10) - a00\r\n a02 = 0.5*(f01 + f0m1) - a00\r\n return a00 + tx*(a10 + tx*a20 + ty*a11) + ty*(a01 + ty*a02)\r\n \r\n elif(_ord == 3): #bi-cubic interpolation based on 12 points\r\n ix0 = int(trunc((_x - _x_min)/_x_step + 1.e-09))\r\n if(ix0 < 1):\r\n ix0 = 1\r\n elif(ix0 >= _nx - 2):\r\n ix0 = _nx - 3\r\n ixm1 = ix0 - 1\r\n ix1 = ix0 + 1\r\n ix2 = ix0 + 2\r\n tx = (_x - (_x_min + _x_step*ix0))/_x_step\r\n\r\n iy0 = int(trunc((_y - _y_min)/_y_step + 1.e-09))\r\n if(iy0 < 1):\r\n iy0 = 1\r\n elif(iy0 >= _ny - 2):\r\n iy0 = _ny - 3\r\n iym1 = iy0 - 1\r\n iy1 = iy0 + 1\r\n iy2 = iy0 + 2\r\n ty = (_y - (_y_min + _y_step*iy0))/_y_step\r\n\r\n nx_ix_per = _nx*_ix_per\r\n iym1_nx_ix_per = iym1*nx_ix_per\r\n iy0_nx_ix_per = iy0*nx_ix_per\r\n iy1_nx_ix_per = iy1*nx_ix_per\r\n iy2_nx_ix_per = iy2*nx_ix_per\r\n ixm1_ix_per_p_ix_ofst = ixm1*_ix_per + _ix_ofst\r\n ix0_ix_per_p_ix_ofst = ix0*_ix_per + _ix_ofst\r\n ix1_ix_per_p_ix_ofst = ix1*_ix_per + _ix_ofst\r\n ix2_ix_per_p_ix_ofst = ix2*_ix_per + _ix_ofst\r\n f0m1 = _ar_f[iym1_nx_ix_per + ix0_ix_per_p_ix_ofst]\r\n f1m1 = _ar_f[iym1_nx_ix_per + ix1_ix_per_p_ix_ofst]\r\n fm10 = _ar_f[iy0_nx_ix_per + ixm1_ix_per_p_ix_ofst]\r\n a00 = _ar_f[iy0_nx_ix_per + ix0_ix_per_p_ix_ofst]\r\n f10 = _ar_f[iy0_nx_ix_per + 
ix1_ix_per_p_ix_ofst]\r\n f20 = _ar_f[iy0_nx_ix_per + ix2_ix_per_p_ix_ofst]\r\n fm11 = _ar_f[iy1_nx_ix_per + ixm1_ix_per_p_ix_ofst]\r\n f01 = _ar_f[iy1_nx_ix_per + ix0_ix_per_p_ix_ofst]\r\n f11 = _ar_f[iy1_nx_ix_per + ix1_ix_per_p_ix_ofst]\r\n f21 = _ar_f[iy1_nx_ix_per + ix2_ix_per_p_ix_ofst]\r\n f02 = _ar_f[iy2_nx_ix_per + ix0_ix_per_p_ix_ofst]\r\n f12 = _ar_f[iy2_nx_ix_per + ix1_ix_per_p_ix_ofst]\r\n a10 = -0.5*a00 + f10 - f20/6 - fm10/3\r\n a01 = -0.5*a00 + f01 - f02/6 - f0m1/3\r\n a11 = -0.5*(f01 + f10) + (f02 - f12 + f20 - f21)/6 + (f0m1 - f1m1 + fm10 - fm11)/3 + f11\r\n a20 = -a00 + 0.5*(f10 + fm10)\r\n a02 = -a00 + 0.5*(f01 + f0m1)\r\n a21 = a00 - f01 + 0.5*(f11 - f10 - fm10 + fm11)\r\n a12 = a00 - f10 + 0.5*(f11 - f01 - f0m1 + f1m1)\r\n a30 = 0.5*(a00 - f10) + (f20 - fm10)/6\r\n a03 = 0.5*(a00 - f01) + (f02 - f0m1)/6\r\n a31 = 0.5*(f01 + f10 - f11 - a00) + (f21 + fm10 - f20 - fm11)/6\r\n a13 = 0.5*(f10 - f11 - a00 + f01) + (f0m1 + f12 - f02 - f1m1)/6\r\n return a00 + tx*(a10 + tx*(a20 + tx*(a30 + ty*a31) + ty*a21) + ty*a11) + ty*(a01 + ty*(a02 + ty*(a03 + tx*a13) + tx*a12))\r\n return 0",
"def apply_se_corr2D(x, y, lx, f):\n if( x.shape != y.shape or x.shape != f.shape):\n print (x.shape)\n print (y.shape)\n print (f.shape)\n raise ValueError(\"Incompatible shape of coordinates arrays\")\n #\n nx = x.shape[0]\n ny = x.shape[1]\n #\n g = np.zeros_like(f)\n two_lx2 = 2.0*lx*lx\n #\n for i in range(nx):\n for j in range(ny):\n coef_sum = 0\n for k1 in range(nx):\n for k2 in range(ny):\n dx = x[k1,k2] - x[i,j]\n dy = y[k1,k2] - y[i,j]\n coef = np.exp( -(dx*dx+dy*dy)/two_lx2 )\n g[i,j] = g[i,j] + coef*f[k1,k2]\n coef_sum = coef_sum + coef\n pass # k2\n pass #k1\n # Normalization if needed\n g[i,j] = g[i,j]/coef_sum\n pass # j\n pass # i\n return g",
"def Generate_Custom(f, n, m):\n return np.fromfunction(np.vectorize(f, otypes=[float]), (n,m))",
"def function_to_surface (x, y, func, hist=False):\n dx = x[1] - x[0]\n dy = y[1] - y[0]\n xbins = np.r_[x - dx/2., x[-1] + dx/2.]\n ybins = np.r_[y - dy/2., y[-1] + dy/2.]\n values = np.vectorize (func) (*np.meshgrid (x, y)).T\n if hist:\n return Hist2D (xbins, ybins, values)\n else:\n return Surface2D (xbins, ybins, values)"
] | [
"0.6939351",
"0.6302334",
"0.6178659",
"0.58382726",
"0.57627946",
"0.5739403",
"0.56020606",
"0.5559102",
"0.5487004",
"0.5467543",
"0.5426483",
"0.5417944",
"0.53844726",
"0.53342545",
"0.53172773",
"0.53114015",
"0.5287415",
"0.5252938",
"0.5250112",
"0.52382034",
"0.5224161",
"0.5208987",
"0.5190809",
"0.51832134",
"0.51677656",
"0.51570743",
"0.515311",
"0.5142582",
"0.513174",
"0.51237595"
] | 0.7971121 | 0 |
r""" Samples a 3d function f over specified intervals and returns three 2d arrays (X, Y, Z) suitable for plotting with matlab (matplotlib) syntax. See examples\mplot3d.py. f is a function of two variables, such as x2 + y2. x_args and y_args are intervals given in the form (var, min, max, n) | def sample3d(f, x_args, y_args):
    x, x_min, x_max, x_n = None, None, None, None
    y, y_min, y_max, y_n = None, None, None, None
    try:
        f = sympify(f)
    except SympifyError:
        raise ValueError("f could not be interpreted as a SymPy function")
    try:
        x, x_min, x_max, x_n = x_args
        y, y_min, y_max, y_n = y_args
    except (TypeError, IndexError):
        raise ValueError("x_args and y_args must be tuples of the form (var, min, max, intervals)")

    # Build the 1-D sample points along each axis.
    x_l = float(x_max - x_min)
    x_d = x_l/float(x_n)
    x_a = np.arange(float(x_min), float(x_max) + x_d, x_d)

    y_l = float(y_max - y_min)
    y_d = y_l/float(y_n)
    y_a = np.arange(float(y_min), float(y_max) + y_d, y_d)

    # np.meshgrid replaces the former unused local copy of matplotlib.mlab.meshgrid.
    X, Y = np.meshgrid(x_a, y_a)

    # Evaluate f at every grid point; points where f is not numeric map to 0.
    Z = np.zeros((len(X), len(X[0])))
    for j in range(len(X)):
        for k in range(len(X[0])):
            try:
                Z[j][k] = float(f.subs(x, X[j][k]).subs(y, Y[j][k]))
            except (TypeError, NotImplementedError):
                Z[j][k] = 0
    return X, Y, Z | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def draw_f():\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n x_matrix = np.arange(-10, 11, 0.1)\n y_matrix = np.arange(-10, 11, 0.1)\n x_matrix, y_matrix = np.meshgrid(x_matrix, y_matrix)\n # print(x_matrix)\n u_matrix = x_matrix.copy()\n for i in range(x_matrix.shape[0]):\n for j in range(x_matrix.shape[0]):\n u_matrix[i][j] = f(x_matrix[i][j], y_matrix[i][j])\n surf = ax.plot_surface(x_matrix, y_matrix, u_matrix)\n\n plt.show()\n return surf",
"def sample2d(f, x_args):\n try:\n f = sympify(f)\n except SympifyError:\n raise ValueError(\"f could not be interpreted as a SymPy function\")\n try:\n x, x_min, x_max, x_n = x_args\n except (TypeError, IndexError):\n raise ValueError(\"x_args must be a tuple of the form (var, min, max, n)\")\n\n x_l = float(x_max - x_min)\n x_d = x_l/float(x_n)\n X = np.arange(float(x_min), float(x_max) + x_d, x_d)\n\n Y = np.empty(len(X))\n for i in range(len(X)):\n try:\n Y[i] = float(f.subs(x, X[i]))\n except TypeError:\n Y[i] = None\n return X, Y",
"def newplot3(*args, **kwargs):\n\n if 'linewidth' and 'lw' not in kwargs.keys():\n kwargs['linewidth'] = 2\n\n fig = plt.figure(figsize=FIGURE_SIZE, dpi=FIGURE_DPI)\n ax = fig.add_subplot(111, projection='3d')\n\n x = np.asarray(args[0], dtype=float)\n y = np.asarray(args[1], dtype=float)\n z = np.asarray(args[2], dtype=float)\n\n if z.ndim == 2:\n if x.ndim < 2:\n x = np.tile(x, z.shape[1]).reshape(z.T.shape).T\n if y.ndim < 2:\n y = np.tile(y, z.shape[0]).reshape(z.shape)\n\n # Plot each array independently\n for n in range(len(z)):\n ax.plot(x[n], y[n], z[n], *args[3:], **kwargs)\n else:\n ax.plot(*args, **kwargs)",
"def plot3d(data):\n assert span1 == span2\n span = span1\n # ---------------------- create the figure and axes ---------------------- #\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n\n # -- discretize the definition space and compute the function's images --- #\n X, Y = discretise_space([defspace1, defspace2], n=span)\n Z = data\n\n # ----------------------- appearance and plotting ------------------------ #\n ax.set_zlim(np.min(Z) - 0.5, np.max(Z) + 0.5)\n ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))\n ax.set(xlabel='$W\\_C$', ylabel='$W\\_W$', zlabel=\"Utilité\")#,\n # title='Utilité à {} ticks en fonction de W_W et W_C'.format(ticks))\n\n # Plot the surface.\n surf = ax.plot_surface(X, Y, Z, alpha=0.8, #, cmap='binary'\n linewidth=0, antialiased=False, zorder=1)\n\n plt.show()",
"def sample(f, *var_args):\n if len(var_args) == 1:\n return sample2d(f, var_args[0])\n elif len(var_args) == 2:\n return sample3d(f, var_args[0], var_args[1])\n else:\n raise ValueError(\"Only 2d and 3d sampling are supported at this time.\")",
"def plot_3d(results_list): \n x_range = range(len(results_list[0]))\n fig = plt.figure()\n axe = Axes3D(fig)\n\n for idx, result in enumerate(results_list):\n axe.plot(x_range, result, idx)\n plt.show()",
"def drawCurve3D(xlist, ylist, zlist):\n dislin.curv3d(xlist,ylist,zlist,len(xlist))",
"def plot3d(self,datarange=None,nx=100,ny=100,clf=True,cb=True,data='auto',**kwargs):\n from enthought.mayavi import mlab as M\n from operator import isMappingType\n\n if data == 'auto':\n if self.data:\n data = self.data[:2]\n else:\n data = None\n\n if data: #TODO:correct coord conv\n xd,yd = data[0][0],data[0][1]\n if datarange is None:\n datarange = (np.min(xd),np.max(xd),np.min(yd),np.max(yd))\n maxmind = (np.max(data[1]),np.min(data[1]))\n elif datarange is None:\n if self.rangehint is not None:\n datarange = self.rangehint\n else:\n raise ValueError(\"Can't choose limits for plotting without data or a range hint\")\n maxmind = None\n\n grid = np.mgrid[datarange[0]:datarange[1]:1j*nx,datarange[2]:datarange[3]:1j*ny]\n res = self(grid)\n\n# if maxmind:\n# norm = plt.normalize(min(np.min(res),maxmind[1]),max(np.max(res),maxmind[0]))\n# else:\n# norm = plt.normalize(np.min(res),np.max(res))\n\n if clf:\n M.clf()\n\n M.mesh(grid[0],grid[1],res)\n\n if cb:\n if isMappingType(cb):\n M.colorbar(**cb)\n else:\n M.colorbar()\n\n if data:\n if isMappingType(data):\n kwscat = dict(data)\n else:\n kwscat = {}\n zd = data[1]\n zres = zd-self((xd,yd))\n kwscat.setdefault('scale_mode','none')\n kwscat.setdefault('scale_factor','auto')\n g = M.points3d(xd,yd,zd,zres,**kwscat)\n if kwscat['scale_factor'] == 'auto':\n g.glyph.glyph.scale_factor /= 2\n\n #M.xlim(datarange[0],datarange[1])\n #M.ylim(datarange[2],datarange[3])",
"def axis3D(xlow,xhigh,xfirst,xstep,ylow,yhigh,yfirst,ystep,\\\n zlow,zhigh,zfirst,zstep):\n dislin.graf3d(xlow,xhigh,xfirst,xstep,ylow,yhigh,yfirst,ystep,\\\n zlow,zhigh,zfirst,zstep)",
"def surfcut_points(**kwargs):\n npoints = kwargs.get( 'npoints', 240 )\n origin = kwargs.get( 'origin', vec3(0.,0.,0.)) \n normal = kwargs.get( 'normal', (np.pi/2., 0.) ) \n lims0 = kwargs.get( 'lims0', (-50., 50.) ) \n lims1 = kwargs.get( 'lims1', (-50., 50.) ) \n extents = kwargs.get( 'extents', None) \n \n if extents is not None:\n lims0 = (-extents, extents)\n lims1 = (-extents, extents)\n \n # Make the unit vectors that define the plane\n unit = vec3()\n th = normal[0]\n ph = normal[1]\n unit.set_spherical( 1, th, ph) \n orth0 = vec3( -1.*np.sin(ph), np.cos(ph), 0. )\n orth1 = cross(unit,orth0)\n \n t0 = np.linspace( lims0[0], lims0[1], npoints )\n t1 = np.linspace( lims1[0], lims1[1], npoints ) \n \n # Obtain points on which function will be evaluated\n T0,T1 = np.meshgrid(t0,t1)\n X = origin[0] + T0*orth0[0] + T1*orth1[0] \n Y = origin[1] + T0*orth0[1] + T1*orth1[1]\n Z = origin[2] + T0*orth0[2] + T1*orth1[2] \n \n\n # If given an axes it will plot the reference surface to help visusalize\n # the surface cut\n \n # Note that the axes needs to be created with a 3d projection. \n # For example: \n # fig = plt.figure( figsize=(4.,4.) ) \n # gs = matplotlib.gridspec.GridSpec( 1,1 ) \n # ax0 = fig.add_subplot( gs[0,0], projection='3d' ) \n \n ax0 = kwargs.get( 'ax0', None ) \n if ax0 is not None: \n\n # Plot the reference surface\n ax0.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3, linewidth=0.)\n ax0.set_xlabel('X')\n ax0.set_ylabel('Y')\n ax0.set_zlabel('Z')\n lmin = min([ ax0.get_xlim()[0], ax0.get_ylim()[0], ax0.get_zlim()[0] ] )\n lmax = max([ ax0.get_xlim()[1], ax0.get_ylim()[1], ax0.get_zlim()[1] ] )\n ax0.set_xlim( lmin, lmax )\n ax0.set_ylim( lmin, lmax )\n ax0.set_zlim( lmin, lmax )\n ax0.set_yticklabels([])\n ax0.set_xticklabels([])\n ax0.set_zticklabels([])\n \n # If given an axes and a potential it will plot the surface cut of the \n # potential \n\n ax1 = kwargs.get( 'ax1', None) \n pot = kwargs.get( 'potential', None) \n\n if (ax1 is not None) and (pot is not None):\n # Evaluate function at points and plot\n EVAL = pot.evalpotential(X,Y,Z)\n\n im =ax1.pcolormesh(T0, T1, EVAL, cmap = plt.get_cmap('jet')) \n # cmaps: rainbow, jet\n\n plt.axes( ax1)\n cbar = plt.colorbar(im)\n cbar.set_label(pot.unitlabel, rotation=0 )#self.unitlabel\n \n return T0, T1, X, Y, Z",
"def plot_bivariate_3d(X, Y, Z, bounds, title, **kwargs):\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.set_xticks(np.linspace(bounds[0],bounds[1],6))\n ax.set_yticks(np.linspace(bounds[0],bounds[1],6))\n ax.set_xlim(bounds)\n ax.set_ylim(bounds)\n ax.plot_surface(X,Y,Z, **kwargs)\n plt.title(title)\n plt.show()",
"def plot_results_traj_3d(p_x, p_y, p_z, xmin, xmax, ymin, ymax, zmin, zmax):\n fig, ax = plt.subplots(2 , 2, figsize = (10, 10))\n \n for p in np.arange(0, p_x.shape[0], step = 1): \n for t in np.arange(0, p_x.shape[1], step = 1): \n ax[0,0].plot(t, p_x[p, t], 'rx') \n ax[0,1].plot(t, p_y[p, t], 'gx') \n ax[1,0].plot(t, p_z[p, t], 'bx') \n ax[1,1].plot(t, p_x[p, t], 'rx') \n ax[1,1].plot(t, p_y[p, t], 'gx') \n ax[1,1].plot(t, p_z[p, t], 'bx') \n for a in ax.flat: \n a.set(xlabel = 'Time steps', ylabel = 'Position')\n ax[0,0].set_title('X (pix)') \n ax[0,0].set_ylim([xmin, xmax]) \n ax[0,1].set_title('Y (pix)') \n ax[0,1].set_ylim([ymin, ymax]) \n ax[1,0].set_title('Z (pix)') \n ax[1,0].set_ylim([zmin, zmax])\n ax[1,1].set_title('Positions combined') \n ax[1,1].set_ylim([np.array([xmin, ymin, zmin]).min(), np.array([xmax, ymax, zmax]).max()])",
"def plot_response_surface(f, p, dims=[0,1]):\n import pylab\n if len(dims) == 1:\n xi = dims[0]\n x = pylab.linspace(-10,10,40) - p[xi]\n def value(v):\n p[xi] = v\n return f(p)\n z = [value(v) for v in x]\n pylab.plot(x,z)\n else:\n xi,yi = dims\n x = pylab.linspace(-10,10,40) - p[xi]\n y = pylab.linspace(-10,10,40) - p[yi]\n def value(pt):\n p[xi] = pt[0]\n p[yi] = pt[1]\n return f(p)\n z = np.array([[value((v,w)) for v in x] for w in y])\n pylab.pcolor(x,y,z)",
"def plot_cube(ax: Axes, x: ArrayLike, y: ArrayLike, f_low: callable, f_upp: callable, **kwargs):\n # lower\n xm, ym = np.meshgrid(x, y)\n zm = f_low(xm, ym)\n ax.plot_surface(xm, ym, zm, **kwargs)\n\n # upper\n zm = f_upp(xm, ym)\n ax.plot_surface(xm, ym, zm, **kwargs)\n\n # north\n xm, ym = np.array([x, x]), y[0]*np.ones([2, len(y)])\n zm = np.array([f_low(x, y[0]), f_upp(x, y[0])])\n ax.plot_surface(xm, ym, zm, **kwargs)\n\n # south\n xm, ym = np.array([x, x]), y[-1]*np.ones([2, len(y)])\n zm = np.array([f_low(x, y[-1]), f_upp(x, y[-1])])\n ax.plot_surface(xm, ym, zm, **kwargs)\n\n # east\n xm, ym = x[0]*np.ones([2, len(x)]), np.array([y, y])\n zm = np.array([f_low(x[0], y), f_upp(x[0], y)])\n ax.plot_surface(xm, ym, zm, **kwargs)\n\n # west\n xm, ym = x[-1]*np.ones([2, len(x)]), np.array([y, y])\n zm = np.array([f_low(x[-1], y), f_upp(x[-1], y)])\n ax.plot_surface(xm, ym, zm, **kwargs)",
"def plot_mesh_function(mesh, f, title=\"\", colormap = \"hot\", edges = False, mybounds = [], myticks = []) :\n if mesh.dimension() == 1 :\n # get the mesh points\n x = mesh_axes(mesh)\n # plot the map\n plt.plot(x, f)\n \n elif mesh.dimension() == 2 :\n\n # Get the mesh axes and then make a grid of them for plotting.\n x, y = mesh_axes(mesh)\n X, Y = np.meshgrid(x, y)\n # Reshape the function\n f = f.reshape(mesh.number_cells_x(), mesh.number_cells_y())\n if edges :\n plt.pcolor(X, Y, f, cmap=colormap, edgecolors='k')\n else :\n plt.pcolor(X, Y, f, cmap=colormap)\n plt.axis(\"scaled\") \n plt.xlabel(\"x [cm]\")\n plt.ylabel(\"y [cm]\")\n if len(myticks) :\n cbar = plt.colorbar(boundaries=mybounds,ticks=myticks)\n else : \n cbar = plt.colorbar()\n else :\n print \"not ready for 3d\"\n return\n plt.title(title)\n # show the plot\n plt.show()",
"def eval_r_func_3(f, x, y, t):\n elementary_func = ['prod', 'avg', 'cos_pi', 'sin_pi']\n if f[0] == \"x\":\n return x\n elif f[0] == \"y\":\n return y\n elif f[0] == \"t\":\n return t\n else:\n if f[0] == elementary_func[0]:\n first_argument = eval_r_func_3(f[1], x, y, t)\n second_argument = eval_r_func_3(f[2], x, y, t)\n return first_argument * second_argument\n elif f[0] == elementary_func[1]:\n first_argument = eval_r_func_3(f[1], x, y, t)\n second_argument = eval_r_func_3(f[2], x, y, t)\n return .5*(first_argument + second_argument)\n elif f[0] == elementary_func[2]:\n argument = eval_r_func_3(f[1], x, y, t)\n ans = math.cos(math.pi * argument)\n return ans\n elif f[0] == elementary_func[3]:\n argument = eval_r_func_3(f[1], x, y, t)\n ans = math.sin(math.pi * argument)\n return ans",
"def __create_sample_data__(npts = 20):\n\t#data function\n\tdef wavy(x, y):\n\t\treturn np.sin(0.2*np.pi*x)*np.cos(0.4*np.pi*y)\n\t\n\t#make grid\n\txs = np.linspace(0, 2*20, 2*npts + 1)\n\tys = np.linspace(0, 20, npts + 1)\n\t(xgrid, ygrid) = np.meshgrid(xs, ys)\n\tzgrid = wavy(xgrid, ygrid)\n\t\n\treturn (xgrid, ygrid, zgrid)",
"def show_trace_2d(f, results):\n plt.close()\n # draw input points\n plt.plot(*zip(*results), '-o', color='#ff7f0e')\n # get the field of figure\n x1, x2 = np.meshgrid(np.arange(-5.5, 1.0, 0.1), np.arange(-3.0, 1.0, 0.1))\n # draw the contour of function using x1,x2 as step\n plt.contour(x1, x2, f(x1, x2), colors='#1f77b4')\n plt.xlabel('x1')\n plt.ylabel('x2')\n plt.show()",
"def plotSurface(X):\n from mpl_toolkits.mplot3d import Axes3D\n from mpl_toolkits.mplot3d import proj3d\n f=plt.figure()\n ax=f.add_subplot(111,projection='3d')\n xi=np.arange(10,14,0.05)\n yi=np.arange(12,16,0.05)\n z = matplotlib.mlab.griddata(X[:,0], X[:,1], X[:,2], xi, yi, interp='nn')\n x, y = np.meshgrid(xi, yi)\n ax.plot_surface(x, y, z)\n return f",
"def test_exercise_1():\n a, b = 5, 0\n fvals = []\n grid = np.linspace(-3, 4)\n for value in grid:\n fvals.append(get_test_function(value, a, b))\n plt.plot(grid, fvals)",
"def timeit_plot3D(data, xlabel='xlabel', ylabel='ylabel', **kwargs):\n dataT = {}\n figs = []\n series = kwargs.get('series', (0,1))\n cmap = kwargs.get('cmap', cm.coolwarm)\n for k, v in data.items():\n dataT[k] = zip(*v)\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n X, Y, Z = dataT[k][series[0]], dataT[k][series[1]], dataT[k][-1]\n wide, tall = (max(X)-min(X)+1), (max(Y)-min(Y)+1)\n intervalX = max(X) - min(heapq.nlargest(2,set(X)))\n intervalY = max(Y) - min(heapq.nlargest(2,set(Y)))\n wide, tall = 1+wide/intervalX, 1+tall/intervalY\n X = np.reshape(X, [wide, tall])\n Y = np.reshape(Y, [wide, tall])\n # TODO: BUG: fix so that Z transposes with x & y reversed\n Z = np.reshape(Z, [wide, tall])\n surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cmap, linewidth=0, antialiased=False)\n ax.zaxis.set_major_locator(LinearLocator(10))\n ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n ax.set_title(substitute_titles(k,series))\n fig.colorbar(surf, shrink=0.5, aspect=5)\n figs.append(fig)\n return figs",
"def plot_surface(\n condition: bool,\n function: typing.Callable,\n x: typing.List[float],\n t: typing.List[float],\n p: typing.List[float],\n t_min: float,\n t_max: float,\n x_v: numpy.array,\n):\n # TODO: docstring\n fig = plt.figure()\n ax = fig.add_subplot(projection=\"3d\")\n\n if condition:\n ax.scatter(x, t, p, marker=\"o\")\n\n t_v = numpy.linspace((t_min - 10), (t_max + 10), num=50)\n x_fit, t_fit = numpy.meshgrid(x_v, t_v)\n p_fit = numpy.array([function(x_fit[i], t_fit[i]) for i in range(len(x_fit))])\n ax.plot_surface(x_fit, t_fit, p_fit, alpha=0.2)\n\n ax.set_xlabel(\"First component fraction\")\n ax.set_ylabel(\"Temperature K\")\n ax.set_zlabel(\"Permeance\")\n fig.suptitle(\"Fit Illustration\", fontsize=10)\n plt.show()",
"def fcontourf(f, x1range, x2range, yrange, **kwargs):\n x1s = np.linspace(x1range[0], x1range[1])\n x2s = np.linspace(x2range[0], x2range[1])\n ys = np.linspace(yrange[0], yrange[1], 20)\n fs = [[f(np.array([x1,x2])) for x1 in x1s] for x2 in x2s]\n plt.contourf(x1s, x2s, fs, ys, **kwargs)\n plt.axis('scaled')",
"def plot(vec):\n\n # ADDITIONAL CODE, to see what the ranges are of the features\n # get a list containing all the first features and second features, respectively\n # feature_1 = list(map(lambda x: x[0], vec))\n # feature_2 = list(map(lambda x: x[1], vec))\n # x = np.arange(min(feature_1), max(feature_1), delta)\n # y = np.arange(min(feature_2), max(feature_2), delta)\n\n fig = plt.figure()\n ax = Axes3D(fig)\n\n # make a numpy arange from the minimum feature until the maximum features\n # delta is the size of spacing between samples\n delta = 0.1\n x = np.arange(-2.0, 4.0, delta)\n y = np.arange(-3.0, 4.0, delta)\n\n # make a 2-D grind\n x, y = np.meshgrid(x, y)\n\n # assign bivariate Gaussian distribution for equal shape X, Y.\n z1 = mlab.bivariate_normal(x, y, sigmax=1.0, sigmay=1.0, mux=0.0, muy=0.0)\n z2 = mlab.bivariate_normal(x, y, sigmax=1.5, sigmay=0.5, mux=1, muy=1)\n z = 10.0 * (z2 - z1)\n\n # create surface plot\n ax.plot_surface(x, y, z, cmap=cm.coolwarm, linewidth=0, antialiased=False)\n plt.savefig(\"./graphs/figures/f1_2surf3n.png\")\n\n \"\"\"\n # create contour plot\n contour_plot = plt.contour(x, y, z)\n # assign labels and title\n plt.clabel(contour_plot, inline=1, fontsize=10)\n plt.title('Feature 1 against feature 2')\n plt.savefig(\"./graphs/figures/f1_2n.png\")\n \"\"\"",
"def function_3d(point):\n return point[0]**2 + point[1]**2 + point[2]**2 - 1",
"def plot_results_3d(p_x, p_y, p_z, h_exp = 0.5):\n plt.figure(figsize = (10, 10))\n ax3d = plt.axes(projection = '3d') \n\n color=iter(cm.rainbow(np.linspace(0,1,p_x.shape[0]))) # (1)\n labels = ['Particle ' + str(pl+1) for pl in np.arange(0, p_x.shape[0], step = 1)]\n \n for p in np.arange(0, p_x.shape[0], step = 1): \n c = next(color) # (1)\n for t in np.arange(0, p_x.shape[1], step = 1): \n ax3d.plot3D(p_x[p, t], p_y[p, t], p_z[p, t], 'x', c = c, label = labels[p]) \n legend_without_duplicate_labels(ax3d)\n ax3d.set_xlabel('X (pixels)') \n ax3d.set_ylabel('Y (pixels') \n ax3d.set_zlabel('Z (pixels)') \n ax3d.set_xlim([origin-150,origin+150])\n ax3d.set_ylim([origin-150,origin+150])\n ax3d.set_zlim([origin-150,origin+150])\n ax3d.set_title('3D particle trajectories - H = ' + str(h_exp))",
"def show_trace_2d(f, results): #@save\n set_figsize()\n plt.plot(*zip(*results), '-o', color='#ff7f0e')\n x1, x2 = torch.meshgrid(torch.arange(-5.5, 1.0, 0.1),torch.arange(-3.0, 1.0, 0.1))\n plt.contour(x1, x2, f(x1, x2), colors='#1f77b4')\n plt.xlabel('x1')",
"def apply_PSFvar3Dz(x, z, a):\n N1, N2, N3 = x.shape\n Hxz = np.zeros((N1, N2))\n\n p3 = int((a.shape[2]-1)/2)\n\n zmin = max(0,z-p3)\n zmax = min(N3,z+p3+1)\n\n for n3 in range(zmin,zmax):\n bF2 = conv2D_fourier(x[:,:,n3], a[:,:,z-n3+p3])\n Hxz = Hxz + bF2\n\n return Hxz",
"def list_plot3d_tuples(v, interpolation_type, texture, **kwds):\n from matplotlib import tri, delaunay\n import numpy\n import scipy\n from random import random\n from scipy import interpolate\n from .plot3d import plot3d\n\n if len(v)<3:\n raise ValueError(\"We need at least 3 points to perform the interpolation\")\n\n x = [float(p[0]) for p in v]\n y = [float(p[1]) for p in v]\n z = [float(p[2]) for p in v]\n\n # If the (x,y)-coordinates lie in a one-dimensional subspace, the\n # matplotlib Delaunay code segfaults. Therefore, we compute the\n # correlation of the x- and y-coordinates and add small random\n # noise to avoid the problem if needed.\n corr_matrix = numpy.corrcoef(x, y)\n if corr_matrix[0, 1] > 0.9 or corr_matrix[0, 1] < -0.9:\n ep = float(.000001)\n x = [float(p[0]) + random()*ep for p in v]\n y = [float(p[1]) + random()*ep for p in v]\n\n\n # If the list of data points has two points with the exact same\n # (x,y)-coordinate but different z-coordinates, then we sometimes\n # get segfaults. The following block checks for this and raises\n # an exception if this is the case.\n # We also remove duplicate points (which matplotlib can't handle).\n # Alternatively, the code in the if block above which adds random\n # error could be applied to perturb the points.\n drop_list = []\n nb_points = len(x)\n for i in range(nb_points):\n for j in range(i+1, nb_points):\n if x[i] == x[j] and y[i] == y[j]:\n if z[i] != z[j]:\n raise ValueError(\"Two points with same x,y coordinates and different z coordinates were given. Interpolation cannot handle this.\")\n elif z[i] == z[j]:\n drop_list.append(j)\n x = [x[i] for i in range(nb_points) if i not in drop_list]\n y = [y[i] for i in range(nb_points) if i not in drop_list]\n z = [z[i] for i in range(nb_points) if i not in drop_list]\n\n xmin = float(min(x))\n xmax = float(max(x))\n ymin = float(min(y))\n ymax = float(max(y))\n\n num_points = kwds['num_points'] if 'num_points' in kwds else int(4*numpy.sqrt(len(x)))\n #arbitrary choice - assuming more or less a nxn grid of points\n # x should have n^2 entries. 
We sample 4 times that many points.\n\n if interpolation_type == 'linear':\n T = tri.Triangulation(x, y)\n f = tri.LinearTriInterpolator(T, z)\n j = numpy.complex(0, 1)\n from .parametric_surface import ParametricSurface\n def g(x, y):\n z = f(x, y)\n return (x, y, z)\n G = ParametricSurface(g, (list(numpy.r_[xmin:xmax:num_points*j]), list(numpy.r_[ymin:ymax:num_points*j])), texture=texture, **kwds)\n G._set_extra_kwds(kwds)\n return G\n\n if interpolation_type == 'nn' or interpolation_type =='default':\n\n T=delaunay.Triangulation(x,y)\n f=T.nn_interpolator(z)\n f.default_value=0.0\n j=numpy.complex(0,1)\n vals=f[ymin:ymax:j*num_points,xmin:xmax:j*num_points]\n from .parametric_surface import ParametricSurface\n def g(x,y):\n i=round( (x-xmin)/(xmax-xmin)*(num_points-1) )\n j=round( (y-ymin)/(ymax-ymin)*(num_points-1) )\n z=vals[int(j),int(i)]\n return (x,y,z)\n G = ParametricSurface(g, (list(numpy.r_[xmin:xmax:num_points*j]), list(numpy.r_[ymin:ymax:num_points*j])), texture=texture, **kwds)\n G._set_extra_kwds(kwds)\n return G\n\n if interpolation_type == 'spline':\n from .plot3d import plot3d\n kx = kwds['kx'] if 'kx' in kwds else 3\n ky = kwds['ky'] if 'ky' in kwds else 3\n if 'degree' in kwds:\n kx = kwds['degree']\n ky = kwds['degree']\n s = kwds['smoothing'] if 'smoothing' in kwds else len(x)-numpy.sqrt(2*len(x))\n s = interpolate.bisplrep(x, y, z, [int(1)]*len(x), xmin, xmax, ymin, ymax, kx=kx, ky=ky, s=s)\n f = lambda x,y: interpolate.bisplev(x, y, s)\n return plot3d(f, (xmin, xmax), (ymin, ymax), texture=texture, plot_points=[num_points, num_points], **kwds)",
"def F3d_2_vtkFromat(F3d):\n #asign variables\n [Fx,Fy,Fz] = F3d\n \n #generate the output array\n F3dVTK = N.array([N.zeros(3) for i in range(len(Fx)*len(Fy[0])*len(Fz[0][0]))])\n \n #loop and rearange\n c=0\n for k in range(len(Fz)):\n for j in range(len(Fz[0])):\n for i in range(len(Fz[0][0])):\n #fariables corresponding with the point\n fxn = Fx[k][j][i]\n fyn = Fy[k][j][i]\n fzn = Fz[k][j][i]\n F3dVTK[c] = N.array([fxn,fyn,fzn])\n #update counter \n c = c+1\n \n return F3dVTK"
] | [
"0.6324257",
"0.61952674",
"0.60081536",
"0.6003968",
"0.59578633",
"0.5772959",
"0.5612362",
"0.55938494",
"0.5529308",
"0.552768",
"0.5526545",
"0.55223507",
"0.55000556",
"0.5411602",
"0.54069936",
"0.53909606",
"0.5389578",
"0.53585124",
"0.5356868",
"0.53406954",
"0.53343284",
"0.53251106",
"0.5310632",
"0.5283576",
"0.52764714",
"0.52738726",
"0.5269587",
"0.5254057",
"0.525168",
"0.52485824"
] | 0.8300914 | 0 |
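A minimal usage sketch for the sample3d helper above (illustrative only, not part of the dataset row): it assumes SymPy, NumPy and Matplotlib are installed and that sample3d is importable from the surrounding module, and it plots the sampled surface on Matplotlib's 3d axes.

from sympy.abc import x, y
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 (only needed on older Matplotlib)

# Sample x**2 + y**2 on a 20x20 grid over [-1, 1] x [-1, 1].
X, Y, Z = sample3d(x**2 + y**2, (x, -1, 1, 20), (y, -1, 1, 20))

fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
ax.plot_surface(X, Y, Z)
plt.show()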
Samples a 2d or 3d function over specified intervals and returns a dataset suitable for plotting with Matplotlib's MATLAB-like syntax. Wrapper for sample2d and sample3d. f is a function of one or two variables, such as x**2. var_args are intervals for each variable given in the form (var, min, max, n). | def sample(f, *var_args):
    if len(var_args) == 1:
        return sample2d(f, var_args[0])
    elif len(var_args) == 2:
        return sample3d(f, var_args[0], var_args[1])
    else:
        raise ValueError("Only 2d and 3d sampling are supported at this time.") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sample3d(f, x_args, y_args):\n x, x_min, x_max, x_n = None, None, None, None\n y, y_min, y_max, y_n = None, None, None, None\n try:\n f = sympify(f)\n except SympifyError:\n raise ValueError(\"f could not be interpreted as a SymPy function\")\n try:\n x, x_min, x_max, x_n = x_args\n y, y_min, y_max, y_n = y_args\n except (TypeError, IndexError):\n raise ValueError(\"x_args and y_args must be tuples of the form (var, min, max, intervals)\")\n\n x_l = float(x_max - x_min)\n x_d = x_l/float(x_n)\n x_a = np.arange(float(x_min), float(x_max) + x_d, x_d)\n\n y_l = float(y_max - y_min)\n y_d = y_l/float(y_n)\n y_a = np.arange(float(y_min), float(y_max) + y_d, y_d)\n\n def meshgrid(x, y):\n \"\"\"\n Taken from matplotlib.mlab.meshgrid.\n \"\"\"\n x = np.array(x)\n y = np.array(y)\n numRows, numCols = len(y), len(x)\n x.shape = 1, numCols\n X = np.repeat(x, numRows, 0)\n\n y.shape = numRows, 1\n Y = np.repeat(y, numCols, 1)\n return X, Y\n\n X, Y = np.meshgrid(x_a, y_a)\n\n Z = np.ndarray((len(X), len(X[0])))\n for j in range(len(X)):\n for k in range(len(X[0])):\n try:\n Z[j][k] = float(f.subs(x, X[j][k]).subs(y, Y[j][k]))\n except (TypeError, NotImplementedError):\n Z[j][k] = 0\n return X, Y, Z",
"def sample2d(f, x_args):\n try:\n f = sympify(f)\n except SympifyError:\n raise ValueError(\"f could not be interpreted as a SymPy function\")\n try:\n x, x_min, x_max, x_n = x_args\n except (TypeError, IndexError):\n raise ValueError(\"x_args must be a tuple of the form (var, min, max, n)\")\n\n x_l = float(x_max - x_min)\n x_d = x_l/float(x_n)\n X = np.arange(float(x_min), float(x_max) + x_d, x_d)\n\n Y = np.empty(len(X))\n for i in range(len(X)):\n try:\n Y[i] = float(f.subs(x, X[i]))\n except TypeError:\n Y[i] = None\n return X, Y",
"def __create_sample_data__(npts = 20):\n\t#data function\n\tdef wavy(x, y):\n\t\treturn np.sin(0.2*np.pi*x)*np.cos(0.4*np.pi*y)\n\t\n\t#make grid\n\txs = np.linspace(0, 2*20, 2*npts + 1)\n\tys = np.linspace(0, 20, npts + 1)\n\t(xgrid, ygrid) = np.meshgrid(xs, ys)\n\tzgrid = wavy(xgrid, ygrid)\n\t\n\treturn (xgrid, ygrid, zgrid)",
"def generate_data(values, function=non_linear_fn, length=25, range_=[-1, 1]):\n\n # build x vector\n x = np.linspace(range_[0], range_[1], length)\n\n data = np.zeros((values.shape[0], length))\n\n for i in range(values.shape[0]):\n data[i, :] = function(x, values[i, 0], values[i, 1], values[i, 2])\n\n return data",
"def gendata(params,xmin,xmax,npts=4000):\n F = lorentzian.ForwardFactory\n def gensample(F, xmin, xmax):\n from numpy import arange\n import random\n a = arange(xmin, xmax, (xmax-xmin)/200.)\n ymin = 0\n ymax = F(a).max()\n while 1:\n t1 = random.random() * (xmax-xmin) + xmin\n t2 = random.random() * (ymax-ymin) + ymin\n t3 = F(t1)\n if t2 < t3:\n return t1\n fwd = F(params)\n return array([gensample(fwd, xmin,xmax) for i in xrange(npts)])",
"def random_resample(*args, samples,\n function=None, function_kwargs=None, bundle_args=True,\n replace=True):\n samples_spec = samples.copy() # copy because use pop below\n args_sub = [obj.copy() for obj in args]\n dim_block_1 = [d for d, s in samples_spec.items() if s[1] == 1]\n\n # Do all dimensions with block_size = 1 together\n samples_block_1 = { dim: samples_spec.pop(dim) for dim in dim_block_1 }\n random_samples = {dim: \n np.random.choice(\n len(args_sub[0][dim]),\n size=n,\n replace=replace)\n for dim, (n, _) in samples_block_1.items()}\n args_sub = [obj.isel(\n {dim: random_samples[dim] \n for dim in (set(random_samples.keys()) & set(obj.dims))}) for obj in args_sub]\n\n # Do any remaining dimensions\n for dim, (n, block_size) in samples_spec.items():\n n_blocks = int(n / block_size)\n random_samples = [slice(x,x+block_size) \n for x in np.random.choice(\n len(args_sub[0][dim])-block_size+1, \n size=n_blocks,\n replace=replace)]\n args_sub = [xr.concat([obj.isel({dim: random_sample}) \n for random_sample in random_samples],\n dim=dim) \n if dim in obj.dims else obj \n for obj in args_sub]\n\n if function:\n if bundle_args:\n if function_kwargs is not None:\n res = function(*args_sub, **function_kwargs)\n else:\n res = function(*args_sub)\n else:\n if function_kwargs is not None:\n res = tuple([function(obj, **function_kwargs) for obj in args_sub])\n else:\n res = tuple([function(obj) for obj in args_sub])\n else:\n res = tuple(args_sub,)\n\n if isinstance(res, tuple):\n if len(res) == 1:\n return res[0]\n else:\n return res",
"def sample(self,f,N,p=100):\n return [f(x) for x in np.linspace(0,N,p)]",
"def evaluate_1darray_function_on_2d_array(function, samples, opts=None):\n num_args = get_num_args(function)\n assert samples.ndim == 2\n num_samples = samples.shape[1]\n if num_args == 2:\n values_0 = function(samples[:, 0], opts)\n else:\n values_0 = function(samples[:, 0])\n values_0 = np.atleast_1d(values_0)\n assert values_0.ndim == 1\n num_qoi = values_0.shape[0]\n values = np.empty((num_samples, num_qoi), float)\n values[0, :] = values_0\n for i in range(1, num_samples):\n if num_args == 2:\n values[i, :] = function(samples[:, i], opts)\n else:\n values[i, :] = function(samples[:, i])\n\n return values",
"def plot_multidimensional_function_slices(\n func: Callable[[np.ndarray], NDAorTuple],\n slice_loc: np.ndarray,\n bounds: Union[np.ndarray, List[Tuple[float, float]]],\n input_names: Optional[List[str]] = None,\n obs_points: Optional[Union[np.ndarray, List[np.ndarray]]] = None,\n input_scales: Optional[List[PLOT_SCALE]] = None,\n output_scale: PLOT_SCALE = \"linear\",\n output_label: str = \"Objective Value\",\n size: float = 3,\n slice_2d_resolution: int = 50,\n # slide_1d_resolution: int = 100,\n func_returns_confidence_intervals: bool = False,\n) -> Tuple[plt.Figure, np.ndarray]:\n # Input validation checks\n assert output_scale in [\"linear\", \"log\", \"symlog\"]\n\n def func_return_just_mean(x):\n \"\"\"\n If the supplied function is a predictor returning lower and upper confidence bounds as well as mean,\n return just the mean prediction. If not, return the function value evaluated at x.\n \"\"\"\n return func(x)[0] if func_returns_confidence_intervals else func(x)\n\n n_dims: int = len(bounds)\n # If multiple batches of points supplied as a list in obs_points, make a colour palette\n n_batches = len(obs_points) if isinstance(obs_points, (list, tuple)) else 1\n scatter_colours = sns.color_palette(\"viridis\", n_colors=n_batches)\n # If input_scales not specified, default all to 'linear'\n input_scales = input_scales if input_scales else [\"linear\"] * n_dims # type: ignore # auto\n # Keep track of contour sets returned for each axis\n contour_sets = []\n\n # Construct axes\n fig = plt.figure(figsize=(size * n_dims, size * n_dims))\n axes, cbar_axes = make_lower_triangular_axis_grid_with_colorbar_axes(\n fig=fig, num_cols=n_dims, num_colorbars=2, share_y_on_diagonal=True\n )\n\n # Keep a running minimum and maximum of function values in 2D slices\n func_values_min: float = np.inf\n func_values_max: float = -np.inf\n\n with sns.axes_style(\"darkgrid\"):\n for i in range(n_dims): # i iterates over the rows of the plots\n for j in range(n_dims): # j iterates over the columns of the plots\n ax = axes[i, j]\n # 1D-slice plots along the diagonal\n if i == j:\n if func_returns_confidence_intervals:\n plot_1d_slice_through_function_with_confidence_intervals(\n func, # type: ignore\n dim=i,\n slice_loc=slice_loc,\n slice_bounds=bounds[i],\n ax=ax,\n x_scale=input_scales[i],\n )\n else:\n plot_1d_slice_through_function(\n func, # type: ignore\n dim=i,\n slice_loc=slice_loc,\n slice_bounds=bounds[i],\n ax=ax,\n x_scale=input_scales[i],\n )\n ax.set_yscale(output_scale)\n\n # lower triangle\n elif i > j:\n dim_x, dim_y = j, i\n # Compute the data for the 2D slice plots\n xx, yy, func_values_slice = calc_2d_slice(\n func=func_return_just_mean, # type: ignore # auto\n dim_x=dim_x,\n dim_y=dim_y,\n slice_loc=slice_loc,\n slice_bounds_x=bounds[dim_x],\n slice_bounds_y=bounds[dim_y],\n x_scale=input_scales[dim_x],\n y_scale=input_scales[dim_y],\n resolution=slice_2d_resolution,\n )\n # Plot the 2D slice\n _, im = plot_2d_slice_from_arrays(\n xx,\n yy,\n func_values_slice,\n ax=ax,\n x_scale=input_scales[dim_x],\n y_scale=input_scales[dim_y],\n output_scale=output_scale,\n )\n contour_sets.append(im)\n # Keep a running minimum and maximum of function values in slices\n func_values_min = min(func_values_min, func_values_slice.min()) # type: ignore\n func_values_max = max(func_values_max, func_values_slice.max()) # type: ignore\n # Scatter points on the slices if given\n if obs_points is not None: # pragma: no cover\n if isinstance(obs_points, np.ndarray):\n # If just one array given, scatter with the 
colour reflecting objective value\n ax.scatter(\n obs_points[:, dim_x], obs_points[:, dim_y], color=scatter_colours[0], s=20, zorder=15\n )\n else:\n assert isinstance(obs_points, (list, tuple))\n # If multiple arrays given, colour the points according to the batch number\n for batch_num, batch_arr in enumerate(obs_points):\n ax.scatter(\n batch_arr[:, dim_x],\n batch_arr[:, dim_y],\n color=scatter_colours[batch_num],\n s=25,\n lw=0.0,\n alpha=0.8,\n zorder=15,\n )\n # Add axis labels\n if input_names is not None: # pragma: no cover\n # If plot in the first column (but not first row), add a y_label\n if i != 0 and j == 0:\n axes[i, j].set_ylabel(input_names[i])\n # If plot is at the bottom, add an x_label\n if i == n_dims - 1:\n axes[i, j].set_xlabel(input_names[j])\n if i >= j:\n # Remove redundant ticks on inner plots\n if i != n_dims - 1:\n axes[i, j].xaxis.set_visible(False)\n if j != 0:\n axes[i, j].yaxis.set_visible(False)\n # # Prune the upper-most tick from plot, so that the ticks don't overlap each other between plots\n # ax.yaxis.set_major_locator(ticker.MaxNLocator(prune='upper'))\n ax.tick_params(axis=\"both\", which=\"major\", labelsize=9)\n ax.tick_params(axis=\"both\", which=\"minor\", labelsize=6)\n # Update the colour limits of the slice plots\n for contour_set in contour_sets:\n contour_set.set_clim(vmin=func_values_min, vmax=func_values_max)\n # Add the colourbars\n if n_dims > 1:\n # make a colourbar for the contour plots\n cb1 = fig.colorbar(contour_sets[-1], cax=cbar_axes[0], aspect=50)\n cb1.set_label(output_label)\n cbar_axes[0].yaxis.set_ticks_position(\"left\")\n # make a colourbar for different batches\n if n_batches > 1: # pragma: no cover\n cb2 = matplotlib.colorbar.ColorbarBase( # type: ignore # auto\n cbar_axes[1],\n cmap=matplotlib.colors.ListedColormap(scatter_colours),\n boundaries=[x - 0.5 for x in range(n_batches + 1)],\n ticks=list(range(n_batches)),\n spacing=\"proportional\",\n )\n cb2.set_label(\"Batch Number\")\n else:\n cbar_axes[1].set_visible(False)\n return fig, axes",
"def create_samples(f: Callable[..., int], n_args: int, n_bits: int,\n) -> Dict[Tuple[int, ...], Tuple[int, ...]]:\n samples = {}\n max_arg = 2 ** n_bits\n for inputs in itertools.product((0, 1), repeat=n_args * n_bits):\n ints = [int(\"\".join(str(bit) for bit in inputs[i:i + n_bits]), 2)\n for i in range(0, len(inputs), n_bits)]\n try:\n output = f(*ints)\n if 0 <= output < max_arg:\n bit_string = (\"{:0\" + str(n_bits) + \"b}\").format(output)\n samples[inputs] = tuple(int(bit) for bit in bit_string)\n except ZeroDivisionError:\n pass\n return samples",
"def create_data(f, x_vals):\n y_vals = []\n for i in x_vals:\n y_vals.append(f(x_vals[i]))\n return np.array(y_vals)",
"def get_data_args(data_func, nfuncs):\n if data_func.__name__ == 'gg_1d':\n # first arg is sorted\n if nfuncs == 1:\n data_args = [{'a': 0.75, 'mu': 0.4, 'sigma': 0.3, 'beta': 2.0}]\n elif nfuncs == 2:\n data_args = [{'a': 0.2, 'mu': 0.4, 'sigma': 0.6, 'beta': 5.0},\n {'a': 0.55, 'mu': 0.4, 'sigma': 0.2, 'beta': 4.0}]\n elif nfuncs == 3:\n data_args = [{'a': 0.2, 'mu': 0.4, 'sigma': 0.6, 'beta': 5.0},\n {'a': 0.35, 'mu': 0.6, 'sigma': 0.07, 'beta': 2.0},\n {'a': 0.55, 'mu': 0.32, 'sigma': 0.14, 'beta': 6.0}]\n elif nfuncs == 4:\n data_args = [{'a': 0.2, 'mu': 0.3, 'sigma': 0.5, 'beta': 5.0},\n {'a': 0.4, 'mu': 0.65, 'sigma': 0.07, 'beta': 2.0},\n {'a': 0.6, 'mu': 0.25, 'sigma': 0.1, 'beta': 6.0},\n {'a': 0.9, 'mu': 0.95, 'sigma': 0.1, 'beta': 6.0}]\n elif data_func.__name__ == 'ta_1d':\n # first arg is sorted\n if nfuncs == 1:\n data_args = [{'a': 0.8, 'w_0': 0.0, 'w_1': 1.5}]\n elif nfuncs == 2:\n data_args = [{'a': 0.7, 'w_0': -1, 'w_1': 3},\n {'a': 0.9, 'w_0': 2, 'w_1': -3}]\n elif nfuncs == 3:\n data_args = [\n {'a': 0.6, 'w_0': -7, 'w_1': 8},\n {'a': 1, 'w_0': -1, 'w_1': 3},\n {'a': 1.4, 'w_0': 2, 'w_1': -3}]\n elif data_func.__name__ == 'gg_2d':\n # the order is (with first arg sorted):\n # [a_1, mu1_1, mu2_1, s1_1, s2_1, b1_1, b2_1, rot angle]\n if nfuncs == 1:\n data_args = [\n {'a': 0.8, 'mu1': 0.6, 'mu2': 0.6, 'sigma1': 0.1,\n 'sigma2': 0.2, 'beta1': 2, 'beta2': 2, 'omega': 0.1 * np.pi}]\n elif nfuncs == 2:\n data_args = [\n {'a': 0.5, 'mu1': 0.5, 'mu2': 0.4, 'sigma1': 0.4,\n 'sigma2': 0.2, 'beta1': 2, 'beta2': 2, 'omega': 0},\n {'a': 0.8, 'mu1': 0.5, 'mu2': 0.6, 'sigma1': 0.1,\n 'sigma2': 0.1, 'beta1': 2, 'beta2': 2, 'omega': 0}]\n elif nfuncs == 3:\n data_args = [\n {'a': 0.5, 'mu1': 0.3, 'mu2': 0.7, 'sigma1': 0.2,\n 'sigma2': 0.2, 'beta1': 2, 'beta2': 2, 'omega': 0},\n {'a': 0.7, 'mu1': 0.7, 'mu2': 0.6, 'sigma1': 0.15,\n 'sigma2': 0.15, 'beta1': 2, 'beta2': 2, 'omega': 0},\n {'a': 0.9, 'mu1': 0.4, 'mu2': 0.3, 'sigma1': 0.1,\n 'sigma2': 0.1, 'beta1': 2, 'beta2': 2, 'omega': 0}]\n try:\n data_args_list = []\n for name in bf.get_bf_param_names(data_func):\n data_args_list += [d[name] for d in data_args]\n return data_args_list\n except NameError:\n raise AssertionError('no data args found! func={} nfuncs={}'.format(\n data_func.__name__, nfuncs))",
"def test_exercise_1():\n a, b = 5, 0\n fvals = []\n grid = np.linspace(-3, 4)\n for value in grid:\n fvals.append(get_test_function(value, a, b))\n plt.plot(grid, fvals)",
"def monte_carlo_sample(f, bounds, n_samples):\r\n samples = []\r\n pmax = f(bounds[0])\r\n tries_per_run = int(n_samples*1/pmax)\r\n while len(samples) < n_samples:\r\n x = np.random.rand(tries_per_run)*(bounds[1]-bounds[0])+bounds[0]\r\n y = np.random.rand(tries_per_run)*pmax\r\n good = x[y <= f(x)]\r\n samples = samples + [i for i in x[y <= f(x)]]\r\n return np.array(np.array(samples))[:n_samples]",
"def sample_data_input_fn(params):\n window_size = params['window_size']\n batch_size = params['batch_size']\n\n dataset_names = sample_data.get_data_names()\n all_downsampled = [sample_data.get_downsampled_data(name) for name in dataset_names]\n np_dtype = all_downsampled[0].dtype\n _, num_columns = all_downsampled[0].shape\n assert num_columns == 3\n\n # For each data item, this computes\n time_diffs = [(x[1:, 0] - x[:-1, 0]) for x in all_downsampled]\n median_time_diff = np.median(np.concatenate(time_diffs, axis=0))\n lower, upper = median_time_diff * 0.8, median_time_diff * 1.2\n valid_start_window_indices = [\n get_window_valid_indices(d, lower, upper, window_size) for d in time_diffs\n ]\n for name, valid_indices in zip(dataset_names, valid_start_window_indices):\n if np.size(valid_indices) == 0:\n raise ValueError(\"{} has no valid window ranges\".format(name))\n\n def get_samples_py_op(idx_array):\n assert isinstance(idx_array, np.ndarray)\n assert idx_array.shape == (batch_size, )\n samp_results = np.zeros((batch_size, window_size, num_columns), dtype=np_dtype)\n for i, sample_idx in enumerate(idx_array):\n start_idx = random.choice(valid_start_window_indices[sample_idx])\n samp_results[i, :, :] = all_downsampled[sample_idx][start_idx: (\n start_idx + window_size)]\n assert samp_results.shape == (batch_size, window_size, num_columns)\n return samp_results\n\n def get_window_sample(idx_tensor):\n samples = tf.py_func(get_samples_py_op, [idx_tensor], np_dtype)\n samples.set_shape((batch_size, window_size, num_columns))\n return samples\n\n def random_negative_py_op(idx_array):\n assert isinstance(idx_array, np.ndarray)\n neg_idx_array = np.copy(idx_array)\n for i, idx in enumerate(idx_array):\n while neg_idx_array[i] == idx_array[i]:\n neg_idx_array[i] = random.randint(0, len(all_downsampled) - 1)\n return neg_idx_array\n\n def get_negative_window_sample(idx_tensor):\n neg_idx_tensor = tf.py_func(\n random_negative_py_op,\n [idx_tensor],\n idx_tensor.dtype)\n return get_window_sample(neg_idx_tensor)\n\n # Current sample method: First select sample index, then select window.\n num_samples = len(all_downsampled)\n if num_samples < 2:\n raise ValueError(\"Need at least 2 light curves for negative samples!\")\n dataset = tf.data.Dataset.range(num_samples)\n dataset = dataset.repeat().shuffle(num_samples * 2).batch(batch_size)\n\n positive = dataset.map(lambda idx_tensor: {\n 'left': get_window_sample(idx_tensor),\n 'right': get_window_sample(idx_tensor),\n 'goal': tf.constant([1.0] * batch_size, dtype=tf.float64)\n })\n negative = dataset.map(lambda idx_tensor: {\n 'left': get_window_sample(idx_tensor),\n 'right': get_negative_window_sample(idx_tensor),\n 'goal': tf.constant([0.0] * batch_size, dtype=tf.float64)\n })\n\n # TODO(gatoatigrado): Experiment with shuffling positive & negative within a batch.\n # Currently each batch is just positive or negative.\n assert positive.output_shapes == negative.output_shapes\n assert negative.output_types == positive.output_types\n dataset = tf.contrib.data.sample_from_datasets((positive, negative))\n assert dataset.output_shapes == negative.output_shapes\n return dataset",
"def sample_function(\n function: _vtk.vtkImplicitFunction,\n bounds: Sequence[float] = (-1.0, 1.0, -1.0, 1.0, -1.0, 1.0),\n dim: Sequence[int] = (50, 50, 50),\n compute_normals: bool = False,\n output_type: np.dtype = np.double, # type: ignore\n capping: bool = False,\n cap_value: float = sys.float_info.max,\n scalar_arr_name: str = \"scalars\",\n normal_arr_name: str = \"normals\",\n progress_bar: bool = False,\n):\n samp = _vtk.vtkSampleFunction()\n samp.SetImplicitFunction(function)\n samp.SetSampleDimensions(dim)\n samp.SetModelBounds(bounds)\n samp.SetComputeNormals(compute_normals)\n samp.SetCapping(capping)\n samp.SetCapValue(cap_value)\n samp.SetNormalArrayName(normal_arr_name)\n samp.SetScalarArrayName(scalar_arr_name)\n\n if output_type == np.float64:\n samp.SetOutputScalarTypeToDouble()\n elif output_type == np.float32:\n samp.SetOutputScalarTypeToFloat()\n elif output_type == np.int64:\n if os.name == 'nt':\n raise ValueError('This function on Windows only supports int32 or smaller')\n samp.SetOutputScalarTypeToLong()\n elif output_type == np.uint64:\n if os.name == 'nt':\n raise ValueError('This function on Windows only supports int32 or smaller')\n samp.SetOutputScalarTypeToUnsignedLong()\n elif output_type == np.int32:\n samp.SetOutputScalarTypeToInt()\n elif output_type == np.uint32:\n samp.SetOutputScalarTypeToUnsignedInt()\n elif output_type == np.int16:\n samp.SetOutputScalarTypeToShort()\n elif output_type == np.uint16:\n samp.SetOutputScalarTypeToUnsignedShort()\n elif output_type == np.int8:\n samp.SetOutputScalarTypeToChar()\n elif output_type == np.uint8:\n samp.SetOutputScalarTypeToUnsignedChar()\n else:\n raise ValueError(f'Invalid output_type {output_type}')\n\n _update_alg(samp, progress_bar=progress_bar, message='Sampling')\n return wrap(samp.GetOutput())",
"def iid_sample_fn(*args, **kwargs):\n\n with tf.name_scope('iid_sample_fn'):\n\n seed = kwargs.pop('seed', None)\n if samplers.is_stateful_seed(seed):\n kwargs = dict(kwargs, seed=SeedStream(seed, salt='iid_sample')())\n def pfor_loop_body(_):\n with tf.name_scope('iid_sample_fn_stateful_body'):\n return sample_fn(*args, **kwargs)\n else:\n # If a stateless seed arg is passed, split it into `n` different\n # stateless seeds, so that we don't just get a bunch of copies of the\n # same sample.\n if not JAX_MODE:\n warnings.warn(\n 'Saw Tensor seed {}, implying stateless sampling. Autovectorized '\n 'functions that use stateless sampling may be quite slow because '\n 'the current implementation falls back to an explicit loop. This '\n 'will be fixed in the future. For now, you will likely see '\n 'better performance from stateful sampling, which you can invoke '\n 'by passing a Python `int` seed.'.format(seed))\n seed = samplers.split_seed(seed, n=n, salt='iid_sample_stateless')\n def pfor_loop_body(i):\n with tf.name_scope('iid_sample_fn_stateless_body'):\n return sample_fn(*args, seed=tf.gather(seed, i), **kwargs)\n\n if static_n == 1:\n draws = pfor_loop_body(0)\n else:\n draws = parallel_for.pfor(pfor_loop_body, n)\n return tf.nest.map_structure(unflatten, draws, expand_composites=True)",
"def sample_dimension(data, dimension, n_frames, scheme=\"linear\"):\n d_data = [i[:,dimension][:,np.newaxis] for i in data]\n\n #sort it because all three sampling schemes use it\n\n all_vals = []\n for i in d_data:\n all_vals.extend(i.flatten())\n all_vals = np.sort(all_vals)\n\n #get lineraly placed points\n if scheme==\"linear\":\n max_val = all_vals[-1]\n min_val = all_vals[0]\n spaced_points = np.linspace(min_val, max_val, n_frames)\n\n elif scheme==\"random\":\n spaced_points = np.sort(np.random.choice(all_vals, n_frames))\n\n elif scheme==\"edge\":\n _cut_point = np.int(n_frames / 2)\n spaced_points = np.hstack((all_vals[:_cut_point], all_vals[-_cut_point:]))\n else:\n raise ValueError(\"Scheme has be to one of linear, random or edge\")\n\n tree = KDTree(d_data)\n\n return_vec = []\n for pt in spaced_points:\n dis, ind = tree.query([pt])\n return_vec.append(ind)\n\n return return_vec",
"def n_random_resamples(*args, samples, n_repeats, \n function=None, function_kwargs=None, bundle_args=True, \n replace=True, with_dask=True):\n\n if with_dask & (n_repeats > 1000):\n n_args = itertools.repeat(args[0], times=n_repeats)\n b = db.from_sequence(n_args, npartitions=100)\n rs_list = b.map(random_resample, *(args[1:]), \n **{'samples':samples, 'function':function, \n 'function_kwargs':function_kwargs, 'replace':replace}).compute()\n else: \n resample_ = dask.delayed(random_resample) if with_dask else random_resample\n rs_list = [resample_(*args,\n samples=samples,\n function=function,\n function_kwargs=function_kwargs,\n bundle_args=bundle_args,\n replace=replace) for _ in range(n_repeats)] \n if with_dask:\n rs_list = dask.compute(rs_list)[0]\n \n if all(isinstance(r, tuple) for r in rs_list):\n return tuple([xr.concat([r.unify_chunks() for r in rs], dim='k') for rs in zip(*rs_list)])\n else:\n return xr.concat([r.unify_chunks() for r in rs_list], dim='k')",
"def run_f(df, sample_number):\n samples = normal_custom(df.get(Model.MEAN_KEY), df.get(Model.STD_KEY), n_sample=sample_number) # Normal_custom imported from helper_func\n return samples",
"def blumli(function, resolution, domain):\n\t# fugly hacks galore to determine dimensions\n\tinput_dim = len(inspect.getargspec(function).args)\n\toutsample = function( *range(input_dim))\n\toutput_dim = len(list( outsample )) if isinstance(outsample, collections.Iterable) else 1\n\n\tif input_dim == 1:\n\t\tdomain = [(domain[0], domain[1])]\n\n\tunits = [(domain[dim][1]-domain[dim][0])/resolution for dim in range(input_dim) ]\n\n\t# first layer: for each input dimension, we have resolution-1 neurons, each with input_dim inputs\n\tfirst_layer = []\n\tfor dimension in range(input_dim):\n\t\tinput_weights = [1 if dim==dimension else 0 for dim in range(input_dim)]\n\t\tfirst_layer.extend( [Perceptron([domain[dimension][0]+num*units[dimension]] + input_weights, step_function) for num in range(1,resolution)] )\n\n\tsecond_layer = []\n\txs = { }\n\tfor square in product(range(resolution), repeat=input_dim):\n\t\tweights = [0]*len(first_layer)\n\t\tbias = -0.5\n\t\txvalues = [None]*len(square)\n\t\tfor dimension, area in enumerate(square):\n\t\t\thb = area\n\t\t\tlb = area-1\n\n\t\t\tif lb >= 0:\n\t\t\t\tbias += 1\n\t\t\t\tweights[ (resolution-1)*(dimension) + lb ] = 1\n\n\t\t\tif hb < (resolution-1):\n\t\t\t\tbias += 1\n\t\t\t\tweights[ (resolution-1)*(dimension) + hb ] = -1\n\n\t\t\tmidpoint = lb+0.5 if lb>=0 else hb-0.5\n\t\t\txvalues[dimension] = domain[dimension][0] + (1+midpoint)*units[dimension]\n\n\t\tneuron = Perceptron([bias]+weights, zero_step_function)\n\t\tsecond_layer.append( neuron )\n\t\txs[neuron] = xvalues\n\n\tthird_layer = [\n\t\tPerceptron( [0] + [\n\t\t\tfunction(*xs[neuron]) if output_dim==1 else function(*xs[neuron])[outdim] for neuron in second_layer\n\t\t], identity ) for outdim in range(output_dim)\n\t]\n\n\treturn FFNN([first_layer, second_layer, third_layer])",
"def sample_grid(variable='snow_depth', month=None):\n\n my_func = {'snow_depth': snow_depth,\n 'swe': swe}\n\n lat, lon = np.linspace(65.,90.,20), np.linspace(0.,359.,360)\n \n if not month:\n month = np.arange(1,13)\n else:\n month = np.array(month)\n\n x, y = np.meshgrid(lon, lat)\n\n if month.size == 1:\n da = xr.DataArray(my_func[variable](x,y,month),\n coords={'lat': lat, 'lon': lon},\n dims=['lat', 'lon'])\n else:\n da = xr.DataArray([my_func[variable](x, y, m) for m in month],\n coords={'month': month, 'lat': lat, 'lon': lon},\n dims=['month', 'lat', 'lon'])\n return da",
"def rng_fn_scipy(cls, rng, *args, **kwargs):",
"def interpolate(f, Q, method='linear', y_transect=None):\n if isinstance(f, (ufl.core.expr.Expr, firedrake.Function)):\n return firedrake.interpolate(f, Q)\n\n mesh = Q.mesh()\n element = Q.ufl_element()\n if len(element.sub_elements()) > 0:\n element = element.sub_elements()[0]\n\n V = firedrake.VectorFunctionSpace(mesh, element)\n X = firedrake.interpolate(mesh.coordinates, V).dat.data_ro\n\n q = firedrake.Function(Q)\n\n if isinstance(f, rasterio.DatasetReader):\n q.dat.data[:] = _sample(f, X, method, y_transect)\n elif (isinstance(f, tuple)\n and all(isinstance(fi, rasterio.DatasetReader) for fi in f)):\n for i, fi in enumerate(f):\n q.dat.data[:, i] = _sample(fi, X, method, y_transect)\n else:\n raise ValueError('Argument must be a rasterio data set or a tuple of '\n 'data sets!')\n\n return q",
"def linear_function_dataset(a, b, n=100, show_plot=False):\n x = torch.randn(n, 1)\n y = a*x + b + 0.1*torch.randn(n, 1)\n if show_plot:\n show_TensorFunction1D(x, y, marker='.')\n return TensorDataset(x, y)",
"def nonlinear_function_dataset(n=100, show_plot=False):\n x = torch.rand(n, 1)*20 - 10 # Random values between [-10 and 10]\n y = (-1/100)*x**7 -x**4 -2*x**2 -4*x + 1 + 0.1*torch.randn(n, 1)\n if show_plot:\n show_TensorFunction1D(x, y, marker='.')\n return TensorDataset(x, y)",
"def plot(\n self,\n function: Callable[[float], float],\n x_range: Sequence[float] | None = None,\n use_vectorized: bool = False,\n **kwargs,\n ):\n\n t_range = np.array(self.x_range, dtype=float)\n if x_range is not None:\n t_range[: len(x_range)] = x_range\n\n if x_range is None or len(x_range) < 3:\n # if t_range has a defined step size, increase the number of sample points per tick\n t_range[2] /= self.num_sampled_graph_points_per_tick\n # For axes, the third coordinate of x_range indicates\n # tick frequency. But for functions, it indicates a\n # sample frequency\n\n graph = ParametricFunction(\n lambda t: self.coords_to_point(t, function(t)),\n t_range=t_range,\n scaling=self.x_axis.scaling,\n use_vectorized=use_vectorized,\n **kwargs,\n )\n graph.underlying_function = function\n return graph",
"def makePLDS(T, x_0, f, g, Dx, Dy):\n\tX = np.zeros((T, Dx))\n\tY = np.zeros((T, Dy))\n\n\tX[0] = x_0\n\tY[0] = g.sample(x_0)\n\tfor t in range(1,T):\n\t\tX[t] = f.sample(X[t-1])\n\t\tY[t] = g.sample(X[t])\n\treturn X, Y",
"def data_fun(times, n_dipoles=4):\n n = 0 # harmonic number\n n_samp = len(times)\n window = np.zeros(n_samp)\n start, stop = [int(ii * float(n_samp) / (2 * n_dipoles)) for ii in (2 * n, 2 * n + 1)]\n window[start:stop] = 1.0\n n += 1\n data = 25e-9 * np.sin(2.0 * np.pi * 10.0 * n * times)\n data *= window\n return data",
"def sampleFunction(x: int, y: float) -> float:\n return x * y"
] | [
"0.7615295",
"0.70778996",
"0.6081689",
"0.57048494",
"0.56257445",
"0.55822027",
"0.55651504",
"0.55339074",
"0.54328907",
"0.53981596",
"0.5369118",
"0.53515136",
"0.53430045",
"0.5319749",
"0.5275369",
"0.5249938",
"0.52404726",
"0.52143115",
"0.52082497",
"0.51295596",
"0.5065049",
"0.5063601",
"0.5039212",
"0.50357884",
"0.5035016",
"0.5013174",
"0.5011391",
"0.5002498",
"0.49957132",
"0.49888596"
] | 0.7539873 | 1 |
Default reducer for distinctions. Expects all distinctions to follow | def __reduce__(self):
return instanceReducer(self) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __reduce__(self):\n\t\treturn self.__class__, (self.dist, self.frozen)",
"def evaluation_reducer(self) -> Union[Reducer, Dict[str, Reducer]]:\n return Reducer.AVG",
"def __reduce__(self): # real signature unknown; restored from __doc__\r\n pass",
"def _reduce(self, action):\n assert len(self.stack) >= 2, \"ERROR: Cannot reduce with stack length less than 2\"\n \n # STUDENT\n # hint: use list.pop()\n # END STUDENT\n rightarc = self.stack.pop()\n leftarc = self.stack.pop()\n head = rightarc if action == Actions.REDUCE_L else leftarc\n mod = leftarc if action == Actions.REDUCE_L else rightarc\n self.stack.append( StackEntry(head.headword, head.headword_pos, self.combiner(head.embedding,mod.embedding)) )\n return DepGraphEdge((head.headword, head.headword_pos),(mod.headword, mod.headword_pos))",
"def __reduce__(self, *args, **kwargs): # real signature unknown\n pass",
"def __reduce__(self, *args, **kwargs): # real signature unknown\n pass",
"def __reduce__(self, *args, **kwargs): # real signature unknown\n pass",
"def __reduce__(self, *args, **kwargs): # real signature unknown\n pass",
"def __reduce__(self, *args, **kwargs): # real signature unknown\n pass",
"def reduce(self, reduce_op, value, axis): # pylint: disable=useless-super-delegation\n return super(OneDeviceStrategy, self).reduce(reduce_op, value, axis)",
"def __reduce__(self, *args, **kwargs) -> Any:\n ...",
"def set_gate_reducer(self, reducer: ReductionRule):\n self.gate_reducer_ = reducer",
"def toposorted_actions(self) -> Iterable[Action]:\n # Here we execute two \"nanopasses\" (a term borrowed from compiler implementation)\n #\n # 1. Traverse a values-and-actions graph, reducing it to a dependency graph containing actions\n #\n # 2. Perform a toposort over actions (using Kahn's algorithm https://en.wikipedia.org/wiki/Topological_sorting)\n #\n # TODO: switch to graphlib from standard library\n #\n # TODO: Consider using Tarjan's strongly connected components algorithm\n # Rationale: Tarjan's SCC would find loops and produce a helpful diagnostic\n\n # 1. Dependency graph representation optimized for toposort\n o: dict[Action, set[Action]] = {} # for actions: action -> set of outgoing dependency edges\n i: dict[Action, set[Action]] = {} # for actions: action -> set of incoming dependency edges\n\n # set of nodes without incoming edges\n s: Set[Action] = set()\n\n # 1. Transform execution plan into dependency graph\n for action in self.actions:\n # if action does not depend on any other action, add it to set s\n if all(inp.producer() is None for inp in action.inputs()):\n s.add(action)\n # add outgoing edges to graph, if any\n for output in action.outputs():\n for depending_action in output.consumers():\n # add an edge action -> depending_action to the graph\n if action not in o:\n o[action] = set()\n if depending_action not in i:\n i[depending_action] = set()\n o[action].add(depending_action)\n i[depending_action].add(action)\n\n # 2. Now run Kahn's algorithm (could be separated from previous to improve abstraction)\n # resulting list\n l: list[Action] = []\n\n while len(s) > 0:\n n = s.pop()\n l.append(n)\n if n in o:\n o_n = o[n]\n del o[n]\n else:\n o_n = set()\n while len(o_n) > 0:\n # remove edge from the graph\n m = o_n.pop()\n i[m].remove(n)\n if len(i[m]) == 0:\n del i[m]\n s.add(m)\n\n if len(o) != 0 or len(i) != 0:\n for (node, edges) in o.items():\n print(\"Source: \" + str(node))\n for e in edges:\n print(\" Edge: \" + str(e))\n raise Exception(\"Dependency graph has at least one cycle\")\n else:\n return l",
"def category_reducer(category):\n if not \"--\" in category:\n if category in BAD_CATEGORIES:\n return \"Unknown\"\n return category\n\n main, sub = category.split(\"--\")\n\n main = main.strip()\n if main in [\"Science\"]:\n return sub.strip()\n else:\n return main",
"def _call_reduce_action(self, context, subresults):\n debug = self.debug\n result = None\n bt_result = None\n production = context.production\n\n if self.build_tree:\n # call action for building tree node if enabled.\n if debug:\n h_print(\"Building non-terminal node\",\n \"'{}'.\".format(production.symbol.name), level=2)\n\n bt_result = NodeNonTerm(context, children=subresults,\n production=production)\n context.node = bt_result\n if not self.call_actions_during_tree_build:\n return bt_result\n\n sem_action = production.symbol.action\n if sem_action:\n assignments = production.assignments\n if assignments:\n assgn_results = {}\n for a in assignments.values():\n if a.op == '=':\n assgn_results[a.name] = subresults[a.index]\n else:\n assgn_results[a.name] = bool(subresults[a.index])\n\n if type(sem_action) is list:\n if assignments:\n result = sem_action[production.prod_symbol_id](\n context, subresults, **assgn_results)\n else:\n result = sem_action[production.prod_symbol_id](context,\n subresults)\n else:\n if assignments:\n result = sem_action(context, subresults, **assgn_results)\n else:\n result = sem_action(context, subresults)\n\n else:\n if debug:\n h_print(\"No action defined\",\n \" for '{}'.\".format(production.symbol.name), level=1)\n if len(subresults) == 1:\n if debug:\n h_print(\"Unpacking a single subresult.\", level=1)\n result = subresults[0]\n else:\n if debug:\n h_print(\"Result is a list of subresults.\", level=1)\n result = subresults\n\n if debug:\n h_print(\"Action result =\",\n \"type:{} value:{}\"\n .format(type(result), repr(result)), level=1)\n\n # If build_tree is set to True, discard the result of the semantic\n # action, and return the result of treebuild_reduce_action.\n return bt_result if bt_result is not None else result",
"def reduce(self, app, nodes, result):",
"def _reduce_distances(self, threshold):\n reduced = self.orig_dists.copy()\n reduced[reduced <= threshold] = 0\n # Remove ignored from all consideration\n ignrd_indices = [self.index[name] for name in self.ignored]\n if ignrd_indices:\n reduced[:,ignrd_indices] = np.inf\n reduced[ignrd_indices,:] = np.inf\n # Check if the given parameters are feasible\n chsn_indices = set(self.index[name] for name in self.chosen)\n avail_indices = set(self.index[name] for name in self.available)\n ca_indices = chsn_indices | avail_indices\n unassigned_indices = np.array(list(self._not_ignored_inds - ca_indices))\n if len(unassigned_indices) == 0:\n unassigned_orphans = unassigned_indices\n else:\n ca_indices = list(ca_indices)\n avail_in_range = np.count_nonzero(reduced[np.ix_(unassigned_indices,ca_indices)] == 0, axis=1)\n unassigned_orphans = unassigned_indices[avail_in_range == 0]\n return reduced, unassigned_orphans",
"def allreduce_hook(state: AllReduceState, grad: torch.Tensor):\n if state.gradient_predivide_factor > 1:\n grad.div_(state.gradient_predivide_factor)\n dist.all_reduce(grad, group=state.process_group)\n if state.gradient_postdivide_factor > 1:\n grad.div_(state.gradient_postdivide_factor)",
"def optimize_actions(actions):\n result = {}\n\n def donothing(oid, index_oid, action1, action2):\n del result[(oid, index_oid)]\n\n def doadd(oid, index_oid, action1, action2):\n result[(oid, index_oid)] = action1\n\n def dochange(oid, index_oid, action1, action2):\n result[(oid, index_oid)] = ReindexAction(\n action2.index, action2.mode, oid,\n )\n\n def dodefault(oid, index_oid, action1, action2):\n result[(oid, index_oid)] = action2\n\n statefuncs = {\n # txn asked to remove an object that previously it was\n # asked to add, conclusion is to do nothing\n (IndexAction, UnindexAction):donothing,\n # txn asked to change an object that was not previously added,\n # concusion is to just do the add\n (IndexAction, ReindexAction):doadd,\n # txn action asked to remove an object then readd the same\n # object. We translate this to a single change action.\n (UnindexAction, IndexAction):dochange,\n }\n\n for newaction in actions:\n oid = newaction.oid\n index_oid = newaction.index_oid\n oldaction = result.get((oid, index_oid))\n statefunc = statefuncs.get(\n (oldaction.__class__, newaction.__class__),\n dodefault,\n )\n statefunc(oid, index_oid, oldaction, newaction)\n\n result = list(sorted(result.values()))\n return result",
"def _get_reduction(self):\n if (not self._allow_sum_over_batch_size and\n distribute_lib.has_strategy() and\n (self.reduction == losses_utils.ReductionV2.AUTO or\n self.reduction == losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE)):\n raise ValueError(\n 'Please use `tf.keras.losses.Reduction.SUM` or '\n '`tf.keras.losses.Reduction.NONE` for loss reduction when losses are '\n 'used with `tf.distribute.Strategy` outside of the built-in training '\n 'loops. You can implement '\n '`tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` using global batch '\n 'size like:\\n```\\nwith strategy.scope():\\n'\n ' loss_obj = tf.keras.losses.CategoricalCrossentropy('\n 'reduction=tf.keras.losses.Reduction.NONE)\\n....\\n'\n ' loss = tf.reduce_sum(loss_obj(labels, predictions)) * '\n '(1. / global_batch_size)\\n```\\nPlease see '\n 'https://www.tensorflow.org/tutorials/distribute/custom_training'\n ' for more details.')\n\n if self.reduction == losses_utils.ReductionV2.AUTO:\n return losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE\n return self.reduction",
"def _call_reduce_action(self, context, subresults):\n debug = self.debug\n result = None\n bt_result = None\n production = context.production\n\n if self.build_tree:\n # call action for building tree node if enabled.\n if debug:\n h_print(\"Building non-terminal node\",\n \"'{}'.\".format(production.symbol.name), level=2)\n\n bt_result = treebuild_reduce_action(context, nodes=subresults)\n if not self.call_actions_during_tree_build:\n return bt_result\n\n sem_action = production.symbol.action\n if sem_action:\n assignments = production.assignments\n if assignments:\n assgn_results = {}\n for a in assignments.values():\n if a.op == '=':\n assgn_results[a.name] = subresults[a.index]\n else:\n assgn_results[a.name] = bool(subresults[a.index])\n\n if type(sem_action) is list:\n if assignments:\n result = sem_action[production.prod_symbol_id](\n context, subresults, **assgn_results)\n else:\n result = sem_action[production.prod_symbol_id](context,\n subresults)\n else:\n if assignments:\n result = sem_action(context, subresults, **assgn_results)\n else:\n result = sem_action(context, subresults)\n\n else:\n if debug:\n h_print(\"No action defined\",\n \" for '{}'.\".format(production.symbol.name), level=1)\n if len(subresults) == 1:\n if debug:\n h_print(\"Unpacking a single subresult.\", level=1)\n result = subresults[0]\n else:\n if debug:\n h_print(\"Result is a list of subresults.\", level=1)\n result = subresults\n\n if debug:\n h_print(\"Action result =\",\n \"type:{} value:{}\"\n .format(type(result), repr(result)), level=1)\n\n # If build_tree is set to True, discard the result of the semantic\n # action, and return the result of treebuild_reduce_action.\n return bt_result if bt_result is not None else result",
"def action_map(self) -> Dict[str, CLIActionType]:\n return add_dicts({\n \"dump\": self.dump_action,\n \"dump-macrosizes\": self.dump_macrosizes_action,\n \"dump_macrosizes\": self.dump_macrosizes_action,\n \"synthesis\": self.synthesis_action,\n \"syn\": self.synthesis_action,\n \"par\": self.par_action,\n \"synthesis_to_par\": self.synthesis_to_par_action,\n \"synthesis-to-par\": self.synthesis_to_par_action,\n \"syn_to_par\": self.synthesis_to_par_action,\n \"syn-to-par\": self.synthesis_to_par_action,\n \"synthesis_par\": self.synthesis_par_action,\n \"synthesis-par\": self.synthesis_par_action,\n \"syn_par\": self.synthesis_par_action,\n \"syn-par\": self.synthesis_par_action,\n \"hier_par_to_syn\": self.hier_par_to_syn_action,\n \"hier-par-to-syn\": self.hier_par_to_syn_action,\n \"par_to_drc\": self.par_to_drc_action,\n \"par-to-drc\": self.par_to_drc_action,\n \"par_to_lvs\": self.par_to_lvs_action,\n \"par-to-lvs\": self.par_to_lvs_action,\n \"drc\": self.drc_action,\n \"lvs\": self.lvs_action\n }, self.all_hierarchical_actions)",
"def reducer(token_pair):\n\treturn (token_pair[0], sum(token_pair[1]))",
"def reducer(state: State, action: Action) -> State:\n state = copy.deepcopy(state)\n if isinstance(state, dict):\n state = forest.state.State.from_dict(state)\n if isinstance(action, dict):\n try:\n action = forest.actions.Action.from_dict(action)\n except TypeError:\n return state.to_dict()\n\n if action.kind == SET_FIGURES:\n state.layers.figures = action.payload\n\n elif action.kind == ON_ADD:\n state.layers.mode.state = \"add\"\n\n elif action.kind == ON_CLOSE:\n row_index = action.payload\n try:\n layer_index = sorted(state.layers.index.keys())[row_index]\n del state.layers.index[layer_index]\n except IndexError:\n pass\n\n elif action.kind == ON_EDIT:\n row_index = action.payload\n layer_index = sorted(state.layers.index.keys())[row_index]\n state.layers.mode.state = \"edit\"\n state.layers.mode.index = layer_index\n\n elif action.kind == SAVE_LAYER:\n # NOTE: Layer index is stored in payload\n layer_index = action.payload[\"index\"]\n settings = action.payload[\"settings\"]\n if layer_index in state.layers.index:\n state.layers.index[layer_index].update(settings)\n else:\n state.layers.index[layer_index] = settings\n\n elif action.kind == SET_ACTIVE:\n active = action.payload[\"active\"]\n row_index = action.payload[\"row_index\"]\n row_to_layer = sorted(state.layers.index.keys())\n try:\n layer_index = row_to_layer[row_index]\n state.layers.index[layer_index][\"active\"] = active\n except IndexError:\n pass\n\n return state.to_dict()",
"def reduce_run():",
"def __reduce__(\n self: TokenMatcher,\n ) -> Tuple[Any, Any]: # Precisely typing this would be really long.\n data = (\n self.__class__,\n self.vocab,\n self._patterns,\n self._callbacks,\n self.defaults,\n )\n return (unpickle_matcher, data)",
"def reveal(self, dst=None):\n op = torch.distributed.ReduceOp.BXOR\n if dst is None:\n return comm.get().all_reduce(self.share, op=op)\n else:\n return comm.get().reduce(self.share, dst, op=op)",
"def reduce_tree(self, handle_actions, terminal_converter=None):\r\n def handle_node(node):\r\n \"\"\"\r\n Return the result representing the node, using recursion.\r\n\r\n Call the appropriate `handle_action` for this node. As its inputs,\r\n feed it the output of `handle_node` for each child node.\r\n \"\"\"\r\n if not isinstance(node, ParseResults):\r\n # Then treat it as a terminal node.\r\n if terminal_converter is None:\r\n return node\r\n else:\r\n return terminal_converter(node)\r\n\r\n node_name = node.getName()\r\n if node_name not in handle_actions: # pragma: no cover\r\n raise Exception(u\"Unknown branch name '{}'\".format(node_name))\r\n\r\n action = handle_actions[node_name]\r\n handled_kids = [handle_node(k) for k in node]\r\n return action(handled_kids)\r\n\r\n # Find the value of the entire tree.\r\n return handle_node(self.tree)",
"def reducer2():\n\n riders = 0 # The number of total riders for this key\n num_hours = 0 # The number of hours with this key\n old_key = None\n\n for line in sys.stdin:\n data = line.strip().split('\\t')\n\n if len(data) != 2:\n continue\n\n new_key, count = data\n\n if old_key and old_key != new_key:\n print('{}\\t{}'.format(old_key, riders / num_hours))\n riders = 0\n num_hours = 0\n\n riders += float(count)\n num_hours += 1\n old_key = new_key\n\n if old_key is not None:\n print('{}\\t{}'.format(old_key, riders / num_hours))",
"def applyDemapping(self):\n pass"
] | [
"0.52857256",
"0.5162497",
"0.5107433",
"0.5057547",
"0.5027394",
"0.5027394",
"0.5027394",
"0.5027394",
"0.5027394",
"0.49870852",
"0.49124625",
"0.48178238",
"0.47693735",
"0.47630015",
"0.47151983",
"0.4696542",
"0.46933955",
"0.4672102",
"0.46205962",
"0.46201527",
"0.4614642",
"0.45813182",
"0.4547565",
"0.45459363",
"0.45360172",
"0.45087424",
"0.44910946",
"0.44584632",
"0.44497505",
"0.44495073"
] | 0.5502056 | 0 |
For conjugate distinctions this should be overridden and return the base distinctions used. For non-conjugate distinctions it will automatically return an empty list. | def getBaseDistinctions(self):
return [] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_conjugate_bases_of(chebi_ent):\n if hasattr(chebi_ent, 'OntologyParents'):\n return [ent.chebiId for ent in chebi_ent.OntologyParents if\n (ent.type == \"is conjugate base of\")]\n else:\n return []",
"def conjugate(self):\n pass",
"def conjugate(self, ???):",
"def conjugate(self):\n v = zeros_como(self)\n for x in range(self.n):\n v[x] = (self[x]).conjugate()\n\n return v",
"def conjugate(self):\r\n return self.__class__(self._real, -self._imag)",
"def conjugate(self):\n return self.rotate().dagger()",
"def conjugate(self, *args, **kwargs): # real signature unknown\n pass",
"def conjugate(self, *args, **kwargs): # real signature unknown\n pass",
"def conjugate(self, *args, **kwargs): # real signature unknown\n pass",
"def conjugate(self, *args, **kwargs): # real signature unknown\n pass",
"def conjugate(self, *args, **kwargs): # real signature unknown\n pass",
"def conjugate(self, *args, **kwargs): # real signature unknown\n pass",
"def conjugate(self, *args, **kwargs): # real signature unknown\n pass",
"def conjugate(self, *args, **kwargs): # real signature unknown\n pass",
"def conjugate(self, *args, **kwargs): # real signature unknown\n pass",
"def conjugate(self, *args, **kwargs): # real signature unknown\n pass",
"def conjugate(self, *args, **kwargs): # real signature unknown\n pass",
"def conjugate(self, *args, **kwargs): # real signature unknown\n pass",
"def conjugate(self, *args, **kwargs): # real signature unknown\n pass",
"def conjugate(self, *args, **kwargs): # real signature unknown\n pass",
"def conjugate(self, *args, **kwargs): # real signature unknown\n pass",
"def conjugate(self, *args, **kwargs): # real signature unknown\n pass",
"def base(self):\n if self._base == []:\n self.schreier_sims()\n return self._base",
"def conjugate(self) -> JaggedArray:\n return self._unitary_op(np.conjugate)",
"def conjugate_irregular_tenses(self):\n conjugations = [ None ] * len(Tense)\n def __look_for_overrides(verb): \n overrides = [ override_attribute.key for override_attribute in ConjugationOverrideProperty.all_except(ConjugationOverrideProperty.conjugation_joins) if hasattr(verb, override_attribute.key)]\n if len(overrides) == 0:\n return None\n \n for attr_name in overrides:\n for tense in range(len(Tense)):\n override = getattr(verb, attr_name)\n if override[tense] is None:\n continue\n \n if tense in Tense.Person_Agnostic():\n if conjugations[tense] is None:\n conjugations[tense] = self.verb_for_derived.conjugate_tense(tense)\n else:\n for person in range(len(Person)):\n if override[tense][person] is not None:\n if conjugations[tense] is None:\n conjugations[tense] = [ None ] * len(Person)\n if conjugations[tense][person] is None:\n conjugations[tense][person] = self.verb_for_derived.conjugate(tense, person)\n __look_for_overrides(self)\n if self.base_verb is not None:\n __look_for_overrides(self.base_verb)\n return conjugations",
"def conjugate(self, x):\n\n a = self.array_form\n b = x.array_form\n n = len(a)\n if len(b) != n:\n raise ValueError(\"The number of elements in the permutations \\\ndon\\'t match.\")\n invb = [None]*n\n for i in xrange(n):\n invb[b[i]] = i\n return _new_from_array_form([invb[a[i]] for i in b])",
"def get_base_coefs(mv):\n\trs = []\n\tfor bs in bases:\n\t\tt = []\n\t\tfor b in bs:\n\t\t\tt.append(mv.coef(b))\n\t\t\t\t\t\n\t\trs.append(t)\t\t\n\treturn rs",
"def conjugate(self):\n return self.__class__(scalar=self.scalar, vector= -self.vector)",
"def conjugate(self) -> 'MultiVector':\n\n return (~self).gradeInvol()",
"def conjugate(self):\n return Complex(self._reNum, -self._imNum)"
] | [
"0.6720197",
"0.64957684",
"0.63367724",
"0.6140327",
"0.6048907",
"0.6023874",
"0.5890595",
"0.5890595",
"0.5890595",
"0.5890595",
"0.5890595",
"0.5890595",
"0.5890595",
"0.5890595",
"0.5890595",
"0.5890595",
"0.5890595",
"0.5890595",
"0.5890595",
"0.5890595",
"0.5890595",
"0.5890595",
"0.5865215",
"0.5845415",
"0.5821366",
"0.5820317",
"0.5815315",
"0.5783399",
"0.57787544",
"0.5774332"
] | 0.6723077 | 0 |
Generates a random distinction of this type that is valid for the schema config.schema and for the given graphs. This function must take graphs as its first argument, and if it's a conjugate distinction it must then take, as separate args, not a tuple, | def getRandomDistinction(config, graphs, *base_distinctions):
raise AbstractMethodException(Distinction) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_regular_graph(variable_names, dist_func, num_neigh=10, **kwargs):\n shuffle(variable_names)\n num_vars = len(variable_names)\n num_neigh = min(num_neigh, num_vars-1)\n graphs = nx.random_graphs.random_regular_graph(num_neigh, num_vars)\n edges = np.array(graphs.edges())\n edges.sort(axis=-1)\n\n return graph_from_edges(variable_names, dist_func, edges)",
"def generate_full(variable_names, dist_func, **kwargs):\n return generate_random_graph(variable_names, dist_func, edge_prob=1.0)",
"def simulate_random_dag(d: int,\n degree: float,\n graph_type: str,\n w_range: tuple = (0.5, 2.0)) -> nx.DiGraph:\n if graph_type == 'erdos-renyi':\n prob = float(degree) / (d - 1)\n B = np.tril((np.random.rand(d, d) < prob).astype(float), k=-1)\n elif graph_type == 'barabasi-albert':\n m = int(round(degree / 2))\n B = np.zeros([d, d])\n bag = [0]\n for ii in range(1, d):\n dest = np.random.choice(bag, size=m)\n for jj in dest:\n B[ii, jj] = 1\n bag.append(ii)\n bag.extend(dest)\n elif graph_type == 'full': # ignore degree, only for experimental use\n B = np.tril(np.ones([d, d]), k=-1)\n else:\n raise ValueError('unknown graph type')\n # random permutation\n P = np.random.permutation(np.eye(d, d)) # permutes first axis only\n B_perm = P.T.dot(B).dot(P)\n U = 1*np.random.uniform(low=w_range[0], high=w_range[1], size=[d, d])\n U[np.random.rand(d, d) < 0.5] *= -1\n W = (B_perm != 0).astype(float) * U\n G = nx.DiGraph(W)\n return G",
"def test_unique_graph(self):\n g0_graph = tf.Graph()\n with g0_graph.as_default():\n tf.constant(1, name=\"a\")\n tf.constant(2, name=\"b\")\n g1_graph = tf.Graph()\n with g1_graph.as_default():\n tf.constant(1, name=\"a\")\n tf.constant(2, name=\"b\")\n\n g0 = gde.Graph(g0_graph.as_graph_def())\n g1 = gde.Graph(g1_graph.as_graph_def())\n a0, b0, a1, b1 = (g0[\"a\"], g0[\"b\"], g1[\"a\"], g1[\"b\"])\n\n print(\"g0['a'] returns {} (type {})\".format(g0['a'], type(g0['a'])))\n\n # Same graph, should be fine.\n self.assertIsNone(gde.util.check_graphs(a0, b0))\n # Two different graphs, should assert.\n with self.assertRaises(ValueError):\n gde.util.check_graphs(a0, b0, a1, b1)\n # a0 and b0 belongs to the same graph, should be fine.\n self.assertEqual(gde.util.get_unique_graph([a0, b0]), g0)\n # Different graph, should raise an error.\n with self.assertRaises(ValueError):\n gde.util.get_unique_graph([a0, b0, a1, b1])",
"def generate_random_graph(variable_names, dist_func, edge_prob, connected=False, max_parents=-1, num_latents=0, **kwargs):\n shuffle(variable_names) # To have a random order\n num_vars = len(variable_names)\n\n # Generate random adjacency matrix with specified edge probability\n adj_matrix = np.random.binomial(n=1, p=edge_prob, size=(num_vars, num_vars))\n\n # Make sure that adjacency matrix is half diagonal\n for v_idx in range(num_vars):\n adj_matrix[v_idx, :v_idx+1] = 0\n\n # Nodes that do not have any parents or children are connected\n for v_idx in range(num_vars):\n has_connection = (adj_matrix[v_idx, :].any() or adj_matrix[:, v_idx].any())\n if not has_connection:\n con_idx = np.random.randint(num_vars-1)\n if con_idx >= v_idx:\n con_idx += 1\n adj_matrix[v_idx, con_idx] = True\n else:\n adj_matrix[con_idx, v_idx] = True\n\n # Ensure that a node has less than N parents\n if max_parents > 0:\n for v_idx in range(adj_matrix.shape[0]):\n num_parents = adj_matrix[:, v_idx].sum()\n if num_parents > max_parents:\n indices = np.where(adj_matrix[:, v_idx] == 1)[0]\n indices = indices[np.random.permutation(indices.shape[0])[:num_parents-max_parents]]\n adj_matrix[indices, v_idx] = 0\n\n # Connect nodes to one connected graph\n if connected:\n visited_nodes, connected_nodes = [], [0]\n while len(visited_nodes) < num_vars:\n while len(connected_nodes) > 0:\n v_idx = connected_nodes.pop(0)\n children = np.where(adj_matrix[v_idx, :])[0].tolist()\n parents = np.where(adj_matrix[:, v_idx])[0].tolist()\n neighbours = children + parents\n for n in neighbours:\n if (n not in visited_nodes) and (n not in connected_nodes):\n connected_nodes.append(n)\n if v_idx not in visited_nodes:\n visited_nodes.append(v_idx)\n if len(visited_nodes) < num_vars:\n node1 = np.random.choice(np.array(visited_nodes))\n node2 = np.random.choice(np.array([i for i in range(num_vars) if i not in visited_nodes]))\n adj_matrix[min(node1, node2), max(node1, node2)] = True\n connected_nodes.append(node1)\n\n # Add latent confounders \n if num_latents > 0:\n # Latent confounders are identified by their variable name \"X_{l,...}\"\n variable_names = [r\"$X_{l,%i}$\" % (i+1) for i in range(num_latents)] + variable_names\n # Latent confounders are added in the graph structure. When exporting the graph, \n # we remove those variables so that we can apply our structure learning algorithm\n # without any changes.\n node_idxs = [v_idx+num_latents for v_idx in range(num_vars)\n if (adj_matrix[:, v_idx].sum() < max_parents or max_parents <= 0)]\n adj_matrix = np.concatenate([np.zeros((num_latents, num_vars)), adj_matrix], axis=0)\n adj_matrix = np.concatenate([np.zeros((num_vars+num_latents, num_latents)), adj_matrix], axis=1)\n # Randomly select the node pairs on which we want to have a latent confounder\n latent_children = []\n for l in range(num_latents):\n node_pair = None\n # We sample unique node pairs where there exists no direct edge between both nodes\n while node_pair is None or node_pair in latent_children or adj_matrix[node_pair[0], node_pair[1]]:\n node_pair = random.sample(node_idxs, k=2)\n node_pair = sorted(node_pair)\n latent_children.append(node_pair)\n adj_matrix[l, node_pair[0]] = 1\n adj_matrix[l, node_pair[1]] = 1\n latents = np.array([[i]+lc for i, lc in enumerate(latent_children)])\n else:\n latents = None\n\n return graph_from_adjmatrix(variable_names, dist_func, adj_matrix, latents=latents)",
"def test_CreateRandomGraph(\n node_x_dimensionality: int,\n node_y_dimensionality: int,\n graph_x_dimensionality: int,\n graph_y_dimensionality: int,\n):\n g = random_networkx_generator.CreateRandomGraph(\n node_x_dimensionality=node_x_dimensionality,\n node_y_dimensionality=node_y_dimensionality,\n graph_x_dimensionality=graph_x_dimensionality,\n graph_y_dimensionality=graph_y_dimensionality,\n )\n for _, data in g.nodes(data=True):\n assert len(data[\"x\"]) == node_x_dimensionality\n assert len(data[\"y\"]) == node_y_dimensionality\n assert len(g.graph[\"x\"]) == graph_x_dimensionality\n assert len(g.graph[\"y\"]) == graph_y_dimensionality",
"def _randomize(self):\n return self.graph",
"def generate_chain(variable_names, dist_func, **kwargs):\n shuffle(variable_names) # To have a random order\n num_vars = len(variable_names)\n\n adj_matrix = np.zeros((num_vars, num_vars), dtype=np.bool)\n for v_idx in range(num_vars-1):\n adj_matrix[v_idx, v_idx+1] = True\n\n return graph_from_adjmatrix(variable_names, dist_func, adj_matrix)",
"def test_random_node_disconnected_graphs(self):\n self.assertEquals(\n Dijkstras().dijkstras(self.g7, 'a', 'b'),\n (3, ['a', 'c', 'b']))",
"def generate_collider(variable_names, dist_func, **kwargs):\n shuffle(variable_names)\n num_vars = len(variable_names)\n\n adj_matrix = np.zeros((num_vars, num_vars), dtype=np.bool)\n adj_matrix[:-1, -1] = True\n\n return graph_from_adjmatrix(variable_names, dist_func, adj_matrix)",
"def generate_test_graph(sameDomain = False):\n num = 100\n\n urls = []\n emails = []\n nodes={}\n if sameDomain:\n domain = generate_domainname()\n else:\n domain = None\n for i in range(num):\n urls.append(generate_url(domain))\n emails.append(generate_email())\n \n used_urls = set()\n used_emails = set()\n for u in urls:\n l = random.choices(urls, k = floor(num/4))\n #l = [u for u in urls]\n e = random.choices(emails, k = floor(num/10))\n #e = [e for e in emails]\n used_urls.update(l)\n used_emails.update(e)\n nodes[u] = testNode(u, l, e)\n nodes[u].generate_page()\n \n return nodes, urls, emails",
"def generate_bidiag(variable_names, dist_func, **kwargs):\n shuffle(variable_names)\n num_vars = len(variable_names)\n\n adj_matrix = np.zeros((num_vars, num_vars), dtype=np.bool)\n for v_idx in range(num_vars-1):\n adj_matrix[v_idx, v_idx+1] = True\n if v_idx < num_vars - 2:\n adj_matrix[v_idx, v_idx+2] = True\n\n return graph_from_adjmatrix(variable_names, dist_func, adj_matrix)",
"def create_random_heterogeneous_crosslinking(graph, data_output, b, c, shape, dclm, k, f):#!!!\n d = densities(Vt=b[2][0]*b[2][1]*b[2][2], dclm=dclm, c=k, f=f)\n if shape == 'croix': z = croix(graph, c, b, f)\n elif shape == 'sphere': z = sphere(graph, c, b, f=f)\n else: print \"heu\", shape\n m = list(set(graph.nodes()).difference(set(z)))\n modified_graph = reticuler(graph, ['miniboucle'], d['dclh'], zone = z, visuel = False, blabla=True)\n twice_modified_graph = reticuler(modified_graph, ['alea'], d['dclm'], zone = m, visuel = False, sauvdata = data_output)",
"def test_generate_graph(self):\n expected_graph = {\n \"a\":[\"b\", \"c\"],\n \"b\":[\"c\"],\n \"c\":[\"b\"],\n }\n\n assert self.graph.generate_graph() == expected_graph",
"def random_two_graph(n):\n return nx.fast_gnp_random_graph(n, 2/(n*n), directed=True)",
"def generate_categorical_graph(num_vars,\n min_categs,\n max_categs,\n inputs_independent=False,\n use_nn=True,\n deterministic=False,\n graph_func=generate_random_graph,\n seed=-1,\n **kwargs):\n if seed >= 0:\n np.random.seed(seed)\n random.seed(seed)\n torch.manual_seed(seed)\n\n if num_vars <= 26: # For less than 26 variables, we call the variables alphabetically, otherwise numerically\n variable_names = [n for i, n in zip(range(1, num_vars+1), string.ascii_uppercase)]\n else:\n variable_names = [r\"$X_{%s}$\" % i for i in range(1, num_vars+1)]\n var_num_categs = np.random.randint(min_categs, max_categs+1, size=(num_vars,))\n\n def dist_func(input_names, name):\n if min_categs != max_categs:\n input_num_categs = [var_num_categs[variable_names.index(v_name)] for v_name in input_names]\n num_categs = var_num_categs[variable_names.index(name)]\n else:\n input_num_categs, num_categs = [min_categs]*len(input_names), min_categs\n dist = get_random_categorical(input_names=input_names,\n input_num_categs=input_num_categs,\n num_categs=num_categs,\n inputs_independent=inputs_independent,\n use_nn=use_nn,\n deterministic=deterministic)\n return dist\n\n return graph_func(variable_names, dist_func, **kwargs)",
"def do_generations():\n # Extract the data from the initialised Instance\n gens = request.args.get('gens')\n gens = int(gens)\n current_gen = request.args.get('currentGen')\n current_gen = int(current_gen)\n client = request.args.get('client')\n graph_data = request.get_json()\n nodes = graph_data['nodes']\n alpha = graph_data['alpha']\n beta = graph_data['beta']\n decay = graph_data['decay']\n min_pheromone = graph_data['min_pheromone']\n q = graph_data['q']\n local_deposit = graph_data['local_deposit']\n distances = graph_data['distances']\n pheromones = graph_data['pheromones']\n ants = graph_data['ants']\n shortest_path = graph_data['shortest_path']\n min_distance = graph_data['min_distance']\n # Initialise an Instance copy\n i = Instance([], float(alpha), float(beta), float(decay), float(q))\n # Alter the Instance copy with the Instance data\n i.nodes = nodes\n i.min_pheromone = min_pheromone\n i.q = q\n i.local_deposit = local_deposit\n i.distances = distances\n i.pheromones = pheromones\n i.ants = ants\n i.shortest_path = shortest_path\n i.min_distance = min_distance\n # Perform the aco algorithm on the instance\n gen_reached, path, distance = i.aco(gens, current_gen, client)\n\n # Create a message for the console to output\n msg = \"Generation \" + str(gen_reached) + \" distance \" + str(distance) + \" path \" + str(path)\n return jsonify(nodes=i.nodes, alpha=i.alpha, beta=i.beta, decay=i.decay,\n min_pheromone=i.min_pheromone, q=i.q,\n local_deposit=i.local_deposit, distances=i.distances,\n pheromones=i.pheromones, ants=i.ants, shortest_path=i.shortest_path,\n min_distance=round(i.min_distance, 3), gen_reached = gen_reached, message=msg)",
"def sampled_clique(clusters,strategy):\n G = nx.Graph()\n sample = []\n #Sample 'size' nodes from a single cluster\n if strategy == \"rand\":\n size = len(clusters)\n while len(sample) < size:\n cluster = random.choice(clusters)\n if len(cluster) >= size:\n sample = random.sample(cluster,size)\n #Sample 1 choice from each cluster\n elif strategy == \"optim\":\n for _,cluster in clusters.items():\n if len(cluster) > 0:\n sample.append(random.choice(cluster))\n for n1 in sample:\n for n2 in sample:\n if n1 != n2:\n G.add_edge(n1,n2)\n return G",
"def generateUnaryRel(graph, dist=None):\n if dist is None:\n dist = lambda: random.randint(1, len(graph.nodes()))\n\n count = dist()\n return random.sample(graph.nodes(), count)",
"def generate_graph(self):\n temp_graph = [[] for i in xrange(Parameters.num_peers)]\n unconnected = set([i for i in xrange(Parameters.num_peers)])\n while len(unconnected) > 1:\n node1 = random.sample(unconnected, 1)[0]\n unconnected.remove(node1)\n node2 = random.sample(unconnected, 1)[0]\n temp_graph[node2].append(self.nodes[node1])\n temp_graph[node1].append(self.nodes[node2])\n unconnected = set([i for i in xrange(Parameters.num_peers)])\n i = 0\n for i in xrange(Parameters.num_peers*Parameters.num_neighbours/2-Parameters.num_peers):\n a = random.sample(unconnected, 1)[0]\n b = random.sample(unconnected, 1)[0]\n while b == a:\n b = random.sample(unconnected, 1)[0]\n temp_graph[a].append(self.nodes[b])\n temp_graph[b].append(self.nodes[a])\n graph = {}\n for i in xrange(len(self.nodes)):\n graph[\"P_\" + str(i)] = list(set(temp_graph[i]))\n return graph",
"def randomGraph(n,base=True,bSize=None,stronglyCon=False):\n A = (np.random.rand(n,n)>np.random.rand())*1.\n for j in range(n): A[j,j] = 0\n nodes = list(range(n))\n\n if stronglyCon:\n while not nx.is_strongly_connected(nx.DiGraph(A)):\n A = (np.random.rand(n,n)>np.random.rand())*1.\n for j in range(n): A[j,j] = 0\n nodes = list(range(n))\n\n if base:\n if bSize is None:\n bSize = np.random.randint(1,high=n)\n base = list(np.random.choice(nodes,replace=False,size=bSize))\n return A,base\n return A",
"def createRandomGraph():\n g = {}\n n = random.sample([0,1,2,3,4,5,6,7,8,9], 7)\n for i in n:\n g[i] = []\n edges = random.randint(10,20)\n count = 0\n while count < edges:\n a = random.choice(n)\n b = random.choice(n)\n if b not in g[a] and a != b:\n g[a].append(b)\n count += 1\n return g",
"def _make_random_graph(self, graph: mazegraph.MazeGraph):\n rmg.generate_random_maze(graph, self._settings)",
"def generate_Graph(edge, vertex):\n\tif edge > vertex *(vertex -1)/2 or vertex <0 or edge < 0:\n\t\tprint(\"Invalid number of edges\")\n\t\treturn None\n\n\tgraph = [[0 for x in range(vertex)] for y in range(vertex)] \n\t\n\t\n\twhile edge >0:\n\t\ta = random.randint(0,vertex-1)\n \n\t\tb = random.randint(0,vertex-1)\n\n\t\tif graph[a][b] == 1 or a ==b: \n\t\t\tcontinue\n\n\t\telse: \n\t\t\t\n\t\t\tedge = edge -1\n\t\t\tgraph[a][b] = 1\n\t\t\tgraph[b][a] = 1\n\treturn graph",
"def build_disconnected_test_graph():\n graph = build_triangle_graph()\n g2 = build_triangle_graph()\n g3 = build_triangle_graph()\n\n merge_graphs(graph, g2)\n merge_graphs(graph, g3)\n\n return graph",
"def _graph_fn_sample_deterministic(self, distribution):\n raise NotImplementedError",
"def random_one_graph(n):\n return nx.fast_gnp_random_graph(n, 1/(n*n), directed=True)",
"def generate_graph(size, number_of_clusters, minimal_size):\n base_list = list(range(size))\n result_list = []\n random.shuffle(base_list)\n for i in range(number_of_clusters - 1):\n size = random.randint(minimal_size, len(base_list) - (number_of_clusters - i - 1) * minimal_size)\n cluster = []\n for n in range(size):\n actual = random.choice(base_list)\n base_list.remove(actual)\n cluster.append(actual)\n result_list.append(strongly_connect(cluster))\n result_list.append(strongly_connect(base_list))\n\n while len(result_list) < 5:\n result_list.append([])\n\n print(sorted([len(i) for i in result_list], reverse=True)[:5])\n\n return weak_connect_graph(result_list)",
"def gen_graph(self):",
"def test_graph1():\n mol_graph = DGLGraph([(0, 1), (0, 2), (1, 2)])\n node_feats = torch.arange(mol_graph.number_of_nodes()).float().reshape(-1, 1)\n edge_feats = torch.arange(2 * mol_graph.number_of_edges()).float().reshape(-1, 2)\n\n complete_graph = get_complete_graph(mol_graph.number_of_nodes())\n atom_pair_feats = torch.arange(complete_graph.number_of_edges()).float().reshape(-1, 1)\n\n return mol_graph, node_feats, edge_feats, complete_graph, atom_pair_feats"
] | [
"0.6162866",
"0.60737395",
"0.56084675",
"0.5604049",
"0.5572062",
"0.55707884",
"0.5547834",
"0.5473386",
"0.54535896",
"0.54377645",
"0.5403645",
"0.5399105",
"0.53836566",
"0.5325518",
"0.5313336",
"0.5309223",
"0.5285266",
"0.52835023",
"0.5281857",
"0.52793276",
"0.52750707",
"0.5272496",
"0.5256191",
"0.5253513",
"0.5253208",
"0.5230315",
"0.5202073",
"0.5174802",
"0.5167406",
"0.51576334"
] | 0.7116983 | 0 |
Get an estimate of the number of different subtypes for this distinction. This is used to estimate a PDF for randomly sampling the distinction space. Examine the code of other distinctions to get a feel for how things are estimated. | def getNumberOfSubtypes(config, low_estimate=True):
raise AbstractMethodException(Distinction) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getNumberOfBaseDistinctionsNeeded():\n\n raise AbstractMethodException(Distinction)",
"def findAtypicalTerms(self):\n self.atypicalTermsDict = collections.OrderedDict()\n distanceList = list()\n distance = 0\n for key in self.summaryFilteredDict:\n partitionName = str(key).split(\" :\")[0]\n partition = voc.getPartition(partitionName)\n modNames = partition.getModNames()\n currentModality = str(key).split(\": \")[1]\n indexCurrentModality = modNames.index(currentModality)\n coverCurrentModality = self.getCoverFromModalityInDictionnary(self.summaryFilteredDict,partitionName + \" : \" + currentModality) #cover(v',R)\n if coverCurrentModality > 0:\n for modality in partition.getModalities():\n coverModality = self.getCoverFromModalityInDictionnary(self.summaryFilteredDict,partitionName + \" : \" + modality.getName()) # cover(v,R)\n if modality.isTrapeziumModality():\n indexModality = modNames.index(modality.getName())\n distance = abs(indexCurrentModality - indexModality) / (partition.getNbModalities() - 1) #d(v,v')\n elif modality.isEnumModality():\n if (modality.getName() == currentModality):\n distance = 0\n else:\n distance = 1\n distanceList.append(min(distance, 1 - coverCurrentModality, coverModality)) # min(d(v,v'),cover(v,R),1-cover(v',R))\n self.atypicalTermsDict[partitionName + \" : \" + currentModality] = max(distanceList) # D(v',R)\n distanceList = list()",
"def test_get_tax_return_frequencies(self):\n pass",
"def get_type_stats(self):\n if not self.fitted:\n raise ValueError(\"Vocabulary hasn't been computed yet\")\n\n total_types = len(self.freqs)\n known_types = len(self) - len(self.reserved)\n return known_types, total_types, known_types / total_types",
"def specificity(self):\n result = 0\n for focal, value in self.items():\n if focal.cardinal > 0:\n result += value / focal.cardinal\n return round(result, 6)",
"def prob_t_N(genotype, base):\n cnter = Counter(genotype)\n return cnter.get(base, 0) * 1/len(genotype)",
"def get_num_classes(self):",
"def calculate_size(self, num_dots):\n self.objects = num_dots\n square = sqrt(self.objects)\n if self.objects % square == 0:\n return int(square), int(square)\n else:\n denom = self.objects // sqrt(self.objects)\n while self.objects % denom != 0:\n denom -= 1\n return int(denom), int(self.objects // denom)",
"def subtype_counts(node_set, G, log=False):\n subtypes = Counter()\n for n in node_set:\n subtype = G.node[n]['subtype']\n subtypes[subtype] += 1\n\n if log:\n for k, v in subtypes.items():\n subtypes[k] = np.log10(v)\n \n return subtypes",
"def _calcTypeDist(self, uSignType, uPassiveShape,\n dbSignType, dbPassiveShape):\n if dbSignType != uSignType:\n # different type\n typeDist = 1\n else:\n # the same type\n if dbSignType == 'passive hand':\n if dbPassiveShape == uPassiveShape:\n # the same shape\n typeDist = 0\n else:\n # different shape\n typeDist = 0.5\n else:\n # the same type other than 'passive hand'\n typeDist = 0\n return typeDist",
"def getDensityEstimate(self):\n return self.density",
"def class_size(self):\n\t\tif self.subject.count()==0:\n\t\t\treturn student.objects.all().filter(reg=self).count()\n\t\telse:\n\t\t\treturn self.grade_set.all().distinct().count()",
"def create_subspace_preference_dimensionality(self):\n # For each point compute number of dimensions that have a lower variance then delta\n spd = np.count_nonzero(self.attribute_variances < self.delta, axis=1)\n return spd",
"def N(self) -> int:\n n_types = len(self)\n return n_types",
"def generatePDI(self,level='species',type='richness'):\n if type == 'richness':\n import biodiversity.richness as rich\n pdi = rich(self)\n try:\n return pdi[level]\n except:\n logger.error(\"[biospatial.gbif.taxonomy.distanceToTree] level selected non existent (used %s)\" %level)",
"def getDimensionality(self):\n dimensionality = self._distribution.returnDimensionality()\n return dimensionality",
"def n(self):\n return len(self.genotypes)",
"def num_depth(self):\n return len(self._sizes) + len(self._ratios) - 1",
"def target_pdf(p, disttype):\n me, cov = target_params(disttype)\n if disttype == 'round' or disttype == 'correlated':\n prob = multivariate_normal.pdf(p, mean=me, cov=cov)\n elif disttype == 'bimodal' or disttype == 'close_bimodal':\n prob0 = multivariate_normal.pdf(p, mean=me[0], cov=cov)\n prob1 = multivariate_normal.pdf(p, mean=me[1], cov=cov)\n prob = max([prob0, prob1]) \n \n return prob",
"def determine_size(self):\n size = np.inf\n while size >= self.n:\n size = np.random.pareto(0.2)\n size = int(math.ceil(size))\n return size",
"def denominator(self, ???):",
"def data_type_ratio(self):\n if self.sample_size:\n return float(self.match_count) / self.sample_size\n return None",
"def class_size(self):\n if not self.is_mutation_finite():\n return infinity\n else:\n components = []\n multiplicities = []\n for x in self.irreducible_components():\n if components.count(x) == 0:\n components.append(x)\n multiplicities.append(1)\n else:\n y = components.index(x)\n multiplicities[y] = multiplicities[y]+1\n\n sizes = [ x.class_size() for x in components ]\n if NotImplemented in sizes:\n print(\"Size unknown\")\n return NotImplemented\n else:\n return prod( [binomial(sizes[i]+multiplicities[i]-1,\n multiplicities[i] ) for i in range (0,len(sizes))])",
"def measures(self, actual, top, nSugg):\n m2 = 0.0\n m3 = 0.0\n for categorySug, count in top:\n if categorySug in actual:\n m2 += 1.0\n else:\n for cR in actual:\n if self.getFatherSon(cR, categorySug) != None:\n m3 += 0.5\n elif self.getBrothers(cR, categorySug) != None:\n m3 += 0.25\n m3 /= len(actual)\n m2 /= nSugg\n m3 = m2 + m3 / nSugg\n return 1 if m2 > 0 else 0, m2, m3",
"def total_sdram_requirements(self):",
"def top_dimensionality(self):\n return self._vocab_size",
"def subtype_occurences(self):\n\n subtype_counts = Counter()\n\n for seqkey,seqs in self.seqs.iteritems():\n for seq,seqentry in seqs.iteritems():\n\n subtype_counts[seqentry['subtype']] += 1\n\n return subtype_counts",
"def getDistType(self):\n return self.distType",
"def get_utilization(self):\n child_prefixes = Prefix.objects.filter(prefix__net_contained_or_equal=str(self.prefix))\n # Remove overlapping prefixes from list of children\n networks = cidr_merge([c.prefix for c in child_prefixes])\n children_size = float(0)\n for p in networks:\n children_size += p.size\n return int(children_size / self.prefix.size * 100)",
"def mutual_info_score(self):\n _, _, I_CK = self._entropies()\n return I_CK / self.grand_total"
] | [
"0.5700454",
"0.5624713",
"0.5599203",
"0.5592559",
"0.5505562",
"0.54669946",
"0.5466142",
"0.5449652",
"0.5400594",
"0.5398503",
"0.5343119",
"0.53286606",
"0.5312603",
"0.53101563",
"0.5294233",
"0.5283371",
"0.5280055",
"0.5267188",
"0.5262132",
"0.52526665",
"0.52496",
"0.5247098",
"0.52160513",
"0.52129257",
"0.5209133",
"0.52085245",
"0.5191606",
"0.51834834",
"0.5173392",
"0.51689976"
] | 0.6394095 | 0 |
Given a schema, return True if this type of distinction is valid for the schema. Default is True. Should be overridden if there are any schemas a distinction is not valid for. | def isValidForSchema(schema):
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_a_dde_schema(self, schema):\n return schema in self.registered_dde_schemas",
"def compatibleSchema(self,\n schema: schemaconverter.TDXSchema,\n raise_error: bool = True\n ) -> bool:\n db_tdx_schema = self.tdx_schema\n # see https://stackoverflow.com/a/41579450/10149169\n is_subset = db_tdx_schema.items() <= schema.items()\n if not is_subset and raise_error:\n raise ValueError((\n \"The given database schema is not compatible with the\"\n \" existing database schema. The given schema was {}\"\n \" but the existing schema was {}\").format(\n schema, db_tdx_schema))\n return is_subset",
"def is_a_dde_schema(schema):\n return schema in registered_dde_schemas()",
"def validate_schema(self, schema):\n json_schema_path = os.path.join(_ROOT, 'data', 'schema.json')\n json_schema = load_json_or_yaml(json_schema_path)\n return validate(schema, json_schema)",
"def schemaIsValid(self):\n ret = libxml2mod.xmlSchemaIsValid(self._o)\n return ret",
"def has_schema_url(self):\n return self.get_schema_url() is not None",
"def can_access_schema(self, datasource: \"BaseDatasource\") -> bool:\n\n return (\n self.can_access_all_datasources()\n or self.can_access_database(datasource.database)\n or self.can_access(\"schema_access\", datasource.schema_perm or \"\")\n )",
"def is_schema_types_valid(self):\n valid_types = {\"string\", \"int\", \"float\", \"datetime\", \"boolean\"}\n invalid_types = []\n if self.schema_content:\n for dataset in self.schema_content:\n attributes = self.schema_content.get(dataset)\n for attr in attributes.values():\n type_to_validate = attr.get(\"type\")\n if type_to_validate not in valid_types:\n invalid_types.append(type_to_validate)\n\n if invalid_types:\n error_message, error_code = Errors.modeling_rule_schema_types_invalid(\n invalid_types\n )\n if self.handle_error(\n error_message, error_code, file_path=self.file_path\n ):\n self._is_valid = False\n return False\n return True",
"def is_schema_valid(self, schema):\n for k, v in schema.items():\n if v[0] == \"var_len\":\n assert len(v) == 2\n assert v[1] in TF_VALUE\n\n if v[0] == \"fixed_len\":\n assert len(v) == 3\n assert v[1] in TF_VALUE\n assert isinstance(v[2], list)",
"def schema_valid(arch, **kwargs):\n validator = relaxng(arch.tag)\n if validator and not validator.validate(arch):\n result = True\n for error in validator.error_log:\n _logger.error(tools.ustr(error))\n result = False\n return result\n return True",
"def equals(self, other: Schema) -> bool:\n if not isinstance(other, Schema):\n raise TypeError(\n f\"invalid equality comparison between Schema and {type(other)}\"\n )\n return self.__cached_equals__(other)",
"def equals(self, other: Schema) -> bool:\n if not isinstance(other, Schema):\n raise TypeError(\n f\"invalid equality comparison between Schema and {type(other)}\"\n )\n return self.__cached_equals__(other)",
"def _validate_bool(instance: typing.Dict[str, typing.Any], schema: typing.Dict[str, typing.Any], path: typing.List[str]) -> None:\n if not isinstance(instance, dict):\n raise ValidationError('instance must be dict', path)\n valid_keys = {'_type', 'value'}\n required_keys = valid_keys\n schema_keys = set(instance.keys())\n invalid_keys = schema_keys - valid_keys - opt_federation_keys\n if invalid_keys:\n raise ValidationError('unexpected keys in schema: {}'.format(invalid_keys), path)\n missing_keys = required_keys - schema_keys\n if missing_keys:\n raise ValidationError('missing keys in schema: {}'.format(missing_keys), path)\n if instance['_type'] != 'bool':\n raise ValidationError('expected _type \"bool\"', path)\n if not isinstance(instance['value'], bool):\n raise ValidationError('value must be bool', path)",
"def validate(self, schema=os.path.join(os.path.dirname(__file__), 'am.xsd')):\n return validate_xml(schema, self.path, from_path=True)",
"def validate(self):\n\n # clear any previous xml errors\n clear_error_log()\n if self.schema_file is not None:\n try:\n # Attempt parsing the schema file\n schdoc = parse(self.schema_file)\n except XMLSyntaxError as e:\n # The schema was not parsable XML\n logging.warning('The schema XML file could not be parsed.')\n for item in e.error_log:\n logging.info(item)\n\n return False\n\n try:\n schema = XMLSchema(schdoc)\n except XMLSchemaParseError as e:\n # The schema document is XML, but it's not a schema\n logging.warning(\n 'The schema XML file was parsed, but it does not appear to be a valid XML Schema document.'\n )\n for item in e.error_log:\n logging.info(item)\n\n return False\n\n try:\n # Attempt parsing the data file\n data = parse(self.datafile)\n except XMLSyntaxError as e:\n # The data was not parsable XML\n logging.warning('The data XML file could not be parsed.')\n for item in e.error_log:\n logging.info(item)\n\n return False\n\n if self.schema_file is not None:\n if schema.validate(data):\n self.data = data\n return True\n\n logging.warning(\n 'The data does not conform to the provided schema.')\n for item in schema.error_log:\n logging.info(item)\n\n return False\n\n self.data = data\n\n return True",
"def isValid(dp: frictionless.package.Package, new_dp: frictionless.package.Package):\n val = frictionless.validate(new_dp)\n if (\n val[\"valid\"]\n and dp[\"resources\"][0][\"schema\"] == new_dp[\"resources\"][0][\"schema\"]\n ):\n logging.info(\"Returning valid and schema-compliant data\")\n return True\n else:\n logging.error(\"Data is not valid or the schema has changed\")\n print(val)\n return False",
"def validate_json(schema, doc):\n is_invalid = set(doc).difference(set(schema))\n if is_invalid:\n return False\n return True",
"def _schema_valid_prod(self, table: TableSchema) -> bool:\n disk_schema = self._get_stored_schema(table['name'])\n if not disk_schema:\n return False\n \n # Column and field order will probably not match\n # TODO don't call update_table_schema twice\n _, alter_reqs = update_table_schema(disk_schema, table)\n return len(alter_reqs) == 0",
"def validate(self) -> bool:\n\n # Start by reading in the blueprint schema json\n schema = json.loads(pkgutil.get_data(\"FactorioTools\", \"blueprintSchema.json\"))\n\n # Validate the object's schema against the blueprintSchema JSON\n try:\n jsonschema.validate(self.data, schema)\n return True\n except jsonschema.ValidationError:\n pass\n\n return False",
"def has_desired_schema(self):\n if self._new_table == self._old_table:\n if not self.rebuild:\n log.info(\"Table already has the desired schema. \")\n return True\n else:\n log.info(\n \"Table already has the desired schema. However \"\n \"--rebuild is specified, doing a rebuild instead\"\n )\n return False\n return False",
"def validate_subset_of_schema(self, schema):\n self.validate_schema_type(schema)\n\n if self.name != schema.name:\n raise AttributeSchemaError(\n \"Expected name '%s'; found '%s'\" % (schema.name, self.name)\n )\n\n if self.exclusive != schema.exclusive:\n raise AttributeSchemaError(\n \"Expected exclusive '%s' for attribute '%s'; found '%s'\"\n % (schema.exclusive, self.name, self.exclusive)\n )\n\n if self.default != schema.default:\n raise AttributeSchemaError(\n \"Expected default '%s' for attribute '%s'; found '%s'\"\n % (schema.default, self.name, self.default)\n )",
"def validate_schema(self, schema):\n if type(schema) is not type(self):\n raise AttributeSchemaError(\n \"Expected schema to have type '%s'; found '%s'\"\n % (type(self), type(schema))\n )\n\n if schema.name != self.name:\n raise AttributeSchemaError(\n \"Expected schema to have name '%s'; found '%s'\"\n % (self.name, schema.name)\n )",
"def validate_schema(doc_xml, schema_xml=None):\n doc_dml = deepcopy(doc_xml)\n\n doc_new = etree.Element(doc_xml.tag, nsmap={None: 'http://www.sii.cl/SiiDte'})\n doc_new[:] = doc_xml[:] # move children into new root\n doc_new.attrib.update(doc_xml.attrib) # copy attributes of the root node\n\n # reload xml\n buff = BytesIO(etree.tostring(doc_new, method='c14n'))\n xml = etree.parse(buff).getroot()\n\n if not schema_xml:\n schema_pth = resolve_schema(doc_xml)\n\n with open(schema_pth, 'rb') as fh:\n schema_dml = etree.parse(fh)\n\n schema = etree.XMLSchema(schema_xml)\n schema.assertValid(dml)\n\n return True # if no assertion gets thrown above, we can safely assume a `True` validity. ",
"def schemaExists(self, schema):\r\n r = self.fetchSqlRecords(\r\n \"SELECT count(*) FROM information_schema.schemata WHERE schema_name = '{}'\".format(schema))\r\n return r[0][0] > 0",
"def validate_class_schema(self, schema):\n json_schema_path = os.path.join(_ROOT,\n 'data',\n 'class_json_schema.json')\n json_schema = load_json_or_yaml(json_schema_path)\n return validate(schema, json_schema)",
"def _verify_schema(schema):\n assert type(schema) in [dict, tuple], f'Expected a dict or a tuple but got {type(schema)}'\n if isinstance(schema, tuple):\n assert len(schema) == 2, f'Expected a tuple with length 2 but got length {len(schema)}'\n if schema[1] is not None:\n assert isinstance(schema[1], schema[0]), f'{str(schema[1])} does not have expected type {str(schema)}'\n elif isinstance(schema, dict):\n for sub_schema in schema.values():\n _verify_schema(sub_schema)",
"def validate(self, descriptor, schema_id):\n try:\n jsonschema.validate(descriptor, self.load_schema(schema_id))\n return True\n\n except ValidationError as e:\n log.error(\"Failed to validate Descriptor against schema '{}'\"\n .format(schema_id))\n self.error_msg = e.message\n log.error(e.message)\n return\n\n except SchemaError as e:\n log.error(\"Invalid Schema '{}'\".format(schema_id))\n self.error_msg = e.message\n log.debug(e)\n return",
"def check_schema_uri(self):\n import asdf\n\n if self.schema_uri is not None:\n with log.augment_exception(\"Invalid ASDF schema URI:\", self.schema_uri):\n asdf.schema.load_schema(self.schema_uri)",
"def is_schema_compatible(self, for_writing_operations_too=False) -> bool:\n _LOG.debug(\n \"software.version\",\n postgis=_schema.get_postgis_versions(self._engine),\n explorer=explorer_version,\n )\n if for_writing_operations_too:\n return _schema.is_compatible_generate_schema(self._engine)\n else:\n return _schema.is_compatible_schema(self._engine)",
"def test_validate_schema(schema_path):\n # Make sure that each schema itself is valid.\n schema_tree = schema.load_schema(schema_path, resolve_references=True)\n schema.check_schema(schema_tree)"
] | [
"0.738567",
"0.6994796",
"0.69861853",
"0.67255765",
"0.66746897",
"0.6557599",
"0.64317065",
"0.6430458",
"0.6274206",
"0.62662953",
"0.61832917",
"0.61832917",
"0.61646706",
"0.6147451",
"0.61017865",
"0.6056857",
"0.6008853",
"0.60003716",
"0.59765136",
"0.5948833",
"0.59400743",
"0.59253293",
"0.5921405",
"0.59120375",
"0.58401763",
"0.58131814",
"0.5810842",
"0.5766985",
"0.5763496",
"0.5755105"
] | 0.81319565 | 0 |
Matrix multiplication of chains of square matrices | def chain_matmul_square(As):
As_matmul = As
while As_matmul.shape[0] > 1:
if As_matmul.shape[0] % 2:
A_last = As_matmul[-1:]
else:
A_last = None
As_matmul = torch.matmul(As_matmul[0:-1:2], As_matmul[1::2])
if A_last is not None:
As_matmul = torch.cat([As_matmul, A_last], dim=0)
return As_matmul.squeeze(0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def recursive_multiply(a, b):\n if len(a) == 2:\n return naive_multiply(a, b)\n\n a11 = a[0:int(len(a) / 2)]\n for index, row in enumerate(a11):\n a11[index] = row[0:int(len(row) / 2)]\n\n a12 = a[0:int(len(a) / 2)]\n for index, row in enumerate(a12):\n a12[index] = row[int(len(a) / 2):len(a)]\n\n a21 = a[int(len(a) / 2):len(a)]\n for index, row in enumerate(a21):\n a21[index] = row[0:int(len(row) / 2)]\n\n a22 = a[int(len(a) / 2):len(a)]\n for index, row in enumerate(a22):\n a22[index] = row[int(len(a) / 2):len(a)]\n\n b11 = b[0:int(len(b) / 2)]\n for index, row in enumerate(b11):\n b11[index] = row[0:int(len(row) / 2)]\n\n b12 = b[0:int(len(b) / 2)]\n for index, row in enumerate(b12):\n b12[index] = row[int(len(b) / 2):len(b)]\n\n b21 = b[int(len(b) / 2):len(b)]\n for index, row in enumerate(b21):\n b21[index] = row[0:int(len(row) / 2)]\n\n b22 = b[int(len(b) / 2):len(b)]\n for index, row in enumerate(b22):\n b22[index] = row[int(len(b) / 2):len(b)]\n\n c11 = matrix_add(recursive_multiply(a11, b11), recursive_multiply(a12, b21)) # C11 = A11*B11 + A12*B21\n c12 = matrix_add(recursive_multiply(a11, b12), recursive_multiply(a12, b22)) # C12 = A11*B12 + A12*B22\n c21 = matrix_add(recursive_multiply(a21, b11), recursive_multiply(a22, b21)) # C21 = A21*B11 + A22*B21\n c22 = matrix_add(recursive_multiply(a21, b12), recursive_multiply(a22, b22)) # C22 = A21*B12 + A22*B22\n\n # Append c12 to c11\n for row_index, row in enumerate(c11):\n for col_index, col in enumerate(c12):\n row.append(c12[row_index][col_index])\n\n # Append c22 to c21\n for row_index, row in enumerate(c21):\n for col_index, col in enumerate(c12):\n row.append(c22[row_index][col_index])\n\n # Append c21 to c11\n for i in c21:\n c11.append(i)\n\n return c11",
"def __mul__(self, other):\n #\n # TODO - your code here\n #\n final_matrix = []\n for i in range(self.h):\n temp_row = []\n for j in range(other.w):\n # take dot-product of row of\n # matrix in 1st arg with col of\n # matrix in 2nd arg\n temp_row.append(dot_product(get_row(self.g, i), get_col(other.g, j)))\n final_matrix.append(temp_row)\n return Matrix(final_matrix)\n # TODO - your code here",
"def matrix_mult(m1, m2):\n pass",
"def _multi_matmul_chain_order(arrays):\n n = len(arrays)\n # p stores the dimensions of the matrices\n # Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]\n # Using -2 to generalize for shapes that are more than 2 dimmensions\n p = [a.shape[-2] for a in arrays] + [arrays[-1].shape[-1]]\n # m is a matrix of costs of the subproblems\n # m[i,j]: min number of scalar multiplications needed to compute A_{i..j}\n m = np.zeros((n, n), dtype=np.double)\n # s is the actual ordering\n # s[i, j] is the value of k at which we split the product A_i..A_j\n s = np.empty((n, n), dtype=np.intp)\n\n for l in range(1, n):\n for i in range(n - l):\n j = i + l\n m[i, j] = np.inf\n for k in range(i, j):\n q = m[i, k] + m[k + 1, j] + p[i] * p[k + 1] * p[j + 1]\n if q < m[i, j]:\n m[i, j] = q\n s[i, j] = k # Note that Cormen uses 1-based index\n return s",
"def lazy_matrix_mul(m_a, m_b):\n return np.dot(m_a, m_b)",
"def matmul():\n\n if RESULT_IN_NVRAM:\n matrix_c = ResultMatrixInDaos()\n else:\n matrix_c = ResultMatrixInMemory()\n\n # This could be trivially optimized by reordering indexes\n # and caching either a_block or b_block (assuming C in-memory).\n # *However* it would result in unfair comparisons with the \n # previous implementation used elsewhere.\n # Using the naive algorithm makes sense for a raw comparison.\n for i in range(MATRIXSIZE):\n for j in range(MATRIXSIZE):\n partial_result_block = np.zeros((BLOCKSIZE, BLOCKSIZE))\n\n for k in range(MATRIXSIZE):\n a_block = np.fromstring(\n DAOS_KV[\"A%02d%02d\" % (i, k)],\n dtype=NP_FROMSTRING_DTYPE\n ).reshape((BLOCKSIZE, BLOCKSIZE))\n\n b_block = np.fromstring(\n DAOS_KV[\"B%02d%02d\" % (k, j)],\n dtype=NP_FROMSTRING_DTYPE\n ).reshape((BLOCKSIZE, BLOCKSIZE))\n\n partial_result_block += a_block @ b_block\n \n matrix_c[i,j] = partial_result_block\n\n return matrix_c",
"def multiply_matrices(a, b):\n try:\n x = len(b[0])\n except:\n b = make_2D(b)\n try:\n x = len(a[0])\n except:\n a = make_2D(a)\n if len(a[0]) != len(b):\n print 'error: matrices cannot be multiplied'\n return\n out = np.zeros((len(a), len(b[0])))\n for i in range(len(out)):\n for j in range(len(out[0])):\n sum = 0\n for k in range(len(a[i])):\n sum += a[i][k] * b[k][j]\n out[i][j] = sum\n return out",
"def multiply_matrices(list):\n # Section 1: Start matrix product using 1st matrix in list\n matrix_product = list[0]\n\n # Section 2: Loop thru list to create product\n for matrix in list[1:]:\n matrix_product = matrix_multiply(matrix_product, matrix)\n\n return matrix_product",
"def mat_mul(mat1, mat2):\n\n if len(mat1[0]) == len(mat2):\n\n mat2 = matrix_transpose(mat2)\n response = []\n\n for row in range(len(mat1)):\n response.append(\n [\n sum(dot_product(mat1[row], mat2[column]))\n for column in range(len(mat2))\n ]\n )\n\n return response\n\n else:\n return None",
"def lazy_matrix_mul(m_a, m_b):\n return (np.matmul(m_a, m_b))",
"def Multiply(M1,M2):\r\n M3=[]\r\n w=0\r\n while w<len(M2[0]):\r\n tap=[]\r\n t=0\r\n while t<len(M2):\r\n tap.append(M2[t][w])\r\n t=t+1\r\n M3.append(tap)\r\n w=w+1\r\n M=[]\r\n # Multiplying matrices\r\n k=0\r\n sums=0\r\n while k<len(M1):\r\n j=0\r\n mpy=[]\r\n while j<len(M3):\r\n p=0\r\n sums=0\r\n while p<len(M3[j]):\r\n temp = (M1[k][p])*(M3[j][p])\r\n sums=sums+temp\r\n p=p+1\r\n mpy.append(sums)\r\n j=j+1\r\n M.append(mpy)\r\n k=k+1\r\n return M",
"def lazy_matrix_mul(m_a, m_b):\n m_a = np.array(m_a)\n m_b = np.array(m_b)\n\n return m_a.dot(m_b)",
"def matrix_mult_matrix(matrix_a, matrix_b):\n m = len(matrix_a)\n n = len(matrix_b)\n result = []\n matrix_b_t = transpose_matrix(matrix_b)\n for i in xrange(m):\n row = []\n\tfor j in xrange(m):\n row.append(dot_product(matrix_a[i], matrix_b_t[j]))\n\tresult.append(row)\n return result",
"def python_nonsquare_matrix_mult(matrix):\n\n transposed_matrix = np.zeros([matrix.shape[1],matrix.shape[0]])\n start = time.time()\n # for i in range(matrix.shape[0]):\n # for j in range(matrix.shape[1]):\n # transposed_matrix[j,i] = matrix[i,j]\n\n transposed_matrix = np.transpose(matrix)\n product = matrix.dot(transposed_matrix)\n\n # transposed_matrix = np.transpose(matrix)\n end = time.time()-start\n\n # print(\"Python Golden Transpose: %s\" % product)\n # print('python transpose time: %.2E' % end)\n return [product, end]",
"def lazy_matrix_mul(m_a, m_b):\n return np.matmul(np.array(m_a), np.array(m_b))",
"def __matmul__(self, q: np.ndarray) -> np.ndarray:\n return self.product(q)",
"def matrixMul(self, matrix, matrix2):\n matrix0 = matrix[:]\n matrix[0] = matrix0[0] * matrix2[0] + matrix0[2]*matrix2[1] # + matrix0[4]*0\n matrix[1] = matrix0[1] * matrix2[0] + matrix0[3]*matrix2[1] # + matrix0[5]*0\n matrix[2] = matrix0[0] * matrix2[2] + matrix0[2]*matrix2[3] # + matrix0[4]*0\n matrix[3] = matrix0[1] * matrix2[2] + matrix0[3]*matrix2[3] # + matrix0[5]*0\n matrix[4] = matrix0[0] * matrix2[4] + matrix0[2]*matrix2[5] + matrix0[4]\n matrix[5] = matrix0[1] * matrix2[4] + matrix0[3]*matrix2[5] + matrix0[5]",
"def __mul__(self, other):\n # \n # TODO - your code here\n #\n \n result = [];\n row_result = [];\n product = 0;\n \n if(self.w != other.h):\n raise(ValueError, \"Matrices can not multiply for their dimesion doesn't match\"); \n \n for row in self.g:\n row_result = [];\n for j in range(other.w):\n product = dot_product(row,other.get_column(j));\n row_result.append(product);\n result.append(row_result);\n \n return Matrix(result);",
"def mat_mul(mat1, mat2):\n\n rows1 = len(mat1)\n cols1 = len(mat1[0])\n rows2 = len(mat2)\n cols2 = len(mat2[0])\n\n if cols1 != rows2:\n return None\n else:\n new_matrix = []\n for x in range(rows1):\n aux_row = []\n for y in range(cols2):\n aux_sum = []\n for z in range(cols1):\n aux_sum.append(mat1[x][z] * mat2[z][y])\n aux_row.append(sum(aux_sum))\n new_matrix.append(aux_row)\n\n return new_matrix",
"def combine_one_matrices(mul):\n factor, args = mul.as_coeff_matrices()\n new_args = [args[0]]\n\n for B in args[1:]:\n A = new_args[-1]\n if not isinstance(A, OneMatrix) or not isinstance(B, OneMatrix):\n new_args.append(B)\n continue\n new_args.pop()\n new_args.append(OneMatrix(A.shape[0], B.shape[1]))\n factor *= A.shape[1]\n\n return newmul(factor, *new_args)",
"def mmultiply(self, matrix):\n try:\n result_matrix = [[0 for row in range(len(self.matrix))] for col in range(len(matrix[0]))]\n for i in range(len(self.matrix)):\n for j in range(len(matrix[0])):\n for k in range(len(matrix)):\n result_matrix[i][j] += self.matrix[i][k] * matrix[k][j]\n self.matrix = result_matrix\n except IndexError:\n pass\n pass",
"def square_matrix_multiply(a, b):\n n = len(a)\n c = [[0]*n for _ in range(n)]\n for i in range(n):\n for j in range(n):\n sm = 0\n for k in range(n):\n sm += (a[i][k] * b[k][j])\n c[i][j] = sm\n\n return c",
"def __mul__(self,m):\n if type(m) != Matrix:\n raise TypeError('The second argument is not a matrix lol')\n if self.ncols != m.nrows:\n raise ValueError('matrix dot argument has incorrect number of rows')\n new = Matrix(self.nrows,m.ncols)\n columns = m.getCols()\n rowindex = 0\n colindex = 0 \n for row in self.matrix:\n colindex = 0 \n for col in columns:\n summ = 0\n for i,j in zip(row,col):\n summ+= i*j \n new.matrix[rowindex][colindex] = summ\n print new.matrix\n colindex += 1 \n rowindex+=1\n return new",
"def matMul(a, b):\n sa=matShape(a)\n sb=matShape(b)\n if sa[1]!=sb[0]: raise ValueError\n ret=matZeros((sa[0],sb[1]))\n for i in range(sa[0]):\n for j in range(sb[1]):\n val=0.0\n for k in range(sa[1]):\n val+=matGet(a,i,k)*matGet(b,k,j)\n matSet(ret,i,j,val)\n return ret",
"def MatMulOrder(D):\r\n\tnum = len(D)-1 # number of matrix in the chain\r\n\tprint(f\"There are {num} matrix to multiply\")\r\n\tM = [[0 for _ in range(num)] for _ in range(num)]\r\n\tP = [[0 for _ in range(num)] for _ in range(num)]\r\n\r\n\t# i要从大到小\r\n\t# i == j时, M[i][j]=0,所以不用更新\r\n\t# i-th矩阵到j-th矩阵的乘的最优值初始化为inf\r\n\tfor i in range(num-2, -1, -1):\r\n\t\tfor j in range(i+1, num):\r\n\t\t\tM[i][j] = 100000000\r\n\t\t\tfor k in range(i, j):\r\n\t\t\t\tnew = M[i][k] + M[k+1][j] + D[i]*D[k+1]*D[j+1]\r\n\t\t\t\tif new < M[i][j]:\r\n\t\t\t\t\tM[i][j] = new \r\n\t\t\t\t\tP[i][j] = k\r\n\treturn M, P",
"def matrix_mult(A,B):\n\n m = len(A)\n p = len(B)\n n = len(B[0])\n AB = []\n for i in range(m):\n AB.append([])\n for j in range(n):\n total = 0\n for k in range(p):\n total += A[i][k] * B[k][j]\n AB[i].append(total)\n return AB",
"def __matmul__(self, csys):\n self._transform(csys)\n return self",
"def matrix_mult(m1, m2):\n output = []\n for rowIndex, row in enumerate(m1): #go through rows in m1\n new_row = []\n for columnIndex in range(len(m2[0])): #go through indices for each column of m2\n sum = 0\n for index3 in range(len(row)):\n product = m1[rowIndex][index3] * m2[index3][columnIndex]\n sum += product\n new_row.append(sum)\n output.append(new_row)\n return output\n \n \n #output = []\n #first for loop corresponds to the rows of my output matrix and loops through the rows of m1 (enumerate)\n #create an empty new row\n # second for loop, loops through columns of m2\n # create sum variable, initialize it with zero\n # third for loop, multiplies the index of the row in m1 times the index of the column in m2\n # add sum to product and assign this to the sum variable\n # append sum to new row\n # append new row to output\n # return output",
"def __matmul__(self, B):\n m, n = self.shape\n n_, r = B.shape\n assert n == n_, (\"Cannot multiply shapes \"\n \"({}, {}) and ({}, {})\".format(m, n, n_, r))\n mul_ = dict()\n # compute A_ik = sum_j A_ij*B_jk\n for i in range(m):\n for k in range(r):\n prod = mpfr(0)\n for j in range(n):\n prod += self[i, j] * B[j, k]\n mul_[i, k] = prod\n return MPMatrix((m, r), mul_)",
"def _multi_matmul(arrays, order, i, j, constant=False) -> Tensor:\n if i == j:\n return arrays[i]\n else:\n return matmul(\n _multi_matmul(arrays, order, i, order[i, j], constant),\n _multi_matmul(arrays, order, order[i, j] + 1, j, constant),\n constant,\n )"
] | [
"0.7083097",
"0.6831898",
"0.6822807",
"0.68104607",
"0.6706873",
"0.66786253",
"0.66484094",
"0.6619107",
"0.659644",
"0.6510616",
"0.6500125",
"0.6498091",
"0.646012",
"0.6443269",
"0.64374197",
"0.6424422",
"0.6419277",
"0.6410198",
"0.6350263",
"0.63494116",
"0.6345248",
"0.6341108",
"0.63231075",
"0.6322469",
"0.6286912",
"0.6279721",
"0.6270186",
"0.6250742",
"0.6244857",
"0.62315214"
] | 0.70788336 | 1 |
Print Bento details by providing the bento_tag. \b | def get(bento_tag: str, output: str) -> None: # type: ignore (not accessed)
bento = bento_store.get(bento_tag)
if output == "path":
console.print(bento.path)
elif output == "json":
info = json.dumps(bento.info.to_dict(), indent=2, default=str)
console.print_json(info)
else:
info = yaml.dump(bento.info, indent=2, sort_keys=False)
console.print(Syntax(info, "yaml")) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def print_tags():\n for tag in Tag.query.all():\n print tag.__repr__()",
"def __gitDescribeTag(self):\n self.vcs.gitDescribe(self.project.getProjectPath(), [])",
"def show_target(self, target):\n print \" \" + repr(target.subject) \\\n + \" \" + target.meaning \\\n + \" \" + target.verb \\\n + \" \" + repr(target.object)",
"def print(self):\r\n self.print_avec_separateur()",
"def printme(self, line):\n self.otag.printme(line)",
"def print_cwb(document, tag='<s>'):\n\n doc = NLP(document)\n for sentence in doc.sents:\n print(tag)\n\n sent = NLP(sentence.text)\n for token in sent:\n print('{word}\\t{pos}\\t{lemma}'.format(\n word=token.text,\n pos=token.pos_,\n lemma=token.lemma_))\n\n print(tag.replace('<', '</'))",
"async def info(self, ctx: \"IceTeaContext\", *, otag: TagConverter):\n tag: models.Tag = otag\n if not tag.alias:\n embed = discord.Embed(description=f\"{ctx.message.guild.name} ``{tag.title}`` tag information\")\n user = ctx.guild.get_member(tag.author)\n embed.set_author(name=user.display_name, icon_url=user.avatar_url)\n embed.add_field(name=\"Tag name\", value=tag.title)\n embed.add_field(name=\"Amount used\", value=str(tag.count))\n embed.timestamp = tag.created\n await ctx.send(embed=embed)\n else:\n embed = discord.Embed(description=f\"{ctx.message.guild.name} ``{tag.title}`` alias information\")\n user = ctx.guild.get_member(tag.author)\n embed.add_field(name=\"Author\", value=user or \"Unknown\")\n embed.add_field(name=\"Amount used\", value=str(tag.count))\n embed.timestamp = tag.created\n await ctx.send(embed=embed)",
"def book_info(self):\n print(\"ID : \", self.ID,\n \"\\nName : \", self.name,\n \"\\nAuthor : \", self.author,\n \"\\nGenre : \", self.genre,\n \"\\nPrice : \", self.price,\n \"\\nQuantity of this book : \", self.quantity)",
"def pretty_print_entity(entity: tg.tl.TLObject) -> str:\n\n return bprint.bprint(entity, stream=str, skip_predicate=_bprint_skip_predicate)",
"def get(self, bento_name, bento_version):",
"def _print(self, *args, **kwargs) -> None:\n # Only print in verbose mode\n if self._verbose:\n arglist = list(args)\n arglist[0] = f\"[buddy-{self._experiment_name}] {args[0]}\"\n print(*arglist, **kwargs)",
"def list_bentos(bento_name: str, output: str) -> None: # type: ignore (not accessed)\n bentos = bento_store.list(bento_name)\n res = [\n {\n \"tag\": str(bento.tag),\n \"path\": display_path_under_home(bento.path),\n \"size\": human_readable_size(calc_dir_size(bento.path)),\n \"creation_time\": bento.info.creation_time.astimezone().strftime(\n \"%Y-%m-%d %H:%M:%S\"\n ),\n }\n for bento in sorted(\n bentos, key=lambda x: x.info.creation_time, reverse=True\n )\n ]\n\n if output == \"json\":\n info = json.dumps(res, indent=2)\n console.print(info)\n elif output == \"yaml\":\n info = yaml.safe_dump(res, indent=2)\n console.print(Syntax(info, \"yaml\"))\n else:\n table = Table(box=None)\n table.add_column(\"Tag\")\n table.add_column(\"Size\")\n table.add_column(\"Creation Time\")\n table.add_column(\"Path\")\n for bento in res:\n table.add_row(\n bento[\"tag\"],\n bento[\"size\"],\n bento[\"creation_time\"],\n bento[\"path\"],\n )\n console.print(table)",
"def print_item(group):\n print(\"\\tName: {}\".format(group.name))\n print(\"\\tId: {}\".format(group.id))\n print(\"\\tLocation: {}\".format(group.location))\n print(\"\\tTags: {}\".format(group.tags))\n if hasattr(group, 'status'):\n print(\"\\tStatus: {}\".format(group.status))\n if hasattr(group, 'state'): # Site\n print(\"\\tStatus: {}\".format(group.state))\n if hasattr(group, 'properties'):\n print_properties(group.properties)\n print(\"\\n\\n\")",
"def print_obs(self,obs):\n print(obs)",
"def export(bento_tag: str, out_path: str) -> None: # type: ignore (not accessed)\n bento = bento_store.get(bento_tag)\n out_path = bento.export(out_path)\n logger.info(\"%s exported to %s.\", bento, out_path)",
"def _show(self, indent = 0):\n print(\" \"*indent, \"Name:\", self.name)\n print(\" \"*indent, \"Description:\", self.description)",
"def print_verbose(self) -> None:\n print(self)\n if self.meta is not None:\n print(self.meta.__repr__())",
"def Print(self):\n\n\t\tif self.verbose:\n\n\t\t print (\"\\033[1m[HEADER]\\033[0m\")\n\t\t print (\"code:\\t\\t%s\" % self.kod)\n\t \tprint (\"version:\\t%s\" % self.ver)\n\t\t print (\"date and time:\\t%s\" % self.probid)\n\t\t print (\"dump number:\\t%s\" % self.knod)\n\t \tprint (\"number of histories:\\t%s\" % self.nps)\n\t\t print (\"number of pseudorandom numbers used:\\t%s\" % self.rnr)\n\t\t print (\"title: %s\" % self.title)\n\n\t\t if self.ntal>1:\n\t\t\t\tprint self.ntal, 'tallies:', self.ntals\n\t \telse:\n\t\t\t\tprint self.ntal, 'tally:', self.ntals\n\n\n\t\t if self.npert != 0:\n\t\t\t\tprint(\"number of perturbations: %s\" % self.npert)",
"def print_object_details(obj: object) -> None:\n print_section(obj, 'Type', print_type)\n print_section(obj, 'Documentation', print_documentation)\n print_section(obj, 'Attributes', print_attributes)\n print_section(obj, 'Methods', print_methods)\n print_section_delimiter()",
"def verbose(self, block: Block):\n print('\\n\\n==============================')\n print('Hash:\\t\\t', block.hash.hexdigest())\n print('Previous Hash:\\t', block.previous_hash.hexdigest())\n print('Nounce:\\t\\t', block.nonce)\n print('Data:\\t\\t', block.data)\n print('\\n\\n==============================')",
"def print_element(self):\n print(\"{selector}\\n{el_path}\\n{selector}\\n{el_source}\\n{selector}\\n\\n\"\n \"\".format(selector=\"--------\",\n el_path=self.save_path,\n el_source=self.text))",
"def print_operation(operations):\n for operation in operations:\n print ' ',\n change_color_by_tag(operation)\n if operation['ExtAttributes']:\n print_extattributes_of_member(operation['ExtAttributes'])\n print operation['Type'],\n if operation['Arguments']:\n print operation['Name'],\n print_argument(operation['Arguments'])\n else:\n print operation['Name']",
"def __str__(self):\n\n return \"[b:{} t:{}]\".format(self.obtem_bag_pass(), self.obtem_ciclo_in())",
"def displayhook(self, obj):\n # reproduce the behavior of the standard displayhook, not printing None\n if obj is not None:\n print >> self.stdout, repr(obj)",
"def __repr__(self):\n if self.bo is None:\n s = \"<BOFeature:%s not attached to bo!>\" % (self.name) \n else:\n s = \"<BOFeature:%s attached to %s>\" % (self.name, self.bo)\n return(s)",
"def print_item(group):\n print(\"\\tName: {}\".format(group.name))\n print(\"\\tId: {}\".format(group.id))\n print(\"\\tLocation: {}\".format(group.location))\n print(\"\\tTags: {}\".format(group.tags))\n if hasattr(group, 'properties'):\n print_properties(group.properties)",
"def print(self):\n size_bid = len(self.bid)\n size_offer = len(self.offer)\n print(\"Book[%s]: %d bids, %d offers --> mid @ %f\" % (self.security,\n size_bid, size_offer, self.mid()))\n print(\"{0: ^32} | {1: ^32}\".format(\"bid\", \"offer\"))\n print(\"{0:^10},{1:^10},{2:^10} | {3:^10}, {4:^10}, {5:^10}\".format(\n \"count\", \"qty\", \"price\", \"price\", \"qty\", \"count\"))\n\n empty_level = OrderBookLevel(\"-\", \"-\", \"-\")\n for i in range(max(size_bid, size_offer)):\n bid = self.bid[-(i+1)] if i < size_bid else empty_level\n offer = self.offer[i] if i < size_offer else empty_level\n print(\"{0:^10},{1:^10},{2:^10} | {3:^10}, {4:^10}, {5:^10}\".format(\n bid.order_count, bid.qty, bid.price, offer.price, offer.qty, offer.order_count))",
"def bbs_show_banner(tn, short = True):\n lines = cmd.lban(tn, short_banner = short)\n for line in lines:\n print(filter_tags(line))",
"def tagger():",
"def print(self):\n self.print_avec_separateur(\" \")"
] | [
"0.59889174",
"0.5728905",
"0.56706613",
"0.56307644",
"0.5569425",
"0.5474408",
"0.5425949",
"0.54244584",
"0.5415978",
"0.5385348",
"0.53358155",
"0.5326319",
"0.5322443",
"0.5317626",
"0.5277394",
"0.52764267",
"0.5227435",
"0.5224774",
"0.5220987",
"0.5199572",
"0.5164107",
"0.5150359",
"0.514877",
"0.51372164",
"0.5133556",
"0.5131356",
"0.51305985",
"0.51263386",
"0.51262504",
"0.5125374"
] | 0.637782 | 0 |
List Bentos in local store \b show all bentos saved $ bentoml list \b show all versions of bento with the name FraudDetector $ bentoml list FraudDetector | def list_bentos(bento_name: str, output: str) -> None: # type: ignore (not accessed)
bentos = bento_store.list(bento_name)
res = [
{
"tag": str(bento.tag),
"path": display_path_under_home(bento.path),
"size": human_readable_size(calc_dir_size(bento.path)),
"creation_time": bento.info.creation_time.astimezone().strftime(
"%Y-%m-%d %H:%M:%S"
),
}
for bento in sorted(
bentos, key=lambda x: x.info.creation_time, reverse=True
)
]
if output == "json":
info = json.dumps(res, indent=2)
console.print(info)
elif output == "yaml":
info = yaml.safe_dump(res, indent=2)
console.print(Syntax(info, "yaml"))
else:
table = Table(box=None)
table.add_column("Tag")
table.add_column("Size")
table.add_column("Creation Time")
table.add_column("Path")
for bento in res:
table.add_row(
bento["tag"],
bento["size"],
bento["creation_time"],
bento["path"],
)
console.print(table) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def command_list(arguments):\n global current_mode\n current_mode = Mode.list\n #current_entity.addlink(arguments[0], arguments[1])\n return 'Now listing all entities'",
"def list_cmd(ctx):\n client = ctx.obj['CLIENT']\n models = client.list_models()\n\n x = PrettyTable()\n x.field_names = [\"Name\",\"Tag\",\"Created\"]\n for m in models:\n x.add_row([m[\"name\"],m[\"tag\"],m[\"uploaded_at\"]])\n print(x)",
"def _list():\n db = shelve.open(\"db\", flag='c', protocol=None, writeback=False)\n names_only = input(\"Names only [Y/n] ->\")\n\n if names_only == \"Y\":\n for name in db.keys():\n print(name)\n elif names_only == \"n\":\n for key in db.items():\n print(key, sep=' ', end='\\n', file=sys.stdout, flush=False)\n #print((\";\\n\".join(\"%s=>%s\" % i for i in db.items())))",
"def db_show_all():\n the_list = []\n db = sh.open(the_phone_book_name, flag='c', writeback=True)\n for key in db:\n person = Person()\n person.name = key\n person.phone = db[key]\n the_list.append(person)\n display_list(the_list)\n db.close()",
"def list(default_view):\n ListCommandExecutor(default_view).list()",
"def view_all(entities, table, db):\n print \n print \"TABLE:\",table\n for ii in entities:\n print ii\n print",
"def view_command():\n listing.delete(0, END)\n for row in backend.view():\n listing.insert(END, row)",
"def list(self, subcmd):\n\n self.__connect_db()\n tariffs = []\n\n for tariff in self.db.get_tariffs():\n tariffs.append(tariff.name)\n print(tariff.name)\n\n #print(\"\\n\".join(sorted(tariffs)))",
"def list():\n rino.remote.list()",
"def list():",
"def list():",
"async def get_pretty_vetoes(self) -> typing.List[str]:\n\n open_bills = await self.bot.db.fetch(\n \"SELECT id, name, link, link FROM bill WHERE is_vetoable = true AND status = $1 ORDER BY id\",\n models.BillPassedLegislature.flag.value,\n )\n\n if not open_bills:\n return []\n\n pretty_bills = []\n b_ids = []\n b_hyperlinks = []\n\n for record in open_bills:\n b_ids.append(f\"Bill #{record['id']}\")\n b_hyperlinks.append(\n f\"=HYPERLINK(\\\"{record['link']}\\\"; \\\"{record['name']}\\\")\"\n )\n pretty_bills.append(\n f\"Bill #{record['id']} - [{record['name']}]({record['link']})\"\n )\n\n exported = [\n f\"Export of Veto-able Bills -- {discord.utils.utcnow().strftime('%c')}\\n\\n\\n\",\n \"----- Veto-able Bills -----\\n\",\n ]\n\n exported.extend(b_ids)\n exported.append(\"\\n\")\n exported.extend(b_hyperlinks)\n\n link = await self.bot.make_paste(\"\\n\".join(exported))\n\n if link:\n pretty_bills.insert(\n 0,\n f\"[*View this list in Google Spreadsheets formatting for easy copy & pasting*]({link})\\n\",\n )\n\n return pretty_bills",
"async def __list(self, ctx):\n server = ctx.message.server\n if server.id not in self.db:\n self.db[server.id] = {}\n self.save_db()\n else:\n db = self.db[server.id]\n if \"bookkeeper\" not in self.db[server.id]:\n self.db[server.id][\"bookkeeper\"] = []\n self.save_db()\n await self.bot.say(\"Bookkeeper list is currently empty, add new bookkeepers using points keeper add\"\n \" <Discord name or nickname>\")\n return\n else:\n bookkeeper = db[\"bookkeeper\"][:]\n msg = \"\"\n for x in bookkeeper:\n bookkeeper[bookkeeper.index(x)] = discord.utils.find(lambda N: N.id == x, server.members).display_name\n bookkeeper = sorted(bookkeeper, key=lambda item: (int(item.partition(' ')[0])\n if item[0].isdigit() else float('inf'), item))\n msg = \", \".join(bookkeeper[:-2] + [\" and \".join(bookkeeper[-2:])])\n await self.bot.say(\"Current bookkeepers assigned are: {}\".format(msg))",
"async def list(self, ctx: MyContext):\n if ctx.subcommand_passed is None:\n await ctx.send_help(\"wormhole list\")",
"def do_bay_list(cs, args):\n bays = cs.bays.list(marker=args.marker, limit=args.limit,\n sort_key=args.sort_key,\n sort_dir=args.sort_dir)\n columns = ['uuid', 'name', 'node_count', 'master_count', 'status']\n columns += utils._get_list_table_columns_and_formatters(\n args.fields, bays,\n exclude_fields=(c.lower() for c in columns))[0]\n utils.print_list(bays, columns,\n {'versions': magnum_utils.print_list_field('versions')},\n sortby_index=None)",
"def ls(filter=None):",
"async def list(self, ctx):\n server = ctx.message.server\n if server.id not in self.db:\n self.db[server.id] = {}\n dataIO.save_json(\"data/lootbox/servers.json\", self.db)\n if len(self.db[server.id]) < 1:\n await self.bot.say(\"No boxes have been created for this server yet, please create some using [p]box create\"\n \" first, thanks\")\n return\n boxes = self.db[server.id].keys()\n await self.bot.say(\"Here are this server's boxes:\\n{}\".format(\"\\n\".join(boxes)))",
"def list_litnacionals_cmd():\n return ListLitnacionalCommand()",
"def listaSangre():\n san = SangreModel()\n\n return san.listarTodos()",
"def list_command(ctx: Any) -> None:\n pass",
"def view(args):\n print(\"List of all available phonebooks:\")\n for file in glob.glob(\"*.ph\"):\n print(file)",
"def do_all(self, line=None):\n\n if not line or line in HBNBCommand.class_list:\n dict_objects = storage.all()\n list_objects = []\n for key, obj in dict_objects.items():\n list_objects.append(obj.__str__())\n print(list_objects)\n else:\n print(\"** class doesn't exist **\")",
"def list_(ctx: click.Context, repository_path):\n root_commands.cmd_list(ctx.obj, repository_path)",
"async def blocklist(self, ctx):\n blocked = await self.db.get('blocked', [])\n if not blocked:\n return await ctx.send('🐱 There are no blocked images.')\n async with aiohttp.ClientSession() as session:\n async with session.post(f'{self.haste_url}/documents', data='\\n'.join(blocked)) as resp:\n return await ctx.send(f'🐱 Here is a list of blocked images\\n\\n{self.haste_url}/{resp[\"key\"]}.txt')",
"def do_command(self, args):\n vendorops = dbops.Vendors()\n listing = vendorops.list(args)\n ordering = ['vendor_name']\n do_list(listing, ordering)",
"def do_list(args):\n session = BMC(server=args.server, username=args.username, password=args.password)\n for i in session.list(args.path):\n print(i)",
"def view_all_batters(self):\n conn = rs.create_connection(\"dailyfantasyscraper.db\")\n cur = conn.cursor()\n position = \"P\"\n cur.execute(\"SELECT * FROM rotowiredk where position != ?\", position)\n result = cur.fetchall()\n conn.commit()\n conn.close()\n\n for item in result:\n print(item)\n tree.insert('', 'end', values=item)",
"def show():\n logger.info('List donors')\n try:\n logger.info('Connecting to database...')\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n for i in Donor.select().order_by(Donor.donor_name):\n print(i)\n except Exception as e:\n logger.info(e)\n finally:\n database.close()",
"def command_ls(self, list_what):\n if list_what in ('available', 'mounted', 'unmounted'):\n callback = getattr(self.environment, 'get_%s_ids' % list_what)\n lst = callback()\n else:\n lst = []\n if len(lst) != 0:\n print((\"\\n\".join(lst)))",
"def list(self):"
] | [
"0.6460789",
"0.609852",
"0.605642",
"0.60312444",
"0.5958324",
"0.58794826",
"0.58415896",
"0.5798007",
"0.57732534",
"0.5748135",
"0.5748135",
"0.5723663",
"0.57183725",
"0.57072484",
"0.568819",
"0.56753975",
"0.5644786",
"0.56367767",
"0.5634464",
"0.5600329",
"0.55807924",
"0.55764073",
"0.55742526",
"0.5569244",
"0.5554157",
"0.5543209",
"0.55427235",
"0.55304193",
"0.550797",
"0.5504177"
] | 0.66813993 | 0 |
Export a Bento to an external file archive \b | def export(bento_tag: str, out_path: str) -> None: # type: ignore (not accessed)
bento = bento_store.get(bento_tag)
out_path = bento.export(out_path)
logger.info("%s exported to %s.", bento, out_path) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def archive(po_filename, bl_filename):\n\n # Store archive in same dir as this script\n root = os.path.abspath(os.path.dirname(sys.argv[0]))\n\n po_archive = root + '/po.csv.%s' % datetime.date.today()\n bl_archive = root + '/bl.csv.%s' % datetime.date.today()\n\n shutil.move(po_filename, po_archive)\n shutil.move(bl_filename, bl_archive)\n\n perms = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH\n os.chmod(po_archive, perms)\n os.chmod(bl_archive, perms)",
"def _toFile(self):\n pass",
"def export_to_file(self):\r\n return True",
"def save(self, export_path: str):",
"def test_export(self):\n structure = {\n \"README.rst\": \"Hi this is 1.0.0.\",\n \"twisted\": {\n \"newsfragments\": {\"README\": \"Hi this is 1.0.0\"},\n \"_version.py\": genVersion(\"twisted\", 1, 0, 0),\n \"web\": {\n \"newsfragments\": {\"README\": \"Hi this is 1.0.0\"},\n \"_version.py\": genVersion(\"twisted.web\", 1, 0, 0),\n },\n },\n }\n reposDir = self.makeRepository(self.tmpDir)\n self.createStructure(reposDir, structure)\n self.commitRepository(reposDir)\n\n exportDir = FilePath(self.mktemp()).child(\"export\")\n self.createCommand.exportTo(reposDir, exportDir)\n self.assertStructure(exportDir, structure)",
"def BT_export(self):\n src = os.path.join(self.resMan.base_path, Config.instance().weld_BT_root_folder)\n srcs=self.BTMan.get_subdirs(src)\n dst = os.path.join(self.project.rootdir, Config.instance().weld_BT_root_folder)\n #this operation has lots of exceptions to output...\n try:\n for src in srcs:\n self.BTMan.export(src, dst)\n except Exception, e:\n print >> sys.__stderr, 'ERROR in Weld.BT_export():'\n print >> sys.__stderr, e.args[0]\n print >> sys.__stderr, 'export cancelled (some cleanup might be needed in %s)' % dst",
"def wrapped_tarball(export_context, context):\n result = export_result_dict(export_context)\n RESPONSE = context.REQUEST.RESPONSE\n RESPONSE.setHeader('Content-type', 'application/x-gzip')\n RESPONSE.setHeader('Content-disposition',\n 'attachment; filename=%s' % result['filename'])\n return result['tarball']",
"def exportBulletFile(*argv):",
"def export_samfile(self):",
"def archive(mongo_backup_file):\r\n filename = get_archive_filename()\r\n tar = tarfile.open(filename, \"w|gz\")\r\n tar.add(mongo_backup_file)\r\n tar.close()\r\n\r\n return filename",
"def export(self, stream):\n pass",
"def saveto(file, tmpfile):\n args = {\"file\": file, \"tmpfile\": tmpfile}\n send_command(\"saveto\", args)",
"def _export_bh_(cls, self):",
"def _export_button_cb(self):\n filename = asksaveasfile(\n mode='w',\n filetypes=(('YAML files', '*.yaml'), ('All files', '*.*'))\n )\n\n if not filename:\n return\n\n with open(filename.name, 'w') as f:\n f.write('obstacles:\\n')\n for obstacle in self.obstacles:\n f.write(f' - {str(obstacle)}')\n f.write('\\n')",
"def extract_to_disk(self):\n archive_name, extension = os.path.splitext(os.path.basename(self.file.name))\n if not os.path.isdir(os.path.join(os.getcwd(), archive_name)):\n os.mkdir(archive_name)\n os.chdir(archive_name)\n for filename, data in self.extract().items():\n f = open(filename, 'wb')\n f.write(data or b'')\n f.close()",
"def save_as_archive(self):\n dest_file_path = QFileDialog.getSaveFileName(self)[0]\n self.binder.to_archive(dest_file_path)",
"def archive(self, header, target: str, output_target: str = None):\n\t\ttry:\n\t\t\ttarget = os.path.abspath(target)\n\t\t\tif output_target:\n\t\t\t\toutfile = output_target\n\t\t\telse:\n\t\t\t\tif os.path.isfile(target):\n\t\t\t\t\toutfile = target + \".edz\"\n\t\t\t\telif os.path.isdir(target):\n\t\t\t\t\toutfile = os.path.join(target, os.path.basename(target) + \".edz\")\n\t\t\t\telse:\n\t\t\t\t\tprint(\"No valid output target\")\n\t\t\t\t\treturn\n\t\t\t#Zip target dir\n\t\t\tprint(f\"Creating virtual zip of {target}\")\n\t\t\tzip_bytes_object = zipit(target)\n\t\t\tprint(f\"Making .edz at {outfile}...\")\n\t\t\twith open(outfile, \"w+b\") as out:\n\t\t\t\tprint(\"Writing header...\")\n\t\t\t\tout.write(header)\n\t\t\t\tprint(\"Writing zip contents...\")\n\t\t\t\tout.write(zip_bytes_object.getvalue())\n\t\t\tprint(\"Success!\")\n\t\t\treturn outfile\n\t\texcept Exception as e:\n\t\t\tprint(f\"Failed to create edizip from target {target} - {e}\")\n\t\t\tprint(\"Attempting cleanup...\")\n\t\t\ttry:\n\t\t\t\tif os.path.isfile(outfile):\n\t\t\t\t\tprint(f\"Removing possibly invalid archive {outfile}\")\n\t\t\t\t\tos.remove(outfile)\n\t\t\texcept:\n\t\t\t\tpass",
"def make_file(self):\n\n f = open(get_output_path(), \"w\")\n \n f.write(self.export())\n \n f.close()\n\n return self",
"def export(self, token):\n studio_module = request.env['ir.module.module'].get_studio_module()\n data = request.env['ir.model.data'].search([('studio', '=', True)])\n content = export.generate_archive(studio_module, data)\n\n return request.make_response(content, headers=[\n ('Content-Disposition', content_disposition('customizations.zip')),\n ('Content-Type', 'application/zip'),\n ('Content-Length', len(content)),\n ], cookies={'fileToken': token})",
"def to_walden(self):\n with tempfile.NamedTemporaryFile() as f:\n # fetch the file locally\n files.download(self.source_data_url, f.name)\n\n # add it to walden, both locally, and to our remote file cache\n add_to_catalog(self.metadata, f.name, upload=True)",
"def __export_file(self, filename, output):\n outfile = open(filename, \"w\")\n outfile.write(output)\n outfile.close\n print(\"Output written to file: \" + filename + \"\\n\")",
"def test_export(api):\n # upload file to file.io servers\n uploaded_file = api.upload(\n tag='test_file',\n expiry='1d',\n path='tests/test_file.txt'\n )\n\n # check that instance of FileIO has these fields\n assert uploaded_file.link\n assert uploaded_file.key\n assert uploaded_file.tag\n assert uploaded_file.path\n\n # check that the uploaded file was added to uploaded files list\n assert api.show_uploads()\n\n # testing that export works\n api.export('tests/files_data.json')\n\n # check that the exported file exists\n assert path.isfile('tests/files_data.json')\n\n remove('tests/files_data.json')\n\n # testing that export in pkl works\n api.export(out_type='pkl')\n\n # check that the exported file exists\n assert path.isfile('exported.pkl')\n\n remove('exported.pkl')\n\n # testong that export in pkl works\n api.export('tests/exported.pkl')\n\n # check that the exported file exists\n assert path.isfile('tests/exported.pkl')\n\n remove('tests/exported.pkl')\n\n # testing that export in json with default path works\n api.export()\n\n # check that exported file exists\n assert path.isfile('exported.json')\n\n remove('exported.json')\n\n # check that export with provided path works\n api.export('tests/exporte.d.pkl', out_type='json')\n\n # testing that export works\n assert path.isfile('tests/exporte.d.pkl.json')\n\n remove('tests/exporte.d.pkl.json')\n\n # check that export works correctly with strange path\n api.export('tests/t.e.s.t.p.k.l', out_type='pkl')\n\n # testing that export works\n assert path.isfile('tests/t.e.s.t.p.k.l.pkl')\n\n remove('tests/t.e.s.t.p.k.l.pkl')",
"def _outside_tar2(self):\r\n outside_tar = self.unsafe_common_dir / \"unsafe_file.tar.gz\"\r\n with tarfile.open(outside_tar, \"w:gz\") as tar:\r\n tar.addfile(tarfile.TarInfo(str(self.unsafe_common_dir / \"../a_file\")))\r\n\r\n return outside_tar",
"def export_bom(self):\n path = self.export_dir.joinpath(self.partcode).joinpath('bom.xlsx')\n bom = self.doc.ComponentDefinition.BOM\n bom.StructuredViewFirstLevelOnly = False\n bom.StructuredViewEnabled = True\n bom.BOMViews.Item(\"Structured\").Export(path, 74498)",
"def transfer(file_obj):",
"def do_bagit_export(assets, export_base_dir, export_filename_base):\n\n # These assets should already be in the correct order - by item, seequence\n for asset in assets:\n asset_id = get_original_asset_id(asset.download_url)\n logger.debug(\"Exporting asset %s into %s\", asset_id, export_base_dir)\n\n asset_id = asset_id.replace(\":\", \"/\")\n asset_path, asset_filename = os.path.split(asset_id)\n\n asset_dest_path = os.path.join(export_base_dir, asset_path)\n os.makedirs(asset_dest_path, exist_ok=True)\n\n # Build a transcription output text file for each asset\n asset_text_output_path = os.path.join(\n asset_dest_path, \"%s.txt\" % asset_filename\n )\n\n if asset.latest_transcription:\n # Write the asset level transcription file\n with open(asset_text_output_path, \"w\") as f:\n f.write(asset.latest_transcription)\n\n write_distinct_asset_resource_file(assets, export_base_dir)\n\n # Turn Structure into bagit format\n bagit.make_bag(\n export_base_dir,\n {\n \"Content-Access\": \"web\",\n \"Content-Custodian\": \"dcms\",\n \"Content-Process\": \"crowdsourced\",\n \"Content-Type\": \"textual\",\n \"LC-Bag-Id\": export_filename_base,\n \"LC-Items\": \"%d transcriptions\" % len(assets),\n \"LC-Project\": \"gdccrowd\",\n \"License-Information\": \"Public domain\",\n },\n )\n\n # Build .zip file of bagit formatted Campaign Folder\n archive_name = export_base_dir\n shutil.make_archive(archive_name, \"zip\", export_base_dir)\n\n export_filename = \"%s.zip\" % export_filename_base\n\n # Upload zip to S3 bucket\n s3_bucket = getattr(settings, \"EXPORT_S3_BUCKET_NAME\", None)\n\n if s3_bucket:\n logger.debug(\"Uploading exported bag to S3 bucket %s\", s3_bucket)\n s3 = boto3.resource(\"s3\")\n s3.Bucket(s3_bucket).upload_file(\n \"%s.zip\" % export_base_dir, \"%s\" % export_filename\n )\n\n return HttpResponseRedirect(\n \"https://%s.s3.amazonaws.com/%s\" % (s3_bucket, export_filename)\n )\n else:\n # Download zip from local storage\n with open(\"%s.zip\" % export_base_dir, \"rb\") as zip_file:\n response = HttpResponse(zip_file, content_type=\"application/zip\")\n response[\"Content-Disposition\"] = \"attachment; filename=%s\" % export_filename\n return response",
"def savefile(self, x, o):\n self.sep('save')\n with open(o, 'w') as f:\n f.write(x)\n sys.exit('all done (%s bytes).. saved as %s' % (len(x), o))",
"def export(self):\n memento = self.create_memento()\n try:\n f = open(\"story.txt\", \"w\")\n try:\n f.write(memento.__str__())\n finally:\n f.close()\n except IOError:\n print 'IOError while exporting story!'",
"def export(fileName, result):\n with open(fileName, 'a') as output:\n output.write(result)",
"def saveAs( self, filename ):\r\n filename = uno.systemPathToFileUrl( os.path.abspath( filename ) )\r\n #filterlist: http://wiki.services.openoffice.org/wiki/Framework/Article/Filter/FilterList_OOo_3_0\r\n exportFilter = self._getExportFilter( filename )\r\n props = exportFilter, \r\n #storeToURL: #http://codesnippets.services.openoffice.org/Office/Office.ConvertDocuments.snip\r\n self.oodocument.storeToURL( filename, props )"
] | [
"0.60800993",
"0.60314316",
"0.599764",
"0.5991197",
"0.5971901",
"0.59628",
"0.59533924",
"0.5905716",
"0.5669903",
"0.5646166",
"0.5619919",
"0.5599178",
"0.55828655",
"0.5566471",
"0.55654246",
"0.5564186",
"0.5563321",
"0.55614084",
"0.5539148",
"0.55066764",
"0.5496924",
"0.54953384",
"0.5485054",
"0.5478849",
"0.5467897",
"0.54620713",
"0.54524106",
"0.54344577",
"0.5412332",
"0.54115105"
] | 0.73495036 | 0 |
Import a previously exported Bento archive file \b | def import_bento_(bento_path: str) -> None: # type: ignore (not accessed)
bento = import_bento(bento_path)
logger.info("%s imported.", bento) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def import_idb(self, idb_file):\n self.__run_import_script(file=idb_file, is_bin=False)",
"def zoo_import(name, head=''):\n net = gz.get_model(name, pretrained=True)\n export_block(head + name, net, preprocess=True)",
"def _import_bh_(self):",
"def import_bin(self, bin_file):\n self.__run_import_script(file=bin_file, is_bin=True)",
"def import_fusion_archive(filename, name=\"import\"):\n import_options = app().importManager.createFusionArchiveImportOptions(filename)\n\n document = app().importManager.importToNewDocument(import_options)\n imported_root = document.products[0].rootComponent\n\n bodies = []\n\n for body in imported_root.bRepBodies:\n bodies.append(brep().copy(body))\n for occurrence in imported_root.allOccurrences:\n for body in occurrence.bRepBodies:\n bodies.append(brep().copy(body))\n\n document.close(saveChanges=False)\n\n return BRepComponent(*bodies, name=name)",
"def importer():\n pass",
"def importar2(self):\n self.set_session()\n fileinfo = self.request.files['archivo'][0]\n fname = fileinfo['filename']\n extn = os.path.splitext(fname)[1]\n cname = str(uuid.uuid4()) + extn\n fh = open(\"server/common/resources/uploads/\" + cname, 'wb')\n fh.write(fileinfo['body'])\n fh.close()\n if extn == '.xlsx':\n mee = self.manager2(self.db).import_excel(cname)\n self.respond(message=mee['message'], success=mee['success'])\n else:\n self.respond(message='Formato de Archivo no aceptado¡¡', success=False)\n self.db.close()",
"def do_import(args):\n base64str = b''\n for infile_name in args.infile_names:\n if args.png:\n chunk = subprocess.check_output(['zbarimg', '--raw', infile_name])\n base64str += chunk\n elif args.base64:\n with open(infile_name, 'rb') as infile:\n chunk = infile.read()\n base64str += chunk\n\n raw = base64.b64decode(base64str)\n paperkey = subprocess.Popen(['paperkey', '--pubring', args.pubkey],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE)\n (paperkey_stdout, _) = paperkey.communicate(raw)\n gpg = subprocess.Popen(['gpg', '--import'], stdin=subprocess.PIPE)\n gpg.communicate(paperkey_stdout)",
"def import_project_dump(self, key):",
"def do_import(export_filename, token):\r\n\r\n print 'Importing %s' % export_filename\r\n url = 'http://shank.trikeapps.com/mediawiki/index.php?title=Special:Import&action=submit'\r\n export_file = open(export_filename, 'rb')\r\n data = {'source': 'upload', 'log-comment': 'migrate_wiki.py script', 'xmlimport': export_file, 'editToken': token }\r\n feed = urllib2.urlopen(url, data)\r\n buf = feed.read()\r\n tree = etree.fromstring(buf, parser)\r\n nodes = tree.xpath('//div[@id=\"bodyContent\"]/p[2]')\r\n if not nodes or not nodes[0].text.startswith('Import finished!'):\r\n raise Exception('Failed to upload file, perhaps export file exceeds max size, try without the --at-once option')",
"def bimport(filepath, resource_path=None, imgi_import=True, imge_import=True, seq_import=True, mov_import=True, txti_import=True, txte_import=True,\r\n script_import=True, img_embed=False, txt_embed=None, skip_sha1=False, img_merge=True):\r\n \r\n filepath = bpy.path.abspath(filepath) #Ensure path is absolute\r\n \r\n if resource_path is None or resource_path.strip() == \"\":\r\n resource_path = None\r\n else:\r\n resource_path = bpy.path.abspath(resource_path) #Ensure path is absolute\r\n \r\n if path.splitext(filepath)[1] == \".blib\":\r\n try:\r\n archive = zf.ZipFile(filepath, 'r')\r\n except zf.BadZipFile:\r\n raise InvalidBlibFile(\"File is not a valid Blender library\")\r\n \r\n blib = True\r\n try:\r\n file_checksum, blibtype, file_version, compatible, *rest = archive.comment.decode(\"utf-8\").split(\" \")\r\n except ValueError:\r\n raise InvalidBlibFile(\"File is broken, missing meta-data\")\r\n \r\n compatible = Version(compatible)\r\n \r\n if blibtype == \"cycles\":\r\n if compatible <= version:\r\n if archive.testzip() is not None:\r\n raise InvalidBlibFile(\"File is broken\")\r\n else:\r\n if not skip_sha1:\r\n checksum = archive_sha1(archive)\r\n \r\n if not file_checksum == checksum.hexdigest():\r\n raise InvalidBlibFile(\"Checksum does not match, file may be broken or have been altered\\n\"\r\n 'Run with \"skip_sha1\" to ignore checksum')\r\n else:\r\n raise BlibVersionError(\"File has incompatible version of blib\")\r\n else:\r\n raise BlibTypeError(\"File is not a valid Cycles material\")\r\n try:\r\n xml_file = archive.open(\"structure.xml\", 'r')\r\n except KeyError:\r\n raise InvalidBlibFile(\"File is broken, missing structure XML\")\r\n tree = ET.ElementTree(file=xml_file)\r\n xml_file.close()\r\n xroot = tree.getroot()\r\n \r\n elif path.splitext(filepath)[1] == \".xml\":\r\n tree = ET.ElementTree(file=filepath)\r\n xroot = tree.getroot()\r\n blib = False\r\n xversion = Version(xroot.attrib[\"compatible\"])\r\n if xversion > version:\r\n raise BlibVersionError(\"File has incompatible version of blib\")\r\n \r\n else:\r\n raise InvalidBlibFile(\"File is not a Blender library\")\r\n \r\n if xroot.tag != \"blib\":\r\n raise InvalidBlibFile(\"File is not a Blender library\")\r\n \r\n if xroot.attrib[\"type\"] != \"cycles\":\r\n raise BlibTypeError(\"File is not a valid Cycles material\")\r\n \r\n failed = {}\r\n imgs = {}\r\n txts = {}\r\n txt_paths = {}\r\n grps = {}\r\n scripts = {}\r\n resources = {\r\n \"images\": imgs,\r\n \"texts\": txts,\r\n \"text_paths\": txt_paths,\r\n \"groups\": grps,\r\n \"scripts\": scripts,\r\n }\r\n txt_dir = ResourceDir(\"texts\", resource_path)\r\n xres = xroot.find(\"resources\")\r\n \r\n #Import resources\r\n if xres is not None:\r\n ximgs = xres.find(\"images\")\r\n xtxts = xres.find(\"texts\")\r\n xgrps = xres.find(\"groups\")\r\n tmp_path = ResourceDir(\"tmp\", resource_path)\r\n path_dict = {}\r\n \r\n #Images\r\n if ximgs is not None and (imgi_import or imge_import or seq_import or mov_import) and blib:\r\n img_dir = ResourceDir(\"images\", resource_path)\r\n hash_dict = None\r\n sfv_update = False\r\n for ximg in ximgs:\r\n if ximg.attrib[\"source\"] in {'FILE', 'GENERATED'}:\r\n if ximg.attrib[\"origin\"] == \"internal\":\r\n if not imgi_import:\r\n pass\r\n else:\r\n if not imge_import:\r\n pass\r\n elif ximg.attrib[\"source\"] == 'SEQUENCE':\r\n if not seq_import:\r\n pass\r\n elif ximg.attrib[\"source\"] == 'MOVIE':\r\n if not mov_import:\r\n pass\r\n \r\n #Write image to temporary folder, and pack in Blender\r\n if 
ximg.attrib[\"source\"] in {'FILE', 'GENERATED'} and (img_embed or (img_embed is None and ximg.attrib[\"origin\"] == \"internal\")):\r\n ipath = extract_image(archive, ximg.attrib[\"path\"], str(tmp_path), path_dict, failed)\r\n if ipath is None:\r\n pass\r\n \r\n try:\r\n img = bpy.data.images.load(ipath)\r\n except:\r\n fail(failed, \"images\", \"import image '{}', unknown reason\".format(ximg.attrib[\"path\"]))\r\n else:\r\n img.source = ximg.attrib[\"source\"]\r\n try:\r\n img.pack()\r\n except:\r\n bpy.data.images.remove(img)\r\n fail(failed, \"images\", \"pack image '{}', unknown reason\".format(ximg.attrib[\"path\"]))\r\n else:\r\n img.filepath = \"\"\r\n imgs[ximg.attrib[\"name\"]] = img\r\n \r\n else: #Write image to resource folder, and load in Blender\r\n if img_merge and ximg.attrib[\"source\"] != 'SEQUENCE': #Use existing image in resources if available\r\n try:\r\n comment = archive.getinfo(ximg.attrib[\"path\"]).comment.decode(\"utf-8\")\r\n except KeyError:\r\n fail(failed, \"images\", \"import image '{}', file is missing\".format(ximg.attrib[\"path\"]))\r\n pass\r\n \r\n com_path = path_dict[comment] if comment != \"\" else \"\"\r\n com_name = path.basename(path.dirname(com_path))\r\n if comment != \"\" and com_name != \"tmp\":\r\n ipath = com_path\r\n path_dict[ximg.attrib[\"path\"]] = ipath\r\n else:\r\n #Create hash dictionary only in the first iteration\r\n if hash_dict is None:\r\n hash_path = path.join(img_dir.root, \"list.sfv\")\r\n hash_dict = {}\r\n if path.isfile(hash_path):\r\n sfv = re.compile(r\"(.*) (.*?)$\")\r\n hash_file = open(hash_path, 'r', encoding=\"utf-8\")\r\n for line in hash_file:\r\n key = sfv.sub(r\"\\2\", line).strip()\r\n val = sfv.sub(r\"\\1\", line).strip()\r\n if key in hash_dict and val in hash_dict[key]:\r\n sfv_update = True\r\n else:\r\n hash_dict.setdefault(key, []).append(val)\r\n hash_file.close()\r\n hash_bkp = hash_dict.copy()\r\n \r\n #Check if files match and set path to appropriate image\r\n img_path = ximg.attrib[\"path\"] if comment == \"\" else comment\r\n try:\r\n crc = format(archive.getinfo(img_path).CRC, 'x')\r\n except KeyError:\r\n fail(failed, \"images\", \"import image '{}', file is missing\".format(ximg.attrib[\"path\"]))\r\n pass\r\n \r\n if crc in hash_dict:\r\n i = 0\r\n while i < len(hash_dict[crc]):\r\n val = hash_dict[crc][i]\r\n fpath = path.join(img_dir.root, val)\r\n if path.isfile(fpath):\r\n fsize = path.getsize(fpath)\r\n zsize = archive.getinfo(img_path).file_size\r\n if fsize == zsize:\r\n ffile = open(fpath, 'rb')\r\n zfile = archive.open(img_path, 'r')\r\n if files_equal(ffile, zfile):\r\n ipath = fpath\r\n path_dict[ximg.attrib[\"path\"]] = ipath\r\n ffile.close()\r\n zfile.close()\r\n break\r\n ffile.close()\r\n zfile.close()\r\n else:\r\n hash_dict[crc].remove(val)\r\n i -= 1\r\n i += 1\r\n else:\r\n ipath = extract_image(archive, ximg.attrib[\"path\"], str(img_dir), path_dict, failed)\r\n if ipath is None:\r\n pass\r\n \r\n hash_dict[crc].append(path.relpath(ipath, img_dir.root))\r\n else:\r\n ipath = extract_image(archive, ximg.attrib[\"path\"], str(img_dir), path_dict, failed)\r\n if ipath is None:\r\n pass\r\n \r\n hash_dict[crc] = [path.relpath(ipath, img_dir.root)]\r\n else: #Use image in archive, even if duplicate\r\n if ximg.attrib[\"source\"] == 'SEQUENCE':\r\n seq_dir = path.dirname(ximg.attrib[\"path\"])\r\n dir_name = ximg.attrib[\"path\"].split(\"/\")[-2]\r\n seq_path = path.join(str(img_dir), dir_name)\r\n makedirs(seq_path)\r\n seq_imgs = [img for img in archive.namelist() if 
img.startswith(seq_dir)]\r\n for img in seq_imgs:\r\n i_tmp_path = extract_image(archive, img, seq_path, path_dict, failed)\r\n if img == ximg.attrib[\"path\"]:\r\n ipath = i_tmp_path\r\n if ipath is None:\r\n break\r\n if ipath is None:\r\n rmtree(seq_path)\r\n pass\r\n else:\r\n ipath = extract_image(archive, ximg.attrib[\"path\"], str(img_dir), path_dict, failed)\r\n if ipath is None:\r\n pass\r\n \r\n #load image to Blender\r\n try:\r\n img = bpy.data.images.load(ipath)\r\n except:\r\n fail(failed, \"images\", \"import image '{}', unknown reason\".format(ximg.attrib[\"path\"]))\r\n else:\r\n img.source = ximg.attrib[\"source\"]\r\n imgs[ximg.attrib[\"name\"]] = img\r\n \r\n if tmp_path:\r\n for item in listdir(str(tmp_path)):\r\n fpath = path.join(str(tmp_path), item)\r\n if path.isfile(fpath):\r\n remove(fpath)\r\n \r\n #Update hash file if list has changed\r\n if hash_dict is not None and (hash_dict != hash_bkp or sfv_update):\r\n hash_path = path.join(img_dir.root, \"list.sfv\")\r\n hash_file = open(hash_path, 'w', encoding=\"utf-8\")\r\n for key in hash_dict:\r\n for val in hash_dict[key]:\r\n hash_file.write(val + \" \" + key + \"\\n\")\r\n hash_file.close()\r\n \r\n #Texts\r\n if xtxts is not None and (txti_import or txte_import):\r\n for xtxt in xtxts:\r\n if xtxt.attrib[\"origin\"] == \"internal\":\r\n if txti_import:\r\n if \"path\" in xtxt.attrib:\r\n if blib:\r\n if txt_embed == False:\r\n import_texts(\"zip\", \"ext\", xtxt, txts, failed, archive, txt_dir)\r\n else:\r\n import_texts(\"zip\", \"int\", xtxt, txts, failed, archive, txt_dir)\r\n else:\r\n if txt_embed == False:\r\n import_texts(\"xml\", \"ext\", xtxt, txts, failed, None, txt_dir)\r\n else:\r\n import_texts(\"xml\", \"int\", xtxt, txts, failed, None, txt_dir)\r\n \r\n else:\r\n if txte_import:\r\n if \"path\" in xtxt.attrib:\r\n if blib:\r\n if txt_embed == True:\r\n import_texts(\"zip\", \"int\", xtxt, txts, failed, archive, txt_dir, txt_paths)\r\n else:\r\n import_texts(\"zip\", \"ext\", xtxt, txts, failed, archive, txt_dir, txt_paths)\r\n else:\r\n if txt_embed == True:\r\n import_texts(\"xml\", \"int\", xtxt, txts, failed, None, txt_dir, txt_paths)\r\n else:\r\n import_texts(\"xml\", \"ext\", xtxt, txts, failed, None, txt_dir, txt_paths)\r\n \r\n #Groups\r\n if xgrps is not None:\r\n for xgrp in xgrps:\r\n xnodes = xgrp.find(\"nodes\")\r\n xlinks = xgrp.find(\"links\")\r\n grp = bpy.data.node_groups.new(xgrp.attrib[\"name\"], xgrp.attrib[\"bl_idname\"])\r\n grps[xgrp.attrib[\"name\"]] = grp\r\n if xnodes is not None:\r\n build_tree(xnodes, xlinks, grp, resources, txt_embed, txt_dir, blib, script_import, archive, failed)\r\n \r\n #Import material\r\n xmat = xroot.find(\"main\")\r\n \r\n if xmat is not None:\r\n xcycles = xmat.find(\"cycles_settings\")\r\n xnodes = xmat.find(\"nodes\")\r\n xlinks = xmat.find(\"links\")\r\n \r\n mat = bpy.data.materials.new(xmat.attrib[\"name\"])\r\n set_attributes(mat, xmat, failed)\r\n set_attributes(mat.cycles, xcycles, failed)\r\n mat.use_nodes = True\r\n mat.node_tree.nodes.clear()\r\n build_tree(xnodes, xlinks, mat.node_tree, resources, txt_embed, txt_dir, blib, script_import, archive, failed)\r\n if blib:\r\n archive.close()\r\n for f in failed:\r\n print(\"{} {} failed to be imported/assigned.\".format(failed[f], f))\r\n return mat\r\n else:\r\n if blib:\r\n archive.close()\r\n for f in failed:\r\n print(\"{} {} failed to be imported/assigned.\".format(failed[f], f))\r\n return grp",
"def importar(self):\n self.set_session()\n fileinfo = self.request.files['archivo'][0]\n fname = fileinfo['filename']\n extn = os.path.splitext(fname)[1]\n cname = str(uuid.uuid4()) + extn\n fh = open(\"server/common/resources/uploads/\" + cname, 'wb')\n fh.write(fileinfo['body'])\n fh.close()\n if extn == '.xlsx':\n mee = self.manager(self.db).import_excel(cname)\n self.respond(message=mee['message'], success=mee['success'])\n else:\n self.respond(message='Formato de Archivo no aceptado¡¡', success=False)\n self.db.close()",
"def importar3(self):\n self.set_session()\n fileinfo = self.request.files['archivo'][0]\n fname = fileinfo['filename']\n extn = os.path.splitext(fname)[1]\n cname = str(uuid.uuid4()) + extn\n print(\"flacoooo\")\n print(str(datetime.now()))\n fh = open(\"server/common/resources/uploads/\" + cname, 'wb')\n fh.write(fileinfo['body'])\n fh.close()\n print(\"end flacooo\")\n print(str(datetime.now()))\n if extn == '.xlsx':\n mee = self.manager3(self.db).import_excel(cname)\n self.respond(message=mee['message'], success=mee['success'])\n else:\n self.respond(message='Formato de Archivo no aceptado¡¡', success=False)\n self.db.close()",
"def action_import(self):\n ctx = self._context\n \n data = base64.b64decode(self.data)\n file_input = cStringIO.StringIO(data)\n file_input.seek(0)\n reader_info = []\n if self.delimeter:\n delimeter = str(self.delimeter)\n else:\n delimeter = ','\n reader = csv.reader(file_input, delimiter=delimeter,\n lineterminator='\\r\\n')\n try:\n reader_info.extend(reader)\n except Exception:\n raise exceptions.Warning(_(\"Not a valid file!\"))\n keys = reader_info[0]",
"def importar4(self):\n self.set_session()\n fileinfo = self.request.files['archivo'][0]\n fname = fileinfo['filename']\n extn = os.path.splitext(fname)[1]\n cname = str(uuid.uuid4()) + extn\n fh = open(\"server/common/resources/uploads/\" + cname, 'wb')\n fh.write(fileinfo['body'])\n fh.close()\n if extn == '.xlsx':\n mee = self.manager4(self.db).import_excel(cname)\n self.respond(message=mee['message'], success=mee['success'])\n else:\n self.respond(message='Formato de Archivo no aceptado¡¡', success=False)\n self.db.close()",
"def action_import(self):\n ctx = self._context\n account_obj = self.env[\"account.account\"]\n import_obj = self.env['import.journal.entries.advanced']\n import_line_obj = self.env[\"journal.entries.csv.import\"]\n if 'active_id' in ctx:\n import_id = import_obj.browse(ctx['active_id'])\n if not self.data:\n raise exceptions.Warning(_(\"Necesitas seleccionar un archivo!\"))\n # Decode the file data\n data = base64.b64decode(self.data).decode('utf-8')\n file_input = StringIO(data)\n file_input.seek(0)\n reader_info = []\n if self.delimeter:\n delimeter = str(self.delimeter)\n else:\n delimeter = ','\n reader = csv.reader(file_input, delimiter=delimeter,\n lineterminator='\\r\\n')\n try:\n reader_info.extend(reader)\n except Exception:\n raise exceptions.Warning(_(\"Archivo no valido\"))\n keys = reader_info[0]\n # check if keys exist\n if not isinstance(keys, list) or ('cuenta' not in keys):\n raise exceptions.Warning(_(\"No se encuentran 'cuentas' contable en el archivo\"))\n del reader_info[0]\n values = {}\n actual_date = fields.Date.today()\n for i in range(len(reader_info)):\n val = {}\n field = reader_info[i]\n values = dict(zip(keys, field))\n account = False\n if 'cuenta' in values and values['cuenta']:\n account_id = account_obj.search([('code', '=', values['cuenta'])]) \n if account_id:\n account = account_id[0]\n else:\n account = account_id\n\n val[\"ref\"] = values[\"descripcion\"]\n val[\"document_number\"] = values[\"num_documento\"]\n val[\"document_date\"] = datetime.strptime(values[\"fecha\"] , \"%d-%m-%Y\")\n val['account_id'] = account.id\n val['parent_id'] = import_id.id\n val['debit'] = values['debito']\n val['credit'] = values['credito']\n val['processed'] = False\n validate = import_line_obj.create(val)\n if validate:\n if validate.account_id:\n validate.is_ok = True",
"def AgiImport(dirpath, file):\n objPath=dirpath+'\\\\'+file\n if os.path.exists(objPath)==False:\n print objPath\n return\n \n ## Open new template file ##\n template = rs.TemplateFile()\n cmd=\"-_New \"\n cmd+=template+\" \"\n rs.Command(cmd)\n \n \n cmd=\"-_Import \"\n cmd+='\"'+os.path.abspath(objPath)+'\"'+\" \"\n cmd+=\"IgnoreTextures=No \"\n cmd+=\"MapOBJToRhinoZ=Yes \"\n cmd+=\"_Enter \"\n rs.Command(cmd)\n \n rs.Command(\"SplitDisjointMesh \")\n \n meshes = rs.LastCreatedObjects()\n max=0\n keep=None\n for guid in meshes:\n mesh = rs.coercemesh(guid)\n count = mesh.Faces.Count\n if count > max:\n keep = guid\n max = count\n \n if keep:\n meshes.remove(keep)\n rs.DeleteObjects(meshes)\n \n rs.ZoomExtents(all=True)\n \n cmd=\"-_SaveAs \"\n cmd+=\"SaveTextures=Yes \"\n cmd+='\"'+os.path.abspath(objPath).replace(\".wrl\",\".3dm\")+'\"'+\" \"\n cmd+=\"_Enter \"\n rs.Command(cmd)\n rs.DocumentModified(False)\n Rhino.RhinoApp.Wait()\n Rhino.RhinoApp.Wait()",
"def importExternal(*args):\n goTo = pi.currentProject\n impFile = cmds.fileDialog2(fm=1, dir = goTo)[0]\n if impFile:\n cmds.file(impFile, i=True)",
"def importObject(portal, file_name):\n try:\n portal.manage_importObject(file_name)\n except:\n portal._p_jar = portal.Destination()._p_jar\n portal.manage_importObject(file_name)",
"def export(bento_tag: str, out_path: str) -> None: # type: ignore (not accessed)\n bento = bento_store.get(bento_tag)\n out_path = bento.export(out_path)\n logger.info(\"%s exported to %s.\", bento, out_path)",
"def action_import(self):\n ctx = self._context\n attachment_obj = self.env['ir.attachment']\n invoice_obj = self.env['account.invoice']\n storage = attachment_obj._storage()\n filestore = attachment_obj._filestore()\n file_gc = attachment_obj._file_gc()\n indir = self.name#+'/E-Faktur'\n files_in_dir = os.listdir(indir)\n in_dir = []\n for x in files_in_dir:\n r = open(indir+\"/\"+x,'rb').read().encode('base64')\n _logger.info(\"_read_file reading %s\", x)\n if len(x) == 67:\n #_logger.info(\"_read_file valid file efaktur %s\", x)\n faktur_pajak = x.split(\"-\")\n #SEARCH INVOICE YG SUDAH TERFALIDASI DAN ADA FAKTUR PAJAK\n invoice_ids = invoice_obj.search([('nomor_faktur_id','!=',None),('move_id','!=',None),('nomor_faktur_id.number','ilike',faktur_pajak[1][8:])])\n #CARI APAKAH SUDAH TERATTACHMENT DI SISTEM\n attachment_ids = attachment_obj.search([('datas','!=',r),('res_id','in',invoice_ids.ids),('res_model','=','account.invoice'),('name','=',faktur_pajak[1])])\n if not attachment_ids and invoice_ids:\n for invoice in invoice_ids:\n values = {\n 'res_model': 'account.invoice',\n 'company_id': 1,\n 'res_name': invoice.number,#NOMOR INVOICE\n 'datas_fname': x,#NAMA FILE\n 'type': 'binary',\n 'res_id': invoice.id,\n 'name': x,#faktur_pajak[1],\n 'mimetype': 'application/pdf',\n 'store_fname': 'E-Faktur/'+x,\n 'datas': r,\n }\n attachment_obj.create(values)\n _logger.info(\"_uploaded_file %s\", x)",
"def import_file(filename):\n if not os.path.exists(filename): return 0\n if zipfile.is_zipfile(filename):\n infp = zipfile.ZipFile(filename)\n elif tarfile.is_tarfile(filename):\n infp = tarfile.TarFile(filename)\n else: # regular file\n infp = RegFile(filename)\n name_list =infp.namelist()\n director = {}\n VALUES = {} \n if \"USERNAME\" in os.environ:\n VALUES[\"USER\"] = os.environ[\"USERNAME\"] # NameId\n if \"HOMEPATH\" in os.environ:\n VALUES[\"HOME\"] = 'C:' + os.sep + os.environ[\"HOMEPATH\"]\n if \"HOME\" in os.environ:\n VALUES[\"HOME\"] = os.environ[\"HOME\"]\n if \"USERPROFILE\" in os.environ:\n VALUES[\"HOME\"] = os.environ[\"USERPROFILE\"]\n globalspath, f = myro.globvars.__file__.rsplit(os.sep, 1)\n #print \"globalspath:\", globalspath\n myropath, f = globalspath.rsplit(os.sep, 1)\n #print \"myropath:\", myropath\n sitepath, f = myropath.rsplit(os.sep, 1)\n #print \"sitepath:\", sitepath\n myroparts = myropath.split(os.sep)\n pythonpath = myroparts[0] + os.sep + myroparts[1]\n VALUES[\"DESKTOP\"] = VALUES[\"HOME\"] + os.sep + \"DESKTOP\" \n VALUES[\"PYTHONDIR\"] = pythonpath\n VALUES[\"MYRODIR\"] = myropath\n VALUES[\"PYTHONSITEDIR\"] = sitepath\n VALUES[\"PYTHONDIR\"] = pythonpath\n install_count = 0\n if \"MANIFEST\" in name_list:\n manifest = infp.read(\"MANIFEST\")\n lines = manifest.split(\"\\n\")\n for line in lines:\n if \":\" in line:\n f, dest = map(string.strip, line.strip().split(\":\"))\n director[f] = dest % VALUES\n for name in name_list:\n if name == \"MANIFEST\": continue\n contents = infp.read(name)\n print \" writing:\", director[name], \"...\"\n # first write to temp file:\n try:\n outfp = open(director[name], \"wb\")\n except:\n makePath(director[name])\n outfp = open(director[name], \"wb\")\n outfp.write(contents)\n outfp.close()\n install_count += 1\n else:\n print \" ERROR: no MANIFEST in Myro upgrade; skipping\"\n infp.close()\n return install_count",
"def import_from_file(jamsite, source='jammers.csv', fieldnames=None):\n\t# import jammers.csv\n\twith open(source) as csvfile:\n\t\tjamsite.mergeinsert( import_jammers(csvfile, fieldnames=fieldnames) )",
"def importar5(self):\n self.set_session()\n fileinfo = self.request.files['archivo'][0]\n fname = fileinfo['filename']\n extn = os.path.splitext(fname)[1]\n cname = str(uuid.uuid4()) + extn\n fh = open(\"server/common/resources/uploads/\" + cname, 'wb')\n fh.write(fileinfo['body'])\n fh.close()\n if extn == '.xlsx':\n mee = self.manager5(self.db).import_excel(cname)\n self.respond(message=mee['message'], success=mee['success'])\n else:\n self.respond(message='Formato de Archivo no aceptado¡¡', success=False)\n self.db.close()",
"def doImport(self,textFile):\n self.loadText(textFile)\n self.getBooks()\n #self.copyBooks()\n self.genLibData()\n self.genLibCells()\n self.sortRecords()",
"def _CMD_IMPORT(self, file_name):\n # reset inspector:\n # self.inspector = DataInspectorRecord()\n\n ext = file_name.split('.')[-1]\n if ext == 'mat':\n # self.model.from_json_dict(buff)\n self.model.from_mat_file(file_name)\n\n elif ext == 'json':\n buff = ''\n with open(file_name, 'rb') as f:\n buff = f.read()\n model = json.loads(buff)\n self.model.from_json_dict(model)\n\n else:\n raise DataExplorerError('Unsupported file format: {}'.format(ext))\n\n # update initial selection - first row:\n if len(self.model.data_list) > 0:\n self.handle_row_select([self.model.data_list[0]])",
"def importIntoFile(filename, outputFile):\n\t#grab contents of current file\n\tcurrFile = open(filename).read().splitlines()\n\n\t#export file\n\twFile = open(outputFile, 'w+')\n\n\tprint \"\\tImporting into \" + outputFile + \":\\n\\t\\t\",\n\n\t#parse and write\n\tskipWrite = False\n\tspaceAppend = \"\"\n\tfor line in currFile:\n\t\tif line.find(importStrL) != -1:\n\t\t\tskipWrite = True\n\t\t\twFile.write(line)\n\t\t\t#handling indentation and space consistency\n\t\t\tif re.match(r\"\\s+\", line) == None:\n\t\t\t\tspaceAppend = \"\"\n\t\t\telse:\n\t\t\t\tspaceAppend = re.match(r\"\\s+\", line).group()\n\t\t\tline = line.replace(importStrL, \"\").replace(importStrR, \"\").strip()\n\t\t\twFile.write('\\n')\n\t\t\t#import lines, matching indentation\n\t\t\tfor importLine in cactusImports[line]:\n\t\t\t\twFile.write(spaceAppend + importLine + '\\n')\n\t\t\tprint line,\n\t\telse:\n\t\t\tif line.find(endStr) != -1:\n\t\t\t\tskipWrite = False\n\t\t\tif not skipWrite:\n\t\t\t\twFile.write(line+'\\n')\n\tprint '\\n'\n\twFile.close()",
"def manual_import_genesis(self, path):\n dtu = DtuLoader.DtuLoader(path)\n fbx_path = dtu.get_fbx_path()\n self.genesis_import(fbx_path, dtu)",
"def file_import(self):\r\n\r\n try:\r\n self.process_file_import()\r\n except InputError as ex:\r\n print(ex)\r\n self.file_import()",
"def _add_demo_import(self):\r\n # add out completed one\r\n q = ImportQueue(\r\n username=u'admin',\r\n file_path=u'testing.txt'\r\n )\r\n DBSession.add(q)\r\n transaction.commit()\r\n return"
] | [
"0.6098828",
"0.6013871",
"0.5977933",
"0.58846664",
"0.5881364",
"0.5841038",
"0.57228506",
"0.56769913",
"0.5621053",
"0.56173635",
"0.5589252",
"0.55258936",
"0.5518586",
"0.5502786",
"0.54741645",
"0.5466958",
"0.545687",
"0.5419559",
"0.5372282",
"0.5365402",
"0.53363955",
"0.53265685",
"0.53158617",
"0.5307871",
"0.52951664",
"0.5284183",
"0.5268203",
"0.5259921",
"0.5199496",
"0.51926446"
] | 0.6817035 | 0 |
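A minimal Python sketch of the CRC-based de-duplication idea used in the long Blender-import snippet above: before extracting a zip member, compare its stored CRC32 and size against files already on disk and reuse a match instead of writing a duplicate. This assumes a flat destination directory and omits the snippet's persistent hash index (list.sfv); the helper name is hypothetical.

import os
import zipfile
import zlib

def extract_dedup(archive: zipfile.ZipFile, member: str, dest_dir: str) -> str:
    """Extract `member` into `dest_dir`, reusing an identical existing file when possible."""
    info = archive.getinfo(member)          # stored CRC32 and uncompressed size
    for name in os.listdir(dest_dir):
        candidate = os.path.join(dest_dir, name)
        if os.path.isfile(candidate) and os.path.getsize(candidate) == info.file_size:
            with open(candidate, "rb") as f:
                if zlib.crc32(f.read()) == info.CRC:   # same size and CRC: treat as identical
                    return candidate
    return archive.extract(member, dest_dir)            # no match: extract normally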
Pull Bento from a yatai server. | def pull(bento_tag: str, force: bool) -> None: # type: ignore (not accessed)
yatai_client.pull_bento(bento_tag, force=force) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def _pull(self) -> None:\n raise NotImplementedError()",
"def pull(self):",
"def _pull(self) -> None:\n raise NotImplementedError() # pragma: no cover",
"def pull_from_postmaster(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Destiny2/Actions/Items/PullFromPostmaster/\"))",
"def get(self, bento_name, bento_version):",
"def make_pull(db,url):\n result = db.product_mstator.find_one({\"url\":url})\n return result",
"def fetch_pull(ref):\n origin.fetch(tags=True)\n repo.git.checkout(\"{}\".format(ref))\n repo.git.pull(\"origin\", \"{}\".format(ref))",
"def pull(self):\n origin = self.git_repo.remotes.origin\n origin.pull()",
"def pull(self):\n data = api.get(endpoint=self.endpoint, resource_id=self.id)\n self.__init__(**data)",
"async def _pull_now(self) -> None:\n raise NotImplementedError()",
"def pull(self, *arg, **kwds):\n pass",
"def pull_from_slave():\n print(\"Pulling from slave\")\n r = req.get(f\"{SLAVE_URL}/prepare_pull\")\n if r.status_code != req.codes.ok:\n print(\"Something wrong with slave on prepare pull\")\n print(r.text)\n return False\n print(\"Prepared\")\n try:\n for tname in TABLES:\n with open(f'{tname}.db', 'wb') as f:\n print(f\"Pulling {tname}\")\n r = req.post(f\"{SLAVE_URL}/pull_db/{tname}\", data={'key': HMA_KEY})\n if r.status_code != req.codes.ok:\n print(\"Something went wrong\")\n print(r.text)\n return False\n f.write(r.content)\n return True\n except IOError:\n print(\"IO ERROR\")\n return False",
"def pull(self):\n data = api.get(endpoint=self.endpoint, resource_id=self.slug)\n self.__init__(**data)",
"def pull():\n _with_deploy_env(['git pull'])",
"def pull(self):\n run('git', 'pull', 'origin', 'master')",
"def pull(self, remote, branch, *args):\n return self.cmd('pull', remote, branch, *args)",
"def pull(self, *args, **kwargs) -> Any:\n raise NotImplementedError",
"async def async_update(self):\n try:\n self._data = requests.get(self._build_url(), timeout=10, headers={'accept-encoding': None}).json()\n _LOGGER.debug(\"TOON fetched data = %s\", self._data)\n except (requests.exceptions.RequestException) as error:\n _LOGGER.error(\"Unable to connect to TOON: %s\", error)\n self._data = None",
"def pull(self, verbose=True):\n fetch_cmd = [\"git\", \"fetch\"]\n if not verbose:\n fetch_cmd.append(\"-q\")\n subprocess.call(fetch_cmd, cwd=self.path)\n checkout_cmd = [\"git\", \"checkout\", \"origin/master\", \"-B\", \"master\"]\n if not verbose:\n checkout_cmd.append(\"-q\")\n return subprocess.call(checkout_cmd, cwd=self.path)",
"def remote_pull(*keys):",
"def test_pull_from_origin(tmpdir):\n gitwrapper.clone_from('git://github.com/Tinche/bower-cache', tmpdir)\n gitwrapper.pull_from_origin(tmpdir)",
"def bzpull(request, target):\n if target.startswith(\"sha1:\"):\n target = target[5:]\n url = \"http://bitzi.com/lookup/%s?v=tventtxt\" % target\n tventtxt = urllib.urlopen(url)\n tventdict = {}\n targets_to_update = set()\n count = 0\n text = \"\"\n try:\n for line in tventtxt:\n text += '\\n'\n line = line.strip()\n text += line\n if not line:\n if 'user' in tventdict and 'target_id' in tventdict and 'when' in tventdict:\n tvent = Tvent()\n tvent.when = tventdict['when']\n tvent.user = tventdict['user']\n target, created = Target.objects.get_or_create(id=tventdict['target_id'])\n tvent.target = target\n targets_to_update.add(target.id)\n tvent.tagtext = tventdict['tagtext']\n tvent.save()\n count += 1\n else:\n # error; required field not present\n text += '\\nERROR: incomplete tvent ' + str(tventdict)\n tventdict = {}\n continue\n if line.startswith(\"=\"):\n tventdict['when'] = line[1:]\n continue\n if line.startswith(\"~\"):\n tventdict['user'] = line[1:]\n continue\n if line.startswith(\"@\"):\n tventdict['target_id'] = line[1:]\n continue\n tventdict['tagtext'] = tventdict.get('tagtext','') + line + '\\n'\n if 'user' in tventdict and 'target_id' in tventdict and 'when' in tventdict:\n # TODO: reduce duplication with above\n tvent = Tvent()\n tvent.when = tventdict['when']\n tvent.user = tventdict['user']\n target = Target.objects.get_or_create(id=tventdict['target_id'])\n tvent.target = target\n targets_to_update.add(target.id)\n # TODO: cleanup tags here? \n tvent.save()\n count += 1\n else:\n # error; required field not present\n text += '\\nERROR: incomplete tvent ' + str(tventdict)\n finally:\n tventtxt.close()\n # trigger update of any possibly-changed Target summaries\n for id in targets_to_update:\n Target.objects.get(id=id).updateFromTvents()\n return HttpResponse('Pulled %d tvents from: %s\\n %s' % (count, url, text), mimetype='text/plain')",
"def pull(self):\n raise NotImplementedError()",
"def pull(args):\n cache = set(args.remote_cache).union(set(args.cache))\n for path in sorted(cache):\n if not os.path.exists(os.path.join(args.base, path)) and remote_exists(args.sftp, os.path.join(args.remote_base, path)):\n print('pull: {}'.format(path))\n ensure_local(os.path.dirname(os.path.join(args.base, path)))\n args.sftp.get(\n os.path.join(args.remote_base, path),\n os.path.join(args.base, path)\n )\n args.cache.append(path)\n args.update = True\n return",
"def pull():\n am = AccountManager(get_settings())\n am.pull_all()",
"def d_ploy():\n\tlocal(\"git push origin --all\")\n\twith cd(LIVE_ROOT):\n\t\trun(\"git pull\")",
"def local_bonds_prices():\n url1 = \"https://api.invertironline.com/token\"\n\n data = {\n \"username\": usuario,\n \"password\": password,\n \"grant_type\": \"password\" \n }\n response = requests.post(url1, data=data)\n if response.status_code == 200:\n content = response.text\n access_key = token_key(content)\n\n url2 = f'https://api.invertironline.com/api/v2/Cotizaciones/Bonos/Merval/argentina'\n datos = requests.get(url2, headers={\n 'Authorization': 'Bearer '+access_key\n })\n datos = json.loads(datos.text)\n datos = datos['titulos']\n datos = clean_assets(datos)\n return datos",
"def pull1(repo, **kwargs):\n ret = do_pull(repo, \"topology.virl\")\n if not ret:\n exit(1)",
"def _fetch_remote(self, dqueue, server, url, timeout):\n try:\n req = requests.get(url, timeout=timeout)\n if req.status_code == 200:\n try:\n resp_params = parse_sync_response(req.text)\n dqueue.put({'server': server, 'params': resp_params})\n except ValueError as err:\n logger.error('Failed to parse response of %s: %s', server, err)\n else:\n logger.warning('Recieved status code %s for %s', req.status_code, url)\n except Exception as err:\n logger.warning('Failed to retrieve %s: %s', url, err)",
"def download_stewicombo_from_remote(name):\n meta = set_stewicombo_meta(name, category='')\n log.info(f'attempting download of {name} from {paths.remote_path}')\n download_from_remote(meta, paths)"
] | [
"0.6253494",
"0.61232203",
"0.59411526",
"0.5814772",
"0.5765839",
"0.5646557",
"0.55963373",
"0.55957717",
"0.5572075",
"0.55645216",
"0.5557959",
"0.5454153",
"0.5417521",
"0.54132676",
"0.5413063",
"0.53649527",
"0.5361535",
"0.5349794",
"0.53326946",
"0.53325665",
"0.5319298",
"0.53111213",
"0.5273583",
"0.5221559",
"0.52046984",
"0.51962155",
"0.5192047",
"0.51849926",
"0.5144103",
"0.5126676"
] | 0.72192776 | 0 |
Push Bento to a yatai server. | def push(bento_tag: str, force: bool, threads: int) -> None: # type: ignore (not accessed)
bento_obj = bento_store.get(bento_tag)
if not bento_obj:
raise click.ClickException(f"Bento {bento_tag} not found in local store")
yatai_client.push_bento(bento_obj, force=force, threads=threads) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _push_to_server(self) -> None:\n pass",
"def push(self, *args, **kwargs):\n pass",
"def push(self, obj):\n pass",
"def remote_push(self, pNamespace):",
"def push(self):\n origin = self.git_repo.remotes.origin\n origin.push()",
"def _push(self):\n push_cmds = self.vcs.push_commands()\n if not push_cmds:\n return\n if utils.ask(\"OK to push commits to the server?\"):\n for push_cmd in push_cmds:\n output = execute_command(push_cmd)\n logger.info(output)",
"def push(self):\n out, err, code = self.command( [\"git\", \"push\"], self.directory )",
"def push(self, obj):\r\n request = http.Request('POST', self.get_push_url(), obj)\r\n return request, parsers.parse_json",
"def push():\n branch = git.current_branch().name\n shell.run('git push -u origin {}'.format(branch))",
"def push():\n local('hg push jvacx')",
"def push_data(self, data):\n self.incoming.write(data)",
"def push(ctx):\n dufl_root = ctx.obj['dufl_root']\n git = Git(ctx.obj.get('git', '/usr/bin/git'), dufl_root)\n git.run('push', 'origin', git.working_branch())",
"def to_server(self, o):\n assert type(o) == str\n\n # add to queue\n self.toserverqueue.put(o, block=False)\n\n # send now, if appropriate\n if self.buffer_tx==False:\n self.periodicTimer.fireNow()",
"def push(self):\n return False",
"def push(self, remote, branch, *args):\n return self.cmd('push', remote, branch, *args)",
"def push(self, command):\n \n if len(command) > 1: \n if p.location == a.location:\n for item in p.location.objects:\n if command[1] == item.name:\n item.location.objects.remove(a)\n item.location = seven \n seven.objects.append(a)\n print('You pushed ' + item.name + '!')\n else:\n print(\"Push who?\")",
"def push(self):\n if self.forward:\n git = self.repo.git\n try:\n git.push()\n self.forward = \"pushed\"\n except:\n self.forward = \"push error - \"+self.forward",
"def __gitPush(self):\n self.vcs.gitPush(self.project.getProjectPath())",
"def push(self, item: Any) -> None:\n # TODO: Implement this method\n ...",
"def push_write(self, s):\n ...",
"def push ():\n\n tagname = get_tag (comp_versions, 'ACE')\n\n if opts.push:\n if opts.take_action:\n vprint (\"Pushing ACE_TAO\", opts.ace_tao_branch, \"to origin\")\n ex (\"cd $DOC_ROOT/ACE_TAO && git push origin \" + opts.ace_tao_branch)\n\n vprint (\"Pushing tag %s on ACE_TAO\" % (tagname))\n ex (\"cd $DOC_ROOT/ACE_TAO && git push origin tag \" + tagname)\n\n vprint (\"Pushing tag %s on MPC\" % (tagname))\n ex (\"cd $DOC_ROOT/MPC && git push origin tag \" + tagname)\n\n # Push release branches\n latest_branch_helper (push_latest_branch, opts.release_type)\n else:\n vprint (\"Pushing tag %s on ACE_TAO\" % (tagname))\n vprint (\"Pushing tag %s on MPC\" % (tagname))\n print (\"Pushing tags:\\n\")\n print (\"Pushing tag \" + tagname + \"\\n\")",
"def push(self, **kwargs):\n return _taskpipeoperation(self,'push', **kwargs)",
"def push(self, x):\n self.queue[self.tag].put(x)",
"def push(self):\n self.stack.append(self.save())",
"def push(self, item):\n pass",
"def push(self, *args, **kwargs):\n self.queue.put((args, kwargs))",
"def push_commits(self, verbose=True):\n # The subprocess will return a non-zero exit code even if it succeeded.\n # Check its output to determine whether it worked.\n push_proc = subprocess.run(\n [\"git\", \"push\"],\n cwd=self.path,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n universal_newlines=True,\n )\n if \"updated in conservator\" not in push_proc.stdout:\n if \"Everything up-to-date\" in push_proc.stdout:\n logger.warning(push_proc.stdout)\n else:\n logger.error(\n \"Server did not accept changes to index.json:\\n%s\", push_proc.stdout\n )\n raise RuntimeError(\"Failed to push changes to index.json\")\n self.pull(verbose)",
"def d_ploy():\n\tlocal(\"git push origin --all\")\n\twith cd(LIVE_ROOT):\n\t\trun(\"git pull\")",
"def post(self):\n text = 'HELLO from socnet API Server!'\n return push_to_mattermost(text)",
"def push(args):\n if args.type == 'ssh':\n cache = set(args.remote_cache).union(set(args.cache))\n for path in sorted(cache):\n if os.path.exists(os.path.join(args.base, path)) and not remote_exists(args.sftp, os.path.join(args.remote_base, path)):\n print('push: {}'.format(path))\n ensure_remote(args.sftp, os.path.dirname(os.path.join(args.remote_base, path)))\n args.sftp.put(\n os.path.join(args.base, path),\n os.path.join(args.remote_base, path)\n )\n args.remote_cache.append(path)\n args.remote_update = True\n elif args.type == 's3':\n raise NotImplementedError('s3:// remote type not yet supported!')\n elif args.type == 'gs':\n raise NotImplementedError('gs:// remote type not yet supported!')\n return"
] | [
"0.71998096",
"0.6480765",
"0.5861547",
"0.5851244",
"0.5771112",
"0.575511",
"0.5752704",
"0.57498515",
"0.568986",
"0.564071",
"0.56019354",
"0.5566268",
"0.5532686",
"0.5483675",
"0.54428613",
"0.542945",
"0.5403216",
"0.5375344",
"0.5329458",
"0.53241456",
"0.53143793",
"0.53034",
"0.5295266",
"0.52948445",
"0.5287468",
"0.5282802",
"0.52817434",
"0.52816194",
"0.52518886",
"0.52261347"
] | 0.70428616 | 1 |
Build a new Bento from current directory. | def build(build_ctx: str, bentofile: str, version: str) -> None: # type: ignore (not accessed)
if sys.path[0] != build_ctx:
sys.path.insert(0, build_ctx)
build_bentofile(bentofile, build_ctx=build_ctx, version=version) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build(root):",
"def build():",
"def makeProject(self, version, baseDirectory=None):\n if baseDirectory is None:\n baseDirectory = FilePath(self.mktemp())\n baseDirectory.createDirectory()\n segments = version.package.split('.')\n directory = baseDirectory\n for segment in segments:\n directory = directory.child(segment)\n if not directory.exists():\n directory.createDirectory()\n directory.child('__init__.py').setContent('')\n directory.child('topfiles').createDirectory()\n directory.child('topfiles').child('README').setContent(version.base())\n replaceProjectVersion(\n directory.child('_version.py').path, version)\n return Project(directory)",
"def _create_checkout(self):\n parent_git_dir = os.path.join(self._parent_repo, self._run_git_command(\n ['rev-parse', '--git-dir']).strip())\n self._workdir = tempfile.mkdtemp(prefix='drover_%s_' % self._branch)\n logging.debug('Creating checkout in %s', self._workdir)\n git_dir = os.path.join(self._workdir, '.git')\n git_common.make_workdir_common(parent_git_dir, git_dir, self.FILES_TO_LINK,\n self.FILES_TO_COPY, mk_symlink)\n self._run_git_command(['config', 'core.sparsecheckout', 'true'])\n with open(os.path.join(git_dir, 'info', 'sparse-checkout'), 'w') as f:\n f.write('/codereview.settings')\n\n branch_name = os.path.split(self._workdir)[-1]\n self._run_git_command(['checkout', '-b', branch_name, self._branch_ref])\n self._branch_name = branch_name",
"def makeProject(self, version, baseDirectory=None):\n if baseDirectory is None:\n baseDirectory = FilePath(self.mktemp())\n segments = version[0].split(\".\")\n directory = baseDirectory\n for segment in segments:\n directory = directory.child(segment)\n if not directory.exists():\n directory.createDirectory()\n directory.child(\"__init__.py\").setContent(b\"\")\n directory.child(\"newsfragments\").createDirectory()\n directory.child(\"_version.py\").setContent(genVersion(*version).encode())\n return Project(directory)",
"def __init__(self, root_dir, relpath, must_exist=True):\r\n\r\n path = os.path.abspath(os.path.join(root_dir, relpath))\r\n buildfile = os.path.join(path, BuildFile._CANONICAL_NAME) if os.path.isdir(path) else path\r\n\r\n if os.path.isdir(buildfile):\r\n raise IOError(\"%s is a directory\" % buildfile)\r\n\r\n if must_exist:\r\n if not os.path.exists(buildfile):\r\n raise IOError(\"BUILD file does not exist at: %s\" % buildfile)\r\n\r\n if not BuildFile._is_buildfile_name(os.path.basename(buildfile)):\r\n raise IOError(\"%s is not a BUILD file\" % buildfile)\r\n\r\n if not os.path.exists(buildfile):\r\n raise IOError(\"BUILD file does not exist at: %s\" % buildfile)\r\n\r\n self.root_dir = os.path.realpath(root_dir)\r\n self.full_path = os.path.realpath(buildfile)\r\n\r\n self.name = os.path.basename(self.full_path)\r\n self.parent_path = os.path.dirname(self.full_path)\r\n\r\n self._bytecode_path = os.path.join(self.parent_path, '.%s.%s.pyc' % (\r\n self.name, PythonIdentity.get()))\r\n\r\n self.relpath = os.path.relpath(self.full_path, self.root_dir)\r\n self.canonical_relpath = os.path.join(os.path.dirname(self.relpath), BuildFile._CANONICAL_NAME)",
"def bundle(self):\n\n try:\n self.build_directory.mkdir(parents=True)\n except FileExistsError:\n logger.warning('Directory already exists: %s', self.build_directory)\n decision = input(\n f'{self.build_directory} already exists. Overwrite? Y/[N]: '\n )\n if decision.strip().upper() == 'Y':\n logger.info('Deleting old build directory: %s', self.build_directory)\n shutil.rmtree(self.build_directory)\n self.build_directory.mkdir(parents=True)\n else:\n return\n\n with cd(self.app_directory):\n self._install_dependencies()\n self._handle_supplemental_data()\n self._cleanup_files()\n if self.make_zip:\n self._zip_files()",
"def temp(cls, basedir=None):\r\n context = cls(BuildFile(get_buildroot(), basedir or 'BUILD.temp', must_exist=False))\r\n with cls.activate(context):\r\n yield",
"def build(config):",
"def build(_):",
"def create(self):\n if os.path.isdir(self.repodir):\n if os.listdir(self.repodir):\n raise EmtError('%s is not empty' % self.repodir)\n else:\n os.makedirs(self.repodir)\n self.git_cmd('init')\n self.initialized = True",
"def build(target_dir):\n prepare_demo_site(target_dir)\n\n patch_config(\n target_dir, (\"# CREATE_FULL_ARCHIVES = False\", \"CREATE_FULL_ARCHIVES = True\")\n )\n\n with cd(target_dir):\n __main__.main([\"build\"])",
"def move_files_into_build():\n build_root = os.path.join(template_path, 'build')\n create_python_package(build_root)\n\n build_buildbot = os.path.join(template_path, 'build', 'buildbot')\n create_python_package(build_buildbot)\n\n pythonify('runtests', [], ['build']) \n pythonify('master.cfg', ['buildbot'], ['build', 'buildbot'])",
"def build(working_directory=None, args=None):\n from .buildme import main\n if args is None:\n args = []\n return main(working_directory, args)",
"def Build(self, out_file):\n raise NotImplementedError",
"def clone(self):\n out, err, code = self.command( [\"git\", \"clone\", self.repo] )\n\n # find the directory into which the\n self.directory = self.path\n for path in os.listdir(self.path):\n self.directory = os.path.join(self.path,path)\n break",
"def build_for_dir(cls, dir_path):\n return cls(etau.parse_dir_pattern(dir_path)[0])",
"def build_code(self):\n if not path.isfile(path.join(self.tmpdir, 'requirements.txt')):\n return\n\n oldpwd = os.getcwd()\n os.chdir(self.tmpdir)\n\n call('pip install --requirement requirements.txt --target .'.split(' '))\n\n # We need to create a __init__.py per code directory without it.\n # This is required to not create a PYTHONPATH with all directories.\n #\n for (current_dir, directories, files) in os.walk('.', topdown=False):\n if current_dir.endswith('.dist-info'):\n # This is a python metadirectory, we can skip it\n continue\n namespacer = path.join(current_dir, '__init__.py')\n if not path.isfile(namespacer):\n print(\"Creating file {0}\".format(namespacer))\n with open(namespacer, 'w') as nmf:\n nmf.write('# File Generated by lambdamanager packager')\n\n os.chdir(oldpwd)",
"def build(self, conanfile):\n app = ConanApp(self._conan_api.cache_folder)\n conanfile.folders.set_base_package(conanfile.folders.base_build)\n conanfile.folders.set_base_pkg_metadata(os.path.join(conanfile.build_folder, \"metadata\"))\n run_build_method(conanfile, app.hook_manager)",
"def _create_builder(self, tmp_dir):\n return cifuzz.InternalGithubBuilder(self.PROJECT_NAME,\n self.PROJECT_REPO_NAME, tmp_dir,\n self.SANITIZER, self.COMMIT_SHA,\n self.PR_REF)",
"def build(path=None, output=None):\n conf.load(path)\n cache = Cache()\n if cache.processing_errors():\n for file_name, error in cache.processing_errors():\n message = \"error processing source file '%s' - %s\"\n logger.error(message % (file_name, error))\n if output:\n conf.set('build_path', output)\n logger.info('build directory: ' + conf.get('build_path'))\n for builder in builders.order():\n builder(cache)",
"def make(source, dependentOn, builder, buildNew=False, *builderParams):\n # check if pickle file exists\n if not os.path.exists(source):\n buildNew = True\n # check date\n # if source is newer\n if not buildNew and os.path.getmtime(source) > os.path.getmtime(dependentOn):\n print(\"load source: \", os.path.basename(source), \"...\", end=' ')\n target = load(open(source, 'rb'))\n else:\n print(\"build source: \", os.path.basename(source), \"...\", end=' ')\n target = builder(*builderParams)\n # pickle the target\n dump(target, open(source, 'wb'), 1)\n print(\"Done!\")\n return target",
"def fork(args):\n subprocess.check_call([\"git\", \"config\", \"--global\",\n \"--add\", \"safe.directory\", args.src])\n head = subprocess.check_output([\"git\", \"rev-parse\", args.rev], cwd=args.src).strip()\n obj_dir = subprocess.check_output([\"git\", \"rev-parse\", \"--git-path\", \"objects\"],\n cwd=args.src)\n obj_dir = os.path.join(args.src, obj_dir.decode())\n\n # Create an empty git repository. Native clone is too slow because the\n # typical gerrit source repo has a huge number of refs and git has to\n # inspect all of them. This approach lets us ignore all of that to only\n # use the rev we were asked to build.\n os.mkdir(\"/build/%s\" %(args.project))\n os.chdir(\"/build/%s\" %(args.project))\n subprocess.check_call([\"git\", \"init\", \"-q\"])\n\n # Setup alternates so we can see all the objects in the source repo\n with open(\".git/objects/info/alternates\", \"w\") as F:\n F.write(obj_dir)\n F.write(\"\\n\")\n\n # Create a branch using the only remote HEAD we care about\n subprocess.check_call([\"git\", \"checkout\", \"-q\", \"-b\", \"build\", \"--no-progress\", head])\n subprocess.check_call([\"git\", \"--no-pager\", \"log\", \"--oneline\", \"-n1\"])\n\n if args.project == \"kernel\":\n copy(\"%s/.config\" %(args.src), \"/build/%s\" %(args.project))\n\n args.src = \"/build/%s\" %(args.project)\n args.rev = head",
"def clone():\n require('PROJECT_NAME')\n require('PROJECT_REPO')\n require('MERCURIAL_BIN')\n\n # Create the \"apps\" directory if it does not exist.\n run('mkdir -p {}'.format(utils.home('apps')))\n\n if files.exists(utils.home('apps', env.PROJECT_NAME)):\n delete()\n\n with cd(utils.home('apps')):\n run('{0} clone {1} {2}'.format(env.MERCURIAL_BIN,\n env.PROJECT_REPO,\n env.PROJECT_NAME))",
"def pushd(cls, new_dir):\n previous_dir = os.getcwd()\n try:\n new_ab_dir = None\n if os.path.isabs(new_dir):\n new_ab_dir = new_dir\n else:\n new_ab_dir = os.path.join(previous_dir, new_dir)\n # Use absolute path to show it on FileNotFoundError message.\n cls.cd(new_ab_dir)\n yield\n finally:\n cls.cd(previous_dir)",
"def _do_action_bento_setup(self):\n self._exit_if_bento_still_running()\n\n cmd = \"rm -rf {bento_dir}; tar -zxvf {bento_tar}\".format(\n bento_dir=self.bento_home,\n bento_tar=self.bento_tgz)\n print(run(cmd))\n\n for command_suffix in [\"-env.sh\", \"\"]:\n\n kiji_env = os.path.join(self.bento_home, \"bin\", \"kiji\" + command_suffix)\n bento_env = os.path.join(self.bento_home, \"bin\", \"bento\" + command_suffix)\n if not os.path.isfile(kiji_env):\n assert os.path.isfile(bento_env)\n cmd = 'cp {bento_env} {kiji_env}'.format(\n bento_env=bento_env,\n kiji_env=kiji_env)\n run(cmd)\n\n cmd = \"cd {bento_dir}; source bin/kiji-env.sh; bento start\".format(\n bento_dir=self.bento_home,\n )\n print(run(cmd))\n assert os.path.isdir(self.bento_home)",
"def init_structure(self):\n dest = os.path.join(self.cwd, 'build', 'debian')\n self.mkdir_p(dest)\n struct = os.path.join(dest, self.cwd)\n self.mkdir_p(struct)\n# copytree_src = os.path.join(self.cwd, 'DEBIAN')\n# self.copytree(copytree_src, dest, symlinks=False, ignore=None)\n\n new_dest = os.path.join(dest, self.cwd[1:])\n self.copytree(\n self.cwd,\n new_dest,\n symlinks=False,\n ignore=self.ignore\n )",
"def build(self):\n self.puts(colored.blue(\"Building project...\"))\n\n if os.path.exists(self.build_path):\n shutil.rmtree(self.build_path)\n os.makedirs(self.build_path)\n\n with indent(2):\n self._reset_build_sequence_id()\n self._build_pre_project_template()\n self._build_project_template()\n self._build_pre_resources_template()\n self._build_resources_template()\n self._build_post_resources_template()",
"def build(self) -> None:",
"def build(session: nox.Session) -> None:\n\n dist_dir = DIR.joinpath(\"dist\")\n if dist_dir.exists():\n shutil.rmtree(dist_dir)\n\n session.install(\".[dev]\")\n session.run(\"flit\", \"build\")"
] | [
"0.59951997",
"0.5941467",
"0.58681095",
"0.5856485",
"0.57614243",
"0.5617354",
"0.5611247",
"0.5596623",
"0.558593",
"0.5584332",
"0.5543917",
"0.5542992",
"0.54703754",
"0.544997",
"0.544899",
"0.54458034",
"0.5438347",
"0.54342854",
"0.54192024",
"0.5414548",
"0.5387722",
"0.53791815",
"0.5356261",
"0.53389066",
"0.5333958",
"0.5315344",
"0.5293159",
"0.5242",
"0.5220701",
"0.5203907"
] | 0.7293197 | 0 |
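The three records above appear to be the BentoML CLI entry points for build, push, and pull. A minimal sketch of chaining them from Python through the CLI; it assumes the `bentoml` executable is on PATH and a Yatai remote is already configured, and the wrapper names are hypothetical.

import subprocess

def release(build_ctx: str, bento_tag: str) -> None:
    """Build a Bento from a build context, then push the tagged result to the remote store."""
    subprocess.run(["bentoml", "build", build_ctx], check=True)   # assumes build_ctx is the positional argument
    subprocess.run(["bentoml", "push", bento_tag], check=True)

def fetch(bento_tag: str) -> None:
    """Pull a Bento from the remote store into the local Bento store."""
    subprocess.run(["bentoml", "pull", bento_tag], check=True)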
Set the value. (And calls the base class) This will also check for Options to set the bools. FAULTS_ACTIVE FAULTS_CURRENT >>> BIT_FAULT_PROBE = 0 >>> BIT_FAULT_OVERTEMP = 1 >>> BIT_FAULT_PANEL_OPEN = 2 >>> BIT_FAULT_HIGH_VOLTAGE = 3 >>> BIT_FAULT_RAM_CRC = 4 >>> BIT_FAULT_EEPROM_CRC = 5 >>> BIT_FAULT_GPIO_ERROR = 6 >>> BIT_FAULT_LTFAULT_ERROR = 7 >>> BIT_FAULT_TRIGGER_ERROR = 8 >>> BIT_FAULT_HARDWARE_EXC = 9 >>> BIT_FAULT_TRIGGER_GLITCH = 10 >>> BIT_FAULT_OVERVOLTAGE = 11 >>> BIT_FAULT_TEMP_SENSOR = 12 | def set_value(self, item, value):
super(t_16_Bit_Options, self).set_value(item, value)
if(item == t_16_Bit_Options.FAULT_ACTIVE):
self.set_bools(value, self.faults_current, t_16_Bit_Options.BIT_FAULT_MAX )
if(item == t_16_Bit_Options.FAULT_LATCHED):
self.set_bools(value, self.faults_latched, t_16_Bit_Options.BIT_FAULT_MAX ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set(self, value): # interface for BlueSky plans\n if str(value).lower() not in (\"fly\", \"taxi\", \"return\"):\n msg = \"value should be either Taxi, Fly, or Return.\"\n msg + \" received \" + str(value)\n raise ValueError(msg)\n\n if self.busy.value:\n raise RuntimeError(\"spin is operating\")\n\n status = DeviceStatus(self)\n \n def action():\n \"\"\"the real action of ``set()`` is here\"\"\"\n if str(value).lower() == \"taxi\":\n self.taxi()\n elif str(value).lower() == \"fly\":\n self.pre_fly()\n self.fly()\n self.post_fly()\n elif str(value).lower() == \"return\":\n self.motor.move(self.return_position)\n\n def run_and_wait():\n \"\"\"handle the ``action()`` in a thread\"\"\"\n self.busy.put(True)\n action()\n self.busy.put(False)\n status._finished(success=True)\n \n threading.Thread(target=run_and_wait, daemon=True).start()\n return status",
"def set_value(self, item, value):\n super(t_8_Bit_Options, self).set_value(item, value)\n\n if(item == t_8_Bit_Options.BOOLEAN_CONFIG_1):\n self.set_bools(value, self.bools, t_8_Bit_Options.BIT_MAX)",
"def set_overflow_status(self, value):\n TikCheckUtil.check_equality(\n get_soc_name(), ASCEND_910,\n \"this api doesn't support version: %s\" % get_soc_name())\n TikCheckUtil.check_type_match(\n value, int, \"value should be Int, \"\n \"invalid type: {}\".format(type(value)))\n TikCheckUtil.check_in_range(\n value, range(_MAX_OVERFLOW_STATUS),\n \"value should be 0 or 1, invalid value: {}\".format(value))\n with self.new_scope():\n self.emit(\n tvm.call_extern(\"uint64_t\", \"set_overflow\",\n type_convert(Expr(value, dtype=\"uint64\"))),\n ONE_IR)",
"def fault_debug(value: bool = False) -> None:",
"def set_flag(self, flag_name, value):\n flags = {'C':0, # Carry\n 'Z':1, # Zero\n 'I':2, # Interrupt mask\n 'D':3, # Decimal\n 'B':4, # Break\n 'V':6, # Overflow\n 'N':7} # Negative\n\n flag_reg = self.get_register('P')\n if value == 1:\n new_flag = flag_reg | 1 << flags[flag_name]\n else:\n new_flag = flag_reg & ~(1 << flags[flag_name])\n\n self.set_register('P', new_flag)",
"def _setEnumFeature(self, valueToSet):\n\n errorCode = VimbaDLL.featureEnumSet(self._handle,\n self._name,\n valueToSet)\n if errorCode != 0:\n raise VimbaException(errorCode)",
"def test_set_invalid_value(self):\n result = self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"set\",\n \"agent.agent_name\",\n \"true\",\n \"--type=bool\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 1",
"def _setIntFeature(self, valueToSet):\n\n errorCode = VimbaDLL.featureIntSet(self._handle,\n self._name,\n valueToSet)\n if errorCode != 0:\n raise VimbaException(errorCode)",
"def setFlag(self, flag, value) -> None:\n ...",
"def _setBoolFeature(self, valueToSet):\n\n errorCode = VimbaDLL.featureBoolSet(self._handle,\n self._name,\n valueToSet)\n if errorCode != 0:\n raise VimbaException(errorCode)",
"def set_value (self):\n raise NotImplementedError",
"def value(self, value):\n if self.value == value: # case where we are setting at the same value\n return\n if (not self.has_data) or self.is_unknown or self.is_byte:\n if not ida_bytes.patch_byte(self.ea, value):\n raise RuntimeError(\"Unable to patch value: {}\".format(self))\n elif self.is_word:\n if not ida_bytes.patch_word(self.ea, value):\n raise RuntimeError(\"Unable to patch value: {}\".format(self))\n elif self.is_dword:\n if not ida_bytes.patch_dword(self.ea, value):\n raise RuntimeError(\"Unable to patch value: {}\".format(self))\n elif self.is_qword:\n if not ida_bytes.patch_qword(self.ea, value):\n raise RuntimeError(\"Unable to patch value: {}\".format(self))\n else:\n raise RuntimeError(\"Unable to patch value: {}\".format(self))",
"def fset(self, value):\n message = \"Overriding a constant value is an illegal operation: {0} = {1}.\".format(\n name.__name__,\n value)\n raise TypeError(message)",
"async def set_bit(self, instance, value):\n print(f\"Server: {'set_bit'} Got 'put' request from outside: new value is {value} and type {type(value)}\")\n if self.device is not None:\n self.device.set_bit_server(value)\n else:\n print('device is None')",
"def set_value(self, value):\n if self.value:\n raise ValueError(\"Already has a Value:\", self)\n\n self.value = value\n\n if self.value != 0:\n self.possible = None\n self.solved = True",
"def Set(self,value):\n if value:\n onoff = 0x01\n else:\n onoff = 0x00\n self.Bus.Write_uInt8(self.Address,0x20+self.Pin, onoff)",
"def set_n(self, value):\n\n # set the negative register if greater than 0x80\n self.p &= ~(const.FLAG_NEGATIVE)\n self.p |= const.FLAG_NEGATIVE if value >= 0x80 else 0b0",
"def set_state(self, value):\n _LOGGER.debug(\"%s: Set state to %d\", self.entity_id, value)\n self._flag_state = True\n\n params = {ATTR_ENTITY_ID: self.entity_id}\n if value == 0:\n if self.char_current_state.value != value:\n self.char_current_state.set_value(3)\n self.call_service(DOMAIN, SERVICE_OPEN_COVER, params)\n elif value == 1:\n if self.char_current_state.value != value:\n self.char_current_state.set_value(2)\n self.call_service(DOMAIN, SERVICE_CLOSE_COVER, params)",
"def setFlag(self, whichFlag, whichValue):\n \n try:\n if self.__debugOn == True:\n print(\"Flags in: %x\" %self.__flags)\n \n # Get temproary flag value that blanks out the flag.\n tFlag = (~whichFlag) & self.__flags\n \n # Set our flag to the given value.\n self.__flags = tFlag | whichValue\n \n if self.__debugOn == True:\n print(\"Flags out: %x\" %self.__flags)\n \n except:\n raise\n \n return",
"def set_test_expectations(self, security_flag, is_flaky,\n unsymbolized_crash_state):\n self.expected_security_flag = security_flag\n self.is_flaky = is_flaky\n self.expected_state = unsymbolized_crash_state",
"def set_heat(self, state: bool, value: int = 0):\r\n if state:\r\n self.msg_send_upr.data[0] = b\"\\x22\"[0]\r\n self.msg_send_upr.data[2:4] = value.to_bytes(2, \"little\")\r\n else:\r\n self.msg_send_upr.data[0] = b\"\\x23\"[0]\r\n self.send_and_flush(self.msg_send_upr)",
"def light(self, value: bool | int, /) -> None:",
"def test_set_fails_when_setting_non_primitive_type(self):\n with pytest.raises(\n ClickException, match=\"Attribute `behaviours` is not allowed to be updated!\"\n ):\n self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"skills.dummy.behaviours\", \"value\"],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def fungible(self, value):\n if value is not None:\n self._fungible = True if value else False",
"def eflags_set(self, bit: int, value: bool) -> None:\n if self.eflags_get(bit):\n if not value:\n self.eflags &= ~(1 << bit)\n else:\n if value:\n self.eflags |= 1 << bit",
"def set_wraperror(self, value: bool = True) -> None:\n self.WRAPERROR = tools.coerce_bool(value)",
"def set_power_management(value: int) -> None:",
"async def bit(self, instance, value):\n print(f\"Server: {'bit'} Got 'put' request from outside: new value is {value} and type {type(value)}\")\n if self.device is not None:\n self.device.set_bit_client(value)\n else:\n print('device is None')",
"def set_bitmask(self, value):\r\n self.__bitmask__ = value | 0xFF00",
"def _set_value(self, value, name, option):\r\n self.set_value(name, option, value)"
] | [
"0.6006613",
"0.59550226",
"0.5876505",
"0.5848868",
"0.58279836",
"0.57625586",
"0.57241833",
"0.57025355",
"0.5644544",
"0.56344664",
"0.5631761",
"0.55913144",
"0.55671185",
"0.551112",
"0.5497896",
"0.5429743",
"0.54183626",
"0.53717023",
"0.5368946",
"0.53565514",
"0.5347902",
"0.53451985",
"0.5303583",
"0.529641",
"0.5279937",
"0.52314514",
"0.5228706",
"0.52227205",
"0.5221943",
"0.52100074"
] | 0.7903865 | 0 |
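The bit positions listed in the record's docstring describe a plain 16-bit fault word. A self-contained sketch of decoding such a word into named booleans; the mapping mirrors the docstring, while the dictionary layout and the `decode_faults` helper are illustrative rather than part of the original driver.

FAULT_BITS = {
    0: "PROBE", 1: "OVERTEMP", 2: "PANEL_OPEN", 3: "HIGH_VOLTAGE",
    4: "RAM_CRC", 5: "EEPROM_CRC", 6: "GPIO_ERROR", 7: "LTFAULT_ERROR",
    8: "TRIGGER_ERROR", 9: "HARDWARE_EXC", 10: "TRIGGER_GLITCH",
    11: "OVERVOLTAGE", 12: "TEMP_SENSOR",
}

def decode_faults(word: int) -> dict:
    """Return {fault_name: bool} for each defined bit of a 16-bit fault word."""
    return {name: bool(word & (1 << bit)) for bit, name in FAULT_BITS.items()}

# Example: bits 1 and 3 set -> OVERTEMP and HIGH_VOLTAGE are active.
flags = decode_faults(0b1010)
assert flags["OVERTEMP"] and flags["HIGH_VOLTAGE"] and not flags["PROBE"]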
Set the value. (And calls the base class) This will also check for Options to set the bools. BOOLEAN_CONFIG_1 >>> BIT_PROBE_TERMINATION = 0 >>> BIT_TMODE = 1 >>> BIT_EMODE = 2 >>> BIT_MUTE = 3 >>> BIT_PATTERN_TRIGGER = 4 >>> BIT_DEBUG_REALTIME = 5 >>> BIT_DEBUGPRINT = 6 >>> BIT_DEBUG_HW_OVERRIDE = 7 | def set_value(self, item, value):
super(t_8_Bit_Options, self).set_value(item, value)
if(item == t_8_Bit_Options.BOOLEAN_CONFIG_1):
self.set_bools(value, self.bools, t_8_Bit_Options.BIT_MAX) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setbool(self, strcommand, value):\n command = ct.c_wchar_p(strcommand)\n value = ct.c_bool(value)\n self.lib.AT_SetBool(self.AT_H, command, value)",
"def setBoolean(self, key, value):\n self.__config.setValue(key, QtCore.QVariant(value))\n self.__saved = False",
"def Set(self,value):\n if value:\n onoff = 0x01\n else:\n onoff = 0x00\n self.Bus.Write_uInt8(self.Address,0x20+self.Pin, onoff)",
"def setBoolValue(self, *args):\n return _libsbml.ConversionOption_setBoolValue(self, *args)",
"def setBooleanOption(self, option, value):\n result = self.__lib.voikkoSetBooleanOption(self.__handle, option, _boolToInt(value))\n if result == 0:\n raise VoikkoException(\"Could not set boolean option %s to value %s\" % (option, value))",
"def set_bool_value(self, event):\n\n self.undo_add()\n\n key_list = list(self.patch.engine.misc_data.keys())\n key = key_list[self.selected_index]\n data = self.patch.engine.misc_data[key]\n\n if self.ValueEnabled.GetValue():\n self.patch.misc[key] = data['on']\n else:\n self.patch.misc[key] = data['off']\n\n self.is_modified(True)\n self.misclist_update_row(self.selected_index)",
"def _setBoolFeature(self, valueToSet):\n\n errorCode = VimbaDLL.featureBoolSet(self._handle,\n self._name,\n valueToSet)\n if errorCode != 0:\n raise VimbaException(errorCode)",
"def setBoolValue(self, *args):\n return _libsbml.ConversionProperties_setBoolValue(self, *args)",
"def _writeBool(self, val):\n self.__writeValue(self.boolFormat, val)",
"def set_bools(self, value, bools, limit):\n for x in range(limit):\n if value & 1 << x:\n bools[x]['value'] = True\n else:\n bools[x]['value'] = False\n pass",
"def change_setting(self, key, val):\n if isinstance(val, bool):\n payload = 'on' if val else 'off'\n else:\n payload = val\n return self._request('post',\n 'fifo_command.php?cmd={}%20{}'.format(key,\n payload))",
"def set_value(self, item, value):\n super(t_16_Bit_Options, self).set_value(item, value)\n\n if(item == t_16_Bit_Options.FAULT_ACTIVE):\n self.set_bools(value, self.faults_current, t_16_Bit_Options.BIT_FAULT_MAX )\n\n if(item == t_16_Bit_Options.FAULT_LATCHED):\n self.set_bools(value, self.faults_latched, t_16_Bit_Options.BIT_FAULT_MAX )",
"def _set_bool(name, value, context):\n if name in os.environ:\n envval = os.environ.get(name).lower()\n if envval in [\"1\", \"true\", \"y\", \"yes\"]:\n context[name] = True\n elif envval in [\"0\", \"false\", \"n\", \"no\"]:\n context[name] = False\n else:\n raise ValueError(f\"{name} is a boolean, cannot match '{os.environ[name]}'\")\n\n _set_default(name, value, context)",
"def set_gateway(self, bool_value):\n self.chkbtn_gateway.set(bool_value)",
"def set(self, boolean):\n self._val = boolean",
"def set(self, attr, value=True):\n if type(value) == bool:\n self.__dict__['_'+attr] = value\n print attr, \"set to\", value\n else:\n print 'Value must be a bool, either \"True\" or \"False\" (no quotes)!'",
"def setBit(self,i,boolval):\n self.boolVals[i]=boolval",
"async def set_bit(self, instance, value):\n print(f\"Server: {'set_bit'} Got 'put' request from outside: new value is {value} and type {type(value)}\")\n if self.device is not None:\n self.device.set_bit_server(value)\n else:\n print('device is None')",
"def set_parameter(self, name, value = None):\n set_option = False\n for parameter in self.parameters:\n if name in parameter.names:\n if isinstance(parameter, _Switch):\n if value is None:\n import warnings\n warnings.warn(\"For a switch type argument like %s, \"\n \"we expect a boolean. None is treated \"\n \"as FALSE!\" % parameter.names[-1])\n parameter.is_set = bool(value)\n set_option = True\n else:\n if value is not None:\n self._check_value(value, name, parameter.checker_function)\n parameter.value = value\n parameter.is_set = True\n set_option = True\n if not set_option:\n raise ValueError(\"Option name %s was not found.\" % name)",
"def CONST_BOOL(self, t):\n t.value = False if t.value == '#false' else True\n return t",
"def set_flag(self, flag_name, value):\n flags = {'C':0, # Carry\n 'Z':1, # Zero\n 'I':2, # Interrupt mask\n 'D':3, # Decimal\n 'B':4, # Break\n 'V':6, # Overflow\n 'N':7} # Negative\n\n flag_reg = self.get_register('P')\n if value == 1:\n new_flag = flag_reg | 1 << flags[flag_name]\n else:\n new_flag = flag_reg & ~(1 << flags[flag_name])\n\n self.set_register('P', new_flag)",
"def writeBoolean(self, value: bool):\n self.writeByte(1 if value else 0)",
"def Set(self,value):\n self.Bus.Write_uInt8(self.Address,0x50+self.Pin,value)",
"def setFlag(self, flag, value) -> None:\n ...",
"def set_config_value(self, value, index=None):",
"def write(writer: BitStreamWriter, value: bool) -> None:\n\n writer.writeBool(value)",
"def test_set_boolean(self):\n setting_name = 'project_bool_setting'\n url = reverse(\n 'projectroles:api_project_setting_set',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'app_name': EX_APP_NAME,\n 'setting_name': setting_name,\n 'value': True,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 200, msg=response.content)\n obj = AppSetting.objects.get(name=setting_name, project=self.project)\n self.assertEqual(obj.get_value(), True)",
"def set_simple(value):\r\n LogOptions._SIMPLE = bool(value)",
"def device_set_property_bool(pnd, property, bEnable):\n return _nfc.device_set_property_bool(pnd, property, bEnable)",
"def test_set_type_bool(self):\n result = self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"set\",\n \"agent.logging_config.disable_existing_loggers\",\n \"true\",\n \"--type=bool\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )\n assert result.exit_code == 0"
] | [
"0.69975346",
"0.66105807",
"0.6431443",
"0.641337",
"0.63824916",
"0.63277537",
"0.6307116",
"0.6288704",
"0.6275827",
"0.6273386",
"0.62682843",
"0.6259129",
"0.6229773",
"0.6222815",
"0.6199952",
"0.615967",
"0.6145829",
"0.61051655",
"0.6090899",
"0.6050174",
"0.6049057",
"0.6039881",
"0.6026596",
"0.5985286",
"0.5979112",
"0.5961897",
"0.59396976",
"0.59132636",
"0.5895424",
"0.58844036"
] | 0.7702449 | 0 |
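As a companion to the decode direction handled by `set_bools` in the records above, here is a sketch of packing named flags back into the 8-bit BOOLEAN_CONFIG_1 byte. Bit names follow the docstring; `pack_config` and the tuple layout are illustrative only.

CONFIG_BITS = (
    "PROBE_TERMINATION",  # bit 0
    "TMODE",              # bit 1
    "EMODE",              # bit 2
    "MUTE",               # bit 3
    "PATTERN_TRIGGER",    # bit 4
    "DEBUG_REALTIME",     # bit 5
    "DEBUGPRINT",         # bit 6
    "DEBUG_HW_OVERRIDE",  # bit 7
)

def pack_config(flags: dict) -> int:
    """Build the BOOLEAN_CONFIG_1 byte from a {name: bool} mapping (missing names count as 0)."""
    value = 0
    for bit, name in enumerate(CONFIG_BITS):
        if flags.get(name):
            value |= 1 << bit
    return value

# Example: only MUTE set -> 0b00001000.
assert pack_config({"MUTE": True}) == 0x08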
Builds a command packet | def build_command_packet(self, command):
packet = bytearray()
# All option fields are 0
packet.append(0)
packet.append(0)
packet.append(0)
packet.append(command)
return packet | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _build_command(self, command_name, hardware_address = '', comp_var_dict = None):\n # Start command adn set name\n command = \"<Command><Name>{command_name}</Name>\".format(command_name=command_name)\n\n if hardware_address:\n command += \"<DeviceDetails><HardwareAddress>{hardware_address}</HardwareAddress></DeviceDetails>\".format(hardware_address=hardware_address)\n\n if comp_var_dict is not None:\n comp_keys = comp_var_dict.keys()\n if len(comp_keys) > 0:\n for comp_key in comp_keys:\n # Build requested variable list\n command += \"<Components><Component><Name>{comp_key}</Name><Variables>\".format(comp_key=comp_key)\n variables = comp_var_dict[comp_key]\n for var in variables:\n command += \"<Variable><Name>{var}</Name></Variable>\".format(var=var)\n command += \"</Variables></Component></Components>\"\n else:\n # Request all variables from all components\n command += \"<Components><All>Y</All></Components>\"\n\n # Close command\n command += \"</Command>\"\n \n return command",
"def _build_command(self, cmd, unit):\n return '#' + unit + cmd + NEWLINE",
"def _build_send_optode_command(self, cmd, command):\n return \"%s=%s%s\" % (cmd, command, self._newline)",
"def _build_setup_command(self, cmd, unit):\n # use defaults - in the future, may consider making some of these parameters\n # byte 0\n channel_address = unit\n # byte 1\n line_feed = self._param_dict.format(Parameter.LINEFEED)\n parity_type = self._param_dict.format(Parameter.PARITY_TYPE)\n parity_enable = self._param_dict.format(Parameter.PARITY_ENABLE)\n extended_addressing = self._param_dict.format(Parameter.EXTENDED_ADDRESSING)\n baud_rate = self._param_dict.format(Parameter.BAUD_RATE)\n baud_rate = getattr(BaudRate, 'BAUD_%d' % baud_rate, BaudRate.BAUD_9600)\n # byte 2\n alarm_enable = self._param_dict.format(Parameter.ALARM_ENABLE)\n low_alarm_latch = self._param_dict.format(Parameter.LOW_ALARM_LATCH)\n high_alarm_latch = self._param_dict.format(Parameter.HIGH_ALARM_LATCH)\n rtd_wire = self._param_dict.format(Parameter.RTD_4_WIRE)\n temp_units = self._param_dict.format(Parameter.TEMP_UNITS)\n echo = self._param_dict.format(Parameter.ECHO)\n delay_units = self._param_dict.format(Parameter.COMMUNICATION_DELAY)\n # byte 3\n precision = self._param_dict.format(Parameter.PRECISION)\n precision = getattr(UnitPrecision, 'DIGITS_%d' % precision, UnitPrecision.DIGITS_6)\n large_signal_filter_constant = self._param_dict.format(Parameter.LARGE_SIGNAL_FILTER_C)\n large_signal_filter_constant = filter_enum(large_signal_filter_constant)\n small_signal_filter_constant = self._param_dict.format(Parameter.SMALL_SIGNAL_FILTER_C)\n small_signal_filter_constant = filter_enum(small_signal_filter_constant)\n\n # # Factory default: 0x31070182\n # # Lab default: 0x310214C2\n\n byte_0 = int(channel_address.encode(\"hex\"), 16)\n log.debug('byte 0: %s', byte_0)\n byte_1 = \\\n (line_feed << 7) + \\\n (parity_type << 6) + \\\n (parity_enable << 5) + \\\n (extended_addressing << 4) + \\\n baud_rate\n log.debug('byte 1: %s', byte_1)\n byte_2 = \\\n (alarm_enable << 7) + \\\n (low_alarm_latch << 6) + \\\n (high_alarm_latch << 5) + \\\n (rtd_wire << 4) + \\\n (temp_units << 3) + \\\n (echo << 2) + \\\n delay_units\n log.debug('byte 2: %s', byte_2)\n byte_3 = \\\n (precision << 6) + \\\n (large_signal_filter_constant << 3) + \\\n small_signal_filter_constant\n log.debug('byte 3: %s', byte_3)\n\n setup_command = '#%sSU%02x%02x%02x%02x' % (unit[0], byte_0, byte_1, byte_2, byte_3) + NEWLINE\n log.debug('default setup command (%r) for unit %02x (%s)' % (setup_command, byte_0, unit[0]))\n return setup_command",
"def buildCmd( tcmpCmd, cmd, target, sequence, fieldList):\n cmdList = [tcmpCmd, cmd, target, sequence, fieldList]\n\n return \"<{cmd}>\".format(cmd=\":\".join(cmdList))",
"def init_cmd( cmd_num=0):\n if cmd_num in [12,16,2,4,9,10,13,17,18,24]:\n log.warning(\"Command %d is not supported on SDIO, sending anyway but what are you doing?!\" %cmd_num)\n\n cmd = BinaryValue(bits=48,bigEndian=False)\n cmd[47] = 0 # Start value\n cmd[46] = 1 # Direction , 1 = towards device, 0 = towards host\n cmd[45:40] = BinaryValue(value=cmd_num, bits=6, bigEndian=False).integer\n cmd[0] = 1 # Stop bit\n return cmd",
"def build_message(cmd, data):\r\n\tif len(cmd) > CMD_FIELD_LENGTH or len(data) > MAX_DATA_LENGTH:\r\n\t\treturn None\r\n\tfull_cmd = cmd + \" \"*(CMD_FIELD_LENGTH-len(cmd))\r\n\tdata_len = str(len(data))\r\n\tfull_data_len = \"0\"*(LENGTH_FIELD_LENGTH-len(data_len))+data_len\r\n\tfull_msg = DELIMITER.join([full_cmd, full_data_len, data])\r\n\treturn full_msg",
"def _pack(self):\n\n opt = 0\n if self.notify:\n opt = opt | CQC_OPT_NOTIFY\n if self.block:\n opt = opt | CQC_OPT_BLOCK\n if self.action:\n opt = opt | CQC_OPT_ACTION\n\n cmdH = struct.pack(self.PACKAGING_FORMAT, self.qubit_id, self.instr, opt)\n return cmdH",
"def build_command_depricated(device_dict, command_tuple):\n command = \" \" # The final command which should be send in the end\n return_list = [] # Is list of commands which can be returned if need be\n only_command = False # Flag if only a command was passed, important if such a command doesnt need syntax!\n\n if (\n type(command_tuple) == type(u\"Unicode\")\n or type(command_tuple) == str\n or type(command_tuple) == float\n or type(command_tuple) == int\n ):\n command_tuple = (str(command_tuple), \"\") # so only tuple are now prevelent\n only_command = True\n elif type(command_tuple[1]) == list:\n command_tuple = (\n command_tuple[0],\n [str(x) for x in command_tuple[1]],\n ) # so no unicode is present\n\n # Preparations\n # look for a syntax (paranteses and so on)\n if \"syntax\" in device_dict:\n syntax = str(device_dict[\"syntax\"])\n syntax = syntax.split(\"###\")\n if not syntax[0]:\n syntax = [\"\", \"\"] # Most devices have no paranteses or whatsoever\n else:\n syntax = [\"\", \"\"] # Most devices have no paranteses or whatsoever\n\n # Looks if a separator is needed to sepatare mulitple orders\n if \"separator\" in device_dict:\n sepa = str(device_dict[\"separator\"])\n else:\n sepa = \" \" # This should be the standard for most devices\n\n if command_tuple[0] in device_dict:\n # here all the magic happens\n # First look if the order is swichted or not (command value, or value command)\n\n # Check if multiple commands so list or so\n if type(device_dict[command_tuple[0]]) == str or type(\n device_dict[command_tuple[0]]\n ) == type(u\"Unicode\"):\n command_list = [device_dict[command_tuple[0]]]\n else:\n command_list = device_dict[command_tuple[0]]\n\n for command_item in command_list:\n command_item = str(command_item)\n command = \"\"\n\n # Value -> Command\n if int(device_dict.get(\"command_order\", 1)) == -1:\n # Now look if a csv structure is necessary for the command to work\n start_ind = command_tuple[0].find(\n \"_\"\n ) # finds the index of the command, to search for\n if (\n \"CSV\" + command_tuple[0][start_ind:] in device_dict\n ): # looks if an actual csv-command is there\n # Todo: test CSV command\n csv_commands = device_dict[\n \"CSV\" + str(command_tuple[0])[start_ind:]\n ]\n csv_commands = (\n csv_commands.strip()\n .strip(\"(\")\n .strip(\")\")\n .strip(\"[\")\n .strip(\"]\")\n .strip()\n ) # get rid of some caracters which should not be there\n csv_commands = csv_commands.split(\n \",\"\n ) # now split it for easy access\n\n # Make sure you always got a list of the next commandblock will fail\n if (\n type(command_tuple[1]) == list\n or type(command_tuple[1]) == tuple\n ):\n value_list = command_tuple[1]\n elif type(command_tuple[1]) == str or type(command_tuple) == type(\n u\"Unicode\"\n ):\n value_list = (\n command_tuple[1]\n .strip()\n .strip(\"(\")\n .strip(\")\")\n .strip(\"[\")\n .strip(\"]\")\n .strip()\n .replace(\" \", \"\")\n )\n value_list = value_list.split(\",\")\n\n csv_list = (\n \",\".join(map(str, value_list))\n .strip()\n .strip(\"(\")\n .strip(\")\")\n .strip(\"[\")\n .strip(\"]\")\n .strip()\n )\n csv_list = csv_list.split(\",\")\n\n for i, com in enumerate(csv_list):\n # here the input will be checked if enough parameters are passed for this command.\n # If not a 0 will be entered and a warning will be printed\n command += str(csv_list[i]).strip() + sepa\n\n if i + 1 < len(csv_commands) and len(csv_commands) > 1:\n for j in range(\n i + 1, len(csv_commands)\n ): # Fill the rest of the missing paramters\n l.error(\n \"Warning: Not enough 
parameters passed for function: \"\n + str(command_item)\n + \" the command must consist of \"\n + str(csv_commands)\n + \" '\"\n + str(csv_commands[j])\n + \"' is missing! Inserted 0 instead.\"\n )\n command += \"0\" + sepa\n\n command = command.strip(\" \").strip(\",\") # to get rid of last comma\n\n else: # So if no CSV was found for this command, just build the command with the value and the separator\n # First check if a List is present or so\n if (\n type(command_tuple[1]) == list\n or type(command_tuple[1]) == tuple\n ):\n string = \"\"\n for item in command_tuple[1]:\n command = syntax[1] + str(item) + \" \" + command_item\n command = command.strip()\n # Add a command terminator if one is needed and the last part of the syntax\n command += device_dict.get(\"execution_terminator\", \"\")\n return_list.append(command)\n return return_list\n\n else: # If only a command was passed\n string = str(command_tuple[1])\n command += syntax[1] + str(string).strip()\n\n if (\n only_command\n and device_dict.get(\"no_syntax_with_single_commmand\", False)\n and syntax[1] != \" \"\n and syntax[0] != \" \"\n ):\n command = command.replace(syntax[1], \"\")\n command = command.replace(syntax[0], \"\")\n\n # command += \" \" + str(device_dict[str(command_item)]).strip() + syntax[0] # adds the order to the command\n command += (\n \" \" + str(command_item).strip() + syntax[0]\n ) # adds the order to the command\n # Add a command terminator if one is needed and the last part of the syntax\n command = command.strip()\n command += device_dict.get(\"execution_terminator\", \"\")\n # command += syntax[0] # adds the order to the command\n return_list.append(command)\n\n # Command -> Value\n else:\n command += (\n str(command_item).strip() + \" \" + syntax[0]\n ) # adds the order to the command\n\n # Now look if a csv structure is necessary for the command to work\n start_ind = command_tuple[0].find(\n \"_\"\n ) # finds the index of the command, to search for\n if (\n \"CSV\" + command_tuple[0][start_ind:] in device_dict\n ): # looks if an actual csv-command is there\n # Todo: test CSV command\n csv_commands = device_dict[\n \"CSV\" + str(command_tuple[0])[start_ind:]\n ]\n csv_commands = (\n csv_commands.strip()\n .strip(\"(\")\n .strip(\")\")\n .strip(\"[\")\n .strip(\"]\")\n .strip()\n ) # get rid of some caracters which should not be there\n csv_commands = csv_commands.split(\n \",\"\n ) # now split it for easy access\n\n # Make sure you always got a list of the next commandblock will fail\n if (\n type(command_tuple[1]) == list\n or type(command_tuple[1]) == tuple\n ):\n value_list = command_tuple[1]\n elif type(command_tuple[1]) == str or type(command_tuple) == type(\n u\"Unicode\"\n ):\n value_list = (\n command_tuple[1]\n .strip()\n .strip(\"(\")\n .strip(\")\")\n .strip(\"[\")\n .strip(\"]\")\n .strip()\n .replace(\" \", \"\")\n )\n value_list = value_list.split(\",\")\n\n csv_list = (\n \",\".join(map(str, value_list))\n .strip()\n .strip(\"(\")\n .strip(\")\")\n .strip(\"[\")\n .strip(\"]\")\n .strip()\n )\n csv_list = csv_list.split(\",\")\n\n for i, com in enumerate(csv_list):\n # here the input will be checked if enough parameters are passed for this command.\n # If not a 0 will be entered and a warning will be printed\n command += str(csv_list[i]).strip() + sepa + \" \"\n\n if i + 1 < len(csv_commands) and len(csv_commands) > 1:\n for j in range(\n i + 1, len(csv_commands)\n ): # Fill the rest of the missing paramters\n l.warning(\n \"Not enough parameters passed for function: \"\n + 
str(command_tuple[0])\n + \" the command must consist of \"\n + str(csv_commands)\n + \" '\"\n + str(csv_commands[j])\n + \"' is missing! Inserted 0 instead.\"\n )\n command += \" \" + \"0\" + sepa\n\n command = command.strip(\" \").strip(\n \",\"\n ) # to get rid of last comma and space at the end if csv\n command += syntax[1]\n\n else: # So if no CSV was found for this command, just build the command with the value and the separator\n # First check if a List is present or so\n if (\n type(command_tuple[1]) == list\n or type(command_tuple[1]) == tuple\n ):\n string = \"\"\n for item in command_tuple[1]:\n command = str(item) + \" \" + command_item + syntax[1]\n command = command.strip()\n # Add a command terminator if one is needed and the last part of the syntax\n command += device_dict.get(\"execution_terminator\", \"\")\n return_list.append(command)\n return return_list\n\n else: # If its just one value or no value\n string = str(command_tuple[1])\n command += string.strip() + syntax[1]\n command = command.strip()\n\n if (\n only_command\n and device_dict.get(\"no_syntax_with_single_commmand\", False)\n and syntax[1] != \" \"\n and syntax[0] != \" \"\n ):\n command = command.replace(syntax[1], \"\")\n command = command.replace(syntax[0], \"\")\n\n # Add a command terminator if one is needed and the last part of the syntax\n command += device_dict.get(\"execution_terminator\", \"\")\n return_list.append(command.strip())\n else:\n # If the command is not found in the device only command tuple will be send\n l.error(\n \"Command \"\n + str(command_tuple[0])\n + \" was not found in device! Unpredictable behavior may happen. No commad build!\"\n )\n return \"\"\n\n # Add a command terminator if one is needed and the last part of the syntax\n # command += device_dict.get(\"execution_terminator\",\"\")\n\n # Todo: multiple commands return\n if len(return_list) > 1:\n return return_list\n else:\n return str(return_list[0])",
"def _pack(self):\n header = struct.pack(self.PACKAGING_FORMAT, self.cmd_length)\n return header",
"def __build_command_string(self, cmd):\n cmd_string = cmd.command\n\n # if we know the number of frames that this command returns,\n # only wait for exactly that number. This avoids some harsh\n # timeouts from the ELM, thus speeding up queries.\n\n\n return cmd_string",
"def _gen_cmd(cmd, address):\n family = {4: 'inet', 6: 'inet6'}[address[0].version]\n args = ['addr', cmd, '%s/%s' % (address[0], address[1])]\n if family == 'inet' and cmd == 'add':\n args += ['brd', '+']\n args += ['dev', real_ifname]\n if family == 'inet6':\n args = ['-6'] + args\n return args",
"def _create_packet(self, request):\n\n data_len = struct.pack('<Q', len(request))\n packet = b'ZBXD\\x01' + data_len + request\n\n def ord23(x):\n if not isinstance(x, int):\n return ord(x)\n else:\n return x\n\n logger.debug('Packet [str]: %s', packet)\n logger.debug('Packet [hex]: %s', ':'.join(hex(ord23(x))[2:] for x in packet))\n return packet",
"def make_packet(self, type, data): \n return (\"{}\\x00{}\\x00{}\".format(type, data, self.ID)).encode()",
"def _build_menu_command(self, cmd):\n if COMMAND_CHAR[cmd]:\n return COMMAND_CHAR[cmd]+self._newline\n else:\n raise InstrumentProtocolException(\"Unknown command character for %s\" % cmd)",
"def _build_robovac_command(mode, command):\n mcu_ota_header_0xa5 = 0xA5\n cmd_data = (mode.value + command.value)\n\n return bytes([mcu_ota_header_0xa5, mode.value, command.value, cmd_data, 0xFA])",
"def buildCommand(self, player, game, json):",
"def build_command(device, command_tuple, single_commands=False):\n if isinstance(command_tuple, (str)):\n command_tuple = (command_tuple, \"\") # make da dummy command\n\n if command_tuple[0] in device:\n\n if isinstance(device[command_tuple[0]], dict):\n try:\n com = device[command_tuple[0]][\"command\"]\n except:\n l.error(\n \"Dict command structure recognised but no actual command found for passed order {}\".format(\n command_tuple\n ),\n exc_info=True,\n )\n return None\n else:\n com = device[command_tuple[0]]\n\n if isinstance(command_tuple[1], (str, float, int)):\n try:\n return com.format(command_tuple[1])\n except IndexError:\n l.error(\n \"You attempted to send a command with the wrong number of parameters the command structure is: {}\"\n \" but you passed: [{}] as parameter(s)\".format(\n com, command_tuple[1]\n ),\n exc_info=True,\n )\n\n elif single_commands:\n if isinstance(command_tuple[1], list) or isinstance(\n command_tuple[1], tuple\n ):\n return [com.format(single) for single in command_tuple[1]]\n else:\n l.error(\"In order to build a list command, a list has to be passed!\")\n return None\n\n elif isinstance(command_tuple[1], list) or isinstance(command_tuple[1], tuple):\n # Find occurance of {} in string if list is as long as occurance of {} then just pass otherwise join a string\n brackets_count = device[command_tuple[0]].count(\"{}\")\n if len(command_tuple[1]) == brackets_count:\n return com.format(*command_tuple[1])\n elif brackets_count == 1 and len(command_tuple[1]) > brackets_count:\n sep = device.get(\"separator\", \" \")\n return com.format(sep.join([str(x) for x in command_tuple[1]]))\n elif (\n len(command_tuple[1]) > brackets_count\n or len(command_tuple[1]) < brackets_count\n and brackets_count != 1\n ):\n l.error(\n \"Could not build command for input length {}\"\n \" and input parameters length {}. Input parameters must be of same length\"\n \" as defined in config or 1\".format(\n len(command_tuple[1]), brackets_count\n )\n )\n return None\n else:\n l.error(\n \"Could not find command {} in command list of device: {}\".format(\n command_tuple[0], device[\"Device_name\"]\n )\n )",
"def build(self, origin, token, args):\r\n # If the last argument is \"long\", package it for sending\r\n if len(args) > 0:\r\n if args[-1].find(\" \") > -1:\r\n build_last_arg = \":\" + args[-1]\r\n build_args = args[0:-1] + build_last_arg.split(\" \")\r\n else:\r\n build_args = args\r\n else:\r\n build_args = []\r\n # Build the line\r\n # Future compatibility - only send \\n\r\n ret = create_numeric(origin) + \" \" + token + \" \" \\\r\n + \" \".join(build_args) + \"\\n\"\r\n \r\n # Check we're not sending things which are protocol violations\r\n if len(ret) > 512:\r\n raise ProtocolError('Line too long to send')\r\n if not token.isupper() and not token.isdigit():\r\n raise ProtocolError('Command not in uppercase during build')\r\n \r\n return ret",
"def genCommand(self,char, command): \n\t\t\n\t\tif char == 'a':\n\t\t\tcommand = outputMsg.Robotiq2FGripper_robot_output();\n\t\t\tcommand.rACT = 1\n\t\t\tcommand.rGTO = 1\n\t\t\tcommand.rSP = 255\n\t\t\tcommand.rFR = 150\n\n\t\tif char == 'r':\n\t\t\tcommand = outputMsg.Robotiq2FGripper_robot_output();\n\t\t\tcommand.rACT = 0\n\n\t\tif char == 'c':\n\t\t\tcommand.rPR = 255\n\n\t\tif char == 'o':\n\t\t\tcommand.rPR = 0 \n\n\t\t#If the command entered is a int, assign this value to rPRA\n\t\ttry: \n\t\t\tcommand.rPR = int(char)\n\t\t\tif command.rPR > 255:\n\t\t\t\tcommand.rPR = 255\n\t\t\tif command.rPR < 0:\n\t\t\t\tcommand.rPR = 0\n\t\texcept ValueError:\n\t\t\tpass \n\t\t\t\n\t\tif char == 'f':\n\t\t\tcommand.rSP += 25\n\t\t\tif command.rSP > 255:\n\t\t\t\tcommand.rSP = 255\n\t\t\t\t\n\t\tif char == 'l':\n\t\t\tcommand.rSP -= 25\n\t\t\tif command.rSP < 0:\n\t\t\t\tcommand.rSP = 0\n\n\t\t\t\t\n\t\tif char == 'i':\n\t\t\tcommand.rFR += 25\n\t\t\tif command.rFR > 255:\n\t\t\t\tcommand.rFR = 255\n\t\t\t\t\n\t\tif char == 'd':\n\t\t\tcommand.rFR -= 25\n\t\t\tif command.rFR < 0:\n\t\t\t\tcommand.rFR = 0\n\n\t\treturn command",
"def _command(self, servo_id, instruction, *params):\n length = 3 + len(params)\n #print('length', length)\n \"\"\"\n checksum calculation:\n checksum = ~(ID + length+instruction+parms) if the numbers in the brackets\n are calculated and exceeded 255, then it takes the lowest one byte, \"~\"\n means Negation\n \"\"\"\n checksum = 255 - ((servo_id + length + instruction + sum(params))% 256)\n #print('checksum', checksum)\n packet = [0x55, 0x55, servo_id, length, instruction, *params, checksum]\n #print('packet', packet)\n self._serial.write(bytearray(packet))\n #print('Sending packet', packet)",
"def _buildCmd(self, cmd, cmdArg=0x00):\n res = [cmd, cmdArg]\n if self.USE_SUFFIX:\n return res + [self.CMD_SUFFIX]\n return res",
"def make_command(self):\n # self.add_root_bucket()\n\n stringa = \"tc qdisc add dev \" + self.__interface + \" root netem \"\n stringa += \"delay \" + self.latency['latency'] + \"ms \" + self.latency['jitter'] + \"ms \" + self.latency[\n 'correlation'] + \"% distribution \" + self.latency['distribution']\n stringa += \" loss \" + self.drop['probability'].__str__() + \"% \" + self.drop['correlation'].__str__() + \"%\"\n stringa += \" corrupt \" + self.corrupt['probability'].__str__() + \"% duplicate \" + \\\n self.duplicate['probability'].__str__() + \"%\"\n\n cmd = shlex.split(stringa)\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n try:\n o, e = proc.communicate(timeout=1)\n except subprocess.TimeoutExpired:\n proc.kill()\n raise RuntimeWarning(\"Old configuration not eliminated\")\n\n if e.decode('ascii') != \"\":\n if proc.returncode == 2:\n raise RuntimeWarning(e.decode('ascii') + \"\\nUsing stale configuration, wipe the old settings\")\n return str(proc.returncode)",
"def buildCommandModel ( switchSpecs, posSpecs ):\n\n #-- 1 --\n result = []\n\n #-- 2 --\n # [ result +:= strings representing the options in switchSpecs ]\n for switch in switchSpecs:\n result.append ( \"-%s\" % switch.letter )\n\n #-- 3 --\n # [ result +:= strings representing the keys in posSpecs ]\n for pos in posSpecs:\n if pos.optional:\n result.append ( \"[%s]\" % pos.key )\n else:\n result.append ( pos.key )\n if pos.repeated:\n result.append ( \"...\" )\n\n #-- 4 --\n # [ return the concatenation of the strings in result with single\n # spaces between them ]\n return \" \".join ( result )",
"def _build_simple_command(self, cmd):\n return cmd+SBE37_NEWLINE",
"def __init__(self, command=None, data_length=0, data=[]):\n if command is not None:\n self.command = command\n self.data_length = data_length\n self.data = data\n self.encode()\n else:\n self.message_length = 0\n self.command = 0\n self.data_length = 0\n self.data = []\n self.string = \"\"",
"def _build_command_prelude(class_number, verb):\n return struct.pack(\"<II\", class_number, verb)",
"def makePacket(self,dhash,index,val=None):\n msg = STX + self.addr + CMD\n if val is None:\n msgtype = DATA_READ\n else:\n msgtype = DATA_WRITE\n msg += msgtype\n payload = dhash + chr(index)\n if val is not None:\n payload += struct.pack('>I',val)\n table = {STX : ESC_STX, CR : ESC_CR, ESC : ESC_ESC}\n for i in range(len(payload)):\n if payload[i] in table:\n msg += ESC\n msg += table[payload[i]]\n else:\n msg += payload[i]\n cksum = self.checksum(self.addr+CMD+msgtype+payload)\n msg += cksum\n msg += CR\n return msg",
"def genCommand(char, command): \n \n if char == 'a':\n command = SModelRobotOutput();\n command.rACT = 1\n command.rGTO = 1\n command.rSPA = 255\n command.rFRA = 150\n\n if char == 'r':\n command = SModelRobotOutput();\n command.rACT = 0\n\n if char == 'c':\n command.rPRA = 255\n\n if char == 'o':\n command.rPRA = 0\n\n if char == 'b':\n command.rMOD = 0\n \n if char == 'p':\n command.rMOD = 1\n \n if char == 'w':\n command.rMOD = 2\n \n if char == 's':\n command.rMOD = 3\n\n #If the command entered is a int, assign this value to rPRA\n try: \n command.rPRA = int(char)\n if command.rPRA > 255:\n command.rPRA = 255\n if command.rPRA < 0:\n command.rPRA = 0\n except ValueError:\n pass \n \n if char == 'f':\n command.rSPA += 25\n if command.rSPA > 255:\n command.rSPA = 255\n \n if char == 'l':\n command.rSPA -= 25\n if command.rSPA < 0:\n command.rSPA = 0\n\n \n if char == 'i':\n command.rFRA += 25\n if command.rFRA > 255:\n command.rFRA = 255\n \n if char == 'd':\n command.rFRA -= 25\n if command.rFRA < 0:\n command.rFRA = 0\n\n return command",
"def packSimulationCommand(self, lstCommand):\n szCommand = \"\"\n szCommand = struct.pack('16si', lstCommand[0], lstCommand[1])\n szCommand += struct.pack('i', len(lstCommand[2:]))\n for i in lstCommand[2:]:\n szCommand += struct.pack('16s', i[0])\n szCommand += struct.pack('i', len(i[1:]))\n for j in i[1:]:\n szCommand += struct.pack('i', j)\n\n return szCommand"
] | [
"0.7055916",
"0.6958671",
"0.6834779",
"0.6826811",
"0.66905415",
"0.6657784",
"0.66384405",
"0.65551907",
"0.64848095",
"0.6423199",
"0.641272",
"0.6404379",
"0.63837487",
"0.6381479",
"0.6360029",
"0.6312353",
"0.62964827",
"0.62884474",
"0.6237116",
"0.61567235",
"0.6149468",
"0.61375535",
"0.6101278",
"0.6091492",
"0.6086016",
"0.6063523",
"0.6063219",
"0.6038263",
"0.60172164",
"0.60132384"
] | 0.8249946 | 0 |
This will get the current faults on the system. | def get_faults_current(self):
request = self.get_option_from_shouter([t_16_Bit_Options.FAULT_ACTIVE], BP_TOOL.REQUEST_16)
return self.__get_faults_list(self.config_16.faults_current) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def faults(self):\n debug(\"Getting faults...\")\n code = int(\"01001000\",2)\n command = pack('B',code)\n reply = self.query(command,count=2)\n faults = \" \"\n # The reply is 0xC8 followed by a faults status byte.\n if len(reply) != 2:\n if len(reply)>0:\n warn(\"%r: expecting 2-byte reply, got %r\" % (command,reply))\n elif self.connected:\n warn(\"%r: expecting 2-byte reply, got no reply\" % command)\n else:\n reply_code,bits = unpack('<BB',reply)\n if reply_code != code:\n warn(\"reply %r: expecting 0x%X(%s), got 0x%X(%s)\" %\n (reply,code,bin(code),reply_code,bin(reply_code)))\n else:\n fault_names = {0:\"Tank Level Low\",2:\"Temperature above alarm range\",\n 4:\"RTD Fault\",5:\"Pump Fault\",7:\"Temperature below alarm range\"}\n faults = \"\"\n for i in range(0,8):\n if (bits >> i) & 1:\n if i in fault_names: faults += fault_names[i]+\", \"\n else: faults += str(i)+\", \"\n faults = faults.strip(\", \")\n if faults == \"\": faults = \"none\"\n debug(\"Faults %s\" % faults)\n return faults",
"def get_faults_latched(self):\n request = self.get_option_from_shouter([t_16_Bit_Options.FAULT_LATCHED], BP_TOOL.REQUEST_16)\n return self.__get_faults_list(self.config_16.faults_latched)",
"def fault_counters(self):\n done, data = self._request('GF')\n if done:\n return {\n 'GFI self test': int(data[0], 16),\n 'Ground': int(data[1], 16),\n 'Stuck relay': int(data[2], 16)\n }\n\n raise EvseError",
"def __get_faults_list(self, faults):\n r_faults = []\n for x in faults:\n if faults[x]['value']:\n r_faults.append(faults[x]['name'])\n return r_faults",
"def _extend_fault_map(self):\n faults.FAULT_MAP.update({nsx_lib_exc.ManagerError:\n webob.exc.HTTPBadRequest,\n nsx_lib_exc.ServiceClusterUnavailable:\n webob.exc.HTTPServiceUnavailable,\n nsx_lib_exc.ClientCertificateNotTrusted:\n webob.exc.HTTPBadRequest,\n nsx_exc.SecurityGroupMaximumCapacityReached:\n webob.exc.HTTPBadRequest,\n nsx_lib_exc.NsxLibInvalidInput:\n webob.exc.HTTPBadRequest,\n nsx_exc.NsxENSPortSecurity:\n webob.exc.HTTPBadRequest,\n nsx_exc.NsxPluginTemporaryError:\n webob.exc.HTTPServiceUnavailable\n })",
"def fault():\n return FaultCohesiveKin()",
"def get_faults_history(self, epg_dn):\n class_query = ClassQuery('faultRecord')\n class_query.propFilter = 'eq(faultRecord.affected, \"' + epg_dn + '\")'\n return self.moDir.query(class_query)",
"def _isfault(self):\n return self.dp.state()==PyTango.DevState.FAULT",
"def sys_exc_info(self, for_hidden=False):\n return self.gettopframe()._exc_info_unroll(self.space, for_hidden)",
"def fault(self):\n return (self.status == self.STATUS_FAULT)",
"def fault_code(self):\n from numpy import nan\n debug(\"Getting faults...\")\n code = int(\"01001000\",2)\n command = pack('B',code)\n reply = self.query(command,count=2)\n fault_code = nan\n # The reply is 0xC8 followed by a faults status byte.\n if len(reply) != 2:\n if len(reply)>0:\n warn(\"%r: expecting 2-byte reply, got %r\" % (command,reply))\n elif self.connected:\n warn(\"%r: expecting 2-byte reply, got no reply\" % command)\n else:\n reply_code,fault_code = unpack('<BB',reply)\n if reply_code != code:\n warn(\"reply %r: expecting 0x%X(%s), got 0x%X(%s)\" %\n (reply,code,bin(code),reply_code,bin(reply_code)))\n fault_code = nan\n if fault_code == 2.0**7:\n fault_code = 8\n elif fault_code == 2.0**6:\n fault_code = 7\n elif fault_code == 2.0**5:\n fault_code = 6\n elif fault_code == 2.0**4:\n fault_code = 5\n elif fault_code == 2.0**3:\n fault_code = 4\n elif fault_code == 2.0**2:\n fault_code = 3\n elif fault_code == 2.0**1:\n fault_code = 2\n elif fault_code == 2.0**0:\n fault_code = 1\n elif fault_code == 0:\n fault_code = 0\n else:\n fault_code = -1\n debug(\"Fault code %s\" % fault_code)\n return fault_code",
"def check_page_faults(con, host, warning, critical,perf_data):\n warning = warning or 10\n critical = critical or 30\n data=get_server_status(con)\n\n try:\n page_faults=float(data['extra_info']['page_faults']) \n except:\n # page_faults unsupported on the underlaying system\n return exit_with_general_critical(\"page_faults unsupported on the underlaying system\")\n \n err,delta=maintain_delta([page_faults],host,\"page_faults\")\n if err==0:\n page_faults_ps=delta[1]/delta[0]\n message = \"Page faults : %.2f ps\" % page_faults_ps\n message+=performance_data(perf_data,[(\"%.2f\" %page_faults_ps,\"page_faults_ps\",warning,critical)])\n return check_levels(page_faults_ps,warning,critical,message)\n else:\n return exit_with_general_warning(\"problem reading data from temp file\")",
"def read_fault(filename):\n\n fault_x = []\n fault_y = []\n fault_file = open(filename)\n\n for segment in fault_file:\n x, y = segment.split()\n fault_x.append(float(x))\n fault_y.append(float(y))\n\n fault_file.close()\n\n return fault_x, fault_y",
"def _GetAbortRequests(self):\n new_requests = self._GetRequestsByState(self._ABORTING)\n for request_id in new_requests:\n logging.info('Abort requested for %s', request_id)\n self._ClearRequest(request_id, self._ABORTING)\n return new_requests",
"def __get_fault(self, mps_db_session, fault_id):\n fault = mps_db_session.query(models.Fault).filter(models.Fault.id==fault_id).all()\n\n if len(fault) == 1:\n return fault[0]\n elif len(fault) == 0:\n raise ValueError(\"Function \\\"__get_fault(fault_id={}). Not fault was found.\\\"\"\n .format(fault_id))\n else:\n raise ValueError(\"Function \\\"__get_fault(fault_id={}). More than one fault matches\\\"\"\n .format(fault_id))",
"def as_fault(self):\n return Fault(self.fault_code, self.internal_message or\n 'unknown server error')",
"def page_fault(self):\n self._page_fault += 1",
"def pageFault(proc):\n\n global pfList\n pfList.append([proc, 1])",
"def get_diagnostics(self) -> List[Diagnostic]:\n raise NotImplementedError",
"def get_unexpected_reboots(self):\n\n _, remaining_bootups = self.get_unmatched_events(\n event_cause_label=\"basic.reboot_trigger\",\n event_effect_label=\"basic.bootup\")\n\n return remaining_bootups",
"def remote_getStatus(self):\n zep = getFacade('zep')\n issues = zep.getDeviceIssues(eventClass=[Status_Mail],\n severity=[SEVERITY_WARNING, SEVERITY_ERROR, SEVERITY_CRITICAL])\n return [d\n for d, count, total in issues\n if getattr(self.config.devices, d, None)]",
"def faulty(self, *args):\n for each in args:\n if not self.is_faulty(each):\n self._faults.add(each)",
"def panic_on_fault_enabled(self):\n # The panic_on_fault mechanism might not even be included in the build\n # (in which case the panic_on_fault variables won't exist), so be defensive.\n try:\n enabled = self.chipdata.get_var_strict(\n 'L_panic_on_fault_enabled'\n ).value\n fault_id = self.chipdata.get_var_strict(\n 'L_panic_on_fault_id'\n ).value\n except ct.DebugInfoNoVariable:\n enabled = False\n fault_id = 0\n return (enabled, fault_id)",
"def errors_fatal(self) -> List[Error]:",
"def cpu_halt_reasons(self):\n buf_size = self.MAX_NUM_MOES\n buf = (structs.JLinkMOEInfo * buf_size)()\n num_reasons = self._dll.JLINKARM_GetMOEs(buf, buf_size)\n if num_reasons < 0:\n raise errors.JLinkException(num_reasons)\n\n return list(buf)[:num_reasons]",
"def is_faulty(self, event):\n for each in self._faults:\n if each.name.upper() == event.name.upper():\n return True\n return False",
"def exechost_status():\n\n exechost = socket.gethostname()\n\n # free\n try:\n subp = subprocess.Popen([\"free\", \"-m\"], stdout=subprocess.PIPE)\n output = subp.communicate()[0]\n print \"EXECSTAT %s FREE\\n%s\" % (exechost, output)\n except:\n print \"Problem running free command\"\n (extype, exvalue, trback) = sys.exc_info()\n traceback.print_exception(extype, exvalue, trback, limit=1, file=sys.stdout)\n print \"Ignoring error and continuing...\\n\"\n\n # df\n try:\n cwd = os.getcwd()\n subp = subprocess.Popen([\"df\", \"-h\", cwd], stdout=subprocess.PIPE)\n output = subp.communicate()[0]\n print \"EXECSTAT %s DF\\n%s\" % (exechost, output)\n except:\n print \"Problem running df command\"\n (extype, exvalue, trback) = sys.exc_info()\n traceback.print_exception(extype, exvalue, trback, limit=1, file=sys.stdout)\n print \"Ignoring error and continuing...\\n\"",
"def threat_exceptions(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"threat_exceptions\")",
"def fault_summary(request, accountId):\n if not accountId or len(accountId) == 0:\n return JSONResponse('No input parameter account_id.', status=400)\n\n token = query.getZabbixToken()\n print token\n if not token:\n LOG.exception('failed to query zabbix for token.')\n return JSONResponse('Failed to query zabbix for token.', status=500)\n\n data = query.getServiceState(accountId, token)\n print data\n if data == None:\n LOG.exception('failed to query zabbix for service state.')\n return JSONResponse('Failed to query zabbix for service state.', status=500)\n\n serviceData = normalizeServiceState(data)\n print serviceData\n result = {'service_state': {\n 'account_id': accountId,\n 'account_name': serviceData[0],\n 'hosts': serviceData[1]\n }}\n return JSONResponse(result)",
"def check_errors(self):\n\n errors = []\n while True:\n err = self.values(\"SYST:ERR?\")\n if int(err[0]) != 0:\n errmsg = \"Agilent 5313xA: {0}: {1}\".format(err[0], err[1])\n log.error(errmsg + '\\n')\n errors.append(errmsg)\n else:\n break\n\n return errors"
] | [
"0.7297142",
"0.6973599",
"0.62839913",
"0.6233432",
"0.6117404",
"0.5914989",
"0.585911",
"0.5828832",
"0.56888837",
"0.5640751",
"0.5596669",
"0.55570495",
"0.5398989",
"0.5372518",
"0.5344608",
"0.5326614",
"0.52823734",
"0.525599",
"0.5215235",
"0.5206905",
"0.51792276",
"0.51715744",
"0.51645386",
"0.5153811",
"0.51442236",
"0.5125883",
"0.5123245",
"0.5093789",
"0.50876576",
"0.50817794"
] | 0.82058364 | 0 |
This will get the latched faults on the system. | def get_faults_latched(self):
request = self.get_option_from_shouter([t_16_Bit_Options.FAULT_LATCHED], BP_TOOL.REQUEST_16)
return self.__get_faults_list(self.config_16.faults_latched) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_faults_current(self):\n request = self.get_option_from_shouter([t_16_Bit_Options.FAULT_ACTIVE], BP_TOOL.REQUEST_16)\n return self.__get_faults_list(self.config_16.faults_current)",
"def faults(self):\n debug(\"Getting faults...\")\n code = int(\"01001000\",2)\n command = pack('B',code)\n reply = self.query(command,count=2)\n faults = \" \"\n # The reply is 0xC8 followed by a faults status byte.\n if len(reply) != 2:\n if len(reply)>0:\n warn(\"%r: expecting 2-byte reply, got %r\" % (command,reply))\n elif self.connected:\n warn(\"%r: expecting 2-byte reply, got no reply\" % command)\n else:\n reply_code,bits = unpack('<BB',reply)\n if reply_code != code:\n warn(\"reply %r: expecting 0x%X(%s), got 0x%X(%s)\" %\n (reply,code,bin(code),reply_code,bin(reply_code)))\n else:\n fault_names = {0:\"Tank Level Low\",2:\"Temperature above alarm range\",\n 4:\"RTD Fault\",5:\"Pump Fault\",7:\"Temperature below alarm range\"}\n faults = \"\"\n for i in range(0,8):\n if (bits >> i) & 1:\n if i in fault_names: faults += fault_names[i]+\", \"\n else: faults += str(i)+\", \"\n faults = faults.strip(\", \")\n if faults == \"\": faults = \"none\"\n debug(\"Faults %s\" % faults)\n return faults",
"def _extend_fault_map(self):\n faults.FAULT_MAP.update({nsx_lib_exc.ManagerError:\n webob.exc.HTTPBadRequest,\n nsx_lib_exc.ServiceClusterUnavailable:\n webob.exc.HTTPServiceUnavailable,\n nsx_lib_exc.ClientCertificateNotTrusted:\n webob.exc.HTTPBadRequest,\n nsx_exc.SecurityGroupMaximumCapacityReached:\n webob.exc.HTTPBadRequest,\n nsx_lib_exc.NsxLibInvalidInput:\n webob.exc.HTTPBadRequest,\n nsx_exc.NsxENSPortSecurity:\n webob.exc.HTTPBadRequest,\n nsx_exc.NsxPluginTemporaryError:\n webob.exc.HTTPServiceUnavailable\n })",
"def fault():\n return FaultCohesiveKin()",
"def __get_faults_list(self, faults):\n r_faults = []\n for x in faults:\n if faults[x]['value']:\n r_faults.append(faults[x]['name'])\n return r_faults",
"def faulty(self, *args):\n for each in args:\n if not self.is_faulty(each):\n self._faults.add(each)",
"def errors_fatal(self) -> List[Error]:",
"def emergency_recover_states_from_failure():\n _emergency_state_check()\n _emergency_iobuf_extract()",
"def check_page_faults(con, host, warning, critical,perf_data):\n warning = warning or 10\n critical = critical or 30\n data=get_server_status(con)\n\n try:\n page_faults=float(data['extra_info']['page_faults']) \n except:\n # page_faults unsupported on the underlaying system\n return exit_with_general_critical(\"page_faults unsupported on the underlaying system\")\n \n err,delta=maintain_delta([page_faults],host,\"page_faults\")\n if err==0:\n page_faults_ps=delta[1]/delta[0]\n message = \"Page faults : %.2f ps\" % page_faults_ps\n message+=performance_data(perf_data,[(\"%.2f\" %page_faults_ps,\"page_faults_ps\",warning,critical)])\n return check_levels(page_faults_ps,warning,critical,message)\n else:\n return exit_with_general_warning(\"problem reading data from temp file\")",
"def GetLongLineExceptions(self):\n return []",
"def expand_faults():\n for (x, y) in SupvisorsFaults.__dict__.items():\n if not x.startswith('__'):\n setattr(Faults, x, y + FAULTS_OFFSET)",
"def expand_faults():\n for (x, y) in SupvisorsFaults.__dict__.items():\n if not x.startswith('__'):\n setattr(Faults, x, y + FAULTS_OFFSET)",
"def page_fault(self):\n self._page_fault += 1",
"def fault_counters(self):\n done, data = self._request('GF')\n if done:\n return {\n 'GFI self test': int(data[0], 16),\n 'Ground': int(data[1], 16),\n 'Stuck relay': int(data[2], 16)\n }\n\n raise EvseError",
"def _isfault(self):\n return self.dp.state()==PyTango.DevState.FAULT",
"def get_faults_history(self, epg_dn):\n class_query = ClassQuery('faultRecord')\n class_query.propFilter = 'eq(faultRecord.affected, \"' + epg_dn + '\")'\n return self.moDir.query(class_query)",
"def get_unexpected_reboots(self):\n\n _, remaining_bootups = self.get_unmatched_events(\n event_cause_label=\"basic.reboot_trigger\",\n event_effect_label=\"basic.bootup\")\n\n return remaining_bootups",
"def detect_fatal_errors(self):\n for instance in self.all_instances:\n instance.detect_fatal_errors()",
"def cpu_halt_reasons(self):\n buf_size = self.MAX_NUM_MOES\n buf = (structs.JLinkMOEInfo * buf_size)()\n num_reasons = self._dll.JLINKARM_GetMOEs(buf, buf_size)\n if num_reasons < 0:\n raise errors.JLinkException(num_reasons)\n\n return list(buf)[:num_reasons]",
"def read_fault(filename):\n\n fault_x = []\n fault_y = []\n fault_file = open(filename)\n\n for segment in fault_file:\n x, y = segment.split()\n fault_x.append(float(x))\n fault_y.append(float(y))\n\n fault_file.close()\n\n return fault_x, fault_y",
"def _GetAbortRequests(self):\n new_requests = self._GetRequestsByState(self._ABORTING)\n for request_id in new_requests:\n logging.info('Abort requested for %s', request_id)\n self._ClearRequest(request_id, self._ABORTING)\n return new_requests",
"def refined_errors(self):\r\n errs = []\r\n for err in self.errors:\r\n if err['typo'].lower() not in self.terms:\r\n errs.append(err)\r\n return errs",
"def panic_on_fault_enabled(self):\n # The panic_on_fault mechanism might not even be included in the build\n # (in which case the panic_on_fault variables won't exist), so be defensive.\n try:\n enabled = self.chipdata.get_var_strict(\n 'L_panic_on_fault_enabled'\n ).value\n fault_id = self.chipdata.get_var_strict(\n 'L_panic_on_fault_id'\n ).value\n except ct.DebugInfoNoVariable:\n enabled = False\n fault_id = 0\n return (enabled, fault_id)",
"def pageFault(proc):\n\n global pfList\n pfList.append([proc, 1])",
"def test_crash_process(self):\n def_bucket = self.cluster.buckets[0]\n target_node = self.getTargetNode()\n remote = RemoteMachineShellConnection(target_node)\n target_vbuckets = range(0, self.cluster.vbuckets)\n retry_exceptions = list()\n self.transaction_load_task = None\n self.doc_loading_task = None\n self.N1ql_load_task = None\n\n # If Memcached is killed, we should not perform KV ops on\n # particular node. If not we can target all nodes for KV operation.\n if self.process_name == \"memcached\":\n target_vbuckets = Cbstats(target_node).vbucket_list(\n def_bucket.name, self.target_node)\n if self.target_node == \"active\":\n retry_exceptions = [SDKException.TimeoutException]\n if len(target_vbuckets) == 0:\n self.log.error(\"No target vbucket list generated to load data\")\n remote.disconnect()\n return\n\n bucket_dict = BucketUtils.get_random_collections(\n self.cluster.buckets,\n req_num=1,\n consider_scopes=\"all\",\n consider_buckets=\"all\")\n\n bucket = BucketUtils.get_bucket_obj(self.cluster.buckets,\n bucket_dict.keys()[0])\n scope_name = bucket_dict[bucket.name][\"scopes\"].keys()[0]\n collection_name = bucket_dict[bucket.name][\n \"scopes\"][scope_name][\"collections\"].keys()[0]\n scope = BucketUtils.get_scope_obj(\n bucket, scope_name)\n collection = BucketUtils.get_collection_obj(\n scope, collection_name)\n\n self.start_doc_loading_tasks(target_vbuckets, scope_name, collection)\n\n task_info = dict()\n task_info[self.doc_loading_task] = \\\n self.bucket_util.get_doc_op_info_dict(\n def_bucket, DocLoading.Bucket.DocOps.CREATE, 0,\n replicate_to=self.replicate_to, persist_to=self.persist_to,\n durability=self.durability_level,\n timeout=self.sdk_timeout, time_unit=\"seconds\",\n retry_exceptions=retry_exceptions)\n\n self.sleep(10, \"Wait for doc_ops to start\")\n self.log.info(\"Killing {0}:{1} on node {2}\"\n .format(self.process_name, self.service_name,\n target_node.ip))\n remote.kill_process(self.process_name, self.service_name,\n signum=signum[self.sig_type])\n remote.disconnect()\n # Wait for tasks completion and validate failures\n if self.transaction_load_task:\n self.task.jython_task_manager.get_task_result(\n self.transaction_load_task)\n if self.N1qltxn:\n self.task.jython_task_manager.get_task_result(\n self.N1ql_load_task)\n self.task_manager.get_task_result(self.doc_loading_task)\n self.bucket_util.verify_doc_op_task_exceptions(task_info,\n self.cluster)\n self.bucket_util.log_doc_ops_task_failures(task_info)\n\n # Verification stats\n verification_dict = dict()\n verification_dict[\"ops_create\"] = 2*self.num_items\n verification_dict[\"sync_write_aborted_count\"] = 0\n verification_dict[\"rollback_item_count\"] = 0\n verification_dict[\"pending_writes\"] = 0\n if self.__is_sync_write_enabled:\n verification_dict[\"sync_write_committed_count\"] = 2*self.num_items\n\n if self.bucket_type == Bucket.Type.EPHEMERAL \\\n and self.process_name == \"memcached\":\n result = self.task.rebalance(self.cluster, [], [])\n self.assertTrue(result, \"Rebalance failed\")\n\n # Validate doc count\n if self.process_name != \"memcached\":\n stats_failed = \\\n self.durability_helper.verify_vbucket_details_stats(\n def_bucket, self.cluster_util.get_kv_nodes(self.cluster),\n vbuckets=self.cluster.vbuckets,\n expected_val=verification_dict)\n if stats_failed:\n self.fail(\"Cbstats verification failed\")\n\n # Doc count validation per collection\n if not self.N1qltxn and self.atomicity is False:\n self.bucket_util.validate_docs_per_collections_all_buckets(\n self.cluster)",
"def pin_errors(self):\n for m in range(self.stage_width_list[-1]):\n error, _ = rqrmilib.calculate_submodel_error(self._get_native_object(), self.probe, len(self)-1, m)\n if error < 0: error = 0\n self.error_list[m] = int(error)\n self.rqrmi_state_changed = True\n return self.error_list",
"def check_for_initial_crash(test_runner, crash_retries, testcase):\n crash_times = []\n flaky_stack = False\n saved_crash_state = None\n saved_security_flag = None\n saved_unsymbolized_crash_state = None\n\n results = test_runner.execute_parallel_runs(crash_retries)\n\n for result in results:\n if not result.is_crash():\n continue\n\n if result.should_ignore():\n continue\n\n crash_state = result.get_state(symbolized=True)\n security_flag = result.is_security_issue()\n unsymbolized_crash_state = result.get_state(symbolized=False)\n\n if not unsymbolized_crash_state:\n continue\n\n if security_flag != testcase.security_flag:\n continue\n\n crash_times.append(result.crash_time)\n\n if not saved_crash_state:\n saved_crash_state = crash_state\n saved_security_flag = security_flag\n saved_unsymbolized_crash_state = unsymbolized_crash_state\n continue\n\n crash_comparer = CrashComparer(crash_state, saved_crash_state)\n if not crash_comparer.is_similar():\n flaky_stack = True\n\n logs.log('Total crash count: %d/%d. Flaky: %s. Security: %s. State:\\n%s' %\n (len(crash_times), crash_retries, flaky_stack, saved_security_flag,\n saved_crash_state))\n\n return saved_unsymbolized_crash_state, flaky_stack, crash_times",
"def _add_faults(self, feature_builder, features=None):\n if features is None:\n features = self.features\n for f in reversed(features):\n if f.type == 'fault':\n feature_builder.add_fault(f)\n # if f.type == 'unconformity':\n # break",
"def fault(self):\n return (self.status == self.STATUS_FAULT)",
"def is_faulty(self, event):\n for each in self._faults:\n if each.name.upper() == event.name.upper():\n return True\n return False"
] | [
"0.6717997",
"0.61814696",
"0.5997935",
"0.5937447",
"0.5786711",
"0.5544165",
"0.5489587",
"0.54597056",
"0.53924805",
"0.53853124",
"0.53460693",
"0.53460693",
"0.5332667",
"0.5330461",
"0.5298619",
"0.5260537",
"0.5236995",
"0.5182294",
"0.5093936",
"0.50649893",
"0.5040983",
"0.5036408",
"0.5035424",
"0.50090665",
"0.50085974",
"0.4980584",
"0.49759415",
"0.49644327",
"0.4933094",
"0.49291593"
] | 0.807573 | 0 |
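A minimal usage sketch for the two fault accessors shown in the records above (get_faults_current and get_faults_latched). The shouter parameter is a hypothetical device object assumed to expose those two methods, each returning a list of fault-name strings as built by __get_faults_list; the function name and output formatting are illustrative, not part of the original API.

def report_faults(shouter):
    # `shouter` is assumed to expose get_faults_current() and
    # get_faults_latched(), each returning a list of fault-name strings.
    current = shouter.get_faults_current()
    latched = shouter.get_faults_latched()
    if current:
        print("Active faults: " + ", ".join(current))
    elif latched:
        print("No active faults, but faults were latched earlier: " + ", ".join(latched))
    else:
        print("No faults reported.")
    return current, latched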
Gets the pattern wave (e.g. 101011110011 ...). The wave is read back as a sequence of block transfers: each request returns a Pattern Wave block flagged [More to follow], and "Request Next block" is issued repeatedly until a block arrives flagged [No More to follow]. | def __request_pat_wave(self, r_number):
packet = bytearray()
packet.append(0) # 16 bit options
packet.append(0) # 8 bit options
packet.append(1) # Request the 1 option
# ---------------------------------------------------------------------
# Request the variable length options. pattern wave.
packet.append(0x01 << t_var_size_Options.PATTERN_WAVE)
# ---------------------------------------------------------------------
# Packets to follow
packet.append(r_number)
# ---------------------------------------------------------------------
# Length of the bytes to follow
packet.append(0)
rval = self.interact_with_shouter(packet)
if rval != False:
return rval
return [] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def wave(self):\n return self._wave",
"def waveband(self):\n return self.get(\"waveband\")",
"def waveband(self):\n return self.get(\"waveband\", default=\"\", decode=True).split(\"#\")",
"def _wave(self):\n try:\n return wave.open(StringIO(self.contents))\n except wave.Error, err:\n err.message += \"\\nInvalid wave file: %s\" % self\n err.args = (err.message,)\n raise",
"def getWave(self):\n return self._wave",
"def getPattern(self):\n return self.pattern",
"def get_wstart(ref, wave_ref, wave_per_pixel):\n\n return wave_ref - ((ref-1) * wave_per_pixel)",
"def waveband(self):\n return self._band",
"def wave_tx_repeat():\n return _u2i(_pigpio_command(_control, _PI_CMD_WVGOR, 0, 0))",
"def get_waveforms(self, network, station, location, channel, starttime,\n endtime):\n # padding channel with spaces does not make sense\n if len(channel) < 3 and channel != \".*\":\n msg = \"channel expression matches less than 3 characters \" + \\\n \"(use e.g. 'BHZ', 'BH?', 'BH[Z12]', 'B??')\"\n raise Exception(msg)\n seedname = '%-2s%-5s%s%-2s' % (network, station, channel, location)\n # allow UNIX style \"?\" wildcard\n seedname = seedname.replace(\"?\", \".\")\n return self.get_waveforms_nscl(seedname, starttime,\n endtime - starttime)",
"def getRicker(f,t):\n # assert len(f) == 1, 'Ricker wavelet needs 1 frequency as input'\n # f = f[0]\n pift = pi*f*t\n wav = (1 - 2*pift**2)*np.exp(-pift**2)\n return wav",
"def get_waveform_halfwidth(waveform, sampling_rate=30000.):\n w = resample(waveform,200)#upsample to smooth the data\n time = np.linspace(0,len(waveform)/sampling_rate,200)\n trough = np.where(w==np.min(w))[0][0]\n peak = np.where(w==np.max(w))[0][0]\n \n #dur = time[trough:][np.where(w[trough:]==np.max(w[trough:]))[0][0]] - time[trough]\n if w[peak] > np.abs(w[trough]):\n dur = time[peak:][np.where(w[peak:]>=0.5*np.min(w[peak:]))[0][0]] - time[peak] \n else:\n dur = time[trough:][np.where(w[trough:]<=0.5*np.max(w[trough:]))[0][0]] - time[trough] \n if peak<trough:\n dur=-dur\n return dur",
"def wave_parameters(self):\n return self._wave_params",
"def rec_one_shot(self, sec, file_name=None):\n self.__open_noncallback_stream()\n frames = []\n for i in range(int(self.RATE / self.CHUNK * sec)):\n data = self.stream.read(self.CHUNK)\n data = np.fromstring(data, dtype=np.int16)\n frames.append(data)\n self.stream.stop_stream()\n if file_name is not None:\n with wave.open(file_name, 'wb') as wav_file:\n wav_file.setnchannels(self.CHANNELS)\n wav_file.setsampwidth(self.recorder.get_sample_size(self.FORMAT))\n wav_file.setframerate(self.RATE)\n wav_file.writeframes(b''.join(frames))\n frame = np.concatenate(frames, 0)\n self.stop_streaming()\n return frame",
"def pattern(self):\n return self.get_data(\"pattern\")",
"def wave_samples(self):\n return self._quantized_subsamples",
"def test_signal(self, data = \"MODE=init\"):\n resdat = wavehttp.get(\"/wave/wfe/test?VER=6&\"+data+\"&zx=\"+self.zx()+\"&t=1\")\n return resdat",
"def wave_send_repeat(wave_id):\n return _u2i(_pigpio_command(_control, _PI_CMD_WVTXR, wave_id, 0))",
"def askwave(self):\n if self.status != \"not connected\":\n m = self.serial\n m.write(\"wave?\" + \"\\r\\n\")\n r = m.read(100)\n r = r[7:]\n result = string.strip(r)\n return result\n else:\n pass",
"def pattern(self):\n return self[\"pattern\"]",
"def pattern(self):\n return self[\"pattern\"]",
"def modulate(data):\n\n wave = ''\n levels = ('\\x00', '\\x55', '\\xaa', '\\xff')\n \n for frame in data:\n next_num = frame\n for grp in range(4):\n wave += levels[next_num % 4]\n next_num /= 4\n\n return wave",
"def rate(self):\n if self._rate:\n return self._rate\n else:\n return self._wave.getframerate()",
"def record_and_get_wav(self, time):\n sample_width, frames = self.record_audio(time)\n return WavFile(samples=frames, sample_width=sample_width, time=time)",
"def do_wave(l, wave_type, r, g, b, duration, repeat):\n command = create_wave_command(\n wave_type, r, g, b, duration, repeat\n )\n l.write(command)",
"def read_wave(path):\n with contextlib.closing(wave.open(path, 'rb')) as wf:\n num_channels = wf.getnchannels()\n assert num_channels == 1\n sample_width = wf.getsampwidth()\n assert sample_width == 2\n sample_rate = wf.getframerate()\n assert sample_rate in (8000, 16000, 32000)\n pcm_data = wf.readframes(wf.getnframes())\n return pcm_data, sample_rate",
"def __next_chunk_pattern(self, chunk_sectors:int):\n\t\tcurr_pat = next(self.pat_it, None)\t\t\t\n\n\t\tif (curr_pat is None):\n\t\t\tself.__reset_pat()\n\t\t\tcurr_pat = next(self.pat_it, None)\n\t\t\t\t\t\n\t\treturn bytearray(curr_pat[0:chunk_sectors * 512])",
"def spectral(w, s=1.0):\n n_in, n_out = w.size()\n n = max(n_out, n_in)\n gain = s / math.sqrt(n)\n return w.normal_(0, 1).mul_(gain)",
"def wav(self):\n if self._wav is None and self._nu is not None:\n return self._nu.to(u.micron, equivalencies=u.spectral())\n else:\n return self._wav",
"def get_pattern(self, name):\n return self._pattern_reg[name]"
] | [
"0.5981788",
"0.5724615",
"0.56906223",
"0.5673661",
"0.56648856",
"0.544376",
"0.54234815",
"0.5388818",
"0.5373861",
"0.53209776",
"0.53193724",
"0.5285998",
"0.52687967",
"0.5267398",
"0.51937246",
"0.5191461",
"0.5150314",
"0.5148032",
"0.51321924",
"0.5121972",
"0.5121972",
"0.51162314",
"0.5113482",
"0.51080644",
"0.5107305",
"0.50997585",
"0.50965",
"0.5076782",
"0.50737554",
"0.5073261"
] | 0.7454566 | 0 |
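A sketch of the block-transfer loop described by the query in the record above. It assumes a hypothetical public wrapper request_pat_wave_block(n) around the private __request_pat_wave shown there, returning a list of samples per block and an empty list once nothing more follows; the wrapper name, the empty-list terminal condition, and the block limit are assumptions rather than part of the original protocol code.

def read_full_pattern_wave(shouter, max_blocks=64):
    # Repeatedly request pattern-wave blocks until the device stops
    # returning data (assumed terminal condition) or the limit is reached.
    wave = []
    for block_number in range(max_blocks):
        chunk = shouter.request_pat_wave_block(block_number)
        if not chunk:
            break
        wave.extend(chunk)
    return wave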
The resource ID of the Network Fabric l3IsolationDomain. | def l3_isolation_domain_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "l3_isolation_domain_id") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def l3_isolation_domain_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"l3_isolation_domain_id\")",
"def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")",
"def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")",
"def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")",
"def external_network_id(self) -> str:\n return pulumi.get(self, \"external_network_id\")",
"def id(self):\n return self._domain.id",
"def domain_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"domain_id\")",
"def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")",
"def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")",
"def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")",
"def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")",
"def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")",
"def dns_zone_resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"dns_zone_resource_id\")",
"def domain_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"domain_id\")",
"def domain_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"domain_id\")",
"def resource_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_id\")",
"def resource_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_id\")",
"def resource_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_id\")",
"def resource_pool_id(self) -> str:\n return pulumi.get(self, \"resource_pool_id\")",
"def failover_group_id(self) -> str:\n return pulumi.get(self, \"failover_group_id\")",
"def resource_id(self) -> Optional[str]:\n return pulumi.get(self, \"resource_id\")",
"def custom_compliance_domain_id(self):\n return self._custom_compliance_domain_id",
"def unique_id(self):\n return self._light.address",
"def get_keystone_v3_domain_id(self, domain_name):\n LOG_OBJ.debug(\"Get the domain ID.\")\n\n _url = \"http://\" + self.host_ip + \":35357/v3/domains?name=\" + \\\n str(domain_name)\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while getting the \"\n \"ID of domain\")\n print (\"No response from Server while getting the \"\n \"ID of domain\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get domain ID Failed with status %s and error \"\n \": %s\" % (response.status, response.data))\n print (\"Get domain ID Failed with status %s and error : %s\" %\n (response.status, response.data))\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Domain details : %s \" % output)\n if len(output['domains']) != 1:\n LOG_OBJ.debug(\"No. of domains with name %s is %s\"\n % (domain_name, len(output['domains'])))\n print(\"No. of domains with name %s is %s\"\n % (domain_name, len(output['domains'])))\n return\n\n return output['domains'][0]['id']",
"def resource_group_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"resource_group_id\")",
"def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")",
"def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")",
"def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")",
"def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")",
"def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")"
] | [
"0.82438326",
"0.639748",
"0.639748",
"0.639748",
"0.6284943",
"0.6265004",
"0.62123185",
"0.617114",
"0.617114",
"0.617114",
"0.617114",
"0.617114",
"0.6158674",
"0.60937095",
"0.60937095",
"0.6091811",
"0.6091811",
"0.6091811",
"0.60563904",
"0.60457486",
"0.5990473",
"0.5979375",
"0.59711367",
"0.5962884",
"0.5938235",
"0.59169245",
"0.59169245",
"0.59169245",
"0.59169245",
"0.59169245"
] | 0.8125011 | 1 |
The default interface name for this L3 network in the virtual machine. This name can be overridden by the name supplied in the network attachment configuration of that virtual machine. | def interface_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "interface_name") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def interface_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"interface_name\")",
"def l3_network_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"l3_network_name\")",
"def interface_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"interface_name\")",
"def network_interface_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"network_interface_id\")",
"def get_interface_name():\n interface_name = ''\n interfaces = psutil.net_if_addrs()\n for name, details in interfaces.items():\n for detail in details:\n if detail.family == socket.AF_INET:\n ip_address = ipaddress.ip_address(detail.address)\n if not (ip_address.is_link_local or ip_address.is_loopback):\n interface_name = name\n break\n return interface_name",
"def _get_interface_name(self):\n return self.__interface_name",
"def getDefaultLayerName(self):\n\t\treturn self._fileSystem.getDefaultLayerName()",
"def _get_ifname(self, intf_type, interface):\n if intf_type == 'port':\n ifname = 'Ethernet' + str(interface)\n elif intf_type == 'portchannel':\n ifname = 'po' + str(interface)\n else:\n raise Exception(\"Unknown interface type: \" + intf_type)\n\n return ifname",
"def get_logical_ifname(self, interface_name, proto='provision'): # pragma: no cover\n output = check_output(['uci', 'show', 'network'])\n network_list = output.strip().split('\\n')\n for config in network_list:\n cfg, option = config.split('=')\n net_prex = cfg.split(\".\")\n if net_prex[-1] == \"proto\" and str(option) != proto:\n ifname = '.'.join(net_prex[:-1]) + '.ifname'\n interface = check_output(['uci', 'get', ifname]).split('\\n')[0]\n if interface == interface_name:\n return net_prex[1]\n return ''",
"def get_default_config(self):\n if not self.iface_type:\n return None\n\n defaults = {}\n defaults['description'] = self.interface_name + ' Interface'\n defaults['admin'] = 'up'\n if self.is_ethernet:\n defaults['speed'] = 'auto'\n defaults['duplex'] = 'auto'\n defaults['type'] = 'bridged'\n elif self.iface_type == 'Bridge-Aggregation':\n defaults['type'] = 'bridged'\n else:\n defaults['type'] = 'routed'\n\n return defaults",
"def get_interface_name(self, network, port=None):\n if not port:\n device_id = self.get_device_id(network)\n port = self.plugin.get_dhcp_port(network.id, device_id)\n return self.driver.get_device_name(port)",
"def getDefaultName(self): # real signature unknown; restored from __doc__\n pass",
"def network_interface(self): \n return self._network_interface",
"def default_ip(ifname):\n ipr = IPRoute()\n index = ipr.link_lookup(ifname=ifname)[0]\n addr = ipr.get_addr(index=index)[0]\n interface = ipaddress.ip_interface('{}/{}'.format(addr.get_attr('IFA_ADDRESS'), addr['prefixlen']))\n addr = interface.ip + 1\n if addr in interface.network:\n return str(addr)\n raise TypeError(f'Unable to calculate default node ip in {ifname} ({interface})')",
"def moc_vnet_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"moc_vnet_name\")",
"def managed_network_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"managed_network_name\")",
"def GetInterface(self):\n ifname = self.interface_watcher.get_last_ifname()\n if ifname is None:\n ifname = ''\n logger.debug('Replying \"' + ifname + '\" to D-Bus request GetInterface')\n return ifname",
"def get_default_ip():\r\n if CONFIG.BIND_INTERFACE is None:\r\n default_gw = netifaces.gateways()['default']\r\n if netifaces.AF_INET in default_gw:\r\n preferred_interface = default_gw[netifaces.AF_INET][1]\r\n else:\r\n interfaces = netifaces.interfaces()\r\n preferred_interface = next((i for i in interfaces if i != 'lo'), interfaces[0])\r\n else:\r\n preferred_interface = CONFIG.BIND_INTERFACE\r\n return netifaces.ifaddresses(preferred_interface)[netifaces.AF_INET][0]['addr']",
"def get_default_iface_name_linux():\n route = \"/proc/net/route\"\n with open(route) as f:\n for line in f.readlines():\n try:\n iface, dest, _, flags, _, _, _, _, _, _, _, = line.strip().split()\n if dest != '00000000' or not int(flags, 16) & 2:\n continue\n return iface\n except:\n continue",
"def cloud_services_network_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cloud_services_network_name\")",
"def get_network_name(self): # type: () -> str\n networks = self.get_network_names()\n\n if not networks:\n raise ApplicationError('No network found for Docker container: %s.' % self.id)\n\n if len(networks) > 1:\n raise ApplicationError('Found multiple networks for Docker container %s instead of only one: %s' % (self.id, ', '.join(networks)))\n\n return networks[0]",
"def default_name(self):\n return '[' + self.__class__.__name__ + ']'",
"def computer_network_name(self) -> str:\n return self._computer_network_name",
"def network(self) -> str:\n return pulumi.get(self, \"network\")",
"def interviewer_name_default(self, interviewer_name_default):\n\n self._interviewer_name_default = interviewer_name_default",
"def default_name(self):\n name = f\"Player {self.UID.split('-')[0]}\"\n return name",
"def vnet_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"vnet_name\")",
"def get_network_default_gateway(self):\n return self.mycam.devicemgmt.GetNetworkDefaultGateway()",
"def get_name(self):\n \n return 'Socket/IP'",
"def network_watcher_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"network_watcher_name\")"
] | [
"0.67701626",
"0.6730182",
"0.6579234",
"0.65378875",
"0.6346248",
"0.6293956",
"0.6253151",
"0.61782867",
"0.61560553",
"0.61084664",
"0.61054385",
"0.605106",
"0.59554195",
"0.59140545",
"0.5869591",
"0.58683455",
"0.58659315",
"0.5836901",
"0.5836809",
"0.5832016",
"0.582706",
"0.5820547",
"0.58059514",
"0.57857776",
"0.5777015",
"0.5722368",
"0.56486154",
"0.56398",
"0.56388086",
"0.56309706"
] | 0.684074 | 0 |
The name of the L3 network. | def l3_network_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "l3_network_name") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def network(self) -> str:\n return pulumi.get(self, \"network\")",
"def computer_network_name(self) -> str:\n return self._computer_network_name",
"def network_name(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"network_name\"), kwargs)",
"def name(self) -> str:\n return f\"{self._inst} NAT {self._data['name']}\"",
"def managed_network_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"managed_network_name\")",
"def layer_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"layer_name\")",
"def get_name():\n\n return 'nettools'",
"def network(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"network\")",
"def layer_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"layer_name\")",
"def cloud_services_network_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cloud_services_network_name\")",
"def network(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"network\")",
"def get_name(self):\n name_str = \"Brain\"\n name_str += \"_\" + self._memory.get_name() \n name_str += \"_ImgSize\" + str(self._img_size[0])\n name_str += \"_Nov\" + self._novelty_loss_type.upper()\n name_str += \"_Train\" + str(self._train_epochs_per_iter)\n name_str += \"_Lrate\" + str(self._learning_rate)\n return name_str",
"def __str__(self):\n\n return \"Network: {0}\".format(self.topology)",
"def layer_protocol_name(self) -> str:\n return self._layer_protocol_name",
"def nw_name(self):\n return self._nw_name",
"def get_network_name(self): # type: () -> str\n networks = self.get_network_names()\n\n if not networks:\n raise ApplicationError('No network found for Docker container: %s.' % self.id)\n\n if len(networks) > 1:\n raise ApplicationError('Found multiple networks for Docker container %s instead of only one: %s' % (self.id, ', '.join(networks)))\n\n return networks[0]",
"def get_network_name_on_vc(options):\n network = get_network_on_vc(options)\n if network:\n return network.name",
"def layer_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"layer_name\")",
"def name(self):\n return 'VL53L1X'",
"def get_node_name(self):\n return util.join_names_underscore(self.name, str(self.as_pointer()))",
"def get_network(self) -> Optional[str]:\n return self.get_value(self._network_attribute)",
"def name(self):\n return utf82unicode(pn_link_name(self._impl))",
"def get_network_name(options):\n user = pwd.getpwuid(os.getuid())[0]\n return \"%s-%s\" %(user, options.name)",
"def _get_l3_label(self):\n return self.__l3_label",
"def network_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"network_id\")",
"def network_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"network_id\")",
"def network_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"network_id\")",
"def name(self) -> str:\n return self._alias or f\"Nut-{self._host}\"",
"def network_watcher_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"network_watcher_name\")",
"def getName(self):\n return _libsbml.XMLTriple_getName(self)"
] | [
"0.6955331",
"0.6824133",
"0.6799091",
"0.6677395",
"0.66031",
"0.6491807",
"0.6462104",
"0.64592564",
"0.64398986",
"0.63908404",
"0.63872564",
"0.638713",
"0.63722634",
"0.6359511",
"0.6331267",
"0.63089246",
"0.62042356",
"0.6186131",
"0.61773044",
"0.61605525",
"0.6082077",
"0.60543025",
"0.60192996",
"0.6005461",
"0.59952414",
"0.59952414",
"0.59952414",
"0.5986967",
"0.59714127",
"0.5953963"
] | 0.88098806 | 0 |
Get an existing L3Network resource's state with the given name, id, and optional extra properties used to qualify the lookup. | def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'L3Network':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = L3NetworkArgs.__new__(L3NetworkArgs)
__props__.__dict__["associated_resource_ids"] = None
__props__.__dict__["cluster_id"] = None
__props__.__dict__["detailed_status"] = None
__props__.__dict__["detailed_status_message"] = None
__props__.__dict__["extended_location"] = None
__props__.__dict__["hybrid_aks_clusters_associated_ids"] = None
__props__.__dict__["hybrid_aks_ipam_enabled"] = None
__props__.__dict__["hybrid_aks_plugin_type"] = None
__props__.__dict__["interface_name"] = None
__props__.__dict__["ip_allocation_type"] = None
__props__.__dict__["ipv4_connected_prefix"] = None
__props__.__dict__["ipv6_connected_prefix"] = None
__props__.__dict__["l3_isolation_domain_id"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
__props__.__dict__["virtual_machines_associated_ids"] = None
__props__.__dict__["vlan"] = None
return L3Network(resource_name, opts=opts, __props__=__props__) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def a_state(id):\n state = storage.get(State, id)\n if state is not None:\n return jsonify(state.to_dict())\n abort(404)",
"def get_state_by_id(state_id):\n state = storage.get(State, state_id)\n if not state:\n abort(404)\n return jsonify(state.to_dict()), 200",
"def get_state_by_name(exploration_id, state_name, strict=True):\n exploration = get_exploration_by_id(exploration_id)\n assert state_name\n\n # TODO(sll): This is too slow; improve it.\n state = None\n for candidate_state in exploration.states:\n if candidate_state.name == state_name:\n state = candidate_state\n break\n\n if strict and not state:\n raise Exception('State %s not found' % state_name)\n return state",
"def get_state_by_id(state_id):\n my_state = storage.get('State', state_id)\n if my_state is None:\n abort(404)\n return jsonify(my_state.to_dict())",
"def get_state_by_id(state_id):\n for key, value in storage.all(\"State\").items():\n if state_id == value.id:\n return jsonify(value.to_dict())\n abort(404)",
"def get_state(state_id):\n try:\n ''' Check that state_id exists '''\n query = State.select().where(State.id == state_id)\n if not query.exists():\n raise LookupError('state_id')\n\n state = State.get(State.id == state_id)\n return state.to_dict(), 200\n except LookupError as e:\n abort(404)\n except Exception as e:\n abort(500)",
"def get_network(self, name_or_id, filters=None):\n if not filters:\n filters = {}\n return self.network.find_network(\n name_or_id=name_or_id, ignore_missing=True, **filters\n )",
"def get_state_by_id(exploration_id, state_id, strict=True):\n # TODO(sll): Generalize this to handle multiple state_ids at a time.\n state_memcache_key = _get_state_memcache_key(exploration_id, state_id)\n memcached_state = memcache_services.get_multi(\n [state_memcache_key]).get(state_memcache_key)\n\n if memcached_state is not None:\n return memcached_state\n else:\n state_model = exp_models.StateModel.get(\n exploration_id, state_id, strict=strict)\n if state_model:\n state = exp_domain.State.from_dict(state_id, state_model.value)\n memcache_services.set_multi({state_memcache_key: state})\n return state\n else:\n return None",
"def state_by_id(state_id):\n state = storage.get(State, state_id)\n if state is None:\n abort(404)\n return jsonify(state.to_dict())",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n address_space_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n allow_forwarded_traffic: Optional[pulumi.Input[bool]] = None,\n allow_gateway_transit: Optional[pulumi.Input[bool]] = None,\n allow_virtual_network_access: Optional[pulumi.Input[bool]] = None,\n name: Optional[pulumi.Input[str]] = None,\n remote_address_space_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n remote_virtual_network_id: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n use_remote_gateways: Optional[pulumi.Input[bool]] = None,\n virtual_network_id: Optional[pulumi.Input[str]] = None,\n workspace_id: Optional[pulumi.Input[str]] = None) -> 'VirtualNetworkPeering':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _VirtualNetworkPeeringState.__new__(_VirtualNetworkPeeringState)\n\n __props__.__dict__[\"address_space_prefixes\"] = address_space_prefixes\n __props__.__dict__[\"allow_forwarded_traffic\"] = allow_forwarded_traffic\n __props__.__dict__[\"allow_gateway_transit\"] = allow_gateway_transit\n __props__.__dict__[\"allow_virtual_network_access\"] = allow_virtual_network_access\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"remote_address_space_prefixes\"] = remote_address_space_prefixes\n __props__.__dict__[\"remote_virtual_network_id\"] = remote_virtual_network_id\n __props__.__dict__[\"resource_group_name\"] = resource_group_name\n __props__.__dict__[\"use_remote_gateways\"] = use_remote_gateways\n __props__.__dict__[\"virtual_network_id\"] = virtual_network_id\n __props__.__dict__[\"workspace_id\"] = workspace_id\n return VirtualNetworkPeering(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'CloudServicesNetwork':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = CloudServicesNetworkArgs.__new__(CloudServicesNetworkArgs)\n\n __props__.__dict__[\"additional_egress_endpoints\"] = None\n __props__.__dict__[\"associated_resource_ids\"] = None\n __props__.__dict__[\"cluster_id\"] = None\n __props__.__dict__[\"detailed_status\"] = None\n __props__.__dict__[\"detailed_status_message\"] = None\n __props__.__dict__[\"enable_default_egress_endpoints\"] = None\n __props__.__dict__[\"enabled_egress_endpoints\"] = None\n __props__.__dict__[\"extended_location\"] = None\n __props__.__dict__[\"hybrid_aks_clusters_associated_ids\"] = None\n __props__.__dict__[\"interface_name\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"provisioning_state\"] = None\n __props__.__dict__[\"system_data\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"type\"] = None\n __props__.__dict__[\"virtual_machines_associated_ids\"] = None\n return CloudServicesNetwork(resource_name, opts=opts, __props__=__props__)",
"def get_state_by_id(state_id):\r\n response = Response(json.dumps(json_error(ResponsesREST.INVALID_INPUT.value)),\r\n status=ResponsesREST.INVALID_INPUT.value, mimetype=\"application/json\")\r\n if validator_id.is_valid({\"id\": state_id}):\r\n state_get = State()\r\n state_get.id_state = state_id\r\n result = state_get.get_state()\r\n if result in (ResponsesREST.NOT_FOUND.value, ResponsesREST.SERVER_ERROR.value):\r\n response = Response(json.dumps(json_error(result)),\r\n status=result, mimetype=\"application/json\")\r\n else:\r\n response = Response(json.dumps(result.json_state()),\r\n status=ResponsesREST.SUCCESSFUL.value,\r\n mimetype=\"application/json\")\r\n return response",
"def get_state(self, entity_id: str, attribute: str = \"state\") -> dict:\n if not self.connected:\n LOGGER.warning(\"Connection is not yet ready.\")\n state_obj = self._states.get(entity_id)\n if state_obj:\n if attribute == \"state\":\n return state_obj[\"state\"]\n if attribute:\n return state_obj[\"attributes\"].get(attribute)\n return state_obj\n return None",
"def state_by_id(state_id):\n states_values = storage.all(\"State\").values()\n for obj in states_values:\n if obj.id == state_id:\n return jsonify(obj.to_dict())\n abort(404)",
"def getstate(self,name):\n state = self.states[name]\n debug('kfnode.getstate ',(name,state))\n return state",
"def get_state(state_id):\n try:\n state = jsonify(storage.get(State, state_id).to_dict())\n return state\n except:\n abort(404)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n arn: Optional[pulumi.Input[str]] = None,\n auth_mode: Optional[pulumi.Input[str]] = None,\n default_s3_location: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n engine_security_group_id: Optional[pulumi.Input[str]] = None,\n idp_auth_url: Optional[pulumi.Input[str]] = None,\n idp_relay_state_parameter_name: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n service_role: Optional[pulumi.Input[str]] = None,\n subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n url: Optional[pulumi.Input[str]] = None,\n user_role: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n workspace_security_group_id: Optional[pulumi.Input[str]] = None) -> 'Studio':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _StudioState.__new__(_StudioState)\n\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"auth_mode\"] = auth_mode\n __props__.__dict__[\"default_s3_location\"] = default_s3_location\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"engine_security_group_id\"] = engine_security_group_id\n __props__.__dict__[\"idp_auth_url\"] = idp_auth_url\n __props__.__dict__[\"idp_relay_state_parameter_name\"] = idp_relay_state_parameter_name\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"service_role\"] = service_role\n __props__.__dict__[\"subnet_ids\"] = subnet_ids\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"url\"] = url\n __props__.__dict__[\"user_role\"] = user_role\n __props__.__dict__[\"vpc_id\"] = vpc_id\n __props__.__dict__[\"workspace_security_group_id\"] = workspace_security_group_id\n return Studio(resource_name, opts=opts, __props__=__props__)",
"def get_by_id(cls, name):\n\t\treturn super(Locality, cls).get_by_id(cls.normalized_name(name))",
"def get(self, request, state_id, format=None):\n try:\n state = State.objects.get(id=state_id)\n except ObjectDoesNotExist:\n raise NotFound(detail=\"State not found\")\n\n return Response(StateSerializer(state).data)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'NetworkGroup':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = dict()\n\n __props__[\"conditional_membership\"] = None\n __props__[\"description\"] = None\n __props__[\"display_name\"] = None\n __props__[\"etag\"] = None\n __props__[\"group_members\"] = None\n __props__[\"member_type\"] = None\n __props__[\"name\"] = None\n __props__[\"provisioning_state\"] = None\n __props__[\"system_data\"] = None\n __props__[\"type\"] = None\n return NetworkGroup(resource_name, opts=opts, __props__=__props__)",
"def get_state_by_id(states: [State], state_id: str, id_type: str) -> State:\n if id_type == 'new':\n for state in states:\n if state.new_id == state_id:\n return state\n if id_type == 'old':\n for state in states:\n if state.id == state_id:\n return state\n return states[0]",
"def get_one_state(state_id):\n state = storage.get('State', state_id)\n if state is None:\n abort(404)\n if request.method == 'DELETE':\n storage.delete(state)\n storage.save()\n return jsonify({}), 200\n elif request.method == 'PUT':\n try:\n res_dict = request.get_json()\n res_dict['id'] = state.id\n res_dict['created_at'] = state.created_at\n state.__init__(**res_dict)\n state.save()\n return jsonify(state.to_dict()), 200\n except:\n abort(400, description='Not a JSON')\n return jsonify(state.to_dict())",
"def get_network_by_id(self, id):\n return self.network.get_network(id)",
"def get_state(state_id):\n state = storage.get(State, state_id)\n if state is None:\n abort(404)\n return jsonify(state.to_dict())",
"def get_state(state_id):\n state = storage.get(\"State\", state_id)\n if state:\n return jsonify(state.to_dict())\n abort(404)",
"async def get_state(\n self,\n entity_id: str = None,\n attribute: str = None,\n default: Any = None,\n copy: bool = True,\n **kwargs: Optional[Any],\n ) -> Any:\n namespace = self._get_namespace(**kwargs)\n\n return await self.get_entity_api(namespace, entity_id).get_state(attribute, default, copy, **kwargs)",
"def state_id(state_id):\n state = storage.get(State, state_id)\n if state is None:\n abort(404)\n else:\n return jsonify(state.to_dict())",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n force: Optional[pulumi.Input[bool]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n state: Optional[pulumi.Input[str]] = None) -> 'InstanceState':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _InstanceStateState.__new__(_InstanceStateState)\n\n __props__.__dict__[\"force\"] = force\n __props__.__dict__[\"instance_id\"] = instance_id\n __props__.__dict__[\"state\"] = state\n return InstanceState(resource_name, opts=opts, __props__=__props__)",
"def a_states_id(state_id):\n i = storage.get(\"State\", state_id)\n if i:\n return jsonify(i.to_dict())\n else:\n return (jsonify({\"error\": \"Not found\"}), 404)",
"def from_esi_name(cls, esi_state_name: str) -> \"StructureService.State\":\n STATES_ESI_MAP = {\"offline\": cls.OFFLINE, \"online\": cls.ONLINE}\n return (\n STATES_ESI_MAP[esi_state_name]\n if esi_state_name in STATES_ESI_MAP\n else cls.OFFLINE\n )"
] | [
"0.5696972",
"0.5616301",
"0.559337",
"0.55891603",
"0.55436486",
"0.5497109",
"0.54890585",
"0.5482085",
"0.5455767",
"0.5444336",
"0.54149556",
"0.53990793",
"0.53760177",
"0.53489095",
"0.534057",
"0.5307886",
"0.5290895",
"0.529065",
"0.5247792",
"0.5244457",
"0.5226916",
"0.5215637",
"0.520895",
"0.5200015",
"0.516398",
"0.50524247",
"0.5008109",
"0.4965296",
"0.4962385",
"0.4961434"
] | 0.6454363 | 0 |
The list of resource IDs for the other Microsoft.NetworkCloud resources that have attached this network. | def associated_resource_ids(self) -> pulumi.Output[Sequence[str]]:
return pulumi.get(self, "associated_resource_ids") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def network_ids(self):\n return self._network_ids",
"def otherResources(self):\n return self._get_list_field(\"otherResources\")",
"def resource_names(self):\n return self._resource_names",
"def get_ids(self):\n all_networks = []\n network_dict = {}\n for network, status in self.networks.items():\n if status[\"onboarded\"]:\n all_networks.append(\"{}\".format(network))\n network_dict[status[\"name\"]] = network\n\n self.network_ids = all_networks\n return network_dict",
"def network_fabric_ids(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"network_fabric_ids\")",
"def resources(self):\n return self._resources",
"def resources(self):\n return self._resources",
"def resources(self):\n return self._resources",
"def GetResourceNames(self):\r\n return [x.name for x in self.resources]",
"def getResources(self):\n\t\treturn deepcopy(self.server.resources)",
"def resources(self):\n res = []\n for resource in self._resources:\n res = res + resource.resources()\n\n return res",
"def resources(self):\n res = []\n for resource in self._resources:\n res = res + resource.resources()\n\n return res",
"def network_interface_ids(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"network_interface_ids\")",
"def parent_resources(cls):\n parent = cls.parent_resource\n parents = [parent]\n\n try:\n while True:\n parent = parent.parent_resource\n parents.append(parent)\n except AttributeError:\n pass\n\n parents.reverse()\n return parents",
"def link_ids(self):\n return self._link_ids",
"def get_resources(self):\n return []",
"def resources(self) -> \"Resources\":\n return self._resources",
"def tenant_internet_gateway_ids(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"tenant_internet_gateway_ids\")",
"def get_parent_resource_nodes(self):\n raise errors.Unimplemented()",
"def pool_ids(self) -> Sequence[str]:\n return pulumi.get(self, \"pool_ids\")",
"def ids(self):\n return self._ids",
"def list_networks():\n return __sets.keys()",
"def virtual_machines_associated_ids(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"virtual_machines_associated_ids\")",
"def virtual_machines_associated_ids(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"virtual_machines_associated_ids\")",
"def resource_names(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"resource_names\")",
"def resource_names(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"resource_names\")",
"def resources(self) -> [ListResourcesResponse]:\n resources = []\n\n layers = self.layers\n for layer_name in layers:\n layer_arns = self.layer_version_arns(layer_name)\n for arn in layer_arns:\n list_resources_response = ListResourcesResponse(\n service=self.service, account_id=self.current_account_id, arn=arn, region=self.region,\n resource_type=self.resource_type, name=layer_name)\n resources.append(list_resources_response)\n return resources",
"def cloud_ids(self):\n if self.stage == 'trainval':\n ids = self.all_cloud_ids['train'] + self.all_cloud_ids['val']\n else:\n ids = self.all_cloud_ids[self.stage]\n return sorted(list(set(ids)))",
"def parent_ids(self):\n return self._parent_ids",
"def remote_get_ids(self):\n return self.smultiengine.get_ids()"
] | [
"0.69101155",
"0.6573186",
"0.6401023",
"0.6343839",
"0.6258195",
"0.5978847",
"0.5978847",
"0.5978847",
"0.5819962",
"0.5801385",
"0.5795712",
"0.5795712",
"0.57860464",
"0.57594633",
"0.5741592",
"0.5688209",
"0.5666085",
"0.5643691",
"0.56345814",
"0.5630704",
"0.5611117",
"0.5605345",
"0.55991346",
"0.55991346",
"0.55941325",
"0.55941325",
"0.55689716",
"0.5566267",
"0.5565457",
"0.55476016"
] | 0.70806557 | 1 |
The extended location of the cluster associated with the resource. | def extended_location(self) -> pulumi.Output['outputs.ExtendedLocationResponse']:
return pulumi.get(self, "extended_location") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def extended_location(self) -> pulumi.Output[Optional['outputs.ExtendedLocationResponse']]:\n return pulumi.get(self, \"extended_location\")",
"def extended_location(self) -> pulumi.Input['ExtendedLocationArgs']:\n return pulumi.get(self, \"extended_location\")",
"def extended_location(self) -> pulumi.Input['ExtendedLocationArgs']:\n return pulumi.get(self, \"extended_location\")",
"def set_up_extended_location(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n edge_zone = self.context.get_edge_zone()\n if edge_zone:\n mc.extended_location = self.models.ExtendedLocation(\n name=edge_zone,\n type=self.models.ExtendedLocationTypes.EDGE_ZONE\n )\n return mc",
"def extended_location(self) -> Optional[pulumi.Input['ExtendedLocationArgs']]:\n return pulumi.get(self, \"extended_location\")",
"def extended_location(self) -> Optional['outputs.ExtendedLocationResponse']:\n return pulumi.get(self, \"extended_location\")",
"def extended_location(self) -> Optional['outputs.ExtendedLocationResponse']:\n return pulumi.get(self, \"extended_location\")",
"def location(self) -> str:\n return self.metadata.location",
"def resource_type(self):\n return 'cluster'",
"def cluster(self):\n return self._cluster",
"def cluster(self):\n return self._cluster",
"def location(self):\n self.manager.refresh_client()\n return self.content[\"location\"]",
"def _course_location(self):\r\n return \"location:{org}+{number}+{run}+course+{run}\".format(**self._course_dict)",
"def location(self) -> str:\n return pulumi.get(self, \"location\")",
"def location(self) -> str:\n return pulumi.get(self, \"location\")",
"def location(self) -> str:\n return pulumi.get(self, \"location\")",
"def location(self) -> str:\n return pulumi.get(self, \"location\")",
"def location(self) -> str:\n return pulumi.get(self, \"location\")",
"def location(self) -> str:\n return pulumi.get(self, \"location\")",
"def location(self) -> str:\n return pulumi.get(self, \"location\")",
"def location(self) -> str:\n return pulumi.get(self, \"location\")",
"def location(self) -> str:\n return pulumi.get(self, \"location\")",
"def location(self) -> str:\n return pulumi.get(self, \"location\")",
"def location(self) -> str:\n return pulumi.get(self, \"location\")",
"def location(self) -> str:\n return pulumi.get(self, \"location\")",
"def location(self) -> str:\n return pulumi.get(self, \"location\")",
"def location(self) -> str:\n return self.__location",
"def location(self) -> str:\n return self.__location",
"def location(self) -> str:\n return self.__location",
"def cluster_name(self):\n return self.name"
] | [
"0.6552111",
"0.6507004",
"0.6507004",
"0.6427688",
"0.64267975",
"0.64260936",
"0.64260936",
"0.63151413",
"0.6296761",
"0.6123441",
"0.6123441",
"0.595561",
"0.58933264",
"0.5883953",
"0.5883953",
"0.5883953",
"0.5883953",
"0.5883953",
"0.5883953",
"0.5883953",
"0.5883953",
"0.5883953",
"0.5883953",
"0.5883953",
"0.5883953",
"0.5883953",
"0.5868833",
"0.5868833",
"0.5868833",
"0.5862934"
] | 0.67308724 | 1 |
The default interface name for this L3 network in the virtual machine. This name can be overridden by the name supplied in the network attachment configuration of that virtual machine. | def interface_name(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "interface_name") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def interface_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"interface_name\")",
"def l3_network_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"l3_network_name\")",
"def interface_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"interface_name\")",
"def network_interface_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"network_interface_id\")",
"def get_interface_name():\n interface_name = ''\n interfaces = psutil.net_if_addrs()\n for name, details in interfaces.items():\n for detail in details:\n if detail.family == socket.AF_INET:\n ip_address = ipaddress.ip_address(detail.address)\n if not (ip_address.is_link_local or ip_address.is_loopback):\n interface_name = name\n break\n return interface_name",
"def _get_interface_name(self):\n return self.__interface_name",
"def getDefaultLayerName(self):\n\t\treturn self._fileSystem.getDefaultLayerName()",
"def _get_ifname(self, intf_type, interface):\n if intf_type == 'port':\n ifname = 'Ethernet' + str(interface)\n elif intf_type == 'portchannel':\n ifname = 'po' + str(interface)\n else:\n raise Exception(\"Unknown interface type: \" + intf_type)\n\n return ifname",
"def get_logical_ifname(self, interface_name, proto='provision'): # pragma: no cover\n output = check_output(['uci', 'show', 'network'])\n network_list = output.strip().split('\\n')\n for config in network_list:\n cfg, option = config.split('=')\n net_prex = cfg.split(\".\")\n if net_prex[-1] == \"proto\" and str(option) != proto:\n ifname = '.'.join(net_prex[:-1]) + '.ifname'\n interface = check_output(['uci', 'get', ifname]).split('\\n')[0]\n if interface == interface_name:\n return net_prex[1]\n return ''",
"def get_default_config(self):\n if not self.iface_type:\n return None\n\n defaults = {}\n defaults['description'] = self.interface_name + ' Interface'\n defaults['admin'] = 'up'\n if self.is_ethernet:\n defaults['speed'] = 'auto'\n defaults['duplex'] = 'auto'\n defaults['type'] = 'bridged'\n elif self.iface_type == 'Bridge-Aggregation':\n defaults['type'] = 'bridged'\n else:\n defaults['type'] = 'routed'\n\n return defaults",
"def get_interface_name(self, network, port=None):\n if not port:\n device_id = self.get_device_id(network)\n port = self.plugin.get_dhcp_port(network.id, device_id)\n return self.driver.get_device_name(port)",
"def getDefaultName(self): # real signature unknown; restored from __doc__\n pass",
"def network_interface(self): \n return self._network_interface",
"def default_ip(ifname):\n ipr = IPRoute()\n index = ipr.link_lookup(ifname=ifname)[0]\n addr = ipr.get_addr(index=index)[0]\n interface = ipaddress.ip_interface('{}/{}'.format(addr.get_attr('IFA_ADDRESS'), addr['prefixlen']))\n addr = interface.ip + 1\n if addr in interface.network:\n return str(addr)\n raise TypeError(f'Unable to calculate default node ip in {ifname} ({interface})')",
"def managed_network_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"managed_network_name\")",
"def moc_vnet_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"moc_vnet_name\")",
"def GetInterface(self):\n ifname = self.interface_watcher.get_last_ifname()\n if ifname is None:\n ifname = ''\n logger.debug('Replying \"' + ifname + '\" to D-Bus request GetInterface')\n return ifname",
"def get_default_iface_name_linux():\n route = \"/proc/net/route\"\n with open(route) as f:\n for line in f.readlines():\n try:\n iface, dest, _, flags, _, _, _, _, _, _, _, = line.strip().split()\n if dest != '00000000' or not int(flags, 16) & 2:\n continue\n return iface\n except:\n continue",
"def get_default_ip():\r\n if CONFIG.BIND_INTERFACE is None:\r\n default_gw = netifaces.gateways()['default']\r\n if netifaces.AF_INET in default_gw:\r\n preferred_interface = default_gw[netifaces.AF_INET][1]\r\n else:\r\n interfaces = netifaces.interfaces()\r\n preferred_interface = next((i for i in interfaces if i != 'lo'), interfaces[0])\r\n else:\r\n preferred_interface = CONFIG.BIND_INTERFACE\r\n return netifaces.ifaddresses(preferred_interface)[netifaces.AF_INET][0]['addr']",
"def cloud_services_network_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cloud_services_network_name\")",
"def get_network_name(self): # type: () -> str\n networks = self.get_network_names()\n\n if not networks:\n raise ApplicationError('No network found for Docker container: %s.' % self.id)\n\n if len(networks) > 1:\n raise ApplicationError('Found multiple networks for Docker container %s instead of only one: %s' % (self.id, ', '.join(networks)))\n\n return networks[0]",
"def default_name(self):\n return '[' + self.__class__.__name__ + ']'",
"def computer_network_name(self) -> str:\n return self._computer_network_name",
"def network(self) -> str:\n return pulumi.get(self, \"network\")",
"def interviewer_name_default(self, interviewer_name_default):\n\n self._interviewer_name_default = interviewer_name_default",
"def default_name(self):\n name = f\"Player {self.UID.split('-')[0]}\"\n return name",
"def vnet_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"vnet_name\")",
"def get_network_default_gateway(self):\n return self.mycam.devicemgmt.GetNetworkDefaultGateway()",
"def get_name(self):\n \n return 'Socket/IP'",
"def network_watcher_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"network_watcher_name\")"
] | [
"0.6839024",
"0.67300063",
"0.6576439",
"0.6536506",
"0.63453054",
"0.62927467",
"0.6252902",
"0.6176393",
"0.6155957",
"0.6108241",
"0.61039215",
"0.60497546",
"0.5955052",
"0.5913382",
"0.5868023",
"0.5867791",
"0.586413",
"0.58365524",
"0.583649",
"0.58314383",
"0.58273387",
"0.58186436",
"0.58046484",
"0.5784699",
"0.5776711",
"0.5720125",
"0.5647284",
"0.5640018",
"0.5637313",
"0.56305647"
] | 0.67675763 | 1 |
The type of the IP address allocation, defaulted to "DualStack". | def ip_allocation_type(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "ip_allocation_type") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_address_type(self):\n return self.__address_type",
"def get_ip_type1(self) -> str:\n hex_ip = hexlify(self.message)[152:160]\n ip_addr = int(hex_ip[6:8] + hex_ip[4:6] + hex_ip[2:4] + hex_ip[0:2], 16)\n return inet_ntoa(pack(\"<L\", ip_addr))",
"def address_type(self) -> str:\n return pulumi.get(self, \"address_type\")",
"def get_ip_type2(self) -> str:\n hex_ip = hexlify(self.message)[154:162]\n ip_addr = int(hex_ip[0:2] + hex_ip[2:4] + hex_ip[4:6] + hex_ip[6:8], 16)\n return inet_ntoa(pack(\">L\", ip_addr))",
"def ip_type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"ip_type\")",
"def ip_allocation_type(self) -> Optional[pulumi.Input[Union[str, 'IpAllocationType']]]:\n return pulumi.get(self, \"ip_allocation_type\")",
"def ip_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ip_type\")",
"def address_type(self):\n return addresser.AddressSpace.PROPOSALS",
"def type(self):\n return BipType.get_at(self.ea)",
"def get_network_type(self):\n net_type = self._data['type']\n if net_type == 'Shared':\n return 'guest'\n elif net_type == 'Isolated':\n return 'isolated'",
"def allocate_subnet(self):\n if len(self.subnet_list) == 0:\n subnet = '192.168.1.0/24'\n self.subnet_list.append(subnet)\n return subnet\n else:\n subnet = self.subnet_list[::-1][0]\n ip = ipaddress.IPv4Network(subnet)[0]\n s = ipaddress.IPv4Address(ip) + 256\n return '{}{}'.format(s, '/24')",
"def test_external_ip_get_kind(self):\n assert_equal(self.test_external_ip.get_kind(), 'mpexternalip')",
"def AddrType(self) -> AddrTypes:\n return self.m_addr_type",
"def get_network_type(self):\n\t\treturn call_sdk_function('PrlVirtNet_GetNetworkType', self.handle)",
"def SocketType(self) -> SocketType:",
"def ip_protocol(self) -> str:\n protocol = f\"ipv{self.ip_address.version}\"\n\n log.debug(\"Host %s: IP protocol for paramiko is %s.\", self.host)\n return protocol",
"def get_type(self):\n types = dict(ADDRESS_TYPE_CHOICES)\n return types.get(self.address_type, \"N/A\")",
"def _address_type(self, address):\n parsed_type = None\n parsed = urlparse.urlparse(address)\n if parsed.scheme not in ('http', 'https', 'ipc', 'tcp'):\n raise ValueError('Invalid volttron central address.')\n\n return parsed.scheme",
"def outside_ip_address_type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"outside_ip_address_type\")",
"def get_ip_version(network):\r\n if netaddr.IPNetwork(network).version == 6:\r\n return \"IPv6\"\r\n elif netaddr.IPNetwork(network).version == 4:\r\n return \"IPv4\"",
"def _get_network_type(self):\n return collections.namedtuple('hyper_dqn_network',\n ['hyp_q_value', 'q_values'])",
"def __ip_protocol(self, proto_num):\n if proto_num in self.protocols:\n return self.protocols[proto_num]\n return str(proto_num)",
"def ip_allocation_method(self) -> pulumi.Input[Union[str, 'VirtualMachineIPAllocationMethod']]:\n return pulumi.get(self, \"ip_allocation_method\")",
"def get_ip_version(network):\n if netaddr.IPNetwork(network).version == 6:\n return \"IPv6\"\n elif netaddr.IPNetwork(network).version == 4:\n return \"IPv4\"",
"def address(self):\n \n return self.__ip",
"def ip_protocol(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"ip_protocol\")",
"def outside_ip_address_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"outside_ip_address_type\")",
"def outside_ip_address_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"outside_ip_address_type\")",
"def ip_protocol(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"ip_protocol\")",
"def pkt_type(self):\n return uint16_packer.unpack(self[32:34])[0]"
] | [
"0.6558105",
"0.6343783",
"0.62579095",
"0.6204035",
"0.61733663",
"0.60764885",
"0.60489833",
"0.6027968",
"0.590761",
"0.5870783",
"0.57298845",
"0.5710156",
"0.569875",
"0.5694414",
"0.5689083",
"0.5678903",
"0.5581792",
"0.5554217",
"0.5541468",
"0.55387825",
"0.5525745",
"0.55044353",
"0.55011237",
"0.5486684",
"0.5449133",
"0.5444622",
"0.54439265",
"0.54439265",
"0.5438365",
"0.5434098"
] | 0.66921926 | 0 |
The resource ID of the Network Fabric l3IsolationDomain. | def l3_isolation_domain_id(self) -> pulumi.Output[str]:
return pulumi.get(self, "l3_isolation_domain_id") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def l3_isolation_domain_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"l3_isolation_domain_id\")",
"def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")",
"def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")",
"def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")",
"def external_network_id(self) -> str:\n return pulumi.get(self, \"external_network_id\")",
"def id(self):\n return self._domain.id",
"def domain_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"domain_id\")",
"def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")",
"def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")",
"def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")",
"def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")",
"def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")",
"def dns_zone_resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"dns_zone_resource_id\")",
"def domain_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"domain_id\")",
"def domain_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"domain_id\")",
"def resource_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_id\")",
"def resource_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_id\")",
"def resource_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_id\")",
"def resource_pool_id(self) -> str:\n return pulumi.get(self, \"resource_pool_id\")",
"def failover_group_id(self) -> str:\n return pulumi.get(self, \"failover_group_id\")",
"def resource_id(self) -> Optional[str]:\n return pulumi.get(self, \"resource_id\")",
"def custom_compliance_domain_id(self):\n return self._custom_compliance_domain_id",
"def unique_id(self):\n return self._light.address",
"def get_keystone_v3_domain_id(self, domain_name):\n LOG_OBJ.debug(\"Get the domain ID.\")\n\n _url = \"http://\" + self.host_ip + \":35357/v3/domains?name=\" + \\\n str(domain_name)\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while getting the \"\n \"ID of domain\")\n print (\"No response from Server while getting the \"\n \"ID of domain\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get domain ID Failed with status %s and error \"\n \": %s\" % (response.status, response.data))\n print (\"Get domain ID Failed with status %s and error : %s\" %\n (response.status, response.data))\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Domain details : %s \" % output)\n if len(output['domains']) != 1:\n LOG_OBJ.debug(\"No. of domains with name %s is %s\"\n % (domain_name, len(output['domains'])))\n print(\"No. of domains with name %s is %s\"\n % (domain_name, len(output['domains'])))\n return\n\n return output['domains'][0]['id']",
"def resource_group_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"resource_group_id\")",
"def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")",
"def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")",
"def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")",
"def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")",
"def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")"
] | [
"0.8125011",
"0.639748",
"0.639748",
"0.639748",
"0.6284943",
"0.6265004",
"0.62123185",
"0.617114",
"0.617114",
"0.617114",
"0.617114",
"0.617114",
"0.6158674",
"0.60937095",
"0.60937095",
"0.6091811",
"0.6091811",
"0.6091811",
"0.60563904",
"0.60457486",
"0.5990473",
"0.5979375",
"0.59711367",
"0.5962884",
"0.5938235",
"0.59169245",
"0.59169245",
"0.59169245",
"0.59169245",
"0.59169245"
] | 0.82438326 | 0 |
join the input string | def my_join(iters, string):
out = ""
for i in range(iters):
out += "," + string
return out | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def join(self, iterable) -> String:\n pass",
"def my_join(iters, string):\n out = ''\n for i in range(iters):\n out += \", \" + string\n return out",
"def join_strings(words):\n joined_string = ''\n for word in words:\n joined_string += word\n\n return joined_string",
"def my_join(iters, string):\n out = ''\n for i in range(iters):\n out += string.join(\", \")\n return out",
"def my_join(iters, string):\n out=''\n for i in range(iters):\n out += string.join(\", \")\n #add string together with , as seperator\n #repeat iters numbers of times\n return out",
"def robust_join(s, sep=','):\n return sep.join([str(e) for e in s])",
"def join(sep, xs):\n return str(sep).join(xs)",
"def join(self, tokens):\n if self.chars:\n joiner = ''\n else:\n joiner = ' '\n return joiner.join(tokens)",
"def _urljoin(self, *args):\r\n\t\treturn \"/\".join(map(lambda x: str(x).rstrip('/'), args))",
"def join_string(part1, part2, concatenation_string = 'AND', seperator=' '):\n\n if part1 == '':\n return part2\n\n elif part2 == '':\n return part1\n\n\n if part1[-1] == seperator:\n sep1 = ''\n else:\n sep1 = seperator\n\n\n if part2[0] == seperator:\n sep2 = ''\n else:\n sep2 = ' '\n\n\n return part1 + sep1 + concatenation_string + sep2 + part2",
"def ujoin(*args):\n if len(args) == 0 or len(args[0]) == 0:\n return ''\n return (\n (args[0][0] == '/') * '/' # prepend slash if first arg starts with it\n + '/'.join(x[(x[0] == '/') : (len(x) - (x[-1] == '/'))] for x in args)\n + (args[-1][-1] == '/') * '/'\n ) # append slash if last arg ends with it",
"def word_join(self, words):\n return \" \".join(words)",
"def urljoin(*args):\n\n return \"/\".join(map(lambda x: str(x).rstrip('/'), args))",
"def list_join(the_list):\n return ' '.join(the_list)",
"def rejoin(textList):\n return ','.join(textList)",
"def sentence_join(self, sentences):\n return \" \".join(sentences)",
"def join_link(s, separator):\n if s == empty:\n return \"\"\n elif rest(s) == empty:\n return str(first(s))\n else:\n return str(first(s)) + separator + join_link(rest(s), separator)\n # so much like the TLS programming style.",
"def urljoin(*args):\n return '/'.join(str(a or '').strip('/') for a in args)",
"def join(self, delimiter: str) -> str:\n return delimiter.join((str(x) for x in self.array))",
"def list_string(join_list):\n joined_list = '[{}]'.format(join_list, join_list)\n return joined_list",
"def join(sep, seq):\n return _to_bytes_or_str_array(\n _vec_string(sep, object_, 'join', (seq,)), seq)",
"def join_list(items: Iterable[str]) -> str:\n\n return ITEM_SEPARATOR.join(items)",
"def join(self, *parts):\n if parts:\n parts = list(parts)\n if len(parts) > 1:\n for i, p in enumerate(parts[:-1]):\n parts[i] = p.strip('/')\n parts[-1] = parts[-1].lstrip('/')\n return '/'.join(parts)",
"def jointext(firststring, secondstring):\n\n # Return the joined strings\n return str(firststring) + str(secondstring)",
"def join_link(s, separator):\n if s == empty:\n return\"\"\n elif rest(s) == empty:\n return str(first(s))\n else:\n return str(first(s)) + separator + join_link(rest(s), separator)",
"def join_link(s, separator):\n if s == empty:\n return \"\"\n elif rest(s) == empty:\n return str(first(s))\n else:\n return str(first(s)) + separator + join_link(rest(s), separator)",
"def join_link(s, separator):\n if s == empty:\n return ''\n elif rest(s) == empty:\n return str(first(s))\n else:\n return str(first(s)) + separator + join_link(rest(s), separator)",
"def join_strings_with_comma(words):\n joined_string = ', '.join(words)\n return joined_string",
"def join(self, iterable):\n result = ANSIString(\"\")\n last_item = None\n for item in iterable:\n if last_item is not None:\n result += self._raw_string\n if not isinstance(item, ANSIString):\n item = ANSIString(item)\n result += item\n last_item = item\n return result",
"def join_strings(self):\n\n self.__corpora = [' ' + ' '.join(strings) + ' ' for strings in self.__corpora]"
] | [
"0.73991024",
"0.732438",
"0.7231096",
"0.72088933",
"0.7069224",
"0.6920054",
"0.68781954",
"0.67399055",
"0.66996497",
"0.6629545",
"0.6618605",
"0.65867525",
"0.658431",
"0.6571246",
"0.653331",
"0.65244675",
"0.6523184",
"0.6489013",
"0.6483073",
"0.6398085",
"0.6364346",
"0.63627094",
"0.6359249",
"0.63512945",
"0.6350764",
"0.63500947",
"0.63371634",
"0.6315935",
"0.62794495",
"0.6273673"
] | 0.73344773 | 1 |
Adds the keys 'logits' and 'probs' to the end points dictionary of ResNet50v2. | def _get_updated_endpoints(original_end_points, name):
end_points = dict(original_end_points)
end_points['logits'] = tf.squeeze(end_points[name], [1, 2])
end_points['probs'] = tf.nn.softmax(end_points['logits'])
return end_points | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def augment(self):\n n1 = { 'edges': [ self.next_insert['pred'], self.next_insert ], 'pred': self.next_insert['pred'] }\n n2 = { 'edges': [ n1, self.next_insert ], 'pred': n1 }\n self.next_insert['pred'] = n2\n self.next_insert = n2\n self.nodect += 2",
"def init_output_dict(self):\n return {\n \"outputs\": torch.FloatTensor(),\n \"pred_probs\": torch.FloatTensor(),\n \"labels\": torch.LongTensor(),\n }",
"def postprocess(self, prediction_dict):\r\n #三个通道的网络需要全连接层融合\r\n\r\n eyeFace_logits = prediction_dict['eyeFace_logits']\r\n eyeFace_logits = tf.nn.softmax(eyeFace_logits)\r\n logits = eyeFace_logits\r\n classes = tf.argmax(logits, 1)\r\n postprecessed_dict = {'classes': classes}\r\n return postprecessed_dict",
"def feed_dict(self):\n return {self.lr_tensor: self.lr()}",
"def resnet_endpoints(model):\n graph = tf.get_default_graph()\n scope = _get_resnet_scope()\n end_points = {}\n tensors = ['initial_conv', 'initial_max_pool', 'pre_final_pool',\n 'final_reduce_mean', 'final_dense']\n tensors += [\n 'block_layer{}'.format(i + 1) for i in range(len(model.block_sizes))]\n for name in tensors:\n tensor = graph.get_tensor_by_name('{}{}:0'.format(scope, name))\n if len(tensor.shape) == 4:\n tensor = _model_output(tensor, model.data_format)\n end_points[name] = tensor\n return end_points",
"def inception_resnet_v2(inputs,\n reuse=None,\n scope='InceptionResnetV2'):\n end_points = {}\n\n with tf.variable_scope(scope, 'InceptionResnetV2', [inputs], reuse=reuse):\n with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],\n stride=1, padding='SAME'):\n\n # 149 x 149 x 32\n net = slim.conv2d(inputs, 32, 3, stride=2, padding='VALID',\n scope='Conv2d_1a_3x3')\n end_points['Conv2d_1a_3x3'] = net\n # 147 x 147 x 32\n net = slim.conv2d(net, 32, 3, padding='VALID',\n scope='Conv2d_2a_3x3')\n end_points['Conv2d_2a_3x3'] = net\n # 147 x 147 x 64\n net = slim.conv2d(net, 64, 3, scope='Conv2d_2b_3x3')\n end_points['Conv2d_2b_3x3'] = net\n # 73 x 73 x 64\n net = slim.max_pool2d(net, 3, stride=2, padding='VALID',\n scope='MaxPool_3a_3x3')\n end_points['MaxPool_3a_3x3'] = net\n # 73 x 73 x 80\n net = slim.conv2d(net, 80, 1, padding='VALID',\n scope='Conv2d_3b_1x1')\n end_points['Conv2d_3b_1x1'] = net\n # 71 x 71 x 192\n net = slim.conv2d(net, 192, 3, padding='VALID',\n scope='Conv2d_4a_3x3')\n end_points['Conv2d_4a_3x3'] = net\n # 35 x 35 x 192\n net = slim.max_pool2d(net, 3, stride=2, padding='VALID',\n scope='MaxPool_5a_3x3')\n end_points['MaxPool_5a_3x3'] = net\n\n # 35 x 35 x 320\n with tf.variable_scope('Mixed_5b'):\n with tf.variable_scope('Branch_0'):\n tower_conv = slim.conv2d(net, 96, 1, scope='Conv2d_1x1')\n with tf.variable_scope('Branch_1'):\n tower_conv1_0 = slim.conv2d(net, 48, 1, scope='Conv2d_0a_1x1')\n tower_conv1_1 = slim.conv2d(tower_conv1_0, 64, 5,\n scope='Conv2d_0b_5x5')\n with tf.variable_scope('Branch_2'):\n tower_conv2_0 = slim.conv2d(net, 64, 1, scope='Conv2d_0a_1x1')\n tower_conv2_1 = slim.conv2d(tower_conv2_0, 96, 3,\n scope='Conv2d_0b_3x3')\n tower_conv2_2 = slim.conv2d(tower_conv2_1, 96, 3,\n scope='Conv2d_0c_3x3')\n with tf.variable_scope('Branch_3'):\n tower_pool = slim.avg_pool2d(net, 3, stride=1, padding='SAME',\n scope='AvgPool_0a_3x3')\n tower_pool_1 = slim.conv2d(tower_pool, 64, 1,\n scope='Conv2d_0b_1x1')\n net = tf.concat(axis=3, values=[tower_conv, tower_conv1_1,\n tower_conv2_2, tower_pool_1])\n\n end_points['Mixed_5b'] = net\n net = slim.repeat(net, 10, block35, scale=0.17)\n\n # 17 x 17 x 1024\n with tf.variable_scope('Mixed_6a'):\n with tf.variable_scope('Branch_0'):\n tower_conv = slim.conv2d(net, 384, 3, stride=2, padding='VALID',\n scope='Conv2d_1a_3x3')\n with tf.variable_scope('Branch_1'):\n tower_conv1_0 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')\n tower_conv1_1 = slim.conv2d(tower_conv1_0, 256, 3,\n scope='Conv2d_0b_3x3')\n tower_conv1_2 = slim.conv2d(tower_conv1_1, 384, 3,\n stride=2, padding='VALID',\n scope='Conv2d_1a_3x3')\n with tf.variable_scope('Branch_2'):\n tower_pool = slim.max_pool2d(net, 3, stride=2, padding='VALID',\n scope='MaxPool_1a_3x3')\n net = tf.concat(axis=3, values=[tower_conv, tower_conv1_2, tower_pool])\n\n end_points['Mixed_6a'] = net\n net = slim.repeat(net, 20, block17, scale=0.10)\n\n with tf.variable_scope('Mixed_7a'):\n with tf.variable_scope('Branch_0'):\n tower_conv = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')\n tower_conv_1 = slim.conv2d(tower_conv, 384, 3, stride=2,\n padding='VALID', scope='Conv2d_1a_3x3')\n with tf.variable_scope('Branch_1'):\n tower_conv1 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')\n tower_conv1_1 = slim.conv2d(tower_conv1, 288, 3, stride=2,\n padding='VALID', scope='Conv2d_1a_3x3')\n with tf.variable_scope('Branch_2'):\n tower_conv2 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')\n tower_conv2_1 = slim.conv2d(tower_conv2, 288, 3,\n scope='Conv2d_0b_3x3')\n 
tower_conv2_2 = slim.conv2d(tower_conv2_1, 320, 3, stride=2,\n padding='VALID', scope='Conv2d_1a_3x3')\n with tf.variable_scope('Branch_3'):\n tower_pool = slim.max_pool2d(net, 3, stride=2, padding='VALID',\n scope='MaxPool_1a_3x3')\n net = tf.concat(axis=3, values=[tower_conv_1, tower_conv1_1,\n tower_conv2_2, tower_pool])\n\n end_points['Mixed_7a'] = net\n\n net = slim.repeat(net, 9, block8, scale=0.20)\n net = block8(net, activation_fn=None)\n \n # GVH: Not sure if we want or need this convolution\n # 8 x 8 x 2080\n net = slim.conv2d(net, 1536, 1, scope='Conv2d_7b_1x1')\n end_points['Conv2d_7b_1x1'] = net\n \n # 8 x 8 x 1536\n return net, end_points",
"def resnet_v2_v1net_config():\n config = ConfigDict()\n config.image_size = (299, 299)\n config.resnet_depth = 50\n config.ckpt_dir = \"pretrained_nets/resnet_v2_%s\" % config.resnet_depth\n config.num_classes = 1001\n config.add_v1net = True\n config.timesteps = 7\n config.v1net_filters = 32\n config.v1net_kernel_size = 5\n return config",
"def generate_update_dict(self, input_data, input_labels=None, batch_step=0):\n update_dict = super(MlpModel, self).generate_update_dict(input_data, input_labels, batch_step)\n feed_dict = self.get_feed_dict(input_data, input_labels)\n sess = tf.compat.v1.get_default_session()\n train_on_adversarial = feed_dict[self.train_on_adversarial]\n if(train_on_adversarial):\n adv_feed_dict = feed_dict.copy()\n adv_feed_dict[self.use_adv_input] = True\n nadv_feed_dict = feed_dict.copy()\n nadv_feed_dict[self.use_adv_input] = False\n current_step = np.array(self.global_step.eval())\n logits_vals = sess.run(self.get_encodings(), feed_dict)\n logits_vals_max = np.array(logits_vals.max())\n logits_frac_act = np.array(np.count_nonzero(logits_vals) / float(logits_vals.size))\n stat_dict = {\"global_batch_index\":current_step,\n \"batch_step\":batch_step,\n \"number_of_batch_steps\":self.params.schedule[self.sched_idx][\"num_batches\"],\n \"schedule_index\":self.sched_idx,\n \"logits_max\":logits_vals_max,\n \"logits_frac_active\":logits_frac_act}\n if(train_on_adversarial):\n adv_accuracy = np.array(self.accuracy.eval(adv_feed_dict))\n nadv_accuracy = np.array(self.accuracy.eval(nadv_feed_dict))\n adv_loss = np.array(self.get_total_loss().eval(adv_feed_dict))\n nadv_loss = np.array(self.get_total_loss().eval(nadv_feed_dict))\n stat_dict[\"accuracy_adv\"] = adv_accuracy\n stat_dict[\"accuracy_nadv\"] = nadv_accuracy\n stat_dict[\"total_loss_adv\"] = adv_loss\n stat_dict[\"total_loss_nadv\"] = nadv_loss\n else:\n accuracy = np.array(self.accuracy.eval(feed_dict))\n total_loss = np.array(self.get_total_loss().eval(feed_dict))\n stat_dict[\"accuracy\"] = accuracy\n stat_dict[\"total_loss\"] = total_loss\n update_dict.update(stat_dict) #stat_dict overwrites\n eval_list = []\n grad_name_list = []\n learning_rate_list = []\n for w_idx, weight_grad_var in enumerate(self.grads_and_vars[self.sched_idx]):\n eval_list.append(weight_grad_var[0][0]) # [grad(0) or var(1)][value(0) or name(1)]\n grad_name = weight_grad_var[0][1].name.split('/')[1].split(':')[0] # 2nd is np.split\n grad_name_list.append(grad_name)\n learning_rate_list.append(self.learning_rates[self.sched_idx][w_idx])\n stat_dict = {}\n out_vals = tf.compat.v1.get_default_session().run(eval_list, feed_dict)\n out_lr = tf.compat.v1.get_default_session().run(learning_rate_list, feed_dict)\n for grad, name, lr in zip(out_vals, grad_name_list, out_lr):\n grad_max = np.array(grad.max())\n grad_min = np.array(grad.min())\n grad_mean = np.mean(np.array(grad))\n stat_dict[name+\"_grad_max_mean_min\"] = [grad_max, grad_mean, grad_min]\n stat_dict[name+\"_learning_rate\"] = lr\n update_dict.update(stat_dict) #stat_dict overwrites for same keys\n return update_dict",
"def loss_and_target(self, point_pred: Tensor, rel_roi_points: Tensor,\n sampling_results: List[SamplingResult],\n batch_gt_instances: InstanceList,\n cfg: ConfigType) -> dict:\n rois = bbox2roi([res.pos_bboxes for res in sampling_results])\n pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])\n\n point_target = self.get_targets(rois, rel_roi_points, sampling_results,\n batch_gt_instances, cfg)\n if self.class_agnostic:\n loss_point = self.loss_point(point_pred, point_target,\n torch.zeros_like(pos_labels))\n else:\n loss_point = self.loss_point(point_pred, point_target, pos_labels)\n\n return dict(loss_point=loss_point, point_target=point_target)",
"def _create_output_alternatives(self, predictions):\n return {self.head_name: (self._problem_type, predictions)}",
"def lightened_v2(inputs, is_training=True,\n dropout_keep_prob=0.8,\n reuse=None,\n scope='LightenedV1'):\n end_points = {}\n \n with tf.variable_scope(scope, 'LightenedV1', [inputs], reuse=reuse):\n with slim.arg_scope([slim.batch_norm, slim.dropout],\n is_training=is_training):\n with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],\n stride=1, padding='SAME'):\n \n endpoints = {}\n \n net=conv(inputs, 48, 9, stride=1, padding='VALID', scope='conv1_9x9')\n end_points['conv1_9x9'] = net\n net = slim.max_pool2d(net, 2, stride=2, padding='SAME', scope='pool1')\n end_points['pool1'] = net\n \n net=conv(net, 96, 5, stride=1, padding='VALID', scope='conv2_5x5')\n end_points['conv2_5x5'] = net\n net = slim.max_pool2d(net, 2, stride=2, padding='SAME', scope='pool2')\n end_points['pool2'] = net\n\n net=conv(net, 128, 5, stride=1, padding='VALID', scope='conv3_5x5')\n end_points['conv3_5x5'] = net\n net = slim.max_pool2d(net, 2, stride=2, padding='SAME', scope='pool3')\n end_points['pool3'] = net\n\n net=conv(net, 192, 4, stride=1, padding='VALID', scope='conv4_4x4')\n end_points['conv4_5x5'] = net\n net = slim.max_pool2d(net, 2, stride=2, padding='SAME', scope='pool4')\n end_points['pool4'] = net\n\n with tf.variable_scope('Logits'):\n net = slim.flatten(net)\n net = slim.dropout(net, dropout_keep_prob, is_training=is_training,\n scope='Dropout') \n endpoints['PreLogitsFlatten'] = net\n \n return net, end_points",
"def init_probability_dict(self):\n for x in xrange(0,10):\n self.class_probabilities[x] = self.init_probability_2d()",
"def _set_resnet_arg_scope(self):\n vs_initializer = tf.keras.initializers.VarianceScaling(2.0)\n l2_regularizer = tf.keras.regularizers.l2(self.config.GENERATOR_WEIGHT_DECAY)\n for layer in self.resnet50V2.layers:\n if isinstance(layer, layers.Conv2D):\n # original implementations slim `resnet_arg_scope` additionally sets\n # `normalizer_fn` and `normalizer_params` which in TF 2.0 need to be implemented\n # as own layers. This is not possible using keras ResNet50V2 application.\n # Nevertheless this is not needed as training seems to be likely stable.\n # See https://www.tensorflow.org/guide/migrate#a_note_on_slim_contriblayers for more\n # migration insights\n setattr(layer, 'padding', 'same')\n setattr(layer, 'kernel_initializer', vs_initializer)\n setattr(layer, 'kernel_regularizer', l2_regularizer)\n if isinstance(layer, layers.BatchNormalization):\n setattr(layer, 'momentum', 0.997)\n setattr(layer, 'epsilon', 1e-5)\n if isinstance(layer, layers.MaxPooling2D):\n setattr(layer, 'padding', 'same')",
"def eval_additional_scores(self, **kwargs):\n self.model.eval()\n self.likelihood.eval()\n\n X_train_torch = torch.from_numpy(kwargs[\"X_train\"]).to(self.device)\n y_train_torch = torch.from_numpy(kwargs[\"y_train\"]).to(self.device)\n mll = gpytorch.mlls.VariationalELBO(self.likelihood, self.model, num_data=y_train_torch.numel())\n\n with torch.no_grad(), gpytorch.settings.num_likelihood_samples(self.num_likelihood_samples):\n f_pred = self.model(X_train_torch)\n elbo = mll(f_pred, y_train_torch).item()\n\n return {\n \"elbo\": elbo\n }",
"def build_feed_dict(self, input_frames, gt_output_frames, generator):\n feed_dict = {}\n batch_size = np.shape(gt_output_frames)[0]\n\n ##\n # Get generated frames from GeneratorModel\n ##\n\n g_feed_dict = {generator.input_frames_train: input_frames,\n generator.gt_frames_train: gt_output_frames}\n g_scale_preds = self.sess.run(generator.scale_preds_train, feed_dict=g_feed_dict)\n\n ##\n # Create discriminator feed dict\n ##\n for scale_num in xrange(self.num_scale_nets):\n scale_net = self.scale_nets[scale_num]\n\n # resize gt_output_frames\n scaled_gt_output_frames = np.empty([batch_size, scale_net.height, scale_net.width, 3])\n for i, img in enumerate(gt_output_frames):\n\t\t# for skimage.transform.resize, images need to be in range [0, 1], so normalize to\n # [0, 1] before resize and back to [-1, 1] after\n sknorm_img = (img / 2) + 0.5\n\n\n # https://github.com/dyelax/Adversarial_Video_Generation/issues/18\n sknorm_img = np.minimum(sknorm_img, 1)\n sknorm_img = np.maximum(sknorm_img, 0)\n\n\n\n resized_frame = resize(sknorm_img, [scale_net.height, scale_net.width, 3])\n scaled_gt_output_frames[i] = (resized_frame - 0.5) * 2\n\n # combine with resized gt_output_frames to get inputs for prediction\n scaled_input_frames = np.concatenate([g_scale_preds[scale_num],\n scaled_gt_output_frames])\n\n # convert to np array and add to feed_dict\n feed_dict[scale_net.input_frames] = scaled_input_frames\n\n # add labels for each image to feed_dict\n batch_size = np.shape(input_frames)[0]\n feed_dict[self.labels] = np.concatenate([np.zeros([batch_size, 1]),\n np.ones([batch_size, 1])])\n\n return feed_dict",
"def build_resnet50(self):\n use_batch_norm = self.use_batch_norm\n\n imgs = tf.placeholder(tf.float32, [self.batch_size]+self.img_shape)\n is_train = tf.placeholder(tf.bool)\n\n conv1_feats = convolution(imgs, 7, 7, 64, 2, 2, 'conv1')\n conv1_feats = batch_norm(conv1_feats, 'bn_conv1', is_train, use_batch_norm)\n conv1_feats = nonlinear(conv1_feats, 'relu')\n pool1_feats = max_pool(conv1_feats, 3, 3, 2, 2, 'pool1')\n\n res2a_feats = self.basic_block(pool1_feats, 'res2a', 'bn2a', is_train, use_batch_norm, 64, 1)\n res2b_feats = self.basic_block2(res2a_feats, 'res2b', 'bn2b', is_train, use_batch_norm, 64)\n res2c_feats = self.basic_block2(res2b_feats, 'res2c', 'bn2c', is_train, use_batch_norm, 64)\n \n res3a_feats = self.basic_block(res2c_feats, 'res3a', 'bn3a', is_train, use_batch_norm, 128)\n res3b_feats = self.basic_block2(res3a_feats, 'res3b', 'bn3b', is_train, use_batch_norm, 128)\n res3c_feats = self.basic_block2(res3b_feats, 'res3c', 'bn3c', is_train, use_batch_norm, 128)\n res3d_feats = self.basic_block2(res3c_feats, 'res3d', 'bn3d', is_train, use_batch_norm, 128)\n\n res4a_feats = self.basic_block(res3d_feats, 'res4a', 'bn4a', is_train, use_batch_norm, 256)\n res4b_feats = self.basic_block2(res4a_feats, 'res4b', 'bn4b', is_train, use_batch_norm, 256)\n res4c_feats = self.basic_block2(res4b_feats, 'res4c', 'bn4c', is_train, use_batch_norm, 256)\n res4d_feats = self.basic_block2(res4c_feats, 'res4d', 'bn4d', is_train, use_batch_norm, 256)\n res4e_feats = self.basic_block2(res4d_feats, 'res4e', 'bn4e', is_train, use_batch_norm, 256)\n res4f_feats = self.basic_block2(res4e_feats, 'res4f', 'bn4f', is_train, use_batch_norm, 256)\n\n res5a_feats = self.basic_block(res4f_feats, 'res5a', 'bn5a', is_train, use_batch_norm, 512)\n res5b_feats = self.basic_block2(res5a_feats, 'res5b', 'bn5b', is_train, use_batch_norm, 512)\n res5c_feats = self.basic_block2(res5b_feats, 'res5c', 'bn5c', is_train, use_batch_norm, 512)\n\n res5c_feats_flat = tf.reshape(res5c_feats, [self.batch_size, 49, 2048])\n self.conv_feats = res5c_feats_flat\n self.conv_feat_shape = [49, 2048]\n self.num_ctx = 49 \n self.dim_ctx = 2048\n\n self.imgs = imgs\n self.is_train = is_train",
"def fprop(self, x):\n\n if x is self.x:\n return self.end_points\n\n else:\n with slim.arg_scope(arg_scopes_map['resnet_v2_50']()):\n net, end_points = networks_map['resnet_v2_50'](\n x, num_classes=self.num_classes,\n is_training=False, reuse=tf.AUTO_REUSE)\n\n return _get_updated_endpoints(end_points, 'resnet_v2_50/logits')",
"def rl_modelrl_ae_l2_base():\n hparams = rl_modelrl_ae_base()\n hparams.generative_model_params = \"basic_conv_l2\"\n return hparams",
"def __init__(self, x, num_classes=15, is_training=False):\n\n super(resnet_v2_50, self).__init__()\n\n self.x = x\n self.num_classes = num_classes\n\n # populating the tensorflow graph\n with slim.arg_scope(arg_scopes_map['resnet_v2_50']()):\n net, end_points = networks_map['resnet_v2_50'](\n x, num_classes=num_classes,\n is_training=is_training, reuse=None)\n\n self.end_points = _get_updated_endpoints(end_points, 'resnet_v2_50/logits')\n self.variables_to_restore = slim.get_variables_to_restore(exclude=[])",
"def on_predict_end(self, logs=None):",
"def on_predict_end(self, logs=None):",
"def rl_modelrl_l2_base():\n hparams = rl_modelrl_base()\n hparams.generative_model_params = \"basic_conv_l2\"\n return hparams",
"def add_metrics_to_db(self) -> None:\n\n model = {\n 'id': 'model1',\n 'name': 'Housing Price Prediction',\n 'metrics': {\n 'mean_squared_error': mean_squared_error(self._y_test, self._predictions),\n 'mean_absolute_error': mean_absolute_error(self._y_test, self._predictions),\n 'r2_score': r2_score(self._y_test, self._predictions)\n }\n }\n\n self._db.add_model(model)",
"def __init__(self, players, prob_end, game, deterministic_cache):\n super(ProbEndRoundRobinMatches, self).__init__(\n players, turns=float(\"inf\"), game=game,\n deterministic_cache=deterministic_cache)\n self.deterministic_cache.mutable = False\n self.prob_end = prob_end",
"def add_prediction_endpoint(self, endpoint_id, saved_model_id):\n self.settings[\"endpoints\"].append({\n \"id\" : endpoint_id,\n \"type\" : \"STD_PREDICTION\",\n \"modelRef\": saved_model_id\n })",
"def inception_resnet_v1(inputs,\r\n is_training=True,\r\n dropout_keep_prob=0.8,\r\n bottleneck_layer_size=128,\r\n reuse=None,\r\n scope='InceptionResnetV1'):\r\n end_points = {}\r\n\r\n with tf.variable_scope(scope, 'InceptionResnetV1', [inputs], reuse=reuse):\r\n with slim.arg_scope([slim.batch_norm, slim.dropout],\r\n is_training=is_training):\r\n with slim.arg_scope(\r\n [slim.conv2d, slim.max_pool2d, slim.avg_pool2d],\r\n stride=1,\r\n padding='SAME'):\r\n\r\n # stem\r\n net = stem(inputs)\r\n end_points['stem_out'] = net\r\n\r\n # 5 x Inception-resnet-A\r\n net = slim.repeat(\r\n net,\r\n 5,\r\n inception_resnet_a,\r\n scale=0.17,\r\n scope=\"inception_resnet_a\")\r\n end_points['inception_resnet_a_out'] = net\r\n\r\n # Reduction-A\r\n with tf.variable_scope('reduction_a'):\r\n net = reduction_a(net, 192, 192, 256, 384)\r\n end_points['reduction_a_out'] = net\r\n\r\n # 10 x Inception-Resnet-B\r\n net = slim.repeat(\r\n net,\r\n 10,\r\n inception_resnet_b,\r\n scale=0.10,\r\n scope=\"inception_resnet_b\")\r\n end_points['inception_resnet_b_out'] = net\r\n\r\n # Reduction-B\r\n with tf.variable_scope('reduction_b'):\r\n net = reduction_b(net)\r\n end_points['reduction_b_out'] = net\r\n\r\n # 5 x Inception-Resnet-C\r\n net = slim.repeat(\r\n net,\r\n 5,\r\n inception_resnet_c,\r\n scale=0.20,\r\n scope=\"inception_resnet_c\")\r\n end_points['inception_resnet_c_out'] = net\r\n\r\n # Average Pooling层,输出为8×8×1792\r\n net = slim.avg_pool2d(\r\n net,\r\n net.get_shape()[1:3],\r\n padding='VALID',\r\n scope='avgpool_8x8')\r\n\r\n # 扁平除了batch_size维度的其它维度。使输出变为:[batch_size, ...]\r\n net = slim.flatten(net)\r\n\r\n # dropout层\r\n net = slim.dropout(\r\n net, dropout_keep_prob, is_training=False, scope='Dropout')\r\n end_points['PreLogitsFlatten'] = net\r\n\r\n # 全链接层。输出为batch_size×128\r\n net = slim.fully_connected(\r\n net,\r\n bottleneck_layer_size,\r\n activation_fn=None,\r\n scope='logits',\r\n reuse=False)\r\n\r\n return net",
"def on_predict_end(self, logs: tp.Optional[tp.Dict[str, np.ndarray]] = None):\n pass",
"def loss_functions(self) -> dict:\n tmp_dict = dict()\n tmp_dict['From_Root'] = (('Cov', self.log_post_cov, self.constraint_cov),\n ('Beta', self.log_post_beta, self.constraint_sigma),\n ('Lambda', self.log_post_lambda, self.constraint_sigma),\n ('Psi', self.log_post_psi, self.constraint_psi),\n ('Theta', self.log_post_theta, self.constraint_theta),\n ('Tree', self.log_post_tree, self.constraint_tree))\n\n tmp_dict['Likelihood'] = (self.log_likelihood, ('Psi',\n 'Beta',\n 'Theta',\n 'Lambda'))\n return tmp_dict",
"def model_2_parameters(num_features, num_classes):\n parameters = {}\n parameters['num_features'] = num_features\n parameters['num_classes'] = num_classes\n \n return parameters",
"def update_predictions(self, context):\n x, y, o = context.get_predictions()\n self.x_eval += x\n self.y_eval += y\n self.o_eval += o\n self.write_predictions(o)"
] | [
"0.52572966",
"0.52147967",
"0.5190277",
"0.5172461",
"0.5160112",
"0.5107622",
"0.5044875",
"0.502481",
"0.49618483",
"0.49577186",
"0.49346328",
"0.4871828",
"0.47935718",
"0.47819144",
"0.47578776",
"0.4739234",
"0.47227845",
"0.46987733",
"0.46696952",
"0.46637616",
"0.46637616",
"0.46511227",
"0.46276534",
"0.4614478",
"0.46027955",
"0.4583889",
"0.45790952",
"0.45701605",
"0.4570038",
"0.45660526"
] | 0.56161755 | 0 |
Load weights from a checkpoint file into the tensorflow graph. | def load_weights(self, checkpoint_path, sess=None):
if sess is None:
sess = tf.get_default_session()
assert sess is not None
saver = tf.train.Saver(self.variables_to_restore)
saver.restore(sess, checkpoint_path) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_model_weights(sess, checkpoint_dir):\n\n init_fn = slim.assign_from_checkpoint_fn(\n checkpoint_dir, slim.get_model_variables(), ignore_missing_vars=True)\n init_fn(sess)",
"def load_weights_from_checkpoint(self, path: str, key: str):\n ckpt = torch.load(path, map_location='cpu')\n self.load_state_dict(ckpt[key])\n # self.to(self.device)",
"def load_weights_from_checkpoint(self, path: str, key: str):\n ckpt = torch.load(path, map_location='cpu')\n self.load_state_dict(ckpt[key])\n # self.to(self.device)",
"def load_weights(self, path=None):\n\n if path is None:\n path = self.checkpoints_dir\n\n self.model.load_weights(tf.train.latest_checkpoint(path))\n logging.info(f'\\tWeights loaded from {path}')",
"def load_weights(self, model_name: str, checkpoint: int, path: str = './models/'):\n path_to_model = path + model_name + '/checkpoint_' + str(checkpoint) + '/model_weights'\n self.model.load_weights(path_to_model)",
"def load_checkpoint(self, file):\n \"\"\"Load \"\"\"\n chkpnt = torch.load(file)\n self.load_state_dict(chkpnt['model_state_dict'])",
"def _load_checkpoint_to_net(config, network):\n if config.existed_ckpt:\n if config.existed_ckpt.endswith(\".npz\"):\n weights = np.load(config.existed_ckpt)\n else:\n weights = load_checkpoint(config.existed_ckpt)\n for param in network.trainable_params():\n weights_name = param.name\n if weights_name not in weights:\n raise ValueError(f\"Param {weights_name} is not found in ckpt file.\")\n\n if isinstance(weights[weights_name], Parameter):\n param.set_data(weights[weights_name].data)\n elif isinstance(weights[weights_name], Tensor):\n param.set_data(Tensor(weights[weights_name].asnumpy(), config.dtype))\n elif isinstance(weights[weights_name], np.ndarray):\n param.set_data(Tensor(weights[weights_name], config.dtype))\n else:\n param.set_data(weights[weights_name])\n else:\n for param in network.trainable_params():\n name = param.name\n value = param.data\n if isinstance(value, Tensor):\n if name.endswith(\".gamma\"):\n param.set_data(one_weight(value.asnumpy().shape))\n elif name.endswith(\".beta\") or name.endswith(\".bias\"):\n if param.data.dtype == \"Float32\":\n param.set_data((weight_variable(value.asnumpy().shape).astype(np.float32)))\n elif param.data.dtype == \"Float16\":\n param.set_data((weight_variable(value.asnumpy().shape).astype(np.float16)))\n else:\n if param.data.dtype == \"Float32\":\n param.set_data(Tensor(weight_variable(value.asnumpy().shape).astype(np.float32)))\n elif param.data.dtype == \"Float16\":\n param.set_data(Tensor(weight_variable(value.asnumpy().shape).astype(np.float16)))",
"def load_weights(self, filename):\n checkpoint = torch.load(filename)\n if not checkpoint['input_size'] == self.state_size:\n print(f\"Error when loading weights from checkpoint {filename}: input size {checkpoint['input_size']} doesn't match state size of agent {self.state_size}\")\n return None\n if not checkpoint['output_size'] == self.action_size:\n print(f\"Error when loading weights from checkpoint {filename}: output size {checkpoint['output_size']} doesn't match action space size of agent {self.action_size}\")\n return None\n my_actor_hidden_layers = [each.out_features for each in self.actor_local.hidden_layers if each._get_name()!='BatchNorm1d']\n if not checkpoint['actor_hidden_layers'] == my_actor_hidden_layers:\n print(f\"Error when loading weights from checkpoint {filename}: actor hidden layers {checkpoint['actor_hidden_layers']} don't match agent's actor hidden layers {my_actor_hidden_layers}\")\n return None\n my_critic_hidden_layers = [each.out_features for each in self.critic_local.hidden_layers if each._get_name()!='BatchNorm1d']\n if not checkpoint['critic_hidden_layers'] == my_critic_hidden_layers:\n print(f\"Error when loading weights from checkpoint {filename}: critic hidden layers {checkpoint['critic_hidden_layers']} don't match agent's critic hidden layers {my_critic_hidden_layers}\")\n return None\n self.actor_local.load_state_dict(checkpoint['actor_state_dict'])\n self.critic_local.load_state_dict(checkpoint['critic_state_dict'])",
"def load_weights(self, filepath):\n self.model.load_weights(filepath)",
"def load_weights(self, weight_file):\r\n self.model.load_weights(weight_file)",
"def try_and_init_from(self, path):\n log.info(\"Loading weights from foreign checkpoint {}\".format(path))\n if not os.path.exists(path):\n raise ValueError(\"Checkpoint {} does not exist\".format(path))\n\n chkpt = th.load(path, map_location=th.device(\"cpu\"))\n if \"model\" not in chkpt.keys() or chkpt[\"model\"] is None:\n raise ValueError(\"{} has no model saved\".format(path))\n\n mdl = chkpt[\"model\"]\n for n, p in self.model.named_parameters():\n if n in mdl:\n p2 = mdl[n]\n if p2.shape != p.shape:\n log.warning(\"Parameter {} ignored, checkpoint size does not match: {}, should be {}\".format(n, p2.shape, p.shape))\n continue\n log.debug(\"Parameter {} copied\".format(n))\n p.data.copy_(p2)\n else:\n log.warning(\"Parameter {} ignored, not found in source checkpoint.\".format(n))\n\n log.info(\"Weights loaded from foreign checkpoint {}\".format(path))",
"def load_checkpoint(checkpoint_path):\n flat_checkpoint_dict = flatten_checkpoint(\n parse_checkpoint(checkpoint_path), keep_empty_nodes=True)\n return flat_checkpoint_dict",
"def load_checkpoint(self, checkpoint: Dict[str, OrderedDict]):\n self.model.load_state_dict(checkpoint[\"model_state_dict\"])\n self.optimizer.load_state_dict(checkpoint[\"optimizer_state_dict\"])\n return self",
"def load_initial_weights(self, sess, weights_path, SKIP_LAYER):\r\n # Load the weights into memory\r\n weights_dict = np.load(weights_path, encoding='bytes').item()\r\n\r\n # list of all assignment operators\r\n # Loop over all layer names stored in the weights dict\r\n for op_name in weights_dict:\r\n\r\n # Check if layer should be trained from scratch\r\n if op_name not in SKIP_LAYER:\r\n\r\n with tf.variable_scope('model/source/' + op_name, reuse=True):\r\n\r\n # Assign weights/biases to their corresponding tf variable\r\n for data in weights_dict[op_name]:\r\n\r\n # Biases\r\n if len(data.shape) == 1:\r\n var = tf.get_variable('biases', trainable=False)\r\n # print(toMagenta(var.name))\r\n sess.run(var.assign(data))\r\n\r\n # Weights\r\n else:\r\n var = tf.get_variable('weights', trainable=False)\r\n # print(toMagenta(var.name))\r\n sess.run(var.assign(data))",
"def load_checkpoint(checkpoint, model, optimizer=None):\n if not os.path.exists(checkpoint):\n raise (\"File doesn't exist {}\".format(checkpoint))\n checkpoint = torch.load(checkpoint)\n model.load_state_dict(checkpoint['state_dict'])\n\n if optimizer:\n optimizer.load_state_dict(checkpoint['optim_dict'])\n\n return checkpoint",
"def load_checkpoint(self, checkpoint_path, continue_from_epoch=True):\n print(\"Loading checkpoint: {}\".format(checkpoint_path))\n state = torch.load(checkpoint_path)\n self.model.load_state_dict(state['state_dict'])\n self.optimizer.load_state_dict(state['optim_dict'])\n\n if continue_from_epoch:\n self.epoch = state['epoch']",
"def load_checkpoint(checkpoint, model, optimizer=None):\n if not os.path.exists(checkpoint):\n raise (\"File doesn't exist {}\".format(checkpoint))\n checkpoint = torch.load(checkpoint, map_location=torch.device('cpu'))\n model.load_state_dict(checkpoint['state_dict'])\n\n if optimizer:\n optimizer.load_state_dict(checkpoint['optim_dict'])\n\n return checkpoint",
"def load_weights(self, file):\n self.model.load_weights(file)\n return",
"def load_checkpoint(self, checkpoint_path=None):\n if checkpoint_path is None:\n checkpoint_path = self.get_latest_path()\n\n if os.path.isfile(checkpoint_path):\n key = 'cuda' if torch.cuda.is_available() else 'cpu'\n checkpoint = torch.load(checkpoint_path, map_location=key)\n self.network.load_state_dict(checkpoint['network'])\n self.network_target.load_state_dict(checkpoint['network_target'])\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n\n print('checkpoint loaded at {}'.format(checkpoint_path))\n else:\n raise OSError(\"Checkpoint file not found.\")",
"def load_weights(self):\n\n reader = pywrap_tensorflow.NewCheckpointReader(self._tf_model_prefix)\n var_to_shape_map = reader.get_variable_to_shape_map()\n data = dict()\n for name in var_to_shape_map:\n tensor = reader.get_tensor(name)\n data[name] = tensor\n\n print (\"Tensorflow checkpoint file [%s] loaded successfully. [%d] variables loaded.\"\n % (self._tf_model_prefix, len(data)))\n return data",
"def load_from_checkpoint(self, path):\n print(f'# loading trainer state from {path}')\n checkpoint = torch.load(path)\n self.load(checkpoint)",
"def load_checkpoint(checkpoint, model, optimizer=None):\n model_state_dict, optimizer_state_dict = torch.load(checkpoint)\n model.load_state_dict(model_state_dict)\n\n if optimizer is not None:\n optimizer.load_state_dict(optimizer_state_dict)",
"def load_checkpoint(tag, params, model):\r\n file_name = os.path.join(\"saved_models\", params.path, tag + \".pt\")\r\n logger.info(\"Load checkpoint from %s\" % file_name)\r\n if os.path.exists(file_name):\r\n checkpoint = torch.load(file_name, map_location='cpu')\r\n params.training_id = checkpoint['training_id']\r\n logger.info(checkpoint['training_id'])\r\n model.global_step = checkpoint['global_step']\r\n model.load_state_dict(checkpoint['model'])\r\n for i, optimizer in enumerate(model.optimizers):\r\n optimizer.load_state_dict(checkpoint['optimizers'][i])\r\n else:\r\n raise Exception(\"Checkpoint not found.\")",
"def load_checkpoint(path, model, optimizer=None, reset_optimizer=True):\n print(\"Load checkpoint from: {}\".format(path))\n state_dict, optimizer_state = _load(path)\n\n model.load_dict(state_dict)\n if not reset_optimizer and optimizer is not None:\n if optimizer_state is not None:\n print(\"[loading] Load optimizer state from {}\".format(path))\n optimizer.load(optimizer_state)\n\n return model",
"def load(loadname, checkpoint=None):\n ckpt_dir = \"./models/tf_ckpt_\" + loadname + \"/\"\n if checkpoint is not None:\n status = checkpoint.restore(tf.train.latest_checkpoint(ckpt_dir))\n status.assert_consumed()\n print(\"Loaded checkpoint\")\n else:\n print(\"Not Loading any checkpoint\")\n print(\"Starting training from initial configuration\")",
"def load_model_weights(self, filename):\n self.model.load_weights(filename)",
"def load_weights(model, fpath):\n state = torch.load(fpath)\n model.load_state_dict(state['state_dict'])",
"def load_tf_weights_in_bert(model, tf_checkpoint_path):\n try:\n import re\n import numpy as np\n import tensorflow as tf\n except ImportError:\n print(\"Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\")\n raise\n tf_path = os.path.abspath(tf_checkpoint_path)\n print(\"Converting TensorFlow checkpoint from {}\".format(tf_path))\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n arrays = []\n for name, shape in init_vars:\n print(\"Loading TF weight {} with shape {}\".format(name, shape))\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n arrays.append(array)\n\n for name, array in zip(names, arrays):\n name = name.split('/')\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if any(n in [\"adam_v\", \"adam_m\"] for n in name):\n print(\"Skipping {}\".format(\"/\".join(name)))\n continue\n pointer = model\n for m_name in name:\n if re.fullmatch(r'[A-Za-z]+_\\d+', m_name):\n l = re.split(r'_(\\d+)', m_name)\n else:\n l = [m_name]\n if l[0] == 'kernel' or l[0] == 'gamma':\n pointer = getattr(pointer, 'weight')\n elif l[0] == 'output_bias' or l[0] == 'beta':\n pointer = getattr(pointer, 'bias')\n elif l[0] == 'output_weights':\n pointer = getattr(pointer, 'weight')\n else:\n pointer = getattr(pointer, l[0])\n if len(l) >= 2:\n num = int(l[1])\n pointer = pointer[num]\n if m_name[-11:] == '_embeddings':\n pointer = getattr(pointer, 'weight')\n elif m_name == 'kernel':\n array = np.transpose(array)\n try:\n assert pointer.shape == array.shape\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n print(\"Initialize PyTorch weight {}\".format(name))\n pointer.data = torch.from_numpy(array)\n return model",
"def load_checkpoint(self, checkpoint_path: str, *args, **kwargs) -> Dict:\n return torch.load(checkpoint_path, *args, **kwargs)",
"def load_checkpoint(self, checkpoint_path: str, *args, **kwargs) -> Dict:\n return torch.load(checkpoint_path, *args, **kwargs)"
] | [
"0.76077384",
"0.74392045",
"0.74392045",
"0.7414661",
"0.7232018",
"0.71317524",
"0.7071087",
"0.70339084",
"0.700918",
"0.6977498",
"0.6935631",
"0.691333",
"0.68927085",
"0.68749905",
"0.68339276",
"0.6816416",
"0.6816042",
"0.67910594",
"0.6769799",
"0.6769098",
"0.6727852",
"0.6713089",
"0.6706459",
"0.6683782",
"0.6658891",
"0.66465485",
"0.6617412",
"0.6616446",
"0.6595445",
"0.6595445"
] | 0.77680707 | 1 |
Load weights from a checkpoint file into the tensorflow graph. | def load_weights(self, checkpoint_path, sess=None):
if sess is None:
sess = tf.get_default_session()
assert sess is not None
saver = tf.train.Saver(self.variables_to_restore)
saver.restore(sess, checkpoint_path) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_model_weights(sess, checkpoint_dir):\n\n init_fn = slim.assign_from_checkpoint_fn(\n checkpoint_dir, slim.get_model_variables(), ignore_missing_vars=True)\n init_fn(sess)",
"def load_weights_from_checkpoint(self, path: str, key: str):\n ckpt = torch.load(path, map_location='cpu')\n self.load_state_dict(ckpt[key])\n # self.to(self.device)",
"def load_weights_from_checkpoint(self, path: str, key: str):\n ckpt = torch.load(path, map_location='cpu')\n self.load_state_dict(ckpt[key])\n # self.to(self.device)",
"def load_weights(self, path=None):\n\n if path is None:\n path = self.checkpoints_dir\n\n self.model.load_weights(tf.train.latest_checkpoint(path))\n logging.info(f'\\tWeights loaded from {path}')",
"def load_weights(self, model_name: str, checkpoint: int, path: str = './models/'):\n path_to_model = path + model_name + '/checkpoint_' + str(checkpoint) + '/model_weights'\n self.model.load_weights(path_to_model)",
"def load_checkpoint(self, file):\n \"\"\"Load \"\"\"\n chkpnt = torch.load(file)\n self.load_state_dict(chkpnt['model_state_dict'])",
"def _load_checkpoint_to_net(config, network):\n if config.existed_ckpt:\n if config.existed_ckpt.endswith(\".npz\"):\n weights = np.load(config.existed_ckpt)\n else:\n weights = load_checkpoint(config.existed_ckpt)\n for param in network.trainable_params():\n weights_name = param.name\n if weights_name not in weights:\n raise ValueError(f\"Param {weights_name} is not found in ckpt file.\")\n\n if isinstance(weights[weights_name], Parameter):\n param.set_data(weights[weights_name].data)\n elif isinstance(weights[weights_name], Tensor):\n param.set_data(Tensor(weights[weights_name].asnumpy(), config.dtype))\n elif isinstance(weights[weights_name], np.ndarray):\n param.set_data(Tensor(weights[weights_name], config.dtype))\n else:\n param.set_data(weights[weights_name])\n else:\n for param in network.trainable_params():\n name = param.name\n value = param.data\n if isinstance(value, Tensor):\n if name.endswith(\".gamma\"):\n param.set_data(one_weight(value.asnumpy().shape))\n elif name.endswith(\".beta\") or name.endswith(\".bias\"):\n if param.data.dtype == \"Float32\":\n param.set_data((weight_variable(value.asnumpy().shape).astype(np.float32)))\n elif param.data.dtype == \"Float16\":\n param.set_data((weight_variable(value.asnumpy().shape).astype(np.float16)))\n else:\n if param.data.dtype == \"Float32\":\n param.set_data(Tensor(weight_variable(value.asnumpy().shape).astype(np.float32)))\n elif param.data.dtype == \"Float16\":\n param.set_data(Tensor(weight_variable(value.asnumpy().shape).astype(np.float16)))",
"def load_weights(self, filename):\n checkpoint = torch.load(filename)\n if not checkpoint['input_size'] == self.state_size:\n print(f\"Error when loading weights from checkpoint {filename}: input size {checkpoint['input_size']} doesn't match state size of agent {self.state_size}\")\n return None\n if not checkpoint['output_size'] == self.action_size:\n print(f\"Error when loading weights from checkpoint {filename}: output size {checkpoint['output_size']} doesn't match action space size of agent {self.action_size}\")\n return None\n my_actor_hidden_layers = [each.out_features for each in self.actor_local.hidden_layers if each._get_name()!='BatchNorm1d']\n if not checkpoint['actor_hidden_layers'] == my_actor_hidden_layers:\n print(f\"Error when loading weights from checkpoint {filename}: actor hidden layers {checkpoint['actor_hidden_layers']} don't match agent's actor hidden layers {my_actor_hidden_layers}\")\n return None\n my_critic_hidden_layers = [each.out_features for each in self.critic_local.hidden_layers if each._get_name()!='BatchNorm1d']\n if not checkpoint['critic_hidden_layers'] == my_critic_hidden_layers:\n print(f\"Error when loading weights from checkpoint {filename}: critic hidden layers {checkpoint['critic_hidden_layers']} don't match agent's critic hidden layers {my_critic_hidden_layers}\")\n return None\n self.actor_local.load_state_dict(checkpoint['actor_state_dict'])\n self.critic_local.load_state_dict(checkpoint['critic_state_dict'])",
"def load_weights(self, filepath):\n self.model.load_weights(filepath)",
"def load_weights(self, weight_file):\r\n self.model.load_weights(weight_file)",
"def try_and_init_from(self, path):\n log.info(\"Loading weights from foreign checkpoint {}\".format(path))\n if not os.path.exists(path):\n raise ValueError(\"Checkpoint {} does not exist\".format(path))\n\n chkpt = th.load(path, map_location=th.device(\"cpu\"))\n if \"model\" not in chkpt.keys() or chkpt[\"model\"] is None:\n raise ValueError(\"{} has no model saved\".format(path))\n\n mdl = chkpt[\"model\"]\n for n, p in self.model.named_parameters():\n if n in mdl:\n p2 = mdl[n]\n if p2.shape != p.shape:\n log.warning(\"Parameter {} ignored, checkpoint size does not match: {}, should be {}\".format(n, p2.shape, p.shape))\n continue\n log.debug(\"Parameter {} copied\".format(n))\n p.data.copy_(p2)\n else:\n log.warning(\"Parameter {} ignored, not found in source checkpoint.\".format(n))\n\n log.info(\"Weights loaded from foreign checkpoint {}\".format(path))",
"def load_checkpoint(checkpoint_path):\n flat_checkpoint_dict = flatten_checkpoint(\n parse_checkpoint(checkpoint_path), keep_empty_nodes=True)\n return flat_checkpoint_dict",
"def load_checkpoint(self, checkpoint: Dict[str, OrderedDict]):\n self.model.load_state_dict(checkpoint[\"model_state_dict\"])\n self.optimizer.load_state_dict(checkpoint[\"optimizer_state_dict\"])\n return self",
"def load_initial_weights(self, sess, weights_path, SKIP_LAYER):\r\n # Load the weights into memory\r\n weights_dict = np.load(weights_path, encoding='bytes').item()\r\n\r\n # list of all assignment operators\r\n # Loop over all layer names stored in the weights dict\r\n for op_name in weights_dict:\r\n\r\n # Check if layer should be trained from scratch\r\n if op_name not in SKIP_LAYER:\r\n\r\n with tf.variable_scope('model/source/' + op_name, reuse=True):\r\n\r\n # Assign weights/biases to their corresponding tf variable\r\n for data in weights_dict[op_name]:\r\n\r\n # Biases\r\n if len(data.shape) == 1:\r\n var = tf.get_variable('biases', trainable=False)\r\n # print(toMagenta(var.name))\r\n sess.run(var.assign(data))\r\n\r\n # Weights\r\n else:\r\n var = tf.get_variable('weights', trainable=False)\r\n # print(toMagenta(var.name))\r\n sess.run(var.assign(data))",
"def load_checkpoint(checkpoint, model, optimizer=None):\n if not os.path.exists(checkpoint):\n raise (\"File doesn't exist {}\".format(checkpoint))\n checkpoint = torch.load(checkpoint)\n model.load_state_dict(checkpoint['state_dict'])\n\n if optimizer:\n optimizer.load_state_dict(checkpoint['optim_dict'])\n\n return checkpoint",
"def load_checkpoint(self, checkpoint_path, continue_from_epoch=True):\n print(\"Loading checkpoint: {}\".format(checkpoint_path))\n state = torch.load(checkpoint_path)\n self.model.load_state_dict(state['state_dict'])\n self.optimizer.load_state_dict(state['optim_dict'])\n\n if continue_from_epoch:\n self.epoch = state['epoch']",
"def load_checkpoint(checkpoint, model, optimizer=None):\n if not os.path.exists(checkpoint):\n raise (\"File doesn't exist {}\".format(checkpoint))\n checkpoint = torch.load(checkpoint, map_location=torch.device('cpu'))\n model.load_state_dict(checkpoint['state_dict'])\n\n if optimizer:\n optimizer.load_state_dict(checkpoint['optim_dict'])\n\n return checkpoint",
"def load_weights(self, file):\n self.model.load_weights(file)\n return",
"def load_checkpoint(self, checkpoint_path=None):\n if checkpoint_path is None:\n checkpoint_path = self.get_latest_path()\n\n if os.path.isfile(checkpoint_path):\n key = 'cuda' if torch.cuda.is_available() else 'cpu'\n checkpoint = torch.load(checkpoint_path, map_location=key)\n self.network.load_state_dict(checkpoint['network'])\n self.network_target.load_state_dict(checkpoint['network_target'])\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n\n print('checkpoint loaded at {}'.format(checkpoint_path))\n else:\n raise OSError(\"Checkpoint file not found.\")",
"def load_weights(self):\n\n reader = pywrap_tensorflow.NewCheckpointReader(self._tf_model_prefix)\n var_to_shape_map = reader.get_variable_to_shape_map()\n data = dict()\n for name in var_to_shape_map:\n tensor = reader.get_tensor(name)\n data[name] = tensor\n\n print (\"Tensorflow checkpoint file [%s] loaded successfully. [%d] variables loaded.\"\n % (self._tf_model_prefix, len(data)))\n return data",
"def load_from_checkpoint(self, path):\n print(f'# loading trainer state from {path}')\n checkpoint = torch.load(path)\n self.load(checkpoint)",
"def load_checkpoint(checkpoint, model, optimizer=None):\n model_state_dict, optimizer_state_dict = torch.load(checkpoint)\n model.load_state_dict(model_state_dict)\n\n if optimizer is not None:\n optimizer.load_state_dict(optimizer_state_dict)",
"def load_checkpoint(tag, params, model):\r\n file_name = os.path.join(\"saved_models\", params.path, tag + \".pt\")\r\n logger.info(\"Load checkpoint from %s\" % file_name)\r\n if os.path.exists(file_name):\r\n checkpoint = torch.load(file_name, map_location='cpu')\r\n params.training_id = checkpoint['training_id']\r\n logger.info(checkpoint['training_id'])\r\n model.global_step = checkpoint['global_step']\r\n model.load_state_dict(checkpoint['model'])\r\n for i, optimizer in enumerate(model.optimizers):\r\n optimizer.load_state_dict(checkpoint['optimizers'][i])\r\n else:\r\n raise Exception(\"Checkpoint not found.\")",
"def load_checkpoint(path, model, optimizer=None, reset_optimizer=True):\n print(\"Load checkpoint from: {}\".format(path))\n state_dict, optimizer_state = _load(path)\n\n model.load_dict(state_dict)\n if not reset_optimizer and optimizer is not None:\n if optimizer_state is not None:\n print(\"[loading] Load optimizer state from {}\".format(path))\n optimizer.load(optimizer_state)\n\n return model",
"def load(loadname, checkpoint=None):\n ckpt_dir = \"./models/tf_ckpt_\" + loadname + \"/\"\n if checkpoint is not None:\n status = checkpoint.restore(tf.train.latest_checkpoint(ckpt_dir))\n status.assert_consumed()\n print(\"Loaded checkpoint\")\n else:\n print(\"Not Loading any checkpoint\")\n print(\"Starting training from initial configuration\")",
"def load_model_weights(self, filename):\n self.model.load_weights(filename)",
"def load_weights(model, fpath):\n state = torch.load(fpath)\n model.load_state_dict(state['state_dict'])",
"def load_tf_weights_in_bert(model, tf_checkpoint_path):\n try:\n import re\n import numpy as np\n import tensorflow as tf\n except ImportError:\n print(\"Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\")\n raise\n tf_path = os.path.abspath(tf_checkpoint_path)\n print(\"Converting TensorFlow checkpoint from {}\".format(tf_path))\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n arrays = []\n for name, shape in init_vars:\n print(\"Loading TF weight {} with shape {}\".format(name, shape))\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n arrays.append(array)\n\n for name, array in zip(names, arrays):\n name = name.split('/')\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if any(n in [\"adam_v\", \"adam_m\"] for n in name):\n print(\"Skipping {}\".format(\"/\".join(name)))\n continue\n pointer = model\n for m_name in name:\n if re.fullmatch(r'[A-Za-z]+_\\d+', m_name):\n l = re.split(r'_(\\d+)', m_name)\n else:\n l = [m_name]\n if l[0] == 'kernel' or l[0] == 'gamma':\n pointer = getattr(pointer, 'weight')\n elif l[0] == 'output_bias' or l[0] == 'beta':\n pointer = getattr(pointer, 'bias')\n elif l[0] == 'output_weights':\n pointer = getattr(pointer, 'weight')\n else:\n pointer = getattr(pointer, l[0])\n if len(l) >= 2:\n num = int(l[1])\n pointer = pointer[num]\n if m_name[-11:] == '_embeddings':\n pointer = getattr(pointer, 'weight')\n elif m_name == 'kernel':\n array = np.transpose(array)\n try:\n assert pointer.shape == array.shape\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n print(\"Initialize PyTorch weight {}\".format(name))\n pointer.data = torch.from_numpy(array)\n return model",
"def load_checkpoint(self, checkpoint_path: str, *args, **kwargs) -> Dict:\n return torch.load(checkpoint_path, *args, **kwargs)",
"def load_checkpoint(self, checkpoint_path: str, *args, **kwargs) -> Dict:\n return torch.load(checkpoint_path, *args, **kwargs)"
] | [
"0.76077384",
"0.74392045",
"0.74392045",
"0.7414661",
"0.7232018",
"0.71317524",
"0.7071087",
"0.70339084",
"0.700918",
"0.6977498",
"0.6935631",
"0.691333",
"0.68927085",
"0.68749905",
"0.68339276",
"0.6816416",
"0.6816042",
"0.67910594",
"0.6769799",
"0.6769098",
"0.6727852",
"0.6713089",
"0.6706459",
"0.6683782",
"0.6658891",
"0.66465485",
"0.6617412",
"0.6616446",
"0.6595445",
"0.6595445"
] | 0.77680707 | 0 |
Warn about unused static variables. | def _find_unused_static_warnings(filename, lines, ast_list):
static_declarations = {
node.name: node
for node in ast_list
if (isinstance(node, ast.VariableDeclaration) and
'static' in node.type.modifiers)
}
def find_variables_use(body):
for child in body:
if child.name in static_declarations:
static_use_counts[child.name] += 1
static_use_counts = collections.Counter()
for node in ast_list:
if isinstance(node, ast.Function) and node.body:
find_variables_use(node.body)
elif isinstance(node, ast.Class) and node.body:
for child in node.body:
if isinstance(child, ast.Function) and child.body:
find_variables_use(child.body)
for name in sorted(static_declarations):
if not static_use_counts[name]:
print("{}:{}: unused variable '{}'".format(
filename,
lines.get_line_number(static_declarations[name].start),
name)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def unusedVars(self):\n fullcode = self.code_cfg\n variables = set([x[1:] for x in codeconfig_getvars(fullcode)])\n exceptions = set(['complexity', 'code_cfg'])\n clsvars = set(vars(self).keys())\n nones = set(filter(lambda x: self.__dict__[x] is None, clsvars))\n nones = nones.union(set(filter(lambda x: str(self.__dict__[x]) == \"\", clsvars)))\n unused = clsvars - variables - exceptions - nones\n return unused",
"def check_for_unused_names(self):\n for s in self.unused_names:\n self.warning(\"'%s' is unused.\"%s)\n\n# warns for param that specified with -c (but also if name gets defined in __main__,\n# e.g. by default_density=global_params.default_density in a script file\n## for name in self.params():\n## if name in self.context:\n## self.warning(\"'%s' still exists in global_params.context\"%name)\n\n # detect duplicate param value that wasn't used (e.g. specified with after script)\n for name,val in self.params().items():\n if name in self.context:\n if self.context[name]!=self.inspect_value(name):\n self.warning(\"'%s=%s' is unused.\"%(name,self.context[name]))",
"def init_warnings():\n warnings.simplefilter(\"ignore\", category=AstropyWarning)",
"def _set_var_ignore(self):\n self._var_ignore = [k for k in self.__dict__.keys() if k[0] != '_']",
"def warn():\n pass",
"def _suppress_warnings():\n import warnings\n import sys\n import os\n if os.path.basename(sys.argv[0]) != \"trial\":\n warnings.simplefilter(\"ignore\")",
"def test_no_var_init(self):\n self._test_reports_helper({\"--no-var-init-profiling\": \"\"},\n [\"report.txt\"])",
"def warning(self, *args, **kwargs):",
"def test_instances(self):\n\n @deprecate(bar=\"use baz instead\")\n def foo(bar=None, baz=None):\n pass\n\n @deprecate(baz=\"use bar instead\")\n def food(bar=None, baz=None):\n pass\n\n with warnings.catch_warnings(record=True) as w:\n foo(bar=True)\n food(baz=True)\n self.assertEqual(len(w), 2, \"Not all warnings preserved.\")",
"def filter_warnings():\n warnings.simplefilter(\"ignore\", category=UserWarning)\n warnings.simplefilter(\"ignore\", category=LightningDeprecationWarning)",
"def log_unused(self, error=True):\n have_unused = False\n log = get_logger().error if error else get_logger().info\n for name in self._all_names:\n current_set = getattr(self, name, None)\n if current_set:\n log('Unused from %s: %s', name.upper(), current_set)\n have_unused = True\n return have_unused",
"def skip_require():\n global ignore_once\n ignore_once = True",
"def no_additional_complaints() -> None:\n logging.getLogger(\"asyncio\").setLevel(\"CRITICAL\")\n warnings.simplefilter(\"ignore\")",
"def verif_unused(sv):\r\n if Unused in sv.Object and sv.Object[Unused].value: # check presence and integrity of unused list\r\n unusedlist=[applied (x, Unused) for x in sv.Object[Unused].value]\r\n for nam in unusedlist: # check each unused declaration\r\n nod=sv.Object[nam]\r\n if sv.Namedpinlist.get(nam)==[nod.effects]: continue # pin is just named\r\n elif applied(nam, Output):\r\n if len(nod.effects)==1: # only effect is output list\r\n if len(nod.causes)<=2: continue\r\n if len(nod.causes)<=4 and Faux in nod.causes and Ewent in nod.causes: continue # allow 'take event'\r\n elif nod.causes or nod.effects: # object should have no cause and no effect\r\n print(Err_unused_obj) \r\n print(str(nam))\r\n sv.Current_clause=None, None, None\r\n raise ReferenceError",
"def log_check_warnings(self):\n pass",
"def log_check_warnings(self):\n pass",
"def log_check_warnings(self):\n pass",
"def log_check_warnings(self):\n pass",
"def log_check_warnings(self):\n pass",
"def log_check_warnings(self):\n pass",
"def log_check_warnings(self):\n pass",
"def log_check_warnings(self):\n pass",
"def warnings():\n return THE_LOGGER.warnings",
"def unusedFromKDOTDataPreparation():",
"def _var_check(self):\n missing = set()\n for v in self.variables:\n if getattr(self, v) is None:\n missing.add(v)\n self.missing = missing",
"def check_unused_attributes(self):\n all_attrs_read = collections.defaultdict(set)\n\n def _add_attrs(typ, attr_names_read):\n if typ is None:\n return\n all_attrs_read[typ] |= attr_names_read\n for base_cls in typ.__bases__:\n all_attrs_read[base_cls] |= attr_names_read\n if isinstance(typ, type):\n for child_cls in qcore.inspection.get_subclass_tree(typ):\n all_attrs_read[child_cls] |= attr_names_read\n\n for serialized, attrs_read in six.iteritems(self.attributes_read):\n attr_names_read = {attr_name for attr_name, _, _ in attrs_read}\n _add_attrs(self.unserialize_type(serialized), attr_names_read)\n\n for typ, attrs in self.config.IGNORED_UNUSED_ATTRS_BY_CLASS:\n _add_attrs(typ, attrs)\n\n used_bases = tuple(self.config.USED_BASE_CLASSES)\n\n for typ, attrs_read in sorted(\n six.iteritems(all_attrs_read), key=self._cls_sort\n ):\n if self.serialize_type(typ) not in self.classes_examined or issubclass(\n typ, used_bases\n ):\n continue\n existing_attrs = set(typ.__dict__.keys())\n for attr in existing_attrs - attrs_read - self.config.IGNORED_UNUSED_ATTRS:\n # server calls will always show up as unused here\n if _safe_getattr(_safe_getattr(typ, attr, None), \"server_call\", False):\n continue\n print(\"Unused method: %r.%s\" % (typ, attr))",
"def warnings(self, d):\n\n if d['filter_nu'] == 220e9:\n if d['beam_shape'] == 'gaussian':\n warnings.warn('The nu dependency of the gausian beam FWHM '\n 'is not a good approximation in the 220 GHz band.')\n elif d['beam_shape'] == 'fitted_beam':\n warnings.warn('Beam and solid angle frequency dependence implementation '\n 'in the 220 GHz band for the fitted beam does not correctly describe '\n 'the true behavior')",
"def has_warnings(self) -> bool:",
"def test_deprecated_private_variables(attr):\n with pytest.warns(AstropyDeprecationWarning):\n resolve_name(\"astropy\", \"cosmology\", \"flrw\", attr)",
"def _warn(msg):\n warnings.warn(msg, TessyWarning, stacklevel=3)"
] | [
"0.6999553",
"0.684138",
"0.61850595",
"0.6036887",
"0.58252496",
"0.57561684",
"0.5704306",
"0.56855494",
"0.56610996",
"0.56531423",
"0.5616008",
"0.55835503",
"0.5551007",
"0.55266124",
"0.5507778",
"0.5507778",
"0.5507778",
"0.5507778",
"0.5507778",
"0.5507778",
"0.5507778",
"0.5507778",
"0.5493492",
"0.548651",
"0.5421703",
"0.54148483",
"0.53651136",
"0.53480744",
"0.5334751",
"0.53262484"
] | 0.7391628 | 0 |
Return the parsed contents of the config file. | def get_config():
return json.loads(CONFIG_FILE.read_text()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_config(self):\n config = configparser.ConfigParser()\n config.read(self.configfile)\n return config",
"def get(self):\n config = self.user_file.parseString(self.content)\n return config",
"def get(self):\n if self.file:\n self._read()\n config = self.client_file.parseString(self.content)\n return config",
"def _parseConfigFile(self):\n\n configFile = self._configFile()\n\n configs = configparser.SafeConfigParser()\n try:\n with open(configFile, 'r', encoding='utf-8') as fh:\n try:\n configs.readfp(fh)\n return configs\n except configparser.Error:\n log(ERROR, traceback.format_exc())\n return None\n except IOError:\n log(DEBUG, \"Error: Could not read from config file {0}\\n\".format(configFile))\n return None",
"def get_config():\n with open(CONFIG_PATH) as config_file:\n data = json.load(config_file)\n return data",
"def read_config():\n with open(CONFIG_PATH) as config_file:\n return json.load(config_file)",
"def _get_config(self, unit, filename):\n file_contents = unit.file_contents(filename)\n config = ConfigParser.ConfigParser()\n config.readfp(io.StringIO(file_contents))\n return config",
"def read_config(self, config_filename):",
"def config():\n with open(config_path) as config_file:\n data = json.load(config_file)\n return data",
"def parse_config(self):\n # TODO: parse config file\n pass",
"def parse(self):\n\n if exists(self.filepath):\n content = open(self.filepath).read().decode(charset)\n else:\n content = \"\"\n\n try:\n config = toml.loads(content)\n except toml.TomlSyntaxError:\n raise ConfigSyntaxError\n\n return config",
"def get_config() -> configparser.ConfigParser:\n config = configparser.ConfigParser()\n config.read(CONFIG_FILE)\n\n return config",
"def _read_config_file(self):\r\n\r\n try:\r\n with open(self.config, 'r') as f:\r\n config_data = json.load(f)\r\n except FileNotFoundError:\r\n config_data = {}\r\n\r\n return config_data",
"def get_config(file_path):\n config = configparser.ConfigParser()\n config.read(file_path)\n return config",
"def read_config(config_file):\n config = configparser.ConfigParser()\n config.read(config_file)\n return config",
"def get_config(configfile):\n cfg = ConfigParser.ConfigParser()\n cfg.read(configfile)\n return cfg",
"def read_config(contents):\n file_obj = io.StringIO(contents)\n config = six.moves.configparser.ConfigParser()\n config.readfp(file_obj)\n return config",
"def read_configuration (self):\n\t\tself.config.read(self._configfile)",
"def get_config():\n handle = open(\"config.json\", \"r\")\n raw_json = handle.read()\n handle.close()\n return json.loads(raw_json)",
"def get(self):\n return util.getJSONFile(CONFIG_PATH)",
"def get(self):\n _config_file = None\n _parsed_config = configparser.ConfigParser()\n try:\n _config_file = open(self._config_path, \"r\")\n except OSError as e:\n logger.error(str(e))\n Utils.exiter(1)\n try:\n _parsed_config.read_file(_config_file)\n except configparser.ParsingError as e:\n logger.error(str(e))\n Utils.exiter(1)\n\n _defaults = _parsed_config.defaults()\n _t = {}\n for (_k, _v) in _defaults:\n _t[self._format_keys(_k)] = self._format_values(_v)\n self.config[self._format_keys(\"defaults\")] = _t\n\n for _s in _parsed_config.sections():\n _t = {}\n for (_k, _v) in _parsed_config.items(_s):\n _t[self._format_keys(_k)] = self._format_values(_v)\n self.config[self._format_keys(_s)] = _t\n logger.debug(f\"Got config: {json.dumps(self.config, indent=2)}\")\n return self.config",
"def get_confg(self):\n\n ini = ConfigParser()\n self.config_parser = ini\n # if isinstance(cfile, (file, StringIO.StringIO, io.BytesIO)):\n if isinstance(self.config_data, str) and self.config_data:\n fp = io.BytesIO(self.config_data)\n ini.readfp(fp)\n elif self.config_file is not None:\n ini.read([self.config_file, os.path.expanduser('~/.' + self.config_file)])\n\n if ini.has_section('whoshere'):\n return ini.items('whoshere')\n\n return {}",
"def read_config():\n config = configparser.ConfigParser()\n if not os.path.exists(\"config.cfg\"):\n raise FileNotFoundError(\"configuration file (config.cfg) not found!\")\n config.read(\"config.cfg\")\n return config",
"def load(self):\n with open(self.conf_fname, \"r\") as fd:\n config = json.load(fd)\n \n return config",
"def get_config(config_path='config.ini'):\n config = configparser.ConfigParser()\n config.read(config_path)\n return config",
"def get_config(self):\r\n if not os.path.exists(self.config_file):\r\n return None\r\n return json.loads(file(self.config_file).read())",
"def parsed_file(config_file):\n parser = ConfigParser(allow_no_value=True)\n parser.read_file(config_file)\n return parser",
"def readConfig(file=\"dispatcher.conf\"):\n\n parser = configparser.ConfigParser()\n parser.read(file)\n machines = parser.items(\"MACHINES\")\n commands = parser.items(\"COMMANDS\")\n\n return machines, commands",
"def get_config(full_path):\n config = configparser.RawConfigParser()\n config.read(full_path)\n print(\"Found these configs:\")\n for config_name in config.sections():\n print('-', config_name)\n return config",
"def read_config(self) -> dict:\n\n if self.valid_is_json():\n with open(self.file_name, 'r') as file:\n return json_loads(file.read())\n elif self.valid_is_yaml():\n with open(self.file_name, 'r') as file:\n return yaml_loads(file.read(), Loader=Loader)\n else:\n raise Exception('Invalid config file')"
] | [
"0.77496666",
"0.7607564",
"0.75940424",
"0.75590175",
"0.7350562",
"0.7329519",
"0.7322378",
"0.73064345",
"0.7271919",
"0.72459453",
"0.71848226",
"0.7173341",
"0.71679926",
"0.7166633",
"0.71516",
"0.7046862",
"0.7030489",
"0.70298284",
"0.6951747",
"0.693856",
"0.6933751",
"0.6929327",
"0.69200516",
"0.6907919",
"0.69077086",
"0.69007176",
"0.68702143",
"0.6862842",
"0.685601",
"0.6842408"
] | 0.76075804 | 1 |
seed users. by default set to 5 users | def seed_User(number=5, overwrite=False):
if overwrite:
print('Overwriting all users')
User.objects.all().delete()
count = 0
for i in range(number):
username = fake.first_name()
User.objects.create_user(
email=username + "@blogmail.com",
password="vns12345",
name=username,
date_joined=datetime.datetime.now(),
is_active=1,
is_superadmin=0,
avatar='',
is_staff=1
)
count += 1
percent_complete = count / number * 100
print(
"Adding {} new Users: {:.2f}%".format(
number, percent_complete),
end='\r',
flush=True
)
print() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_users(count=10):\n for i in range(count):\n user = generate_random_user()\n db.session.add(user)\n db.session.commit()",
"def setUp(self):\n self.users = [UserFactory.create() for i in range(5)]",
"def create_users(N):\n for _ in range(N):\n name = fake.name()\n phone = fake.phone_number()\n email = fake.email()\n role = random.choice([\"shepherd\",\"admin\"])\n password = fake.user_name\n User.objects.create(\n name=name,phone=phone,\n email=email,role=role,\n password=password\n )",
"def run_seed(self):\n user = User(username='administrator')\n user.set_password('123456123a')\n user.save()",
"def populate(N=5):\n for entry in range(N):\n # Create the fake data for the entry\n fake_name = fakegen.name().split()\n fake_first_name = fake_name[0]\n fake_last_name = fake_name[1]\n fake_email = fakegen.email()\n\n # Create the new User entry\n user = User.objects.get_or_create(first_name=fake_first_name, last_name=fake_last_name, email=fake_email)[0]",
"def insert_default_users():\n user1 = User(email=current_app.config['ADMIN_EMAIL'],\n password=current_app.config['ADMIN_PW'],\n first_name=current_app.config['ADMIN_FIRST_NAME'],\n last_name=current_app.config['ADMIN_LAST_NAME'],\n confirmed=True)\n user1.role = Role.query.filter_by(name='Administrator').first()\n db.session.add(user1)\n\n user2 = User(email=current_app.config['USERMANAGER_EMAIL'],\n password=current_app.config['USERMANAGER_PW'],\n first_name=current_app.config['USERMANAGER_FIRST_NAME'],\n last_name=current_app.config['USERMANAGER_LAST_NAME'],\n confirmed=True)\n user2.role = Role.query.filter_by(name='Usermanager').first()\n db.session.add(user2)\n\n user3 = User(email=current_app.config['USER_EMAIL'],\n password=current_app.config['USER_PW'],\n first_name=current_app.config['USER_FIRST_NAME'],\n last_name=current_app.config['USER_LAST_NAME'],\n confirmed=True)\n user3.role = Role.query.filter_by(name='User').first()\n db.session.add(user3)\n\n db.session.commit()",
"def seed():\n u = User(email=\"[email protected]\", is_admin=False)\n u.set_password(\"foobar123\")\n\n db.session.add(u)\n db.session.commit()",
"def _create_users(self):\r\n users = []\r\n for i in range(8):\r\n username = \"user{}\".format(i)\r\n email = \"test+user{}@edx.org\".format(i)\r\n user = User.objects.create_user(username, email, 'foo')\r\n user.is_active = True\r\n user.save()\r\n users.append(user)\r\n return users",
"def setUpClass(cls):\n super(EmotionTest, cls).setUpClass()\n user = UserFactory(username='dan', email='[email protected]')\n user.set_password('password')\n user.first_name = 'Dan'\n user.last_name = 'Theman'\n user.save()\n cls.dan = user\n\n for _ in range(10):\n user = UserFactory.create()\n user.set_password(factory.Faker('password'))\n user.save()",
"def user_batch():\n return [\n UserFactory(roles=RoleFactory.create_batch(randint(0, 3)))\n for _ in range(randint(3, 5))\n ]",
"def seed_db():\n db.session.add(User(username='Joe', email='[email protected]'))\n db.session.add(User(username='Joe2', email='[email protected]'))\n db.session.commit()",
"def load_users():\n\n \n\n User.query.delete()\n\n with open(\"seed_data/seed_users.psv\") as users:\n for row in users:\n username, fname, lname, email, password, user_role = row.strip().split(\"|\")\n\n user = User(username=username,\n fname=fname,\n lname=lname,\n email=email,\n password=generate_password_hash(password),\n user_role=user_role)\n\n db.session.add(user)\n\n db.session.commit()",
"def load_users():\n\n print \"Users\"\n\n User.query.delete()\n\n for row in open(\"seed_data/u.user\"):\n row = row.rstrip()\n ID, password, name, first_entry_at = row.split(\"|\")\n first_entry_at = datetime.strptime(first_entry_at, \"%m-%d-%y\")\n\n user = User(ID=ID, password=password, name=name, first_entry_at=first_entry_at)\n\n db.session.add(user)\n\n db.session.commit()",
"def setUp(self):\n self.new_users = User('Dennis', 'Kiplangat', 'kiplangat18')",
"def seed_users(project_env, runlevel):\n\n db_client_maker = core_db.get_nest_users_sqla_maker()\n md = nest_db.get_global_sqlalchemy_metadata()\n engine = nest_db.get_global_sqlalchemy_engine()\n #note this is a tablelike client, not a NestUser client\n db_client = db_client_maker.get_db_client(engine, md)\n\n #needs a unique *instance* of system_user to act as 'owner' \n #as we will alter the instance that we add to the table\n db_client.set_requesting_user(core_db.get_system_user())\n\n user_configs = nest_config.generate_seed_users(project_env, runlevel)\n \n success = _add_users_from_configs(db_client, user_configs)\n return success",
"def create_users(self):\n if self.gl is None:\n print(\"No config found, please run connect first.\")\n exit(1)\n else:\n print(\"Starting Users creation.\")\n gl = self.gl\n config = self.config\n for username in config[\"users\"]:\n i = 0\n count = int(config[\"users\"][username][\"count\"])\n pw = config[\"users\"][username][\"pass\"]\n groups = config[\"users\"][username][\"groups\"]\n while i < count:\n i += 1\n print(\"creating user: \" + username + '-' + str(i) + \" ...\", end=' ')\n user = gl.users.create({'email': username + str(i) + '@example.com',\n 'password': pw,\n 'username': username + '-' + str(i),\n 'name': username + '-' + str(i),\n 'skip_confirmation': True})\n self.users.append(user)\n self.usergroups[user.id] = groups\n print(\"done.\")\n print(\"All Users created!\")",
"def setUp(self):\n users = []\n users.append(user.User(username=\"username\", name=\"name\", email=\"[email protected]\", password_hash=\"password_hash\", salt=\"salt\", profile_picture=b\"profile_picture\"))\n users.append(user.User(username=\"test\", password_hash=\"iiojfeaioieof\", salt=\"saltySalt\"))\n users.append(user.User(username=\"jeff\", name=\"jeff bob\", password_hash=\"eeeeeeeeeeeeeee\", salt=\"fffffffffffffff\"))\n users.append(user.User(username=\"epicUsername69\", email=\"[email protected]\", password_hash=\"asdfafeadf\", salt=\"graefgafae\"))\n db.create_all()\n for value in users:\n db.session.add(value)\n db.session.commit()",
"def generate_fake(count=100, **kwargs):\n from sqlalchemy.exc import IntegrityError\n from random import seed, choice\n from faker import Faker\n\n fake = Faker()\n\n seed()\n for i in range(count):\n u = User(\n username=fake.first_name(),\n email=fake.email(),\n password='password',\n **kwargs)\n db.session.add(u)\n try:\n db.session.commit()\n except IntegrityError:\n db.session.rollback()",
"def create_users(self):\n from django.contrib.auth.models import User\n user = User.objects.create_user('red', '', 'red')\n user = User.objects.create_user('green', '', 'green')\n user = User.objects.create_user('blue', '', 'blue')",
"def _create_and_enroll_users(self, count):\n users = []\n for _ in range(count):\n user = UserFactory()\n CourseEnrollmentFactory.create(user=user, course_id=self.course.id)\n users.append(user)\n return users",
"def generate_fake(count=100, **kwargs):\n from sqlalchemy.exc import IntegrityError\n from random import seed, choice\n from faker import Faker\n\n fake = Faker()\n roles = Role.query.all()\n\n seed()\n for i in range(count):\n u = User(\n first_name=fake.first_name(),\n last_name=fake.last_name(),\n email=fake.email(),\n password='password',\n confirmed=True,\n role=choice(roles),\n **kwargs)\n db.session.add(u)\n try:\n db.session.commit()\n except IntegrityError:\n db.session.rollback()",
"def populate_database_with_users(\n db_session, extra_username: t.Optional[str] = None\n) -> None:\n for _ in range(0, 3):\n username = create_random_username()\n db_session.add(GifSyncUser(username=username))\n if extra_username:\n db_session.add(GifSyncUser(username=extra_username))\n db_session.commit()",
"def setUp(self):\n\n # Allocates users\n self.users = []\n self.user_session_tokens = []\n\n # Template for creating users\n user_template = {\n \"clientId\": 2,\n \"username\": \"user\",\n \"pwd\": \"password\",\n \"nameLast\": \"Last\",\n \"nameFirst\": \"First\",\n \"email\": \"[email protected]\",\n \"phone\": \"123-4567\",\n \"profile_picture_path\": \"/\",\n \"timezoneDefault\": \"EST\",\n \"languageDefault\": \"English\"\n }\n\n # Creates 'n' users and stores them\n n = 3\n for i in range(0, n):\n user = deepcopy(user_template)\n user['username'] += randstr()\n user['email'] += randstr()\n handler.user_create(event=user, context=None)\n self.users.append(user)\n self.user_session_tokens.append(None)",
"def seed_all():\n seed_client()\n seed_staff()\n seed_request()\n seed_comment()",
"def setUp(self):\n self.new_users = User(\"Zephon Makale\", \"1234xyz\") #Create User object",
"def load_users():\n\n for i, row in enumerate(open('seed_data/users.csv')):\n data = row.rstrip().split(\",\")\n user_id, email, password = data\n\n user = User(user_id=user_id, email=email,\n password=password)\n\n db.session.add(user)\n\n # For testing, just to see it was happening\n # if i % 100 == 0:\n # print i\n\n db.session.commit()",
"def seed():\n if User.find_by_identity(app.config['SEED_ADMIN_EMAIL']) is not None:\n return None\n\n user = User(\n role = 'admin',\n email = app.config['SEED_ADMIN_EMAIL'],\n password = app.config['SEED_ADMIN_PASSWORD']\n )\n category = Category(\n name='Red Blend',\n description='',\n parent_id=0,\n owner=1\n )\n region = Region(\n name='Columbia Valley',\n description='',\n parent_id=0,\n country='United States',\n state='Washington',\n owner=1\n )\n wine = Wine(\n name='Test Wine',\n maker='Test Maker',\n vintage='2000',\n category=1,\n region=1,\n owner=1\n )\n\n db.session.add(user)\n db.session.commit()\n db.session.add(category)\n db.session.commit()\n db.session.add(region)\n db.session.commit()\n db.session.add(wine)\n db.session.commit()\n\n return user",
"def create_db(num_users=5):\n db.create_all()",
"def load_users():\n filepath = \"./seed_data/u.user\"\n users = open(filepath)\n\n\n for user in users:\n user = user.rstrip().split('|')\n db_user = User(user_id=user[0], age=user[1], zipcode=user[4])\n db.session.add(db_user)\n\n db.session.commit()",
"def load_users():\n\n print \"Users\"\n\n # Delete all rows in table, so if we need to run this a second time,\n # we won't be trying to add duplicate users\n User.query.delete()\n\n # Read u.user file and insert data\n for row in open(\"seed_data/u.user\"):\n row = row.rstrip()\n user_id, age, gender, occupation, zipcode = row.split(\"|\")\n\n user = User(user_id=user_id,\n age=age,\n zipcode=zipcode)\n\n # We need to add to the session or it won't ever be stored\n db.session.add(user)\n\n # Once we're done, we should commit our work\n db.session.commit()"
] | [
"0.76051784",
"0.73858875",
"0.7105484",
"0.7039456",
"0.7026748",
"0.70025635",
"0.69474685",
"0.69027597",
"0.68793637",
"0.6835339",
"0.6813174",
"0.6786388",
"0.6739348",
"0.6732933",
"0.6717712",
"0.66460043",
"0.66320664",
"0.6586459",
"0.6585731",
"0.65754217",
"0.65524065",
"0.65043545",
"0.64892256",
"0.6479792",
"0.6473599",
"0.64275825",
"0.6415466",
"0.641006",
"0.6395737",
"0.6383656"
] | 0.7705716 | 0 |
set_score increments the score by change, which can be negative | def set_score(self, change):
self._score = self._score + change | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_score(self,score):\n self._score = score",
"def set_score(self, score):\n self._score = score",
"def update_score():\n pass",
"def set_score(self, a, b, score):\n ### FILL IN ###",
"def set_score(self, score):\n # Update the score display\n self.score = score\n self._prep_score()\n\n # Update the high score if required\n if self.score > self.high_score:\n self.high_score = score\n self._prep_high_score()",
"def score(self, score):\n\n self._score = score",
"def score(self, score):\n\n self._score = score",
"def score(self, score):\n\n self._score = score",
"def updateScore(self, score):\n self.__score += score",
"def update_score(self, score: int) -> int:\n self.score += score\n return self.score",
"def updateScore(score):\n return score + 1",
"def set_score(self, score_index: int, score: float) -> None:\n self._scores[score_index - 1] = score",
"def increase_score(self):\n self.score += 1",
"def change_score(self, change: float=1):\n self._score += change",
"def change_score(self, change: float = 1):\n self._score += change",
"def setScore(self, i, score):\n self.scores[i - 1] = score",
"def increase_score(self, increase):\n if increase > 0:\n self.__score += increase",
"def update_turn_score(self, score):\n\n # Increment the attribute by the passed value\n self._current_score += score",
"def score(self, score: str):\n\n self._score = score",
"def set_score(self, points):\n self.score += points",
"def adjust_score(self):\n self.score += game.temporary_score",
"def reset_score(self):\n self._score = p.params['initial_score']",
"def add_score(self, score):\n self._score += score",
"def update_score(self, board):\n self._score += 1",
"def setScore(self, score=None):\r\n self._score = score\r\n self.ids[\"_scoreDisplayer\"].displayScore(score) \r\n self.ids[\"_emailSender\"]._score = self._score\r\n self.ids[\"_scoreSaver\"]._score = self._score\r\n self.ids[\"_MidiPlayer\"]._score = self._score",
"def update_score(score, role):\n if role == 'winner':\n score = score + 1\n if role == 'loser':\n score = score - 1\n return score",
"def set_rewards_score(self, _score: Address) -> None:\n if self.msg.sender == self.owner:\n self._rewards_score.set(_score)",
"def update_g_score(self, value):\n self.g_score = value",
"def min_score(self, score):\n self._evaluated = False\n self._min_score = score\n return self",
"def __init__(self, score=0):\n self.score = score"
] | [
"0.82878745",
"0.8127291",
"0.81113905",
"0.8077499",
"0.7966757",
"0.796316",
"0.796316",
"0.796316",
"0.79581094",
"0.78421885",
"0.7841102",
"0.7821862",
"0.78135276",
"0.77125627",
"0.7695137",
"0.76055825",
"0.75447255",
"0.75010866",
"0.7482682",
"0.7436947",
"0.74270767",
"0.73933923",
"0.7379702",
"0.7360062",
"0.72873527",
"0.7276128",
"0.72540087",
"0.7168964",
"0.7163298",
"0.70986295"
] | 0.830666 | 0 |
move_ray this is the primary function which is responsible for recursively moving a ray. Although it primarily looks after the action of the Ray.Ray class, it lives in the Game instance itself. THIS IS HOW WE DETERMINE THE EXIT POINT OF ALL RAYS HORIZONTAL, VERTICAL, OR WITH DETOURS | def move_ray(self, ray):
# look to the next spot in the ray's trajectory
next_coordinates = ray.get_next_location()
next_location = self._board.get_board_square(next_coordinates)
        # check for a collision - return if it occurs
if ray.check_for_collision(next_location):
return
        # if we didn't collide as we moved we need to check our
        # diagonals for atoms
ccw_diag_coordinates, cw_diag_coordinates = ray.get_diagonals()
ccw_diagonal = self._board.get_board_square(ccw_diag_coordinates)
cw_diagonal = self._board.get_board_square(cw_diag_coordinates)
if ccw_diagonal.is_atom() or cw_diagonal.is_atom():
            # If we're on our first move and the immediate diagonals contain an atom, we have a reflection
if ray.get_current_location() == ray.get_origin_location():
terminal_square = self._board.get_board_square(
ray.get_current_location())
                # lets the ray know it's finished and records the square as its endpoint
# self.end_ray(ray, terminal_square)
return ray.record_edge_collision(terminal_square)
# otherwise they cause a bend in the path
else:
# we have to calculate our trajectory based on the pull
# of the atoms in our path
ray.recalculate_trajectory(ccw_diagonal, cw_diagonal)
# get the coordinates of the next location in our new trajectory
next_coordinates = ray.get_next_location()
                # determine whether the next coordinate will result in a collision - return if it would
if ray.check_for_collision(
self._board.get_board_square(next_coordinates)):
return
# move the ray to the next step forward in its current trajectory
ray.set_current_location(next_coordinates)
# finally, recursively call our current function from the next step in its path.
self.move_ray(ray) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def shoot_ray(self, row, col):\n # Uses validate method to check if row,col are legal for ray entrance location\n if not self.valid_ray(row, col):\n return False\n # creates ray object from row, col integers\n ray = Ray(row, col)\n # checks if atom is in front of entrance position\n if not ray.can_continue(self.get_a_locations()):\n self.mark_portal(ray.get_start())\n if self.get_score() <= 0:\n self.change_state(\"LOST\")\n return None\n # while there is no atom in front of ray and ray will not exit board --\n while ray.can_continue(self.get_a_locations()):\n ray.check_diags(self.get_a_locations())\n # moves ray forward one space\n ray.advance()\n # if ray will exit board by advancing --\n if not ray.on_board():\n # adjusts score if entrance/exit do not match prior entrances/exits\n self.mark_portal(ray.get_start(), ray.get_pos())\n # changes state to lose if score is now <= 0\n if self.get_score() <= 0:\n self.change_state(\"LOST\")\n # returns tuple of exit location\n return tuple(ray.get_pos())\n # if ray is blocked by atom --\n if not ray.no_atom(self.get_a_locations()):\n # changes state to lost if score is now <= 0\n self.mark_portal(ray.get_start())\n if self.get_score() <= 0:\n self.change_state(\"LOST\")\n return None",
"def follow(ray: Ray, scene: Scene, max_iters=1000, renderer=None) -> [Tuple[Ray, Decision]]:\n path = [(ray, Decision.EMIT)]\n idx = 0\n last_ray = ray\n while ray.is_alive:\n intersections = scene.intersections(ray.position, ray.direction)\n points, nodes = zip(*[(x.point, x.hit) for x in intersections])\n for ray, decision in step(ray, points, nodes, renderer=renderer):\n path.append((ray, decision))\n if points_equal(ray.position, last_ray.position) and np.allclose(ray.direction, last_ray.direction):\n raise TraceError(\"Ray did not move.\")\n last_ray = ray\n if idx > max_iters:\n raise TraceError(\"Ray got stuck.\")\n return path",
"def step(ray, points, nodes, renderer=None):\n container, to_node, surface_node = ray_status(ray, points, nodes)\n min_point = ray.position\n max_point = points[0]\n \n dist = distance_between(min_point, max_point)\n _ray = ray\n for (ray, decision) in trace_path(ray, container, dist):\n if renderer:\n renderer.add_ray_path([_ray, ray])\n _ray = ray\n yield ray, decision\n\n if to_node is None and container.parent is None:\n # Case: Hit world node; kill ray here.\n ray = replace(ray, is_alive=False)\n yield ray, Decision.KILL\n elif points_equal(ray.position, max_point):\n # Case: Hit surface\n # NB The ray argument of `trace_surface` *must* be a ray on the surface of the \n # node and the returned ray must *not* be on the node!\n before_ray = ray\n _ray = ray\n for ray, decision in trace_surface(ray, container, to_node, surface_node):\n if renderer:\n renderer.add_ray_path([_ray, ray])\n _ray = ray\n yield ray, decision\n # Avoid error checks in production\n if __debug__:\n local_ray = ray.representation(surface_node.root, surface_node)\n if surface_node.geometry.is_on_surface(local_ray.position):\n logger.warning(\"(before) pos: {}\".format(before_ray.position))\n logger.warning(\"(after) pos: {}\".format(ray.position))\n raise TraceError(\"After tracing a surface the ray cannot still be on the surface.\")",
"def shoot_ray(self, origin_row, origin_column):\n\n # get the the square object at row x column\n origin = self._board.get_board_square((origin_row, origin_column))\n\n # check that it is a valid \"edge\" to send a ray from\n origin_check = origin.is_edge()\n\n # if it's not then return false\n if origin_check == False:\n return False\n\n # if we pass the origin check create shoot a new Ray.Ray object from row x column\n new_ray = Ray.Ray(origin_row, origin_column)\n\n # let the square we shot from know its an orign square\n origin.set_originating_ray(new_ray)\n # Deduct 1 from the score since we now have on exit point\n self.set_score(-1)\n\n # while the ray object has a direction (will be set to none when it reaches an endpoint)\n # send it to the helper function that will move it\n while new_ray.get_direction() != None:\n self.move_ray(new_ray)\n\n # if we hit an exit point (other than through reflection) deduct the point for that\n terminus = new_ray.get_terminal_location()\n # check the the terminal point is an edge (hitting an atom returns none as terminus)\n\n if terminus != None:\n # check that the terminus is not a reflection, which shouldn't be counted twice\n terminal_square = self._board.get_board_square(terminus)\n terminal_square.set_terminating_ray(new_ray)\n if terminus != (origin_row, origin_column):\n self.set_score(-1)\n\n return terminus",
"def shoot_ray(self, entry_x, entry_y):\r\n\r\n # check to make sure entry_x and entry_y are valid\r\n if (entry_x in [0, 9] or entry_y in [0, 9]) and \\\r\n self._board.get_board_item(entry_x, entry_y) != \"o\":\r\n\r\n exit_tup = self._board.find_exit(entry_x, entry_y)\r\n # returned 0 if hit\r\n if exit_tup == 0:\r\n # decrement entry only if not visited\r\n marker = self.get_hit_marker()\r\n circle_tuple = self.calculate_entry_exit(entry_y, entry_x)\r\n marker.update_center(circle_tuple)\r\n points = self._player.add_entry_exit((entry_x, entry_y), marker,\r\n (entry_x, entry_y))\r\n self._stats.dec_player_score(points)\r\n return \"Hit\"\r\n elif exit_tup == 1:\r\n # decrement entry only if not visited\r\n marker = self.get_reflect_marker()\r\n circle_tuple = self.calculate_entry_exit(entry_y, entry_x)\r\n marker.update_center(circle_tuple)\r\n points = self._player.add_entry_exit((entry_x, entry_y), marker,\r\n (entry_x, entry_y))\r\n\r\n self._stats.dec_player_score(points)\r\n\r\n return \"reflect\"\r\n else:\r\n # decrement both entry and exit if not already visited\r\n marker = self.get_color_marker()\r\n exit_x, exit_y = exit_tup\r\n circle_entry = self.calculate_entry_exit(entry_y, entry_x)\r\n circle_exit = self.calculate_entry_exit(exit_y, exit_x)\r\n marker.update_center(circle_entry, circle_exit)\r\n points = self._player.add_entry_exit((entry_x, entry_y),\r\n marker, exit_tup)\r\n\r\n self._stats.dec_player_score(points)\r\n return exit_tup\r\n else:\r\n # returns false if the shoot_ray point is invalid\r\n return \"Bad shot\"",
"def move_to_exit(self, time_move=0.25):\n \n #While the agent is not on the exit, we keep going through the labyrinth\n while self.agent_node.labyrinth_position != self.exit_point.labyrinth_position:\n\n #We use breadth first search to create the tree with the distance of every node from the agent position\n self.breadth_first_search()\n node_to_move_on = self.find_node_to_move_on(self.exit_point)\n self.update_statistics_after_move(node_to_move_on)\n self.set_datas_after_move(node_to_move_on)\n\n #We clear the terminal to print the labyrinth with the new position of the agent\n clear = \"cls\" if platform.system() == \"Windows\" else \"clear\"\n os.system(clear)\n self.print_labyrinth()\n time.sleep(time_move)",
"def moveBasedOnRetreatAction(self, time_passed):\n cpos = self.toScreenCoordinate()\n mpos = pygame.mouse.get_pos()\n toMouse = Vector2.from_points(cpos,mpos)\n toMouse.normalize()\n rheading = -toMouse\n \n heading = self.heading\n angle_between = heading.angle_between(rheading)\n if angle_between>=-30 and angle_between<=30:\n return\n \n distance = time_passed * self.speed\n movement = rheading * distance\n x = movement.get_x()\n y = movement.get_y()\n if not self.checkCollision(x, y) and self.checkValidCoord(x, y):\n self.move(x, y)",
"def rayShooting():\r\n \r\n \r\n if nbRay==1:\r\n maxi=1\r\n mini=1\r\n peaceofAngle=angleMax\r\n #to trace one ray at angleMax\r\n else:\r\n maxi=(nbRay-1)/2\r\n mini=-maxi\r\n peaceofAngle=2*angleMax/(nbRay-1)\r\n #to trace rays at regular intervals between [-angleMax;angleMax] \r\n\r\n tot=0 #to count the number of peace of ray\r\n indice=0 #to browse raysIndex\r\n\r\n raysMatrix=np.empty(shape=(0,5),dtype=np.float64)#will contain all the rays in a row\r\n raysIndex=np.empty(shape=(nbRay,),dtype=np.int16)#indexation of the rays in raysMatrix\r\n \r\n for i in np.arange(mini,maxi+1,1):#put maxi+1 to include maxi in the loop\r\n \r\n rayon=Rayon(source.position,angleToVector(peaceofAngle*i))#rayon is\r\n #the ray we will trace\r\n ray,compt=traceRay(rayon)\r\n tot+=(compt+1)\r\n\r\n \r\n raysIndex[indice]=tot #the rays index contains the indice just above\r\n #of the end of the i th ray\r\n\r\n raysMatrix=np.vstack((raysMatrix,ray))\r\n #the form of the ray matrix is a stack of peace of rays describe by\r\n #a,b,c,x1,reflexion. the polynome of the peace of ray being ax^2+bx+c and the\r\n #abscisses of the limiting point being x1, reflexion indicating if a reflexion happened\r\n #when we meet a 5-uple with a coefficient b or c infinite it means\r\n #a new ray begin\r\n \r\n indice+=1\r\n print(\"ray at indice\",i,\"and at angle\",peaceofAngle*i/np.pi*180,'degree(s)')\r\n \r\n print(\"the total number of peaces of ray is :\", tot)\r\n\r\n return(raysMatrix,raysIndex)",
"def _walk(self):\n \n newpos= self.rect.move((self.move, 0)) # x方向移動 .move, y方向不動。\n \n # 偵測碰撞左右牆壁,並處理(反彈)\n if not self.area.contains(newpos):\n if self.rect.left < self.area.left or \\\n self.rect.right > self.area.right:\n self.move = -self.move\n newpos = self.rect.move((self.move, 0))\n self.image = pygame.transform.flip(self.image, 1, 0)\n self.rect = newpos",
"def rollout(leaf, depth):\n if depth <= 0:\n return 0\n\n total_reward = 0\n prev_state = leaf\n\n for i in range(depth):\n cur_state = prev_state.copy()\n agent_actions = rollout_policy(cur_state)\n agents_obs, _, done, _ = cur_state.game_env.step(agent_actions)\n\n # After making a move, update the memory kept on this node\n cur_state.agent_memory = utility.update_agent_memory(cur_state.agent_memory,\n agents_obs[cur_state.agent_id])\n\n reward = decide_reward(prev_state, cur_state)\n total_reward += reward\n\n prev_state = cur_state\n\n if done:\n break\n\n return total_reward",
"def moveFunction(target, rays):\r\n for ray in rays:\r\n ray.hitTarget(target)",
"def moveStep(self):\n\t\tif self.pos[0] <= self.boundsX[0] or \\\n\t\t(self.pos[0]+ 2*(self.radius)) >= self.boundsX[1]:\n\t\t\tself.dir[0] *= -1\n\t\t\t\n\t\tself.pos[0] += self.dir[0]*self.speed\n\t\tself.pos[1] += self.dir[1]*self.speed",
"def move_draught_end(event):\n global red_draughts, white_draughts\n global board_array\n global old_point\n global die_1_num, die_2_num, doubles\n draught = board.find_withtag(CURRENT)[0]\n #Figure out which point they want to put it on\n bottom = (event.y-click_offset[1] >= board_height//2)\n point_left_edges = [board_divisions*i for i in xrange(0,15) if i != 7]\n is_red = draught in red_draughts\n if bottom == False:\n new_point = 12+point_left_edges.index(min(point_left_edges, key=lambda x:abs(x-event.x+click_offset[0])))\n else:\n new_point = 13-point_left_edges.index(min(point_left_edges, key=lambda x:abs(x-event.x+click_offset[0]))) \n #Check legality\n if(board_array[new_point][1] > 1 and is_red) or (board_array[new_point][0] > 1 and not is_red): #if too many opposite color on square\n draw_draughts()\n return\n if(board_array[0][0] > 0 and is_red and old_point != 0)or(board_array[25][1] > 0 and not is_red and old_point != 25):#Obligated to move off bar first\n draw_draughts()\n return\n if(new_point == 0 and not is_red): #if white trying to bear off\n for i in xrange(7,26):\n if(board_array[i][1] > 0): #If white has a piece outside home, can't bear off\n draw_draughts()\n return\n if(new_point == 25 and is_red): #if red trying to bear off\n for i in xrange(0,18):\n if(board_array[i][0] > 0): #If red has a piece outside home, can't bear off\n draw_draughts()\n return \n \n if(new_point-old_point == die_1_num and is_red) or (old_point-new_point == die_1_num and not is_red):\n if(doubles == False) or (die_2_num != 0):\n die_1_num = 0\n else: \n die_2_num = die_1_num\n doubles = False\n elif(new_point-old_point == die_2_num and is_red) or (old_point-new_point == die_2_num and not is_red):\n if(doubles == False) or (die_1_num != 0):\n die_2_num = 0\n else: \n die_1_num = die_2_num\n doubles = False\n else: #Can't move there on this roll\n draw_draughts()\n return\n update_dice()\n #Update board_array\n if is_red:\n board_array[old_point][0] -= 1\n board_array[new_point][0] += 1\n if(board_array[new_point][1] == 1): #Handle hits\n board_array[new_point][1] -= 1\n board_array[25][1] += 1\n else:\n board_array[old_point][1] -= 1\n board_array[new_point][1] += 1\n if(board_array[new_point][0] == 1): #Handle hits\n board_array[new_point][0] -= 1\n board_array[0][0] += 1\n\n draw_draughts()\n if(die_1_num == 0 and die_2_num == 0):\n comp_turn()",
"def step(self, action):\n # print(action)\n distances = self.agent.return_distances(self.agent.corners, self.agent.line_pos)\n\n left = distances[0]\n right = distances[1]\n self.agent.distances.append({\n 'left': left,\n 'right': right\n })\n reward = 0\n if action == 1:\n self.agent.angle -= 90\n if self.agent.angle < 0:\n self.agent.angle = 0\n self.agent.direction_history.append('left')\n self.reset_raycasts(self.agent.angle)\n self.render()\n if left > right:\n reward += 5\n else:\n reward -= 5\n\n elif action == 2:\n self.agent.angle += 90\n if self.agent.angle >= 360:\n self.agent.angle = 0\n\n self.reset_raycasts(self.agent.angle)\n self.render()\n self.agent.direction_history.append('right')\n if left < right:\n reward += 5\n else:\n reward -= 5\n\n elif action == 0:\n self.agent.direction_history.append('forward')\n if self.agent.angle >= 360: self.agent.angle == 0\n if self.agent.angle == 0 or self.agent.angle == 360:\n self.agent.agent_position['y'] -= 10\n self.reset_raycasts(self.agent.angle)\n elif self.agent.angle == 90: \n self.agent.agent_position['x'] += 10\n self.reset_raycasts(self.agent.angle)\n elif self.agent.angle == 180: \n self.agent.agent_position['y'] += 10\n self.reset_raycasts(self.agent.angle)\n elif self.agent.angle == 270:\n self.agent.agent_position['x'] -= 10\n self.reset_raycasts(self.agent.angle)\n \n if left + right >= 50:\n reward += 5\n\n self.render()\n\n elif action == 3:\n self.agent.direction_history.append('reverse')\n if self.agent.angle == 0:\n self.agent.agent_position['y'] += 10\n self.reset_raycasts(self.agent.angle)\n self.render()\n elif self.agent.angle == 90: \n self.agent.agent_position['x'] -= 10\n self.reset_raycasts(self.agent.angle)\n self.render()\n elif self.agent.angle == 180: \n self.agent.agent_position['y'] -= 10\n self.reset_raycasts(self.agent.angle)\n self.render()\n elif self.agent.angle == 270:\n self.agent.agent_position['x'] += 10\n self.reset_raycasts(self.agent.angle)\n self.render()\n \n if left + right <= 50:\n reward += 5\n\n \n else:\n reward -= 5\n\n if \"forward\" not in self.agent.direction_history[len(self.agent.direction_history)-6:len(self.agent.direction_history)-1]:\n reward -= 10\n\n \n info = {}\n if self.agent.check_collision():\n reward -= 10\n self.reset() \n self.agent.rewards.append({\n 'leftDistance': left,\n 'rightDistance': right,\n 'reward': reward,\n })\n self.render()\n print(f\"REWARD: {reward}\")\n # self.render()\n # print(self.agent.direction_history[-1])\n self.agent.rewards.append(reward)\n return np.array([left, right]), reward, False, info",
"def move_agent(self, state):\n m = self.m\n n = self.n\n\n cur_env = deepcopy(state.grid)\n cur_env[m, n] = 0\n action = self.choose_action(state)\n\n if action == 'Right':\n if n + 1 >= grid_size or cur_env[m][n+1] != 0:\n Rew = -2 # Reward -5 if we move into wall or another agent\n self.collisions += 1\n else:\n n += 1\n Rew = -0.1 # Reward -1 otherwise\n a = 0 # Action number\n elif action == 'Left':\n if n - 1 < 0 or cur_env[m][n-1] != 0:\n Rew = -2\n self.collisions += 1\n else:\n n -= 1\n Rew = -0.1\n a = 1\n elif action == 'Up':\n if m - 1 < 0 or cur_env[m-1][n] != 0:\n Rew = -2\n self.collisions += 1\n else:\n m -= 1\n Rew = -0.1\n a = 2\n elif action == 'Down':\n if m + 1 >= grid_size or cur_env[m+1][n] != 0:\n Rew = -2\n self.collisions += 1\n else:\n m += 1\n Rew = -0.1\n a = 3\n\n m = m % grid_size\n n = n % grid_size\n self.m = m # Update position of agent\n self.n = n # Update position of agent\n cur_env[m][n] = 1 # Update grid\n new_state = State(cur_env, [m, n]) # Set new state\n terminal = False\n\n if [m, n] == self.end:\n Rew = 10\n terminal = True\n self.carry = True\n\n return new_state, a, Rew, terminal",
"def Enmove(self):\r\n if self.vel > 0:\r\n if self.rect.x + self.vel < self.path[1]:\r\n self.rect.x += self.vel #Moving enemy towards end of path\r\n else:\r\n if self.flipped: #flip enemy and move along opposite direction\r\n self.image = pygame.transform.flip(self.image, True, False)\r\n self.flipped = False\r\n self.vel = -self.vel\r\n else:\r\n if self.rect.x - self.vel > self.path[0]:\r\n self.rect.x += self.vel #Moving enemy back to starting point\r\n else:\r\n if not self.flipped: #determining whether image should be flipped\r\n self.image = pygame.transform.flip(self.image, True, False)\r\n self.flipped = True \r\n self.vel = -self.vel",
"def ray(self):\n return self._ray",
"def draw_ray(env, ray, dist=0.03, linewidth=2, color=None):\n if dist < 0:\n newpos = ray.pos() + dist*ray.dir()\n newray = orpy.Ray(newpos, ray.dir())\n else:\n newray = ray\n iktype = orpy.IkParameterizationType.TranslationDirection5D\n ikparam = orpy.IkParameterization(ray, iktype)\n h = orpy.misc.DrawIkparam2(env, ikparam, dist=dist, linewidth=linewidth,\n coloradd=color)\n return h",
"def move(self):\n \"\"\" Responsible for transformations \"\"\"\n pos, com, success = self.perception \n if self.destination is None:\n return array([0,0])\n\n if not self.awake:\n return array([0,0])\n\n\n if self.phase == 4 and self.proper_formation is not None:\n no_go = []\n for i in range(0,len(self.proper_formation)):\n if i != self.order and self.proper_formation[i][0] == self.proper_formation[self.order][0]:\n no_go.append(self.transform(self.proper_formation[i][1] - self.position))\n pos = merge_array_lists(pos, no_go)\n\n if self.phase == 2:\n point = self.destination.copy() - self.position\n elif self.phase > 2:\n point = self.transform(self.destination.copy() - self.position)\n else:\n point = self.destination.copy()\n\n if not array_equal(point, array([0,0])):\n reachable, path = findpathtoclosest(array([0,0]), point, pos)\n \n if len(path) == 0:\n move = array([0,0]) \n else:\n move = path[0]\n if not reachable and not array_equal(move,array([0,0])):\n if self.phase == 2:\n self.closest_i_could_get = path[-1] + self.position\n elif self.phase > 2:\n self.closest_i_could_get = self.transform2(path[-1]) + self.position\n else:\n self.closest_i_could_get = path[-1]\n elif not reachable:\n if self.phase > 1:\n self.closest_i_could_get = self.position\n else:\n self.closest_i_could_get = array([0,0])\n else:\n self.closest_i_could_get = None\n\n if reachable and self.phase == 4 and array_equal(move,array([0,0])):\n move = self.randomStep()\n self.closest_i_could_get = None\n\n else:\n move = array([0,0])\n self.closest_i_could_get = None\n\n return move",
"def move_down():\n return __maze.move_down()",
"def mouse_move(self, obj, event):\n last_pos = self.iren.GetLastEventPosition()\n next_pos = self.iren.GetEventPosition()\n last_disp_coords = np.asarray([last_pos[0], last_pos[1], 0])\n next_disp_coords = np.asarray([next_pos[0], next_pos[1], 0])\n last_world_coords = self.display_to_world(last_disp_coords)\n next_world_coords = self.display_to_world(next_disp_coords)\n world_direction = (last_world_coords - next_world_coords)[0]\n\n if world_direction > 0:\n direction = 'forwards'\n elif world_direction < 0:\n direction = 'backwards'\n else:\n direction = 'none'\n\n if self.cone_dir == 'start':\n if direction == 'backwards':\n self.start_base_x += .5\n if self.start_base_x.is_integer():\n ind = str(int(self.start_base_x))\n isvalid = self.gaps.set_dragged_start(ind)\n if isvalid:\n self.ren_win.Render()\n else:\n self.start_base_x -= .5\n return\n\n elif direction == 'forwards':\n if self.start_base_x > 0:\n self.start_base_x -= .5\n if self.start_base_x.is_integer():\n ind = str(int(self.start_base_x))\n self.gaps.set_dragged_start(ind)\n self.ren_win.Render()\n\n if self.cone_dir == 'end':\n if direction == 'backwards':\n if self.end_base_x > 0:\n self.end_base_x -= .5\n if self.end_base_x.is_integer():\n ind = str(int(self.end_base_x))\n self.gaps.set_dragged_end(ind)\n self.ren_win.Render()\n\n elif direction == 'forwards':\n self.end_base_x += .5\n if self.end_base_x.is_integer():\n ind = str(int(self.end_base_x))\n isvalid = self.gaps.set_dragged_end(ind)\n if isvalid:\n self.ren_win.Render()\n else:\n self.end_base_x -= .5\n return",
"def _move(self, d, event):\n\n actor = None\n if event.source in self._crates:\n actor = self._crates[event.source]\n else:\n actor = self._clones[event.source][0]\n\n if d == Direction.NO_ACT or not event.success:\n actor.animation = actor.do_nothing_animation()\n return\n pos = actor.pos\n target = pos.dir_pos(d)\n actor.direction = d\n actor.animation = actor.walk_animation()\n self.repaint()",
"def move_step(self, direction):\n x = self.objects[0].x\n y = self.objects[0].y\n if direction == 0 and y >= 1:\n self.objects[0].y -= 1\n elif direction == 1 and y <= self.size_y - 2:\n self.objects[0].y += 1\n elif direction == 2 and x >= 1:\n self.objects[0].x -= 1\n elif direction == 3 and x <= self.size_x - 2:\n self.objects[0].x += 1",
"def search_my_move(self, env: ChessEnv, is_root_node=False) -> float:\n\t\tif env.done:\n\t\t\tif env.winner == Winner.draw:\n\t\t\t\treturn 0\n\t\t\t# assert env.whitewon != env.white_to_move # side to move can't be winner!\n\t\t\treturn -1\n\n\t\tstate = state_key(env)\n\n\t\twith self.node_lock[state]:\n\t\t\tif state not in self.tree:\n\t\t\t\tleaf_p, leaf_v = self.expand_and_evaluate(env)\n\t\t\t\tself.tree[state].p = leaf_p\n\t\t\t\treturn leaf_v # I'm returning everything from the POV of side to move\n\t\t\t#assert state in self.tree\n\n\t\t\t# SELECT STEP\n\t\t\taction_t = self.select_action_q_and_u(env, is_root_node)\n\n\t\t\tvirtual_loss = self.play_config.virtual_loss\n\n\t\t\tmy_visit_stats = self.tree[state]\n\t\t\tmy_stats = my_visit_stats.a[action_t]\n\n\t\t\tmy_visit_stats.sum_n += virtual_loss\n\t\t\tmy_stats.n += virtual_loss\n\t\t\tmy_stats.w += -virtual_loss\n\t\t\tmy_stats.q = my_stats.w / my_stats.n\n\n\t\tenv.step(action_t.uci())\n\t\tleaf_v = self.search_my_move(env) # next move from enemy POV\n\t\tleaf_v = -leaf_v\n\n\t\t# BACKUP STEP\n\t\t# on returning search path\n\t\t# update: N, W, Q\n\t\twith self.node_lock[state]:\n\t\t\tmy_visit_stats.sum_n += -virtual_loss + 1\n\t\t\tmy_stats.n += -virtual_loss + 1\n\t\t\tmy_stats.w += virtual_loss + leaf_v\n\t\t\tmy_stats.q = my_stats.w / my_stats.n\n\n\t\treturn leaf_v",
"def ray_status(ray, points, nodes):\n container = find_container(ray, nodes)\n \n # Handle special case of last step where ray is hitting the world node\n root = nodes[0].root\n if container == root and len(nodes) == 1:\n status = root, None, root\n return status\n\n if nodes[0] == container:\n surface_node = nodes[0]\n to_node = nodes[1]\n else:\n surface_node = nodes[0]\n to_node = nodes[0]\n status = container, to_node, surface_node\n return status",
"def reflect(self, ray):\n normal = self.normal(ray.position)\n if normal.dot(ray.direction) > 0:\n normal = -normal\n return Ray(\n ray.direction - 2 * dot(ray.direction, normal) * normal, ray.position)",
"def traceRay2XY(IKLE,MESHX,MESHY,neighbours,ei,xyi,en,xyn):\n # ~~> latest addition to the ray\n ax,bx,cx = MESHX[IKLE[en]]\n ay,by,cy = MESHY[IKLE[en]]\n bi = getBarycentricWeights( xyi,(ax,ay),(bx,by),(cx,cy) )\n pnt = {'n':1, 'xy':[xyi], 'e':[en], 'b':[bi],\n 'd':[np.power(xyi[0]-xyn[0],2) + np.power(xyi[1]-xyn[1],2)]}\n\n # ~~> convergence on distance to target xyn\n accuracy = np.power(10.0, -5+np.floor(np.log10(abs(ax+bx+cx+ay+by+cy))))\n if pnt['d'][0] < accuracy: return True,pnt\n\n # ~~> get the ray through to the farthest neighbouring edges\n ks = []; ds = []\n for k in [0,1,2]:\n xyj = getSegmentIntersection( (MESHX[IKLE[en][k]],MESHY[IKLE[en][k]]),(MESHX[IKLE[en][(k+1)%3]],MESHY[IKLE[en][(k+1)%3]]),xyi,xyn )\n if xyj == []: continue # there are no intersection with that edges\n ej = neighbours[en][k]\n if ej == ei: continue # you should not back track on your ray\n xyj = xyj[0]\n dij = np.power(xyi[0]-xyj[0],2) + np.power(xyi[1]-xyj[1],2)\n ks.append(k)\n ds.append(dij)\n if ds != []:\n k = ks[np.argmax(ds)]\n ej = neighbours[en][k]\n xyj = getSegmentIntersection( (MESHX[IKLE[en][k]],MESHY[IKLE[en][k]]),(MESHX[IKLE[en][(k+1)%3]],MESHY[IKLE[en][(k+1)%3]]),xyi,xyn )[0]\n djn = np.power(xyn[0]-xyj[0],2) + np.power(xyn[1]-xyj[1],2)\n\n # ~~> Possible recursive call\n if True or djn > accuracy: # /!\\ this may be a problem\n if ej < 0:\n # you have reach the end of the line\n bj = getBarycentricWeights( xyj,(ax,ay),(bx,by),(cx,cy) )\n pnt['n'] += 1; pnt['xy'].insert(0,xyj); pnt['e'].insert(0,en); pnt['b'].insert(0,bj); pnt['d'].insert(0,djn)\n return djn<accuracy,pnt\n else:\n found,ray = traceRay2XY(IKLE,MESHX,MESHY,neighbours,en,xyj,ej,xyn)\n ray['n'] += 1; ray['xy'].append(xyi); ray['e'].append(en); ray['b'].append(bi); ray['d'].append(dij)\n return found,ray\n\n # ~~> convergence on having found the appropriate triangle\n bn = isInsideTriangle( xyn,(ax,ay),(bx,by),(cx,cy) )\n if bn != []:\n pnt['n'] += 1; pnt['xy'].insert(0,xyn); pnt['e'].insert(0,en); pnt['b'].insert(0,bn); pnt['d'].insert(0,0.0)\n return True,pnt\n\n # ~~> you should not be here !\n return False,pnt",
"async def _move_radec(self, ra: float, dec: float, abort_event: asyncio.Event) -> None:\n\n # start slewing\n await self.__move(ra, dec, abort_event)",
"def move(self):\n\n # get the location we WOULD go to\n newX = self.xcor() + self.dx\n newY = self.ycor() + self.dy\n while (abs (newX) > self.BOX_RANGE) or (abs(newY) > self.BOX_RANGE):\n # print(\"choosing new direction... \",end=\"\")\n self.chooseNewDirection()\n # print(self.dx, self.dy)\n newX = self.xcor() + self.dx\n newY = self.ycor() + self.dy\n\n # now move our monster\n super().move()",
"def search_my_move(self, env:Chess, is_root_node=False) -> float:\n if env.over():\n if env.victor == Victor.draw:\n return 0\n # assert env.whitewon != env.white_to_move # side to move can't be winner!\n return -1\n\n state = board_state_key(env)\n\n with self.node_lock[state]:\n if state not in self.tree:\n leaf_p, leaf_v = self.expand_and_evaluate(env)\n self.tree[state].p = leaf_p\n return leaf_v # I'm returning everything from the POV of side to move\n\n # SELECT STEP\n action_t = self.select_action_q_and_u(env, is_root_node)\n\n virtual_loss = self.play_conf.virtual_loss\n\n my_visit_stats = self.tree[state]\n my_stats = my_visit_stats.a[action_t]\n\n my_visit_stats.sum_n += virtual_loss\n my_stats.n += virtual_loss\n my_stats.w += -virtual_loss\n my_stats.q = my_stats.w / my_stats.n\n\n env.make_move(action_t.uci())\n leaf_v = self.search_my_move(env) # next move from enemy POV\n leaf_v = -leaf_v\n\n # BACKUP STEP\n # on returning search path\n # update: N, W, Q\n with self.node_lock[state]:\n my_visit_stats.sum_n += -virtual_loss + 1\n my_stats.n += -virtual_loss + 1\n my_stats.w += virtual_loss + leaf_v\n my_stats.q = my_stats.w / my_stats.n\n\n return leaf_v"
] | [
"0.6431586",
"0.6042099",
"0.5938072",
"0.5936508",
"0.592946",
"0.58522797",
"0.56683385",
"0.5636461",
"0.5598364",
"0.55645293",
"0.55484056",
"0.5496541",
"0.54507935",
"0.543731",
"0.5407489",
"0.5379115",
"0.53595096",
"0.53525144",
"0.534523",
"0.5339988",
"0.5306413",
"0.5302718",
"0.5302426",
"0.53018546",
"0.52990806",
"0.5298072",
"0.5278345",
"0.52619886",
"0.5254762",
"0.5253217"
] | 0.71545416 | 0 |
shoot_ray shoots a ray from a given row and column if possible | def shoot_ray(self, origin_row, origin_column):
        # get the square object at row x column
origin = self._board.get_board_square((origin_row, origin_column))
# check that it is a valid "edge" to send a ray from
origin_check = origin.is_edge()
# if it's not then return false
if origin_check == False:
return False
        # if we pass the origin check, create and shoot a new Ray.Ray object from row x column
new_ray = Ray.Ray(origin_row, origin_column)
        # let the square we shot from know it's an origin square
origin.set_originating_ray(new_ray)
        # Deduct 1 from the score since we now have one exit point
self.set_score(-1)
# while the ray object has a direction (will be set to none when it reaches an endpoint)
# send it to the helper function that will move it
while new_ray.get_direction() != None:
self.move_ray(new_ray)
# if we hit an exit point (other than through reflection) deduct the point for that
terminus = new_ray.get_terminal_location()
        # check that the terminal point is an edge (hitting an atom returns None as terminus)
if terminus != None:
# check that the terminus is not a reflection, which shouldn't be counted twice
terminal_square = self._board.get_board_square(terminus)
terminal_square.set_terminating_ray(new_ray)
if terminus != (origin_row, origin_column):
self.set_score(-1)
return terminus | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def shoot_ray(self, row, col):\n # Uses validate method to check if row,col are legal for ray entrance location\n if not self.valid_ray(row, col):\n return False\n # creates ray object from row, col integers\n ray = Ray(row, col)\n # checks if atom is in front of entrance position\n if not ray.can_continue(self.get_a_locations()):\n self.mark_portal(ray.get_start())\n if self.get_score() <= 0:\n self.change_state(\"LOST\")\n return None\n # while there is no atom in front of ray and ray will not exit board --\n while ray.can_continue(self.get_a_locations()):\n ray.check_diags(self.get_a_locations())\n # moves ray forward one space\n ray.advance()\n # if ray will exit board by advancing --\n if not ray.on_board():\n # adjusts score if entrance/exit do not match prior entrances/exits\n self.mark_portal(ray.get_start(), ray.get_pos())\n # changes state to lose if score is now <= 0\n if self.get_score() <= 0:\n self.change_state(\"LOST\")\n # returns tuple of exit location\n return tuple(ray.get_pos())\n # if ray is blocked by atom --\n if not ray.no_atom(self.get_a_locations()):\n # changes state to lost if score is now <= 0\n self.mark_portal(ray.get_start())\n if self.get_score() <= 0:\n self.change_state(\"LOST\")\n return None",
"def shoot_ray(self, entry_x, entry_y):\r\n\r\n # check to make sure entry_x and entry_y are valid\r\n if (entry_x in [0, 9] or entry_y in [0, 9]) and \\\r\n self._board.get_board_item(entry_x, entry_y) != \"o\":\r\n\r\n exit_tup = self._board.find_exit(entry_x, entry_y)\r\n # returned 0 if hit\r\n if exit_tup == 0:\r\n # decrement entry only if not visited\r\n marker = self.get_hit_marker()\r\n circle_tuple = self.calculate_entry_exit(entry_y, entry_x)\r\n marker.update_center(circle_tuple)\r\n points = self._player.add_entry_exit((entry_x, entry_y), marker,\r\n (entry_x, entry_y))\r\n self._stats.dec_player_score(points)\r\n return \"Hit\"\r\n elif exit_tup == 1:\r\n # decrement entry only if not visited\r\n marker = self.get_reflect_marker()\r\n circle_tuple = self.calculate_entry_exit(entry_y, entry_x)\r\n marker.update_center(circle_tuple)\r\n points = self._player.add_entry_exit((entry_x, entry_y), marker,\r\n (entry_x, entry_y))\r\n\r\n self._stats.dec_player_score(points)\r\n\r\n return \"reflect\"\r\n else:\r\n # decrement both entry and exit if not already visited\r\n marker = self.get_color_marker()\r\n exit_x, exit_y = exit_tup\r\n circle_entry = self.calculate_entry_exit(entry_y, entry_x)\r\n circle_exit = self.calculate_entry_exit(exit_y, exit_x)\r\n marker.update_center(circle_entry, circle_exit)\r\n points = self._player.add_entry_exit((entry_x, entry_y),\r\n marker, exit_tup)\r\n\r\n self._stats.dec_player_score(points)\r\n return exit_tup\r\n else:\r\n # returns false if the shoot_ray point is invalid\r\n return \"Bad shot\"",
"def rayShooting():\r\n \r\n \r\n if nbRay==1:\r\n maxi=1\r\n mini=1\r\n peaceofAngle=angleMax\r\n #to trace one ray at angleMax\r\n else:\r\n maxi=(nbRay-1)/2\r\n mini=-maxi\r\n peaceofAngle=2*angleMax/(nbRay-1)\r\n #to trace rays at regular intervals between [-angleMax;angleMax] \r\n\r\n tot=0 #to count the number of peace of ray\r\n indice=0 #to browse raysIndex\r\n\r\n raysMatrix=np.empty(shape=(0,5),dtype=np.float64)#will contain all the rays in a row\r\n raysIndex=np.empty(shape=(nbRay,),dtype=np.int16)#indexation of the rays in raysMatrix\r\n \r\n for i in np.arange(mini,maxi+1,1):#put maxi+1 to include maxi in the loop\r\n \r\n rayon=Rayon(source.position,angleToVector(peaceofAngle*i))#rayon is\r\n #the ray we will trace\r\n ray,compt=traceRay(rayon)\r\n tot+=(compt+1)\r\n\r\n \r\n raysIndex[indice]=tot #the rays index contains the indice just above\r\n #of the end of the i th ray\r\n\r\n raysMatrix=np.vstack((raysMatrix,ray))\r\n #the form of the ray matrix is a stack of peace of rays describe by\r\n #a,b,c,x1,reflexion. the polynome of the peace of ray being ax^2+bx+c and the\r\n #abscisses of the limiting point being x1, reflexion indicating if a reflexion happened\r\n #when we meet a 5-uple with a coefficient b or c infinite it means\r\n #a new ray begin\r\n \r\n indice+=1\r\n print(\"ray at indice\",i,\"and at angle\",peaceofAngle*i/np.pi*180,'degree(s)')\r\n \r\n print(\"the total number of peaces of ray is :\", tot)\r\n\r\n return(raysMatrix,raysIndex)",
"def check_click(self, mouse_x, mouse_y):\r\n # Change the x/y screen coordinates to grid coordinates\r\n column = mouse_x // 70\r\n row = mouse_y // 70\r\n\r\n if row in [0, 9] or column in [0, 9]:\r\n self.shoot_ray(row, column)\r\n elif 0 < row < 9 and 0 < column < 9:\r\n self.guess_atom(row, column)",
"def ship_shoot(ship, x, y):\n click.echo('Ship %s fires to %s,%s' % (ship, x, y))",
"def obj_ray_cast(obj, matrix):\r\n \r\n # get the ray relative to the object\r\n matrix_inv = matrix.inverted()\r\n ray_origin_obj = matrix_inv * ray_origin\r\n ray_target_obj = matrix_inv * ray_target\r\n ray_direction_obj = ray_target_obj - ray_origin_obj\r\n \r\n # cast the ray\r\n success, location, normal, face_index = obj.ray_cast(ray_origin_obj, ray_direction_obj)\r\n \r\n if success:\r\n return location, normal, face_index\r\n else:\r\n return None, None, None",
"def maybe_shoot(self):\n res = self.space.segment_query_first((self.tank.body.position[0] - \\\n 0.6 * math.sin(self.tank.body.angle), self.tank.body.position[1] +\\\n 0.6 * math.cos(self.tank.body.angle)), (self.tank.body.position[0] -\\\n 10*math.sin(self.tank.body.angle), self.tank.body.position[1] + \\\n 10*math.cos(self.tank.body.angle)), 0, pymunk.ShapeFilter())\n if res is not None:\n try:\n if hasattr(res, 'shape'):\n if isinstance(res.shape.parent, gameobjects.Tank):\n bullet = self.tank.shoot(self.space)\n if bullet is not None:\n self.game_objects_list.append(bullet)\n elif isinstance(res.shape.parent, gameobjects.Box):\n if res.shape.parent.boxmodel.destructable is True:\n bullet = self.tank.shoot(self.space)\n if bullet is not None:\n self.game_objects_list.append(bullet)\n except:\n pass",
"def raytrace(pos1: tuple, pos2: tuple) -> list:\n x0, y0 = pos1\n x1, y1 = pos2\n tiles = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n x, y = x0, y0\n n = 1 + dx + dy\n x_inc = 1 if x1 > x0 else -1\n y_inc = 1 if y1 > y0 else -1\n error = dx - dy\n dx *= 2\n dy *= 2\n\n while n > 0:\n tiles.append((x, y))\n if error > 0:\n x += x_inc\n error -= dy\n else:\n y += y_inc\n error += dx\n n -= 1\n return tiles",
"def draw_ray(env, ray, dist=0.03, linewidth=2, color=None):\n if dist < 0:\n newpos = ray.pos() + dist*ray.dir()\n newray = orpy.Ray(newpos, ray.dir())\n else:\n newray = ray\n iktype = orpy.IkParameterizationType.TranslationDirection5D\n ikparam = orpy.IkParameterization(ray, iktype)\n h = orpy.misc.DrawIkparam2(env, ikparam, dist=dist, linewidth=linewidth,\n coloradd=color)\n return h",
"def obj_ray_cast(obj, matrix):\n\n # get the ray relative to the object\n matrix_inv = matrix.inverted()\n ray_origin_obj = matrix_inv * ray_origin\n ray_target_obj = matrix_inv * ray_target\n ray_direction_obj = ray_target_obj - ray_origin_obj\n \n # cast the ray\n success, location, normal, face_index = obj.ray_cast(ray_origin_obj, ray_direction_obj)\n\n if success:\n return location, normal, face_index, ray_target\n else:\n return None, None, None, ray_target",
"def obj_ray_cast(obj, matrix):\n\n # get the ray relative to the object\n matrix_inv = matrix.inverted()\n ray_origin_obj = matrix_inv @ ray_origin\n ray_target_obj = matrix_inv @ ray_target\n ray_direction_obj = ray_target_obj - ray_origin_obj\n\n # cast the ray\n success, location, normal, face_index = obj.ray_cast(ray_origin_obj, ray_direction_obj)\n\n if success:\n return location, normal, face_index\n else:\n return None, None, None",
"def obj_ray_cast(obj, matrix):\n\n # get the ray relative to the object\n matrix_inv = matrix.inverted()\n ray_origin_obj = matrix_inv * ray_origin\n ray_target_obj = matrix_inv * ray_target\n ray_direction_obj = ray_target_obj - ray_origin_obj\n\n # cast the ray\n success, location, normal, face_index = obj.ray_cast(ray_origin_obj, ray_direction_obj)\n\n if success:\n return location, normal, face_index\n else:\n return None, None, None",
"def test_compute_pixel_rays() -> None:\n u = 12\n v = 2\n img_w = 20\n img_h = 10\n fx = 10\n fy = 10\n\n ray_dir = _compute_pixel_ray_direction(u, v, fx, fy, img_w, img_h)\n\n gt_ray_dir: NDArrayFloat = np.array([2.0, -3.0, 10.0])\n gt_ray_dir /= np.linalg.norm(gt_ray_dir)\n\n assert np.allclose(gt_ray_dir, ray_dir)",
"def shoot(self, pos_to_shoot):\n return [SHOOT, pos_to_shoot]",
"def sling_action():\n global mouse_distance\n global rope_lenght\n global angle\n global x_mouse\n global y_mouse\n # Fixing bird to the sling rope\n v = vector((sling_x, sling_y), (x_mouse, y_mouse))\n uv = unit_vector(v)\n uv1 = uv[0]\n uv2 = uv[1]\n # mouse_distance = distance(sling_x, sling_y, x_mouse, y_mouse)\n sling = Vec2d(sling_x, sling_y)\n mouse = Vec2d(x_mouse, y_mouse)\n mouse_distance = (sling - mouse).length\n\n pu = (uv1*rope_lenght+sling_x, uv2*rope_lenght+sling_y)\n bigger_rope = 102\n x_redbird = x_mouse - 20\n y_redbird = y_mouse - 20\n if mouse_distance > rope_lenght:\n pux, puy = pu\n pux -= 20\n puy -= 20\n pul = pux, puy\n screen.blit(redbird, pul)\n pu2 = (uv1*bigger_rope+sling_x, uv2*bigger_rope+sling_y)\n pygame.draw.line(screen, (0, 0, 0), (sling2_x, sling2_y), pu2, 5)\n screen.blit(redbird, pul)\n pygame.draw.line(screen, (0, 0, 0), (sling_x, sling_y), pu2, 5)\n else:\n mouse_distance += 10\n pu3 = (uv1*mouse_distance+sling_x, uv2*mouse_distance+sling_y)\n pygame.draw.line(screen, (0, 0, 0), (sling2_x, sling2_y), pu3, 5)\n screen.blit(redbird, (x_redbird, y_redbird))\n pygame.draw.line(screen, (0, 0, 0), (sling_x, sling_y), pu3, 5)\n # Angle of impulse\n dy = y_mouse - sling_y\n dx = x_mouse - sling_x\n if dx == 0:\n dx = 0.00000000000001\n angle = math.atan((float(dy))/dx)",
"def direct(sun_pos, grid):\n\n # for each pixel at top of grid pass sun rays in\n for i in xrange(grid.gr.shape[0]):\n \"\"\"\n Make an array starting at loc\n \"\"\"\n xpos = i * grid.xres\n ypos = grid.zres * grid.zsize\n pos = np.array(xpos, ypos)\n direction = pos - sun_pos / np.norm(pos - sun_pos) # this location minus \n r = ray(pos, direction)\n \"\"\"\n The ray now travels down through the canopy being\n altered by transmission and reflectance\n\n amount of scattering vs absorption is determined by leaf area density\n\n \"\"\"",
"def moveFunction(target, rays):\r\n for ray in rays:\r\n ray.hitTarget(target)",
"def check_pin_ball_hit(time_elapsed):\n\n pass",
"def get_initial_rays_trig(bs,\n num_steps,\n fov,\n resolution,\n ray_start,\n ray_end,\n device, ):\n\n W, H = resolution\n # Create full screen NDC (-1 to +1) coords [x, y, 0, 1].\n # Y is flipped to follow image memory layouts.\n x, y = torch.meshgrid(torch.linspace(-1, 1, W, device=device),\n torch.linspace(1, -1, H, device=device))\n x = x.T.flatten() # (HxW, ) [[-1, ..., 1], ...]\n y = y.T.flatten() # (HxW, ) [[1, ..., -1]^T, ...]\n z = -torch.ones_like(x, device=device) / np.tan((2 * math.pi * fov / 360) / 2) # (HxW, )\n\n rays_d_cam = normalize_vecs(torch.stack([x, y, z], -1)) # (HxW, 3)\n\n z_vals = torch.linspace(ray_start,\n ray_end,\n num_steps,\n device=device) \\\n .reshape(1, num_steps, 1) \\\n .repeat(W * H, 1, 1) # (HxW, n, 1)\n points = rays_d_cam.unsqueeze(1).repeat(1, num_steps, 1) * z_vals # (HxW, n_samples, 3)\n\n points = torch.stack(bs * [points]) # (b, HxW, n_samples, 3)\n z_vals = torch.stack(bs * [z_vals]) # (b, HxW, n_samples, 1)\n rays_d_cam = torch.stack(bs * [rays_d_cam]).to(device) # (b, HxW, 3)\n\n return points, z_vals, rays_d_cam",
"def test_compute_pixel_ray_directions_vectorized() -> None:\n fx = 10\n fy = 10\n\n # dummy 2d coordinates in the image plane.\n uv: NDArrayInt = np.array([[12, 2], [12, 2], [12, 2], [12, 2]])\n\n # principal point is at (10,5)\n img_w = 20\n img_h = 10\n\n pinhole_camera = _create_pinhole_camera(\n fx_px=fx,\n fy_px=fy,\n cx_px=img_w / 2,\n cy_px=img_h / 2,\n height_px=img_h,\n width_px=img_w,\n cam_name=\"ring_front_center\", # dummy name\n )\n ray_dirs = pinhole_camera.compute_pixel_ray_directions(uv)\n\n gt_ray_dir: NDArrayFloat = np.array([2, -3, 10.0])\n gt_ray_dir /= np.linalg.norm(gt_ray_dir)\n\n for i in range(4):\n assert np.allclose(gt_ray_dir, ray_dirs[i])",
"def point_on_ray(self, t=0.5):\n\n assert 0. <= t <=1., 't must be between 0 and 1'\n\n\n return self.detector_origin + (self._origin - self.detector_origin) * t",
"def ai_shoot(self, gk, goal_x):\n\n angles = {\n 1: { # For team 1\n 'SHOOT_E': math.pi/4,\n 'SHOOT_D': 0,\n 'SHOOT_C': -math.pi/4,\n },\n 2: { # For team 2\n 'SHOOT_Q': math.pi*3/4,\n 'SHOOT_A': math.pi,\n 'SHOOT_Z': -math.pi*5/4,\n },\n }\n\n self_pos = P(self.pos.x, H-self.pos.y)\n gk_pos = P(gk.pos.x, H-gk.pos.y)\n\n possible_shots = []\n for k, v in angles[self.team_id].items():\n line = [ # Equation of line as A*x +B*y + C = 0\n math.sin(v), # x coeff\n -math.cos(v), # y coeff\n self_pos.y*math.cos(v) - self_pos.x*math.sin(v), # constant\n ]\n intersection_pt = -(line[2] + line[0]*goal_x)/line[1]\n if GOAL_POS[0]*H < intersection_pt < GOAL_POS[1]*H:\n possible_shots.append((-self.dist_to_line(line, gk_pos), k))\n\n if possible_shots:\n shot = sorted(possible_shots)[0][1]\n else:\n shot = 'NOTHING'\n\n return shot",
"def shoot(self):\n pt = self.pt()\n assert pt >= 0\n m = self.mass()\n assert m >= 0\n sqrt_pt2_m2 = math.sqrt( pt**2 + m**2 )\n y = self.rap()\n e = sqrt_pt2_m2 * math.cosh(y)\n pz = sqrt_pt2_m2 * math.sinh(y)\n phi = self.phi()\n px = pt * math.cos(phi);\n py = pt * math.sin(phi);\n v4 = ROOT.TLorentzVector(px, py, pz, e)\n return v4",
"def hit(bx, by, r, px, py,h):\n if bx >= px:\n distance = bx - px\n else:\n distance = px - bx\n if py<=by and by<=py+h and distance <= r:\n return True\n else:\n return False",
"def sling_action():\n global mouse_distance\n global rope_length\n global angle\n global mouse_x_pos\n global mouse_y_pos\n\n #add code inside sling function\n # Fixing bird to the sling rope\n vec = vector((initial_x_sling, initial_y_sling), (mouse_x_pos, mouse_y_pos))\n unit_vec = unit_vector(vec)\n uv_1 = unit_vec[0]\n uv_2 = unit_vec[1]\n mouse_distance = distance(initial_x_sling, initial_y_sling, mouse_x_pos, mouse_y_pos) #point at which currrent bird id\n fix_pos = (uv_1*rope_length+initial_x_sling, uv_2*rope_length+initial_y_sling)\n highest_length = 102 #when stretched\n\n #to make bird stay within rope\n x_redbird = mouse_x_pos - 20\n y_redbird = mouse_y_pos - 20\n if mouse_distance > rope_length:\n pux, puy = fix_pos\n pux -= 20\n puy -= 20\n first_pos = pux, puy\n screen.blit(redbird, first_pos)\n second_pos = (uv_1*highest_length+initial_x_sling, uv_2*highest_length+initial_y_sling) #current position\n pygame.draw.line(screen, (255, 0, 0), (next_x_sling, next_y_sling), second_pos, 5) #catapult rope\n screen.blit(redbird, first_pos)\n pygame.draw.line(screen, (255, 0, 0), (initial_x_sling, initial_y_sling), second_pos, 5) #ANOTHER SIDE of catapult\n else:\n #when not fully stretched\n mouse_distance += 10\n third_pos = (uv_1*mouse_distance+initial_x_sling, uv_2*mouse_distance+initial_y_sling)\n pygame.draw.line(screen, (0, 0, 0), (next_x_sling, next_y_sling), third_pos, 5)\n screen.blit(redbird, (x_redbird, y_redbird))\n pygame.draw.line(screen, (0, 0, 0), (initial_x_sling, initial_y_sling), third_pos, 5)\n # Angle of impulse\n\n change_in_y = mouse_y_pos - initial_y_sling\n change_in_x = mouse_x_pos - initial_x_sling\n if change_in_x == 0:\n dx = 0.00000000000001\n angle = math.atan((float(change_in_y))/change_in_x)",
"def shoot_fire(self, camera):\n\n cursor_pos = pygame.mouse.get_pos()\n tempMouseRect = pygame.Rect(cursor_pos, (0, 0))\n tempMouseRect = camera.use_cam_rect(tempMouseRect)\n\n relPos = tempMouseRect.topleft\n\n self.intMousePos = relPos\n ang = self.get_shoot_angle(relPos)\n #ang = math.radians(170 - math.degrees(ang))\n ang = math.radians(( (math.degrees(ang)+ 180 )))\n #ang = int(ang)\n\n if self.canShoot and self.ammo: #and self.is_good_angle(ang):\n self.canShoot = False\n self.ammo -= 1\n self.timer_fire = time.time()\n\n # decide starting position of fireball\n\n xPos = self.rect.centerx\n\n fire = powersC.Fireball((xPos, self.rect.centery), ang, self.direction)\n self.powerGroup.add(fire)",
"def createRay(scorefn, resolution, opponent, ball, angle, maxBounces):\n scorefn = targetGoal['score']\n createRay(scorefn, pos, pos, angle, 3)",
"def start_shooting(agent):\n agent.step = Step.Shooting\n target = shooting_target(agent)\n speed = get_speed(agent, target)\n agent.drive.target = target\n agent.drive.speed = speed",
"def shoot(self, direction):\n self.type = self.boss.get_bullet_type()\n if self.type == 'shotgun':\n try:\n dx = abs(Laser.List[-1].x - self.x)\n dy = abs(Laser.List[-1].y - self.y)\n if dx < 50 and dy < 50 and self.type == 'shotgun':\n return\n except Exception:\n pass\n\n if(self.type == 'shotgun'):\n sound = pygame.mixer.Sound(Laser.sounds['shotgun'])\n else:\n sound = pygame.mixer.Sound(Laser.sounds['automatic'])\n sound.play()\n self.direction = direction\n self.velx = Laser.velocity[self.direction]['x']\n self.vely = Laser.velocity[self.direction]['y']\n\n if self.direction == 'n':\n south = pygame.transform.rotate(Laser.imgs[self.type], 90) # CCW\n self.img = pygame.transform.flip(south, False, True)\n\n if self.direction == 's':\n self.img = pygame.transform.rotate(Laser.imgs[self.type], 90) # CCW\n\n if self.direction == 'e':\n self.img = pygame.transform.flip(Laser.imgs[self.type], True, False)\n\n if self.direction == 'w':\n self.img = Laser.imgs[self.type]\n\n Laser.List.append(self)",
"def move_ray(self, ray):\n\n # look to the next spot in the ray's trajectory\n next_coordinates = ray.get_next_location()\n next_location = self._board.get_board_square(next_coordinates)\n\n # check for a collisition - return if it occurs\n if ray.check_for_collision(next_location):\n return\n\n # if we didn't collide as we moved we need to look to check our\n # diagonals for atoms\n ccw_diag_coordinates, cw_diag_coordinates = ray.get_diagonals()\n\n ccw_diagonal = self._board.get_board_square(ccw_diag_coordinates)\n cw_diagonal = self._board.get_board_square(cw_diag_coordinates)\n\n if ccw_diagonal.is_atom() or cw_diagonal.is_atom():\n\n # If we're on our first move and the immediately diagonals contain an atom we have a reflection\n if ray.get_current_location() == ray.get_origin_location():\n\n terminal_square = self._board.get_board_square(\n ray.get_current_location())\n\n # let's the ray know it's finished and the square that it's an endpoint\n # self.end_ray(ray, terminal_square)\n return ray.record_edge_collision(terminal_square)\n\n # otherwise they cause a bend in the path\n else:\n # we have to calculate our trajectory based on the pull\n # of the atoms in our path\n ray.recalculate_trajectory(ccw_diagonal, cw_diagonal)\n\n # get the coordinates of the next location in our new trajectory\n next_coordinates = ray.get_next_location()\n\n # determine the next coordinate will result in a collision - return if it would\n if ray.check_for_collision(\n self._board.get_board_square(next_coordinates)):\n return\n\n # move the ray to the next step forward in its current trajectory\n ray.set_current_location(next_coordinates)\n\n # finally, recursively call our current function from the next step in its path.\n self.move_ray(ray)"
] | [
"0.78674567",
"0.6780156",
"0.67677057",
"0.61004895",
"0.5816195",
"0.5722863",
"0.5721122",
"0.56699514",
"0.566448",
"0.5559462",
"0.5555234",
"0.55531144",
"0.55380315",
"0.55212104",
"0.55208117",
"0.5480533",
"0.54755425",
"0.5391073",
"0.53854626",
"0.5371789",
"0.5366143",
"0.5360588",
"0.5359978",
"0.5329839",
"0.53139365",
"0.53127253",
"0.52707887",
"0.5238637",
"0.5200795",
"0.51774096"
] | 0.748143 | 1 |
atoms_left returns the number of unguessed atoms still left | def atoms_left(self):
return len(self._atoms) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def atoms_left(self):\n return self._atoms",
"def atoms_left(self):\r\n return self._board.get_atoms()",
"def num_pieces_left(self):\n return self.num_white_pieces + self.num_black_pieces",
"def count_mass_left(self):\n self.mass_left_count = int(np.sum(self.array))",
"def number_of_their_pieces_to_left(column):\n row = __get_top_of_stack(column)\n return number_pieces_of_type_in_direction(column, row, THEM, 'left')",
"def count_left_players(definition):\n return int(parse_player_definition(definition)[1]['left_players'])",
"def n_atoms(self) -> int:\n return 0 if self.atoms is None else len(self.atoms)",
"def natoms(self):\n return len(self.atoms)",
"def remaining(self):\n\t\tmines = sum(1 for _ in self.get_mines())\n\t\tmarked = sum(1 for x in range(self.width)\n\t\t\t\t\t for y in range(self.height) if self.marks[x][y] == FLAG)\n\t\treturn mines - marked",
"def get_num_atoms(self):\n\n return len(self.atoms)",
"def n_charged_atoms(mol: Mol) -> int:\n return sum([at.GetFormalCharge() != 0 for at in mol.GetAtoms()])",
"def count_atoms(self):\n n = 0\n for chain in self.iter_chains():\n n += chain.count_atoms()\n return n",
"def count_atoms(self):\n n = 0\n for chain in self.iter_chains():\n n += chain.count_atoms()\n return n",
"def _contributions_left(self):\r\n if self.is_complete:\r\n return 0, 0\r\n online_left = self.online_quota - self.stats.num_online_contributions\r\n if online_left < 0:\r\n online_left = 0\r\n tickets_left = self.num_tickets_total - self.stats.num_tickets_redeemed\r\n return (online_left, tickets_left)",
"def counting_effused_rafts(prev_centers, prev_count, curr_centers, curr_count, boundary_x, max_displacement):\n effused_raft_to_left = 0\n effused_raft_to_right = 0\n cost_matrix = scipy_distance.cdist(prev_centers[:prev_count], curr_centers[:curr_count], 'euclidean')\n # note that row index refers to previous raft number, column index refers to current raft number\n\n # select the boundary crossing to be in the middle of the cropped image, so only deals with existing rafts\n for raftID in np.arange(prev_count):\n if np.any(cost_matrix[raftID, :] < max_displacement): # raft still exist\n curr_raft_id = np.nonzero(cost_matrix[raftID, :] < max_displacement)[0][\n 0] # [0][0] is to convert array into scalar\n if (prev_centers[raftID, 0] >= boundary_x) and (curr_centers[curr_raft_id, 0] < boundary_x):\n effused_raft_to_left = effused_raft_to_left + 1\n elif (prev_centers[raftID, 0] < boundary_x) and (curr_centers[curr_raft_id, 0] >= boundary_x):\n effused_raft_to_right = effused_raft_to_right + 1\n return effused_raft_to_left, effused_raft_to_right",
"def number_of_my_pieces_to_left(column):\n row = __get_top_of_stack(column)\n return number_pieces_of_type_in_direction(column, row, ME, 'left')",
"def num_tickets_left(self):\r\n return self._contributions_left[1]",
"def number_of_atoms(self):\n if self._number_of_atoms is None:\n if self.mol is not None:\n self._number_of_atoms = len(self.mol.atoms)\n elif not self.is_ts:\n self._number_of_atoms = len(self.get_xyz().splitlines())\n return self._number_of_atoms",
"def count_all_atoms(self):\n n = 0\n for atm in self.atom_order_list:\n if isinstance(atm, Atom):\n n += 1\n else:\n n += len(atm)\n return n",
"def numAtoms(self):\n\n\t\tnatoms = 0\n\t\tfor chain in self.chain:\n\t\t\tfor residue in chain.residue:\n\t\t\t\tnatoms += residue.numAtoms()\n\n\t\treturn natoms",
"def edges_left(self):\n return self._edges_left",
"def num_online_left(self):\r\n return self._contributions_left[0]",
"def count_all_atoms(self):\n n = 0\n for chain in self.iter_chains():\n n += chain.count_all_atoms()\n return n",
"def n_atoms(self):\n return self._n_atoms",
"def left(self, obs, object):\n for i in range(int((self.no_rays-1)/2)):\n if(obs[self.listOfObjects.index(object)][i] > 0):\n # print(\"found \" + str(object) + \" left\")\n return True\n return False",
"def misplaced_nodes(puzzle):\n\n\tcount = 0\n\tfor i in range(puzzle.dimension):\n\t\tfor j in range(puzzle.dimension):\n\t\t\tif (puzzle.board[i][j] != puzzle.final_state[i][j] and puzzle.board[i][j] != 0): count += 1\n\n\treturn count",
"def numAtoms(self):\n return self.nAtoms",
"def has_left(self):\n return self.l is not None",
"def __len__(self):\n return len(self.qc_mol.atoms) + len(self.br_mol.atoms) + len(self.pc_mol.atoms)",
"def nAtoms(self):\n if self._c_result is not NULL:\n return self._c_result.n_atoms\n return 0"
] | [
"0.7521378",
"0.74469197",
"0.6469958",
"0.6355561",
"0.6071514",
"0.6057757",
"0.6004638",
"0.59833574",
"0.59616107",
"0.5921324",
"0.5888561",
"0.5864343",
"0.5864343",
"0.5818085",
"0.57945406",
"0.57681686",
"0.5768063",
"0.5694106",
"0.56850934",
"0.56428653",
"0.55966717",
"0.5595428",
"0.5573298",
"0.5572095",
"0.5534848",
"0.5533742",
"0.55281496",
"0.5520084",
"0.55118674",
"0.55111206"
] | 0.84319353 | 0 |
Test GRU gnmt encoder. time_major=True | def runGRUEncoder(self, encoder, num_layers):
inputs_ph = tf.placeholder(
dtype=tf.float32,
shape=(None, common_utils.TIME_STEPS, common_utils.DEPTH))
inputs_length_ph = tf.placeholder(dtype=tf.int32, shape=(None))
outputs, states = encoder.encode(
mode=tf.estimator.ModeKeys.TRAIN,
sequence_inputs=inputs_ph,
sequence_length=inputs_length_ph)
num_bi_layers = 1
num_uni_layers = num_layers - num_bi_layers
if num_uni_layers == 1:
states_bi_bw, states_uni = states
# states_bi_bw = (states_bi_bw,)
self.assertEqual(1, len(states_bi_bw))
self.assertEqual(num_uni_layers, len(states_uni))
# unlike lstm, whose states is a tuple of (c,h),
# gru states has only one element
# states_bi_bw[0] is a states tensor
states_list = [states_bi_bw[0]]
for i in range(num_uni_layers):
states_list.append(states_uni[i])
states = tf.convert_to_tensor(states_list)
else:
states_uni = states
self.assertEqual(num_uni_layers, len(states_uni))
states_list = []
for i in range(num_uni_layers):
states_list.append(states_uni[i])
states = tf.convert_to_tensor(states_list)
inputs, inputs_length = common_utils.get_encoder_test_inputs()
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
outputs, states = sess.run(
[outputs, states],
feed_dict={
inputs_ph: inputs,
inputs_length_ph: inputs_length
})
self.assertAllEqual(
[common_utils.TIME_STEPS, common_utils.BATCH_SIZE, common_utils.DEPTH],
outputs.shape)
if num_uni_layers == 1:
self.assertEqual(num_layers, len(states))
self.assertAllEqual(
[num_layers, common_utils.BATCH_SIZE, common_utils.DEPTH],
states.shape)
else:
self.assertEqual(num_uni_layers, len(states))
self.assertAllEqual(
[num_uni_layers, common_utils.BATCH_SIZE, common_utils.DEPTH],
states.shape) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_golay_module1(self):\r\n sent = golay.encode([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0])\r\n rec = sent[:-1] + 'C' # possible error here\r\n decoded, errors = golay.decode(rec)\r\n self.assertEqual(decoded, sent)\r\n self.assertLess(errors, 1.5)\r\n rec = sent[:-1] + 'T' # possible error here\r\n decoded, errors = golay.decode(rec)\r\n self.assertEqual(decoded, sent)\r\n self.assertLess(errors, 1.5)",
"def testEncoder(self):\n params = copy.copy(self.typical_instance)\n params.prob_f = 0.5\n params.prob_p = 0.5\n params.prob_q = 0.75\n\n rand_funcs = rappor.SimpleRandFuncs(params, MockRandom())\n rand_funcs.cohort_rand_fn = lambda a, b: a\n e = rappor.Encoder(params, 0, rand_funcs=rand_funcs)\n\n cohort, bloom_bits_irr = e.encode(\"abc\")\n\n self.assertEquals(0, cohort)\n self.assertEquals(0x000ffff, bloom_bits_irr)",
"def test_generate_raw(self):\n raw_result = self.raw_test_particle.generate_raw()\n decoded_raw = json.loads(raw_result)\n \n driver_time = decoded_raw[\"driver_timestamp\"]\n self.sample_raw_particle[\"driver_timestamp\"] = driver_time\n \n # run it through json so unicode and everything lines up\n standard = json.dumps(self.sample_raw_particle, sort_keys=True)\n self.assertEqual(raw_result, standard)",
"def test_encoder(self):\n from sosbeacon.utils import number_encode\n\n number = 123\n encoded = number_encode(number)\n self.assertEqual(encoded, 'b6')",
"def test_standard_tonnetz():\n run_framesync(Tonnetz)",
"def test_UniformTime_repr():",
"def test_save_tsc_old_version(uvm_nano):\n uvm_nano.start()\n uvm_nano.snapshot_full(target_version=\"0.24.0\")\n uvm_nano.check_log_message(\"Saving to older snapshot version, TSC freq\")",
"def create_training_record(data_path: str, path_to_gt: str, ratio: float):\n with open(data_path) as file:\n data = json.load(file)\n\n base = os.path.join(os.path.dirname(data_path), datetime.now().strftime('%Y_%m_%d_%H%M%S'))\n train_filename = '{}_{}'.format(base, 'train.tfrecords')\n test_filename = '{}_{}'.format(base, 'test.tfrecords')\n\n train_writer = tf.python_io.TFRecordWriter(train_filename)\n test_writer = tf.python_io.TFRecordWriter(test_filename)\n\n gt_reader = GroundTruthReader(path_to_gt)\n train_set_len = 1\n test_set_len = 1\n zeros = 0\n ones = 0\n\n for i, key in enumerate(data):\n if not i % 1000:\n print('Data: {}/{}'.format(i, len(data)))\n sys.stdout.flush()\n\n features, labels = compute_feature(key, data[key], gt_reader)\n\n for j, feat in enumerate(features):\n label = labels[j]\n if test_set_len / train_set_len >= ratio:\n # balance out training dataset (there are normally more zero- than one-labels)\n if (label == 0.0 and (zeros - ones <= 0)) or label == 1.0:\n train_set_len += 1\n if label == 1.0:\n ones += 1\n else:\n zeros += 1\n feature = {'train/feature': float_feature(feat),\n 'train/label': float_feature(labels[j])}\n example = tf.train.Example(features=tf.train.Features(feature=feature))\n train_writer.write(example.SerializeToString())\n else:\n test_set_len += 1\n feature = {'test/feature': float_feature(feat),\n 'test/label': float_feature(labels[j])}\n example = tf.train.Example(features=tf.train.Features(feature=feature))\n test_writer.write(example.SerializeToString())\n train_writer.close()\n sys.stdout.flush()",
"def test_includinggaps(self):\n model = substitution_model.TimeReversibleNucleotide(model_gaps=True)\n assert len(model.get_alphabet()) == 5",
"def mkrngs(self):\n self.bkg[[0, -1]] = False\n bkgr = self.Time[self.bkg ^ np.roll(self.bkg, -1)]\n self.bkgrng = np.reshape(bkgr, [bkgr.size // 2, 2])\n\n self.sig[[0, -1]] = False\n sigr = self.Time[self.sig ^ np.roll(self.sig, 1)]\n self.sigrng = np.reshape(sigr, [sigr.size // 2, 2])\n\n self.trn[[0, -1]] = False\n trnr = self.Time[self.trn ^ np.roll(self.trn, 1)]\n self.trnrng = np.reshape(trnr, [trnr.size // 2, 2])",
"def compile_gru_model(input_dim=101, output_dim=4563, recur_layers=3, nodes=1000,\n conv_context=11, conv_border_mode='valid', conv_stride=2,\n initialization='glorot_uniform', batch_norm=True, num_gpu=1):\n logger.info(\"Building gru model\")\n # Main acoustic input\n acoustic_input = Input(shape=(None, input_dim), name='acoustic_input')\n\n # Setup the network\n #conv_1d = Conv1D(nodes, conv_context, name='conv_1d',\n # padding='same', strides=conv_stride,\n # kernel_initializer=initialization,\n # activation='relu')(acoustic_input)\n conv_1d = Convolution1D(nodes, conv_context, name='conv1d',\n border_mode=conv_border_mode,\n subsample_length=conv_stride, init=initialization,\n activation='relu')(acoustic_input)\n if batch_norm:\n output = normalization.BatchNormalization(name='bn_conv_1d')(conv_1d, training=True)\n else:\n output = conv_1d\n\n for r in range(recur_layers):\n # output = GRU(nodes, activation='relu',\n # name='rnn_{}'.format(r + 1), init=initialization,\n # return_sequences=True)(output)\n output = Bidirectional(GRU(nodes, return_sequences=True),name='bi_lstm_{}'.format(r + 1))(output)\n if batch_norm:\n bn_layer = normalization.BatchNormalization(name='bn_rnn_{}'.format(r + 1),moving_mean_initializer='zeros')\n output = bn_layer(output, training=True)\n\n network_output = TimeDistributed(Dense(\n output_dim+1, name='dense', activation='softmax', init=initialization,\n ))(output)\n model = Model(input=acoustic_input, output=network_output)\n #model.conv_output_length = lambda x: conv_output_length(\n # x, conv_context, conv_border_mode, conv_stride)\n # model = ParallelModel(model, num_gpu)\n return model",
"async def test_floating_point_encoding(self, r):\n await r.flushdb()\n timestamp = 1349673917.939762\n await r.zadd('a', timestamp, 'a1')\n assert await r.zscore('a', 'a1') == timestamp",
"def test_transcoder(self, raw, value):\n assert DPTSceneNumber.to_knx(value) == DPTArray(raw)\n assert DPTSceneNumber.from_knx(DPTArray(raw)) == value",
"def test_socialledge_encode_decode_mux_0(self):\n\n db = cantools.db.File()\n filename = os.path.join('tests', 'files', 'socialledge.dbc')\n db.add_dbc_file(filename)\n\n frame_id = 200\n data = {\n 'SENSOR_SONARS_mux': 0,\n 'SENSOR_SONARS_err_count': 1,\n 'SENSOR_SONARS_left': 2,\n 'SENSOR_SONARS_middle': 3,\n 'SENSOR_SONARS_right': 4,\n 'SENSOR_SONARS_rear': 5\n }\n\n encoded = db.encode_message(frame_id, data)\n self.assertEqual(encoded, b'\\x10\\x00\\x14\\xe0\\x01( \\x03')\n\n decoded = db.decode_message(frame_id, encoded)\n self.assertEqual(decoded, data)",
"def test_gendaymtx():\n # sky.gendaymtx(\n # sun_mtx, 6, data=wea_data, meta=wea_metadata, direct=True, onesun=True\n # )\n pass",
"def ug(micrograms):\n return Unit(micrograms,\"microgram\")",
"def testInitialize(self):\n golang_epoch = golang_time.GolangTimeEpoch()\n self.assertIsNotNone(golang_epoch)",
"def swiss_to_gts(v):\n return v - np.array([667400, 158800, 1700])",
"def test_excludeinggaps(self):\n model = substitution_model.TimeReversibleNucleotide(model_gaps=False)\n assert len(model.get_alphabet()) == 4",
"def RLenc(img,order='F',format=True):\n bytes = img.reshape(img.shape[0] * img.shape[1], order=order)\n runs = [] ## list of run lengths\n\n # RLeC < 30 Drop\n gt0 = np.where(bytes > 0)[0]\n #print(\"RlenC=%d\",len(gt0))\n #if len(gt0) < 3600 or len(gt0) > 16000: # 70x70 consider as empty\n if len(gt0) < 3600: # 70x70 consider as empty\n return '' \n\n r = 0 ## the current run length\n pos = 1 ## count starts from 1 per WK\n for c in bytes:\n if ( c == 0 ):\n if r != 0:\n runs.append((pos, r))\n pos+=r\n r=0\n pos+=1\n else:\n r+=1\n\n #if last run is unsaved (i.e. data ends with 1)\n if r != 0:\n runs.append((pos, r))\n pos += r\n r = 0\n\n if format:\n z = ''\n \n for rr in runs:\n #if rr[1] > 1: # drop single point \n z+='{} {} '.format(rr[0],rr[1])\n return z[:-1]\n else:\n return runs",
"def test_encoder(device='/gpu:0'):\n tf.reset_default_graph()\n B, H, W, C = 64, 256, 256, 1\n latent_dim = 16\n with tf.device(device):\n gray_imgs = tf.zeros((B, H, W, C))\n latent_samples, latent_mean, latent_sd = encoder(gray_imgs, latent_dim)\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n latent_samples_np, latent_mean_np, latent_sd_np = sess.run([latent_samples, latent_mean, latent_sd])\n print('Output shape should be (%d, %d)' % (B, latent_dim))\n print('latent_samples shape: ' + str(latent_samples_np.shape))\n print('latent_mean shape: ' + str(latent_mean_np.shape))\n print('latent_sd shape: ' + str(latent_sd_np.shape))",
"def gnss_ins_sim_recorder():\n # ensure gnss_ins_sim_node is unique:\n rospy.init_node('gnss_ins_sim_recorder_node')\n\n # parse params:\n motion_def_name = rospy.get_param('/gnss_ins_sim_recorder_node/motion_file')\n sample_freq_imu = rospy.get_param('/gnss_ins_sim_recorder_node/sample_frequency/imu')\n sample_freq_gps = rospy.get_param('/gnss_ins_sim_recorder_node/sample_frequency/gps')\n topic_name_imu = rospy.get_param('/gnss_ins_sim_recorder_node/imu_topic_name')\n topic_name_odom = rospy.get_param('/gnss_ins_sim_recorder_node/odom_gt_topic_name')\n rosbag_output_path = rospy.get_param('/gnss_ins_sim_recorder_node/output_path')\n rosbag_output_name = rospy.get_param('/gnss_ins_sim_recorder_node/output_name')\n gt_output_path = rospy.get_param(\"gnss_ins_sim_recorder_node/groundtruth_output_path\")\n gt_output_name = rospy.get_param(\"gnss_ins_sim_recorder_node/groundtruth_output_name\")\n\n # generate simulated data:\n motion_def_path = os.path.join(\n rospkg.RosPack().get_path('gnss_ins_sim'), 'config', 'motion_def', motion_def_name\n )\n imu_simulator = get_gnss_ins_sim(\n # motion def file:\n motion_def_path,\n # gyro-accel/gyro-accel-mag sample rate:\n sample_freq_imu,\n # GPS sample rate:\n sample_freq_gps\n )\n # gt_output_file = open(os.path.join(gt_output_path, gt_output_name), 'w')\n with rosbag.Bag(\n os.path.join(rosbag_output_path, rosbag_output_name), 'w'\n ) as bag:\n # get timestamp base:\n timestamp_start = rospy.Time.now()\n origin_pos = [0.0, 0.0, 0.0]\n for i, measurement in enumerate(imu_simulator):\n if i == 0:\n origin_pos = [measurement['data']['ref_pos_x'],\n measurement['data']['ref_pos_y'],\n measurement['data']['ref_pos_z']]\n # init:\n msg_imu = Imu()\n timestamp = timestamp_start + rospy.Duration.from_sec(measurement['stamp'])\n # a. set header:\n msg_imu.header.frame_id = 'NED'\n msg_imu.header.stamp = timestamp\n # b. set orientation estimation:\n msg_imu.orientation.x = 0.0\n msg_imu.orientation.y = 0.0\n msg_imu.orientation.z = 0.0\n msg_imu.orientation.w = 1.0\n # c. 
gyro:\n msg_imu.angular_velocity.x = measurement['data']['gyro_x']\n msg_imu.angular_velocity.y = measurement['data']['gyro_y']\n msg_imu.angular_velocity.z = measurement['data']['gyro_z']\n msg_imu.linear_acceleration.x = measurement['data']['accel_x']\n msg_imu.linear_acceleration.y = measurement['data']['accel_y']\n msg_imu.linear_acceleration.z = measurement['data']['accel_z']\n\n msg_odom = Odometry()\n msg_odom.header.stamp = timestamp\n msg_odom.header.frame_id = 'inertial'\n\n # b.set child frame id:\n msg_odom.child_frame_id = 'inertial'\n\n # b.set orientation:\n msg_odom.pose.pose.orientation.x = measurement['data']['ref_att_quat_q1']\n msg_odom.pose.pose.orientation.y = measurement['data']['ref_att_quat_q2']\n msg_odom.pose.pose.orientation.z = measurement['data']['ref_att_quat_q3']\n msg_odom.pose.pose.orientation.w = measurement['data']['ref_att_quat_q0']\n\n # c.set position:\n msg_odom.pose.pose.position.x = measurement['data']['ref_pos_x'] - origin_pos[0]\n msg_odom.pose.pose.position.y = measurement['data']['ref_pos_y'] - origin_pos[1]\n msg_odom.pose.pose.position.z = measurement['data']['ref_pos_z'] - origin_pos[2]\n\n # d.set velocity:\n msg_odom.twist.twist.linear.x = measurement['data']['ref_vel_x']\n msg_odom.twist.twist.linear.y = measurement['data']['ref_vel_y']\n msg_odom.twist.twist.linear.z = measurement['data']['ref_vel_z']\n # write:\n bag.write(topic_name_imu, msg_imu, msg_imu.header.stamp)\n bag.write(topic_name_odom, msg_odom, msg_odom.header.stamp)\n\n # gt_output_file.write(\"{0} {1} {2} {3} {4} {5} {6} {7}\\n\"\n # .format(timestamp,\n # msg_odom.pose.pose.position.x,\n # msg_odom.pose.pose.position.y,\n # msg_odom.pose.pose.position.z,\n # msg_odom.pose.pose.orientation.x,\n # msg_odom.pose.pose.orientation.y,\n # msg_odom.pose.pose.orientation.z,\n # msg_odom.pose.pose.orientation.w))\n # gt_output_file.close()",
"def time_encode(self):\n for ii in range(100):\n for fragment in self.msg.encode_msg(1, 16382):\n pass",
"def rc4_prga(r, t: int):\n w = 256\n i = j = 0\n s = BitArray()\n\n print(\"CHANGE THE STREAM LENGTH HERE !!!\")\n t = t // 8\n\n for l in range(t):\n i = (i + 1) % w\n j = (j + r[i]) % w\n r[i], r[j] = r[j], r[i]\n\n k = r[(r[i] + r[j]) % w]\n s += Bits(bytearray(k.to_bytes(1, byteorder='big')))\n\n debug(True, fun_name + \" : stream = \" + str(s))\n return s",
"def define_nmt(hidden_size, batch_size, en_timesteps, en_vsize, fr_timesteps, fr_vsize):\n\n # Define an input sequence and process it.\n if batch_size:\n encoder_inputs = Input(batch_shape=(batch_size, en_timesteps, en_vsize), name='encoder_inputs')\n decoder_inputs = Input(batch_shape=(batch_size, fr_timesteps - 1, fr_vsize), name='decoder_inputs')\n else:\n encoder_inputs = Input(shape=(en_timesteps, en_vsize), name='encoder_inputs')\n if fr_timesteps:\n decoder_inputs = Input(shape=(fr_timesteps - 1, fr_vsize), name='decoder_inputs')\n else:\n decoder_inputs = Input(shape=(None, fr_vsize), name='decoder_inputs')\n\n # Encoder GRU\n encoder_gru = GRU(hidden_size, return_sequences=True, return_state=True, name='encoder_gru')\n encoder_out, encoder_state = encoder_gru(encoder_inputs)\n\n # Set up the decoder GRU, using `encoder_states` as initial state.\n decoder_gru = GRU(hidden_size, return_sequences=True, return_state=True, name='decoder_gru')\n decoder_out, decoder_state = decoder_gru(decoder_inputs, initial_state=encoder_state)\n\n # Attention layer\n attn_layer = AttentionLayer(name='attention_layer')\n attn_out, attn_states = attn_layer([encoder_out, decoder_out])\n\n # Concat attention input and decoder GRU output\n decoder_concat_input = Concatenate(axis=-1, name='concat_layer')([decoder_out, attn_out])\n\n # Dense layer\n dense = Dense(fr_vsize, activation='softmax', name='softmax_layer')\n dense_time = TimeDistributed(dense, name='time_distributed_layer')\n decoder_pred = dense_time(decoder_concat_input)\n\n # Full model\n full_model = Model(inputs=[encoder_inputs, decoder_inputs], outputs=decoder_pred)\n full_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics = ['accuracy'])\n\n full_model.summary()\n\n \"\"\" Inference model \"\"\"\n batch_size = 1\n\n \"\"\" Encoder (Inference) model \"\"\"\n encoder_inf_inputs = Input(batch_shape=(batch_size, en_timesteps, en_vsize), name='encoder_inf_inputs')\n encoder_inf_out, encoder_inf_state = encoder_gru(encoder_inf_inputs)\n encoder_model = Model(inputs=encoder_inf_inputs, outputs=[encoder_inf_out, encoder_inf_state])\n\n \"\"\" Decoder (Inference) model \"\"\"\n decoder_inf_inputs = Input(batch_shape=(batch_size, 1, fr_vsize), name='decoder_word_inputs')\n encoder_inf_states = Input(batch_shape=(batch_size, en_timesteps, hidden_size), name='encoder_inf_states')\n decoder_init_state = Input(batch_shape=(batch_size, hidden_size), name='decoder_init')\n\n decoder_inf_out, decoder_inf_state = decoder_gru(decoder_inf_inputs, initial_state=decoder_init_state)\n attn_inf_out, attn_inf_states = attn_layer([encoder_inf_states, decoder_inf_out])\n decoder_inf_concat = Concatenate(axis=-1, name='concat')([decoder_inf_out, attn_inf_out])\n decoder_inf_pred = TimeDistributed(dense)(decoder_inf_concat)\n decoder_model = Model(inputs=[encoder_inf_states, decoder_init_state, decoder_inf_inputs],\n outputs=[decoder_inf_pred, attn_inf_states, decoder_inf_state])\n\n return full_model, encoder_model, decoder_model",
"def test_pretrained():\n\n model = get_model()\n nums = generate_numbers()\n b = get_B(base=10, precision=[7, -9], size=1)[0]\n X = np.zeros((len(nums), 2 * len(b)))\n Y = np.zeros((len(nums), 1))\n\n for i, num in enumerate(nums):\n X[i] = encode(num, b)\n Y[i][0] = num\n\n loss = model.evaluate(x=X, y=Y)\n\n assert loss < 1e-5",
"def test_tte5_short_write_tile_signature(self):\n filename = str(self.temp_j2k_filename)\n xtx5_setup(filename, short_sig=True)\n self.assertTrue(True)",
"def benchmark_synth_1gpu_gpuparams(self):\n params = self._shared_params()._replace(num_gpus=1)\n self._run_benchmark(params)",
"def test_enc_FOR_MOTHER_RUSSIA(self):\n # test machine\n e1 = core.Machine(plugboard=['ZU', 'HL', 'CQ', 'WM', 'OA', 'PY', 'EB', 'TR', 'DN', 'VI'], settings=['X','I','S'],rotors=[erotors.M3_IV,erotors.M3_V, erotors.M3_II], offsets=['N','O','C'], reflector=reflectors.B)\n\n # set state\n e1.encrypt('MLD')\n \n # assert encryption output\n self.assertEqual(e1._buffer.decode(), 'DOR')\n\n\n e1 = core.Machine(plugboard=['ZU', 'HL', 'CQ', 'WM', 'OA', 'PY', 'EB', 'TR', 'DN', 'VI'], settings=['X','I','S'],rotors=[erotors.M3_IV,erotors.M3_V, erotors.M3_II], offsets=['R','O','D'], reflector=reflectors.B)\n\n\n # set state\n e1.encrypt('UMDPQ CUAQN LVVSP IARKC TTRJQ KCFPT OKRGO ZXALD RLPUH AUZSO SZFSU GWFNF DZCUG VEXUU LQYXO TCYRP SYGGZ HQMAG PZDKC KGOJM MYYDD H')\n\n print(e1._buffer.decode())\n\n self.assertEqual(e1._buffer.decode(), \"GROUP SOUTH COMMA NDFRO MGENP AULUS XSIXT HARMY ISENC IRCLE DXOPE RATIO NBLAU FAILE DXCOM MENCE RELIE FOPER ATION IMMED IATEL Y\")",
"def test_gregorian_mismatch(self):\n date = datetime(2017, 1, 1)\n prices = [2] * 7\n expected_sequence = 27\n expected_week = 52\n self.assertEqual(\n star_barcode.date_to_sequence_and_week(\n date=date, price_codes=prices),\n (expected_sequence, expected_week)\n )"
] | [
"0.53550947",
"0.5301906",
"0.5206506",
"0.49940667",
"0.49887952",
"0.49855092",
"0.49552405",
"0.48806253",
"0.4856835",
"0.48504332",
"0.48328564",
"0.48092747",
"0.48080942",
"0.477451",
"0.475015",
"0.47448063",
"0.47433698",
"0.47394577",
"0.47322595",
"0.47050416",
"0.4703761",
"0.47010073",
"0.47007525",
"0.47005183",
"0.4693379",
"0.469055",
"0.46904805",
"0.46882266",
"0.46831015",
"0.46646288"
] | 0.5863373 | 0 |
Create a CourseGraph, fetching unitary weights and edge weights from database, creating CourseNodes for each course, and | def __init__(self, database, session, max_suggestions=5, max_courses=30, cache_mult=4):
self._nodes = dict() # dict with courseid keys, CourseNode vals
self._max_suggestions = max_suggestions
self._max_courses = max_courses
self._cache_mult = cache_mult
db = database
# Get dict mapping courses to unitary weights
unitary_dict = db.get_unitary_dict(session)
# Get dict mapping courses to adjacent courses and weights
edge_dict = db.get_edges_dict(session)
# Create CourseNodes
for courseid in unitary_dict:
courseNode = CourseGraph.CourseNode(courseid=courseid, edges=dict(), popularity=unitary_dict[courseid])
self._nodes[courseid] = courseNode
# Create course edge dict for each CourseNode
for courseid in edge_dict:
node = self._nodes[courseid] # get node of interest
adj_courses = edge_dict[courseid] # get inner dict {otherid: edge_weight}
for otherid in adj_courses:
other_node = self._nodes[otherid]
node.addEdge(other_node, adj_courses[otherid]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _CreateGraph(self):\n self.nodes = []\n self.edges = []\n for i, r in self.airports.set_index('airport_id').iterrows():\n self.nodes.append((i,r.to_dict()))\n for i, r in self.routes.set_index(['src_id','dst_id']).iterrows():\n self.edges.append((i[0],i[1],r.to_dict()))\n # print('node ex: {}'.format(self.nodes[0]))\n # print('edge ex: {}'.format(self.edges[0]))\n\n self.graph = self._CreateAdjacencyListGraph()",
"def prepare_graph(\n self,\n adjacency,\n weights,\n weighted=False,\n undirected=False,\n force_dense=True,\n noselfloop=True,\n verbose=True,\n ):\n\n # df_adj = pd.read_csv(in_folder + adj_name, index_col=0) # read adjacency file\n print(\"\\nAdjacency shape: {0}\".format(adjacency.shape), flush=True)\n\n # create the graph adding nodes and edges\n A = self.read_graph(\n adj=adjacency,\n weights=weights,\n weighted=weighted,\n undirected=undirected,\n noselfloop=noselfloop,\n verbose=verbose,\n )\n\n nodes = list(A[0].nodes)\n print(\"\\nNumber of nodes =\", len(nodes), flush=True)\n print(\"Number of layers =\", len(A), flush=True)\n if verbose:\n self.print_graph_stat(A)\n\n # save the multilayer network in a tensor with all layers\n if force_dense:\n B = self.build_B_from_A(A, nodes=nodes)\n else:\n B = self.build_sparse_B_from_A(A)\n\n return A, B, nodes",
"def build_graph(self):\n\t\tself._create_placeholders()\n\t\tself._create_embedding()\n\t\tself._create_recurrent_layers()\n\t\tself._create_de_embedding()\n\t\tself._create_loss()\n\t\tself._create_optimizer()\n\t\tself._create_summaries()",
"def build_graph(self):\n self.__create_placeholders()\n self.__create_encoder()\n self.__create_latent()\n self.__create_decoder()\n self.__create_loss()\n self.__create_generate()\n self.__create_reconstruct()\n self.__create_optimizer()\n self.__create_summary()",
"def init_graph(self):\n import dgl\n\n adj_list = []\n for rel_type in range(1, self.n_relations, 1):\n edge_idxs = self.ckg.filter_edges(\n lambda edge: edge.data[\"relation_id\"] == rel_type\n )\n sub_graph = (\n dgl.edge_subgraph(self.ckg, edge_idxs, preserve_nodes=True)\n .adjacency_matrix(transpose=False, scipy_fmt=\"coo\")\n .astype(\"float\")\n )\n rowsum = np.array(sub_graph.sum(1))\n d_inv = np.power(rowsum, -1).flatten()\n d_inv[np.isinf(d_inv)] = 0.0\n d_mat_inv = sp.diags(d_inv)\n norm_adj = d_mat_inv.dot(sub_graph).tocoo()\n adj_list.append(norm_adj)\n\n final_adj_matrix = sum(adj_list).tocoo()\n indices = torch.LongTensor([final_adj_matrix.row, final_adj_matrix.col])\n values = torch.FloatTensor(final_adj_matrix.data)\n adj_matrix_tensor = torch.sparse.FloatTensor(indices, values, self.matrix_size)\n return adj_matrix_tensor.to(self.device)",
"def _initilise_graph_db(self):\n for collector in self.collectors:\n collector.init_graph_db()",
"def __init__(self, nodes, edges, start_kind='Compound', end_kind='Disease', max_length=4, w=0.4, n_jobs=1):\n # Initialize important class variables\n self.w = w\n self.n_jobs = n_jobs\n self.metagraph = None\n self.start_kind = start_kind\n self.end_kind = end_kind\n\n # Placeholders for variables to be defined later\n self.node_file = None\n self.edge_file = None\n self.nodes = None\n self.metaedges = None\n self.adj_matrices = None\n self.out_degree = dict()\n self.in_degree = dict()\n self.degree_weighted_matrices = None\n\n # Mappers to be used later\n self.nid_to_index = None\n self.index_to_nid = None\n self.id_to_metanode = None\n self.metanode_to_ids = None\n self.nid_to_name = None\n self.metanode_to_edges = dict()\n self._modified_edges = None\n self._weighted_modified_edges = None\n self._orig_in_degree = dict()\n self._orig_out_degree = dict()\n\n # Read and/or store nodes as DataFrame\n if type(nodes) == str:\n self.node_file = nodes\n print('Reading file information...')\n self._read_node_file()\n elif type(nodes) == pd.DataFrame:\n self.node_df = gt.remove_colons(nodes).copy()\n self._validate_nodes()\n\n # Read and/or store edges as DataFrame\n if type(edges) == str:\n self.edge_file = edges\n self._read_edge_file()\n elif type(edges) == pd.DataFrame:\n self.edge_df = gt.remove_colons(edges).copy()\n self._validate_edges()\n\n # Process the Node and Edge information\n print('Processing node and edge data...')\n self._process_nodes()\n self._process_edges()\n\n # Initalize the metagraph and determine the metapaths available\n self._make_metagraph()\n self._determine_metapaths(start_kind, end_kind, max_length)\n self._map_metanodes_to_metaedges()\n\n # Generate the adjacency matrices.\n print('Generating adjacency matrices...')\n time.sleep(0.5)\n self._generate_adjacency_matrices()\n\n # Make Degree Weighted matrices.\n print('\\nDetermining degrees for each node and metaedge'.format(w))\n time.sleep(0.5)\n self._compute_node_degrees()\n\n # Make Degree Weighted matrices.\n print('\\nWeighting matrices by degree with dampening factor {}...'.format(w))\n time.sleep(0.5)\n self._generate_weighted_matrices()",
"def neo4j_to_lkg():\n node_types = [\"judge\", \"keyword\", \"case\", \"catch\", \"act\", \"year\"]\n from backend.graph_formation.base.legal_knowledge_graph import LegalKnowledgeGraph\n\n lkg = LegalKnowledgeGraph()\n db = GraphDatabase(ENV[\"DB_URL\"], username=ENV[\"DB_USERNAME\"], password=ENV[\"DB_PASSWORD\"])\n # Authentication for NEO4J Browser\n\n for node_type in node_types:\n q = \"MATCH (c:{}) return c\".format(node_type) #Quering for all nodes in the graph\n results = db.query(q)\n for record in results:\n props={}\n node = record[0]\n if node:\n label = node[\"metadata\"][\"labels\"]\n node_id = node[\"data\"][\"id\"]\n node[\"data\"].pop(\"id\",None)\n props = node[\"data\"]\n props[\"type\"] = label\n lkg.add_node(id, **props)\n for node_type_1 in node_types:\n for node_type_2 in node_types:\n q = \"MATCH (c:{})-[r]->(m:{}) return c,m\".format(node_type_1, node_type_2) # Quering for all Relationships in the graph\n results = db.query(q)\n for record in results:\n node1 , node2 = record\n lkg.add_edge(node1[\"data\"][\"id\"], node2[\"data\"][\"id\"])\n return(lkg)",
"def build_graph(self, name='', dump=None, nodes=None, depth_goal=1,\n filter_top=True, remove_isolates=True, add_years=True,\n fill_empty_years=True, model=None, dct=None,\n compute_core_periphery=True, compute_communities=True,\n compute_community_cores=True):\n self.graph = nx.DiGraph()\n self.graph.name = name\n if not dump:\n raise AttributeError('wiki.Net: Provide wiki.Dump object.')\n print('wiki.Net: traversing Wikipedia...')\n Net.bft(self.graph, dump, nodes, depth_goal=depth_goal, \n nodes=nodes, filter_top=filter_top)\n if remove_isolates:\n print('wiki.Net: removing isolates...')\n self.graph.remove_nodes_from(nx.isolates(self.graph))\n if add_years:\n print('wiki.Net: adding years...')\n for node in self.graph.nodes:\n dump.load_page(node)\n self.graph.nodes[node]['year'] = dump.years[0] if len(dump.years)>0 else []\n self.graph.graph['num_years'] = sum(\n [bool(y) for y in nx.get_node_attributes(self.graph, 'year').values()]\n )\n if fill_empty_years:\n print('wiki.Net: filling empty years...')\n nodes_filled = True\n while nodes_filled:\n nodes_filled = Net.fill_empty_nodes(self.graph, full_parents=True)\n nodes_filled = True\n while nodes_filled:\n nodes_filled = Net.fill_empty_nodes(self.graph, full_parents=False)\n for node in self.graph.nodes:\n if not self.graph.nodes[node]['year']:\n self.graph.nodes[node]['year'] = Net.MAX_YEAR\n if model and dct:\n print('wiki.Net: calculating weights...')\n self.graph.graph['tfidf'] = Net.compute_tfidf(self.nodes, dump, model, dct)\n Net.set_weights(self.graph)\n if compute_core_periphery:\n print('wiki.Net: computing core-periphery...')\n Net.assign_core_periphery(self.graph)\n if compute_communities:\n print('wiki.Net: computing communities...')\n Net.assign_communities(self.graph)\n if compute_community_cores:\n print('wiki.Net: computing cores within communities...')\n Net.assign_cores_to_communities(self.graph)",
"def build_graph(self):\n edge_data_by_type, all_edges, all_nodes = self.load_training_data(\n self.train_edges_file,\n slf_loop=self.config['slf_loop'],\n symmetry_edge=self.config['symmetry_edge'])\n\n num_nodes = len(all_nodes)\n node_features = {\n 'index': np.array(\n [i for i in range(num_nodes)], dtype=np.int64).reshape(-1, 1)\n }\n\n self.graph = heter_graph.HeterGraph(\n num_nodes=num_nodes,\n edges=edge_data_by_type,\n node_types=None,\n node_feat=node_features)\n\n self.edge_types = sorted(self.graph.edge_types_info())\n logging.info('total %d nodes are loaded' % (self.graph.num_nodes))",
"def build_graph():\n file = open(\"../data/data.json\", \"r\")\n data = json.load(file)\n node_dict = {}\n for id in data:\n node_dict[id] = Node(data[id][\"name\"], data[id][\"product\"], data[id][\"production_volume\"])\n for id in data:\n current_node = node_dict[id]\n for costumer_id in data[id][\"costumers\"]:\n current_node.costumers.append(node_dict[str(costumer_id)])\n current_node.out_edge_capacity_drop[node_dict[str(costumer_id)].name] = 0\n for supplier_id in data[id][\"suppliers\"]:\n current_node.suppliers.append(node_dict[str(supplier_id)])\n current_node.in_edge_capacity_drop[node_dict[str(supplier_id)].name] = 0\n return node_dict",
"def create_graph(self, lat, lon):\n # Open connection to the database (nodes)\n cur = armaps.model.get_db()\n\n # Get the waypoints\n cur.execute(\n \"SELECT * FROM waypoints WHERE venue_id = %s\", \n (self.venue_id,)\n )\n waypoints = cur.fetchall()\n\n # Get the paths (edges)\n cur.execute(\n \"SELECT * FROM paths WHERE venue_id = %s\",\n (self.venue_id,)\n )\n paths = cur.fetchall()\n\n # Transform list of waypoints into dictionary with key = waypoint_id\n for waypoint in waypoints:\n self.waypoints[int(waypoint[\"waypoint_id\"])] = {\n \"lat\": float(waypoint[\"latitude\"]),\n \"lon\": float(waypoint[\"longitude\"]),\n \"waypoint_id\": int(waypoint[\"waypoint_id\"])\n }\n\n # Calculate weights of edges in graph\n for path in paths:\n # Get two nodes (waypoints) associated with edge\n inNode = int(path[\"innode\"])\n outNode = int(path[\"outnode\"])\n\n # Get the coordinates of nodes\n inNode_coords = (self.waypoints[inNode][\"lat\"], self.waypoints[inNode][\"lon\"])\n outNode_coords = (self.waypoints[outNode][\"lat\"], self.waypoints[outNode][\"lon\"])\n distance = geopy.distance.distance(inNode_coords, outNode_coords).miles\n\n # Add to graph (both ways for undirected)\n self.graph.add_edge(inNode, outNode, distance)\n self.graph.add_edge(outNode, inNode, distance)",
"def populate_graph(self):",
"def gen_graph(self):",
"def build_graph(self):\n for node in self.graph.nodes():\n self.c2py[node] = PyNode(node)\n for _input in node.inputs():\n if _input not in self.c2py:\n self.c2py[_input] = PyNode(_input, True)\n if _input in self.forward_edge:\n self.forward_edge[_input].append(node)\n else:\n self.forward_edge[_input] = [node]\n for output in node.outputs():\n if output not in self.c2py:\n self.c2py[output] = PyNode(output, True)\n if node in self.forward_edge:\n self.forward_edge[node].append(output)\n else:\n self.forward_edge[node] = [output]",
"def generate_graph(self):\n\t\tif self.joins == None:\n\t\t\tself.get_joins()\n\t\tprint('generating Networkx DiGraph object of {database} from query results'.format(**self.__dict__))\n\t\t# save distinct Child column values\n\t\tchilds = set([j.Child for j in self.joins])\n\t\t# save distinct Parent column values\n\t\tparents = set([j.Parent for j in self.joins])\n\t\t# save names of Leaf tables\n\t\tleafs = list(childs - parents)\n\t\tself._traverse_joins(leafs)",
"def create_wiki_graph(self):\n\n print 'Creating wiki corpus graph representation'\n\n for path, subdirs, files in os.walk(self.wk_path):\n\n here = os.path.split(path)[1]\n parent = os.path.split(os.path.split(path)[0])[1]\n\n self.categories.add_edge(parent, here)\n\n self.categories[parent][\"path\"] = path\n self.categories[here][\"path\"] = path\n\n for name in files:\n if fnmatch(name, \"*.yaml\") and \"Index\" not in name and \"index\" not in name: # check if there is a text file\n \n category_name = name[0:-5]\n yaml_file_path = os.path.join(\n path, category_name + \".yaml\")\n\n # yaml\n yaml_file = open(yaml_file_path, \"r\")\n docs = yaml.load_all(yaml_file)\n\n # category_name\n for doc in docs:\n cat_parent = doc[\"CategoryPath\"][0]\n\n self.categories.add_edge(\n slugify(cat_parent), slugify(category_name))\n self.categories[slugify(cat_parent)][\"path\"] = path\n self.categories[slugify(category_name)][\"path\"] = path\n\n for cat in doc[\"Categories\"][0][self.language]:\n self.categories.add_edge(\n slugify(category_name), slugify(cat))\n self.categories[slugify(cat)][\"path\"] = path\n\n print(\"The categories graph %s has %d nodes with %d edges\"\n % (self.categories.name,\n nx.number_of_nodes(self.categories),\n nx.number_of_edges(self.categories)))\n for node in nx.nodes(self.categories):\n self.get_corpus_from_node(node)\n\n pickle.dump(self.categories, open(self.graph_path, 'w'))\n\n print \"Graph saved as %s\"%(self.graph_path)",
"def generate_graph(self):\n glw = GraphLineWeights()\n\n node_id = 0\n last_key = list(self.storage.keys())[-1]\n\n for key in tqdm.tqdm(self.storage):\n for key_line in self.storage[key]:\n for node in self.storage[key][key_line]:\n # set unique node id and calculate centroid\n node.id = node_id\n node.center_x = node.left + int(node.width / 2)\n node.center_y = node.top + int(node.height / 2)\n node_id += 1\n for key in self.storage:\n for key_line in self.storage[key]:\n for node_with_id in self.storage[key][key_line]:\n # print(node_with_id.word)\n # print(node_with_id.left, node_with_id.top, node_with_id.width, node_with_id.height)\n # consider 4 sides: top, right, bottom, left\n # glw: 0 -> 1 -> 2 -> 3\n # 1. top, verified\n min_dist = self.get_top_node(node_with_id, key - 1, key_line, last_key)\n glw.add_node_id_connection(node_with_id.id, 0, node_with_id.top_node_id, min_dist)\n # 2. bottom\n min_dist = self.get_bottom_node(node_with_id, key + 1, key_line, last_key)\n glw.add_node_id_connection(node_with_id.id, 2, node_with_id.bottom_node_id, min_dist)\n # 3. left\n min_dist = self.get_left_node(node_with_id, key, key_line, last_key)\n glw.add_node_id_connection(node_with_id.id, 3, node_with_id.left_node_id, min_dist)\n # 4. right\n min_dist = self.get_right_node(node_with_id, key, key_line, last_key)\n glw.add_node_id_connection(node_with_id.id, 1, node_with_id.right_node_id, min_dist)\n\n return glw",
"def generate_courses():\r\n for category in CourseCategory.objects.all():\r\n Course.objects.create(name=category.name, category=category, is_active=True,\r\n is_featured=True)",
"def _build_graph(self):\n pass",
"def _construct_graph(self):\n raise NotImplementedError",
"def __init__(self, nodes, edges, weights='weight', start_kind='Compound', end_kind='Disease',\n scale_weights=True, max_length=4, w=0.4, n_jobs=1):\n\n super().__init__(nodes, edges, start_kind, end_kind, max_length, w, n_jobs)\n\n # Validate the weights\n if isinstance(weights, str):\n # Make sure that the weights is in the column\n assert weights in self.edge_df.columns\n # Ensure that weights are numberic\n assert np.issubdtype(self.edge_df[weights].dtype, np.number)\n # Store the column name\n self.weights = weights\n\n elif isinstance(weights, collections.Iterable):\n # Ensure that there's a weight for every edge\n assert len(weights) == len(self.edge_df)\n # Make sure the weights are numbers\n assert all(isinstance(w, (int, float)) for w in weights)\n # Store the weights and columname\n self.edge_df['weight'] = weights\n self.weights = 'weight'\n self.scale_weights = scale_weights\n if self.scale_weights:\n self.orig_weights = self.weights\n self._scale_weights_to_degree()\n self._scaling_skipped = False\n\n # Make special matrices required for weighted calculations\n self._generate_weighted_adj_matrices()\n self._degree_weight_weighted_matrices()\n self._modified_weighted_adj_matrices = None",
"def _generate_graph(self) -> None:\n self.g_ = nx.random_partition_graph(list(self._community_sizes),\n p_in=self.community_p_in,\n p_out=self.community_p_out,\n seed=self.seed)\n\n for _, nv in self.g_.nodes.data():\n nv[\"infected\"] = 0\n nv[\"immune\"] = False\n nv[\"alive\"] = True\n nv[\"_edges\"] = []\n nv[\"isolated\"] = False\n nv[\"mask\"] = 0.0",
"def build_graph(self):\n pass",
"def make_graph(self):\n\n # the root node\n self.graph.node(self.playbook_filename, style=\"dotted\", id=\"root_node\")\n\n # loop through the plays\n for play_counter, play in enumerate(self.playbook.get_plays(), 1):\n\n # the load basedir is relative to the playbook path\n if play._included_path is not None:\n self.data_loader.set_basedir(play._included_path)\n else:\n self.data_loader.set_basedir(self.playbook._basedir)\n self.display.vvv(\"Loader basedir set to {}\".format(self.data_loader.get_basedir()))\n\n play_vars = self.variable_manager.get_vars(play)\n play_hosts = [h.get_name() for h in self.inventory_manager.get_hosts(self.template(play.hosts, play_vars))]\n play_name = \"Play #{}: {} ({})\".format(play_counter, clean_name(play.get_name()), len(play_hosts))\n play_name = self.template(play_name, play_vars)\n\n self.display.banner(\"Graphing \" + play_name)\n\n play_id = \"play_\" + str(uuid.uuid4())\n\n self.graph_representation.add_node(play_id)\n\n with self.graph.subgraph(name=play_name) as play_subgraph:\n color, play_font_color = get_play_colors(play)\n # play node\n play_subgraph.node(play_name, id=play_id, style=\"filled\", shape=\"box\", color=color,\n fontcolor=play_font_color, tooltip=\" \".join(play_hosts))\n\n # edge from root node to plays\n play_edge_id = \"edge_\" + str(uuid.uuid4())\n play_subgraph.edge(self.playbook_filename, play_name, id=play_edge_id, style=\"bold\",\n label=str(play_counter), color=color, fontcolor=color)\n\n # loop through the pre_tasks\n self.display.v(\"Graphing pre_tasks...\")\n nb_pre_tasks = 0\n for pre_task_block in play.pre_tasks:\n nb_pre_tasks = self._include_tasks_in_blocks(current_play=play, graph=play_subgraph,\n parent_node_name=play_name, parent_node_id=play_id,\n block=pre_task_block, color=color,\n current_counter=nb_pre_tasks, play_vars=play_vars,\n node_name_prefix=\"[pre_task] \")\n\n # loop through the roles\n self.display.v(\"Graphing roles...\")\n role_number = 0\n for role in play.get_roles():\n # Don't insert tasks from ``import/include_role``, preventing duplicate graphing\n if role.from_include:\n continue\n\n # the role object doesn't inherit the tags from the play. 
So we add it manually.\n role.tags = role.tags + play.tags\n if not role.evaluate_tags(only_tags=self.options.tags, skip_tags=self.options.skip_tags,\n all_vars=play_vars):\n self.display.vv(\"The role '{}' is skipped due to the tags.\".format(role.get_name()))\n # Go to the next role\n continue\n\n role_number += 1\n role_name = \"[role] \" + clean_name(role.get_name())\n\n with self.graph.subgraph(name=role_name, node_attr={}) as role_subgraph:\n current_counter = role_number + nb_pre_tasks\n role_id = \"role_\" + str(uuid.uuid4())\n edge_id = \"edge_\" + str(uuid.uuid4())\n\n role_subgraph.node(role_name, id=role_id)\n # edge from play to role\n role_subgraph.edge(play_name, role_name, label=str(current_counter), color=color,\n fontcolor=color, id=edge_id)\n\n self.graph_representation.add_link(play_id, edge_id)\n self.graph_representation.add_link(edge_id, role_id)\n\n # loop through the tasks of the roles\n if self.options.include_role_tasks:\n role_tasks_counter = 0\n for block in role.compile(play):\n role_tasks_counter = self._include_tasks_in_blocks(current_play=play,\n graph=role_subgraph,\n parent_node_name=role_name,\n parent_node_id=role_id, block=block,\n color=color, play_vars=play_vars,\n current_counter=role_tasks_counter,\n node_name_prefix=\"[task] \")\n role_tasks_counter += 1\n self.display.v(\"{} roles added to the graph\".format(role_number))\n\n # loop through the tasks\n self.display.v(\"Graphing tasks...\")\n nb_tasks = 0\n for task_block in play.tasks:\n nb_tasks = self._include_tasks_in_blocks(current_play=play, graph=play_subgraph,\n parent_node_name=play_name, parent_node_id=play_id,\n block=task_block, color=color,\n current_counter=role_number + nb_pre_tasks,\n play_vars=play_vars, node_name_prefix=\"[task] \")\n\n # loop through the post_tasks\n self.display.v(\"Graphing post_tasks...\")\n for post_task_block in play.post_tasks:\n self._include_tasks_in_blocks(current_play=play, graph=play_subgraph, parent_node_name=play_name,\n parent_node_id=play_id, block=post_task_block, color=color,\n current_counter=nb_tasks, play_vars=play_vars,\n node_name_prefix=\"[post_task] \")\n\n self.display.banner(\"Done graphing {}\".format(play_name))\n self.display.display(\"\") # just an empty line\n # moving to the next play",
"def create_nodes(nd=None):\n\n if not nd:\n raise ValueError(\"No nodes data provided.\")\n\n nodes = []\n\n # Create Bus objects from buses table\n busd = {}\n\n for i, b in nd[\"buses\"].iterrows():\n if b[\"active\"]:\n bus = solph.Bus(label=b[\"label\"])\n nodes.append(bus)\n\n busd[b[\"label\"]] = bus\n if b[\"excess\"]:\n nodes.append(\n solph.Sink(\n label=b[\"label\"] + \"_excess\",\n inputs={\n busd[b[\"label\"]]: solph.Flow(\n variable_costs=b[\"excess costs\"]\n )\n },\n )\n )\n if b[\"shortage\"]:\n nodes.append(\n solph.Source(\n label=b[\"label\"] + \"_shortage\",\n outputs={\n busd[b[\"label\"]]: solph.Flow(\n variable_costs=b[\"shortage costs\"]\n )\n },\n )\n )\n\n # Create Source objects from table 'commodity sources'\n for i, cs in nd[\"commodity_sources\"].iterrows():\n if cs[\"active\"]:\n nodes.append(\n solph.Source(\n label=cs[\"label\"],\n outputs={\n busd[cs[\"to\"]]: solph.Flow(\n variable_costs=cs[\"variable costs\"]\n )\n },\n )\n )\n\n # Create Source objects with fixed time series from 'renewables' table\n for i, re in nd[\"renewables\"].iterrows():\n if re[\"active\"]:\n # set static outflow values\n outflow_args = {\n \"nominal_value\": re[\"capacity\"]\n }\n # get time series for node and parameter\n for col in nd[\"timeseries\"].columns.values:\n if col.split(\".\")[0] == re[\"label\"]:\n outflow_args[col.split(\".\")[1]] = nd[\"timeseries\"][col]\n\n # create\n nodes.append(\n solph.Source(\n label=re[\"label\"],\n outputs={\n busd[re[\"to\"]]: solph.Flow(**outflow_args)\n },\n )\n )\n\n # Create Sink objects with fixed time series from 'demand' table\n for i, de in nd[\"demand\"].iterrows():\n if de[\"active\"] and not pd.isnull(de['active']):\n # set static inflow values\n inflow_args = {\n \"nominal_value\": de[\"nominal value\"]\n }\n # get time series for node and parameter\n for col in nd[\"timeseries\"].columns.values:\n if col.split(\".\")[0] == de[\"label\"]:\n inflow_args[col.split(\".\")[1]] = nd[\"timeseries\"][col]\n\n # create\n nodes.append(\n solph.Sink(\n label=de[\"label\"],\n inputs={\n busd[de[\"from\"]]: solph.Flow(**inflow_args)\n },\n )\n )\n\n # Create Transformer objects from 'transformers' table\n for i, t in nd[\"transformers\"].iterrows():\n if t[\"active\"]:\n # set static inflow values\n inflow_args = {\"variable_costs\": t[\"variable input costs\"]}\n # get time series for inflow of transformer\n for col in nd[\"timeseries\"].columns.values:\n if col.split(\".\")[0] == t[\"label\"]:\n inflow_args[col.split(\".\")[1]] = nd[\"timeseries\"][col]\n # create\n nodes.append(\n solph.Transformer(\n label=t[\"label\"],\n inputs={busd[t[\"from\"]]: solph.Flow(**inflow_args)},\n outputs={\n busd[t[\"to\"]]: solph.Flow(nominal_value=t[\"capacity\"])\n },\n conversion_factors={busd[t[\"to\"]]: t[\"efficiency\"]},\n )\n )\n\n for i, s in nd[\"storages\"].iterrows():\n if s[\"active\"]:\n nodes.append(\n solph.components.GenericStorage(\n label=s[\"label\"],\n inputs={\n busd[s[\"bus\"]]: solph.Flow(\n nominal_value=s[\"capacity inflow\"],\n variable_costs=s[\"variable input costs\"],\n )\n },\n outputs={\n busd[s[\"bus\"]]: solph.Flow(\n nominal_value=s[\"capacity outflow\"],\n variable_costs=s[\"variable output costs\"],\n )\n },\n nominal_storage_capacity=s[\"nominal capacity\"],\n loss_rate=s[\"capacity loss\"],\n initial_storage_level=s[\"initial capacity\"],\n max_storage_level=s[\"capacity max\"],\n min_storage_level=s[\"capacity min\"],\n inflow_conversion_factor=s[\"efficiency inflow\"],\n 
outflow_conversion_factor=s[\"efficiency outflow\"],\n )\n )\n\n for i, p in nd[\"powerlines\"].iterrows():\n if p[\"active\"]:\n bus1 = busd[p[\"bus_1\"]]\n bus2 = busd[p[\"bus_2\"]]\n nodes.append(\n solph.custom.Link(\n label=\"powerline\" + \"_\" + p[\"bus_1\"] + \"_\" + p[\"bus_2\"],\n inputs={bus1: solph.Flow(), bus2: solph.Flow()},\n outputs={\n bus1: solph.Flow(nominal_value=p[\"capacity\"]),\n bus2: solph.Flow(nominal_value=p[\"capacity\"]),\n },\n conversion_factors={\n (bus1, bus2): p[\"efficiency\"],\n (bus2, bus1): p[\"efficiency\"],\n },\n )\n )\n\n return nodes",
"def build_graph(self):\n for each_list in self.lab.look():\n vertice = self._add_vertice(each_list)\n if vertice:\n self.unvisited.add(vertice)\n self.graph.addEdge((self.current, vertice))\n \n self.unvisited -= self.visited\n self._connect_neighbours()",
"def generate_model(self):\n rootpath = 'c:\\\\Users\\\\Gamelab\\\\Desktop\\\\RT\\\\Others\\\\Thesis\\\\Thesis_coding\\\\ABM\\\\' \n \n df = pd.read_csv(rootpath+'data\\\\subset_initialized_latlonvalues.csv')\n df = df.drop(columns='Unnamed: 0')\n households_in_block = {}\n household_ids_in_block = {}\n # holds all the graphs indexed by blockid [geoid]\n \n def add_and_remove_edges(G, p_new_connection, p_remove_connection): \n\n new_edges = [] \n rem_edges = [] \n for node in G.nodes(): \n # find the other nodes this one is connected to \n connected = [to for (fr, to) in G.edges(node)] \n # and find the remainder of nodes, which are candidates for new edges \n unconnected = [n for n in G.nodes() if not n in connected] \n\n # probabilistically add a random edge \n if len(unconnected): # only try if new edge is possible \n if random.random() < p_new_connection: \n new = random.choice(unconnected) \n G.add_edge(node, new) \n #print(\"\\tnew edge:\\t {} -- {}\".format(node, new) \n new_edges.append( (node, new) ) \n # book-keeping, in case both add and remove done in same cycle \n unconnected.remove(new) \n connected.append(new) \n\n # probabilistically remove a random edge \n if len(connected): # only try if an edge exists to remove \n if random.random() < p_remove_connection: \n remove = random.choice(connected) \n G.remove_edge(node, remove) \n #print \"\\tedge removed:\\t {} -- {}\".format(node, remove) \n rem_edges.append( (node, remove) ) \n # book-keeping, in case lists are important later? \n connected.remove(remove) \n unconnected.append(remove) \n return rem_edges, new_edges\n\n\n\n\n #now i need to get number of geoids unique \n for block in df['geoid'].unique(): \n G_temp=nx.Graph()\n households_in_block[block] = df[df['geoid']==block] # contains all the information about the households \n household_ids_in_block[block] = df[df['geoid']==block]['CASE_ID'].values \n # contains only their ID\n # you only need id to initialize a node\n tempdf = households_in_block[block]\n for household in household_ids_in_block[block]:\n lon = tempdf.loc[tempdf['CASE_ID']==household,'lon'].values[0]\n lat = tempdf.loc[tempdf['CASE_ID']==household,'lat'].values[0] \n \n G_temp.add_node(str(household), pos=(lon,lat))\n self.G.add_node(str(household), pos=(lon,lat))\n \n ## add G to the dictionary\n self.graph_dict[block] = G_temp\n \n \n rem_edges, new_edges = add_and_remove_edges(self.G, 0.5, 0.5)\n self.G.remove_edges_from(rem_edges)\n self.G.add_edges_from(new_edges)\n\n \n\n self.grid= NetworkGrid(self.G)\n \n for _, row in df.iterrows(): # index, row in ...\n \n agent = Household(unique_id = str(row['CASE_ID']),\n model = self, \n income = row['income'],\n age= row['age'],\n size= row['household_'],\n ami_category = row['ami_categ'],\n elec_consumption= row['elec_consumption'],\n attitude = row['attitude'],\n pbc = row['pbc'],\n subnorms = row['subnorms'],\n geoid = row['geoid'],\n tract = row['tract'],\n bgid = row['bgid'],\n adoption_status = 0)\n \n \n\n if agent:\n self.schedule.add(agent)\n y = row['lat']\n x = row['lon']\n self.grid.place_agent(agent, node_id=agent.unique_id)\n #self.space.place_agent(agent, (x, y))\n #agent.pos = (x, y)",
"def __create_graph(self):\n # create the nodes\n for h in range(self.height):\n row: List[JuncNode] = list()\n for w in range(self.width):\n jnodes: List[Node] = [self.add_node() for _ in range(4)]\n jn = JuncNode(jnodes, (h, w))\n row.append(jn)\n self.__juncs.append(row)\n # create all connections\n self.__create_connections()",
"def createOptimizedGraph(routes):\n\n\tgraphClass = Graph(routes, directed=True)\n\n\treturn graphClass.getGraph()"
] | [
"0.610946",
"0.6068693",
"0.5970327",
"0.59139115",
"0.5910554",
"0.59065074",
"0.5842171",
"0.58402723",
"0.5831642",
"0.5821705",
"0.58180076",
"0.5813589",
"0.5796841",
"0.57905227",
"0.5715409",
"0.5708784",
"0.56940794",
"0.5655929",
"0.5635845",
"0.5629975",
"0.56149757",
"0.56129605",
"0.56017387",
"0.5577498",
"0.5569718",
"0.55530834",
"0.5528388",
"0.55136675",
"0.55126846",
"0.5476829"
] | 0.7083467 | 0 |
Gets the crosslistings of the top edges from a course | def getTopEdgesFrom(self, session, courseid):
node = self.getNode(courseid) # get CourseNode
if not node:
return []
edges = node.getEdges() # get its Edge dict
return sorted(edges.keys(), key=lambda k: edges[k], reverse=True)[:5] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def assembly_courses(wall):\n courses = []\n vertices = set(wall.nodes())\n base = set(wall.nodes_where({'is_support': True}))\n\n if base:\n courses.append(list(base))\n\n seen = set()\n seen.update(base)\n\n vertices -= base\n\n while vertices:\n nbrs = set(nbr for key in courses[-1] for nbr in wall.neighbors(key))\n course = list(nbrs - seen)\n courses.append(course)\n seen.update(nbrs)\n vertices -= nbrs\n\n return courses",
"def GetTopConfidenceEdges(g, dia, topn=20):\r\n edgez = {(e[0], e[1]): e[2]['z'] for e in g.edges(data=True)}\r\n edgeconf = {(e[0], e[1]): e[2]['frac_minority'] for e in g.edges(data=True)}\r\n edgenum = {(e[0], e[1]): e[2]['num_patients'] for e in g.edges(data=True)}\r\n edgez_sorted = sorted(edgez.items(), key=operator.itemgetter(1), reverse=True)[:topn]\r\n newedgez_sorted = []\r\n for e in edgez_sorted:\r\n e = list(e)\r\n edge = e[0]\r\n e.append(edgenum[e[0]])\r\n e.append(edgeconf[e[0]])\r\n newedgez_sorted.append(e)\r\n PrintCodeDescr(g, dia, newedgez_sorted, mode=\"edge\")",
"def getCrossWithExtraEdgeInBetweenGraph(self):\n graph = self.graph\n makeLayer = self.makeLayer\n eastWestEdgeFromTo = self.eastWestEdgeFromTo\n addNodesToLayer = self.addNodesToLayer\n\n leftLayer = makeLayer()\n rightLayer = makeLayer()\n\n leftNodes = addNodesToLayer(3, leftLayer)\n rightNodes = addNodesToLayer(3, rightLayer)\n\n eastWestEdgeFromTo(leftNodes[0], rightNodes[2])\n eastWestEdgeFromTo(leftNodes[1], rightNodes[1])\n eastWestEdgeFromTo(leftNodes[2], rightNodes[0])\n\n return graph",
"def decreasing_cover_relations(self):\n relations = []\n for i in range(self.size(), 1, -1):\n for j in range(i - 1, 0, -1):\n if self.le(i, j):\n relations.append((i, j))\n break\n return relations",
"def add_edges(X, c=1):\n n = X.shape[0]\n edges = []\n for i in range(n):\n for j in range(i, n):\n if minkowski_dist(X[i], X[j], c) < 0:\n if X[i][0] < X[j][0]:\n edges.append((i, j))\n else:\n edges.append((j, i))\n return edges",
"def cc_visited(ugraph):\r\n\tremaining_node = ugraph.keys()\t\t#The keys are accessible directly.\r\n\t\r\n\tcon_com = [] #connected component\r\n\twhile len(remaining_node) != 0 :\r\n\t\tnode = random.choice(remaining_node)\r\n\t\tvisited = bfs_visited(ugraph,node)\r\n\t\tcon_com.append(visited)\r\n\t\tfor item in visited:\r\n\t\t\tremaining_node.remove(item)\r\n\treturn con_com",
"def top_face_edges(faces):\r\n top_edges = list({e for f in faces for e in f.edges})\r\n return sorted(top_edges, key=lambda e: calc_edge_median(e).z, reverse=True)[: len(faces)]",
"def increasing_cover_relations(self):\n relations = []\n size = self.size()\n for i in range(1, size):\n for j in range(i + 1, size + 1):\n if self.le(i, j):\n relations.append((i, j))\n break\n return relations",
"def get_neighbour_edges(self, cur: Union[str, int]) -> list:\n\t\treturn [edge for edge in self.edges if cur in edge]",
"def top_k_betweenness_centrality(self):\n d={}\n l=[]\n for v in vertices:\n a=self.betweenness_centrality(v)\n d[v]=a\n l.append(a)\n m=max(l)\n l1=[]\n for key in d:\n if d[key]==m:\n l1.append(key)\n\n return l1",
"def getRequiredCrossNodes(self, nodes: List[int], identifier: int) -> List[Tuple[int, int, int]]:\n\n return []",
"def edge_filtering(S, edge_list, co=0, type_='out'):\n edges = edge_list[:]\n for a in S:\n S_sort = sorted(S[a], key=S[a].get, reverse=True)\n for i in range(len(S[a])):\n b = S_sort[i]\n if (S[a][b] >= co) | (i == 0):\n if type_ != 'out':\n if (b,a) not in edges: edges.append((b,a))\n else:\n if (a,b) not in edges: edges.append((a,b))\n else: break\n return edges",
"def strongly_connected_components(G):\n preorder={}\n lowlink={} \n scc_found={}\n scc_queue = []\n scc_list=[]\n i=0 # Preorder counter\n for source in G:\n if source not in scc_found:\n queue=[source]\n while queue:\n v=queue[-1]\n if v not in preorder:\n i=i+1\n preorder[v]=i\n done=1\n v_nbrs=G[v]\n for w in v_nbrs:\n if w not in preorder:\n queue.append(w)\n done=0\n break\n if done==1:\n lowlink[v]=preorder[v]\n for w in v_nbrs:\n if w not in scc_found:\n if preorder[w]>preorder[v]:\n lowlink[v]=min([lowlink[v],lowlink[w]])\n else:\n lowlink[v]=min([lowlink[v],preorder[w]])\n queue.pop()\n if lowlink[v]==preorder[v]:\n scc_found[v]=True\n scc=[v]\n while scc_queue and preorder[scc_queue[-1]]>preorder[v]:\n k=scc_queue.pop()\n scc_found[k]=True\n scc.append(k)\n scc_list.append(scc)\n else:\n scc_queue.append(v)\n scc_list.sort(key=len,reverse=True) \n return scc_list",
"def generate_top20_candidates(cosine_sim):\n top20_indices = cosine_sim[0].argsort()[:-21:-1]\n top20_cosine = [cosine_sim[0][i] for i in top20_indices]\n return top20_indices, top20_cosine",
"def c_edges(self):\n self.compute_c_edges(self)\n return self._c_edges",
"def strongly_connected_component_subgraphs(G):\n cc=strongly_connected_components(G)\n graph_list=[]\n for c in cc:\n graph_list.append(G.subgraph(c))\n return graph_list",
"def cc_visited(ugraph):\n \n remaining = set(ugraph.keys())\n ccomp = []\n while len(remaining) > 0:\n node = remaining.pop()\n visited = bfs_visited(ugraph,node)\n ccomp.append(visited)\n remaining.difference_update(visited)\n \n return ccomp",
"def findcc(self):\n for vertex in self.graph.vertices:\n if vertex not in self.preOrderNumbers:\n self.dfs(vertex)\n return self.scComponents",
"def top(self):\n # if not empty\n if not self.empty():\n return self._data[0].get_value()[1].course\n # if empty\n return None",
"def edges(self):\n return map(Edge, self._top_exp.edges())",
"def edges(self):\n return self.dovetails + self.containments + self.internals",
"def get_courses(std):\n return std[2]",
"def edges(self):\n top_exp = TopologyUtils.TopologyExplorer(self.topods_shape(), ignore_orientation=True)\n return map(Edge, top_exp.edges())",
"def cc_visited(ugraph):\n remain = set(ugraph.keys())\n conn_comp = []\n while remain:\n node = remain.pop()\n visited = bfs_visited(ugraph, node)\n conn_comp.append(visited)\n remain = remain.difference(visited)\n return conn_comp",
"def get_crossing(self, threshold, start=None, stop=None, n=1, edge='both'):\n xval_list = self.get_all_crossings(threshold, start=start, stop=stop, edge=edge)\n if len(xval_list) < n:\n return None\n return xval_list[n - 1]",
"def eligible_edges(self):\n if len(self.edges) == 4:\n return [self.edges[0], self.edges[2]]\n return []",
"def cc_visited(ugraph):\n\tremain = []\n\tfor node in ugraph:\n\t\tremain.append(node)\n\tconnected = []\n\twhile remain:\n\t\tvisited = bfs_visited(ugraph, remain[0])\n\t\tconnected.append(visited)\n\t\tremain = [i for i in remain if not i in visited]\n\treturn connected",
"def nodesBetweenCriticalPoints(self, head: Optional[ListNode]) -> List[int]:\n critical = []\n pos = 1\n while head is not None and head.next is not None and head.next.next is not None:\n if head.val < head.next.val > head.next.next.val or head.val > head.next.val < head.next.next.val:\n critical.append(pos)\n head = head.next\n pos += 1\n if len(critical) <= 1:\n return [-1, -1]\n ret_min = pos\n ret_max = critical[-1] - critical[0]\n for i in range(1, len(critical)):\n ret = critical[i] - critical[i - 1]\n if ret < ret_min:\n ret_min = ret\n return [ret_min, ret_max]",
"def strongly_connected_components(G):\n preorder={}\n lowlink={}\n scc_found={}\n scc_queue = []\n i=0 # Preorder counter\n for source in G:\n if source not in scc_found:\n queue=[source]\n while queue:\n v=queue[-1]\n if v not in preorder:\n i=i+1\n preorder[v]=i\n done=1\n v_nbrs=G[v]\n for w in v_nbrs:\n if w not in preorder:\n queue.append(w)\n done=0\n break\n if done==1:\n lowlink[v]=preorder[v]\n for w in v_nbrs:\n if w not in scc_found:\n if preorder[w]>preorder[v]:\n lowlink[v]=min([lowlink[v],lowlink[w]])\n else:\n lowlink[v]=min([lowlink[v],preorder[w]])\n queue.pop()\n if lowlink[v]==preorder[v]:\n scc_found[v]=True\n scc=[v]\n while scc_queue and preorder[scc_queue[-1]]>preorder[v]:\n k=scc_queue.pop()\n scc_found[k]=True\n scc.append(k)\n yield scc\n else:\n scc_queue.append(v)",
"def get_top_corners(corners):\n top_corners = np.concatenate(\n [sorted(rect, key=getY)[:2] for rect in corners])\n return sorted(top_corners, key=getX)"
] | [
"0.59826374",
"0.55603427",
"0.5523582",
"0.5495732",
"0.5461872",
"0.54479295",
"0.53818995",
"0.5318768",
"0.52677137",
"0.5246417",
"0.520737",
"0.5162896",
"0.5158995",
"0.51440036",
"0.51215637",
"0.5089466",
"0.50795156",
"0.50770104",
"0.5055389",
"0.5054957",
"0.50231767",
"0.5018502",
"0.50170475",
"0.50167465",
"0.5011203",
"0.49917084",
"0.49874586",
"0.4986527",
"0.49810535",
"0.4973235"
] | 0.72403175 | 0 |
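The positive snippet in the record above ranks a course node's edges by weight with sorted(..., key=..., reverse=True)[:5]. A minimal, self-contained sketch of that idiom on a made-up edge-weight dict (the course codes and weights are illustrative only, not taken from the dataset):

```python
# Rank neighbours of a node by edge weight and keep the five strongest cross-listings.
edge_weights = {"MATH101": 12, "CS201": 30, "PHYS150": 7, "CS250": 21, "ENG110": 3, "BIO120": 18}

# sorted() iterates the keys and uses each key's weight as the sort key;
# reverse=True puts the heaviest edges first, and the slice keeps the top 5.
top_edges = sorted(edge_weights.keys(), key=lambda k: edge_weights[k], reverse=True)[:5]
print(top_edges)  # ['CS201', 'CS250', 'BIO120', 'MATH101', 'PHYS150']
```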
Initializes turtle instance for turtle game. | def initialize(turtle_shape, bg_color, turtle_color, turtle_speed):
turtle_instance = turtle.Turtle()
turtle_instance.shape(turtle_shape)
turtle.bgcolor(bg_color)
turtle_instance.color(turtle_color)
turtle_instance.speed(turtle_speed)
return turtle_instance | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def init_turtle():\n turtle.up()\n turtle.home()",
"def __init__(self, commands=[], turtle_name=\"Terry\", speed=6, shape=\"classic\"):\n super().__init__()\n turtle.colormode(255)\n self._name = turtle_name\n super().speed(speed)\n super().shape(shape)\n self.commands = commands\n self._pc = 0\n self._loop_stack = []\n self._variables = {'x':0, 'y':0}",
"def init():\n turtle.setworldcoordinates(-WINDOW_WIDTH / 2, -WINDOW_WIDTH / 2,\n WINDOW_WIDTH / 2, WINDOW_HEIGHT / 2)\n\n turtle.up()\n turtle.setheading(0)\n turtle.title('squares')\n pass",
"def setTurtle(t):\r\n t.pu()\r\n t.goto(initialCoordinates())",
"def _prepare_turtle():\n turtle.setup(width=screen_width)\n turtle.shape(turtle_shape)\n turtle.title(title)",
"def __init__(self, level, treasures, maze_size):\n turtle.Turtle.__init__(self)\n self.shape(\"player_right.gif\")\n self.color(\"blue\")\n self.penup()\n self.pensize(1)\n self.speed(0)\n self.score = 0\n self.level = level\n self.treasures = treasures\n self.maze_size = maze_size\n self.end_writer = writers.EndWriter(maze_size)\n\n turtle.Screen().onkey(self.go_left, \"Left\")\n turtle.Screen().onkey(self.go_right, \"Right\")\n turtle.Screen().onkey(self.go_up, \"Up\")\n turtle.Screen().onkey(self.go_down, \"Down\")\n turtle.Screen().onkey(self.find_path, \"f\")",
"def turtle_setup():\n # ___ ___ _ _ ___ _____ __ __ ___ ___ ___ _____ __\n # | \\ / _ \\ | \\| |/ _ \\_ _| | \\/ |/ _ \\| \\_ _| __\\ \\ / /\n # | |) | (_) | | .` | (_) || | | |\\/| | (_) | |) | || _| \\ V /\n # |___/ \\___/ |_|\\_|\\___/ |_| |_| |_|\\___/|___/___|_| |_|\n # _____ _ _ ___ ___ ___ _ _ _ _ ___ _____ ___ ___ _ _\n # |_ _| || |_ _/ __| | __| | | | \\| |/ __|_ _|_ _/ _ \\| \\| |\n # | | | __ || |\\__ \\ | _|| |_| | .` | (__ | | | | (_) | .` |\n # |_| |_||_|___|___/ |_| \\___/|_|\\_|\\___| |_| |___\\___/|_|\\_|\n #\n # Create the turtle graphics screen and set a few basic properties.\n screen = turtle.Screen()\n screen.setup( WIDTH, HEIGHT, MARGIN, MARGIN )\n screen.bgcolor( \"SkyBlue\" )\n\n # Create two turtles, one for drawing and one for writing.\n artist = turtle.Turtle()\n writer = turtle.Turtle()\n\n # Change the artist turtle's shape so the artist and writer are distinguishable.\n artist.shape( \"turtle\" )\n\n # Make the animation as fast as possible and hide the turtles.\n if DRAW_FAST:\n screen.delay( 0 )\n artist.hideturtle()\n artist.speed( \"fastest\" )\n writer.hideturtle()\n writer.speed( \"fastest\" )\n\n # Set a few properties of the writing turtle useful since it will only be writing.\n writer.setheading( 90 ) # Straight up, which makes it look sort of like a cursor.\n writer.penup() # A turtle's pen does not have to be down to write text.\n writer.setposition( 0, HEIGHT // 2 - FONT_SIZE * 2 ) # Centered at top of the screen.\n\n return screen, artist, writer",
"def __init__(self, pos=(0, 0)):\n super().__init__() # Call 'turtle' initiation\n self.penup() # Stop displaying trail\n self.shapesize(stretch_wid=1, stretch_len=1) # Set dimensions of ball object to same height and width\n self.color(\"white\") # Set colour to white\n self.shape(\"circle\") # Set ball shape to round\n self.setpos(pos) # Move ball to desired position on screen\n self.x_dir = 1 # Set ball horizontal movement to right\n self.y_dir = 1 # Set ball vertical movement to up",
"def __init__(self, _pendown=1, gridmode=False, gridsize=50, homeX = 50 + 25 + 5, homeY = 50 + 25 + 5, canvWidth = 400, canvHeight = 200, \\\n turtleMainColor=\"#00A651\", turtleAccentColor=\"#FFF600\", speed = 5, rotspeed = 5, pencolor = 'red', penwidth=3):\n self._turtleMainColor = turtleMainColor\n self._turtleAccentColor = turtleAccentColor\n self._speed = speed\n self._rotspeed = rotspeed\n self._pendown = _pendown\n self._pencolor = pencolor\n self._penwidth = penwidth\n self._rotation = 90\n self._gridsize = gridsize\n self._gridmode = gridmode\n \n if(gridmode and homeX == 80):\n homeX = 0\n homeY = 0\n \n self._x = homeX\n self._y = homeY\n self._homeX = homeX\n self._homeY = homeY\n \n self._canvWidth = canvWidth\n self._canvHeight = canvHeight\n self._actions = []\n self._levelDataString = [] \n \n self._walls = []\n self._lava = []\n \n self._appendCurrentState();",
"def initialize_plotter(width, height, min_x, max_x, min_y, max_y):\n global x_begin, x_end, x_increment\n turtle.delay(0)\n x_begin, x_end = min_x, max_x\n turtle.setup(width=width, height=height)\n turtle.screensize(width, height)\n turtle.setworldcoordinates(min_x, min_y, max_x, max_y)\n x_increment = (max_x - min_x)/width\n turtle.hideturtle()\n turtle.pencolor('black')\n turtle.penup()\n turtle.setposition(min_x, 0)\n turtle.setheading(0)\n turtle.pendown()\n turtle.forward(max_x - min_x)\n turtle.penup()\n turtle.setposition(0, min_y)\n turtle.setheading(90)\n turtle.pendown()\n turtle.forward(max_y - min_y)",
"def __init__(self):\r\n pen.up()\r\n pen.setheading(0)\r\n pen.hideturtle()\r\n turtle.title(\"My name\")\r\n pen.speed(0)\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.left(90)",
"def reset(self):\n self._turtle.clear()\n self._turtle.setposition((0,0)) \n self._turtle.shape('turtle')\n self.color = 'red'\n self.heading = 180\n self.speed = 0",
"def __init__(self):\n self.x = int(constants.SCREEN_WIDTH/2)\n self.y = int(constants.SCREEN_HEIGHT/2)\n self.DX = self.getRandSpeed()\n self.DY = self.getRandSpeed()\n self.RADIUS = 5",
"def main():\n args = _argument_parsing()\n _prepare_turtle()\n _if_else_statement(args)\n turtle.mainloop()",
"def reset(self):\n self._turtle.clear()\n self._turtle.setposition((0,0)) \n try:\n self._turtle.shape('pen.gif')\n except:\n self._turtle.shape('classic')\n self._turtle.color('red')\n self.speed = 0\n \n #pair = self._turtle.color()\n self._pencolor = self._turtle.color()[0]\n self._fillcolor = self._turtle.color()[0]",
"def screen_setup(screen_size):\n window = turtle.Screen()\n window.bgcolor(\"black\")\n window.title(\"Maze Game\")\n window.setup(screen_size, screen_size)",
"def cool_turtle():\n # Make the TurtleWindow.\n window = rg.TurtleWindow()\n\n # Make the SimpleTurtle.\n cool_turtle = rg.SimpleTurtle('turtle')\n cool_turtle.pen = rg.Pen('forest green', 1) # Try thickness 5 too\n cool_turtle.speed = 1 # Slow\n\n # Move the SimpleTurtle to her starting position.\n start_at = rg.Point(100, -50)\n cool_turtle.pen_up()\n cool_turtle.go_to(start_at)\n cool_turtle.pen_down()\n\n # Set up some parameters that control the nature of the shape drawn.\n size = 100 # Try 150 too\n angle = 1 # Try 20 too\n iterations = 360 # Try 90 too\n\n # Store the animation speed (to reset it later).\n tracer_n, tracer_d = window.tracer(), window.delay()\n\n # Make the animation go much faster.\n # First number: bigger means faster.\n # Second number: bigger means slower.\n window.tracer(5, 5)\n\n for _ in range(iterations):\n cool_turtle.right(angle)\n cool_turtle.draw_square(size)\n\n # Reset the animation to its original speed.\n window.tracer(tracer_n, tracer_d)\n\n window.close_on_mouse_click()",
"def RobotInit():\n names = [\"Body\"]\n angles = [-0.038392066955566406, 0.1349501609802246, 1.1964781284332275, 0.07512402534484863, -1.4926238059997559, -1.3391400575637817, 0.11500811576843262, 0.029999971389770508, -0.25766992568969727, -0.09506607055664062, -0.9694461822509766, 2.086198091506958, -1.168950080871582, 0.07367396354675293, -0.25766992568969727, 0.10128593444824219, -0.9342479705810547, 2.0663399696350098, -1.186300277709961, -0.07205605506896973, -0.309826135635376, 0.24233007431030273, 0.06131792068481445, 0.8544800281524658, 1.5983860492706299, 0.17799997329711914]\n fractionMaxSpeed = 0.1\n time.sleep(1)\n motion.setAngles(names, angles, fractionMaxSpeed)",
"def __init__(self):\n super().__init__()\n\n # Robot state\n self.ask_mode = False\n\n # Connect two large motors on output ports B and C\n self.sound = Sound()\n self.leds = Leds()\n self.p1 = TouchSensor(INPUT_1)\n self.p2 = TouchSensor(INPUT_2)\n self.p3 = TouchSensor(INPUT_3)\n self.p4 = TouchSensor(INPUT_4)",
"def init():\n global tube, ball, faceTextureName, woodTextureName\n tube = gluNewQuadric()\n gluQuadricDrawStyle(tube, GLU_FILL)\n ball = gluNewQuadric()\n gluQuadricDrawStyle(ball, GLU_FILL)\n\n # Set up lighting and depth-test\n glEnable(GL_LIGHTING)\n glEnable(GL_NORMALIZE) # Inefficient...\n glEnable(GL_DEPTH_TEST) # For z-buffering!\n\n generateCheckerBoardTexture()\n faceTextureName = loadImageTexture(\"brick.jpg\")\n woodTextureName = loadImageTexture(\"wood.jpg\")",
"def __init__(self):\n\n self.parser = self.define_parser()\n self.pen = Pen()",
"def turtle(self,turtleType):\n if self.turtleType == turtleType:\n return\n if self.turtleType and self.turtleType != PLAYER:\n self.mc.removeEntity(self.turtleId)\n self.turtleType = turtleType\n if turtleType == PLAYER:\n self.turtleId = None\n elif turtleType:\n self.turtleId = self.mc.spawnEntity(turtleType,\n self.position.x,self.position.y,self.position.z,\n \"{NoAI:1}\")\n self.setEntityCommands()\n self.positionOut()\n self.directionOut()",
"def main():\r\n intialize()\r\n draw_hexagon()\r\n draw_square()\r\n draw_triangle()\r\n shapes()\r\n shapes2()\r\n print (\"Close the window\")\r\n turtle.done()",
"def __init__(self):\n self._symbols = set()\n self._blank_symbol = None\n self._states = set()\n self._start_state = None\n self._end_states = set()\n self._transitions = {}\n\n self._current_state = None\n self._tape = None\n self._head = None",
"def initialize_simulation(self) -> Simulation:\n pass",
"def init(self):\n\n pygame.init()\n pygame.joystick.init()\n self.controller = pygame.joystick.Joystick(0)\n self.controller.init()\n self.x=0\n self.y=0",
"def __init__(self):\n self.screen = pg.display.get_surface()\n self.screen_rect = self.screen.get_rect()\n self.joys = initialize_all_gamepads()\n self.done = False\n self.clock = pg.time.Clock()\n self.fps = 60\n self.keys = pg.key.get_pressed()\n self.cannon = Turret(self.joys[0], (250,250))\n self.objects = pg.sprite.Group()",
"def initialize(self):\n\n \"*** YOUR CODE HERE\"\n self.path = []\n MyAgent.customFood = None\n MyAgent.foodLeft = 0\n MyAgent.specialWalls = {}\n self.followOne = False\n if self.index == 0:\n MyAgent.finding = []\n MyAgent.finding.append(False)",
"def init(self):\n self.l_motor = lazytalonsrx.LazyTalonSRX(Constants.IL_MOTOR_ID)\n self.r_motor = lazytalonsrx.LazyTalonSRX(Constants.IR_MOTOR_ID)\n self.l_motor.initialize(\n inverted=False, encoder=False, phase=False, name=\"Intake Left\")\n self.r_motor.initialize(\n inverted=True, encoder=False, phase=False, name=\"Intake Right\")",
"def setup(self):\n self.total_time = 0.0\n self.timer_text = None\n arcade.set_background_color(arcade.color.WHITE)"
] | [
"0.81658715",
"0.7127116",
"0.704404",
"0.6922751",
"0.6751538",
"0.66928184",
"0.6515024",
"0.64465225",
"0.6263604",
"0.62292695",
"0.6227585",
"0.61615515",
"0.58694047",
"0.58318806",
"0.580501",
"0.575872",
"0.57573175",
"0.57200843",
"0.5657598",
"0.55897486",
"0.55519253",
"0.55367",
"0.5531332",
"0.5524546",
"0.54946375",
"0.54783654",
"0.5474411",
"0.5465285",
"0.54579556",
"0.5451918"
] | 0.8317453 | 0 |
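A hedged usage sketch for the initialize helper shown in the record above; it assumes that function is already in scope, and the shape, colours and speed passed here are arbitrary examples rather than values from the dataset:

```python
import turtle

# initialize() configures and returns a turtle.Turtle; bgcolor is applied to the shared screen.
t = initialize(turtle_shape="turtle", bg_color="white", turtle_color="green", turtle_speed=3)
t.forward(100)   # the return value is an ordinary Turtle, so any Turtle method works on it
turtle.done()    # keep the drawing window open until the user closes it
```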
Defines the turtle movement for the initialized turtle instance and executes that movement. | def turtle_movement(turtle_shape, bg_color, turtle_color, turtle_speed):
turtle_name = initialize(turtle_shape, bg_color,
turtle_color, turtle_speed)
for i in range(36):
for i in range(4):
turtle_name.forward(200)
turtle_name.right(90)
turtle_name.right(10) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def move_turtle(self):\n self.forward(self.move_speed)",
"def setTurtle(t):\r\n t.pu()\r\n t.goto(initialCoordinates())",
"def init_turtle():\n turtle.up()\n turtle.home()",
"def move():\n Robot.move()",
"def movement(self):",
"def move(self):\r\n segments = len(self.all_turtles) - 1\r\n for i in range(len(self.all_turtles)):\r\n if segments == 0:\r\n self.all_turtles[segments].forward(MOVE_DISTANCE)\r\n else:\r\n new_x = self.all_turtles[segments - 1].xcor()\r\n new_y = self.all_turtles[segments - 1].ycor()\r\n self.all_turtles[segments].goto(new_x, new_y)\r\n segments -= 1",
"def goto(x, y):\n turtleTmp.setposition(x, y)",
"def __init__(self, commands=[], turtle_name=\"Terry\", speed=6, shape=\"classic\"):\n super().__init__()\n turtle.colormode(255)\n self._name = turtle_name\n super().speed(speed)\n super().shape(shape)\n self.commands = commands\n self._pc = 0\n self._loop_stack = []\n self._variables = {'x':0, 'y':0}",
"def repositionTurtle(t, x, y):\n t.up()\n t.goto(x, y)\n t.down()",
"def move_turtle(self, x, y):\n tortuga = self.turtle\n if self.capture_mode:\n tortuga.setheading(tortuga.towards(x, y))\n tortuga.setpos(x, y)\n self.add_punto(Punto(x, y))",
"def move(self, friction = 0.0):\n try:\n newX = self.xcor() + self.dx\n newY = self.ycor() + self.dy\n self.goto(newX, newY)\n # apply friction\n self.dx = self.dx * (1 - friction)\n self.dy = self.dy * (1 - friction)\n except:\n print(\"Error, probably because dx and dy are not properties of the turtle\")",
"def test_move_step(self):\n t = AioBaseTurtle()\n t._move_step(Vec2D(-100, 0), 20, Vec2D(10,5))\n self.assertAlmostEqual(t._position[0], 100)\n self.assertAlmostEqual(t._position[1], 100)\n t.screen._drawline.assert_called_once_with(\n t.currentLineItem,\n ((-100.0, 0.0), (100.0, 100.0)), # called with mutable _position\n \"black\",\n 1,\n False\n )\n self.mock_update.assert_called_once_with()",
"def move(self,x,y):\n assert (type(x) in [int, float]), \"parameter x:%s is not a valid number\" % `x`\n assert (type(y) in [int, float]), \"parameter y:%s is not a valid number\" % `y`\n d = self._turtle.isdown()\n if d:\n self._turtle.penup()\n self._turtle.setposition(x,y)\n if d:\n self._turtle.pendown()",
"def moveturtle(x,y,t):\n t.penup()\n t.goto(x,y)\n t.pendown()",
"def startMovementAll(self):\n self.startMovementX()\n self.startMovementY()\n self.startMovementZ()",
"def setMovement(self, movement):\n self.ma = movement",
"def run(self):\n super(MovementControl,self).run()",
"def step(self):\r\n\r\n self.velocity = 1\r\n new_pos = self.pos\r\n self.model.space.move_agent(self, new_pos)",
"def change_movement(self, action):\r\n if action == \"diagonal\" and self.movement != \"diagonal\":\r\n self.movement = \"diagonal\"\r\n self.x_speed = 3\r\n self.y_speed = 3\r\n self.canvas.after(50, self.move_diagonal)\r\n elif action == \"horizontal\" and self.movement != \"horizontal\":\r\n self.movement = \"horizontal\"\r\n self.x_speed = 3\r\n self.y_speed = 0\r\n self.canvas.after(50, self.move_horizontal)\r\n elif action == \"vertical\" and self.movement != \"vertical\":\r\n self.movement = \"vertical\"\r\n self.x_speed = 0\r\n self.y_speed = 3\r\n self.canvas.after(50, self.move_vertical)\r\n elif action == \"inward_outward\":\r\n self.movement = \"inward_outward\"\r\n self.canvas.after(50, self.move_inward_outward)",
"def begin_auto_moving(self, direction):\n self.direction = direction\n self.image_list = self.animation_dict[direction]\n self.state = 'automoving'\n self.x_vel = self.vector_dict[direction][0]\n self.y_vel = self.vector_dict[direction][1]\n self.move_timer = self.current_time",
"def start(self):\n self.startAngMovementALl()\n self.startMovementAll()",
"def initialize(turtle_shape, bg_color, turtle_color, turtle_speed):\n turtle_instance = turtle.Turtle()\n turtle_instance.shape(turtle_shape)\n turtle.bgcolor(bg_color)\n turtle_instance.color(turtle_color)\n turtle_instance.speed(turtle_speed)\n return turtle_instance",
"def run(self):\n # type: () -> None\n self.move_to(self.location)",
"def __init__(self, level, treasures, maze_size):\n turtle.Turtle.__init__(self)\n self.shape(\"player_right.gif\")\n self.color(\"blue\")\n self.penup()\n self.pensize(1)\n self.speed(0)\n self.score = 0\n self.level = level\n self.treasures = treasures\n self.maze_size = maze_size\n self.end_writer = writers.EndWriter(maze_size)\n\n turtle.Screen().onkey(self.go_left, \"Left\")\n turtle.Screen().onkey(self.go_right, \"Right\")\n turtle.Screen().onkey(self.go_up, \"Up\")\n turtle.Screen().onkey(self.go_down, \"Down\")\n turtle.Screen().onkey(self.find_path, \"f\")",
"def move(self, direction):\n # replace with your code\n pass",
"def move(self, direction):\n # replace with your code\n pass",
"def step(self, move):",
"def setMovement(self, movement, isSpecial = False, canControl = True):\n\n vel = self.ode_body.getLinearVel()\n for i in range(len(self.direction)):\n vel[i] = self.direction[i] * movement\n\n self.ode_body.setLinearVel(vel)\n\n self.moveVal = self.direction\n self.moveSpecial = isSpecial\n self.isMove = [False, False]\n self.direction = [self.moveVal[0], self.moveVal[1]]\n\n if not canControl:\n self.knockback()\n self.moveLock(None, 9999)\n self.isKnockback = True\n \n # Play Sound\n if movement > 10:\n self.sfx['lunge'].play()",
"def movement(self, screen):\n if self.tx is not None and self.ty is not None: # Target is set\n\n X = self.x - self.tx\n Y = self.y - self.ty\n\n if X < 0: # --->\n self.img = pygame.image.load(next(self.walking_east_images))\n self.x += self.velocity\n elif X > 0: # <----\n self.img = pygame.image.load(next(self.walking_west_images))\n self.x -= self.velocity\n if Y > 0: # up\n self.img = pygame.image.load(next(self.walking_north_images))\n self.y -= self.velocity\n elif Y < 0: # dopwn\n self.img = pygame.image.load(next(self.walking_south_images))\n self.y += self.velocity\n screen.blit(self.img, (self.x, self.y))\n\n if X == 0 and Y == 0:\n self.tx, self.ty = None, None\n self.agent.actionCompleted()",
"def move(self):\n \n self.position = self.wander()"
] | [
"0.7389288",
"0.6786604",
"0.6750962",
"0.6614437",
"0.6599587",
"0.64865434",
"0.6407805",
"0.64050645",
"0.6318544",
"0.6280543",
"0.6203746",
"0.61156297",
"0.6057648",
"0.6023432",
"0.60230684",
"0.600326",
"0.59755194",
"0.5961362",
"0.59582186",
"0.59399325",
"0.5905899",
"0.58884126",
"0.58734035",
"0.58535314",
"0.58469945",
"0.58469945",
"0.5843393",
"0.58384305",
"0.5828846",
"0.5822152"
] | 0.69048256 | 1 |
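The turtle_movement snippet above draws 36 squares, rotating 10 degrees between them, which traces one full 360-degree rosette. A standalone sketch of the same drawing loop that does not depend on the dataset's initialize helper:

```python
import turtle

pen = turtle.Turtle()
pen.speed(0)                 # fastest animation
for _ in range(36):          # 36 squares * 10 degrees = one full rotation
    for _ in range(4):       # one square: four sides of length 200 with 90-degree turns
        pen.forward(200)
        pen.right(90)
    pen.right(10)            # rotate slightly before drawing the next square
turtle.done()
```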
Saves summary statistics as a csv file in the current directory and returns the output filename. | def save_summary_statistics_csv(
experiment_name, roi_summary_data, save_directory_path: str = ""
):
# Create directories on the path if they don't already exist
Path(save_directory_path).mkdir(parents=True, exist_ok=True)
csv_filename = f"{experiment_name} - summary statistics (generated {iso_datetime_for_filename(datetime.now())}).csv"
csv_filepath = Path(save_directory_path) / csv_filename
roi_summary_data.to_csv(csv_filepath, index=False)
print(f"Summary statistics saved to: {csv_filepath}\n")
return csv_filepath | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def checkpoint_stats(self, stats):\n stats.to_csv(\n self.params.stat.dir + self.params.model.name + \"_\" + self.params.data.name + \".stat\",\n sep='\\t',index=False,header=True)",
"def save_csv_file():\n global output_on_display, import_lst, column_names, data\n if data_base == '':\n mistake_load_table()\n else:\n column_names = data[0]\n save_name = asksaveasfilename(title=\"Select file\", filetypes=((\"CSV\", \"*.csv\"), (\"all files\", \"*.*\")),\n confirmoverwrite=True, defaultextension='.csv')\n step = len(column_names)\n data_csv = import_lst\n if len(data_csv[0]) == step:\n pass\n else:\n data_csv = import_lst[step::]\n\n with open(save_name, 'w+') as csv_file:\n csv_writer = csv.writer(csv_file)\n csv_writer.writerow(column_names)\n csv_writer.writerows(data_csv)",
"def save_to_csv(self):\n path = partial(os.path.join, 'datasets')\n save_name = self.name.lower().replace(' ', '_')\n self.df['values'].sum(axis=1).to_csv(path('{0}_values.csv'.format(save_name)))\n self.df['allocations'].to_csv(path('{0}_allocations.csv'.format(save_name)))\n self.df['returns'].to_csv(path('{0}_returns.csv'.format(save_name)))\n self.trades.to_csv(path('{0}_trades.csv'.format(save_name)))",
"def saveStatsFile(self):\n if not os.path.exists(\"stats\"):\n os.mkdir(\"stats\")\n now = datetime.datetime.now()\n parts = [now.year, now.month, now.day]\n parts = [\"%02d\"%x for x in parts]\n todaysFileName = \"-\".join(parts)+\".txt\" \n timeStamp = time.strftime(\"%y%m%d%H%M\", time.localtime())\n log = \",\".join(self.logLinesStats)\n fname = \"stats/\"+todaysFileName\n with open(fname, 'a') as f:\n f.write(timeStamp+\",\"+log+\"\\n\")\n self.log(\"wrote \"+fname)",
"def save_submission(results, file_name='submission.csv'):\n submission_path = path.join('..', 'output', file_name)\n results.to_csv(submission_path)",
"def save_csv(self, filename): # DONE\n self.data.to_csv(filename)",
"def model_summary_to_file(model, save_path):\n with open(save_path, 'w') as fh:\n model.summary(print_fn=lambda x: fh.write(x + \"\\n\"))",
"def generate_csv(summaries, filename):\n with open(filename, 'wb') as f:\n header = ','.join(['ACTIVATION', 'HIDDEN SIZE', 'TRAIN LOSS', 'VAL LOSS', 'TRAIN PPX', 'VAL PPX']) + '\\n'\n f.write(header)\n\n def extract_best(summary, metric):\n return min([h.metrics[metric] for h in summary['history']])\n for summary in summaries:\n activation = summary['meta']['ACTIVATION']\n h_size = summary['meta']['NUM_HIDDEN']\n train_loss, val_loss, train_ppx, val_ppx = extract_best(summary, 'train_loss'), extract_best(summary, 'val_loss'), extract_best(summary, 'train_ppx'), extract_best(summary, 'val_ppx')\n line = \",\".join([activation] + map(lambda x: \"%.2f\" % (x), [h_size, train_loss, val_loss, train_ppx, val_ppx])) + '\\n'\n f.write(line)",
"def save_csv(csv_fn, output_dir, df_to_save):\n\n # import packages\n import os\n import pandas as pd\n\n\n if os.path.isfile(output_dir + '/' + csv_fn):\n print('Data already saved and will not be saved again')\n else:\n df_to_save.to_csv(output_dir + '/' + csv_fn, index = False)\n\n return None",
"def save_results_to_csv(save_file_path, append=True, tmp_file_path=tmp_file_path, datefmt='%d/%m/%Y %H:%M:%S'):\n # load tmp results\n res_summary = open_json(tmp_file_path, data_format=pd.DataFrame)\n\n # calculate average scores\n combis = list(product(\n ['CV', 'Val'], \n ['precision', 'recall', 'f1', 'exact match', 'loss', \n 'precision_CE', 'recall_CE', 'f1_CE', 'exact match_CE']\n ))\n for combi in combis:\n get_average(res_summary, combi)\n\n # calculate end time\n end = datetime.now()\n res_summary['endtime'] = end.strftime(datefmt)\n res_summary['timetaken'] = end - \\\n datetime.strptime(res_summary['starttime'][0], datefmt)\n\n if append and os.path.isfile(save_file_path):\n # load old file\n old_summary = pd.read_csv(save_file_path)\n # append below\n res_summary = pd.concat([old_summary, res_summary], axis=0)\n\n # save final and delete tmp file\n res_summary.to_csv(save_file_path, index=False)\n os.remove(tmp_file_path)",
"def store_performance(results, out_dir='', name='results_summary'):\n\n results_file = os.path.join(out_dir, name + '.csv')\n\n results_summary = {\n 'pop_mean_accuracies': ['%.2f' % (100 * np.mean(results[:, 1]))],\n 'pop_max_accuracies': ['%.2f' % (100 * np.max(results[:, 1]))],\n 'pop_mean_rewards': [np.mean(results[:, 0])],\n 'pop_max_rewards': [np.max(results[:, 0])],\n }\n\n df = pd.DataFrame.from_dict(results_summary)\n\n if os.path.isfile(results_file):\n old_df = pd.read_csv(results_file, sep=';')\n df = pd.concat([old_df, df], sort=True)\n df.to_csv(results_file, sep=';', index=False)",
"def save_results_csv(fname, results, header=0):\n\n new_rows = []\n if not os.path.isfile(fname):\n args = fname.split('/')[:-1]\n directory = os.path.join(*args)\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n with open(fname, 'wt') as f:\n writer = csv.writer(f)\n if header == 0:\n writer.writerows(\n [['Model', 'Dataset', 'Method', 'Weight', 'Label', \n 'Step', 'AUROC', 'Precision', 'Recall',\n 'F1 score', 'Random Seed', 'Date']])\n if header == 1:\n writer.writerows(\n [['Precision', 'Recall', 'F1 score', 'Random Seed']])\n elif header ==2:\n writer.writerows(\n [['Step', 'AUROC', 'Precision', 'Recall',\n 'F1 score', 'Random Seed']])\n\n elif header == 5:\n writer.writerows(\n [['Model', 'Dataset', 'Method', 'Weight', 'Label', \n 'Step', 'Scores']])\n\n with open(fname, 'at') as f:\n # Overwrite the old file with the modified rows\n writer = csv.writer(f)\n new_rows.append(results) # add the modified rows\n writer.writerows(new_rows)",
"def write_csv(filename, summaries, float_format='%.02f'):\n data = [['solution', 'total time', 'ok', 'errors']]\n\n for var, s in summaries[0].stats.iteritems():\n for stat in s:\n data[0].append('%s %s' % (var, stat))\n\n for summary in summaries:\n row = [summary.solution, float_format % summary.total_time, summary.ok,\n summary.errors]\n for s in summary.stats.itervalues():\n for stat in s.itervalues():\n row.append(float_format % stat)\n data.append(row)\n\n with open(filename, 'wb') as csv_file:\n writer = csv.writer(csv_file)\n for row in data:\n writer.writerow(row)",
"def save_results(self, path):\n create_folder(path)\n self.get_scores().to_csv(path + r'/scores.csv', index=False)\n self.get_results().to_csv(path + r'/results.csv', index=False)\n self.get_pivot_last_epoch().to_csv(path + r'/pivot_last_epoch.csv', index=True)",
"def save_results_csv(fname, results, header=0):\n\n new_rows = []\n if not os.path.isfile(fname):\n args = fname.split('/')[:-1]\n directory = os.path.join(*args)\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n with open(fname, 'wt') as f:\n writer = csv.writer(f)\n if header == 0:\n writer.writerows(\n [['Model', 'Dataset', 'Method', 'Weight', 'Label', \n 'Step', 'AUPRC', 'AUROC', 'Precision', 'Recall',\n 'F1 score', 'Random Seed', 'Date']])\n if header == 1:\n writer.writerows(\n [['Precision', 'Recall', 'F1 score', 'Random Seed']])\n elif header ==2:\n writer.writerows(\n [['Step', 'AUPRC', 'AUROC', 'Precision', 'Recall',\n 'F1 score', 'Random Seed']])\n\n elif header == 5:\n writer.writerows(\n [['Model', 'Dataset', 'Method', 'Weight', 'Label', \n 'Step', 'Scores']])\n\n with open(fname, 'at') as f:\n # Overwrite the old file with the modified rows\n writer = csv.writer(f)\n new_rows.append(results) # add the modified rows\n writer.writerows(new_rows)",
"def write_output(basis, filename):\n\n logging.info('Writing output to {}'.format(filename))\n\n basis.to_csv(filename)",
"def matrix_export_save(simulation, demandsegment, dir):\n matrix = demandsegment.matrix\n matrix_couples = Matrix.objects.filter(matrices=matrix)\n # To avoid conflict if two users export a file at the same time, we\n # generate a random name for the export file.\n filename = dir + '/matrix(' + demandsegment.usertype.name + ')(' + str(demandsegment.usertype.user_id) + ').tsv'\n\n with codecs.open(filename, 'w', encoding='utf8') as f:\n writer = csv.writer(f, delimiter='\\t')\n # Get a dictionary with all the values to export.\n values = matrix_couples.values_list('p__user_id', 'q__user_id', 'r')\n # Write a custom header.\n writer.writerow(['origin', 'destination', 'population'])\n writer.writerows(values)\n\n return filename",
"def output_summary_stats(self, filename):\r\n\r\n total_return = self.equity_curve['equity_curve'][-1]\r\n returns = self.equity_curve['returns']\r\n pnl = self.equity_curve['equity_curve']\r\n\r\n sharpe_ratio = create_sharpe_ratio(returns, periods=252)\r\n drawdown, max_dd, dd_duration = create_drawdowns(pnl)\r\n self.equity_curve['drawdown'] = drawdown\r\n\r\n stats = [(\"Total Return\", \"%0.2f%%\" % \\\r\n ((total_return - 1.0) * 100.0)),\r\n (\"Sharpe Ratio\", \"%0.2f%%\" % sharpe_ratio),\r\n (\"Max Drawdown\", \"%0.2f%%\" % (max_dd * 100.0)),\r\n (\"Drawdown Duration\", \"%f\" % dd_duration)]\r\n self.equity_curve.to_csv(filename)\r\n return stats",
"def write_analysis_details(self, csvfile):\n #filepath, total words, line count, most common word\n f = open(csvfile, 'w')\n most_common = self.most_common()\n f.write('filepath,total words,line count,most common word\\n')\n f.write(f'{self.filepath},{self.word_count()},{self.sentence_count()},{self.most_common()[0]}')\n f.close()",
"def write_csv(self):\n with open(paths.CSV_FILE, 'w', newline='') as csv_file:\n writer = csv.writer(csv_file)\n assg = AssignmentConfig().get_assignment()\n writer.writerow([\"Student\"] + assg.get_test_list() + assg.get_programs_list() +\n [\"normalised_test_score\"] + [\"normalised_prog_score\"] + [\"total\"] + [\"total_rounded\"])\n\n for (submitter, submitter_data) in sorted(self.snapshot['results'].items()):\n total_score = submitter_data[\"normalised_test_score\"] + submitter_data[\"normalised_prog_score\"]\n total_rounded = round(total_score * 2) / 2 # total score rounded to nearest 0.5\n writer.writerow([submitter] +\n [submitter_data[\"tests\"][test] for test in sorted(submitter_data[\"tests\"])] +\n [submitter_data[\"progs\"][prog] for prog in sorted(submitter_data[\"progs\"])] +\n [submitter_data[\"normalised_test_score\"]] +\n [submitter_data[\"normalised_prog_score\"]] +\n [round(total_score, 2)] +\n [total_rounded])",
"def output():\n\n print(\"\\n*****************************************************************\")\n print(\"\\nAll transfer data is saved in 'All_transfer_frequencies.csv'\")\n print(\"\\nThe most likely transfers are saved in 'likely_transfers.csv'\")\n\n os.mkdir(\"Transfer_results\")\n os.system(\"mv *.csv Transfer_results\")\n\n print(\"\\nBoth results are saved in the 'Transfer_results' directory\")\n print(\"\\nScript finished running\")\n print(\"\\n*****************************************************************\")",
"def saveCSV(self):\n filename=tkFileDialog.asksaveasfilename(defaultextension='.csv',\n initialdir=os.getcwd(),\n filetypes=[(\"csv\",\"*.csv\"),(\"All files\",\"*.*\")])\n if not filename:\n return\n for m in self.matrices:\n matrix = self.matrices[m] \n if matrix != None: \n c=matrix.csvRepresentation()\n f=open(filename,'w')\n f.write(c)\n f.close()\n return",
"def save_csv(self, save_path=''):\n if not save_path:\n time = datetime.now()\n time = datetime.strftime(time, '%Y-%m-%d_%H:%M:%S')\n filename = time + '.csv'\n save_path = os.path.join(os.path.abspath(os.curdir), filename)\n data = self._get_data()\n with open(save_path, 'wb') as f:\n for line in data:\n f.write(line + '\\n')",
"def get_summary_filename(self):\n fn = os.path.join(SUMMARY_PREFIX,SUMMARY_CURRENT)\n if (os.path.isfile(fn)):\n try:\n fd = open(fn,\"r\")\n fname = fd.read()\n except :\n cmd = \"rm -f %s\"%fn\n result,status = self.cli(cmd)\n return \"\"\n return fname\n return \"\"",
"def export_csv(self):\n outputfile = tkinter.filedialog.asksaveasfilename(\n defaultextension=\".csv\",\n filetypes=((\"comma seperated values\", \"*.csv\"),\n (\"All Files\", \"*.*\")))\n if outputfile:\n tabledata = self.tabs.window.aistracker.create_table_data()\n export.write_csv_file(tabledata, outputfile)\n else:\n raise ExportAborted('Export cancelled by user.')",
"def export_csv(state, out_file=None):\n\n if out_file is None:\n csvfile = sys.stdout\n else:\n csvfile = open(out_file, 'w')\n\n try:\n writer = csv.writer(csvfile)\n for grade in state.grades:\n writer.writerow([grade.student_name(), grade.score(),\n grade.breakdown(state.user_name)])\n finally:\n if out_file is not None:\n csvfile.close()",
"def calculated_data_statistics_csv(my_df, feature_class_name):\n #remove Well ID and UTMs from dataframe\n updated_df = my_df.drop([0, 1, 8], axis = 1)\n raw_csv_name_stats = f\"{feature_class_name}_statistics.csv\"\n header_list = [\"T_min\",\n \"T_raw\",\n \"T_max\",\n \"K_min\",\n \"K_raw\",\n \"K_max\"]\n \n index_list = {0:'Count',\n 1:'Mean',\n 2:'Standard Deviation',\n 3:'Minimum',\n 4:'25th Percentile',\n 5:'Median',\n 6:'75th Percentile',\n 7:'Maximum',\n 8:'Logrithmic Mean',\n 9:'Logrithmic Standard Deviation'}\n log_mean = np.log10(updated_df.mean())\n log_std = np.log10(updated_df.std())\n useful_values = updated_df.describe()\n useful_values = useful_values.append(log_mean, ignore_index = True)\n useful_values = useful_values.append(log_std, ignore_index = True)\n useful_values = useful_values.rename(index = index_list) #gives the index unique names\n useful_values.to_csv(raw_csv_name_stats, header = header_list)",
"def save_performances(self):\r\n nb_datasets = len(self.results)\r\n resu = [[] for k in range(nb_datasets)]\r\n\r\n # fetch results\r\n for k in range(nb_datasets):\r\n best = np.argmax(self.results[k]['mean_test_score'])\r\n resu[k].append(('score', self.results[k]['mean_test_score'][best]))\r\n resu[k] = resu[k] + list(self.results[k]['params'][best].items())\r\n\r\n # write results in csv\r\n for k, resu in enumerate(resu):\r\n with open('results/final_results_{}.csv'.format(k), 'a') as file:\r\n writer = csv.writer(file)\r\n writer.writerow(resu)",
"def write_stats(self):\n with open(self.log_file,'a') as output:\n writer = csv.writer(output)\n n_comps,comp_size = self.connected_component() # Calculate number of connected components (sub-colonies)\n writer.writerow([self.pop_size,\n self.get_average_age(),\n self.get_average_survival(),\n # Nearest neighbor logging disabled for speed\n # Use c++ tool to calculate nearest neighbors after runs\n # or uncomment line below to calculate in python (slower)\n # self.get_average_repro()] + [self.get_average_neighbors(r) for r in range(0,16)] +\n self.get_average_repro()] +\n [n_comps,\",\".join(map(str,comp_size))])",
"def exportEvaluation(self, results, url):\r\n # research\r\n profprint()\r\n if not os.path.exists(url):\r\n print \"creating new results file: \",url\r\n open(url, 'w').close()\r\n myfile = open(url, 'a')\r\n\r\n wr = csv.writer(myfile)\r\n r = numpy.array(results)\r\n if len(r.shape) == 1:\r\n wr.writerow(results)\r\n else:\r\n wr.writerows(results)"
] | [
"0.67156446",
"0.6640688",
"0.65309376",
"0.65059",
"0.6463663",
"0.6374038",
"0.6361525",
"0.632994",
"0.63163084",
"0.62892014",
"0.6272734",
"0.62425643",
"0.62299234",
"0.62213546",
"0.6216482",
"0.6173342",
"0.6157177",
"0.6122196",
"0.61156815",
"0.60866743",
"0.6001306",
"0.59814596",
"0.59759885",
"0.5950357",
"0.594077",
"0.5935059",
"0.5926359",
"0.5910836",
"0.59108216",
"0.5897887"
] | 0.78815484 | 0 |
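A minimal sketch of the save pattern used in the record above: create the target directory, build a timestamped filename, and write with DataFrame.to_csv. The iso_datetime_for_filename helper in the original looks project-specific, so a plain strftime stands in for it here; the function and argument names below are assumptions for illustration:

```python
from datetime import datetime
from pathlib import Path

import pandas as pd


def save_summary_csv(df: pd.DataFrame, name: str, out_dir: str = "results") -> Path:
    # Create the output directory (and any parents) if it does not exist yet.
    Path(out_dir).mkdir(parents=True, exist_ok=True)
    # A timestamp in the filename avoids overwriting the output of earlier runs.
    stamp = datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
    path = Path(out_dir) / f"{name} - summary statistics (generated {stamp}).csv"
    df.to_csv(path, index=False)
    return path


# Example call with a toy frame:
# save_summary_csv(pd.DataFrame({"roi": [1, 2], "mean": [0.4, 0.7]}), "experiment-01")
```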
stack pandas DataFrames logically into a bigger DataFrame, resets the index of the resulting DataFrame to avoid duplicates in the index | def _stack_dataframes(dataframes: List[pd.DataFrame]) -> pd.DataFrame:
return pd.concat(dataframes).reset_index(drop=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_reset_index(self):\n\n # reminder on multi index in columns\n df1 = pd.DataFrame([[1, 3], [2, 4], [11, 33], [22, 44]]).T\n df1.index = pd.Series([1, 2], name=\"idx1\")\n df1.columns = pd.MultiIndex.from_product([['a', 'b'], ['aa', 'bb']], names=['idx_c', 'idx2'])\n\n # same data frame in single command\n df2 = pd.DataFrame([[1, 2, 11, 22], [3, 4, 33, 44]],\n index=pd.Series([1, 2], name=\"idx1\"),\n columns=pd.MultiIndex.from_product([['a', 'b'], ['aa', 'bb']], names=['idx_c', 'idx2']))\n\n df2.loc[:, pd.IndexSlice[:, 'aa']] # getting all info using the second level of the column index out of it\n\n df2.T.reset_index().set_index(['idx_c', 'idx2']) # all together a nop\n self.assertTrue(df2.T.equals(df2.T.reset_index().set_index(['idx_c', 'idx2'])))\n df2.T.reset_index(0) # pull out first index level (idx_c)\n df2.T.reset_index(1) # pull out second index level (idx2)",
"def _concat():\n df1 = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'],\n 'B': ['B0', 'B1', 'B2', 'B3'],\n 'C': ['C0', 'C1', 'C2', 'C3'],\n 'D': ['D0', 'D1', 'D2', 'D3']},\n index=[0, 1, 2, 3])\n\n df2 = pd.DataFrame({'A': ['A4', 'A5', 'A6', 'A7'],\n 'B': ['B4', 'B5', 'B6', 'B7'],\n 'C': ['C4', 'C5', 'C6', 'C7'],\n 'D': ['D4', 'D5', 'D6', 'D7']},\n index=[4, 5, 6, 7])\n\n df3 = pd.DataFrame({'A': ['A8', 'A9', 'A10', 'A11'],\n 'B': ['B8', 'B9', 'B10', 'B11'],\n 'C': ['C8', 'C9', 'C10', 'C11'],\n 'D': ['D8', 'D9', 'D10', 'D11']},\n index=[8, 9, 10, 11])\n frames = [df1, df2, df3]\n result = pd.concat(frames)\n print(result)\n result = pd.concat(frames, keys=['x', 'y', 'z'])\n print(result)\n print('-' * 20)\n df4 = pd.DataFrame({'B': ['B2', 'B3', 'B6', 'B7'],\n 'D': ['D2', 'D3', 'D6', 'D7'],\n 'F': ['F2', 'F3', 'F6', 'F7']},\n index=[2, 3, 6, 7])\n result = pd.concat([df1, df4], axis=1)\n print(result)\n print('*' * 40)\n result = pd.concat([df1, df4], axis=1, join='inner') # 取交集\n print(result)\n result = pd.concat([df1, df4], axis=1, join_axes=[df1.index])\n print(result)",
"def window_stack(df, width=3):\n n = df.shape[0]\n a = np.hstack(list(df.values[(width-1-i):(n-i)] for i in range(0, width)))\n\n times = [ ('t' if not idx else 't-{:d}'.format(idx)) for idx in range(width) ]\n columns = pd.MultiIndex.from_product((times, df.columns), names=('time', 'location'))\n\n return pd.DataFrame(a, index=df.index[width-1:], columns=columns)",
"def _adjust_indices(left_df, right_df):\n index_diff = left_df.shape[0] - right_df.shape[0]\n if index_diff > 0:\n # right_df is shorter\n empty_df = pd.DataFrame(\n np.full((np.abs(index_diff), right_df.shape[1]), np.nan),\n columns=right_df.columns,\n )\n right_df = pd.concat((empty_df, right_df), axis=0).reset_index(drop=True)\n elif index_diff < 0:\n # left_df is shorter\n empty_df = pd.DataFrame(\n np.full((np.abs(index_diff), left_df.shape[1]), np.nan),\n columns=left_df.columns,\n )\n left_df = pd.concat((empty_df, left_df), axis=0).reset_index(drop=True)\n\n return left_df, right_df",
"def reframe_df(previous_df, processed_data):\n idx = previous_df.index\n col = previous_df.columns\n df = pd.DataFrame(data=processed_data, index=idx, columns=col)\n return df",
"def cross(df1, df2, **kwargs):\r\n df1['_tmpkey'] = 1\r\n df2['_tmpkey'] = 1\r\n\r\n res = pd.merge(df1, df2, on='_tmpkey', **kwargs).drop('_tmpkey', axis=1)\r\n res.index = pd.MultiIndex.from_product((df1.index, df2.index))\r\n\r\n df1.drop('_tmpkey', axis=1, inplace=True)\r\n df2.drop('_tmpkey', axis=1, inplace=True)\r\n\r\n return res",
"def cross(df1, df2, **kwargs):\r\n df1['_tmpkey'] = 1\r\n df2['_tmpkey'] = 1\r\n\r\n res = pd.merge(df1, df2, on='_tmpkey', **kwargs).drop('_tmpkey', axis=1)\r\n res.index = pd.MultiIndex.from_product((df1.index, df2.index))\r\n\r\n df1.drop('_tmpkey', axis=1, inplace=True)\r\n df2.drop('_tmpkey', axis=1, inplace=True)\r\n return res",
"def merge (*a_data) :\n i = 0\n for loc_data in a_data :\n i += 1\n if i == 1 :\n loc_new_df = loc_data\n else :\n loc_new_df = __pd.merge(loc_new_df,loc_data,left_index=True,right_index=True)\n return loc_new_df",
"def dataframe_crossjoin(df1, df2, **kwargs):\n df1['_tmpkey'] = 1\n df2['_tmpkey'] = 1\n\n res = pd.merge(df1, df2, on='_tmpkey', **kwargs).drop('_tmpkey', axis=1)\n res.index = pd.MultiIndex.from_product((df1.index, df2.index))\n\n df1.drop('_tmpkey', axis=1, inplace=True)\n df2.drop('_tmpkey', axis=1, inplace=True)\n\n return res",
"def drop_multindex(df):\n\n if isinstance(df.index, pd.MultiIndex):\n df_flat = df.reset_index()\n # keep index if False\n else:\n df_flat = df.copy()\n return df_flat",
"def refresh_index(df):\n if isinstance(df.index, pandas.MultiIndex):\n return df.reset_index().set_index(df.index.names)\n else:\n return df",
"def create_shifted_df(df: pd.DataFrame, periods: int = 1) -> pd.DataFrame:\n data_df_shifted = df.shift(periods=periods)\n data_df_shifted = data_df_shifted.combine_first(df).add_suffix(\"_shifted\")\n return pd.concat([df, data_df_shifted], axis=1, join=\"inner\").reset_index(\n drop=True\n )",
"def reset_column_index(df: DataFrame, level: List[Any], drop: bool=True, inplace: bool=False):\n \n if inplace:\n if drop:\n df.columns = df.columns.droplevel(level)\n else:\n raise NotImplementedError\n return df\n else:\n if drop:\n result = df.copy()\n result.columns = df.columns.droplevel(level)\n else:\n result = df.stack(level)\n return result",
"def reset_index(self):\n self.df = self.df.reset_index()",
"def combined_df(self) -> pd.DataFrame:\n return pd.concat([self.data, self.latest_data.reset_index()], ignore_index=True)",
"def stack_index(self, index, on_top=True, axis=1, inplace=False):\n\n def apply_func(obj_index):\n if on_top:\n return index_fns.stack_indexes(index, obj_index)\n return index_fns.stack_indexes(obj_index, index)\n\n return self.apply_on_index(apply_func, axis=axis, inplace=inplace)",
"def stack(self, level, dropna):\n return DataFrameDefault.register(pandas.DataFrame.stack)(\n self, level=level, dropna=dropna\n )",
"def mergeDataframes(datasets, cut):\n #subset = []tion\n subset = [dataset.iloc[:, index:] for dataset in datasets[1:]]\n \n first = subset[0].join(subset[1:], how = 'outer')\n finance = datasets[0].iloc[:, index:].join(first, how = 'left') \n # don't need to cut as only using relatively recent data for training\n #finance = finance[finance.index > cut]\n return finance",
"def _stack_serieses(serieses: List[pd.Series]) -> pd.DataFrame:\n return pd.concat(serieses, axis=\"columns\").T",
"def merge_survey(self) -> pd.DataFrame:\n\n df_list = []\n for survey_id in self.survey_id:\n self.log.debug(f\"Reading: {survey_id}\")\n temp_df = self.get_survey_responses(survey_id)\n df_list.append(temp_df[2:])\n\n df_col = reduce(pd.Index.union, (df.columns for df in df_list))\n\n merged_df = pd.DataFrame()\n for df in df_list:\n temp_df = df.reindex(columns=df_col, fill_value=0)\n merged_df = merged_df.append([temp_df], ignore_index=True)\n return merged_df",
"def reset_index(self, **kwargs): # noqa: PR02\n return DataFrameDefault.register(pandas.DataFrame.reset_index)(self, **kwargs)",
"def augment_dataframe(self, df: pd.DataFrame) -> pd.DataFrame:",
"def broadcast_merge(s,df):\n \n return pd.merge(pd.DataFrame(data=[s.values]*len(df),\n columns=s.index,\n index=df.index),\n df, left_index=True, right_index=True)",
"def combine_position_dataframes(dataframe1, dataframe2):\n\n # check that the dataframes have the same number of columns\n print(\"Dimensions of dataframe1: \", dataframe1.shape)\n print(\"Dimensions of dataframe2: \", dataframe2.shape)\n\n frames = [dataframe1, dataframe2]\n\n combined_dataframe = pandas.concat(frames)\n\n dataframe1.drop(dataframe1.index, inplace=True) # Delete data from dataframe to save memory\n dataframe2.drop(dataframe2.index, inplace=True) # Delete data from dataframe to save memory\n\n # confirm that the dataframes no longer exist (saving memory)\n print(\"Dimensions of dataframe1: \", dataframe1.shape)\n print(\"Dimensions of dataframe2: \", dataframe2.shape)\n\n # check that all rows of both dataframes have been combined into the new dataframe. Sort by date and time.\n print(\"Dimensions of combined dataframe: \", combined_dataframe.shape)\n combined_dataframe_sorted = combined_dataframe.sort_values('date_time')\n\n print(\"Sample of combined dataframe: \", combined_dataframe_sorted.sample(10))\n\n return combined_dataframe_sorted",
"def __merge_dataframes(dataframes: List[pd.DataFrame], empty_df: pd.DataFrame = pd.DataFrame(),\n sorted_column: Optional[str] = None) -> pd.DataFrame:\n for df in dataframes:\n empty_df = empty_df.append(df, ignore_index=True)\n empty_df.drop_duplicates(keep='first')\n if sorted_column is not None:\n empty_df.sort_values(by=[sorted_column])\n return empty_df",
"def extend_dataset(intial_df):\n all_data = []\n for i,row in intial_df.iterrows():\n all_data.extend(create_all_combination(row))\n\n extended_results = pd.DataFrame(all_data)\n return extended_results",
"def coerce( self ):\n df = self.copy()\n gcond = ['neighbor', 'pdb'] if 'source' not in df.columns else ['neighbor', 'pdb', 'source']\n for frame_id, frame in df.groupby('frame'):\n g = frame.groupby(gcond)\n neighbors = len(g)\n neighbor = list(g.ngroup() + 1)\n position = list(g.cumcount() + frame_id)\n df.loc[(df['frame'] == frame_id), 'neighbors'] = [neighbors] * frame.shape[0]\n df.loc[(df['frame'] == frame_id), 'neighbor'] = neighbor\n df.loc[(df['frame'] == frame_id), 'position'] = position\n return df",
"def build_multi_index_data_frame(data_frames: list, sub_header: list, header_columns: list) -> pd.DataFrame:\r\n\r\n tuples = build_multi_index_tuples(header_columns, sub_header)\r\n\r\n multi_header = pd.MultiIndex.from_tuples(tuples)\r\n\r\n df = pd.concat(data_frames, axis=1).loc[:, dict(tuples).keys()]\r\n\r\n df.columns = multi_header\r\n\r\n return df",
"def concat_without_duplicates(dfs):\n temp_dfs = []\n for temp_df in dfs:\n # Joining the different dfs resulted in a df with more rows. This is why\n # I do this. More info on https://stackoverflow.com/a/34297689/5031446\n # This removes rows with duplicated indexes and keeps just the last observation\n temp_df = temp_df[~temp_df.index.duplicated(keep='last')]\n temp_dfs.append(temp_df)\n result = pd.concat(temp_dfs, axis=1)\n\n return result",
"def data_structure():\n\n items = [1.0, 2.0, 3.0, 4.0, 5.0 ]\n s = pd.Series(items, index=['a', 'b', 'c', 'd', 'e'])\n # s = pd.Series(np.random.randn(5), index=['a', 'b', 'c', 'd', 'e'])\n print s\n s = pd.Series(items)\n print s\n\n d= {'one': [1.0, 2.0, 3.0, 4.0], 'two': [4.0, 3.0, 2.0, 1.0]}\n\n df = pd.DataFrame(d)\n print df\n df = pd.DataFrame(d, index=['a', 'b', 'c', 'd'])\n print df\n\n\n data2 = [{'a': 1, 'b': 2}, {'a': 5, 'b': 10, 'c': 20}]\n df = pd.DataFrame(data2)\n\n print df"
] | [
"0.5980725",
"0.5887287",
"0.5851793",
"0.5812862",
"0.5757257",
"0.57415146",
"0.57319605",
"0.5689936",
"0.56405115",
"0.56170523",
"0.56154037",
"0.5591578",
"0.5586357",
"0.5564776",
"0.55478466",
"0.554424",
"0.5535979",
"0.54988015",
"0.5480869",
"0.54719794",
"0.5454091",
"0.5413194",
"0.53967744",
"0.53953075",
"0.5393822",
"0.53816986",
"0.53804",
"0.5370093",
"0.5324112",
"0.5324096"
] | 0.66242844 | 0 |
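The positive snippet above reduces to pd.concat plus reset_index, and the record that follows applies the same function column-wise to Series. A short sketch of both behaviours on invented toy data:

```python
import pandas as pd

df_a = pd.DataFrame({"x": [1, 2], "y": [3, 4]})
df_b = pd.DataFrame({"x": [5, 6], "y": [7, 8]})

# Row-wise stacking; without reset_index(drop=True) the result would keep the
# duplicated input indexes 0, 1, 0, 1.
stacked = pd.concat([df_a, df_b]).reset_index(drop=True)
print(stacked.index.tolist())  # [0, 1, 2, 3]

# The neighbouring Series record uses the same function column-wise: concatenate
# the Series along axis="columns", then transpose so each Series becomes one row.
s1 = pd.Series({"x": 1, "y": 3}, name="a")
s2 = pd.Series({"x": 5, "y": 7}, name="b")
rows = pd.concat([s1, s2], axis="columns").T
print(rows.shape)  # (2, 2)
```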
stack pandas Series logically into a DataFrame | def _stack_serieses(serieses: List[pd.Series]) -> pd.DataFrame:
return pd.concat(serieses, axis="columns").T | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def data_structure():\n\n items = [1.0, 2.0, 3.0, 4.0, 5.0 ]\n s = pd.Series(items, index=['a', 'b', 'c', 'd', 'e'])\n # s = pd.Series(np.random.randn(5), index=['a', 'b', 'c', 'd', 'e'])\n print s\n s = pd.Series(items)\n print s\n\n d= {'one': [1.0, 2.0, 3.0, 4.0], 'two': [4.0, 3.0, 2.0, 1.0]}\n\n df = pd.DataFrame(d)\n print df\n df = pd.DataFrame(d, index=['a', 'b', 'c', 'd'])\n print df\n\n\n data2 = [{'a': 1, 'b': 2}, {'a': 5, 'b': 10, 'c': 20}]\n df = pd.DataFrame(data2)\n\n print df",
"def expand_series(ser, columns):\n return ser.to_frame(columns[0]).reindex(columns=columns).ffill(axis=1)",
"def binarize(series):\n name = series.name\n df = pd.DataFrame()\n for category in series.value_counts().index:\n df[category] = (series == category)\n return df",
"def stack(self, level, dropna):\n return DataFrameDefault.register(pandas.DataFrame.stack)(\n self, level=level, dropna=dropna\n )",
"def sample_series(self, series, append_frame=None):\n\n columns, values = self.get_readings(series)\n\n dataframe = DataFrame(values, columns=columns)\n dataframe = self.format_index(dataframe, self.ENERGY_DB_INDEX)\n\n # https://pandas.pydata.org/pandas-docs/stable/merging.html\n if append_frame is not None:\n # dataframe = pandas.concat([dataframe, input_frame], axis=1, join='inner', join_axes=[input_frame.index])\n dataframe = pandas.merge(append_frame, dataframe, on=['time', 'time'])\n # print(dataframe)\n\n return dataframe",
"def pandas_series(arr, nan_to_null=False):\n import pandas as pd\n return pd.Series(arr, copy=False)",
"def CombineSeries(*args):\r\n df = pd.concat([*args], axis=1)\r\n\r\n return df",
"def to_series(self) -> pd.Series:\n df = self.to_dataframe(\"* values *\")\n dims = self.dims_list\n if len(dims) == 1:\n dims = dims[0]\n return df.set_index(dims)[\"* values *\"]",
"def stack_table(A: pd.DataFrame) -> pd.DataFrame:\r\n A = pd.DataFrame(A.stack(dropna=False))\r\n A.columns = ['factor']\r\n return A",
"def make_series(x, y, **options):\n underride(options, name='values')\n if isinstance(y, pd.Series):\n y = y.values\n series = pd.Series(y, index=x, **options)\n series.index.name = 'index'\n return series",
"def window_stack(df, width=3):\n n = df.shape[0]\n a = np.hstack(list(df.values[(width-1-i):(n-i)] for i in range(0, width)))\n\n times = [ ('t' if not idx else 't-{:d}'.format(idx)) for idx in range(width) ]\n columns = pd.MultiIndex.from_product((times, df.columns), names=('time', 'location'))\n\n return pd.DataFrame(a, index=df.index[width-1:], columns=columns)",
"def series_from_dataframe(df, index_column: str, value_column: str=None):\n\n if len(df.columns) > 2:\n df = df[[index_column, value_column]].copy()\n else:\n df = df.copy()\n df.set_index(index_column, inplace=True)\n sr = df.squeeze()\n sr.name = value_column\n return sr",
"def date_features(s: pd.Series, result: Optional[pd.DataFrame] = None) -> pd.DataFrame:\n if result is None:\n result = pd.DataFrame(s, copy=False)\n index = cast(pd.DatetimeIndex, s.index)\n\n result[\"year\"] = index.year\n result[\"month\"] = index.month\n result[\"day\"] = index.day\n result[\"dayofweek\"] = index.dayofweek\n result[\"dayofyear\"] = index.dayofyear\n result[\"quarter\"] = index.quarter\n result[\"season\"] = _map(index.month, _SEASON_MAP)\n result[\"weekofyear\"] = index.weekofyear\n try:\n # Work around numpy Deprecation Warning about parsing timezones\n # by converting to UTC and removing the tz info.\n dates = index.tz_convert(None).to_numpy()\n except TypeError:\n # No timezone.\n dates = index.to_numpy()\n first_of_month = pd.to_datetime(dates.astype(\"datetime64[M]\"))\n week_of_month = np.ceil((first_of_month.dayofweek + index.day) / 7.0)\n result[\"weekofmonth\"] = week_of_month.astype(int)\n # result[\"is_holiday\"] = ?\n # result[\"holiday_types\"] = ?\n result[\"is_weekend\"] = index.dayofweek >= 5\n result[\"is_leap_year\"] = index.is_leap_year\n result[\"is_leap_day\"] = (index.month == 2) & (index.day == 29)\n result[\"is_month_end\"] = index.is_month_end\n result[\"is_quarter_end\"] = index.is_month_end & (index.month % 4 == 3)\n\n return result",
"def to_real_series(self, data: pd.Series) -> pd.Series:\n ...",
"def parse_result_series(result):\n if isinstance(result, np.ndarray):\n return result\n\n if result is None or not len(result):\n return None\n\n dates, values = result\n return pd.DataFrame({0:dates.astype(int)/1000,1:values})",
"def to_work_series(self, data: pd.Series) -> pd.Series:\n ...",
"def as_series(self, arraylike: Iterable) -> pd.Series:\n return pd.Series(arraylike, index=self.data.index)",
"def as_series(self) -> \"pd.Series\":\n import pandas as pd\n\n data = {\"_row_id\": self.id, \"_row_num\": self.num, **self.as_dict()}\n series = pd.Series(data)\n return series",
"def index_reformat(series: pd.Series, preserve_order: bool) -> pd.DataFrame:\n series = series.copy()\n series = rewrite_index(series)\n series.index = remove_constant_levels(series.index)\n series.index.names = [LEVEL_NAMES.get(name, name) for name in series.index.names]\n series = series.rename(index=pretty_rewrite)\n\n # Preserve order of inputs\n df = series.unstack(\"Target\")\n if preserve_order:\n df = df.reindex(columns=series.index.get_level_values(\"Target\").unique())\n for level in series.index.names:\n kwargs = {}\n if isinstance(df.index, pd.MultiIndex):\n kwargs = dict(level=level)\n if level != \"Target\":\n df = df.reindex(index=series.index.get_level_values(level).unique(), **kwargs)\n else:\n df = df.sort_index()\n return df",
"def to_pandas(self, **kwargs) -> pd.Series | pd.DataFrame:\n\n if self.n_items != 1:\n return self.to_dataframe(**kwargs)\n else:\n return self[0].to_pandas(**kwargs)",
"def to_pandas_series_rdd(self):\n pd_index = self.index().to_pandas_index()\n return self.map(lambda x: (x[0], pd.Series(x[1], pd_index)))",
"def _wrap_in_pandas_container(\n data_to_wrap,\n *,\n columns,\n index=None,\n):\n if issparse(data_to_wrap):\n raise ValueError(\"Pandas output does not support sparse data.\")\n\n if callable(columns):\n try:\n columns = columns()\n except Exception:\n columns = None\n\n pd = check_pandas_support(\"Setting output container to 'pandas'\")\n\n if isinstance(data_to_wrap, pd.DataFrame):\n if columns is not None:\n data_to_wrap.columns = columns\n return data_to_wrap\n\n return pd.DataFrame(data_to_wrap, index=index, columns=columns, copy=False)",
"def series_view(self, **kwargs): # noqa: PR02\n return SeriesDefault.register(pandas.Series.view)(self, **kwargs)",
"def __call__(self, index: pd.Index) -> pd.DataFrame:\n if not isinstance(self.values, (list, tuple)):\n values = pd.Series(\n self.values,\n index=index,\n name=self.value_columns[0] if self.value_columns else None,\n )\n else:\n values = dict(\n zip(self.value_columns, [pd.Series(v, index=index) for v in self.values])\n )\n return pd.DataFrame(values)",
"def _convert_df_to_series(df):\n if isinstance(df, pd.DataFrame) and df.shape[1] == 1:\n return df.iloc[:, 0]\n elif isinstance(df, pd.DataFrame) and df.shape[1] > 1:\n raise TypeError('DataFrame cannot be converted to a Series as it contains more than 1 column.')\n return df",
"def transform_series(obj):\n vals = obj.values\n return transform_array(vals)",
"def SweepFrame(*args, **kwargs):\n underride(kwargs, dtype=float)\n return pd.DataFrame(*args, **kwargs)",
"def sandwich(self, s):\r\n s_ = pd.DataFrame(s)\r\n index = s_.index\r\n sandwich_ = sandwich(self.data.values, s_.values)\r\n if len(sandwich_.shape) == 0: \r\n sandwich_ = [sandwich_]\r\n sandwich_ = pd.DataFrame(sandwich_, index=index, columns=index)\r\n return self.__class__(sandwich_)",
"def to_dataframe(self, include_metadata: bool = True) -> pd.DataFrame:\n # Get all our data first with async\n # Note that all our pandas work will tax CPU so we wouldn't expect any\n # performance gains from doing the data parsing as a callback\n records = self.to_dict()\n data = []\n for series in records:\n df = pd.DataFrame(series.pop(\"data\"), columns=[\"period\", \"value\"])\n if include_metadata:\n df = df.assign(**series)\n data.append(df)\n return pd.concat(data, ignore_index=True)",
"def _timeseries_to_dataframe_value(timeseries, name):\n # Column headers\n columns = [\n [name],\n [timeseries.instance_or_contract_dataframe_column_header()],\n ['']\n ]\n # Convert a time series of (date, value)\n df = pd.DataFrame.from_records(\n ((v.value,) for v in timeseries),\n columns=columns,\n index=[v.date for v in timeseries],\n )\n df.index.name = 'date'\n return df"
] | [
"0.6693489",
"0.64312357",
"0.6289627",
"0.61618704",
"0.59844136",
"0.59790695",
"0.5950793",
"0.5896647",
"0.58869344",
"0.58781815",
"0.577991",
"0.5733921",
"0.56654763",
"0.566396",
"0.56531364",
"0.56504464",
"0.5612756",
"0.56065685",
"0.5595002",
"0.5558159",
"0.55369985",
"0.55344975",
"0.5499257",
"0.54340345",
"0.54299086",
"0.5423657",
"0.53765875",
"0.53391176",
"0.5338096",
"0.533013"
] | 0.76580703 | 0 |
Load instruments from configpath | def _load(self) -> list[Instrument]:
logger.info("Loading config...")
self._config = yml.load(self.configpath)
instruments, modespec = self._config["instruments"], self._config["modes"]
logger.success(f"Found {len(instruments)} instruments, {len(modespec)} modes") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_load_configs_simulation(self):\n global locator, config_paths\n locator.load_config(config_paths[1])\n\n self.assertEqual(locator.config['routines'], ['simulate'])\n self.assertEqual(locator.config['driver'],\n {\n 'type': 'SimDriver',\n 'kwargs': {\n \"arg\": \"val\"\n }\n })",
"def loadConfigs(self):\n self.onLoadConfig(urlopen(self.inipath))",
"def load_from_config(self, **config: Any) -> None:\n for key, filename in config.items():\n self.load(filename, key)",
"def test_load_configs_testing(self):\n global locator, config_paths\n locator.load_config(config_paths[0])\n\n self.assertEqual(locator.config['routines'], ['debug'])\n self.assertEqual(locator.config['driver'],\n {\n 'type': 'TestDriver',\n 'kwargs': {\n 'verbose': False\n }\n })",
"def load_config(self):\n pass",
"def load_cfg(self, path):\n if os.path.exists(path):\n self.djs_core = Librarian(path)\n if self.djs_core.load_cfg():\n self.plugins = self.djs_core.debug_info()['plugins']\n tmp = [plug.split(\":\") for plug in self.plugins]\n result = {}\n for lis in tmp:\n if not lis[0] in result:\n result[lis[0]] = []\n result[lis[0]].append(lis[1])\n self.info = dict2table(result)\n print(\"Load done.\\n\")\n else:\n print(\"Configuration file path not found.\\n\")",
"def load_config(path):\n with open(path, \"rt\") as reader:\n config = pyaml.yaml.load(reader, Loader=pyaml.yaml.Loader)\n if config[\"regularization\"][\"type\"] is None or config[\"regularization\"][\"type\"] == [None]:\n config[\"regularization\"][\"type\"] = []\n if \"attention\" in config[\"regularization\"][\"type\"]:\n raise NotImplementedError\n\n config[\"experiment\"] = os.path.splitext(os.path.basename(path))[0]\n config[\"ckpt_dir\"], config[\"runs_dir\"] = init_output_dirs(config[\"experiment\"])\n \n return config",
"def load(self, configs, container):\n pass;",
"def load_from_conf(self):\r\n raise NotImplementedError",
"def preload_all_configs(self):\n for _, _, filenames in os.walk(self.configDir):\n for filename in filenames:\n if filename[-3:] == \".py\" and filename != \"__init__.py\":\n configID = filename[0:-3]\n self.load_config(configID)",
"def _loadConfigFiles(self):\n for conf in self._configFiles():\n self.configManager.load(conf)",
"def load_from_conf(self):\n raise NotImplementedError",
"def load_config_files(cls, config_files):\n \n config = Config()\n for configfile in config_files:\n # Note: each file loaded by the config will overlay on the previously loaded files\n config.loadfile(configfile)\n return config.sim",
"def load_configuration(self, path):\n with open(path) as conf_file:\n if path.name not in self.configuration:\n self.configuration[path.name] = {}\n self.configuration[path.name] = json.load(conf_file)",
"def load_analysis_path():\n import json\n import os\n with open(os.path.join(os.path.dirname(__file__), \"analysis_config.json\")) as my_file:\n analysis_paths = json.load(my_file)\n return analysis_paths",
"def load_configurations() :\n\n local_path = os.path.dirname(os.path.abspath(__file__))\n print(local_path)\n file_path = local_path + os.sep + 'conf.ini'\n parser = configparser.ConfigParser()\n\n if os.path.exists(file_path) :\n config = parser.read(file_path)\n else :\n parser['PATH'] = {}\n parser['PATH']['PATH_TO_DB'] = os.path.expanduser('~/inlusio_data/InlusioDB_Juni_2015.sqlite')\n parser['PATH']['PHYSIO_PATH'] = os.path.expanduser('~/inlusio_data')\n print('Creating new configuration file!!!')\n print('Please fit conf.ini to your local data path!')\n with open(file_path, 'w') as configfile:\n parser.write(configfile)\n\n return parser",
"def load_config(config_path):\n global config\n with open(config_path) as config_file:\n config = munchify(yaml.safe_load(config_file))",
"def load ( self ):\n files = config.get_or_fail ( 'REPO.config_files' )\n for f in files:\n self.load_file ( f )",
"def load():\n # get (or create) config path\n p = initialize()\n return load_config(open(p['config']))",
"def load_config(self, path=\"\"):\n if not path:\n if not os.path.isdir(CONFIG_DIR):\n os.makedirs(CONFIG_DIR)\n file_path = QtGui.QFileDialog.getOpenFileName(self,\n \"Open Config\",\n CONFIG_DIR,\n \"Config Files (*.cfg)\")\n else:\n file_path = path\n self._load_state(file_path)\n #self.write_text(\"Loaded config @ {}\".format(file_path))",
"def load(self, config_instance):\r\n pass",
"def load_config():\n\t\ttry:\n\t\t\tconf = ConfigParser()\n\n\t\t\tconfig_path = get_config_path()\n\t\t\tconf.read(config_path)\n\n\t\t\t# save references to conf, and config_path in class variables\n\t\t\tConfig.config_path = config_path\n\t\t\tConfig.conf = conf\n\n\t\t\tConfig.source_dir = conf.get('paths', 'source_dir')\n\t\t\tConfig.lyrics_dir = conf.get('paths', 'lyrics_dir')\n\n\t\t\tConfig.save_to_file = conf.getboolean('actions', 'save_to_file')\n\t\t\tConfig.save_to_tag = conf.getboolean('actions', 'save_to_tag')\n\n\t\t\tConfig.overwrite = conf.getboolean('actions', 'overwrite')\n\n\t\t\t# Load all the sources\n\t\t\tConfig.lyric_wikia = conf.getboolean('sources', 'lyric_wikia')\n\t\t\tConfig.musix_match = conf.getboolean('sources', 'musix_match')\n\t\t\tConfig.lyricsmode = conf.getboolean('sources', 'lyricsmode')\n\t\t\tConfig.az_lyrics = conf.getboolean('sources', 'az_lyrics')\n\n\t\t\t# Loading this with user config, we need to call the load_config only once at start.\n\t\t\tConfig.lyric_files_in_dir = glob2.glob(os.path.join(Config.lyrics_dir, '**/*.txt'))\n\n\n\t\t# Catch file handling errors\n\t\texcept IOError as e:\n\t\t\tprint('Unable to load config.')\n\t\t\tprint(e)",
"def load_experiment(self):\n load_dir = select_dir(os.getcwd())\n if load_dir is not None:\n if os.path.isfile(os.path.join(load_dir, 'conf', 'config')):\n self.load_main(load_dir)\n else:\n msg_window('missing conf/config file, not experiment directory')\n return\n\n if self.t is None:\n self.t = Tabs(self)\n self.vbox.addWidget(self.t)\n self.t.clear_configs()\n self.t.load_conf(load_dir)\n\n self.set_experiment(True)\n else:\n msg_window('please select valid conf directory')",
"def load_configurations(self):\n path = os.path.join(self.user_directory, \"config\")\n configurations = {\n \"data_connector\": DataConnectorConfiguration,\n \"formats\": FormatsConfiguration,\n \"server\": ServerConfiguration,\n }\n\n for filename, configuration in configurations.items():\n config_path = os.path.join(path, filename + \".yml\")\n configuration = configuration.read_YAML(config_path)\n self.configurations[filename] = configuration",
"def _load (cls, *files):\n config = ConfigParser.ConfigParser()\n config.read(files)\n \n metadata = {}\n if config.has_section(\"metadata\"):\n for key in config.options(\"metadata\"):\n metadata[key] = config.get(\"metadata\", key)\n\n processes = {}\n datasources = {}\n for section in config.sections():\n if section == \"metadata\": continue\n if section.startswith(\"process_\"):\n try:\n processes[section[8:]] = FeatureServer.Processing.loadFromSection(config, section)\n except Exception, E:\n pass \n else: \n datasources[section] = cls.loadFromSection(\n config, section, 'DataSource')\n\n return cls(datasources, metadata, processes)",
"def load_data_from_config(self):\n\n config_file_name = \"cicada/config/config.yaml\"\n config_dict = None\n self.labels = []\n self.to_add_labels = []\n if os.path.isfile(config_file_name):\n with open(config_file_name, 'r') as stream:\n config_dict = yaml.safe_load(stream)\n print(f\"config_dict {config_dict}\")\n if (config_dict is not None) and config_dict.get(\"dir_name\"):\n self.load_data_from_dir(dir_name=config_dict[\"dir_name\"], method='clear')",
"def testLoadConfiguration(self):\n loader = Loader()\n loader.loadFromDirectory(self.__exampleDirectory)\n\n self.assertEqual(len(loader.taskHolders()), 1)\n\n self.assertEqual(\n os.path.basename(loader.taskHolders()[0].var('contextConfig')),\n 'config.hjson'\n )",
"def load(path: str, config_cls):\n\n return cfg.load(path, config_cls)",
"def _load_config(self):\n\n for p in self._paths:\n if p.exists():\n with p.open() as f:\n c = yaml.safe_load(f)\n if c:\n c['_config_file'] = str(p)\n return c\n else:\n raise ConfigurationError(f\"Didn't find a config file in paths: {self._paths}\")\n\n return {}",
"def load_from_config(self, **config: Any) -> None:\n for key, policy_spec in config.items():\n modelfile, templatefile = policy_spec\n self.load(modelfile, templatefile, key)"
] | [
"0.63088006",
"0.62734544",
"0.60392916",
"0.59869903",
"0.58425516",
"0.5792564",
"0.57058764",
"0.56768847",
"0.56603354",
"0.5657189",
"0.56550276",
"0.56537586",
"0.5621684",
"0.5609595",
"0.5587608",
"0.5586826",
"0.55499035",
"0.5522914",
"0.5517707",
"0.5516953",
"0.54643613",
"0.5454913",
"0.5450706",
"0.54503214",
"0.54438996",
"0.54210806",
"0.5406843",
"0.53966624",
"0.5388618",
"0.5362996"
] | 0.7140976 | 0 |
Expose unique instrument classes found in config | def _expose(self) -> None:
classes = {instrument.__class__ for instrument in self._config["instruments"]}
for class_ in classes:
pyro.expose(class_)
logger.success(f"Exposed {len(classes)} instrument class(es): {classes}") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _config_classes(self):\n pass",
"def config(self) -> InstrumentConfig:\n ...",
"def register_instrument(instrument):\n config.INSTRUMENTS.append(instrument.id)\n for m in instrument.modules:\n register_module(m)",
"def _instrument_class(self, cls):\n newcls = type('InstrumentedClass', (cls, MapRedBase), {})\n return newcls",
"def instrument_type(self):\n \n raise NotImplementedError()",
"def register_config(cls):\n _configs[cls.__name__] = cls",
"def __init__(self, configGroups):\r\n self.config = {cls:configGroup[classes] for configGroup in configGroups for classes in configGroup for cls in IterWrapper(classes)}",
"def register_classes():\n AnalyzeExtension.register_class()\n AnalyzeExtension_SG.register_class()",
"def classify_instrument(self):\n print(self._identity)\n if self._identity in multimeters:\n print('Instrument in multimeter list')\n return Multimeter.from_serial_instrument(self)\n elif self._identity in function_generators:\n print('Instrument in function generator list')\n return FunctionGenerator.from_serial_instrument(self)\n elif self._identity in power_supplies:\n print('Instrument in power supply list')\n return PowerSupply.from_serial_instrument(self)\n else:\n return None",
"def _instrument(self, **kwargs):\n klasses = get_base_estimators(packages=self.packages)\n attributes = kwargs.get(\"attributes\")\n for _, klass in klasses.items():\n if issubclass(klass, self.exclude_classes):\n logger.debug(\"Not instrumenting (excluded): %s\", str(klass))\n else:\n logger.debug(\"Instrumenting: %s\", str(klass))\n for method_name in self.methods:\n if hasattr(klass, method_name):\n self._instrument_class_method(\n estimator=klass,\n method_name=method_name,\n attributes=attributes,\n )",
"def instr_dict():\n out = base_dict()\n out['mro']['current'] = ['Instrument']\n ao(out, 'nSamples', 'Integer', 1, 'Number of samples', readLevel=3)\n ao(out, 'devices', 'List', attr=['Hidden'])\n ao(out, 'initTest', 'Progress', attr=['Hidden'])\n ao(out, 'closingTest', 'Progress', attr=['Hidden'])\n return out",
"def _serve(self) -> None:\n for instrument in self._config[\"instruments\"]:\n uri = self._daemon.register(instrument, objectId=str(instrument))\n self._services[instrument.id] = str(uri)\n logger.success(f\"Registered {instrument} at {uri}\")\n self.uri = self._daemon.register(self, objectId=self.servername)\n logger.success(f\"Registered self at {self.uri}\")",
"def test_instrument(self, nexus_base):\n assert isinstance(nexus_base.instrument, nx.NXinstrument)",
"def identify_class(self, cls):",
"def account_instruments(self, instruments: Instruments = sentinel):\n pass",
"def test_no_double_configuration(self):\n class A(pyperry.Base):\n def _config(cls):\n cls.add_processor('read', 'some processor')\n self.assertEqual(len(A.adapter_config['read']['_processors']), 1)\n\n class B(A): pass\n self.assertEqual(len(B.adapter_config['read']['_processors']), 1)",
"def sensor_classes(self):\n raise NotImplementedError",
"def setup_class(klass):",
"def setup_class(klass):",
"def add_instrument(self, mount, instrument):\n pass",
"def get_cls_dict(config_path):\n return {i: n for i, n in enumerate(get_names(config_path))}",
"def post_instrument_class(self, mapper):\n pass",
"def register_source(klass):\n EVENT_SOURCES[klass.__name__] = klass",
"def _register_outliner_classes(self):\n\n if not self._project:\n LOGGER.warning('Impossible to register outliner classes because Artella project is not defined!')\n return False\n\n outliners_data = self._config.get('outliners', default=dict())\n if not outliners_data:\n LOGGER.warning('No outliners found in artellapipe-tools-outliner configuration file to register!')\n return\n\n for outliner_type, outliner_info in outliners_data.items():\n full_outliner_class = outliner_info.get('class', None)\n if not full_outliner_class:\n LOGGER.warning('No class defined for Outliner Type \"{}\". Skipping ...'.format(outliner_type))\n continue\n outliner_class_split = full_outliner_class.split('.')\n outliner_class = outliner_class_split[-1]\n outliner_name = outliner_info.get('name', outliner_class)\n outliner_categories = outliner_info.get('categories', list())\n outliner_module = '.'.join(outliner_class_split[:-1])\n LOGGER.info('Registering Outliner: {}'.format(outliner_module))\n\n try:\n module_loader = loader.find_loader(outliner_module)\n except Exception as exc:\n LOGGER.warning('Impossible to register Outliner Module: {} | {}'.format(outliner_module, exc))\n continue\n if not module_loader:\n LOGGER.warning('Impossible to load Outliner Module: {}'.format(outliner_module))\n continue\n\n class_found = None\n try:\n mod = importlib.import_module(module_loader.fullname)\n except Exception as exc:\n LOGGER.warning('Impossible to register outliner class: {} | {}'.format(module_loader.fullname, exc))\n continue\n\n for cname, obj in inspect.getmembers(mod, inspect.isclass):\n if cname == outliner_class:\n class_found = obj\n break\n\n if not class_found:\n LOGGER.warning('No Outliner Class \"{}\" found in Module: \"{}\"'.format(outliner_class, outliner_module))\n continue\n\n obj.NAME = outliner_name\n obj.CATEGORIES = outliner_categories\n\n self.register_outliner_class(outliner_type, obj)\n\n return True",
"def __init__(self):\n self.classes = {}",
"def instruments_dict(self): # TODO DEPRECATE\n return self.instruments.dict",
"def instruments(self):\r\n return self.get_field('instrument')",
"def test_unique_adapters(self):\n class Super(pyperry.Base): pass\n Super.configure('read', adapter=TestAdapter, conf='super')\n\n class Child(Super): pass\n Child.configure('read', adapter=TestAdapter, conf='child')\n\n super_adapter = Super.adapter('read')\n child_adapter = Child.adapter('read')\n\n self.assertTrue(super_adapter is not child_adapter)\n self.assertEqual(super_adapter.config.conf, 'super')\n self.assertEqual(child_adapter.config.conf, 'child')",
"def instruments(self) -> dict:\n return self._instruments",
"def register_classes():\n DiffuseCompChain.register_class()\n CatalogCompChain.register_class()\n DiffuseAnalysisChain.register_class()"
] | [
"0.6167917",
"0.6165286",
"0.6108895",
"0.5712228",
"0.56472594",
"0.5590002",
"0.54058063",
"0.5276742",
"0.5238212",
"0.517786",
"0.5136025",
"0.5066814",
"0.5047145",
"0.5043953",
"0.5038126",
"0.50306547",
"0.502609",
"0.5012202",
"0.5012202",
"0.50089884",
"0.49899292",
"0.4988904",
"0.49703974",
"0.49652734",
"0.495198",
"0.49453932",
"0.4932651",
"0.49235672",
"0.49103412",
"0.4871281"
] | 0.66089696 | 0 |
Register instrument instances and self with the daemon, storing their URIs | def _serve(self) -> None:
for instrument in self._config["instruments"]:
uri = self._daemon.register(instrument, objectId=str(instrument))
self._services[instrument.id] = str(uri)
logger.success(f"Registered {instrument} at {uri}")
self.uri = self._daemon.register(self, objectId=self.servername)
logger.success(f"Registered self at {self.uri}") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def register_instrument(instrument):\n config.INSTRUMENTS.append(instrument.id)\n for m in instrument.modules:\n register_module(m)",
"def register():\n signals.initialized.connect(initialize)\n signals.article_generator_context.connect(add_libravatar)",
"def on_register(cls):",
"def _registerOnServer(self, daemon, nameserver,vclock):\n uri = daemon.register(self)\n nameserver.register(self._name, uri)\n self.updateVectorClock(vclock)\n print(\"Gateway registered. Name {} and uri {} \".format(self._name,uri))",
"def __init__(self, instrument):\n endpoint = self.ENDPOINT.format(instrument=instrument)\n super(Instruments, self).__init__(endpoint, method=self.METHOD)",
"def register(self):\n self._register_dockyard()\n self._register_docker()",
"def add_instrument(self, mount, instrument):\n pass",
"def initiate(self):\n\n for item in config.WEATHER_PROVIDERS[self.title]:\n self.__setattr__(item, config.WEATHER_PROVIDERS[self.title][item])\n\n # RP5 and Sinoptik have same URLs for hourly and next day weather info\n if self.title in ('RP5', 'Sinoptik'):\n self.URL_hourly = self.URL\n self.URL_next_day = self.URL\n\n self.logger = self._get_logger(self.title, self.app.args.verbosity)",
"def register(self):\n self.logger.info(\"Registering agent %s\", \"/registry/\" + self._configuration[\"identification\"][\"uuid\"])\n self._coordination.update(\"/registry/\" + self._configuration[\"identification\"][\"uuid\"], self._configuration[\"identification\"])",
"def register_publisher(self, hostname, expire=-1):",
"def register(self, target, hostname, listener_type, expire=-1):",
"def setInstrument(self,instrument):\n self.instrument = instrument\n self.instrument.attach(self)",
"def register(self):\n raise NotImplementedError",
"def register(self):\n raise NotImplementedError",
"def __init__(self):\n self._inst = {}",
"def __init__(self, udisks):\n self.log = logging.getLogger('udiskie.daemon.Daemon')\n self.state = {}\n self.udisks = udisks\n\n self.event_handlers = {\n 'device_added': [],\n 'device_removed': [],\n 'device_mounted': [],\n 'device_unmounted': [],\n 'media_added': [],\n 'media_removed': [],\n 'device_unlocked': [],\n 'device_locked': [],\n 'device_changed': [self.on_device_changed]\n }\n\n for device in self.udisks.get_all_handleable():\n self._store_device_state(device)\n\n udisks.bus.add_signal_receiver(\n self._device_added,\n signal_name='DeviceAdded',\n bus_name='org.freedesktop.UDisks')\n udisks.bus.add_signal_receiver(\n self._device_removed,\n signal_name='DeviceRemoved',\n bus_name='org.freedesktop.UDisks')\n udisks.bus.add_signal_receiver(\n self._device_changed,\n signal_name='DeviceChanged',\n bus_name='org.freedesktop.UDisks')",
"def init(self):\n self._service_store = ServiceStore(self.driver, self.network)\n self._emulator = NetworkEmulator(self.store, self.driver)",
"def __init__(self):\n self._registry = {}",
"def async_register_services(hass, config, insteon_modem):\n\n def add_all_link(service):\n \"\"\"Add an INSTEON All-Link between two devices.\"\"\"\n group = service.data.get(SRV_ALL_LINK_GROUP)\n mode = service.data.get(SRV_ALL_LINK_MODE)\n link_mode = 1 if mode.lower() == SRV_CONTROLLER else 0\n insteon_modem.start_all_linking(link_mode, group)\n\n def del_all_link(service):\n \"\"\"Delete an INSTEON All-Link between two devices.\"\"\"\n group = service.data.get(SRV_ALL_LINK_GROUP)\n insteon_modem.start_all_linking(255, group)\n\n def load_aldb(service):\n \"\"\"Load the device All-Link database.\"\"\"\n entity_id = service.data[CONF_ENTITY_ID]\n reload = service.data[SRV_LOAD_DB_RELOAD]\n if entity_id.lower() == ENTITY_MATCH_ALL:\n for entity_id in hass.data[DOMAIN][INSTEON_ENTITIES]:\n _send_load_aldb_signal(entity_id, reload)\n else:\n _send_load_aldb_signal(entity_id, reload)\n\n def _send_load_aldb_signal(entity_id, reload):\n \"\"\"Send the load All-Link database signal to INSTEON entity.\"\"\"\n signal = f\"{entity_id}_{SIGNAL_LOAD_ALDB}\"\n dispatcher_send(hass, signal, reload)\n\n def print_aldb(service):\n \"\"\"Print the All-Link Database for a device.\"\"\"\n # For now this sends logs to the log file.\n # Future direction is to create an INSTEON control panel.\n entity_id = service.data[CONF_ENTITY_ID]\n signal = f\"{entity_id}_{SIGNAL_PRINT_ALDB}\"\n dispatcher_send(hass, signal)\n\n def print_im_aldb(service):\n \"\"\"Print the All-Link Database for a device.\"\"\"\n # For now this sends logs to the log file.\n # Future direction is to create an INSTEON control panel.\n print_aldb_to_log(insteon_modem.aldb)\n\n def x10_all_units_off(service):\n \"\"\"Send the X10 All Units Off command.\"\"\"\n housecode = service.data.get(SRV_HOUSECODE)\n insteon_modem.x10_all_units_off(housecode)\n\n def x10_all_lights_off(service):\n \"\"\"Send the X10 All Lights Off command.\"\"\"\n housecode = service.data.get(SRV_HOUSECODE)\n insteon_modem.x10_all_lights_off(housecode)\n\n def x10_all_lights_on(service):\n \"\"\"Send the X10 All Lights On command.\"\"\"\n housecode = service.data.get(SRV_HOUSECODE)\n insteon_modem.x10_all_lights_on(housecode)\n\n def scene_on(service):\n \"\"\"Trigger an INSTEON scene ON.\"\"\"\n group = service.data.get(SRV_ALL_LINK_GROUP)\n insteon_modem.trigger_group_on(group)\n\n def scene_off(service):\n \"\"\"Trigger an INSTEON scene ON.\"\"\"\n group = service.data.get(SRV_ALL_LINK_GROUP)\n insteon_modem.trigger_group_off(group)\n\n hass.services.async_register(\n DOMAIN, SRV_ADD_ALL_LINK, add_all_link, schema=ADD_ALL_LINK_SCHEMA\n )\n hass.services.async_register(\n DOMAIN, SRV_DEL_ALL_LINK, del_all_link, schema=DEL_ALL_LINK_SCHEMA\n )\n hass.services.async_register(\n DOMAIN, SRV_LOAD_ALDB, load_aldb, schema=LOAD_ALDB_SCHEMA\n )\n hass.services.async_register(\n DOMAIN, SRV_PRINT_ALDB, print_aldb, schema=PRINT_ALDB_SCHEMA\n )\n hass.services.async_register(DOMAIN, SRV_PRINT_IM_ALDB, print_im_aldb, schema=None)\n hass.services.async_register(\n DOMAIN, SRV_X10_ALL_UNITS_OFF, x10_all_units_off, schema=X10_HOUSECODE_SCHEMA\n )\n hass.services.async_register(\n DOMAIN, SRV_X10_ALL_LIGHTS_OFF, x10_all_lights_off, schema=X10_HOUSECODE_SCHEMA\n )\n hass.services.async_register(\n DOMAIN, SRV_X10_ALL_LIGHTS_ON, x10_all_lights_on, schema=X10_HOUSECODE_SCHEMA\n )\n hass.services.async_register(\n DOMAIN, SRV_SCENE_ON, scene_on, schema=TRIGGER_SCENE_SCHEMA\n )\n hass.services.async_register(\n DOMAIN, SRV_SCENE_OFF, scene_off, schema=TRIGGER_SCENE_SCHEMA\n )\n 
_LOGGER.debug(\"Insteon Services registered\")",
"def __init__(self):\n self.registry = {}",
"def __init__(self):\n self._discovered_devices = {}\n self._discovered_ip = None",
"def __init__(self):\n dispatcher.connect(self.stats_spider_closed, signal=signals.stats_spider_closed)\n dispatcher.connect(self.stats_spider_closed, signal=signals.spider_closed)\n dispatcher.connect(self.stats_spider_closed, signal=signals.engine_stopped)",
"def register(self):\n raise NotImplementedError()",
"def register_router(self, hostname, expire=-1):",
"def __init__(self, bootstrap_interval=None, run_migrations=True):\n super().__init__(bootstrap_interval, run_migrations)\n self.data_source_provider = RegistryDataSourceProvider()\n self.adapters = []\n self.adapters_by_domain = defaultdict(list)\n self.domains_to_skip = None",
"def consul_register(self):\n self.log.debug(\"consul-register\")\n self.consul.agent.service.register(\n self.svc_name,\n address=self.this_host,\n check=consulate.models.agent.Check(\n name=\"qemu-process\",\n args=[\n \"/bin/sh\",\n \"-c\",\n \"test -e /proc/$(< /run/qemu.{}.pid )/mem || exit 2\".format(\n self.name\n ),\n ],\n interval=\"5s\",\n ),\n )",
"def register(self, instance, storage_interface):\r\n self._instances[instance] = storage_interface",
"def onRegister(self):\n pass",
"def onRegister(self):\n pass",
"def register(self):\n raise NotImplementedError(\"Should have implemented this\")"
] | [
"0.61770165",
"0.586058",
"0.5787275",
"0.57755834",
"0.5748999",
"0.5597987",
"0.5547358",
"0.5466158",
"0.5462284",
"0.5456908",
"0.5437423",
"0.54095894",
"0.5405339",
"0.5405339",
"0.53873736",
"0.53591174",
"0.5346121",
"0.5344868",
"0.5339623",
"0.53348446",
"0.53146374",
"0.5310043",
"0.530068",
"0.5300645",
"0.52885294",
"0.52769274",
"0.5273985",
"0.52710813",
"0.52710813",
"0.52694774"
] | 0.78891295 | 0 |
Disconnect instruments and shutdown daemon | def shutdown(self) -> None:
logger.info("Disconnecting instruments...")
for instrument in self._config["instruments"]:
instrument.disconnect()
logger.info(f"Shutting down {self}...")
self._daemon.shutdown() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def shutdown(self):\n if self.alive:\n libplasma.disconnect(self.conn)\n self.alive = False",
"def shutdown(self):",
"def stopAndDisconnectWalabot():\n wlbt.Stop()\n wlbt.Disconnect()\n print ('Termination successful')",
"def shutdown(self) -> None:",
"def shutdown(self) -> None:",
"def shutdown(self):\n ...",
"def test_disconnect(self):\n self.inverter.disconnect()\n sleep(0.02)",
"def initiate_shutdown(self) -> None:",
"def device_disconnect(self):\n pass",
"def close(self):\n logger.warning('Shutting down')\n self.display.off()\n self.mqtt.disconnect()",
"def shutdown(self):\t\r\n\t\tself.is_running = False\r\n\t\tfor connection in self.established_connection_list:\r\n\t\t\tconnection.send('The server has been shutdown adruptly by the server owner.\\n')\r\n\t\t\tconnection.socket_send()",
"def shutdown(self):\n try:\n self.driver.stop()\n except:\n logging.exception(\"Could not stop driver on shutdown\")\n\n self.arduino.stop()",
"def disconnectAllServers():\n _disconnectAllServers()",
"def disconnect(self):",
"def Disconnect_from_ePCSim_Server(ePCSim_conn):\r\n ePCSim_conn.Disconnect()",
"async def shutdown(self) -> int:",
"async def shutdown(self):",
"def shutdown(self):\n pass",
"def shutdown(self):\n pass",
"def shutdown(self):\n pass",
"def _shutdown(self):",
"def signal_handler(*args):\n if station:\n station.shutdown()",
"def shutdown(self):\n\n raise NotImplementedError",
"def disconnect(self):\n self.arduino.close()\n self.arduino = None",
"def shutdown_all(self, now=False):",
"def shutdown(self):\n\n pass",
"def disconnect(self) -> None:\n ...",
"def shutdown():\n\n cmd = dict()\n cmd[\"type_\"] = \"shutdown\"\n cmd[\"name_\"] = \"all\"\n\n ## In case of the shutdown there will be no returned message to\n ## check the success.\n s = comm.send_and_receive_socket(cmd)\n\n s.close()",
"def _stop(self):\n\n if self._daemon_id:\n pyro_proxy_name = 'PySwitchLib.' + self._daemon_id\n uri = None\n\n try:\n with Pyro4.locateNS(host='localhost', port=self._pyro_ns_port) as ns:\n try:\n uri = ns.lookup(pyro_proxy_name)\n except:\n pass\n\n if uri:\n ns.remove(pyro_proxy_name)\n except:\n pass\n finally:\n ns_daemon_dict = ConfigFileUtil().read(filename=pyswitchlib_ns_daemon_file)\n\n if self._daemon_id in ns_daemon_dict:\n uri = ns_daemon_dict[self._daemon_id]\n del ns_daemon_dict[self._daemon_id]\n\n if len(ns_daemon_dict):\n ConfigFileUtil().write(filename=pyswitchlib_ns_daemon_file, conf_dict=ns_daemon_dict, do_merge=False)\n else:\n try:\n os.unlink(pyswitchlib_ns_daemon_file)\n except:\n pass\n\n if uri:\n try:\n with Pyro4.Proxy(uri) as pyro_proxy:\n pyro_proxy.shutdown()\n pyro_proxy._pyroRelease()\n except:\n pass\n\n super(PySwitchLibApiDaemonRunner, self)._stop()",
"def disconnect(self):\n pass"
] | [
"0.67708665",
"0.6769665",
"0.6741867",
"0.66888916",
"0.66888916",
"0.6571312",
"0.65665215",
"0.6500033",
"0.64636064",
"0.64133114",
"0.6387295",
"0.6361316",
"0.63584465",
"0.6352385",
"0.6346766",
"0.6346196",
"0.6339963",
"0.63314235",
"0.63314235",
"0.63314235",
"0.6310234",
"0.63076043",
"0.6282716",
"0.62790745",
"0.62790096",
"0.62705433",
"0.6265983",
"0.6265871",
"0.62589806",
"0.6241178"
] | 0.7881212 | 0 |
python ~/code/xdoctest/testing/test_linenos.py test_lineno_failcase_called_code python ~/code/xdoctest/testing/test_linenos.py | def test_lineno_failcase_called_code():
text = _run_case(utils.codeblock(
r'''
def func(a):
"""
Example:
>>> func(0)
>>> # this doesnt do anything
>>> print('this passes')
this passes
>>> # call the failing code
>>> func(3)
"""
if a > 0:
nested_failure(a)
return a
def nested_failure(a):
if a > 0:
nested_failure(a - 1)
else:
raise Exception('fail case')
'''))
assert 'rel: 6, abs: 9,' in text
assert text | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_error(doctest):",
"def test_expt(doctest):",
"def test_exp(doctest):",
"def testit(did_pass):\n\n # This function works correctly--it is verbatim from the text, chapter 6\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)",
"def test_error_control(testdir):\n testdir.makepyfile(\"\"\"import sys\"\"\")\n result = testdir.runpytest('--pylint', '--pylint-error-types=EF')\n assert '1 passed' in result.stdout.str()",
"def testit(did_pass):\n\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)",
"def _test():\n\n # perform doctest\n import sys\n import doctest\n\n doctest.testmod()\n\n sys.exit(0)",
"def test_edge_cases(doctest):",
"def test_expected_failures(modpath, expected_failure):\n code = os.path.dirname(expected_failure)\n retcode, out = flake8(join(modpath, expected_failure))\n assert retcode, \"expected failure (%s), got success\" % code\n needle = \": %s \" % code\n assert needle in out\n\n with open(os.path.join(modpath, expected_failure)) as f:\n doc = ast.get_docstring(\n ast.parse(f.read(), expected_failure),\n clean=True,\n )\n\n # keep \"literal\" lines, skip shell lines\n result_check = \"\".join(\n line + \"\\n\" for line in doc.splitlines() if line.startswith(\" RST\")\n )\n if result_check:\n modpath = os.path.join(modpath, \"\")\n assert out.replace(modpath, \" \") == result_check",
"def _test():\n import doctest",
"def test_basic(testdir):\n testdir.makepyfile(\"\"\"import sys\"\"\")\n result = testdir.runpytest('--pylint')\n assert 'Missing module docstring' in result.stdout.str()\n assert 'Unused import sys' in result.stdout.str()\n assert 'Final newline missing' in result.stdout.str()\n assert 'passed' not in result.stdout.str()",
"def test(did_pass):\r\n linenum = sys._getframe(1).f_lineno # Get the caller's line number\r\n if did_pass:\r\n msg = \"Test at line {0} is ok\".format(linenum)\r\n else:\r\n msg = \"Test at line {0} is FAILED\".format(linenum)\r\n print(msg)",
"def test(did_pass):\n linenum = sys._getframe(1).f_lineno\n if did_pass:\n msg = 'Test at line {0} ok.'.format(linenum)\n else:\n msg = 'Test at line {0} FAILED.'.format(linenum)\n print(msg)",
"def _test():\n import doctest\n doctest.testmod(verbose=1)",
"def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILIED.\".format(linenum))\n print(msg)",
"def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILIED.\".format(linenum))\n print(msg)",
"def test(cline):\n print(\"Running unit tests.\")\n cline.run(\"TF_CPP_MIN_LOG_LEVEL=3 python3 -m unittest\")",
"def test_simple_pytest_import_error_cli():\n module_text = utils.codeblock(\n '''\n # There are lines before the bad line\n import os\n import sys\n import does_not_exist\n\n def module_func1():\n \"\"\"\n This module has a doctest\n\n Example:\n >>> print('hello world')\n \"\"\"\n ''')\n temp_module = util_misc.TempModule(module_text, modname='imperr_test_mod')\n command = sys.executable + ' -m pytest -v -s --xdoctest-verbose=3 --xdoctest-supress-import-errors --xdoctest ' + temp_module.dpath\n print(command)\n print('--')\n info = cmd(command)\n print('--')\n # print('info = {}'.format(info))\n print(info['out'])\n # We patched doctest_example so it no longer outputs this in the traceback\n assert 'util_import' not in info['out']\n print(info['out'])\n # Note: flaky changes the return code from 1 to 3, so test non-zero\n assert info['ret'] != 0\n\n # Remove the supress import error flag and now we should get the traceback\n temp_module = util_misc.TempModule(module_text, modname='imperr_test_mod')\n command = sys.executable + ' -m pytest -v -s --xdoctest-verbose=3 --xdoctest ' + temp_module.dpath\n print(command)\n print('--')\n info = cmd(command)\n print('--')\n # print('info = {}'.format(info))\n print(info['out'])\n # We patched doctest_example so it no longer outputs this in the traceback\n assert 'util_import' in info['out']\n print(info['out'])\n # Note: flaky changes the return code from 1 to 3, so test non-zero\n assert info['ret'] != 0",
"def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)",
"def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)",
"def test_pylint_works(capsys: \"CaptureFixture\") -> None:\n # Pass one file with absolute path and the other one with relative path\n notebook1 = os.path.join(\"tests\", \"data\", \"notebook_for_testing.ipynb\")\n notebook2 = os.path.join(\"tests\", \"data\", \"notebook_with_indented_magics.ipynb\")\n\n main([\"pylint\", notebook1, notebook2, \"--disable=C0114\"])\n\n # check out and err\n out, _ = capsys.readouterr()\n\n expected_out = (\n \"************* Module tests.data.notebook_for_testing\\n\" # noqa: E501\n f\"{notebook1}:cell_2:19:8: C0303: Trailing whitespace (trailing-whitespace)\\n\" # noqa: E501\n f\"{notebook1}:cell_2:15:11: C0209: Formatting a regular string which could be a f-string (consider-using-f-string)\\n\" # noqa: E501\n f'{notebook1}:cell_4:1:0: C0413: Import \"from random import randint\" should be placed at the top of the module (wrong-import-position)\\n' # noqa: E501\n f'{notebook1}:cell_5:1:0: C0413: Import \"import pprint\" should be placed at the top of the module (wrong-import-position)\\n' # noqa: E501\n f'{notebook1}:cell_5:2:0: C0413: Import \"import sys\" should be placed at the top of the module (wrong-import-position)\\n' # noqa: E501\n f\"{notebook1}:cell_1:1:0: W0611: Unused import os (unused-import)\\n\" # noqa: E501\n f\"{notebook1}:cell_1:3:0: W0611: Unused import glob (unused-import)\\n\" # noqa: E501\n f\"{notebook1}:cell_1:5:0: W0611: Unused import nbqa (unused-import)\\n\" # noqa: E501\n f\"{notebook1}:cell_4:1:0: W0611: Unused randint imported from random (unused-import)\\n\" # noqa: E501\n f'{notebook1}:cell_4:1:0: C0411: standard import \"from random import randint\" should be placed before \"import nbqa\" (wrong-import-order)\\n' # noqa: E501\n f'{notebook1}:cell_5:1:0: C0411: standard import \"import pprint\" should be placed before \"import nbqa\" (wrong-import-order)\\n' # noqa: E501\n f'{notebook1}:cell_5:2:0: C0411: standard import \"import sys\" should be placed before \"import nbqa\" (wrong-import-order)\\n' # noqa: E501\n \"************* Module tests.data.notebook_with_indented_magics\\n\" # noqa: E501\n f\"{notebook2}:cell_1:1:0: W0611: Unused randint imported from random (unused-import)\\n\" # noqa: E501\n f\"{notebook2}:cell_1:2:0: W0611: Unused get_ipython imported from IPython (unused-import)\\n\" # noqa: E501\n f'{notebook2}:cell_3:3:0: C0411: standard import \"import operator\" should be placed before \"from IPython import get_ipython\" (wrong-import-order)\\n' # noqa: E501\n \"\\n\"\n \"-----------------------------------\\n\"\n \"Your code has been rated at 4.32/10\\n\"\n \"\\n\"\n )\n horizontal_bar = \"-----------------------------------\"\n assert out.split(horizontal_bar)[0] == expected_out.split(horizontal_bar)[0]",
"def unitdoctest():\r\n\r\n pass",
"def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)",
"def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)",
"def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)",
"def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)",
"def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)",
"def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)",
"def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)",
"def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)"
] | [
"0.704092",
"0.70044655",
"0.6639948",
"0.6638953",
"0.65686834",
"0.65359443",
"0.6508109",
"0.6495619",
"0.643054",
"0.6405408",
"0.63980496",
"0.63929",
"0.6392692",
"0.63884795",
"0.63859123",
"0.63859123",
"0.6350053",
"0.6331821",
"0.6329094",
"0.6329094",
"0.63262904",
"0.63060284",
"0.6253524",
"0.6253524",
"0.6253524",
"0.6253524",
"0.6253524",
"0.6253524",
"0.6253524",
"0.62514967"
] | 0.7327849 | 0 |
Add to the list of describing adjectives. | def add_adjectives(self, adjective):
self.adjectives += [adjective] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_adjectives(self, *sAdjs):\n self.adjectives += list(sAdjs)",
"def add(self):\n pass",
"def add_many_descriptors(self, descriptors):",
"def add(self, PlugLead):\n\n self.check_conflicts(PlugLead)\n self.plugleads.append(PlugLead)",
"def add_disease(self, disease):\n self.diseases.append(disease)",
"def addObjective(self, *args):\n return _libsbml.ListOfObjectives_addObjective(self, *args)",
"def add_experience(self, state, action, reward, next_state, done):\n experience = (state, action, reward, next_state, done)\n for i, k in enumerate(self.data_keys):\n getattr(self, k).append(experience[i])\n self.size += 1",
"def add_diameter(self, dia):\n self.diameters.append(dia)",
"def addEntry(self, listDictions):\n ## load xml\n improvDoc = loadIMProvFile(self.argsFile)\n entrname= 'Job'\n for dictions in listDictions:\n report = IMProvNode(entrname , None, **dictions)\n improvDoc.addNode(report)\n outfile = file( self.argsFile, 'w').write(str(improvDoc))\n return",
"def Add(self, *args):\n return _BRepAlgo.BRepAlgo_AsDes_Add(self, *args)",
"def add(self, *args):\n pass",
"def add(self, *args):\n pass",
"def AddConcept(self, concept):\n self.concepts.append(concept)",
"def add_descriptor(self, descriptor):",
"def do_add(self):\n\n\t\t#debug\n\t\t'''\n\t\tself.debug(('self.',self,['AddingVariablesList']))\n\t\t'''\n\n\t\t#Apply\t\n\t\tself.map('append',map(\n\t\t\t\t\t\t\t\t\tlambda __AddingVariable:\n\t\t\t\t\t\t\t\t\t{'LiargVariablesList':[__AddingVariable]},\n\t\t\t\t\t\t\t\t\tself.AddingVariablesList\n\t\t\t\t\t\t\t\t)\n\t\t\t\t)",
"def addDemographics(self):\n p = self.p\n demographics_data = {\n 'dob': p.dob,\n 'gender': p.gender,\n 'email': p.email,\n 'fname': p.fname,\n 'lname': p.lname,\n 'hphone': p.home,\n 'cphone': p.cell,\n 'country': p.country,\n 'city': p.city,\n 'pcode': p.pcode,\n 'region': p.region,\n 'street': p.street,\n }\n self.demographics_doc = DEMOGRAPHICS.sub(demographics_data).done()",
"def add(self, name, content):\n raise NotImplementedError",
"def add(self, el):\n raise Exception('TODO IMPLEMENT ME !')",
"def add_tag(self, tag):\n\n # directional relation: tag is the blank of everything in the list\n self.relations[tag] = {\n \"overlord\": [],\n \"hegemon\": [], # for tributary\n \"tributary\": [],\n \"vassal\": [],\n \"guaranteeing\": [],\n \"guarantor\": [],\n \"alliance\": [],\n \"senior\": [],\n \"junior\": [],\n \"marriage\": []\n }",
"def add_condiments(self):\n print(\"Adding Lemon\")",
"def add_food(self, _food):\n self.food.append(_food)",
"def add_objective(self, objective):\n self.objectives.append(objective)",
"def __add__(self, notes):\n self.add_notes(notes)\n return self",
"def add_emb(self, emb):\n self.embs.append(emb)",
"def add(self, *items):",
"def append(self, dpr):\r\n self.childlist.append(dpr)",
"def add(self, experience):\n self.buffer.append(experience)",
"def add_descriptions_to_confusion_matrix(self):\n topic_names = []\n for topic_num in self.topic_numbers:\n topic_names.append(self.topic_names[topic_num])\n for index, row in enumerate(self.confusion_matrix):\n row.insert(0,topic_names[index])\n topic_names_for_matrix = topic_names.copy()\n topic_names_for_matrix.insert(0,\"\")\n self.confusion_matrix.insert(0,topic_names_for_matrix)\n self.confusion_matrix_true.insert(0,topic_names_for_matrix)",
"def to_add(self):\n pass",
"def add(self, item):"
] | [
"0.7263275",
"0.62598264",
"0.5947216",
"0.57094675",
"0.5695133",
"0.5606961",
"0.56066847",
"0.5586834",
"0.5575344",
"0.5548141",
"0.5520845",
"0.5520845",
"0.55087703",
"0.5452977",
"0.5440875",
"0.53886217",
"0.5388016",
"0.5352896",
"0.5326072",
"0.5325428",
"0.5324635",
"0.5306004",
"0.5275961",
"0.5274832",
"0.52725625",
"0.52449703",
"0.522418",
"0.52171415",
"0.52053314",
"0.5205009"
] | 0.7504388 | 0 |
Returns the list of describing adjectives. The list is shuffled first because generally this is used to get a random adjective. | def get_adjectives(self):
random.shuffle(self.adjectives)
return self.adjectives | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_adjectives(lyrics):\n doc = nlp(lyrics.lower())\n all_adjectives = [token.lemma_ for token in doc if token.pos_ == \"ADJ\"]\n return all_adjectives",
"def succ(self):\n return [ self.simple_reflection(i) for i in self.descents(positive=True) ]",
"def getAdjectives(self, word):\n\t\tadjectives = set()\n\t\tfor synset in wordnet.synsets(word):\n\t\t\tif synset.pos == ADJ:\n\t\t\t\tfor synonym in synset.lemma_names:\n\t\t\t\t\tadjectives.add(synonym)\n\t\treturn adjectives",
"def adjectives_sorted(lyrics):\n adjectives = get_adjectives(lyrics)\n sorted_adjectives = Counter(adjectives)\n return sorted_adjectives",
"def desc_with_default(self) -> List[str]:\n return self.desc[:]",
"def pred(self):\n return [ self.simple_reflection(i) for i in self.descents() ]",
"def getAllDescWords(itemList):\r\n itemList = list(set(itemList)) # make itemList unique\r\n descWords = []\r\n for item in itemList:\r\n descWords.extend(worldItems[item][DESCWORDS])\r\n return list(set(descWords))",
"def _read_advantages(root):\n output_list = []\n for _, value in enumerate(root[0][3]):\n output_list.append(Advantage(value))\n return output_list",
"def get_advice():\n json_response = random_adviceslip()\n advice = parse_advice(json_response=json_response)\n return advice",
"def get_objectives(self):\n return copy.deepcopy(self.objectives), self.gates_names",
"def get_advisories(self):\n\n advisories = []\n\n for i in range(len(self.__data['advisories'])):\n data = requests.get(self.__data['advisories'][i]['links']['self']['href'], headers=getHeaders()).json()\n this = {}\n this['id'] = data['id']\n this['name'] = data['name']\n advisories.append(this)\n\n return advisories",
"def get_ads():\n return coll_ad.distinct(KEY_AD_ID)",
"def disease_descriptors(civic_did8):\n return [civic_did8]",
"def _choose_babble_phrases(self) -> tuple:\n noun_choices = ('singular nouns', 'plural nouns')\n noun_choice = self.random_element(noun_choices)\n\n adjective_choices = (\n 'adjectives starting with consonant',\n 'adjectives starting with vowel')\n\n if noun_choice == 'singular nouns':\n article_choice = self.random_element(self.article_choices)\n else:\n article_choice = 'the'\n\n if article_choice == 'an':\n adjective_choice = 'adjectives starting with vowel'\n elif article_choice == 'a':\n adjective_choice = 'adjectives starting with consonant'\n else:\n adjective_choice = self.random_element(adjective_choices)\n\n return (\n self.technobabble['verbs'],\n article_choice,\n self.technobabble[adjective_choice],\n self.technobabble[noun_choice])",
"def load_verbs(self, verbs: List[str]) -> List[str]:\n\n neg_verbs = get_verbs(verbs)\n\n neg_verbs_preceding = neg_verbs.loc[\n ((neg_verbs[\"mode\"] == \"Indicatif\") & (neg_verbs[\"tense\"] == \"Présent\"))\n | (neg_verbs[\"tense\"] == \"Participe Présent\")\n | (neg_verbs[\"tense\"] == \"Participe Passé\")\n | (neg_verbs[\"tense\"] == \"Infinitif Présent\")\n ]\n neg_verbs_following = neg_verbs.loc[neg_verbs[\"tense\"] == \"Participe Passé\"]\n list_neg_verbs_preceding = list(neg_verbs_preceding[\"term\"].unique())\n list_neg_verbs_following = list(neg_verbs_following[\"term\"].unique())\n\n return (list_neg_verbs_preceding, list_neg_verbs_following)",
"def getEssentialList(self):\n return self.essentials",
"def card_fields_in_order(self) -> List[str]:\n card_in_anki_order = [self.word, self.pronunciation, self.sentence,\n self.definitions, self.book_title, self.author]\n return card_in_anki_order",
"def DumpDetails(self, sentences, label=\"N.A.\"):\n AdjR = 0.0\n adjAll = []\n for sentence in sentences:\n # if sentence[\"Text\"].startswith(\"Joanie is not helpful\"):\n # x = 1\n adjectives, dependencies = self.ExtractSentDetails(sentence)\n adjAll.extend(adjectives)\n allAdjectives = adjectives | Angel.GlobalAdjList\n AdjS = 0.0\n words = wordpunct_tokenize(sentence[\"Text\"])\n if len(words) <= 3:\n allAdjectives |= set([x.lower() for x in words])\n for i in range(len(words)):\n word = words[i].lower()\n if word in {\"but\", \"if\"}:\n AdjS = 0.0\n print words[i],\n elif word in allAdjectives and word in self.lexicon:\n multiplier = self.PredictMultiplier(word, dependencies[word], words, i)\n score = float(self.lexicon[word]) * multiplier\n if multiplier < 1:\n colortext = colored(words[i] + \" (\" + '{:.3}'.format(score) + \")\", 'red',None,['underline'])\n elif multiplier > 1:\n colortext = colored(words[i] + \" (\" + '{:.3}'.format(score) + \")\", 'red',None,['bold'])\n else:\n colortext = colored(words[i] + \" (\" + '{:.3}'.format(score) + \")\", 'red')\n AdjS += score\n print colortext,\n else:\n print words[i],\n print\n colortext = colored(\"Adjectives: \" + '{:.3}'.format(AdjS),'red')\n print colortext\n AdjR += AdjS\n print\n print \"Label:\", label\n base = self.PredictBase(adjAll)\n colortext = colored(\"Adjectives: \" + str(AdjR) + \"*\" + str(base) + \" = \" + str(AdjR*base),'red')\n print colortext",
"def add_adjectives(self, adjective):\n self.adjectives += [adjective]",
"def random_advice(message):\n advice = requests.get(\"https://api.adviceslip.com/advice\").json()['slip']['advice']\n\n return advice",
"def generate_products():\n # initialize list of noun and adj\n num_products = 30\n products = [0] * num_products\n prices = [0] * num_products\n weights = [0] * num_products\n flammabilities = [0] * num_products\n\n # initlize random word object\n random = RandomWords()\n\n adj = [random.get_random_word(includePartOfSpeech=\"adjective\")\n for product in products]\n noun = [random.get_random_word(includePartOfSpeech=\"noun\")\n for product in products]\n products = [noun + \" \" + adj for noun, adj in zip(adj, noun)]\n\n prices = [random.randint(5, 100) for price in prices]\n weights = [random.randint(5, 100) for weight in weights]\n flammabilities = [random.randint(0.0, 2.5)\n for flammability in flammabilities]\n\n return products, prices, weights, flammabilities",
"def setup_random_opinions_representatives():\r\n global CATEGORIES\r\n \r\n ideas_dic = {}\r\n \r\n for i in CATEGORIES:\r\n #idea = Idea(1,\"\",i, 1-random.expovariate(6))\r\n if i == 1:\r\n idea = Idea(1,\"\",i, random.uniform(-1,-0.5))\r\n elif i == 2:\r\n idea = Idea(1,\"\",i, random.uniform(-1,-0.5))\r\n elif i == 3:\r\n idea = Idea(1,\"\",i, random.uniform(0.5,1))\r\n \r\n# idea = Idea(1,\"\",i, random.uniform(0.5,1))\r\n ideas_dic[i] = idea\r\n \r\n return ideas_dic",
"def getAchievements(self) -> list:\n return self.state[ACHIEVEMENTS]",
"def darts(self):\r\n return self.alphas[0].keys()",
"def add_adjectives(self, *sAdjs):\n self.adjectives += list(sAdjs)",
"def skills():\n all_skills = [\"strength\", \"patience\", \"cleanliness\", \"leadership\", \"communication\",\n \"science\", \"math\", \"engineering\", \"intelligence\", \"driving\"]\n\n random.shuffle(all_skills)\n return all_skills[0:2]",
"def effect_list(self):\n moods = []\n for mood in self._moodlist:\n if \"name\" in mood:\n moods.append(mood['name'])\n return moods",
"def get_ideas(self):\n fields = ['content', 'clusterIDs', 'isGamechanger',\n 'userID', 'promptID']\n return self.get_data(\"ideas\", fields)",
"def get_list(self):\n categories = []\n for attribut in self.attributes:\n attr = getattr(self, attribut, False)\n if attr is True:\n categories.append(attribut)\n if getattr(self, 'education') is True:\n categories.append(_(u'education'))\n if getattr(self, 'training') is True:\n categories.append(_(u'training'))\n if getattr(self, 'tutoring') is True:\n categories.append(_(u'tutoring'))\n\n return categories",
"def p_banner():\n return random.choice([banner, banner_two, banner_three, banner_four, banner_five])"
] | [
"0.605831",
"0.5822134",
"0.57419574",
"0.573242",
"0.55850464",
"0.5502515",
"0.5492401",
"0.5489824",
"0.5483387",
"0.5447286",
"0.54260534",
"0.5316058",
"0.52798134",
"0.5279397",
"0.5267892",
"0.5265623",
"0.525828",
"0.52477735",
"0.5244685",
"0.5200043",
"0.51937425",
"0.51871073",
"0.51700115",
"0.5165577",
"0.51619357",
"0.5148933",
"0.5103721",
"0.5102739",
"0.5100347",
"0.5086335"
] | 0.81158966 | 0 |
Returns the noun, including all its describing adjectives, as a string. | def full_string(self):
return "{}: {}".format(str(self.word), " ".join([str(adj) for adj in self.adjectives])) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def en_noun(t, label):\r\n s = label\r\n p = []\r\n is_uncountable = False\r\n \r\n # http://en.wiktionary.org/wiki/Template:en-noun\r\n head = t.arg(\"head\", label)\r\n p1 = t.arg(0)\r\n p2 = t.arg(1)\r\n \r\n if p1 == \"-\":\r\n # uncountable\r\n is_uncountable = True\r\n \r\n if p2 == \"s\":\r\n # ends by s\r\n p.append(head + \"s\")\r\n \r\n elif p2 is not None:\r\n # word\r\n p.append(p2)\r\n \r\n elif p1 == \"es\":\r\n # add es\r\n p.append(head + \"es\")\r\n \r\n elif p1 is not None:\r\n # use term\r\n p.append(p1)\r\n \r\n elif p1 is None and p2 is None:\r\n p.append(head+\"s\")\r\n\r\n for k,a in t.args.items():\r\n if not a.is_named():\r\n if k == 0 or k == 1:\r\n continue\r\n \r\n p.append(a.as_string())\r\n \r\n return (s, p, is_uncountable)",
"def get_nouns(self):\n\t\tblob = TextBlob(self.raw_string)\n\n\t\tfor word, tag in blob.tags:\n\t\t\tif tag in ['NNP', 'NN']:\n\t\t\t\tself.nouns.append(word.lemmatize())",
"def to_adverb(self):\n\n if 'AdverbPhrase' in self.variants:\n return self.variants['AdverbPhrase']\n\n return self.copy_with(typ=\"AdverbPhrase\",\n text=\"with \" + self.text)",
"def noun_string(data_org):\n chains = []\n tokens = word_tokenize(data_org)\n #tokenize to prepare for tagging\n w_tag = dict(nltk.pos_tag(tokens))\n chain = []\n for w, tag in w_tag.items():\n #find all nouns based on treebank format\n if tag.startswith('N'):\n chain.append(w)\n else:\n if len(chain) >= 3:\n chains.append(\" \".join(chain))\n chain = []\n\n #move information to dataframe for printing to excel\n df_noun_string = pd.DataFrame({'Noun Strings (3+ Nouns in a row)': chains}, columns = ['Noun Strings (3+ Nouns in a row)'])\n return df_noun_string",
"def __str__(self):\n out = self.san\n if self.comment != \"\":\n out += \" {\" + self.comment.replace('\\n', ' ') + \"}\"\n if len(self.nags) > 0:\n for n in self.nags:\n out += \" \" + n\n for v in self.variations:\n out += \" (\" + str(v).strip(' ') + \")\"\n return out",
"def get_compound_noun( ngram ):\n try:\n pattern = re.compile( '((?: ?\\\\b[^\\\\s]+(?:/NN.?/[a-z]+/[\\\\d]+)){2,})' )\n match = re.search( pattern, ngram )\n if match:\n compound = ''\n contains_root = False\n tokens = match.group().strip().split(' ')\n for t in tokens:\n # embed()\n items = t.split('/')\n compound += ( items[0] + ' ' )\n if items[3] == 0:\n contains_root = True\n if contains_root:\n return compound\n else:\n return None\n else:\n return None\n \n except ValueError:\n return None",
"def get_article_str(article_sents):\n article_str = \"\"\n for nlp_sent in article_sents:\n article_str += (' ' + nlp_sent.text + ' ')\n return article_str",
"def get_noun_phrases(blob):\n return blob.noun_phrases",
"def to_adverb(self):\n\n if 'AdverbPhrase' in self.variants:\n return self.variants['AdverbPhrase']\n\n return self.copy_with(typ=\"AdverbPhrase\",\n text=self.text + \" to\")",
"def indefinite(self):\n return \"an\" if self.short_desc[0] in 'aeiou' else \"a\"",
"def noun_lemma(word):\n if word.endswith(\"s\"):\n if word.endswith(\"ss\"):\n return word.lower()\n elif word.endswith(\"ies\"):\n return word[:-3].lower() + (\"y\")\n else:\n return word[:-1].lower()\n if word.endswith(\"men\"):\n return word[:-2].lower() + (\"an\")\n else:\n return word.lower()",
"def to_adverb(self):\n\n text = self.text\n ending = text[-1]\n if ending == \"e\":\n text = text[0:-1]+\"ly\"\n else:\n text = text+\"ly\"\n\n return self.copy_with(typ=\"AdverbPhrase\",\n text=text)\n\n # return AdverbPhrase(**self.locals(skip=[\"text\", \"typ\", \"variants\"]),\n # text=text,\n # **self.variants)",
"def a(noun):\n if p.singular_noun(noun) is not False:\n return noun\n else:\n return p.a(noun)",
"def get_abbreviated_description(self):\n word_array = str(self.description).split()[:25]\n abbreviated_description = \" \".join(word_array)\n return abbreviated_description",
"def __str__(self):\n return self.underscoreSentence.get().__str__()",
"def get_sentence(self):",
"def short_description(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"short_description\")",
"def greco_latin_plural_noun(base_token=None):\n\n output_string = \"\"\n if base_token is not None:\n if base_token.endswith(\"us\"):\n output_string = base_token[:-2] + \"i\"\n elif base_token.endswith(\"ma\"):\n output_string = base_token + \"ta\"\n elif base_token.endswith(\"a\"):\n output_string = base_token[:-1] + \"ae\"\n elif base_token.endswith((\"on\", \"um\")):\n output_string = base_token[:-2] + \"a\"\n elif base_token.endswith(\"sis\"):\n output_string = base_token[:-3] + \"ses\"\n elif base_token.endswith(\"is\"):\n output_string = base_token[:-2] + \"ides\"\n elif base_token.endswith(\"men\"):\n output_string = base_token[:-3] + \"mina\"\n elif base_token.endswith(\"ex\"):\n output_string = base_token[:-2] + \"ices\"\n elif base_token.endswith(\"x\"):\n output_string = base_token[:-1] + \"ces\"\n\n return output_string",
"def to_string(self):\n return \" \".join(self._words)",
"def __repr__(self):\r\n s = 'Words:\\n' + str(self.words) + '\\n\\n'\r\n s += 'Word lengths:\\n' + str(self.wordlengths) + '\\n\\n'\r\n s += 'Stems:\\n' + str(self.stems) + '\\n\\n'\r\n s += 'Sentence lengths:\\n' + str(self.sentencelengths) + '\\n\\n'\r\n s += 'Gerunds:\\n' + str(self.gerund)\r\n return s",
"def _get_sentence(sentence_data):\n return \" \".join([word for word, ne_tag in sentence_data])",
"def dish_str(n:Dish):\r\n return (n.name + \" $\" + str(n.price) + \" \" + str(n.calories) + \" cal\")",
"def get_description(self):\n return \"-\".join(\n map(str, (self.release, self.chromosome, self.start, self.reference, self.alternative))\n )",
"def find_noun(sent):\n noun = None\n\n if not noun:\n for w, p in sent.pos_tags:\n if p == 'NN': # This is a noun\n noun = w\n break\n if noun:\n #logger.info(\"Found noun: %s\", noun)\n pprint(\"FOUND NOUN\")\n pprint(noun)\n\n return noun",
"def NoDeltaGExplanation(self):\n for compound in self.reactants:\n if compound.compound.no_dg_explanation:\n name = compound.compound.common_names.all()[0].name\n return '%s %s' % (name,\n compound.compound.no_dg_explanation.lower())\n return None",
"def base_verb_string(self): \n if self.is_derived: \n _base_verb_str= getattr(self, '_base_verb_string', None)\n if is_empty_str(_base_verb_str):\n if self.is_phrase:\n # a phrase means the base verb is the actual verb being conjugated.\n self._base_verb_string = self.inf_verb_string\n elif self.reflexive == Reflexive.base_reflexive:\n self._base_verb_string = self.core_characters + self.inf_ending +'se'\n else: \n self._base_verb_string = self.core_characters + self.inf_ending\n return self._base_verb_string\n else:\n return None",
"def construct_response(pronoun, noun, verb):\n resp = []\n\n if pronoun:\n resp.append(pronoun)\n\n # We always respond in the present tense, and the pronoun will always either be a passthrough\n # from the user, or 'you' or 'I', in which case we might need to change the tense for some\n # irregular verbs.\n if verb:\n verb_word = verb[0]\n if verb_word in ('be', 'am', 'is', \"'m\"): # This would be an excellent place to use lemmas!\n if pronoun.lower() == 'you':\n # The bot will always tell the person they aren't whatever they said they were\n resp.append(\"aren't really\")\n else:\n resp.append(verb_word)\n if noun:\n pronoun = \"an\" if starts_with_vowel(noun) else \"a\"\n resp.append(pronoun + \" \" + noun)\n\n resp.append(random.choice((\"bro\", \"lol\", \"bruh\", \"nigga\", \"ha ha ha xD\", \"zzz.. oh i fell asleep :P\")))\n\n return \" \".join(resp)",
"def is_noun(tag):\r\n return tag in ['NN', 'NNS', 'NNP', 'NNPS']",
"def _create_formatted_string(self):\n string = NALSyntax.StatementSyntax.Start.value + \\\n self.get_subject_term().get_formatted_string()\n\n string += \" \" + self.get_copula_string() + \" \"\n\n string += self.get_predicate_term().get_formatted_string() + \\\n NALSyntax.StatementSyntax.End.value\n\n return string",
"def get_sentence(self):\n words = []\n for i in sorted([ind.index for ind in self.subtree_dict.keys()]):\n if isinstance(self.subtree_dict[i].label, (str, unicode)):\n words.append(self.subtree_dict[i].label)\n return ' '.join(words)"
] | [
"0.6249946",
"0.6021164",
"0.600624",
"0.5979195",
"0.59327227",
"0.58711636",
"0.57597136",
"0.5741161",
"0.57387596",
"0.57079136",
"0.56950766",
"0.5683717",
"0.56733876",
"0.5652428",
"0.5563646",
"0.55163616",
"0.5513116",
"0.5472722",
"0.5459045",
"0.5425532",
"0.5423237",
"0.5419519",
"0.541946",
"0.5412078",
"0.5409855",
"0.5401926",
"0.5398518",
"0.5398444",
"0.53919876",
"0.5374007"
] | 0.69946307 | 0 |
Parse a noun object from a data file containing nouns and their describing adjectives. | def parse(text):
parts = text.split(' ')
noun = Noun(parts[0], int(parts[1]))
parts = parts[2:]
while len(parts) > 0:
noun.add_adjectives(Word(parts[0], int(parts[1])))
parts = parts[2:]
return noun | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __read_data__(self):\n with open(self.file, 'r') as data:\n sentence = []\n tags = []\n for line in data:\n terms = line.rstrip().split(WHITESPACE)\n for term in terms:\n word_tag = tuple(term.split(TAGCHAR))\n word = word_tag[0]\n tag = word_tag[1]\n self.word_tag_dict[word_tag] += 1\n self.tag_dict[tag] += 1\n self.__add_to_word_dict__(word, tag)\n if self.isNumberWord(word):\n self.numbers += 1\n if word[0].isupper() and len(sentence) > 0:\n self.cap_no_start += 1\n sentence.append(word)\n tags.append(tag)\n if tag == ENDOFSENTENCE:\n self.sentences.append(tuple(sentence))\n self.tags.append(tuple(tags))\n sentence = []\n tags = []",
"def process_data_from_input_file(triplet):\n\n sentence = triplet.subject + ' ' + triplet.predicate + ' ' + triplet.object\n doc = nlp(unicode(sentence))\n root = doc[0]\n for t in doc:\n if t.pos_ == 'VERB' and t.head == t:\n root = t\n # elif t.pos_ == 'NOUN'\n\n # also, if only one sentence\n # root = doc[:].root\n\n\n \"\"\"\n CURRENT ASSUMPTIONS:\n - People's names are unique (i.e. there only exists one person with a certain name).\n - Pet's names are unique\n - The only pets are dogs and cats\n - Only one person can own a specific pet\n - A person can own only one pet\n \"\"\"\n\n\n # Process (PERSON, likes, PERSON) relations\n if root.lemma_ == 'like':\n if triplet.subject in [e.text for e in doc.ents if e.label_ == 'PERSON' or e.label_ == 'ORG'] and triplet.object in [e.text for e in doc.ents if e.label_ == 'PERSON' or e.label_ == 'ORG'] and \"n't\" not in triplet.predicate:\n s = add_person(triplet.subject)\n o = add_person(triplet.object)\n s.likes.append(o)\n\n if root.lemma_ == 'be' and triplet.object.startswith('friends with'):\n fw_doc = nlp(unicode(triplet.object))\n with_token = [t for t in fw_doc if t.text == 'with'][0]\n # get text after with\n after_with = fw_doc.text.split(with_token.text+ ' ')[1]\n people = []\n for p in after_with.split(' '):\n if nlp(p)[0].tag_ == 'NNP':\n people.append(nlp(p)[0].text)\n # fw_who = [t for t in with_token.children if t.dep_ == 'pobj'][0].text\n # fw_who = [e for e in fw_doc.ents if e.label_ == 'PERSON'][0].text\n for p in people:\n if triplet.subject in [e.text for e in doc.ents if e.label_ == 'PERSON']:\n s = add_person(triplet.subject)\n o = add_person(p)\n s.likes.append(o)\n o.likes.append(s)\n if root.lemma_ == 'be' and triplet.object == 'friends':\n fw_doc = nlp(unicode(triplet.subject))\n and_token = [t for t in fw_doc if t.text == 'and']\n if and_token:\n and_token = and_token[0].text\n if and_token == 'and' and fw_doc[0].text in [e.text for e in doc.ents if e.label_ == 'PERSON'] and fw_doc[2].text in [e.text for e in doc.ents if e.label_ == 'PERSON']:\n s = add_person(fw_doc[0].text)\n o = add_person(fw_doc[2].text)\n s.likes.append(o)\n o.likes.append(s)\n\n # Process (PET, has, NAME) Mary's dog's name is Rover\n if triplet.subject.endswith('name') and ('dog' in triplet.subject or 'cat' in triplet.subject):\n obj_span = doc.char_span(sentence.find(triplet.object), len(sentence))\n\n # handle single names, but what about compound names? 
Noun chunks might help.\n if (len(obj_span) == 1 or len(obj_span) == 2) and obj_span[-1].pos_ == 'PROPN':\n name = triplet.object\n subj_start = sentence.find(triplet.subject)\n subj_doc = doc.char_span(subj_start, subj_start + len(triplet.subject))\n\n s_people = [token.text for token in subj_doc if token.ent_type_ == 'PERSON']\n assert len(s_people) == 1\n s_person = select_person(s_people[0])\n\n pet = get_persons_pet(s_person.name)\n\n pet.name = name\n s_person.has.append(pet)\n\n # Process (Who has dog)\n if root.lemma_ == 'have'and ('dog' in triplet.object or 'cat' in triplet.object):\n # find pets name and instantiate name empty str\n obj_span = doc.char_span(sentence.find(triplet.object), len(sentence))\n name = ''\n\n if obj_span[-1].pos_ == 'PROPN':\n name = obj_span[-1].text\n s = add_person(triplet.subject)\n s_pet_type = 'dog' if 'dog' in triplet.object else 'cat'\n pet = add_pet(s_pet_type, name)\n s.has.append(pet)\n\n date = [e.text for e in doc.ents if e.label_ == 'DATE']\n gpe = [e.text for e in doc.ents if e.label_ == 'GPE']\n person = [e.text for e in doc.ents if e.label_ == 'PERSON' or e.label_ == 'ORG']\n # if person and GPE exists, we add it into trip(departs_on, departs_to)\n if person and (gpe or date):\n s = add_person(triplet.subject)\n o = add_trip(date, gpe)\n s.travels.append(o)",
"def loadDataFile(self, filename):\n self.datafile = vocloadlib.readTabFile(filename,\n [ 'term', 'accID', 'status', 'abbreviation',\n 'note', 'comment', 'synonyms', 'synonymTypes',\n 'otherIDs', 'start', 'end', 'parent' \n ])",
"def noun_phrase_chunking(part_of_speech_data):\n\n grammar = r\"\"\"\n NP: {<DT|JJ|NN.*>+}\n PP: {<IN><NP>}\n VP: {<VB.*><NP|PP|CLAUSE>+$}\n CLAUSE: {<NP><VP>}\n \"\"\"\n\n grammar2 = r\"\"\"\n NP: {<DT|NN>+} # Chunk sequences of NN and DT\n {<DT><JJ><NN>} # Chunk det+adj+noun\n \"\"\"\n\n return RegexpParser(grammar).parse(part_of_speech_data).draw()",
"def parse(self, word):\n word = self.son.segs(word)\n son_map = self._sonority_map(word)\n son_map = self._mark_offglides(son_map)\n son_map = self._adjust_anom_fric_cod(son_map)\n son_map = self._adjust_anom_fric_ons(son_map)\n ons_son = self._initial_onset(son_map)\n cod_son = self._final_coda(son_map)\n ons = self.from_map(ons_son, word)\n cod = self.from_reverse_map(cod_son, word)\n return (ons, cod)",
"def get_nouns(self):\n\t\tblob = TextBlob(self.raw_string)\n\n\t\tfor word, tag in blob.tags:\n\t\t\tif tag in ['NNP', 'NN']:\n\t\t\t\tself.nouns.append(word.lemmatize())",
"def parse_data(filename):\r\n labels = []\r\n documents = []\r\n with open(filename, 'r') as f:\r\n for line in f:\r\n values = line.split()\r\n label = values[0]\r\n document = []\r\n for wordCount in values[1:]:\r\n parsed = wordCount.split(':')\r\n word = parsed[0]\r\n count = int(parsed[1])\r\n document.append((word, count))\r\n labels.append(label)\r\n documents.append(document)\r\n return (labels, documents)",
"def parse(self, infile):\r\n raise NotImplementedError()",
"def loadDataFile(self, filename):\n \n self.datafile = vocloadlib.readTabFile(filename,\n [ 'term', 'accID', 'status', 'abbreviation',\n 'note', 'comment', 'synonyms', 'synonymTypes',\n 'otherIDs', 'emapa', 'ts', 'parent']\n )",
"def read_tagged_word_list(filename):\n # TODO: write and test this method\n print 'reading tagged file'",
"def parse_label(self):\n # TODO: make this work with attached labels as well as\n # stand alone labels.\n # Save the RAW full text of the label to self._raw\n input_stream = FileStream(self.infile)\n lexer = ODLv21Lexer(input_stream)\n tokens = CommonTokenStream(lexer)\n\n parser = ODLv21Parser(tokens)\n parse_tree = parser.label()\n self._parse_tree = parse_tree\n visitor = Pds3LabelVisitor()\n visitor.visit(parse_tree)\n return visitor.root_dict",
"def parse(las_file):\n io_stream = io.TextIOWrapper(las_file)\n \n entry_date = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')\n entry_filename = 'las_file-' + entry_date + '.las'\n\n entry = SectionInfo()\n entry.filename = entry_filename\n section = ''\n\n for line in io_stream.readlines():\n\n line = line.rstrip()\n\n if not line:\n continue\n\n # Lines beginning with '~' denote the next section header.\n if line[0] == '~':\n section = line\n continue\n # Skip comment lines.\n elif line[0] == '#':\n continue\n\n # LAS standard option 'OTHER' section\n if section[1] == 'O': \n entry.value = line\n entry.section = section\n # The rest of the standard metadata sections\n elif section[1] in ['V', 'W', 'C', 'P']:\n entry = parse_formatted_section_line(section, line, entry)\n # the data section and non-standard sections\n else:\n # print(\"Non-Metadata-Section: [{}]: [{}]\".format(section[0:2], line))\n continue\n\n # Write entry to db\n entry.save()\n\n # Initialize next entry\n entry = SectionInfo()\n entry.filename = entry_filename\n\n return entry_filename",
"def read_data(filename, prefix=None):\n p_data = {}\n with open(filename) as f:\n # This first line is the header for the entire file.\n line = f.next()\n line = line.strip()\n # prev_line = line\n top_header = line.split(',')\n if not top_header:\n # Don't parse this for now.\n pass\n # Now read in per-participant data.\n while True:\n word_list = []\n all_words_data = {}\n # The first line for the participant is a header.\n try:\n line = f.next()\n except StopIteration:\n # We had previously read everything, so we're done.\n break\n line = line.strip()\n p_header = line.split(',')\n\n # The participant's ID # comes first.\n p_id = p_header[0]\n if not p_id:\n # This happens when the previous participant didn't answer.\n \"\"\"\n print 'previous line:', prev_line\n print 'current line:', line\n print 'p header:', p_header\n print\n \"\"\"\n continue\n if prefix:\n p_id = prefix + p_id\n # print 'SN #', p_id\n # The number of N/A's this p is at 28.\n try:\n p_nas = int(p_header[28])\n except ValueError:\n # This happens when an RA messes up the file.\n \"\"\"\n print 'nas: previous line:', prev_line\n print 'nas: current line:', line\n print 'nas: p header:', p_header\n print\n \"\"\"\n raise\n # print \"NA's: #\", p_nas\n # Check if this participant left everything blank.\n # XXX: Have to hard-code this.\n if p_nas == 20:\n \"\"\"Don't record anything.\n p_data[p_id] = {'words': None,\n 'word_data': None,\n 'nas': None,\n 'overall': None}\n \"\"\"\n continue\n # The next line after the header has both the data\n # for the first word and overall statistics.\n # prev_line = line\n try:\n line = f.next()\n except StopIteration:\n # We had previously read everything, so we're done.\n break\n line = line.strip()\n word, word_data, overall_data = parse_first_line(line.split(','))\n word_list.append(word)\n all_words_data[word] = word_data\n # Now read data for the rest of the words.\n for line in f:\n line = line.strip()\n word, word_data = parse_data_lines(line.split(','))\n if word == '':\n \"\"\"\n print \"loop's previous line:\", prev_line\n print \"loop's current line:\", line\n print\n \"\"\"\n # prev_line = line\n break\n word_list.append(word)\n all_words_data[word] = word_data\n # prev_line = line\n # Compute per-word averages\n all_total_avg, future_total_avg, past_total_avg = \\\n datacomputer.compute_all_future_past(all_words_data)\n overall_data['all'] = all_total_avg\n overall_data['future'] = future_total_avg\n overall_data['past'] = past_total_avg\n p_data[p_id] = {'words': word_list,\n 'word_data': all_words_data,\n 'nas': p_nas,\n 'overall': overall_data}\n # print 'p_data'\n # print p_data[p_id]\n # print\n print \"Processed {} participants' data\".format(len(p_data))\n return p_data",
"def _parse(self, infile):\n raise NotImplementedError()",
"def parse_voc(filename):\n tree = ET.parse(filename)\n objects = []\n for obj in tree.findall('object'):\n obj_struct = {}\n obj_struct['name'] = obj.find('name').text\n obj_struct['difficult'] = int(obj.find('difficult').text)\n bbox = obj.find('bndbox')\n obj_struct['bbox'] = [int(bbox.find('xmin').text),\n int(bbox.find('ymin').text),\n int(bbox.find('xmax').text),\n int(bbox.find('ymax').text)]\n objects.append(obj_struct)\n\n return objects",
"def get_noun_phrases(blob):\n return blob.noun_phrases",
"def import_data(in_file):\n\n print '\\n\\tImport data'\n sentence = []\n concept = []\n sentences = []\n concepts = []\n for line in open(in_file, 'r'):\n if line != '\\n':\n sentence += [ line.split()[0] ]\n concept += [ line.split()[1] ]\n else:\n sentences += [ sentence ]\n concepts += [ concept ]\n sentence = [ ]\n concept = [ ]\n pos = []\n lemma = []\n poss = []\n lemmas = []\n for line in open(in_file.replace('.data', '.feats.txt'), 'r'):\n if line != '\\n':\n pos += [ line.split()[ 1 ] ]\n lemma += [ line.split()[ 2 ] ]\n else:\n poss += [ pos ]\n lemmas += [ lemma ]\n pos = [ ]\n lemma = [ ]\n print '\\t--done'\n return sentences, poss, lemmas, concepts",
"def read_pronunciation(pronunciation_file):\n # file = open('dictionary.txt', 'r')\n #\n # for line in file:\n # print line\n\n ################# https://m.reddit.com/r/CompSciPortfolio/comments/303fyo/assignment_3_poetry_reader/\n\n pronunciation_dictionary = {}\n line = pronunciation_file.readline()\n while line.startswith(';;;'):\n line = pronunciation_file.readline()\n while line != '':\n stripped_line = line.strip()\n separation = stripped_line.find(' ')\n pronunciation_dictionary[stripped_line[:separation]] = stripped_line[(separation + 2):].split()\n line = pronunciation_file.readline()\n return pronunciation_dictionary\n\n\n\n # my_list = {}\n # for line in pronunciation_file.readlines():\n # line = line.strip()\n # if line and \";;;\" not in line:\n # r = line.split()\n # word = r[0]\n # phonemes = r[1:]\n # my_list[word] = phonemes\n # return my_list",
"def main ():\n\n\tfio = fileIo('input.txt')\n text = fio.getInput()\n\n\tp = re.compile(r'#?\\d[\\s\\.]?[\\s]?')\n\tout = filter(None, p.split(text))\n\ti = 0\n\tlistOfLists = []\n\t\n\n\tfor s in out:\n\t\ti += 1\n\t\ttext = nltk.word_tokenize(s)\n\t\tpos = nltk.pos_tag(text)\n\t\tpattern = \"NP: {<DT>?<JJ>*<NN>}\"\n\t\tNPChunker = nltk.RegexpParser(pattern)\n\t\tresult = NPChunker.parse(pos)\n\t\tlistOfLists.append( result )\n\n\tprint \"Noun Count:\\n\" + str(countNouns( listOfLists ))\n\tprint \"Verb Count:\\n\" + str(countVerbs( listOfLists ))\n\tprint \"Adjective Count:\\n\" + str(countAdjectives( listOfLists ))",
"def load_file(filename):\n\tlabels = []\n\tdocs = []\n\n\twith open(filename) as f:\n\t\tfor line in f:\n\t\t\tcontent = line.split('\\t')\n\n\t\t\tif len(content) > 2:\n\t\t\t\tprint('incorrect read')\n\t\t\t\texit()\n\n\t\t\tif len(content[1]) == 0: continue\n\n\t\t\tdocs.append(str(content[1]).strip('\\r').strip('\\n').strip('\\r\\n'))\n\t\t\tlabels.append(content[0])\n\n\treturn docs, labels",
"def parseFile(self, filename):\n self.__filename = filename\n\n if os.path.isfile(filename) == False:\n self.LogError(\"Unable to open input file \" + str(filename))\n raise IOError\n\n self.__file = open(filename, 'r')\n\n while True:\n string = self.__file.readline()\n if string == \"\":\n break\n\n if string.upper().find(\"[SYSTEM]\") != -1:\n #print string.upper()\n self.__parseSystem()\n\n if string.upper().find(\"[GRASS]\") != -1:\n #print string.upper()\n self.__parseGrass()\n\n if string.upper().find(\"[COMPLEXDATA]\") != -1:\n #print string.upper()\n self.complexDataList.append(ComplexData(self.__file))\n\n if string.upper().find(\"[COMPLEXOUTPUT]\") != -1:\n #print string.upper()\n self.complexOutputList.append(ComplexOutput(self.__file))\n\n if string.upper().find(\"[LITERALDATA]\") != -1:\n #print string.upper()\n LD = LiteralData(self.__file)\n if LD.identifier == 'multi_output':\n self.LogWarning(\"multi_output: \" + LD.value.upper())\n if LD.value.upper() == 'TRUE':\n self.multiOutput = True\n else:\n self.literalDataList.append(LD)",
"def parseFileName(filename):\n entry = DataEntry(\"\",0,{},{},0,0)\n wordArray = filename.split(\".\")\n entry.publication_name = wordArray[1]\n entry.year = wordArray[0]\n return entry",
"def load(path: str) -> \"DataDescriptor\":\n\n\t\twith open(path, \"r\") as f:\n\t\t\tinfo_dict = json.load(f)\n\n\t\treturn DataDescriptor(\n\t\t\tn_gram_size=int(info_dict[\"n_gram_size\"]),\n\t\t\tcaseless=bool(info_dict[\"caseless\"]),\n\t\t\tignore_punctuation=bool(info_dict[\"ignore_punctuation\"]),\n\t\t\tadd_pos_tags=bool(info_dict[\"add_pos_tags\"]),\n\t\t\tuses_lemma=bool(info_dict[\"uses_lemma\"]),\n\t\t\tuses_sentences=bool(info_dict[\"uses_sentences\"])\n\t\t)",
"def parse_object(self, word_list):\n\n self.skip(word_list, 'stop')\n\n next_word = self.peek(word_list)\n\n if next_word == 'noun':\n return self.match(word_list, 'noun')\n\n elif next_word == 'direction':\n return self.match(word_list, 'direction')\n\n else:\n raise ParserError(\n 'Expected a noun or direction. Got a %s.' % next_word\n )",
"def parse_data(fp):\n pass",
"def _parse(self):\n with open(_join(self.man_dir, self.man_fn)) as fp:\n lines = fp.readlines()\n \n desc_indxs = []\n for i, L in enumerate(lines):\n if \"#landuse\" in L or \" # landuse\" in L:\n desc_indxs.append(i-1)\n desc_indxs.append(i-2)\n desc_indxs.append(i-3)\n \n lines = [L[:L.find('#')].strip() for L in lines]\n lines = [L for i, L in enumerate(lines) if len(L) > 0 or i in desc_indxs]\n\n del desc_indxs\n \n self.datver = lines.pop(0)\n self.nofe = int(lines.pop(0))\n self.sim_years = int(lines.pop(0))\n \n # Read Plant Growth Section\n self.plants = PlantLoops(lines, self)\n\n # Read Operation Section\n self.ops = OpLoops(lines, self)\n \n # Read Initial Condition Section\n self.inis = IniLoops(lines, self)\n \n # Read Surface Effects Section\n self.surfs = SurfLoops(lines, self)\n \n # Read Contour Section\n self.contours = ContourLoops(lines, self)\n \n # Read Drainage Section\n self.drains = DrainLoops(lines, self)\n \n # Read Yearly Section\n self.years = YearLoops(lines, self)\n \n # Read Management Section \n self.man = ManagementLoop(lines, self)",
"def parse_file(self, infile, chardict, labeldict):\n examples = []\n fin = io.open(infile, 'r')\n # idx is for the index of the row in the \n # original file before shuffling and randomization\n idx = 0\n for line in fin: \n entity, label = map(clean, line.rstrip().split('\\t')[:2])\n # print entity\n ent = map(lambda c:chardict[c], list(entity))\n lab = map(lambda l:labeldict[l] if l in labeldict else 0, label.split(','))\n examples.append((idx, ent, lab))\n idx += 1\n fin.close()\n print \"num_rows:\", len(examples), \" index\", idx\n return examples",
"def __init__(self, variable, pnoun, nucleus):\n super(ProperNounExpression, self).__init__(variable, EmptyExpression(), nucleus)\n assert(pnoun in proper_nouns)\n self.pnoun = pnoun",
"def read_ann_file(fileid, ann_dir):\n ann_file = \"%s/%s.ann\"%(ann_dir,fileid)\n with codecs.open(ann_file, 'r', 'utf-8') as f:\n data = f.read()\n rows = data.split('\\n')\n entities = {}\n ent_count = 0\n relations = {}\n #annotations = []\n for row in rows:\n cols = row.split(\"\\t\")\n ann_id = cols[0]\n if(u\"#\" in cols[0]):\n tmp = cols[1].split()[1:],\" \",cols[2]\n annotations.append(tmp)\n elif(len(cols)==3 and u\"T\" in cols[0]):\n # is an entity\n ent_count += 1\n ent_type = cols[1].split()[0]\n ranges = cols[1].replace(\"%s\"%ent_type,\"\")\n if \";\" in ranges:\n ranges = [{\"start\":int(r.split()[0]),\"end\":int(r.split()[1])} for r in ranges.split(';')]\n else:\n ranges = [{\"start\":int(ranges.split()[0]),\"end\":int(ranges.split()[1])}]\n entities[cols[0]] = {\"ann_id\":ann_id\n ,\"entity_type\": ent_type\n ,\"positions\": ranges\n ,\"surface\":cols[2]\n ,\"continuation\":False}\n elif(len(cols)>=2 and u\"R\" in cols[0]):\n rel_type, arg1, arg2 = cols[1].split()\n relations[cols[0]] = {\"ann_id\":ann_id\n ,\"arguments\":(arg1.split(\":\")[1], arg2.split(\":\")[1])\n ,\"relation_type\":rel_type}\n else:\n if(len(cols)>1):\n if(cols[1].split()[0]==\"Continuation\"):\n continued_entity_id = cols[1].split()[1]\n #print cols[1].split()[0],continued_entity_id\n entities[continued_entity_id][\"continuation\"] = True\n return entities, relations",
"def data_parser(data):\n\n with open(data, 'r') as inp:\n\n # take every sample\n # the last line in the text file is empty, so reading until -1\n samples = inp.read().split('\\n')[:-1]\n\n vec = []\n labels = []\n for sample in samples:\n # file is tab delimited\n split_samples = sample.split('\\t')\n # last column contains the label\n labels.append(int(split_samples[-1]))\n\n features = []\n for feature in split_samples[:-1]:\n features.append(float(feature))\n vec.append(features)\n\n # make the features and labels as a numpy array\n vec = np.array(vec)\n labels = np.array(labels)\n return vec, labels"
] | [
"0.5926569",
"0.56615496",
"0.5575726",
"0.5492108",
"0.54724497",
"0.54638004",
"0.5449956",
"0.5432941",
"0.5421955",
"0.53711075",
"0.53692234",
"0.535761",
"0.53158194",
"0.5268704",
"0.52373093",
"0.5237189",
"0.5234773",
"0.5220252",
"0.52158135",
"0.5205877",
"0.52025634",
"0.5196424",
"0.51920056",
"0.51561964",
"0.51454395",
"0.5133324",
"0.5114785",
"0.511348",
"0.5100287",
"0.5081667"
] | 0.69166636 | 0 |
Returns the self.guessed_by and self.metaphors_used data as a readable string. | def get_str_metadata(self):
return "\n".join(["Guessed by {}".format(self.guessed_by), "{} metaphors used".format(self.metaphors_used)]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_strings(self):\n return self._guessed_strings",
"def __str__(self):\n d = {}\n d[\"tuner_number\"] = self.tuner_number\n d[\"output_format\"] = self.output_format\n d[\"output_source\"] = self.output_source\n return str(d)",
"def get_human_readable(self):\n\n def yesno(key):\n if getattr(self, key) and getattr(self, key) > 0:\n return \"Y\"\n else:\n return \"N\"\n\n keys = (\n \"pvs1\",\n \"ps1\",\n \"ps2\",\n \"ps3\",\n \"ps4\",\n \"pm1\",\n \"pm2\",\n \"pm3\",\n \"pm4\",\n \"pm5\",\n \"pm6\",\n \"pp1\",\n \"pp2\",\n \"pp3\",\n \"pp4\",\n \"pp5\",\n \"ba1\",\n \"bs1\",\n \"bs2\",\n \"bs3\",\n \"bs4\",\n \"bp1\",\n \"bp2\",\n \"bp3\",\n \"bp4\",\n \"bp5\",\n \"bp6\",\n \"bp7\",\n )\n result = \", \".join([\"%s: %s\" % (key.upper(), yesno(key)) for key in keys])\n result += \", ACMG classification: %s\" % self.class_auto\n if self.class_override:\n result += \", ACMG class. override: %s\" % self.class_override\n return result",
"def to_formatted(self) -> str:\n return str(self.google_confidence) + \\\n \"\\t\" + str(self.normalized_sentence_score) + \\\n \"\\t\" + str(self.gaps_transcript) + \\\n \"\\t\" + str(self.gaps_google)",
"def __str__(self):\n result = \", \".join(map(str, self.hand))\n result += \"\\n \" + str(self.get_score()) + \" points\"\n return result",
"def details(self) -> str:\n return f\"- **language**: [{self.language}]\\n\" \\\n f\"- **opengame**: [{self.opengame}]\\n\" \\\n f\"- **system**: [{self.system}]\\n\" \\\n f\"- **mode**: [{self.mode}]\\n\" \\\n f\"- **attributes**: [{self.attributes}]\\n \" \\\n f\"- **score_threshold**: [{self.score_threshold}]\\n \" \\\n f\"- **monsters**: [{self.monsters}]\\n\"",
"def __repr__(self):\n return \"{} hp:{:.1f} stunned: {} potions: {}\".format(self.name, self.hitpoints,\n self.stunned, self.potions)",
"def __str__(self):\r\n to_string = \"ID: \" + str(self.dat_id) + \" --- CLASSIFICATION: \" + str(self.dat_party) + \" --- VOTED: \" + str(self.dat_votes)\r\n return to_string",
"def __str__(self):\n s = 'hit '+str(self.hit)+'\\n'\n s+= 'states '+str(self.states)+'\\n'\n s+= 'chi2 '+str(self.chi2)\n return s",
"def __str__(self):\n header = [\n ' GnoweeHeuristics:']\n header += [('Population = {}').format(self.population)]\n header += [('Sampling Method = {}').format(self.initSampling)]\n header += [('Discovery Fraction = {}').format(self.fracMutation)]\n header += [('Elitism Fraction = {}').format(self.fracElite)]\n header += [('Levy Fraction = {}').format(self.fracLevy)]\n header += [('Levy Alpha = {}').format(self.alpha)]\n header += [('Levy Gamma = {}').format(self.gamma)]\n header += [('Levy Independent Samples = {}').format(self.n)]\n header += [('Levy Scaling Parameter = {}').format(self.scalingFactor)]\n header += [('Constraint Violaition Penalty = {}').format(self.penalty)]\n header += [('Max # of Generations = {}').format(self.maxGens)]\n header += [('Max # of Function Evaluations = {}').format(self.maxFevals)]\n header += [('Convergence Tolerance = {}').format(self.convTol)]\n header += [('Stall Limit = {}').format(self.stallLimit)]\n header += [('Optimal Convergence Tolerance = {}').format(self.optConvTol)]\n header += [' Attributes Inhereted from ProblemParameters:']\n header += [('{}').format(ProblemParameters.__str__(self))]\n return ('\\n').join(header) + '\\n'",
"def __str__(self):\n out = \"!!!!!!! REPORTED STATISTICS !!!!!!!\\n\"\n for k in self.order:\n if k in self.keys():\n if k in self.explainer.keys():\n out += self.explainer[k].replace('XXXX', str(\n self[k])) + \"\\n\"\n else:\n out += self[k] + \"\\n\"\n for k in self.keys():\n if k not in self.order:\n out += str(self[k])\n return out",
"def __str__(self):\n return (str(self.chromosome_id) + '. Chromosome: Genes: ' + str(\n self.genes) + '; Fitness: ' + str(self.fitness_value))",
"def human_readable_info(self) -> str:\n next_session = unix_str(self._stat.next_session)\n last_session = unix_str(self._stat.last_session)\n return \"\"\"\n Next Session: {}\n Last Session: {}\n Repetitions: {}\n Health: {}\n ------------------------\n Past Quality (last 20):\n ------------------------\n {}\n \"\"\".format(\n next_session,\n last_session,\n self._stat.actual_repetitions,\n self._health(),\n self._past_quality_graph(),\n )",
"def info(self):\n return (f\"Match id: {self._id}\\n\"\n f\"dire_score: {self.dire_score}\\n\"\n f\"dire_team: {self.dire_team}\\n\"\n f\"duration: {self.duration}\\n\"\n f\"game_mode: {self.game_mode}\\n\"\n f\"patch: {self.patch}\\n\"\n f\"radiant_score: {self.radiant_score}\\n\"\n f\"radiant_team: {self.radiant_team}\\n\"\n f\"radiant_win: {self.radiant_win}\\n\"\n f\"skill: {self.skill}\\n\"\n f\"start_time: {self.start_time}\\n\")",
"def _to_str(self):\n\t\tprint(\"predictors: {}, types: {} \\n method: {}, preprocessing: {}\\\n\t\t\t \\n partition_rate: {}, metric: {}, file name: {}\".format(\n\t\t\t self.predictors, self.predictors_types, self.method_name,\n\t\t\t self.preprocessing_methods, self.data_split, self.metric,\n\t\t\t self.plotting_file_name))",
"def __str__(self):\n return \"{}\".format(self._matches.keys())",
"def __str__(self):\n prob = str(round(self.probability, 5))\n dprob = str(round(self.postdProbability, 5))\n output = \"dprob: \" + dprob + \" \\tprob: \" + prob + \"\\t: \"\n for key in self.attackDict.keys():\n output += key + \" \"\n return output",
"def __str__(self):\n sorted_table = InferenceUtils.get_n_best(self._table, max(len(self._table), 1))\n\n result = []\n for key, value in sorted_table.items():\n result.append('P(%s):=%f\\n' % (str(key), value))\n\n return ''.join(result)[:-1] if len(result) > 0 else ''",
"def getOpponentFormat(self):\r\n return self.opponent + \"\\t\"",
"def str(self):\n out = \"{0}:\".format(self.gtype) if self.gtype else \"\"\n out += \"{0}\".format(repr(self.coords))\n out += \"[{0}]\".format(str(self.goalPtr)) if self.goalPtr else \"\"\n return out",
"def to_string(self):\n\n return '[[%s], [%s]], [%d, %d], [%s], %s, %s, [%s]' % \\\n (', '.join(INT2STRING_CARD[h] for h in self.hand[0]),\n ', '.join(INT2STRING_CARD[h] for h in self.hand[1]),\n self.pot[0], self.pot[1],\n ', '.join(INT2STRING_CARD[p] for p in self.pub),\n INT2STRING_PHASE[self.phase],\n INT2STRING_PLAYER[self.player],\n ', '.join(INT2STRING_STATUS[s] for s in self.status))",
"def __repr__(self):\n return \"{} hp:{:.1f} stunned: {}\".format(self.name, self.hitpoints,\n self.stunned)",
"def __str__(self):\n return \"{} : {}\".format(self._conference, self.win_ratio_avg())",
"def human_readable(self):\n if self.no_flags_set():\n return \"no flags set\"\n else:\n flag_desc = []\n for name in (\"bookmarked\", \"for_validation\", \"candidate\", \"final causative\"):\n if getattr(self, \"flag_%s\" % name.replace(\" \", \"_\")):\n flag_desc.append(name)\n for name in (\"visual\", \"validation\", \"molecular\", \"phenotype_match\", \"summary\"):\n field = getattr(self, \"flag_%s\" % name)\n if field and field != \"empty\":\n flag_desc.append(\"%s rating is %s\" % (name.split(\"_\")[0], field))\n return \", \".join(flag_desc)",
"def get_info(self):\n out = ''\n for k in sorted(self.components.keys()):\n out += '{:s}: {:s}'.format(k, self.info[k]) + '\\n'\n return(out)",
"def _player_info(self):\n return \"%r %s seat:%s m:%r c:%s b:%s \" % (self.name, self.serial, self.seat, self.money, self._chips, self._bet)",
"def to_string(self):\n return \"User: {} Description: {} Ratings: {}\".format(self.id_user, self.description, self.ratings)",
"def __str__(self):\n return ', '.join([self.yftf_data, self.info_hash, str(self.num_pieces), str(self.peers)])",
"def __str__(self):\n return str((self.code, self.fitness,))",
"def _get_problem_report_results_str(self):\n return 'curr_rew: %0.3f, best_rew: %0.3f'%(self.curr_reward, self.curr_best_reward)"
] | [
"0.66056585",
"0.6369337",
"0.63330597",
"0.63225114",
"0.62778735",
"0.62415534",
"0.6170479",
"0.6140569",
"0.60968494",
"0.60799277",
"0.60438675",
"0.60398",
"0.60284495",
"0.6009198",
"0.59779775",
"0.59726894",
"0.597072",
"0.59602815",
"0.594238",
"0.5917474",
"0.5892665",
"0.5871034",
"0.5856106",
"0.5852676",
"0.58474237",
"0.5828681",
"0.5828217",
"0.5819358",
"0.58059597",
"0.5805112"
] | 0.80824745 | 0 |
Quick plot of a `tick.base.TimeFunction` | def plot_timefunction(time_function, labels=None, n_points=300, show=True,
ax=None):
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=(4, 4))
else:
show = False
if time_function.is_constant:
if labels is None:
labels = ['value = %.3g' % time_function.border_value]
t_values = np.arange(10).astype('float')
ax.plot(t_values, time_function.value(t_values), label=labels[0])
else:
if labels is None:
interpolation_to_legend = {
TimeFunction.InterLinear: 'Linear',
TimeFunction.InterConstLeft: 'Constant on left',
TimeFunction.InterConstRight: 'Constant on right'
}
border_to_legend = {
TimeFunction.Border0:
'border zero',
TimeFunction.BorderConstant:
'border constant at %.3g' % time_function.border_value,
TimeFunction.BorderContinue:
'border continue',
TimeFunction.Cyclic:
'cyclic'
}
labels = [
'original points',
'%s and %s' %
(interpolation_to_legend[time_function.inter_mode],
border_to_legend[time_function.border_type])
]
original_t = time_function.original_t
if time_function.border_type == TimeFunction.Cyclic:
cycle_length = original_t[-1]
original_t = np.hstack((original_t, original_t + cycle_length,
original_t + 2 * cycle_length))
t_values = _extended_discrete_xaxis(original_t, n_points=n_points)
ax.plot(time_function.original_t, time_function.original_y, ls='',
marker='o', label=labels[0])
ax.plot(t_values, time_function.value(t_values), label=labels[1])
ax.legend()
if show is True:
plt.show()
return ax.figure | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plotTime(self):\n plt.figure()\n t = [i for i in range(len(self.nodes_infected))]\n print(t)\n plt.title('Nodos infectados vs Tiempo')\n plt.xlabel('Instantes de tiempo')\n plt.ylabel('# de nodos infectados')\n plt.plot(t, self.nodes_infected)\n plt.grid(True)\n plt.show()",
"def plot_time_series(self, *args, **kwargs):\n return SimulationStaticVisualizer(self, *args, **kwargs)",
"def plot_times(self, train_time, title=None, xmin=None, xmax=None,\n ymin=None, ymax=None, ax=None, show=True, color=None,\n xlabel=True, ylabel=True, legend=True, chance=True,\n label='Classif. score'):\n if not np.array(train_time).dtype is np.dtype('float'):\n raise ValueError('train_time must be float | list or array of '\n 'floats. Got %s.' % type(train_time))\n\n return plot_gat_times(self, train_time=train_time, title=title,\n xmin=xmin, xmax=xmax,\n ymin=ymin, ymax=ymax, ax=ax, show=show,\n color=color, xlabel=xlabel, ylabel=ylabel,\n legend=legend, chance=chance, label=label)",
"def plot(axes, axis, values, c='chartreuse'):\n a = axes[axis]\n a.set_xlabel('time (s)')\n x = np.array(range(len(values))) / 1000\n dim = 'x' if axis == 0 else 'y' if axis == 1 else 'z'\n a.set_title('-'.join([dim, 'acceleration']))\n a.plot(x, values / 1000, c=c)",
"def time_function(t):\n\n omega = np.pi\n return np.sin(omega * t) + np.sin(10 * omega * t) + np.sin(20 * omega * t)",
"def plot():\n pass",
"def visualize(x, y, xlabel=None, ylabel=None, title=None, ylim=None):\n total_seconds = (x[-1] - x[0]).total_seconds()\n if total_seconds <= 86400 * 1 * 3:\n return plot_one_day(x, y, xlabel, ylabel, title, ylim)\n \n elif total_seconds <= 86400 * 7 * 2:\n return plot_one_week(x, y, xlabel, ylabel, title, ylim)\n \n elif total_seconds <= 86400 * 30 * 1.5:\n return plot_one_month(x, y, xlabel, ylabel, title, ylim)\n \n elif total_seconds <= 86400 * 90 * 1.5:\n return plot_one_quarter(x, y, xlabel, ylabel, title, ylim)\n \n elif total_seconds <= 86400 * 365 * 1.5:\n return plot_one_year(x, y, xlabel, ylabel, title, ylim)",
"def matshow_tseries(time_series, fig=None, axis=0, xtick_n=5, time_unit=None,\r\n xlabel=None, ylabel=None):\r\n\r\n if fig is None:\r\n fig = plt.figure()\r\n\r\n if not fig.get_axes():\r\n ax = fig.add_subplot(1, 1, 1)\r\n else:\r\n ax = fig.get_axes()[axis]\r\n\r\n #Make sure that time displays on the x axis with the units you want:\r\n #If you want to change the time-unit on the visualization from that used to\r\n #represent the time-series:\r\n if time_unit is not None:\r\n tu = time_unit\r\n conv_fac = ts.time_unit_conversion[time_unit]\r\n #Otherwise, get the information from your input:\r\n else:\r\n tu = time_series.time_unit\r\n conv_fac = time_series.time._conversion_factor\r\n\r\n this_time = time_series.time / float(conv_fac)\r\n ax.matshow(time_series.data)\r\n\r\n ax.set_xticks(list(range(len(this_time)))[::len(this_time) / xtick_n])\r\n ax.set_xticklabels(this_time[::len(this_time) / xtick_n])\r\n\r\n if xlabel is None:\r\n ax.set_xlabel('Time (%s)' % tu)\r\n else:\r\n ax.set_xlabel(xlabel)\r\n\r\n if ylabel is not None:\r\n ax.set_ylabel(ylabel)\r\n\r\n return fig",
"def plot_time(self, X, x0, t):\n\n Pressure = [Solution(self, (x-x0)/t).pressure for x in X]\n Velocity = [Solution(self, (x-x0)/t).velocity for x in X]\n Density = [Solution(self, (x-x0)/t).rho for x in X]\n\n fig, axs = plt.subplots(3, sharex=True)\n fig.suptitle(\"Solution of the Riemann problem\\nat t = {}s\".format(t))\n axs[0].plot(X, Density)\n axs[1].plot(X, Velocity)\n axs[2].plot(X, Pressure)\n\n axs[0].grid()\n axs[0].set(ylabel = \"Density\")\n axs[1].grid()\n axs[1].set(ylabel = \"Velocity\")\n axs[2].grid()\n axs[2].set(ylabel = \"Pressure\")\n\n plt.xlabel(\"Location x\")",
"def plottf(tfarray,tlst,flst,fignum=1,starttime=0,timeinc='hrs',\r\n dt=1.0,title=None,vmm=None,cmap=None,aspect=None,interpolation=None,\r\n cbori=None,cbshrink=None,cbaspect=None,cbpad=None,scale='log',\r\n normalize='n',):\r\n \r\n #time increment\r\n if timeinc=='hrs':\r\n tinc=3600/dt\r\n elif timeinc=='min':\r\n tinc=60/dt\r\n elif timeinc=='sec':\r\n tinc=1/dt\r\n else:\r\n raise ValueError(timeinc+'is not defined')\r\n #colormap\r\n if cmap==None:\r\n cmap='jet'\r\n else:\r\n cmap=cmap\r\n #aspect ratio\r\n if aspect==None:\r\n aspect='auto'\r\n else:\r\n aspect=aspect\r\n #interpolation\r\n if interpolation==None:\r\n interpolation='gaussian'\r\n else:\r\n interpolation=interpolation\r\n #colorbar orientation\r\n if cbori==None:\r\n cbori='vertical'\r\n else:\r\n cbori=cbori\r\n #colorbar shinkage\r\n if cbshrink==None:\r\n cbshrink=.99\r\n else:\r\n cbshrink=cbshrink\r\n #colorbar aspect\r\n if cbaspect==None:\r\n cbaspect=20\r\n else:\r\n cbaspect=cbaspect\r\n #colorbar pad\r\n if cbpad==None:\r\n cbpad=.1\r\n else:\r\n cbpad=cbpad\r\n #scale\r\n if scale=='log':\r\n zerofind=np.where(abs(tfarray)==0)\r\n tfarray[zerofind]=1.0\r\n if normalize=='y':\r\n plottfarray=10*np.log10(abs(tfarray/np.max(abs(tfarray))))\r\n else:\r\n plottfarray=10*np.log10(abs(tfarray))\r\n elif scale=='linear':\r\n if normalize=='y':\r\n plottfarray=abs(tfarray/np.max(abs(tfarray)))\r\n else:\r\n plottfarray=abs(tfarray)\r\n \r\n plt.rcParams['font.size']=10\r\n plt.rcParams['figure.subplot.left']=.08\r\n plt.rcParams['figure.subplot.right']=.99\r\n plt.rcParams['figure.subplot.bottom']=.07\r\n plt.rcParams['figure.subplot.top']=.96\r\n plt.rcParams['figure.subplot.wspace']=.25\r\n plt.rcParams['figure.subplot.hspace']=.20\r\n \r\n \r\n plt.figure(fignum)\r\n if vmm!=None:\r\n vmin=vmm[0]\r\n vmax=vmm[1]\r\n plt.imshow(plottfarray,extent=(tlst[0]/tinc+starttime,\r\n tlst[-1]/tinc+starttime,flst[0],flst[-1]),aspect=aspect,\r\n vmin=vmin,vmax=vmax,cmap=cmap,interpolation=interpolation)\r\n else:\r\n plt.imshow(plottfarray,extent=(tlst[0]/tinc+starttime,\r\n tlst[-1]/tinc+starttime,flst[0],flst[-1]),aspect=aspect,\r\n cmap=cmap,interpolation=interpolation)\r\n plt.xlabel('Time('+timeinc+')',fontsize=12,fontweight='bold')\r\n plt.ylabel('Frequency (Hz)',fontsize=12,fontweight='bold')\r\n if title!=None:\r\n plt.title(title,fontsize=14,fontweight='bold')\r\n plt.colorbar(orientation=cbori,shrink=cbshrink,pad=cbpad,aspect=cbaspect)\r\n plt.show()",
"def running_time(func, counter, plot_type, *args, **kwargs):\n plots = []\n counter = 0\n #include counter\n output = (func(counter, *args, **kwargs)) #returns [value, counter]\n value, counter = output\n if plot_type.upper() == \"STANDARD\":\n plots.append(output)\n else:\n plots.append((log(float(value)), log(float(counter))))\n #print plots\n return plt.plot(plots)",
"def plot_time_frames(self):\n\n fig = plt.figure()\n plt.grid(True)\n\n plt.ylim([-1.5,1.5])\n plt.xlim([0,1])\n\n for key in self.timeframes.keys():\n if key == 0:\n plt.plot(self.x, self.timeframes[key], label=\"time: \" + str(round(key*self.dt, 3)), linewidth=5)\n else:\n plt.plot(self.x, self.timeframes[key], label=\"time: \" + str(round(key*self.dt, 3)))\n\n plt.title(\"Wave at different times\")\n plt.legend(loc=\"upper right\")\n plt.show()\n\n # fig.savefig('results/pics_wave/vibrating_string_'+ self.type + '.png', dpi=150)",
"def plotAll(fx,tfarray,tlst,flst,fignum=1,starttime=0,timeinc='hrs',\r\n dt=1.0,title=None,vmm=None,cmap=None,aspect=None,interpolation=None,\r\n cbori=None,cbshrink=None,cbaspect=None,cbpad=None,normalize='n',\r\n scale='log'):\r\n \r\n #time increment\r\n if timeinc=='hrs':\r\n tinc=3600/dt\r\n elif timeinc=='min':\r\n tinc=60/dt\r\n elif timeinc=='sec':\r\n tinc=1/dt\r\n else:\r\n raise ValueError(timeinc+'is not defined')\r\n #colormap\r\n if cmap==None:\r\n cmap='jet'\r\n else:\r\n cmap=cmap\r\n #aspect ratio\r\n if aspect==None:\r\n aspect='auto'\r\n else:\r\n aspect=aspect\r\n #interpolation\r\n if interpolation==None:\r\n interpolation='gaussian'\r\n else:\r\n interpolation=interpolation\r\n #colorbar orientation\r\n if cbori==None:\r\n cbori='vertical'\r\n else:\r\n cbori=cbori\r\n #colorbar shinkage\r\n if cbshrink==None:\r\n cbshrink=.99\r\n else:\r\n cbshrink=cbshrink\r\n #colorbar aspect\r\n if cbaspect==None:\r\n cbaspect=20\r\n else:\r\n cbaspect=cbaspect\r\n #colorbar pad\r\n if cbpad==None:\r\n cbpad=.1\r\n else:\r\n cbpad=cbpad\r\n \r\n #scale\r\n if scale=='log':\r\n zerofind=np.where(abs(tfarray)==0)\r\n tfarray[zerofind]=1.0\r\n if normalize=='y':\r\n plottfarray=20*np.log10(abs(tfarray/np.max(abs(tfarray))))\r\n else:\r\n plottfarray=20*np.log10(abs(tfarray))\r\n elif scale=='linear':\r\n if normalize=='y':\r\n plottfarray=abs(plottfarray/np.max(abs(plottfarray)))**2\r\n else:\r\n plottfarray=abs(tfarray)**2\r\n \r\n t=np.arange(len(fx))*dt+starttime*dt\r\n FX=np.fft.fft(padzeros(fx))\r\n FXfreq=np.fft.fftfreq(len(FX),dt)\r\n \r\n #set some plot parameters\r\n plt.rcParams['font.size']=10\r\n plt.rcParams['figure.subplot.left']=.13\r\n plt.rcParams['figure.subplot.right']=.98\r\n plt.rcParams['figure.subplot.bottom']=.07\r\n plt.rcParams['figure.subplot.top']=.96\r\n plt.rcParams['figure.subplot.wspace']=.25\r\n plt.rcParams['figure.subplot.hspace']=.20\r\n #plt.rcParams['font.family']='helvetica'\r\n \r\n fig=plt.figure(fignum)\r\n \r\n #plot FFT of fx\r\n fax=fig.add_axes([.05,.25,.1,.7])\r\n plt.plot(abs(FX[0:len(FX)/2]/max(abs(FX)))**2,FXfreq[0:len(FX)/2],'-k')\r\n plt.xlim(0,1)\r\n plt.ylim(0,FXfreq[len(FX)/2-1])\r\n fax.xaxis.set_major_locator(MultipleLocator(.5))\r\n \r\n #plot TFD\r\n pax=fig.add_axes([.25,.25,.75,.7])\r\n if vmm!=None:\r\n vmin=vmm[0]\r\n vmax=vmm[1]\r\n plt.imshow(plottfarray,extent=(tlst[0]/tinc,tlst[-1]/tinc,\r\n flst[0],flst[-1]),aspect=aspect,vmin=vmin,vmax=vmax,cmap=cmap,\r\n interpolation=interpolation)\r\n else:\r\n plt.imshow(plottfarray,extent=(tlst[0]/tinc,tlst[-1]/tinc,\r\n flst[0],flst[-1]),aspect=aspect,cmap=cmap,\r\n interpolation=interpolation)\r\n plt.xlabel('Time('+timeinc+')',fontsize=12,fontweight='bold')\r\n plt.ylabel('Frequency (Hz)',fontsize=12,fontweight='bold')\r\n if title!=None:\r\n plt.title(title,fontsize=14,fontweight='bold')\r\n plt.colorbar(orientation=cbori,shrink=cbshrink,pad=cbpad,aspect=cbaspect)\r\n \r\n #plot timeseries\r\n tax=fig.add_axes([.25,.05,.60,.1])\r\n plt.plot(t,fx,'-k')\r\n plt.axis('tight')\r\n plt.show()",
"def example3():\n arrive_time=example2() # Get packets arrive time using example1\n time_series.plot_time_series(arrive_time) # Plot time series using packets arrive time",
"def drawAxes(t):\r\n t.speed(0)\r\n t.pd()\r\n t.forward(500)\r\n t.back(500)",
"def plot_dt_signal(x, title=None):\n pylab.figure()\n pylab.stem(range(len(x)), x)\n pylab.title(title)\n pylab.xlabel(\"samples\")",
"def plot_time_slices(self):\n U = self.r.u[:, 15:-15, :]\n T = range(U.shape[2])\n kwarglist = [dict(t=t,\n index=self.index,\n U=U,\n levels=self.levels,\n fname=self.time_slice_path(t))\n for t in T]\n util.parallel_process(plot_time_slice, kwarglist=kwarglist)",
"def plot_time(signal,\n fs=None,\n ax=None,\n scale='linear',\n sides='onesided',\n title=None,\n label=None,\n **kwargs):\n if ax is None:\n ax = plt.gca()\n if fs is None:\n fs = 1\n ax.set_xlabel(\"Samples\")\n else:\n ax.set_xlabel(\"t / s\")\n t = _time_vector_onesided(signal, fs)\n if scale == 'linear':\n ax.set_ylabel('Amplitude (linear)')\n elif scale == 'db':\n signal = _db_calculation(signal)\n ax.set_ylabel('Amplitude / dB')\n else:\n raise NameError(\"Invalid scale\")\n if sides == 'onesided':\n ax.plot(t, signal, label=label, linewidth=2.0)\n elif sides == 'twosided':\n ax.plot(\n _time_vector_twosided(signal,\n fs),\n np.fft.fftshift(signal),\n label=label, linewidth=1.0)\n else:\n raise NameError(\"Invalid sides\")\n if title is not None:\n ax.set_title(title)\n ax.grid(True)\n ax.ticklabel_format(useOffset=False)\n return ax",
"def plot_x(t, x):\n plt.figure()\n plt.plot(t, x)\n plt.title(\"Vertical position of the skydiver as a function of time\")\n plt.xlabel(\"Time t [s]\")\n plt.ylabel(\"Height [m]\")\n plt.savefig('Parachute_position.png')",
"def plot_tseries(time_series, fig=None, axis=0,\r\n xticks=None, xunits=None, yticks=None, yunits=None,\r\n xlabel=None, ylabel=None, yerror=None, error_alpha=0.1,\r\n time_unit=None, **kwargs):\r\n\r\n if fig is None:\r\n fig = plt.figure()\r\n\r\n if not fig.get_axes():\r\n ax = fig.add_subplot(1, 1, 1)\r\n else:\r\n ax = fig.get_axes()[axis]\r\n\r\n #Make sure that time displays on the x axis with the units you want:\r\n #If you want to change the time-unit on the visualization from that used to\r\n #represent the time-series:\r\n if time_unit is not None:\r\n tu = time_unit\r\n conv_fac = ts.time_unit_conversion[time_unit]\r\n #Otherwise, get the information from your input:\r\n else:\r\n tu = time_series.time_unit\r\n conv_fac = time_series.time._conversion_factor\r\n\r\n this_time = time_series.time / float(conv_fac)\r\n ax.plot(this_time, time_series.data.T, **kwargs)\r\n\r\n if xlabel is None:\r\n ax.set_xlabel('Time (%s)' % tu)\r\n else:\r\n ax.set_xlabel(xlabel)\r\n\r\n if ylabel is not None:\r\n ax.set_ylabel(ylabel)\r\n\r\n if yerror is not None:\r\n if len(yerror.data.shape) == 1:\r\n this_e = yerror.data[np.newaxis, :]\r\n else:\r\n this_e = yerror.data\r\n delta = this_e\r\n e_u = time_series.data + delta\r\n e_d = time_series.data - delta\r\n for i in range(e_u.shape[0]):\r\n ax.fill_between(this_time, e_d[i], e_u[i], alpha=error_alpha)\r\n\r\n return fig",
"def coordinate_vs_time_plotter(array, xyz_axis=0, bird=0, axis_of_time_steps=2, start=0., end=1.):\r\n y_values = array[bird, xyz_axis, :]\r\n x_values = get_time_array(array, axis_of_time_steps, start, end)\r\n\r\n fig = plt.figure()\r\n ax = fig.add_subplot()\r\n\r\n if xyz_axis == 0:\r\n ax.set_ylabel('X (m)')\r\n elif xyz_axis == 1:\r\n ax.set_ylabel('Y (m)')\r\n elif xyz_axis == 2:\r\n ax.set_ylabel('Z (m)')\r\n else:\r\n print(\"That is not a valid axis choice. Please choose one of: 0, 1, 2\")\r\n ax.set_xlabel('Time (s)')\r\n ax.scatter(x_values, y_values)\r\n return fig.show()",
"def plot_global(type):\n click.echo(click.style(\n \"Generating Plot....\", fg='cyan', bold='true'))\n plot_time_series.TimeSeriesPloTs.plot_global(type)\n click.echo(click.style(\n \"Done....\", fg='green', bold='true'))",
"def demo(self, tmin=0, tmax=27.4, cadence=30.0 / 60.0 / 24.0, offset=0, raw=False, ax=None):\n t = np.arange(tmin, tmax, cadence)\n if ax is None:\n plt.figure('demo', figsize=(8, 3))\n else:\n plt.sca(ax)\n y = self.model(t)\n if raw:\n plt.plot(t, y + offset, alpha=0.25, linewidth=1, color='royalblue')\n plt.plot(t, self.integrated(t) + offset, alpha=0.5, linewidth=1, color='darkorange')\n plt.xlim(tmin, tmax)\n # plt.ylim(np.max(y)+0.01, np.min(y)-0.01)\n plt.xlabel('Time (days)')\n plt.ylabel('Flux (mag.)')",
"def plot_timefrequency(z, time, f, signal=None, method=\"stft\"):\n\n if method == \"stft\":\n figure_title = \"Short-time Fourier Transform Magnitude\"\n fig, ax = plt.subplots()\n for i in range(len(time)):\n ax.plot(f, z[:, i], label=\"Segment\" + str(np.arange(len(time))[i] + 1))\n ax.legend()\n ax.set_title(\"Signal Spectrogram\")\n ax.set_ylabel(\"STFT Magnitude\")\n ax.set_xlabel(\"Frequency (Hz)\")\n\n elif method == \"cwt\":\n figure_title = \"Continuous Wavelet Transform Magnitude\"\n elif method == \"wvd\":\n figure_title = \"Wigner Ville Distrubution Spectrogram\"\n fig = plt.figure()\n plt.plot(time, signal)\n plt.xlabel(\"Time (sec)\")\n plt.ylabel(\"Signal\")\n\n elif method == \"pwvd\":\n figure_title = \"Pseudo Wigner Ville Distribution Spectrogram\"\n\n fig, ax = plt.subplots()\n spec = ax.pcolormesh(time, f, z, cmap=plt.get_cmap(\"magma\"), shading=\"auto\")\n plt.colorbar(spec)\n ax.set_title(figure_title)\n ax.set_ylabel(\"Frequency (Hz)\")\n ax.set_xlabel(\"Time (sec)\")\n return fig",
"def __plot_T__(self, refresh=False, *args):\n # If plot is not requested, return:\n if not self.plotTeVar.get() or not self.plotTiVar.get():\n return\n\n # Check for a closed window:\n if 'T' in self.plots.keys() and not matplotlib.pyplot.fignum_exists(self.plots['T'].number):\n del self.plots['T']\n refresh = False\n # Update the existing plot, if it exists\n refresh = refresh or 'T' in self.plots.keys()\n if refresh:\n if 'T' in self.plots.keys():\n fig = self.plots['T']\n fig = matplotlib.pyplot.figure(fig.number)\n fig.clear()\n else:\n return\n # Make a Tew window:\n else:\n fig = matplotlib.pyplot.figure(figsize=(4,3))\n fig.canvas.set_window_title('T, time = ' + '{:.3f}'.format(1e9*self.imp.t(self.it)))\n ax = fig.add_subplot(111)\n\n # Plot:\n if self.plotTeVar.get():\n ax.plot(1e4*self.imp.r((self.it), self.ir)[0], self.imp.Te((self.it), self.ir)[0], 'r-', label='e')\n if self.plotTiVar.get():\n ax.plot(1e4*self.imp.r((self.it), self.ir)[0], self.imp.Ti((self.it), self.ir)[0], 'b-', label='i')\n\n ax.set_xlabel('r (um)', fontsize=12)\n ax.set_ylabel('T (keV)', fontsize=12)\n ax.legend()\n\n if self.logxVar.get():\n ax.set_xscale('log')\n if self.logyVar.get():\n ax.set_yscale('log')\n\n matplotlib.pyplot.tight_layout()\n\n if not refresh:\n fig.show()\n fig.canvas.draw()\n if self.wm is not None:\n self.wm.addWindow(matplotlib.pyplot.get_current_fig_manager().window)\n self.plots['T'] = fig",
"def plot_basic(time, data, lgnd=None):\n pylab.figure()\n pylab.plot(time, data)\n pylab.xlabel('time, s')\n pylab.ylabel('data')\n pylab.title('Basic Plotter')\n if lgnd != None:\n pylab.legend(lgnd)\n pylab.grid(True)\n pylab.show()",
"def comp_time_plot(p1=database['K+'], p2=database['pi+'], pmax=80, plot=True):\r\n dt = []\r\n p_range = np.linspace(10, pmax, 1000)\r\n m1 = p1.mass\r\n m2 = p2.mass\r\n for p in p_range:\r\n t1_per_m = 76.273/(beta(p, m1)*gamma(p, m1)*c)\r\n t2_per_m = 76.273/(beta(p, m2)*gamma(p, m2)*c)\r\n dt.append(abs(t1_per_m - t2_per_m)*1e12)\r\n dt_12_5 = dt[np.argmin(abs(p_range-12.5))]\r\n dt_75 = dt[np.argmin(abs(p_range-75))]\r\n ratio = dt_12_5/dt_75\r\n if plot==True:\r\n fig = plt.figure(figsize=[10, 5])\r\n ax = fig.add_subplot(1, 1, 1)\r\n ax.plot(p_range, dt, 'b', label=r'$\\Delta t$')\r\n ax.axvline(12.5, color='r', label='p=12.5 GeV')\r\n ax.axvline(75, color='g', label='p=75 GeV')\r\n ax.set_xlim(10, pmax)\r\n ax.set_ylim(0)\r\n ax.set_xlabel('p / GeV', fontsize=20)\r\n# ax.set_yscale('log')\r\n ax.set_ylabel(r'$\\Delta t$ / ps', fontsize=20)\r\n title = f'{p1.name} to {p2.name} '\r\n title += r'$\\Delta t$ dependancy on particle momenta'\r\n ax.set_title(title, fontsize=20)\r\n ax.legend(fontsize=20)\r\n text = 'dt(12.5) = {0:.2f} ps, '.format(dt_12_5)\r\n text += 'dt(75) = {0:.2f} ps, '.format(dt_75)\r\n text += 'ratio = {0:.3f}'.format(ratio)\r\n plt.show()\r\n print(text)\r\n return [dt_12_5, dt_75, ratio]",
"def plot_time(time_to_complete, plot_num):\n average = []\n for i, point in enumerate(time_to_complete):\n average.append(sum(time_to_complete[:i+1])/ (i+1))\n plt.plot(time_to_complete, color= 'blue', label=\"Epoch Time\")\n plt.plot(average, color = 'red', label= \"Average Time\", zorder = 3)\n plt.legend()\n plt.title(\"Time to complete FetchReach\")\n plt.ylabel(\"Time (seconds)\")\n plt.xlabel(\"Number iterations\")\n plt.savefig(\"./plots/time/time_to_complete_{}.png\".format(plot_num))\n plt.clf()",
"def tick(self):",
"def test_plot_timeseries_univariate(tmpdir, random):\n x = np.linspace(0, 10, 20)\n y = np.sin(x)\n segments = get_test_segments(data=np.expand_dims(y, 0))\n\n output_path = Path(tmpdir) / 'temp_visualization_test_univariate.png'\n\n plot_timeseries(x=x,\n y=y,\n segments=segments,\n show_plot=False,\n output_filename=output_path)\n\n assert output_path.exists()"
] | [
"0.6426607",
"0.62802076",
"0.62569886",
"0.62506014",
"0.6236783",
"0.6092056",
"0.6068509",
"0.60091037",
"0.60037977",
"0.60024494",
"0.6001193",
"0.59628236",
"0.59590256",
"0.5951325",
"0.59470856",
"0.59453624",
"0.5932996",
"0.5908867",
"0.5891108",
"0.5865043",
"0.58623916",
"0.58553123",
"0.5847916",
"0.5842294",
"0.5838787",
"0.5835769",
"0.5827261",
"0.5806125",
"0.5788757",
"0.5769874"
] | 0.68075925 | 0 |
Generates mapping from water measurements column names to indices of the given header. | def get_water_index_map(archive, header):
column_re = {
'surface': {
'flow': 'pretok',
'level': 'vodostaj'
},
'ground': {
'altitude': 'nivo',
'level': 'vodostaj'
}
}
column_map = {key: -1 for key in column_re[archive].keys()}
empty = True
# Do regex search of every db column for every CSV file column heading.
for i, column in enumerate(header):
for column_name in column_re[archive].keys():
if re.search(column_re[archive][column_name], column, re.IGNORECASE):
if column_map[column_name] != -1:
continue
column_map[column_name] = i
empty = False
return None if empty else column_map | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __get_column_mapping(self):\n\n s1 = list(Sample(\"FL\", \"M1.0@265_Primary_ar115_s2010-08-06T06_36_00_e2010-08-06T18_24_00.csv\").get_data().columns)[:25]\n column_mapping = {}\n for i in range(len(s1)):\n column_mapping[i] = s1[i]\n\n return column_mapping",
"def column_indexer(data):\n idCol = {label: index for index, label in enumerate(data.columns)}\n return idCol",
"def getColumnIndices(*args, filepath=\"CO2.tab\"):\n # idxDict = {\"PT\": 0, \"TM\": 0, \"HG\": 0, \"SEG\": 0}\n idxDict = {\"PT\": 0, \"TM\": 0, \"HG\": 0, \"VISG\": 0, \"VISHL\": 0, \"ROG\": 0, \"ROHL\": 0}\n if filepath:\n cols = tabLineToList(readFullLine(filepath, 52))\n for key in idxDict:\n idxDict[key] = cols.index(key)\n return idxDict",
"def getIndividual2ColIndex(cls, header, col_name2index, sampleStartingColumn=9):\n\t\tsys.stderr.write(\"Finding all individuals ...\")\n\t\tno_of_cols = len(header)\n\t\tindividual_name2col_index = {}\t#individual's column name -> an opened file handler to store genetic data\n\t\tcounter = 0\n\t\tfor i in xrange(sampleStartingColumn, no_of_cols):\n\t\t\tindividualName = header[i]\n\t\t\tcol_index = col_name2index.get(individualName)\n\t\t\tif not individualName:\t#ignore empty column\n\t\t\t\tcontinue\n\t\t\tif individualName[:-4]=='.bam':\n\t\t\t\tindividualCode = individualName[:-4]\t#get rid of .bam\n\t\t\telse:\n\t\t\t\tindividualCode = individualName\n\t\t\tindividual_name2col_index[individualCode] = col_index\n\t\t\tcounter += 1\n\t\tsys.stderr.write(\"%s individuals added. Done.\\n\"%(counter))\n\t\treturn individual_name2col_index",
"def find_indeces(self, header):\n indeces = {'T': None, 'WV': None, 'WK': None, 'BZ': None, 'SPR': None,\n 'WBER': None, 'ABG.': None, 'UNG.': None, 'SPOE': None,\n 'FPOE': None, 'OEVP': None, 'GRUE': None, 'NEOS': None,\n 'WWW': None, 'ANDAS': None, 'GFW': None, 'SLP': None,\n 'WIFF': None, 'M': None, 'FREIE': None}\n for index, item in enumerate(header):\n indeces[item] = index\n return indeces",
"def _get_columns_mapping_dict():\n\n columns_mapping_dict = {}\n for original_header in COLUMN_HEADERS_MAPPER:\n new_header = COLUMN_HEADERS_MAPPER[original_header]\n columns_mapping_dict[new_header] = [original_header]\n return columns_mapping_dict",
"def get_header_indices(filepath):\n\theaders = get_header_list(filepath, sort=False)\n\treturn {h: i for i, h in enumerate(headers)}",
"def _get_header_index(self, columnname):\n\n return self.headers.index(columnname)",
"def get_mapping():\n \n import pandas as pd\n data = pd.read_csv('/home/yuheng/Downloads/ADE20K_2016_07_26/objectInfo150.txt',sep='\\t',lineterminator='\\n') \n mapping = {}\n for i in range(150):\n line = data.loc[i]\n mapping[ int(line['Idx']) ] = line['Name']\n \n return mapping",
"def create_matrix_mapping(train_mh, unk_vec_id):\n mh_index_map = {}\n matrix_idx = 0\n for vector_idx in train_mh:\n if vector_idx == unk_vec_id:\n unk_matrix_id = matrix_idx\n mh_index_map[vector_idx] = matrix_idx\n matrix_idx += 1\n return mh_index_map, unk_matrix_id",
"def column_info(colum_header):\n commas = colum_header.count(',')\n if commas == 0:\n return (column_name(clean_token(colum_header)))\n\n (key, units, location) = colum_header.split(',')\n key = column_name(clean_token(key))\n units = clean_token(units)\n location = clean_token(location)\n return (key, units, location)",
"def parseHeader(header):\n tokens = [t for t in header.split(' ') if t]\n result = {}\n for i in range(len(tokens)):\n result[tokens[i]] = i \n\n return result",
"def _get_table_columns(self):\n try:\n table_header = parse_table_head(self.table.value, version=self.version)\n merged_data = self.table.value[table_header.tdef_header_end:]\n if table_header.TDEF_header.next_page_ptr:\n merged_data = merged_data + self._merge_table_data(table_header.TDEF_header.next_page_ptr)\n\n parsed_data = parse_table_data(merged_data, table_header.real_index_count,\n table_header.column_count, version=self.version)\n\n # Merge Data back to table_header\n table_header['column'] = parsed_data['column']\n table_header['column_names'] = parsed_data['column_names']\n\n except ConstructError:\n logging.error(f\"Failed to parse table header {self.table.value}\")\n return\n col_names = table_header.column_names\n columns = table_header.column\n\n # Add names to columns metadata so we can use only columns for parsing\n for i, c in enumerate(columns):\n c.col_name_str = col_names[i].col_name_str\n\n # column_index is more accurate(id is always incremented so it is wrong when a column is deleted).\n # Some tables like the catalog don't have index, so if indexes are 0 use id.\n\n # create a dict of index to column to make it easier to access. offset is used to make this zero based\n offset = min(x.column_index for x in columns)\n column_dict = {x.column_index - offset: x for x in columns}\n # If column index is not unique try best effort\n if len(column_dict) != len(columns):\n # create a dict of id to column to make it easier to access\n column_dict = {x.column_id: x for x in columns}\n\n if len(column_dict) != table_header.column_count:\n logging.debug(f\"expected {table_header.column_count} columns got {len(column_dict)}\")\n return column_dict, table_header",
"def get_colnumber(self, header):\n for i in range(0, len(self.data)):\n if self.data[i][0] == header:\n return i\n return None",
"def header_population(headers):\n return [{'id': field, 'name': field, 'field': field, 'sortable': True} for field in headers]",
"def get_report_column_names(self):\r\n # Compose the list of report_column names required for\r\n # summary_report.dsw.DictWriter()\r\n sr = self.summary_report\r\n dict_leader = sr.dict_leader\r\n dict_out = sr.dict_out\r\n column_names = self.column_names\r\n report_column_names = []\r\n #if dict_leader is not None and dict_out is not None:\r\n if dict_leader is not None and dict_out is not None:\r\n for key,value in dict_leader.iteritems():\r\n #print \"Adding report_column_name(from dict_leader)=\",key\r\n report_column_names.append(key)\r\n dict_out[key] = value\r\n # We have to initialize the DictWriter with the report_column_names\r\n # below. \r\n # Also need matched coord_val and var names for calling node_report()\r\n # below, so we do this duplication of storage of names. \r\n coord_var_names = []\r\n coord_val_names = []\r\n for idx, column_name in enumerate(column_names):\r\n var_name = \"Var_%s\" % str(idx+1)\r\n report_column_names.append(var_name)\r\n coord_var_names.append(var_name)\r\n val_name = \"Val_%s\" % str(idx+1)\r\n report_column_names.append(val_name)\r\n coord_val_names.append(val_name)\r\n # Add the entry report_column_names\r\n report_column_names += self.EntryClass.report_column_names\r\n return report_column_names",
"def parse_header(f):\n columns = ['pokemon', 'species_id', 'height', 'weight', 'type_1', 'type_2',\n 'url_image', 'generation_id', 'evolves_from_species_id']\n sep = ','\n result = {}\n allData = []\n with open(const.DATA_FILENAME, newline=\"\") as myData:\n for line in myData:\n line = line.strip()\n line = line.split(sep)\n allData.append(line)\n for i in columns:\n j = 0\n while j < len(allData[0]):\n if allData[0][j] == i:\n result[i] = j\n j += 1\n return result",
"def set_column_headers(self, headers):\n if isinstance(self.columns.idx[0], int):\n self.data = [sorted(headers)] + self.data\n\n increment = [i + 1 for i in self.rows.idx]\n self.rows.idx = [0] + increment\n\n elif isinstance(self.columns.idx[0], str):\n datum = {}\n for i, key in enumerate(self.columns.idx):\n datum.update({key: headers[i]})\n self.data = [datum] + self.data\n\n increment = [i + 1 for i in self.rows.idx]\n self.rows.idx = [0] + increment",
"def get_interesting_mapping_fields(mapping_data, mapping_headers):\r\n result = []\r\n num_samples = len(mapping_data)\r\n num_cols = len(mapping_headers)\r\n transposed_data = array(mapping_data).T\r\n for header, datum in zip(mapping_headers, transposed_data):\r\n d = set(datum)\r\n len_d = len(d)\r\n if len_d > 1 and len_d < num_samples:\r\n result.append(header)\r\n return result",
"def header(self) -> list:\n cols = self.data.columns.tolist()\n header = [\"index\"]\n for col_int in cols:\n header.append(col_int)\n return header",
"def map_column_indexes(self, merge_specification, ingredients):\n last_column = len(ingredients) - 1\n accumulating = {}\n remove = set()\n # default behavior, no column merge\n for column_index in range(0, last_column + 1):\n self.column_index_to_columns[column_index] \\\n = [(column_index, 1.0)]\n \n for columns in merge_specification:\n accumulating_column = columns[0][0]\n if accumulating_column > last_column or accumulating_column < 0:\n raise MergeConfigError(\n \"Attempted to merge missing column %d\" % accumulating_column)\n # specifies which columns should be merged into this one\n accumulating[accumulating_column] = columns\n for column_index, _ in columns[1:]:\n column_index = column_index\n if column_index > last_column or column_index < 0:\n raise MergeConfigError(\n \"Attempted to merge missing column %d\" % column_index) \n # drop this column; it will be merged into another\n remove.add(column_index)\n \n # drop columns first so that any columns both specified as\n # accumulating *and* merged columns do not get dropped\n for column_index in remove:\n self.column_index_to_columns[column_index] = None\n \n for column_index, columns in accumulating.items():\n self.column_index_to_columns[column_index] = columns",
"def get_columns_dict(table, replace):\n # 0 is name, 1 is id\n if type(table.index) == pd.MultiIndex:\n colcount = 1 + len(table.index[0])\n else:\n colcount = 2\n cols = {}\n for c in table.columns:\n c_repres = \",\".join(c)\n if \"Filtergroups\" not in c:\n cols[colcount] = replace_in_str(str(c_repres), replace)\n colcount = colcount + 1\n return cols",
"def _transform_map_data(self):\n WARD_FMT = '%s-%s'\n self.map_data_trans = []\n lookup = {i.column: ''.join(filter(lambda x: x.isdigit(), i.value)) for i in self.sht[1]}\n\n #skip over header\n rs = iter(self.sht.rows)\n next(rs)\n next(rs)\n for r in rs:\n pka = r[0].value\n for c in r[1:]:\n if c.value is None:\n c.value = 0\n\n self.map_data_trans.append((WARD_FMT%(pka, lookup[c.column]), c.value))",
"def indices(self):\n if self._indices is None:\n i = []\n\n # TODO: this is not right for multi-column keys\n # TODO: new style indexes\n\n global_name = '^DD(%s,0,\"IX\",\"0\")' % self.fileid\n prefix = '^DD(%s,0,\"IX\",' % self.fileid\n while 1:\n global_name = M.mexec('set s0=$query(%s)' % global_name, M.INOUT(\"\"))[0]\n if not global_name or not global_name.startswith(prefix):\n break\n suffix = global_name[len(prefix):-1]\n parts = suffix.split(\",\")\n idx_name = parts[0][1:-1]\n idx_table = parts[1]\n idx_columns = parts[2:]\n index = Index(idx_name, idx_table, idx_columns)\n i.append(index)\n\n # A second list, gives indices for a field\n columns = {}\n for idx in i:\n for c in idx.columns:\n columns[c] = 1\n\n # Now trawl the listed columns in the data dictionary, and load their\n # cross references.\n cr_names = {}\n for c in columns.keys():\n idx_root = M.Globals[\"^DD\"][self.fileid][c][1]\n if not idx_root[0].exists():\n continue\n for cr_id, val in idx_root.keys_with_decendants():\n if float(cr_id) > 0:\n cr_header = idx_root[cr_id][0].value\n parts = cr_header.split(\"^\")\n if len(parts) == 2 and parts[1]: # if more than 2 parts, assume MUMPs trigger\n f = cr_names.get(parts[1], list())\n f.append(c)\n cr_names[parts[1]] = f\n\n # Now, just delete items from the index list if they are not in cr_names\n self._indices = []\n for index in i:\n cr = cr_names.get(index.name)\n if cr:\n # verify columns - lots of errors in real systems\n if len(cr) == len(index.columns):\n invalid = False\n for c in cr:\n if c not in index.columns:\n invalid = True\n continue\n if not invalid:\n self._indices.append(index)\n\n return self._indices",
"def get_sip_keywords(header):\n cd = np.matrix([[header.get('CD1_1', 0.0), header.get('CD1_2', 0.0)],\n [header.get('CD2_1', 0.0), header.get('CD2_2', 0.0)]], dtype=np.float64)\n a_order = int(header.get('A_ORDER', 0))\n b_order = int(header.get('B_ORDER', 0))\n ac = np.matrix(np.zeros((a_order+1, a_order+1), dtype=np.float64))\n bc = np.matrix(np.zeros((b_order+1, b_order+1), dtype=np.float64))\n for m in range(a_order+1):\n for n in range(0, a_order+1-m):\n ac[m, n] = header.get('A_%d_%d' % (m, n), 0.0)\n for m in range(b_order+1):\n for n in range(0, b_order+1-m):\n bc[m, n] = header.get('B_%d_%d' % (m, n), 0.0)\n return cd, ac, bc",
"def header(self, cols, parent_row):\n out = []\n for col in cols:\n if col == 'gau_id':\n out.append(self.name_for('Geographies', parent_row['geography_id']))\n elif col == 'oth_1_id':\n out.append(self.name_for('OtherIndexes', parent_row['other_index_1_id']))\n elif col == 'oth_2_id':\n out.append(self.name_for('OtherIndexes', parent_row['other_index_2_id']))\n else:\n out.append(col)\n return out",
"def get_tbl_headers(rows):\n tbl_header = rows.pop(0)\n tbl_headers = {}\n for index, header_name in enumerate(tbl_header.find_all('th')):\n if header_name.text in conf.TABLE_HEADER_COLS:\n tbl_headers[header_name.text] = index\n return tbl_headers",
"def getIndex(self,filt):\n indx = [i for i in xrange(len(self._header)) if filt == self._header[i]]\n return indx",
"def vector_indx_to_map_matrix_indx(index,senzory_map):\n xs = dict(zip(np.unique(senzory_map[:,0]), it.count()))\n ys = dict(zip(np.negative(np.unique(senzory_map[:,1])), it.count()))\n x, y = senzory_map[index]\n return ys[y],xs[x]",
"def _create_indices(cls):\r\n from thunderdome.connection import _hosts, _index_all_fields, create_key_index\r\n \r\n if not _hosts: return\r\n for column in cls._columns.values():\r\n if column.index or _index_all_fields:\r\n create_key_index(column.db_field_name)"
] | [
"0.60388774",
"0.5985021",
"0.5983487",
"0.5812272",
"0.573689",
"0.57180965",
"0.5631869",
"0.55975854",
"0.55681896",
"0.5565681",
"0.554784",
"0.5505632",
"0.5441756",
"0.5394642",
"0.53900725",
"0.53899986",
"0.5387639",
"0.537637",
"0.5363915",
"0.53568316",
"0.53414994",
"0.53264475",
"0.53180736",
"0.52968967",
"0.5296696",
"0.52717334",
"0.523432",
"0.52338445",
"0.5221073",
"0.5197063"
] | 0.6957255 | 0 |
Generates mapping from water measurements column names to values of the given CSV row. | def get_water_value_map(row, column_names_map):
column_values_map = column_names_map.copy()
row_length = len(row)
empty = True
for key, index in column_names_map.items():
# Check if non-empty value exist for given index.
if -1 < index < row_length:
value = row[index].strip()
if value:
column_values_map[key] = value
empty = False
continue
# Else NULL is inserted in db.
column_values_map[key] = 'NULL'
return None if empty else column_values_map | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __get_column_mapping(self):\n\n s1 = list(Sample(\"FL\", \"M1.0@265_Primary_ar115_s2010-08-06T06_36_00_e2010-08-06T18_24_00.csv\").get_data().columns)[:25]\n column_mapping = {}\n for i in range(len(s1)):\n column_mapping[i] = s1[i]\n\n return column_mapping",
"def create_deft_table_csv_mappings():\n mappings = list()\n mappings.append(CsvColumnMapping(columnName=\"rownumber\", cslDataType=\"int\", ordinal=0))\n mappings.append(CsvColumnMapping(columnName=\"rowguid\", cslDataType=\"string\", ordinal=1))\n mappings.append(CsvColumnMapping(columnName=\"xdouble\", cslDataType=\"real\", ordinal=2))\n mappings.append(CsvColumnMapping(columnName=\"xfloat\", cslDataType=\"real\", ordinal=3))\n mappings.append(CsvColumnMapping(columnName=\"xbool\", cslDataType=\"bool\", ordinal=4))\n mappings.append(CsvColumnMapping(columnName=\"xint16\", cslDataType=\"int\", ordinal=5))\n mappings.append(CsvColumnMapping(columnName=\"xint32\", cslDataType=\"int\", ordinal=6))\n mappings.append(CsvColumnMapping(columnName=\"xint64\", cslDataType=\"long\", ordinal=7))\n mappings.append(CsvColumnMapping(columnName=\"xuint8\", cslDataType=\"long\", ordinal=8))\n mappings.append(CsvColumnMapping(columnName=\"xuint16\", cslDataType=\"long\", ordinal=9))\n mappings.append(CsvColumnMapping(columnName=\"xuint32\", cslDataType=\"long\", ordinal=10))\n mappings.append(CsvColumnMapping(columnName=\"xuint64\", cslDataType=\"long\", ordinal=11))\n mappings.append(CsvColumnMapping(columnName=\"xdate\", cslDataType=\"datetime\", ordinal=12))\n mappings.append(CsvColumnMapping(columnName=\"xsmalltext\", cslDataType=\"string\", ordinal=13))\n mappings.append(CsvColumnMapping(columnName=\"xtext\", cslDataType=\"string\", ordinal=14))\n mappings.append(CsvColumnMapping(columnName=\"xnumberAsText\", cslDataType=\"string\", ordinal=15))\n mappings.append(CsvColumnMapping(columnName=\"xtime\", cslDataType=\"timespan\", ordinal=16))\n mappings.append(CsvColumnMapping(columnName=\"xtextWithNulls\", cslDataType=\"string\", ordinal=17))\n mappings.append(CsvColumnMapping(columnName=\"xdynamicWithNulls\", cslDataType=\"dynamic\", ordinal=18))\n return mappings",
"def tsvRowToDict(row):\n return {col: getattr(row, col) for col in row._columns_}",
"def _properties_from_csv_row(row, header, ignored_columns):\n props = {}\n for h, prop in enumerate(header):\n # Handle a strange edge case where the length of the row is longer than the length of the header.\n # We do this to prevent an out of range error.\n x = h\n if x > len(row) - 1:\n x = len(row) - 1\n if row[x] == '' or prop in ignored_columns:\n continue\n else:\n try:\n # We use literal_eval() here to de-stringify numbers, lists and objects in the CSV data\n p = literal_eval(row[x])\n props[prop] = p\n except (SyntaxError, ValueError) as e:\n props[prop] = row[x]\n return props",
"def make_dict(row):\n return dict((key[0], value) for key, value in zip(colnames, row))",
"def _transform_map_data(self):\n WARD_FMT = '%s-%s'\n self.map_data_trans = []\n lookup = {i.column: ''.join(filter(lambda x: x.isdigit(), i.value)) for i in self.sht[1]}\n\n #skip over header\n rs = iter(self.sht.rows)\n next(rs)\n next(rs)\n for r in rs:\n pka = r[0].value\n for c in r[1:]:\n if c.value is None:\n c.value = 0\n\n self.map_data_trans.append((WARD_FMT%(pka, lookup[c.column]), c.value))",
"def read_name_map( name_map_path) :\n with open( name_map_path, newline=\"\") as csvfile:\n table = { }\n reader = csv.reader(csvfile)\n for row in reader:\n if len(row) < 2:\n continue\n if row[key_col] == key_header:\n continue\n key = row[key_col]\n val = row[val_col]\n table[key] = val\n return table",
"def process_csv():\n csv_rows = []\n fieldnames = ['site',\n 'latitude',\n 'longitude',\n 'city',\n 'region_code',\n 'country_code',\n 'continent_code',\n 'min_ip_hex',\n 'max_ip_hex',\n 'transit_provider',\n 'min_ip',\n 'max_ip',\n 'ip_prefix',\n 'min_ipv6_hex',\n 'max_ipv6_hex',\n 'min_ipv6',\n 'max_ipv6',\n 'ipv6_prefix']\n\n location_map = build_location_map()\n\n # Read in the CSV file and augment the columns\n with open(INPUT_FILE, 'rb') as csvfile:\n reader = csv.DictReader(csvfile)\n\n for row in reader:\n csv_rows.append(process_row(row, location_map))\n\n # Write the new CSV file with new columns\n with open(OUTPUT_FILE, 'w') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n for row in csv_rows:\n writer.writerow(row)\n\n print(\"MLab Sites CSV generated at {0}\".format(OUTPUT_FILE))",
"def create_waves_dict(csv_file):\n with open(csv_file) as file:\n reader = csv.DictReader(file)\n waves_dict = {row[\"Date\"]: row[\"Wave Height\"] for row in reader}\n return waves_dict",
"def parse_csv_row(self, row):\n\n for key in self.field_map:\n if self.field_map[key] is not None:\n if key == 'marking':\n self.obstacle_data[key] = self.get_marking_value(row[self.field_map[key]].strip())\n elif key == 'lighting':\n self.obstacle_data[key] = self.get_lighting_value(row[self.field_map[key]].strip())\n elif key == 'obst_type':\n self.obstacle_data['obst_type_id'] = self.get_obstacle_type_id(row[self.field_map[key]].strip())\n else:\n self.obstacle_data[key] = row[self.field_map[key]].strip()",
"def get_m_to_me_metabolite_mapping():\n f = pandas.read_csv(fixpath(\"m_to_me_mets.csv\"), index_col=0)[\"me_name\"]\n return f.dropna().to_dict()",
"def map_csv_fields(self):\n etod_csv_fields = {\n 'ctry_id': None,\n 'obst_identifier': None,\n 'obst_name': None,\n 'lon_src': None,\n 'lat_src': None,\n 'agl': None,\n 'amsl': None,\n 'vert_uom': None,\n 'hor_acc': None,\n 'hor_acc_uom': None,\n 'vert_acc': None,\n 'vert_acc_uom': None,\n 'obst_type': None,\n 'lighting': None,\n 'marking': None,\n 'is_group': None,\n }\n\n for field in etod_csv_fields:\n try:\n etod_csv_fields[field] = etod_map[self.ctry_short_name]['fields'][field]\n except KeyError:\n etod_csv_fields[field] = None\n\n self.field_map = etod_csv_fields",
"def map_line(reader, headers):\n\n readings = {}\n\n line = reader.readline()\n\n if len(line) == 0:\n raise EOFError('That\\'s all the data!')\n\n line = line.rstrip()\n\n value_strings = line.split(' ')\n for i, string in enumerate(value_strings):\n if string != 'NaN':\n value = float(string)\n\n if i < len(headers):\n if headers[i]['is_point']:\n value = get_decimal_degrees(value)\n key = headers[i]['name'] + \"-\" + headers[i]['units']\n readings[key] = value\n\n # Provide generic timestamp regardless of type for iterator\n # convenience\n # Keep originals for those interested\n if 'm_present_time-timestamp' in readings:\n readings['timestamp'] = readings['m_present_time-timestamp']\n elif 'sci_m_present_time-timestamp' in readings:\n readings['timestamp'] = readings['sci_m_present_time-timestamp']\n\n return readings",
"def get_mapping():\n \n import pandas as pd\n data = pd.read_csv('/home/yuheng/Downloads/ADE20K_2016_07_26/objectInfo150.txt',sep='\\t',lineterminator='\\n') \n mapping = {}\n for i in range(150):\n line = data.loc[i]\n mapping[ int(line['Idx']) ] = line['Name']\n \n return mapping",
"def conform_input_data(rowdict):\n # rowdict['Value'] = float(rowdict['Value'])\n rowdict['TimeStamp'] = TS_to_date(rowdict['TimeStamp'][:19])\n for floatcolumn in ['LowPx','OpenPx','ClosePx','QuoteCount','HighPx','TradeCount']:\n if floatcolumn in rowdict:\n rowdict[floatcolumn] = float(rowdict[floatcolumn])\n return rowdict",
"def table_row_to_dict(row, make_quantity=True):\n data = {}\n for name, col in row.columns.items():\n val = row[name]\n\n if make_quantity and col.unit:\n val = Quantity(val, unit=col.unit)\n data[name] = val\n return data",
"def map_cols_to_attr(self):\n ## this is from the base class:\n ## for attr, label in zip(self.attr_names, self.labels):\n ## col_ind = self.col_inds[label]\n ## if len(self.data) > 0:\n ## setattr(self, attr, self.data[:,col_ind])\n #\n # hard coding based on what I know about saleae files:\n self.t = self.data[:,0]#.astype(float)\n nr, nc = self.data.shape\n self.num_cols = nc-1\n \n for i in range(0,self.num_cols):\n attr = 'ch_%i' % i\n j = i+1\n setattr(self, attr, self.data[:,j])#.astype(float))",
"def make_row_map(file_path, key_field, field_map=None, transforms=None, \\\n file_encoding=None):\n\n with open(file_path, encoding=file_encoding) as file:\n # preprocess transforms\n if transforms:\n _transforms = {}\n for tf_field, tf in transforms.items():\n _type = type(tf).__name__\n if _type not in ['str', 'function']:\n raise ValueError('Invalid transform')\n _transforms[tf_field] = {\n 'transform': tf,\n 'type': _type\n }\n\n # get fields from csv\n fields_reader = csv.reader(file)\n fields = next(fields_reader)\n\n # make sure we aren't missing any field names\n first_row = next(fields_reader)\n if len(fields) != len(first_row):\n raise ValueError('Header has a different number of columns than data')\n\n # apply field map\n if field_map:\n # TODO use a case insensitive dictionary for field map\n fields = [field_map.get(field.lower()) or field for field in fields]\n key_field = field_map.get(key_field) or key_field\n\n # lowercase\n fields = [field.lower() for field in fields]\n\n # handle spaces\n fields = [field.replace(' ', '_') for field in fields]\n\n # use namedtuple for rows\n fields_joined = ' '.join(fields)\n Row = namedtuple('Row', fields_joined)\n\n # make map\n row_map = {}\n reader = csv.DictReader(file, fieldnames=fields)\n\n for i, row in enumerate(reader):\n key = row[key_field]\n\n # apply transforms\n if transforms:\n for tf_field, tf_map in _transforms.items():\n tf = tf_map['transform']\n tf_type = tf_map['type']\n source_val = row[tf_field]\n if tf_type == 'str':\n val = getattr(source_val, tf)()\n else:\n val = tf(source_val)\n row[tf_field] = val\n\n # row_map[key] = row\n # str_row = {key: str(val) for key, val in row.items()}\n row_map[key] = Row(**row)\n # from pprint import pprint\n # pprint(str_row)\n # row_map[key] = Row(**str_row)\n\n return row_map",
"def list_water_temps(csv_file):\n with open(csv_file) as file:\n reader = csv.DictReader(file)\n temp_list = [temp[\"Water Temp\"] for temp in reader]\n return temp_list",
"def create_dict_from_file(filename, delimeters, first_char, column_names):\n\n # This opens the\n measurement_output = open('measurement_output.txt', \"w\", encoding=\"utf8\")\n # This creates and initializes a list to serve as a dictionary container outside of the for-loop.\n measurements_file_container = {}\n\n # This opens the file and then splits it (preserving the commas because of the landfall count requirement).\n if not filename.endswith('.txt'):\n print('Input File Must Be a .txt File')\n return None\n elif delimeters != '{}=|{}=|{}='.format(column_names[0], column_names[1], column_names[2]):\n print('Please Check Syntax for Delimeters and colunm_names.')\n return None\n else:\n with open(filename, 'r') as infile:\n for line in infile:\n line = line.strip()\n # This checks to see if line begins with a numeric character; if so, it is a header for a new measurement.\n if line[0].isnumeric():\n measurement_current_line = line.split()\n # This initializes a new measurement dictionary with the 3 items in column_names\n key = measurement_current_line[0]\n new_measurement_dictionary = {\n column_names[0]: '0',\n column_names[1]: '0',\n column_names[2]: '0',\n }\n #print(measurement_current_line)\n # this determines if a line starts with 'X', splits it at the X =,Y =,Z = indicators\n # to spit out a list containing only the 3 values and then updates the corresponding\n # value in the dictionary\n if line[0] == first_char:\n measurement_current_line = re.split(delimeters, line.strip(' '))\n if len(measurement_current_line) == 4:\n new_measurement_dictionary[column_names[0]] = float(measurement_current_line[1].strip())\n new_measurement_dictionary[column_names[1]] = float(measurement_current_line[2].strip())\n new_measurement_dictionary[column_names[2]] = float(measurement_current_line[3].strip())\n measurements_file_container[key] = new_measurement_dictionary\n # this stops the processing when the end of data key '$$EOE' is reached.\n elif line == '$$EOE':\n break\n\n\n return(measurements_file_container)",
"def mapper(self, line_no, line):\n cell = csv_readline(line)\n if cell[0] == 'V':\n yield cell[4],1",
"def gen_dict():\n lines = [line for line in csv.reader(open(__ppath__ + \"/data/occupations.csv\"))] # uses a csv.reader to parse the file, converts the generic iterable to a list\n lines = [(line[0],float(line[1])) for line in lines[1:-2]]# removes the column names and \"Total\" row, re-expresses as a list of tuples to enable dictionary conversion\n lines.append((\"Unemployed\",0.2)) # accounts for missing 0.2% of jobs\n return dict(lines) # converts to dictionary",
"def columnar(row_table: list[dict[str, str]]) -> dict[str, list[str]]:\n result: dict[str, list[str]] = {}\n \n first_row: dict[str, str] = row_table[0]\n for column in first_row:\n result[column] = column_values(row_table, column)\n \n return result",
"def write_row(row: dict):\n row = {k: format_float(v) for k, v in row.items()}\n writer.writerow(row)\n csvfile.flush()",
"def loadData(self, aircraftCSV='aircraft.csv'):\n aircraftDict = {}\n \n with open(aircraftCSV, 'r') as f:\n reader = csv.reader(f, delimiter=',')\n for line in reader:\n #if imperial convert to metric\n if line[2] == 'imperial':\n range = float(line[4]) * 8 / 5\n else:\n range = float(line[4])\n aircraftDict[line[0]] = [line[1], line[3], range]\n self.aircraftDict = aircraftDict",
"def process_columns(tup: tuple):\n column_name, data, source_name, data_type, quantiles = tup\n column = Column(column_name, data, source_name, data_type, quantiles)\n print(\"Processing column: \", column.get_long_name())\n column.quantile_histogram = QuantileHistogram(column.get_long_name(), column.ranks, column.size, quantiles)\n with open('cache/' + column.get_long_name() + '.pkl', 'wb') as output:\n pickle.dump(column, output, pickle.HIGHEST_PROTOCOL)",
"def _deduct_types(cls, row: Row) -> ColumnTypes:\n return {\n key: get_value_type(cls.whole_number_to_int(value))\n for key, value in row.items()\n }",
"def map_csv_dimensions(length: str, width: str, height: str):\n return {\n \"length\": length,\n \"width\": width,\n \"height\": height\n }",
"def getHourlyWeatherFromCSV(self,town,scale,key):\n\n\t\t# Variables\n\t\tfile = \"data/weather/\"+town+\"_\"+scale+\".csv\"\n\t\tcsv_data = []\n\t\tweather_data = []\n\t\tweather = {}\n\n\t\t# Reading csv file and storing data in file\n\t\twith open(file) as csvfile:\n\t\t\treader = csv.DictReader(csvfile)\n\t\t\tfor row in reader:\n\t\t\t\tcsv_data.append(row) \n\t\t# Getting data that is needed for visualization\n\n\t\tprint csv_data\n\n\t\tfor data in csv_data:\n\t\t\t# Parsing date\n\t\t\thour = int(data[\"date\"].split(\" \")[4].split(\":\")[0])\n\t\t\tpm_or_am = data[\"date\"].split(\" \")[5]\n\t\t\tday = data[\"date\"].split(\",\")[0]\n\t\t\tif hour == 12 and pm_or_am == \"AM\":\n\t\t\t\tdata[\"date\"] = \"\".join(data[\"date\"].split(\" \")[:-2]) + \" 00:00\"\n\t\t\telif hour < 10 and pm_or_am == \"AM\":\n\t\t\t\tdata[\"date\"] = \"\".join(data[\"date\"].split(\" \")[:-2]) + \" 0\" + str(hour) + \":00\"\n\t\t\telif hour >= 10 and pm_or_am == \"AM\":\n\t\t\t\tdata[\"date\"] = \"\".join(data[\"date\"].split(\" \")[:-2]) + \" \" + str(hour) + \":00\"\n\t\t\tif pm_or_am == \"PM\":\n\t\t\t\tif hour == 12: \n\t\t\t\t\tdata[\"date\"] = \"\".join(data[\"date\"].split(\" \")[:-2]) + \" \" + str(hour) + \":00\"\n\t\t\t\telse:\n\t\t\t\t\thour +=12\n\t\t\t\t\tdata[\"date\"] = \"\".join(data[\"date\"].split(\" \")[:-2]) + \" \" + str(hour) + \":00\"\n\t\t\tweather[\"date\"] = data[\"date\"]\n\n\t\t\t# Appending weather data\n\t\t\tweather[key] = data[key]\n\t\t\tweather_data.append(weather)\n\t\t\tweather = {}\n\t\treturn weather_data",
"def parse_trick_ascii(csv_file):\n data_file = csv.DictReader(open(csv_file))\n single_run_data_dict = {'altitude' : [0.0],\n 'latitude' : [0.0],\n 'longitude' : [0.0]}\n # Your code here\n # ...\n # return the dict\n return single_run_data_dict"
] | [
"0.62421864",
"0.57918304",
"0.5663072",
"0.5657489",
"0.56149256",
"0.5612935",
"0.5567542",
"0.5544542",
"0.5500382",
"0.5480754",
"0.545259",
"0.544672",
"0.5385878",
"0.53811884",
"0.5380167",
"0.5332743",
"0.53060913",
"0.5292364",
"0.527309",
"0.52203315",
"0.5217865",
"0.5204893",
"0.51956916",
"0.51823163",
"0.5171073",
"0.51400554",
"0.5128875",
"0.5127345",
"0.5125955",
"0.5108462"
] | 0.62722737 | 0 |
Populate water measurements table for selected `archive`, `directory` and `stations`. | def populate_water_measurements(cursor, archive, directory, station):
csv_path = get_data_path(
'water',
'raw',
archive,
directory,
f'{station}.csv'
)
with open(csv_path, 'r', encoding='utf-8') as file:
reader = csv.reader(file, delimiter=';')
header = next(reader)
column_names_map = get_water_index_map(archive, header)
if not column_names_map:
return False
water_body = get_water_definitions(archive)['body']
for row in reader:
column_values_map = get_water_value_map(row, column_names_map)
if column_values_map:
date = datetime.strptime(row[0], '%d.%m.%Y').date()
data_columns = ', '.join(column_values_map.keys())
data_values = ', '.join(column_values_map.values())
cursor.execute(f'''INSERT INTO {water_body}_measurements (station_id, date, {data_columns})
VALUES ({station}, '{str(date)}', {data_values})''')
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def populate_water_tables(connection):\n metadata = load_metadata('water')\n cursor = connection.cursor()\n\n # Check if tables are already populated.\n cursor.execute('SELECT count(*) FROM watercourses')\n watercourse_count = cursor.fetchone()[0]\n cursor.execute('SELECT count(*) FROM aquifers')\n aquifer_count = cursor.fetchone()[0]\n\n if watercourse_count and aquifer_count:\n print('Water tables already populated!')\n return\n\n station_data = get_station_data()\n\n for archive in metadata.keys():\n print(f'{archive}-water:'.upper())\n water_body = get_water_definitions(archive)['body']\n\n # 1. Populate watercourses/aquifers:\n stations = {}\n for water_body_name in metadata[archive].keys():\n print(f'\\tPopulating {water_body}: \"{water_body_name}\"')\n cursor.execute(f'''INSERT INTO {water_body}s(location_id, name)\n VALUES (0, '{water_body_name}')''')\n water_body_id = cursor.lastrowid\n\n # 2. Populate watercourse_stations/aquifer_stations:\n for station_id in metadata[archive][water_body_name]['stations']:\n station_name = clean_name(metadata[archive][water_body_name]['stations'][station_id]['name'])\n\n if station_id in stations:\n # Prefer watercourses/aquifer with more stations\n current_len = len(metadata[archive][water_body_name]['stations'])\n previous_len = len(metadata[archive][stations[station_id]]['stations'])\n\n if current_len < previous_len:\n print(f'\\t\\tStation already exists: {station_id} - \"{station_name}\" (\"{water_body_name}\")')\n continue\n else:\n cursor.execute(f'''DELETE \n FROM {water_body}_stations\n WHERE id = {station_id}''')\n print(f'\\t\\tRemoved station: {station_id} - \"{station_name}\" from \"{stations[station_id]}\")')\n\n stations[station_id] = water_body_name\n print(f'\\t\\tPopulating station: {station_id} - \"{station_name}\"')\n\n # Insert station location if station data exists.\n location_id = 0\n station_row = station_data.query(f'ŠIFRA == \"{station_id}\"')\n if not station_row.empty:\n index = station_row.index[0]\n lat = station_row.at[index, 'LAT']\n lng = station_row.at[index, 'LON']\n if not np.isnan(lat) and not np.isnan(lng):\n name = f\"{station_row.at[index, 'VODOMERNA POSTAJA']} ({station_row.at[index, 'VODOTOK']})\"\n cursor.execute(f'''INSERT INTO locations(name, lat, lng)\n VALUES ('{name}', {lat}, {lng})''')\n location_id = cursor.lastrowid\n\n # Insert station.\n cursor.execute(f'''INSERT INTO {water_body}_stations(id, {water_body}_id, location_id, name)\n VALUES ({station_id}, {water_body_id}, {location_id}, '{station_name}')''')\n\n # 3. Populate watercourse_measurements/aquifer_measurements:\n if not populate_water_measurements(cursor, archive, metadata[archive][water_body_name]['dir'],\n station_id):\n cursor.execute(f'''DELETE \n FROM {water_body}_stations\n WHERE id = {station_id}''')\n print(f'\\t\\tRemoved station with useless data: {station_id} - \"{station_name}\"')\n\n # Remove empty watercourses/aquifers.\n cursor.execute(f'''SELECT w.id, w.name\n FROM {water_body}s w\n WHERE NOT EXISTS (\n SELECT s.id \n FROM {water_body}_stations s \n WHERE w.id = s.{water_body}_id\n )''')\n\n for row in cursor.fetchall():\n cursor.execute(f'''DELETE \n FROM {water_body}s\n WHERE id = {row[0]}''')\n print(f'\\tRemoved empty {water_body}: \"{row[1]}\"')",
"def populate_weather(connection):\n metadata = load_metadata('weather')\n cursor = connection.cursor()\n water_defs = get_water_definitions()\n\n # Check if tables are already populated.\n cursor.execute('SELECT count(*) FROM weather')\n weather_count = cursor.fetchone()[0]\n\n if weather_count:\n print('Weather tables already populated!')\n return\n\n print('WEATHER:')\n\n # Darksky data\n for dir_name, location in metadata.items():\n print(f'\\tPopulating weather: \"{location[\"name\"]}\".')\n\n # Insert location.\n cursor.execute(f'''INSERT INTO locations(name, lat, lng)\n VALUES ('{location['name']}', {location['lat']}, {location['lng']})''')\n location_id = cursor.lastrowid\n\n # Set weather locations for watercourses/aquifers.\n for water_body in [d['body'] for d in water_defs.values()]:\n if water_body in location:\n cursor.execute(f'''UPDATE {water_body}s\n SET location_id = {location_id}\n WHERE name IN ('{\"','\".join(location[water_body])}')''')\n break\n\n dir_path = get_data_path('weather', 'raw', dir_name)\n for json_file_name in os.listdir(dir_path):\n json_path = os.path.join(dir_path, json_file_name)\n with open(json_path, 'r', encoding='utf-8') as json_file:\n print(f'\\t\\tPopulating year: {json_file_name[0:-5]}')\n year_forecasts = json.load(json_file)\n for date, date_forecast in year_forecasts.items():\n hourly_forecasts = date_forecast['hourly']\n\n if not hourly_forecasts:\n print(f'\\t\\tNo hourly forecasts for {date}!')\n continue\n\n daily_forecast = {\n 'location_id': location_id,\n 'time': date_forecast['time'],\n 'day_time': date_forecast['sunset_time'] - date_forecast['sunrise_time'],\n 'precipitation': 0,\n 'snow_accumulation': 0\n }\n # List of value names with `avg`, `min` and `max` values\n value_names = {\n 'temperature': 'temperature',\n 'cloud_cover': 'cloudCover',\n 'dew_point': 'dewPoint',\n 'humidity': 'humidity',\n 'pressure': 'pressure',\n 'uv_index': 'uvIndex',\n 'precipitation_probability': 'precipProbability',\n 'precipitation_intensity': 'precipIntensity'\n }\n # Value name counters, which indicate how many times (out of 24)\n # certain value appears in hourly data.\n value_counts = {k: 0 for k in value_names.keys()}\n\n for value_name in value_names.keys():\n daily_forecast[f'{value_name}_avg'] = 0.0\n daily_forecast[f'{value_name}_min'] = float('inf')\n daily_forecast[f'{value_name}_max'] = float('-inf')\n\n # Calculate daily forecast values from hourly forecasts.\n for hourly_forecast in hourly_forecasts:\n for value_name in value_names.keys():\n orig_value_name = value_names[value_name]\n if is_forecast_number(orig_value_name, hourly_forecast):\n daily_forecast[f'{value_name}_avg'] += hourly_forecast[orig_value_name]\n daily_forecast[f'{value_name}_min'] = min(\n hourly_forecast[orig_value_name],\n daily_forecast[f'{value_name}_min']\n )\n daily_forecast[f'{value_name}_max'] = max(\n hourly_forecast[orig_value_name],\n daily_forecast[f'{value_name}_max']\n )\n value_counts[value_name] += 1\n\n if is_forecast_number('precipAccumulation', hourly_forecast) \\\n and hourly_forecast['precipType'] == 'snow':\n daily_forecast['snow_accumulation'] += hourly_forecast['precipAccumulation']\n elif is_forecast_number('precipIntensity', hourly_forecast) \\\n and is_forecast_number('precipProbability', hourly_forecast):\n daily_forecast['precipitation'] += \\\n hourly_forecast['precipIntensity'] * hourly_forecast['precipProbability']\n\n for value_name, value_count in value_counts.items():\n if value_count:\n # Calculate average.\n 
daily_forecast[f'{value_name}_avg'] = daily_forecast[f'{value_name}_avg'] / value_count\n else:\n # If value never appeared\n daily_forecast[f'{value_name}_avg'] = 'NULL'\n daily_forecast[f'{value_name}_min'] = 'NULL'\n daily_forecast[f'{value_name}_max'] = 'NULL'\n\n cursor.execute(f'''INSERT INTO weather({', '.join(daily_forecast.keys())})\n VALUES ({', '.join([str(v) for v in daily_forecast.values()])})''')\n\n # IOT data:\n for location in SETTINGS['weather_locations_iot']:\n print(f'\\tPopulating weather: \"{location[\"name\"]}\".')\n\n # Insert location.\n cursor.execute(f'''INSERT INTO locations(name, lat, lng)\n VALUES ('{location['name']}', {location['lat']}, {location['lng']})''')\n location_id = cursor.lastrowid\n\n # Set weather locations for watercourses/aquifers.\n for water_body in [d['body'] for d in water_defs.values()]:\n if water_body in location:\n cursor.execute(f'''UPDATE {water_body}s\n SET location_id = {location_id}\n WHERE name IN ('{\"', '\".join(location[water_body])}')''')\n\n # Set locations for all stations on given water body to match its location.\n cursor.execute(f'''SELECT id\n FROM {water_body}s\n WHERE location_id = {location_id}''')\n ids = [row[0] for row in cursor.fetchall()]\n if len(ids):\n cursor.execute(f'''UPDATE {water_body}_stations\n SET location_id = {location_id}\n WHERE {water_body}_id IN ({', '.join([str(v) for v in ids])})''')\n\n break \n \n file_name = f'''{location['lat']}-{location['lng']}.json'''\n json_path = get_data_path('weather', 'raw', file_name)\n\n # If data file doesn't exist, download it first.\n if not os.path.isfile(json_path):\n with open(json_path, 'wb', encoding=\"utf-8\") as file:\n file.write(read_from_url(location['url'], decode=False))\n \n with open(json_path, 'r', encoding='utf-8') as json_file:\n row_names = {\n \"Sun_duration\": \"sun_duration\",\n \"CloudCover\": \"cloud_cover_avg\",\n \"Percipitation\": \"precipitation\",\n \"New_snow_blanket\": \"snow_accumulation\",\n \"Snow_blanket\": \"snow_depth\",\n \"TemperatureAvg\": \"temperature_avg\",\n \"TemperatureMin\": \"temperature_min\",\n \"TemperatureMax\": \"temperature_max\"\n }\n forecasts = json.load(json_file)\n for forecast in forecasts:\n f = {row_names[k]: forecast[k] for k in row_names.keys()}\n f['location_id'] = location_id\n f['time'] = round(forecast['LastUpdatedEpoch'] / 1000)\n cursor.execute(f'''INSERT INTO weather({', '.join(f.keys())})\n VALUES ({', '.join([str(v) for v in f.values()])})''')",
"def main(daymet_dir,pickles,start_date='1980-10-01',end_date='2020-09-30',huc_col = 'huc8', **kwargs):\r\n\tprint(f'The huc col being processed is: {huc_col}')\r\n\t################################################################\r\n\t#first do the daymet data \r\n\t#read in all the files in this dir and combine them into one df\r\n\tearly=FormatData(glob.glob(daymet_dir+f'*_12_{huc_col}.csv'),drop_cols=['system:index','.geo','dayl','vp']).read_in_csvs()\r\n\tmid=FormatData(glob.glob(daymet_dir+f'*_2_{huc_col}.csv'),drop_cols=['system:index','.geo','dayl','vp']).read_in_csvs()\r\n\tlate=FormatData(glob.glob(daymet_dir+f'*_4_{huc_col}.csv'),drop_cols=['system:index','.geo','dayl','vp']).read_in_csvs()\r\n\t################################################################\r\n\t#next do the snotel data \r\n\toutput=[]\r\n\r\n\t#read in some pickled objects, these look like a list of dfs with each being a station for the full time period \r\n\tfor item in ['PREC','TAVG','WTEQ']:\r\n\t\t#get the pickled objects for each parameter \r\n\t\tfiles = glob.glob(pickles+f'*{item}_{start_date}_{end_date}_snotel_data_list') #hardcoded currently\r\n\t\tdf=FormatData(files,drop_cols=['year','month','day']).read_in_pickles()\r\n\t\toutput.append(df) #the df here is 365 days x ~30 yrs x 237 stations so these are pretty big dfs\r\n\t\r\n\t#join the three enviro params \r\n\toutput_df = reduce(lambda left,right: pd.merge(left,right,how='inner',on=['date','id']), output)\r\n\t\r\n\t\r\n\t#convert the temp column from F to C \r\n\toutput_df['TAVG'] = (output_df['TAVG']-32)*(5/9) \r\n\t#there are a couple of erroneous temp values, remove those \r\n\toutput_df = output_df.loc[output_df['TAVG'] <= 50]\r\n\r\n\t#convert prec and swe cols from inches to cm \r\n\toutput_df['PREC'] = output_df['PREC']*2.54\r\n\toutput_df['WTEQ'] = output_df['WTEQ']*2.54\r\n\t\r\n\t#remove rows that have one of the data types missing- this might need to be amended because \r\n\t#it means that there are different numbers of records in some of the periods. \r\n\toutput_df=output_df.dropna()\r\n\t\r\n\t#cast the snotel id col to int to add the hucs \r\n\toutput_df['id'] = output_df['id'].astype('int')\r\n\r\n\t#add the as yet nonexistant hucs data to the outputs \r\n\thucs = kwargs.get('hucs')\r\n\toutput_df[huc_col] = output_df['id'].map(hucs)\r\n\r\n\t#there are multiple snotel stations in some of the basins, \r\n\t#combine those so there is just one number per basin like the \r\n\t#daymet and RS data. 
\r\n\r\n\toutput_df=output_df.groupby([huc_col,'date'])[['PREC','WTEQ','TAVG']].mean().reset_index()\r\n\r\n\tperiod_list = []\r\n\tfor p1,p2 in zip(['early','mid','late'],[early,mid,late]): \r\n\t\t\t#get snotel first\r\n\t\t#make a temporal chunk of data \r\n\t\tsnotel_chunk=FormatData(None,time_period=p1).split_yearly_data(output_df)\r\n\r\n\t\t##########working below here\r\n\t\t############################\r\n\t\t#calculate the snow droughts for that chunk \r\n\t\tif (p1 == 'mid') | (p1 == 'late'): \r\n\t\t\tsnotel_drought=CalcSnowDroughts(snotel_chunk,swe_c='WTEQ',precip='PREC',temp='TAVG',start_year=1991,sort_col=huc_col).prepare_df_cols()\r\n\t\t\t#print('snotel')\r\n\t\t\t#print(snotel_drought)\r\n\t\telse: \r\n\t\t\tsnotel_drought=CalcSnowDroughts(snotel_chunk,swe_c='WTEQ',precip='PREC',temp='TAVG',sort_col=huc_col).prepare_df_cols()\r\n\r\n\t\t#get cols of interest \r\n\t\t#snotel_drought=snotel_drought[['huc8','year','dry','warm','warm_dry']]\r\n\t\t#rename cols so they don't get confused when data are merged \r\n\t\t#snotel_drought.columns=['huc8','year']+['s_'+column for column in snotel_drought.columns if not (column =='huc8') | (column=='year')]\r\n\t\t\r\n\t\t#then do the same for daymet \r\n\t\tif (p1 == 'mid') | (p1 == 'late'): \r\n\t\t\tdaymet_drought=CalcSnowDroughts(p2,start_year=1991,sort_col=huc_col).prepare_df_cols()\r\n\t\telse: \r\n\t\t\tdaymet_drought=CalcSnowDroughts(p2,sort_col=huc_col).prepare_df_cols()\r\n\t\t#print('daymet',daymet_drought)\r\n\t\t#daymet_drought=daymet_drought[['huc8','year','dry','warm','warm_dry']]\r\n\t\t\r\n\t\t#daymet_drought.columns=['huc8','year']+['d_'+column for column in daymet_drought.columns if not (column =='huc8') | (column=='year')]\r\n\r\n\t##########################################\r\n\t\r\n\t\t#run the kmeans with drought types as intiilization conditions (centroids) for the clusters\r\n\t\t\r\n\t\t#these are all of the huc 4 basins in the study area \r\n\t\thuc4s = ['1708','1801','1710','1711','1709','1701','1702','1705','1703','1601','1707','1706','1712','1704']\r\n\t\ts_output = []\r\n\t\td_output = []\r\n\t\tfor huc4 in huc4s: \r\n\t\t\thuc4_s = sd.prep_clusters(snotel_drought,huc4,huc_col=huc_col) #get the subset of the snow drought data for a given huc4\r\n\t\t\thuc4_d = sd.prep_clusters(daymet_drought,huc4,huc_col=huc_col)\r\n\t\t\t#make the centroids that serve as the intialization for the kmeans clusters- these are like endmembers (ish)\r\n\t\t\ts_centroids = DefineClusterCenters(huc4_s,'WTEQ','PREC','TAVG').combine_centroids() #makes a numpy array with four centroids\r\n\t\t\td_centroids = DefineClusterCenters(huc4_d,'swe','prcp','tavg').combine_centroids() #makes a numpy array with four centroids\r\n\r\n\t\t\t#clusters should be like: {0:dry, 1:warm, 2:warm_dry, 3:no_drought} 6/8/2021 DOUBLE CHECK\r\n\t\t\t#run kmeans for the snotel data\r\n\t\t\ts_clusters = sd.run_kmeans(huc4_s[['WTEQ','PREC','TAVG']].to_numpy(),huc4_s['label'],s_centroids)\r\n\t\t\ts_clusters = sd.add_drought_cols_to_kmeans_output(s_clusters, huc_col=huc_col) #add a few cols needed for plotting \r\n\t\t\t#run kmeans for the daymet data \r\n\t\t\td_clusters = sd.run_kmeans(huc4_d[['swe','prcp','tavg']].to_numpy(),huc4_d['label'],d_centroids)\r\n\t\t\td_clusters = sd.add_drought_cols_to_kmeans_output(d_clusters, huc_col=huc_col) #add a few cols needed for plotting \r\n\r\n\t\t\ts_output.append(s_clusters)\r\n\t\t\td_output.append(d_clusters)\r\n\t\ts_plot = pd.concat(s_output)\r\n\r\n\t\t#select the cols of interest and rename so there's no 
confusion when dfs are merged \r\n\t\ts_plot=s_plot[[huc_col,'year','dry','warm','warm_dry']]\r\n\t\ts_plot.columns=[huc_col,'year']+['s_'+column for column in s_plot.columns if not (column == huc_col) | (column=='year')]\r\n\r\n\t\td_plot = pd.concat(d_output)\r\n\t\td_plot=d_plot[[huc_col,'year','dry','warm','warm_dry']]\r\n\t\td_plot.columns=[huc_col,'year']+['d_'+column for column in d_plot.columns if not (column == huc_col) | (column=='year')]\r\n\t\r\n\t\t#merge the two datasets into one df \r\n\t\tdfs = s_plot.merge(d_plot,on=[huc_col,'year'],how='inner')\r\n\t\t\r\n\t\t#deal with the scenario that there are basins with less than 30 years of data, remove those here\r\n\t\tdfs = sd.remove_short_dataset_stations(dfs,huc_col)\r\n\t\tperiod_list.append(dfs)\r\n\r\n\tplot_counts(period_list,kwargs.get('stats_dir'),huc_col=huc_col,**kwargs)",
"def get_outdoor_data(temp_dir,site):\n if site == 'berk':\n files_od = glob(join(temp_dir,'outdoor','20*.xlsx'))\n elif site == 'bus':\n files_od = glob(join(temp_dir,'outdoor','Busara*.csv'))\n else:\n raise NameError(site)\n\n dfs = []\n for f in files_od:\n if site == 'berk':\n this_df = pd.read_excel(f,sheet_name=0,usecols='B:D',index_col=0,parse_dates=True, header=1)\n elif site == 'bus':\n this_df = pd.read_csv(f,usecols=[0,1,2],index_col=0,parse_dates=True,header=2)\n \n # drop missing values that prevented conversion to float type\n if this_df.iloc[:,0].dtype != np.float64:\n this_df = this_df[this_df.iloc[:,0] != ' ']\n this_df = this_df.astype(np.float64)\n\n # correct for weird timezones in berkeley datalogger\n this_df = correct_tz(this_df,site)\n \n this_df.columns = ['T','RH']\n this_df.index.name = 'time'\n\n # convert to celsius\n this_df['T'] = (this_df['T'] - 32) * 5/9\n dfs.append(this_df)\n \n df_od = pd.concat(dfs)\n\n # drop duplicated measurements\n df_od = df_od[~df_od.index.duplicated(keep='last')].sort_index()\n \n # separate out into daily min,mean,max\n groups = df_od.groupby(df_od.index.date)\n dfs_od = {'all':df_od,\n 'min': groups.min(),\n 'mean': groups.mean(),\n 'max': groups.max()}\n \n for i in ['min','mean','max']:\n # remove first and last day to ignore days where we did not get full recording\n dfs_od[i] = dfs_od[i].iloc[1:-1,:]\n \n # name index so that we can merge onto multiIndex'd dataframe\n dfs_od[i].index.name = 'date'\n \n return dfs_od",
"def extract_archive_data():\n extract_from_db_info = [\n {\n 'source_db': 'ecommerce_db',\n 'dest_db': 'ecommerce_db',\n 'source_table': 'raw_customer',\n 'dest_table': 'raw_customer_archive',\n 'sql_select': None,\n 'sql_insert': '../sql/insert/insert_raw_customer_archive.sql'\n },\n {\n 'source_db': 'ecommerce_db',\n 'dest_db': 'ecommerce_db',\n 'source_table': 'raw_product',\n 'dest_table': 'raw_product_archive',\n 'sql_select': None,\n 'sql_insert': '../sql/insert/insert_raw_product_archive.sql'\n },\n {\n 'source_db': 'ecommerce_db',\n 'dest_db': 'ecommerce_db',\n 'source_table': 'raw_sales',\n 'dest_table': 'raw_sales_archive',\n 'sql_select': None,\n 'sql_insert': '../sql/insert/insert_raw_sales_archive.sql'\n }\n ]\n\n for extract_info in extract_from_db_info:\n try:\n extract_data_from_db(extract_info['source_db'], extract_info['dest_db'], extract_info['dest_table'], extract_info['sql_select'], extract_info['sql_insert'])\n except Exception as e:\n print(\"An error occurred: \", e)\n else:\n print(\"Successfully inserted records in {} table of {} database from {} table of {} database.\".format(extract_info['dest_table'], extract_info['dest_db'], extract_info['source_table'], extract_info['source_db']))",
"def import_stations(time_res='hourly',time_format='%Y%m%d%H',\r\n campaign_time=[datetime(2018,12,9), datetime(2018,12,12)],\r\n data_category='air_temperature', station_ids=['00044','00091'],\r\n dbase_dir='dbase', table_dir='tables',Output=True,\r\n memory_save=True):\r\n timeranges=['recent','historical']\r\n #%%load the datasets available at each timestep\r\n dwd_datasets_meta=dwd_datasets_meta=json.load(open(table_dir+\"\\\\dwd_station_meta.txt\"))\r\n #try to get a variable from the category, otherwise use interpolation of higher frequency data\r\n resample_frequency=None\r\n time_res_dbase=time_res\r\n try:\r\n dwd_datasets_meta[time_res][data_category]\r\n except Exception:\r\n if time_res=='daily':\r\n try:\r\n dwd_datasets_meta['hourly'][data_category]\r\n print(data_category,' is not provided at the required resolution, daily_mean of hourly data used instead')\r\n resample_frequency='D'\r\n time_res_dbase='hourly'\r\n except Exception:\r\n try: \r\n dwd_datasets_meta['10_minutes'][data_category]\r\n print(data_category,' is not provided at the required resolution, daily_mean of 10_minutes data used instead')\r\n resample_frequency='D'\r\n time_res_dbase='10_minutes'\r\n except Exception:\r\n print(data_category, 'not available')\r\n sys.exit(1)\r\n if time_res=='hourly':\r\n try: \r\n dwd_datasets_meta['10_minutes'][data_category]\r\n print(data_category,' is not provided at the required resolution, hourly_mean of 10_minutes data used instead')\r\n resample_frequency='H'\r\n time_res_dbase='10_minutes'\r\n except Exception:\r\n print(data_category, 'not available')\r\n sys.exit(1)\r\n \r\n \r\n #%% download from dwd if necessary\r\n #connect to server\r\n server='opendata.dwd.de'\r\n ftp=connect_ftp(server = server,connected = False)\r\n #get the mean time of the campaign\r\n date_mean=campaign_time[0]+(campaign_time[1]-campaign_time[0])/2 \r\n # load the inititial ds\r\n dbase_path=dbase_dir+'\\\\db_stations_'+time_res+'_'+data_category+'.nc'\r\n if os.path.exists(dbase_path):\r\n with xr.open_dataset(dbase_path) as dwd_dbase:\r\n dwd_dbase.load()\r\n print('Existing database imported')\r\n #get the non_nans stations\r\n current_stations=np.array(dwd_dbase[list(dwd_dbase.keys())[0]].sel(time=date_mean,method='nearest').dropna('STATIONS_ID').coords['STATIONS_ID'])\r\n else:\r\n print(dbase_path, 'does not exist, we create a new netcdf_file')\r\n dwd_dbase=xr.Dataset()\r\n current_stations=np.array((-9999)).reshape(1)\r\n #change directory on server\r\n for timerange in timeranges:\r\n archive_url='/climate_environment/CDC/observations_germany/climate/'+time_res_dbase+'/'+data_category+'/'+timerange \r\n ftp.cwd(archive_url)\r\n #get the archive\r\n for station_id in station_ids:\r\n #we check whether the station is in the database with this parameter already\r\n if int(station_id) in current_stations:\r\n print('Station', station_id, 'with category', data_category,'in ',timerange,'dbase already')\r\n continue\r\n try:\r\n archive_name=[s for s in ftp.nlst() if station_id in s][0]\r\n except:\r\n print('No ',timerange,'data for station',station_id)\r\n continue\r\n print('Retrieving {}...'.format(archive_name))\r\n retrieved = False\r\n archive = io.BytesIO()\r\n # try to retrieve file\r\n while not retrieved:\r\n try:\r\n ftp.retrbinary(\"RETR \" + archive_name, archive.write)\r\n retrieved = True\r\n except:\r\n ftp=connect_ftp(server = server,connected = False)\r\n ftp.cwd(archive_url)\r\n archive.seek(0)\r\n with ZipFile(archive) as myzip:\r\n for f in myzip.infolist():\r\n 
# This is the data file\r\n #print('zip content:', f.filename)\r\n if f.filename.startswith('produkt_'):\r\n product = io.StringIO(str(myzip.read(f.filename),'utf-8'))\r\n #get dataframe from product \r\n dwd_product=pd.read_csv(product,sep=';',skipinitialspace=True)\r\n #get datetime\r\n dwd_product['time']=pd.to_datetime(dwd_product['MESS_DATUM'],format=time_format) \r\n dwd_product=dwd_product.rename(columns=dwd_datasets_meta[time_res_dbase][data_category])\r\n dwd_product=dwd_product.reset_index()\r\n dwd_product=dwd_product.set_index(['time','STATIONS_ID'])\r\n dwd_product=dwd_product.drop(columns=['MESS_DATUM','quality_level_of_next_columns','end_of_record','index'])\r\n #append to database\r\n dwd_xr=dwd_product.to_xarray()\r\n #replace all values equal to -999 to nan\r\n for data_var in dwd_xr.data_vars:\r\n dwd_xr[data_var]=dwd_xr[data_var].where(dwd_xr[data_var]>-999)\r\n if station_id=='05009':\r\n print('ok') \r\n #only add relevant dates if available memoryis rather small\r\n \r\n if memory_save and timerange=='historical':\r\n dwd_xr=dwd_xr.sel(time=slice(campaign_time[0]-timedelta(days=1),campaign_time[1]+timedelta(days=1)))\r\n #dwd_xr=dwd_xr.squeeze()\r\n \r\n try:\r\n dwd_dbase=xr.merge([dwd_dbase,dwd_xr])\r\n except Exception as e:\r\n print(e)\r\n print('try merging with compat=override')\r\n dwd_dbase=xr.merge([dwd_dbase,dwd_xr],compat='override')\r\n print(archive_name,' added to database')\r\n #upscale to required temporal resolution\r\n if resample_frequency is not None:\r\n dwd_dbase=dwd_dbase.resample(time=resample_frequency).mean(skipna=True)\r\n print('DWD data upscaled to',time_res,'averages')\r\n if Output==True:\r\n dwd_dbase.to_netcdf(dbase_path)\r\n print('Updated database' ,dbase_path)\r\n return dwd_dbase",
"def compute_aggregate_weather_data():\n\n # get a list of all the csv files names in the 'weather_data' directory\n files = get_all_csv_files_in_directory('weather_data')\n\n # Todo: if the number of csv files doesn't match the expected value, unzip remaining using the 'os' module\n\n if len(files) == 0:\n\n # Unzip all files in current directory and subdirectories\n print \"unzipping weather files...\"\n os.system(\"unzip 'weather_data/*.zip' -d weather_data\")\n\n\n # Try again to get files\n files = get_all_csv_files_in_directory('weather_data')\n\n # Throw exception if still missing csv files\n if len(files) == 0:\n raise ValueError(\"Missing weather data in csv format in the 'weather_data' directory\")\n\n # convert the list of csv file names to a list of corresponding DataFrames\n dallas_files = filter(lambda file_name : \"KDAL\" in file_name, files)\n houston_files = filter(lambda file_name : \"KHOU\" in file_name, files)\n san_antonio_files = filter(lambda file_name : \"KSAT\" in file_name, files)\n\n print \"Retrieved weather data files...\"\n print \"\\t# of Dallas weather files found: \", len(dallas_files)\n print \"\\t# of Houston weather files found: \", len(houston_files)\n print \"\\t# of San Antonio weather files found: \", len(san_antonio_files)\n\n dallas_dfs = map(lambda file_name: read_weather_data_from_csv(\"./weather_data/\" + file_name), dallas_files)\n houston_dfs = map(lambda file_name: read_weather_data_from_csv(\"./weather_data/\" + file_name), houston_files)\n san_antonio_dfs = map(lambda file_name: read_weather_data_from_csv(\"./weather_data/\" + file_name), san_antonio_files)\n\n dallas_df = pd.concat(dallas_dfs)\n houston_df = pd.concat(houston_dfs)\n san_antonio_df = pd.concat(san_antonio_dfs)\n\n print \"Aggregating all of the weather data...\"\n # fold the list of data frames into a single data frame\n aggregate_df = reduce(lambda df1, df2: pd.merge(df1, df2, on=\"Date\", how=\"outer\"), [dallas_df, houston_df, san_antonio_df]).sort_values(\"Date\")\n\n return aggregate_df",
"def collect_stations(self):\n # First, iterate provinces and build url's\n site = urllib.request.urlopen(self.base_url)\n\n # Check that the site is still valid or operating by collecting a list of provinces\n print(\"Collecting provinces\")\n provinces = [s[9:11] for s in re.findall('<a href=\"../\">../</a>', site.read())]\n\n # Iterate provinces and collect list of available times\n print(\"Collecting time periods and station ID's\")\n self.stations = defaultdict(dict)\n for prov in provinces:\n site = urllib.request.urlopen(self.build_url(prov))\n expression = '<a href=\"[hd][a-zA-Z]*/\">[hd][a-zA-Z]*/</a>'\n times = [s.split('>')[1].split('<')[0].replace('/', '') for s in re.findall(expression, site.read())]\n\n # Iterate times and collect the station ID's\n for time in times:\n site = urllib.request.urlopen(self.build_url(prov, time))\n expression = '<a href=\"{0}_[a-zA-Z0-9]*_{1}_hydrometric.csv\">{0}_[a-zA-Z0-9]*_{1}_hydrometric.csv</a>'\n expression = expression.format(prov.upper(), time.lower())\n stations = [s.split('_')[1] for s in re.findall(expression, site.read())]\n self.stations[prov][time] = stations",
"def gatherStationData():\n flist = list_files()\n station_dics = {}\n print(\"Reading in csv data...\")\n for f_in in flist:\n start,end = find_timespan(f_in)\n station = station_name(f=f_in)\n print(\"File: {0} Station: {1} {2}--{3}\".format(f_in, \n station, start, end))\n station_dics[station] = read_precip(fname=f_in, \n label=station, start_year=start, end_year=end)\n data_list = []\n for s in station_dics:\n data_list.append(station_dics[s]) \n return pd.concat(data_list,axis=1)",
"def run():\n #Initialise variables\n data = build_station_list()\n update_water_levels(data)\n ls = []\n ID = []\n \n #Number of days in past taken data from\n dt = 7\n #How many graphs per window\n limit = 4\n #How many stations\n number = 6\n \n #Create list of measuring_id's sorted by water level\n for station in data:\n if station.typical_range_consistent() == True and station.relative_water_level() != None:\n ls.append((station, station.relative_water_level()))\n\n ls = sorted_by_key(ls, 1)\n \n for station in ls:\n ID.append(station[0])\n \n s = count_inconsistent_sets(ID[:number], dt)\n \n ID = ID[:number+s]\n\n plot_water_levels(ID, dt, limit, s)",
"def import_temp_data(counties):\n for index, row in counties.iterrows():\n station = row[2]\n url = f'https://wrcc.dri.edu/WRCCWrappers.py?sodxtrmts+0{station}+por+por+maxt+none+mave+5+01+F'\n result = requests.get(url)\n soup = BeautifulSoup(result.text, 'html.parser')\n table = soup.find('table')\n data = pd.read_html(str(table))\n df = data[0]\n df.columns = df.iloc[0]\n df = df.drop([0])\n df = df.iloc[-65:-8, :]\n df = df.rename(columns={'YEAR(S)': 'Year'})\n df['Year'] = pd.to_datetime(df['Year'], format='%Y')\n df = df.set_index('Year')\n df = df.dropna(axis=1)\n df = df.replace(to_replace='-----', value=np.nan)\n df = df.astype('float64')\n df = df.fillna(df.mean().round(2))\n df = df.add_suffix('_t')\n name = row[0]\n df['County'] = name\n df.to_csv(f'{name}_avgmaxtemp.csv')\n print(f'Avg. max. temp. data from {name} saved')\n time.sleep(3.14)\n print('Done')",
"def _get_data(self, gas, loc, voltage, speed, trial):\n cols = []\n for g in gas:\n for l in loc:\n try:\n (sub, files) = self._get_sensor_col_files(g, l)\n except OSError as e:\n print('{}\\n Keeping calm and carrying on.'.format(e))\n continue\n for v in voltage:\n for s in speed:\n end = \"_board_setPoint_%s_fan_setPoint_%s_mfc_setPoint_%sppm_p%s\" % (\n self.SensorVoltages[v],\n self.FanSpeeds[s],\n self.GasNames[g],\n self.AltLocs[l])\n filtered = [f.split('/')[-1] for f in files if f.endswith(end)]\n if not filtered:\n if self._args['verbose']:\n print('No valid files found for \"%s\", skipping!' % sub)\n continue\n timeStamp = [filt.split('_', 1)[0] for filt in filtered]\n date = [time.strptime(ts, '%Y%m%d%H%M') for ts in timeStamp]\n date = [time.strftime('%Y-%m-%d %H:%M', d) for d in date]\n filtered = [os.path.join(sub, f) for f in filtered]\n for i, filt in enumerate(filtered):\n j = i + 1\n if j in trial:\n p = os.path.sep.join([self.dataloc_prefix,\n self.data_location,\n filt])\n\n cols.append(SensorColumn(data_location=p,\n gas=self.GasNames[g],\n loc=self.Locs[l],\n voltage=self.SensorVoltages[v],\n speed=self.AltFanSpeeds[s],\n trial=j,\n _args=self._args))\n\n if self._args['verbose']:\n print('\\nSelected %i single trial SensorColumns!' % len(cols))\n return cols",
"def populate_database(telescope_name, instrument_name):\n telescope = Telescope.objects.create(\n name=telescope_name, latitude=25.0, longitude=45.0)\n instrument = Instrument.objects.create(\n name=instrument_name, telescope=telescope)\n for year_int in (2012, 2013):\n for month_int in range(1, 13):\n for night_int in (1, monthrange(year_int, month_int)[1]):\n ut_date = date(year_int, month_int, night_int)\n night = Night.objects.create(\n ut_date=ut_date, instrument=instrument, observers='Smith')\n Exposure.objects.create(\n night=night, run_number=1, ut_start=time(10, 0, 0),\n exposed=20.0, ra=60.0, dec=30.0, object_exp=True)\n Exposure.objects.create(\n night=night, run_number=2, ut_start=time(11, 0, 0),\n exposed=30.0, ra=90.0, dec=0.0, object_exp=True)\n Exposure.objects.create(\n night=night, run_number=3, ut_start=time(12, 0, 0),\n exposed=40.0, ra=120.0, dec=-30.0, object_exp=False)",
"def ingest():\n\n base_path = '/home/mnichol3/Coding/wx-scripts/wtlma'\n\n flash_files = ['flash-out-05232019-2050.txt',\n 'flash-out-05232019-2100.txt',\n 'flash-out-05232019-2110.txt',\n 'flash-out-05232019-2120.txt',\n 'flash-out-05232019-2130.txt',\n 'flash-out-05232019-2140.txt',\n 'flash-out-05232019-2150.txt']\n\n df_cols = ['start', 'end', 'duration', 'area', 'ctr_alt', 'ctr_lat', 'ctr_lon',\n 'tot_energy']\n\n flash_df = pd.read_csv(join(base_path, flash_files[0]), sep=',', names=df_cols)\n\n for f in flash_files[1:]:\n curr_path = join(base_path, f)\n curr_df = pd.read_csv(curr_path, sep=',', names=df_cols)\n flash_df = pd.concat([flash_df, curr_df], ignore_index=True)\n\n return flash_df",
"def _setData(self):\n\n if not self.stationId:\n return\n \"\"\" \n # get the ressource url and adjust lat and lon from data portal\n query = sparqls.stationResource(self.stationId)\n key, val = RunSparql(query, 'array').run()\n if val: \n self.url = val[0][0]\n self.lat = float(val[0][2])\n self.lon = float(val[0][3])\n \"\"\"\n\n # it is possible, that a station id has multiple URI\n # ask for all URI\n query = sparqls.stationData(self.uri, 'all')\n data = RunSparql(query, 'pandas').run()\n\n if not data.empty:\n self._data = data\n else:\n self._data = 'no data available'\n\n # check if data is available and extract the 'unique' data products\n if isinstance(self._data, pd.DataFrame):\n p = self._data['specLabel'].unique()\n self._products = pd.DataFrame(p)\n\n # replace samplingheight=None with empty string\n self._data.samplingheight.replace(to_replace=[None], value=\"\", inplace=True)\n else:\n self._products = 'no data available'",
"def get_data():\n \n data = {\n 'loadAvg1Min': 0, #load average 1 min\n 'loadAvg5Min': 0, #load average 5 min\n 'loadAvg15Min': 0, #load average 15 min\n 'cpuUsage': [], #usage distribution for each cpu\n 'memUsage': {}, #memory usage \n 'networkReads': [], #network reads per second for each interface\n 'networkWrites': [], #network writes per second for each interface\n 'diskReads': [], #disk reads per second for each disk\n 'diskWrites': [] #disk writes per second for each disk\n }\n \n #metrics that doesnt need sampling\n data['loadAvg1Min'], data['loadAvg5Min'], data['loadAvg15Min'] = get_load_avg() #get load avg\n data['memUsage'].update(get_mem_usage()) #memory usage\n \n #metrics that needs sampling\n #they are written as a generator so that we can sleep before collection again\n sampling_duration = 1\n cpu_usage_gen = get_cpu_usage(sampling_duration) #generator for cpu usage\n net_rw_gen = get_net_rw(sampling_duration) #generator for network read write\n disk_rw_gen = get_disk_rw(sampling_duration) #generator for disk read write\n \n while 1: #now start sampling, whenever we have walid data, we can exit the loop\n cpu_usage = next(cpu_usage_gen)\n net_rw = next(net_rw_gen)\n disk_rw = next(disk_rw_gen)\n \n if cpu_usage or net_rw or disk_rw: #we have valid data\n break\n \n time.sleep(sampling_duration)\n \n #append cpu usage for each cpu core\n for cpu, usage in cpu_usage.items():\n data['cpuUsage'].append({'name': cpu, 'value': usage})\n \n #append network read and write for each interface\n for interface, rw in net_rw.items():\n data['networkReads'].append({'name': interface, 'value': rw['reads']})\n data['networkWrites'].append({'name': interface, 'value': rw['writes']}) \n \n #append disk read and write for each logical disk\n for device, rw in disk_rw.items():\n data['diskReads'].append({'name': device, 'value': rw['reads']})\n data['diskWrites'].append({'name': device, 'value': rw['writes']})\n \n return data",
"def test_seed_station_information(self):\n\t\tget_info.seed_station_information()\n\n\t\tMacDougal_Prince = db.session.query(Station).filter(Station.id == 128).one()\n\t\tself.assertTrue(MacDougal_Prince, 'Station at MacDogual/Pride did not get sucessfully added.')\n\n\t\tself.assertEqual(MacDougal_Prince.num_bikes_available, 0, 'Bike counts were not initialized properly')\n\t\tself.assertEqual(MacDougal_Prince.num_docks_available, 0, 'Dock counts were not initialized properly')",
"def get_prepared_data(cls, ext_stations=None):\n ext_stations = ext_stations or StationDAO.get_all_with_prices()\n features = (cls.get_station_features(row) for row in ext_stations)\n classes = (cls.get_category(row) for row in ext_stations)\n return features, classes",
"def getAllDataFromDirectory(prediction_directory, actual_directory, write_directory, cities_file, utc_offset = False):\n city_dictionary = getCities(cities_file)\n actualGetter = getActualWeather(actual_directory, city_dictionary, get_API_keys())\n #For each day and for each city, get all the data and put it into a spreadsheet.",
"def initialize_data(self , station = '', datasets = {} ): \n self.datasets = datasets\n self.datasets_keys = datasets.keys()\n self.station = station\n self.out_name = self.out_dir + '/' + self.station + '_CEUAS_premerged_v0.nc'\n\n self.observations_table_vars = ['date_time', 'z_coordinate' , 'z_coordinate_type', 'observed_variable' , 'observation_value' , 'report_id' , 'observation_id' , 'latitude' , 'longitude', 'units', 'source_id']\n\n \"\"\" Loading the econding of the tables created from the harvester script and to be applied again \"\"\"\n self.encodings = np.load('groups_encodings.npy' , allow_pickle = True ).item()\n self.encodings['era5fb'] = np.load('era5fb_encodings_all.npy' , allow_pickle = True ).item() \n self.dic_type_attributes = np.load('dic_type_attributes.npy',allow_pickle= True).item()\n \n self.era5fb_columns = self.dic_type_attributes['era5fb'].keys()\n\n self.obstab_nans_filled = False \n\n data['cdm_tables'] = {} \n \n \"\"\" Loop over all the datasets \n k: name of the dataset \n v: list of file paths, eg 'era5_1':[filepath_1, filepath_2 ] \"\"\" \n for k,v in self.datasets.items() :\n data[k] = {}\n for F in v:\n \n logging.info(' Dataset ::: *** %s %s ' , k , F ) \n \n data[k][F] = {}\n\n h5py_file = h5py.File(F, 'r')\n data[k][F]['h5py_file'] = h5py_file \n \n a = h5py_file['recordtimestamp']\n \n data[k][F]['recordtimestamp'] = a\n data[k][F]['recordindex'] = h5py_file['recordindex']\n data[k][F]['dateindex'] = h5py_file['dateindex']\n a = h5py_file['recordtimestamp']\n data[k][F]['max_date'] = max(a)\n data[k][F]['min_date'] = min(a)\n \n data[k][F]['counter'] = 0\n\n #######\n # HEADER TABLE\n #######\n head_tab = h5py_file['header_table']\n logging.info('*** header_table')\n data[k][F]['header_table'] = {}\n for var in head_tab.keys():\n if ('string' in var or 'hdrlen' in var): continue\n try: \n data[k][F]['header_table'][var] = (np.array(head_tab[var][:])).astype(self.dic_type_attributes['header_table'][var]['type'] )\n except:\n print('failed convertion type header' , k , ' ' , F , ' ' , var )\n \n ####### \n # STATION CONFIGURATION\n ####### \n d = xr.open_dataset(F , engine = 'h5netcdf' , group = 'station_configuration' , decode_times = False )\n data[k][F]['station_configuration'] = d.to_dataframe()\n logging.debug('Done with %s station_configuration' , str(k) )\n d.close()\n\n ####### \n # SOURCE CONFIGURATION \n ####### \n d = xr.open_dataset(F , engine = 'h5netcdf' , group = 'source_configuration' , decode_times = False )\n data[k][F]['source_configuration'] = d\n logging.debug('Done with %s source_configuration' , str(k) )\n d.close()\n\n\n data['cdm_tables'] = {}\n \"\"\" Reading the CDM tables that do not depend on specific stations or observations (fixed values), for the first file only \"\"\"\n for t in self.standard_cdm: # [ 'crs' , 'observed_variable', 'units' , 'z_coordinate_type' , 'station_type', 'station_configuration_codes'] \n if t not in data['cdm_tables'].keys():\n #data['cdm_tables'][t] = ''\n cdm = xr.open_dataset(F , engine = 'h5netcdf' , group = t )\n data['cdm_tables'][t] = cdm \n\n print(blue + 'Memory used after reading data: ', process.memory_info().rss/1000000000 , cend)\n\n self.data = data\n\n \"\"\" Making all date_times \"\"\"\n self.make_all_datetime()",
"def Find_nearest_dwd_stations(inpt_data,\r\n date_start='20051201',\r\n date_end='20201231',\r\n dwd_time_format='%Y%m%d%H',\r\n data_category='air_temperature',\r\n temp_resolution='hourly',\r\n no_of_nearest_stations=4,\r\n memory_save=True,\r\n Output='True'):\r\n if isinstance(data_category,list):\r\n if len(list(data_category)) > 1:\r\n print(\r\n 'Currently only one dwd category allowed, please run function multiple times for each category'\r\n )\r\n return None\r\n \r\n #convert time to datetime\r\n dt_start=datetime.strptime(date_start,'%Y%m%d')\r\n dt_end=datetime.strptime(date_end,'%Y%m%d')\r\n print('Start quering data from DWD')\r\n #define the database folder\r\n pypath = os.path.dirname(os.path.abspath(__file__))\r\n table_dir = pypath + '\\\\' + 'tables'\r\n dbase_dir = pypath + '\\\\' + 'dbase' \r\n #%% we check all available stations and create a valid list\r\n filename_stations=update_stationlist(time_res='hourly',dbase_dir=table_dir)\r\n stations_all=pd.read_csv(filename_stations, dtype={'STATIONS_ID': object})\r\n # delete all stations which do not cover the category\r\n dwd_stations=stations_all[stations_all[data_category]==True].copy()\r\n #correct to datetime\r\n dwd_stations['date_end']=pd.to_datetime(stations_all.date_end,format='%Y%m%d')\r\n dwd_stations['date_start']=pd.to_datetime(stations_all.date_start,format='%Y%m%d')\r\n # clean to stations which cover the campaign time #dt_low <= dt <= dt_high:\r\n dwd_stations=dwd_stations[(dwd_stations.date_start<=dt_start) & (dwd_stations.date_end>=dt_end)]\r\n #make a geodataframe out of it\r\n dwd_stations=gpd.GeoDataFrame(dwd_stations,geometry=gpd.points_from_xy(dwd_stations.geo_lon, dwd_stations.geo_lat))\r\n \r\n #loop through all rows to get the n closest points\r\n distances=pd.DataFrame()\r\n for _, station in dwd_stations.iterrows():\r\n distances[station.STATIONS_ID]=inpt_data.distance(station.geometry)\r\n \r\n #%% get the n stations with smallest distance and update database\r\n id_nearest_stations=distances.apply(lambda s: s.nsmallest(no_of_nearest_stations).index.tolist(), axis=1).values.tolist() #station ids\r\n #get them as unique values by sum a list of lists https://bit.ly/353iZQB\r\n id_dwd_stations=list(set(sum(id_nearest_stations,[])))\r\n \r\n #update the database\r\n db_dwd_stations=import_stations(time_res=temp_resolution,time_format=dwd_time_format,campaign_time=[dt_start,dt_end],data_category=data_category,station_ids=id_dwd_stations,dbase_dir=dbase_dir,Output=Output,table_dir=table_dir,memory_save=memory_save)\r\n \r\n #distance of nearest stattions\r\n dist_nearest_stations=pd.DataFrame(np.sort(distances.values)[:,:no_of_nearest_stations]).values.tolist() #distances themself\r\n #create new columns in the input data\r\n station_col_nm=list()\r\n for i in range(0,no_of_nearest_stations):\r\n station_col_nm.append(data_category+'_station_'+str(i))\r\n for i in range(0,no_of_nearest_stations):\r\n station_col_nm.append(data_category+'_distance_'+str(i))\r\n #create new dataframe\r\n distance_data=pd.concat([pd.DataFrame(id_nearest_stations).astype(int),pd.DataFrame(dist_nearest_stations)],axis=1)\r\n distance_data.columns=station_col_nm\r\n #add to main dataset\r\n inpt_data=pd.concat([inpt_data, distance_data],axis=1) \r\n \r\n return inpt_data,db_dwd_stations",
"def generate_training_testing_dataset(store_id, transactions, meteo_day, max_days=2500,\n single_barcode=0):\n\n # Get the minimum and maximum of date in the transactions\n min_date = transactions[(transactions['STO_EAN'] == store_id)].min()['TRX_DATETIME'].date()\n max_date = transactions[(transactions['STO_EAN'] == store_id)].max()['TRX_DATETIME'].date()\n\n # Get the number of days between the two date\n num_days = (max_date - min_date).days\n\n # Get the list of unique products barcode in the transactions\n products_barcode = transactions['BARCODE'].unique()\n\n # Only do one single barcode if activated\n if single_barcode is not None:\n products_barcode = [products_barcode[single_barcode]]\n\n\n # Array to contain all training data\n all_data_first_level = []\n\n # For each day and for each product\n for day in xrange(num_days):\n\n print(day)\n\n # If we have already considered more days than allowed, stop\n if day > max_days:\n break\n\n\n # Get the date corresponding to this day\n date = min_date + pd.DateOffset(day)\n # Get the weather of the date\n weather = get_weather_on_date(date, meteo_day, store_id).head(n=1)\n\n # If the weather is empty we skip this day\n if weather.empty:\n continue\n\n # For each product to include\n for product_barcode in products_barcode:\n\n # Get the volume and inventory data\n volume = get_volume_product_on_date(product_barcode, date, store_id, transactions)\n\n # If no volume could be found skip this date,product pair\n if volume is None:\n continue\n\n # Get the type of the current date\n day_type = generate_day_type(date)\n\n\n # Generating complex features based on the simpler one\n\n # This contains respectively yesterday, the day before yesterday and the same day as current one in\n # previous week\n yesterday = date - pd.DateOffset(1)\n two_days_ago = date - pd.DateOffset(2)\n one_week_ago = date - pd.DateOffset(7)\n\n # Get the day type of yesterday and 2 days ago\n day_type_yesterday = generate_day_type(yesterday)\n day_type_2days_ago = generate_day_type(two_days_ago)\n\n # Get the volume of yesterday, 2days ago and 1 week ago\n volume_yesterday = get_volume_product_on_date(product_barcode, yesterday, store_id, transactions)\n volume_2days_ago = get_volume_product_on_date(product_barcode, two_days_ago, store_id, transactions)\n volume_one_week_ago = get_volume_product_on_date(product_barcode, one_week_ago, store_id, transactions)\n\n\n # Get the total sales and the total weight of product done yesterday, 2 days ago and 1 week ago\n volume_price_yesterday = 0\n volume_weight_yesterday = 0\n if volume_yesterday is not None:\n volume_price_yesterday = volume_yesterday[\"price\"]\n volume_weight_yesterday = volume_yesterday[\"weight\"]\n\n volume_price_2days_ago = 0\n volume_weight_2days_ago = 0\n if volume_2days_ago is not None:\n volume_price_2days_ago = volume_2days_ago[\"price\"]\n volume_weight_2days_ago = volume_2days_ago[\"weight\"]\n\n volume_price_one_week_ago = 0\n volume_weight_one_week_ago = 0\n if volume_one_week_ago is not None:\n volume_price_one_week_ago = volume_one_week_ago[\"price\"]\n volume_weight_one_week_ago = volume_one_week_ago[\"weight\"]\n\n\n\n # Using historical weather data\n weather_yesterday = get_weather_on_date(yesterday, meteo_day, store_id).head(n=1)\n temperature_min_yesterday = 0\n temperature_max_yesterday = 0\n if not weather_yesterday.empty:\n temperature_min_yesterday = weather_yesterday['TEMPERATURE_VALUE_MIN'].values[0]\n temperature_max_yesterday = 
weather_yesterday['TEMPERATURE_VALUE_MIN'].values[0]\n\n\n #tmp = [weather['TEMPERATURE_VALUE_MIN'].values[0], weather['TEMPERATURE_VALUE_MAX'].values[0],\n # weather['PRECIPITATION_VALUE'].values[0], weather['SUNSHINE_DURATION'].values[0],\n # weather['SNOW_DEPTH'].values[0], day_type, volume[\"price\"], volume[\"weight\"]]\n\n\n # Saving Features\n tmp = [weather['TEMPERATURE_VALUE_MIN'].values[0], weather['TEMPERATURE_VALUE_MAX'].values[0],\n day_type, volume[\"price\"], volume_price_yesterday,volume_weight_yesterday,\n volume_price_2days_ago, volume_weight_2days_ago,\n volume_price_one_week_ago, volume_weight_one_week_ago, temperature_min_yesterday,\n temperature_max_yesterday,day_type_yesterday, day_type_2days_ago,\n volume[\"weight\"]]\n\n all_data_first_level.append(tmp)\n\n return all_data_first_level",
"def _populate_output(self):\n self._store_atomic_queries_table()\n self._store_composite_queries_table()",
"def run(self):\n\t\tdf_iter = self.file_to_df(50000)\n\t\tdf_airport = self.airport_file_to_df()\n\t\tfor df in df_iter: # type: pd.DataFrame\n\t\t\tdf.drop_duplicates(inplace=True)\n\t\t\tdf = self.transform(df, df_airport)\n\n\t\t\tdf_result = self.get_only_new_records(\n\t\t\t\tdf=df,\n\t\t\t\tdf_columns=self.join_columns,\n\t\t\t\ttable_columns=self.join_columns\n\t\t\t)\n\n\t\t\tif len(df_result) > 0:\n\t\t\t\t# df_result.drop(self.table_columns, axis=1)\n\n\t\t\t\tself.save(\n\t\t\t\t\tdf=df_result,\n\t\t\t\t\ttable_name=\"travel_dimension\",\n\t\t\t\t\tdf_columns=self.table_columns,\n\t\t\t\t\ttable_colums=self.table_columns\n\t\t\t\t)",
"def padova_interpolated_isomake(directories, bands_dict, output_filename,\n bands_ordered=None):\n\n if isinstance(directories, basestring):\n directories = [directories]\n\n if bands_ordered is None:\n bands_ordered = bands_dict.values()\n\n output_obj = open(output_filename, \"w\")\n\n header_string = \"#\\t[M/H]\\tMi\\tlogAge\\tlogTe\\tlogg\\tJacobian\"\n for band in bands_ordered:\n header_string += \"\\t{}\".format(band)\n header_string += \"\\tinner_count\\touter_count\\n\"\n output_obj.write(header_string)\n\n iso_metal_dict = {}\n bands_metal_dicts = {}\n for band in bands_dict.keys():\n bands_metal_dicts[band] = {}\n\n # instead do this on band-by-band basis? *******************\n\n for direc in directories:\n iso_files_gz = gb.glob(\"{}/*.dat.gz\".format(direc.rstrip(\"/\")))\n iso_files = gb.glob(\"{}/*.dat\".format(direc.rstrip(\"/\")))\n\n # check for metallicity of each file\n # and check which bands it has\n\n for iso_file1 in iso_files_gz:\n metal = None\n iso_data = gz.open(\"{0}\".format(iso_file1))\n for line in iso_data:\n split_line = line.split()\n if \"[M/H]\" in split_line:\n metal = float(split_line[split_line.index(\"[M/H]\")+2])\n if \"M_ini\" in split_line:\n for band in bands_metal_dicts.keys():\n if band in split_line:\n bands_metal_dicts[band][metal] = iso_file1\n\n for iso_file1 in iso_files:\n metal = None\n iso_data = open(\"{0}\".format(iso_file1), \"r\")\n for line in iso_data:\n split_line = line.split()\n if \"[M/H]\" in split_line:\n metal = float(split_line[split_line.index(\"[M/H]\")+2])\n if \"M_ini\" in split_line:\n for band in bands_metal_dicts.keys():\n if band in split_line:\n bands_metal_dicts[band][metal] = iso_file1\n\n for metal in bands_metal_dicts[bands_metal_dicts.keys()[0]]:\n filenames = []\n for band in bands_metal_dicts:\n if metal in bands_metal_dicts[band]:\n if bands_metal_dicts[band][metal] not in filenames:\n filenames.append(bands_metal_dicts[band][metal])\n else:\n break\n else:\n iso_metal_dict[metal] = filenames\n\n print(iso_metal_dict)\n keys = iso_metal_dict.keys()\n keys.sort()\n\n if len(keys) > 2:\n # iso_metal_weights=dict(zip(keys, np.gradient(np.array(keys)) ) )\n # in numpy 1.9.0 gradient has changed to use second order behaviour\n # at boundaries which gives wrong results in this context\n iso_metal_weights = dict(zip(keys,\n replacement_gradient(np.array(keys))))\n else:\n iso_metal_weights = dict(zip(keys, np.ones(len(keys))))\n print(\"metals and weights: \", iso_metal_weights)\n\n# interp in metallicity order\n\n for key in keys:\n iso_interp(iso_metal_dict[key], key, iso_metal_weights[key],\n output_obj, bands_dict, bands_ordered)\n\n output_obj.close()",
"def ddf_parser():\n num_available, total = 0, 0\n indicator_twn_tuples = list() # format of a single tuple: (indicator_name, #twn rows, earliest available year)\n concept_metadata = dict() # {top_tag: second_layer_tag:\n\n # parse all ddf files provided by GapMinder and find how many of them with Taiwan statistics\n for f_path in glob.glob(os.path.join('statistics', '*datapoints*.csv')):\n total += 1\n df = pd.read_csv(f_path)\n if 'twn' in df.geo.unique():\n num_available += 1\n indicator = f_path.replace('statistics/ddf--datapoints--', '').replace('--by--geo--time.csv', '')\n # print('[Indicator]', indicator)\n print(f\"\\t{len(df[df.geo == 'twn'])} indicators including Taiwan statistics.\")\n\n # stat_name = df.columns[-1]\n # df_p = df.pivot(index='geo', columns='time')[stat_name]\n # df_p.insert(loc=0, column='indicator', value=stat_name)\n # df_p.to_csv(f'statistics_transformed/{stat_name}.csv', sep=';')\n\n indicators.append(indicator)\n\n\n # print(\"{:.1f}% datapoints have Taiwan statistics\".format(num_available / float(total) * 100))\n\n\n\n df_c = pd.read_csv(CONCEPT_CSV_PATH)\n df_t = pd.read_csv(TAG_CSV_PATH)\n df = pd.merge(df_c, df_t, how='left', left_on='tags', right_on='tag')\n for idr, num_rows, earliest_year in indicator_twn_tuples:\n ancestors = list()\n\n row_values = df[df['concept'] == idr].values[0]\n name_catalog, parent, ancestor = (row_values[i] for i in [9, 17, 18])\n if type(parent) is str:\n ancestors.append(parent)\n\n # get ancestors recursively\n while type(ancestor) is str:\n tag_row_values = df_t[df_t['tag'] == ancestor].values[0]\n ancestors.append(tag_row_values[1])\n ancestor = tag_row_values[2]\n\n # build concept structure\n ancestors.insert(0, name_catalog)\n print('/'.join(ancestors[::-1]))",
"def create_station_dics(data_directories):\n \n files_all = {} \n for k,v in data_directories.items() :\n files = os.listdir(v)\n \n for f in files:\n station = f.split('_')[0] \n if station not in files_all.keys():\n files_all[station] = {}\n \n if k == 'ncar': # separating ncar temperature and wind files \n if 'trhc' in f:\n k = 'ncar_t'\n elif 'windc' in f:\n k = 'ncar_w'\n files_all[station][k] = ''\n files_all[station][k] = v + '/' + f # compelte path to the netCDF file \n\n #print('check') \n \n \n return files_all",
"def generate_weather_data(self):\n months = pd.to_datetime(self.output['Local Time']).dt.month\n self.output['Month'] = months # set month values for later joins\n\n # merge output data frame with historical data to get ranges\n keys = ['Location', 'Month']\n m = pd.merge(self.output, self.histdata, how='left',\n left_on=keys, right_on=keys)\n\n # uniformly select random pressure, temperature\n # and humidity values between the historical max and min ranges\n r = np.random.rand(m.shape[0])\n m['Temperature'] = ((m['Tmean_high'] - m['Tmean_low']\n ) * r + m['Tmean_low']).round(1)\n m['Pressure'] = ((m['Pmax'] - m['Pmin']) * r + m['Pmin']).round(1)\n m['Humidity'] = ((m['Hmax'] - m['Hmin']) * r + m['Hmin']).astype(int)\n\n # drop redundant columns and assign to output\n dcols = ['Month', 'Timezone', 'Pmax', 'Pmin',\n 'Hmax', 'Hmin', 'Tmean_high', 'Tmean_low']\n m.drop(columns=dcols, inplace=True)\n self.output = m",
"def write_stations_to_metro_stations_table(config, tables_cache):\r\n database = deepcopy(config[\"database\"])\r\n metro_stations_table = deepcopy(config[\"tables\"][\"metro_stations_table\"])\r\n in_tests.test_write_to_database_from_dict(\r\n database, metro_stations_table, tables_cache)\r\n\r\n station_id = tables_cache[\"address_metro_stations_station_id\"]\r\n station_name = tables_cache[\"address_metro_stations_station_name\"]\r\n line_name = tables_cache[\"address_metro_stations_line_name\"]\r\n station_lat = tables_cache[\"address_metro_stations_lat\"]\r\n station_lng = tables_cache[\"address_metro_stations_lng\"]\r\n\r\n if station_id:\r\n write_to_database(database, metro_stations_table, {\r\n \"station_id\": station_id,\r\n \"station_name\": station_name,\r\n \"line_name\": line_name,\r\n \"station_lat\": station_lat,\r\n \"station_lng\": station_lng\r\n })\r\n return ()",
"def load_data(city, month, week_day):\n# name for day variable changed from \"day_name\" into week_day to take into account new pandas method \".day_name()\"\n# read in file form selected city\n df = pd.read_csv(CITY_DATA[city])\n# create additional columns for months, days, start/ end times, hours and station combinations\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n df['month_start'] = df['Start Time'].dt.month\n df['month_end'] = df['End Time'].dt.month\n df['day_start'] = df['Start Time'].dt.day_name()\n df['day_end'] = df['End Time'].dt.day_name()\n df['hour'] = df['Start Time'].dt.hour\n df['station_comb'] = df['Start Station'] + ' &AND& ' + df['End Station']\n# filter data file by month: capture start and end months\n if month != 7:\n df1 = df[df['month_start'] == month]\n df2 = df1.append(df[df['month_end'] == month])\n df = df2.drop_duplicates()\n# filter data file by day: capture start and end days\n if week_day != 'All':\n df3 = df[df['day_start'] == week_day]\n df4 = df3.append(df[df['day_end'] == week_day])\n df = df4.drop_duplicates()\n# reset index to facilitate looping in station_stats function\n df = df.reset_index()\n# check if user wants to check first data lines\n req_check_df = input('\\nIf you want to check the selected data please enter y.')\n if req_check_df[0:1].lower() == 'y':\n print('check df = \\n', df.head())\n wait = input('Press Enter to continue. ')\n\n return df"
] | [
"0.69976664",
"0.58139586",
"0.5748849",
"0.5552527",
"0.5550863",
"0.5530793",
"0.55279726",
"0.5518004",
"0.5421447",
"0.54047465",
"0.5358192",
"0.53122264",
"0.52683693",
"0.5263279",
"0.52280056",
"0.5153076",
"0.51038533",
"0.51007193",
"0.507282",
"0.50530833",
"0.50488514",
"0.50456476",
"0.50298846",
"0.50112516",
"0.50000465",
"0.49984008",
"0.49943525",
"0.4993162",
"0.49832112",
"0.49795166"
] | 0.7368926 | 0 |
Populate watercourse- and aquifer-related data tables. | def populate_water_tables(connection):
    metadata = load_metadata('water')
    cursor = connection.cursor()
    # Check if tables are already populated.
    cursor.execute('SELECT count(*) FROM watercourses')
    watercourse_count = cursor.fetchone()[0]
    cursor.execute('SELECT count(*) FROM aquifers')
    aquifer_count = cursor.fetchone()[0]
    if watercourse_count and aquifer_count:
        print('Water tables already populated!')
        return
    station_data = get_station_data()
    for archive in metadata.keys():
        print(f'{archive}-water:'.upper())
        water_body = get_water_definitions(archive)['body']
        # 1. Populate watercourses/aquifers:
        stations = {}
        for water_body_name in metadata[archive].keys():
            print(f'\tPopulating {water_body}: "{water_body_name}"')
            cursor.execute(f'''INSERT INTO {water_body}s(location_id, name)
                               VALUES (0, '{water_body_name}')''')
            water_body_id = cursor.lastrowid
            # 2. Populate watercourse_stations/aquifer_stations:
            for station_id in metadata[archive][water_body_name]['stations']:
                station_name = clean_name(metadata[archive][water_body_name]['stations'][station_id]['name'])
                if station_id in stations:
                    # Prefer watercourses/aquifers with more stations
                    current_len = len(metadata[archive][water_body_name]['stations'])
                    previous_len = len(metadata[archive][stations[station_id]]['stations'])
                    if current_len < previous_len:
                        print(f'\t\tStation already exists: {station_id} - "{station_name}" ("{water_body_name}")')
                        continue
                    else:
                        cursor.execute(f'''DELETE
                                           FROM {water_body}_stations
                                           WHERE id = {station_id}''')
                        print(f'\t\tRemoved station: {station_id} - "{station_name}" from "{stations[station_id]}"')
                stations[station_id] = water_body_name
                print(f'\t\tPopulating station: {station_id} - "{station_name}"')
                # Insert station location if station data exists.
                location_id = 0
                station_row = station_data.query(f'ŠIFRA == "{station_id}"')
                if not station_row.empty:
                    index = station_row.index[0]
                    lat = station_row.at[index, 'LAT']
                    lng = station_row.at[index, 'LON']
                    if not np.isnan(lat) and not np.isnan(lng):
                        name = f"{station_row.at[index, 'VODOMERNA POSTAJA']} ({station_row.at[index, 'VODOTOK']})"
                        cursor.execute(f'''INSERT INTO locations(name, lat, lng)
                                           VALUES ('{name}', {lat}, {lng})''')
                        location_id = cursor.lastrowid
                # Insert station.
                cursor.execute(f'''INSERT INTO {water_body}_stations(id, {water_body}_id, location_id, name)
                                   VALUES ({station_id}, {water_body_id}, {location_id}, '{station_name}')''')
                # 3. Populate watercourse_measurements/aquifer_measurements:
                if not populate_water_measurements(cursor, archive, metadata[archive][water_body_name]['dir'],
                                                   station_id):
                    cursor.execute(f'''DELETE
                                       FROM {water_body}_stations
                                       WHERE id = {station_id}''')
                    print(f'\t\tRemoved station with useless data: {station_id} - "{station_name}"')
        # Remove empty watercourses/aquifers.
        cursor.execute(f'''SELECT w.id, w.name
                           FROM {water_body}s w
                           WHERE NOT EXISTS (
                               SELECT s.id
                               FROM {water_body}_stations s
                               WHERE w.id = s.{water_body}_id
                           )''')
        for row in cursor.fetchall():
            cursor.execute(f'''DELETE
                               FROM {water_body}s
                               WHERE id = {row[0]}''')
            print(f'\tRemoved empty {water_body}: "{row[1]}"') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def populate_db():\n\n populate_table(db, models.Department, departments_data)\n populate_table(db, models.Employee, employees_data)",
"def populate_tables(connection: sqlite3.Connection) -> None:\n fake = Faker()\n Faker.seed(0)\n\n c = conn.cursor()\n\n number_of_courses = fake.pyint(min_value=5, max_value=20)\n\n for _ in range(number_of_courses):\n course_name = fake.word()\n\n insert_statement = f'insert into courses (name) values (\"{course_name}\");'\n c.execute(insert_statement)\n\n connection.commit()\n\n number_of_users = fake.pyint(min_value=1, max_value=23)\n\n Faker.seed()\n\n for _ in range(number_of_users):\n\n if fake.pybool():\n user_name = f'{fake.first_name_female()} {fake.last_name_female()}'\n else:\n user_name = f'{fake.first_name()} {fake.last_name()}'\n\n insert_statement = f'insert into users (name) values (\"{user_name}\");'\n c.execute(insert_statement)\n\n connection.commit()\n\n for _ in range(50000):\n Faker.seed()\n\n random_user_id = fake.pyint(1, number_of_users)\n random_course_id = fake.pyint(1, number_of_courses)\n Faker.seed()\n random_lesson_no = fake.pyint(3, 12)\n Faker.seed()\n random_exercise_no = fake.pyint(1, 50)\n random_data = fake.sentence()\n\n insert_statement = f\"\"\"insert into saves (user_id, course_id, lesson_no, exercise_no,data) \n values ({random_user_id}, {random_course_id}, {random_lesson_no}, \n {random_exercise_no}, '{random_data}');\"\"\"\n c.execute(insert_statement)\n\n connection.commit()",
"def populate_database(self):\n self.insert_products()\n self.insert_categories()\n self.insert_products_categories()\n self.insert_stores()\n self.insert_products_stores()",
"def insert_relations_staging(self):\n\n for year in range(START_YEAR_CREATIVE_WORKS, CURRENT_YEAR, YEARS_RANGE):\n self.load_wikidata(\"movie_roles\", MOVIE_ROLES_BY_YEAR_SPARQL_QUERY, INSERT_MOVIE_ROLE_SQL_QUERY, INSERT_MOVIE_ROLE_MAP_COLUMNS, year, YEARS_RANGE)\n\n self.load_wikidata(\"song_roles\", SONG_ROLES_BY_YEAR_SPARQL_QUERY, INSERT_SONG_ROLE_SQL_QUERY, INSERT_SONG_ROLE_MAP_COLUMNS, year, YEARS_RANGE, True)\n self.load_wikidata(\"tvshow_roles\", TVSHOW_ROLES_SPARQL_QUERY, INSERT_TVSHOW_ROLE_SQL_QUERY,\n INSERT_TVSHOW_ROLE_MAP_COLUMNS)\n self.load_wikidata(\"animatedmovie_roles\", ANIMATEDMOVIE_ROLES_SPARQL_QUERY, INSERT_ANIMATEDMOVIE_ROLE_SQL_QUERY,\n INSERT_ANIMATEDMOVIE_ROLE_MAP_COLUMNS)\n self.load_wikidata(\"videogame_roles\", VIDEOGAME_ROLES_SPARQL_QUERY, INSERT_VIDEOGAME_ROLE_SQL_QUERY, INSERT_VIDEOGAME_ROLE_MAP_COLUMNS)\n self.load_wikidata(\"book_roles\", BOOK_ROLES_SPARQL_QUERY, INSERT_BOOK_ROLE_SQL_QUERY, INSERT_BOOk_ROLE_SQL_QUERY)",
"def populate_db():\n stdout.write('Emptying the tables...\\n')\n empty_tables()\n stdout.write('Populating Language records...\\n')\n populate_language()\n stdout.write('Populating Lemma, Wordform, and Definition records...\\n')\n populate_lexical()\n stdout.write('Populating ProperName records...\\n')\n populate_proper_names()",
"def load_dwh_tables(self):\n print(\"Loading the creative works table\")\n self.cur.execute(dwh_queries.INSERT_CREATIVE_WORKS_SQL_QUERY)\n self.conn.commit()\n\n print(\"Loading the participations table\")\n\n self.cur.execute(dwh_queries.INSERT_PARTICIPATIONS_SQL_QUERY)\n self.conn.commit()",
"def setUp(self):\n resume.objects.create(\n first_name='Nicholas',\n last_name='Bielinski',\n )\n experience.objects.create(\n title='Helpdesk Technician',\n location='L3 Technologies',\n start_date='6/26/2017',\n end_date='present',\n description='blah blah blah'\n )\n education.objects.create(\n institution_name='UNH Manchester',\n location='Manchester',\n degree='Bachelor',\n major='CIS',\n gpa = '3.5'\n )",
"def synthesize_employment_data(self, config):\r\n jobs_by_zone_by_sector_table_name = config['jobs_by_zone_by_sector']\r\n gridcells_table_name = config['gridcells']\r\n jobs_table_name = config['jobs']\r\n gridcells_output_table_name = config['gridcells_output']\r\n jobs_output_table_name = config['jobs_output']\r\n \r\n input_db_name = config['db_config'].database_name\r\n output_db_name = config['output_database_name']\r\n \r\n sectors = config['sector_names_and_ids']\r\n building_types_and_ids_and_home_based = config[\r\n 'building_type_column_names_and_ids_and_home_based']\r\n \r\n building_types = []\r\n building_ids = []\r\n home_based = [] \r\n for type, id, home in building_types_and_ids_and_home_based:\r\n building_types += [type]\r\n building_ids += [id]\r\n home_based += [home]\r\n \r\n \r\n from_database_configuration = ScenarioDatabaseConfiguration(\r\n database_name = input_db_name,\r\n host_name = config['db_config'].host_name,\r\n user_name = config['db_config'].user_name,\r\n password = config['db_config'].password \r\n )\r\n to_database_configuration = ScenarioDatabaseConfiguration(\r\n database_name = output_db_name,\r\n host_name = config['db_config'].host_name,\r\n user_name = config['db_config'].user_name,\r\n password = config['db_config'].password \r\n )\r\n\r\n FlattenScenarioDatabaseChain().copy_scenario_database(\r\n from_database_configuration = from_database_configuration, \r\n to_database_configuration = to_database_configuration,\r\n tables_to_copy = [gridcells_table_name, jobs_table_name])\r\n \r\n db_server = DatabaseServer(to_database_configuration) \r\n output_database = db_server.get_database(output_db_name)\r\n \r\n sector_name = 0; sector_id = 1\r\n \r\n sector = {}\r\n for entry in sectors:\r\n name = entry[sector_name]\r\n id = entry[sector_id]\r\n sector[id] = self._get_jobs_per_building_type_in_sector_by_zone(\r\n output_database, jobs_by_zone_by_sector_table_name, \r\n jobs_table_name, name, id)\r\n\r\n results = self._get_building_type_proportion_by_zone(output_database, \r\n gridcells_table_name)\r\n \r\n grid_id = 0; zone_id = 1\r\n dist = {}\r\n \r\n type_index = {}\r\n \r\n for name in building_types:\r\n for i in range(len(results[0])):\r\n column_name = results[0][i]\r\n if name == column_name:\r\n type_index[name] = i\r\n break;\r\n else:\r\n raise KeyError, ('No column by the name of \\'%s\\' found in '\r\n 'the database.' 
% name) \r\n\r\n for name in building_types:\r\n dist[name] = {}\r\n \r\n for row in results[1:]:\r\n for name in building_types:\r\n dist[name][row[zone_id]] = []\r\n \r\n for row in results[1:]:\r\n for name in building_types:\r\n dist[name][row[zone_id]] += [(row[grid_id], \r\n row[type_index[name]])]\r\n \r\n jobs_table_data = self._create_jobs_table_data(dist, sector,\r\n building_types_and_ids_and_home_based)\r\n \r\n output_database.execute('USE %(out_db)s' % {'out_db':output_db_name})\r\n \r\n output_database.execute(\"\"\"\r\n CREATE TABLE %(jobs_out)s (\r\n JOB_ID INT AUTO_INCREMENT, PRIMARY KEY(JOB_ID),\r\n GRID_ID INT, HOME_BASED INT, SECTOR_ID INT, BUILDING_TYPE INT);\r\n \"\"\" % {'jobs_out':jobs_output_table_name})\r\n \r\n if len(jobs_table_data) > 0:\r\n output_prefix = (\r\n \"\"\"INSERT INTO %(jobs_out)s \r\n (GRID_ID, HOME_BASED, SECTOR_ID, BUILDING_TYPE) VALUES\r\n \"\"\" % {'jobs_out':jobs_output_table_name})\r\n output_postfix = ';'\r\n \r\n step = 1000\r\n length = len(jobs_table_data)\r\n iterations = int(length/step) + 1\r\n \r\n for i in range(iterations):\r\n low = i*step\r\n high = (i+1)*step\r\n \r\n if high > length: high = length\r\n \r\n output_body = \"\"\r\n \r\n for j in range(low, high):\r\n output_body += (\r\n '(%(grid)s, %(home)s, %(sector)s, %(building)s),\\n' \r\n % jobs_table_data[j])\r\n \r\n output_query = \"%s%s%s\" % (output_prefix, \r\n output_body[:-2], \r\n output_postfix)\r\n\r\n output_database.execute(output_query)\r\n \r\n \r\n ### TODO: \r",
"def populate_database(telescope_name, instrument_name):\n telescope = Telescope.objects.create(\n name=telescope_name, latitude=25.0, longitude=45.0)\n instrument = Instrument.objects.create(\n name=instrument_name, telescope=telescope)\n for year_int in (2012, 2013):\n for month_int in range(1, 13):\n for night_int in (1, monthrange(year_int, month_int)[1]):\n ut_date = date(year_int, month_int, night_int)\n night = Night.objects.create(\n ut_date=ut_date, instrument=instrument, observers='Smith')\n Exposure.objects.create(\n night=night, run_number=1, ut_start=time(10, 0, 0),\n exposed=20.0, ra=60.0, dec=30.0, object_exp=True)\n Exposure.objects.create(\n night=night, run_number=2, ut_start=time(11, 0, 0),\n exposed=30.0, ra=90.0, dec=0.0, object_exp=True)\n Exposure.objects.create(\n night=night, run_number=3, ut_start=time(12, 0, 0),\n exposed=40.0, ra=120.0, dec=-30.0, object_exp=False)",
"def populate_database(num_patients, min_checkins, max_checkins):\n departments = [\n Department(department_name=\"Cardiology\"),\n Department(department_name=\"Emergency\"),\n Department(department_name=\"Gynecology\"),\n Department(department_name=\"Pediatrics\"),\n Department(department_name=\"Obstetrics\"),\n Department(department_name=\"Oncology\"),\n Department(department_name=\"Orthopedics\"),\n Department(department_name=\"Neurology\")\n ]\n\n for i in xrange(num_patients):\n patient = Patient(**generate_patient())\n patient.departments.append(choice(departments))\n db.add(patient)\n\n for j in xrange(randrange(min_checkins, max_checkins)):\n checkin = CheckIn(**generate_checkin())\n checkin.patient_nhi = patient.nhi\n\n lci = patient.latest_checkin_time\n vid = checkin.checkin_time\n\n lci = vid if lci is None or vid > lci else lci\n patient.latest_checkin_time = lci\n\n db.add(checkin)\n\n for k in xrange(randrange(0, 3)):\n appointment = Appointment(**generate_appointment())\n appointment.patient_nhi = patient.nhi\n\n db.add(appointment)\n\n db.commit()",
"def populate_from_samples():\n\n # Tags\n try:\n for row in get_csv_data('samples/tags.csv'):\n tag = Tag(name=row['Name'], desc=row['Description'])\n db_session.add(tag)\n finally:\n db_session.commit()\n\n # Organizations\n try:\n for row in get_csv_data('samples/organizations.csv'):\n org = Organization(desc=row['Name'])\n db_session.add(org)\n finally:\n db_session.commit()\n\n # Departments\n try: \n for row in get_csv_data('samples/departments.csv'):\n org = db_session.query(Organization).filter_by(desc=row['Organization']).one()\n dpt = Department(desc=row['Department'], org=org)\n\n db_session.add(dpt)\n finally:\n db_session.commit()\n\n # Application types\n try:\n for row in get_csv_data('samples/apptypes.csv'):\n apptype = AppType(desc=row['Name'])\n db_session.add(apptype)\n finally:\n db_session.commit()\n\n # Applications\n try:\n for row in get_csv_data('samples/applications.csv'):\n apptype = db_session.query(AppType).filter_by(desc=row['AppType']).one()\n dpt = db_session.query(Department).join(Organization).\\\n filter(Department.desc==row['Department']).\\\n filter(Organization.desc==row['Organization']).\\\n one()\n\n app = App(desc=row['Application'], \n app_type=apptype, \n department=dpt,\n version=row['Version'],\n environment=row['Environment'],\n platform=row['Platform']\n )\n\n db_session.add(app)\n finally:\n db_session.commit()\n\n # Connections and Headers\n try:\n for row in get_csv_data('samples/connections.csv'):\n conn = Connection(conn_type=row['Type'], url=row['URL'], port=row['Port'], answer=row['Answer'])\n header = Header(conn_id=conn.id, header=row['Header'], value=row['Value'], conn=conn)\n\n db_session.add(conn)\n db_session.add(header)\n finally:\n db_session.commit()",
"def _setup_all_awardees():\n hpo_data = _prep_awardee_csv_data('tests/test-data/fixtures/awardees.csv')\n org_data = _prep_awardee_csv_data('tests/test-data/fixtures/organizations.csv')\n site_data = _prep_awardee_csv_data('tests/test-data/fixtures/sites.csv')\n dao = HPODao()\n #\n # Import HPO records\n #\n for column in range(0, len(hpo_data[0]) - 1):\n data = _convert_csv_column_to_dict(hpo_data, column)\n dao.insert(HPO(hpoId=column+1, displayName=data['Name'], name=data['Awardee ID'],\n organizationType=OrganizationType(data['Type']), isObsolete=ObsoleteStatus.ACTIVE))\n #\n # Import Organization records\n #\n with dao.session() as session:\n for column in range(0, len(org_data[0]) - 1):\n data = _convert_csv_column_to_dict(org_data, column)\n result = session.query(HPO.hpoId).filter(HPO.name == data['Awardee ID']).first()\n dao.insert(Organization(externalId=data['Organization ID'], displayName=data['Name'], hpoId=result.hpoId))\n #\n # Import Site records\n #\n with dao.session() as session:\n for column in range(0, len(site_data[0]) - 1):\n data = _convert_csv_column_to_dict(site_data, column)\n result = session.query(Organization.hpoId, Organization.organizationId).\\\n filter(Organization.externalId == data['Organization ID']).first()\n try:\n mayo_link_id = data['MayoLINK Client #']\n except KeyError:\n mayo_link_id = str(random.randint(7040000, 7999999))\n dao.insert(Site(siteName=data['Site'], googleGroup=data['Site ID / Google Group'].lower(),\n mayolinkClientNumber=mayo_link_id, hpoId=result.hpoId,\n organizationId=result.organizationId))",
"def setup_vars(self):\n # Add Full time positions\n self.manager_id = self._add_person(\"Manager\", \"ARRAY['Database', 'OS', 'AI']\", 30)\n self.admin_id = self._add_person(\"Admin\", salary=40)\n self.full_instructor_id = self._add_person(\n \"Instructor\", \"ARRAY['Database']\", 20\n )\n\n # Add Part time instructor\n self.part_instructor_id = self._add_part_time_instr(\"ARRAY['OS']\", 10)\n self.part_instructor_id = self._add_part_time_instr(\"ARRAY['AI']\", 10)\n\n # Add courses\n self.course_id1 = self._add_course(\"Database\", 1, \"Database\")\n self.course_id2 = self._add_course(\"OS\", 1, \"OS\")\n self.course_id3 = self._add_course(\"AI\", 1, \"AI\")\n\n # Add room\n self.room_id = self._add_room(1, 'Test room', 20)\n self.room_id2 = self._add_room(2, 'Test room 2', 20)\n\n # Add course offerings\n self.course_offering1 = self._add_course_offering('2021-01-21', 10, [('2021-06-21', 9, self.room_id), ('2021-06-21', 11, self.room_id)], '2021-05-31', 20, self.course_id1, self.admin_id)\n self.course_offering2 = self._add_course_offering('2021-01-21', 10, [('2021-06-22', 9, self.room_id), ('2021-06-22', 11, self.room_id)], '2021-05-31', 20, self.course_id2, self.admin_id)\n self.course_offering3 = self._add_course_offering('2021-01-21', 10, [('2021-06-22', 9, self.room_id2), ('2021-06-22', 11, self.room_id2)], '2021-05-31', 20, self.course_id3, self.admin_id)\n\n # Add customers\n self.customer_id1 = self._add_customer('Test1', \"test\", 987654321, '[email protected]', '1234123412341234', '123', '2025-05-31')\n self.customer_id2 = self._add_customer('Test2', \"test\", 987654321, '[email protected]', '1234123412341235', '123', '2025-05-31')\n self.customer_id3 = self._add_customer('Test3', \"test\", 987654321, '[email protected]', '1234123412341236', '123', '2025-05-31')\n\n # Register sessions\n self._register_credit_card('2021-01-21', self.course_id1, 1, self.customer_id1)\n self._register_credit_card('2021-01-21', self.course_id1, 1, self.customer_id2)\n self._register_credit_card('2021-01-21', self.course_id1, 1, self.customer_id3)\n\n # Add course packages\n self.package1 = self._add_course_package(\"Best Package\", 2, '2021-03-01', '2021-08-02', 50)\n self.package2 = self._add_course_package(\"Medium Package\", 2, '2021-03-01', '2021-08-02', 100)\n self.package3 = self._add_course_package(\"Worst Package\", 2, '2021-03-01', '2021-08-02', 150)\n\n # Buy course packages\n self._buy_package(self.customer_id1, self.package1)\n self._buy_package(self.customer_id2, self.package2)\n self._buy_package(self.customer_id3, self.package3)\n\n # Redeem sessions\n self._register_redeems('2021-01-21', self.course_id2, 1, self.customer_id1)\n self._register_redeems('2021-01-21', self.course_id2, 1, self.customer_id2)\n self._register_redeems('2021-01-21', self.course_id2, 1, self.customer_id3)\n\n # Cancel registrations\n self._cancel_registration(self.customer_id1, self.course_id1)\n self._cancel_registration(self.customer_id2, self.course_id2)",
"def insert_entities_staging(self):\n\n for year in range(1900, CURRENT_YEAR, YEARS_RANGE):\n self.load_wikidata(\"movies\", MOVIES_BY_YEAR_SPARQL_QUERY, INSERT_MOVIE_SQL_QUERY, INSERT_MOVIE_MAP_COLUMNS, year, YEARS_RANGE)\n\n self.load_wikidata(\"songs\", SONGS_BY_YEAR_SPARQL_QUERY, INSERT_SONG_SQL_QUERY, INSERT_SONG_MAP_COLUMNS, year, YEARS_RANGE, True)\n self.load_wikidata(\"tvshows\", TVSHOWS_SPARQL_QUERY, INSERT_TVSHOW_SQL_QUERY, INSERT_TVSHOW_MAP_COLUMNS)\n self.load_wikidata(\"animatedmovies\", ANIMATEDMOVIES_SPARQL_QUERY, INSERT_ANIMATEDMOVIE_SQL_QUERY,\n INSERT_ANIMATEDMOVIE_MAP_COLUMNS)\n self.load_wikidata(\"videogames\", VIDEOGAMES_SPARQL_QUERY, INSERT_VIDEOGAME_SQL_QUERY, INSERT_VIDEOGAME_MAP_COLUMNS)\n self.load_wikidata(\"books\", BOOKS_SPARQL_QUERY, INSERT_BOOK_SQL_QUERY, INSERT_BOOK_MAP_COLUMNS)",
"def run(self):\n\n for table in self.TABLES:\n self.dictionary_cursor.execute(f\"TRUNCATE TABLE {table}_Work\")\n self.dictionary_conn.commit()\n self.logger.info(\"work tables cleared\")\n for id in self.ids:\n drug = self.Drug(self, id)\n if drug.wanted:\n drug.load()\n self.logger.info(\"work tables populated\")\n for table in self.TABLES:\n insert = f\"INSERT INTO {table} SELECT * FROM {table}_Work\"\n self.dictionary_cursor.execute(f\"TRUNCATE TABLE {table}\")\n self.dictionary_cursor.execute(insert)\n self.dictionary_conn.commit()\n self.logger.info(\"live tables ready\")",
"def setup_table(self):\n\n self.setup.create_basic_table_in_dev()\n self.setup.insert_random_records_into_dev()",
"def init_tables(self) -> None:\n with self.table_access_condition:\n conn = self._get_connection()\n conn.execute(\"PRAGMA foreign_keys = 1\")\n c = conn.cursor()\n c.execute(tables.CREATE_STUDIES_TABLE)\n c.execute(tables.CREATE_SUBMISSIONS_TABLE)\n c.execute(tables.CREATE_REQUESTERS_TABLE)\n c.execute(tables.CREATE_UNITS_TABLE)\n c.execute(tables.CREATE_WORKERS_TABLE)\n c.execute(tables.CREATE_RUNS_TABLE)\n c.execute(tables.CREATE_RUN_MAP_TABLE)\n c.execute(tables.CREATE_PARTICIPANT_GROUPS_TABLE)\n c.execute(tables.CREATE_PARTICIPANT_GROUP_QUALIFICATIONS_MAPPING_TABLE)\n conn.commit()",
"def _populate(self):\n self.addDemographics()\n self.addLabs()\n self.addProblems()\n self.addMeds()\n self.addAllergies()\n self.addImmunizations()\n self.addVitals()\n self.populated_p = True",
"def populate_weather(connection):\n metadata = load_metadata('weather')\n cursor = connection.cursor()\n water_defs = get_water_definitions()\n\n # Check if tables are already populated.\n cursor.execute('SELECT count(*) FROM weather')\n weather_count = cursor.fetchone()[0]\n\n if weather_count:\n print('Weather tables already populated!')\n return\n\n print('WEATHER:')\n\n # Darksky data\n for dir_name, location in metadata.items():\n print(f'\\tPopulating weather: \"{location[\"name\"]}\".')\n\n # Insert location.\n cursor.execute(f'''INSERT INTO locations(name, lat, lng)\n VALUES ('{location['name']}', {location['lat']}, {location['lng']})''')\n location_id = cursor.lastrowid\n\n # Set weather locations for watercourses/aquifers.\n for water_body in [d['body'] for d in water_defs.values()]:\n if water_body in location:\n cursor.execute(f'''UPDATE {water_body}s\n SET location_id = {location_id}\n WHERE name IN ('{\"','\".join(location[water_body])}')''')\n break\n\n dir_path = get_data_path('weather', 'raw', dir_name)\n for json_file_name in os.listdir(dir_path):\n json_path = os.path.join(dir_path, json_file_name)\n with open(json_path, 'r', encoding='utf-8') as json_file:\n print(f'\\t\\tPopulating year: {json_file_name[0:-5]}')\n year_forecasts = json.load(json_file)\n for date, date_forecast in year_forecasts.items():\n hourly_forecasts = date_forecast['hourly']\n\n if not hourly_forecasts:\n print(f'\\t\\tNo hourly forecasts for {date}!')\n continue\n\n daily_forecast = {\n 'location_id': location_id,\n 'time': date_forecast['time'],\n 'day_time': date_forecast['sunset_time'] - date_forecast['sunrise_time'],\n 'precipitation': 0,\n 'snow_accumulation': 0\n }\n # List of value names with `avg`, `min` and `max` values\n value_names = {\n 'temperature': 'temperature',\n 'cloud_cover': 'cloudCover',\n 'dew_point': 'dewPoint',\n 'humidity': 'humidity',\n 'pressure': 'pressure',\n 'uv_index': 'uvIndex',\n 'precipitation_probability': 'precipProbability',\n 'precipitation_intensity': 'precipIntensity'\n }\n # Value name counters, which indicate how many times (out of 24)\n # certain value appears in hourly data.\n value_counts = {k: 0 for k in value_names.keys()}\n\n for value_name in value_names.keys():\n daily_forecast[f'{value_name}_avg'] = 0.0\n daily_forecast[f'{value_name}_min'] = float('inf')\n daily_forecast[f'{value_name}_max'] = float('-inf')\n\n # Calculate daily forecast values from hourly forecasts.\n for hourly_forecast in hourly_forecasts:\n for value_name in value_names.keys():\n orig_value_name = value_names[value_name]\n if is_forecast_number(orig_value_name, hourly_forecast):\n daily_forecast[f'{value_name}_avg'] += hourly_forecast[orig_value_name]\n daily_forecast[f'{value_name}_min'] = min(\n hourly_forecast[orig_value_name],\n daily_forecast[f'{value_name}_min']\n )\n daily_forecast[f'{value_name}_max'] = max(\n hourly_forecast[orig_value_name],\n daily_forecast[f'{value_name}_max']\n )\n value_counts[value_name] += 1\n\n if is_forecast_number('precipAccumulation', hourly_forecast) \\\n and hourly_forecast['precipType'] == 'snow':\n daily_forecast['snow_accumulation'] += hourly_forecast['precipAccumulation']\n elif is_forecast_number('precipIntensity', hourly_forecast) \\\n and is_forecast_number('precipProbability', hourly_forecast):\n daily_forecast['precipitation'] += \\\n hourly_forecast['precipIntensity'] * hourly_forecast['precipProbability']\n\n for value_name, value_count in value_counts.items():\n if value_count:\n # Calculate average.\n 
daily_forecast[f'{value_name}_avg'] = daily_forecast[f'{value_name}_avg'] / value_count\n else:\n # If value never appeared\n daily_forecast[f'{value_name}_avg'] = 'NULL'\n daily_forecast[f'{value_name}_min'] = 'NULL'\n daily_forecast[f'{value_name}_max'] = 'NULL'\n\n cursor.execute(f'''INSERT INTO weather({', '.join(daily_forecast.keys())})\n VALUES ({', '.join([str(v) for v in daily_forecast.values()])})''')\n\n # IOT data:\n for location in SETTINGS['weather_locations_iot']:\n print(f'\\tPopulating weather: \"{location[\"name\"]}\".')\n\n # Insert location.\n cursor.execute(f'''INSERT INTO locations(name, lat, lng)\n VALUES ('{location['name']}', {location['lat']}, {location['lng']})''')\n location_id = cursor.lastrowid\n\n # Set weather locations for watercourses/aquifers.\n for water_body in [d['body'] for d in water_defs.values()]:\n if water_body in location:\n cursor.execute(f'''UPDATE {water_body}s\n SET location_id = {location_id}\n WHERE name IN ('{\"', '\".join(location[water_body])}')''')\n\n # Set locations for all stations on given water body to match its location.\n cursor.execute(f'''SELECT id\n FROM {water_body}s\n WHERE location_id = {location_id}''')\n ids = [row[0] for row in cursor.fetchall()]\n if len(ids):\n cursor.execute(f'''UPDATE {water_body}_stations\n SET location_id = {location_id}\n WHERE {water_body}_id IN ({', '.join([str(v) for v in ids])})''')\n\n break \n \n file_name = f'''{location['lat']}-{location['lng']}.json'''\n json_path = get_data_path('weather', 'raw', file_name)\n\n # If data file doesn't exist, download it first.\n if not os.path.isfile(json_path):\n with open(json_path, 'wb', encoding=\"utf-8\") as file:\n file.write(read_from_url(location['url'], decode=False))\n \n with open(json_path, 'r', encoding='utf-8') as json_file:\n row_names = {\n \"Sun_duration\": \"sun_duration\",\n \"CloudCover\": \"cloud_cover_avg\",\n \"Percipitation\": \"precipitation\",\n \"New_snow_blanket\": \"snow_accumulation\",\n \"Snow_blanket\": \"snow_depth\",\n \"TemperatureAvg\": \"temperature_avg\",\n \"TemperatureMin\": \"temperature_min\",\n \"TemperatureMax\": \"temperature_max\"\n }\n forecasts = json.load(json_file)\n for forecast in forecasts:\n f = {row_names[k]: forecast[k] for k in row_names.keys()}\n f['location_id'] = location_id\n f['time'] = round(forecast['LastUpdatedEpoch'] / 1000)\n cursor.execute(f'''INSERT INTO weather({', '.join(f.keys())})\n VALUES ({', '.join([str(v) for v in f.values()])})''')",
"def _populate():\n models.Base.metadata.create_all(engine)\n logger.info(\"Initalized database\")\n db = Session()\n\n hermann = models.Account(id=\"test\",\n name=\"Hermann Dörkschneider\",\n email=\"[email protected]\")\n db.add(hermann)\n\n journey = models.Journey(id=str(uuid.uuid4()),\n account_id=\"test\",\n visibility=\"PUBLIC\",\n start_time_utc=datetime.datetime.now(),\n stop_time_utc=datetime.datetime.now())\n db.add(journey)\n\n waypoint1 = models.Waypoint(journey=journey,\n time_utc=datetime.datetime.now(),\n accuracy_m=2.71,\n latitude=3.1416,\n longitude=1.618,\n height_m=10)\n db.add(waypoint1)\n\n waypoint2 = models.Waypoint(journey=journey,\n time_utc=datetime.datetime.now(),\n accuracy_m=5.1,\n latitude=3.1410,\n longitude=1.620,\n height_m=5)\n db.add(waypoint2)\n\n db.commit()\n logger.info(\"Created test account {}\".format(hermann))\n logger.info(\"Created test journey {}\".format(journey))",
"def database_setup():\n Base.metadata.create_all(bind=engine)\n db = LocalSession()\n try:\n populate_from_json(db, Vector, str(VECTORS))\n populate_from_json(db, Gender, str(GENDERS))\n populate_from_json(db, Tag, str(TAGS))\n finally:\n db.close()",
"def importData():\n #importChallengeDataToDB()\n importTrendingDataToDB()",
"def set_up_tables():\n table_users = \"\"\"\n CREATE TABLE IF NOT EXISTS users (\n id SERIAL PRIMARY KEY,\n username VARCHAR (24) NOT NULL UNIQUE,\n firstname VARCHAR (24) NOT NULL,\n lastname VARCHAR (24) NOT NULL,\n othername VARCHAR (24),\n phone VARCHAR (24) NOT NULL,\n email VARCHAR (30) NOT NULL UNIQUE,\n password VARCHAR (128) NOT NULL,\n passportUrl VARCHAR (200),\n isPolitician BOOLEAN,\n isAdmin BOOLEAN\n )\"\"\"\n\n parties_table = \"\"\" \n CREATE TABLE IF NOT EXISTS parties (\n id SERIAL PRIMARY KEY,\n name VARCHAR (35) NOT NULL UNIQUE,\n hqAddress VARCHAR (30),\n logoUrl VARCHAR\n )\"\"\"\n\n offices_table = \"\"\"\n CREATE TABLE IF NOT EXISTS offices (\n id SERIAL PRIMARY KEY,\n name VARCHAR (35) NOT NULL UNIQUE,\n type VARCHAR (35)\n )\"\"\"\n\n canditates_table = \"\"\"\n CREATE TABLE IF NOT EXISTS candidates (\n id SERIAL,\n candidate INTEGER,\n office INTEGER,\n PRIMARY KEY (office, candidate),\n FOREIGN KEY (candidate) REFERENCES users(id) ON DELETE CASCADE,\n FOREIGN KEY (office) REFERENCES offices(id) ON DELETE CASCADE\n )\"\"\"\n\n voters_table = \"\"\"\n CREATE TABLE IF NOT EXISTS votes (\n id SERIAL,\n office INTEGER,\n candidate INTEGER,\n voter INTEGER,\n PRIMARY KEY (office, voter),\n FOREIGN KEY (office) REFERENCES offices(id) ON DELETE CASCADE,\n FOREIGN KEY (candidate) REFERENCES users(id) ON DELETE CASCADE,\n FOREIGN KEY (voter) REFERENCES users(id) ON DELETE CASCADE\n )\"\"\"\n\n return [table_users, parties_table,\n offices_table, canditates_table, voters_table]",
"def create_tables(): \n \n pk_contraint = \"CONSTRAINT {}_pk PRIMARY KEY ({})\"\n uq_contraint = \"CONSTRAINT {}_uq UNIQUE ({})\"\n fk_query = \"\"\"CONSTRAINT {}_fk_{} \n FOREIGN KEY ({}) \n REFERENCES {}({}) \n ON UPDATE CASCADE \n ON DELETE RESTRICT\n \"\"\"\n \n create_dict = {}\n index = 1\n\n\n ############################## public SCHEMA ##############################\n \n schema = 'public'\n create_schema(schema)\n\n #################### site ####################\n table_name = 'site'\n pk_id = 'site_id'\n uq_list = ['site_code']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_code CHAR(3),\n purok VARCHAR,\n sitio VARCHAR,\n barangay VARCHAR,\n municipality VARCHAR,\n province VARCHAR,\n region VARCHAR,\n psgc INTEGER,\n active BOOLEAN NOT NULL DEFAULT TRUE,\n season SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n ############################## spatial SCHEMA ##############################\n \n schema = 'spatial'\n create_schema(schema)\n \n #################### exposure ####################\n table_name = 'exposure'\n pk_id = 'exp_id'\n uq_list = ['exp_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n exp_name VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n \n #################### site_exposure ####################\n table_name = 'site_exposure'\n pk_id = 'se_id'\n uq_list = ['site_id', 'exp_id', 'geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'},\n 'exp_id': {'ref_schema': 'spatial', 'ref_table': 'exposure'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n exp_id INTEGER,\n label_name VARCHAR,\n geom GEOMETRY,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n deactivated DATE,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n \n #################### feature ####################\n table_name = 'feature'\n pk_id = 'feat_id'\n uq_list = ['feat_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n feat_name VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### site_feature ####################\n table_name = 'site_feature'\n pk_id = 'sf_id'\n uq_list = ['site_id', 'feat_id', 'geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'},\n 'feat_id': {'ref_schema': 'spatial', 'ref_table': 'feature'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n feat_id INTEGER,\n geom GEOMETRY,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n deactivated DATE,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### hazard_zone ####################\n table_name = 'hazard_zone'\n pk_id = 'hz_id'\n uq_list = ['site_id, geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'}}\n query 
= \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n geom GEOMETRY,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n deactivated DATE,\n {}, {} {}\n );\n \"\"\"\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### monitoring ####################\n table_name = 'monitoring'\n pk_id = 'mon_id'\n uq_list = ['mon_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n mon_name VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### site_monitoring ####################\n table_name = 'site_monitoring'\n pk_id = 'sm_id'\n uq_list = ['site_id', 'mon_id', 'geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'},\n 'mon_id': {'ref_schema': 'spatial', 'ref_table': 'monitoring'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n mon_id INTEGER,\n label_name VARCHAR,\n geom GEOMETRY,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n deactivated DATE,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n ############################### comm SCHEMA ###############################\n \n schema = 'comm'\n create_schema(schema)\n\n #################### gsm_server ####################\n table_name = 'gsm_server'\n pk_id = 'server_id'\n uq_list = ['server_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n server_name VARCHAR,\n platform_type VARCHAR,\n version SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### server_port ####################\n table_name = 'server_port'\n pk_id = 'port_id'\n uq_list = ['server_id', 'port']\n fk_dict = {'server_id': {'ref_schema': 'comm', 'ref_table': 'gsm_server'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n server_id INTEGER,\n port BOOLEAN,\n ser_port VARCHAR,\n pwr_on_pin SMALLINT,\n ring_pin SMALLINT,\n module_type SMALLINT,\n {}, {} {}\n );\n \"\"\"\n query += \"\"\" COMMENT ON TABLE {}.{} IS \n '0- left\n 1- right'\n ;\"\"\".format(schema, table_name)\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### network_type ####################\n table_name = 'network_type'\n pk_id = 'prefix'\n uq_list = ['prefix']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} VARCHAR(3), \n carrier SMALLINT,\n {}, {} {}\n );\n \"\"\"\n query += \"\"\" COMMENT ON TABLE {}.{} IS \n '1- globe\n 2- smart\n 3- landline'\n ;\"\"\".format(schema, table_name)\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### gsm_module ####################\n table_name = 'gsm_module'\n pk_id = 'gsm_id'\n uq_list = ['prefix', 'num', 'activated']\n fk_dict = {'prefix': {'ref_schema': 'comm', 'ref_table': 'network_type'},\n 'port_id': 
{'ref_schema': 'comm', 'ref_table': 'server_port'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n prefix VARCHAR(3),\n num CHAR(7),\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n port_id INTEGER,\n {}, {} {}\n );\n \"\"\"\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n ############################# temporal SCHEMA #############################\n \n schema = 'temporal'\n create_schema(schema)\n\n #################### marker_observation ####################\n table_name = 'marker_observation'\n pk_id = 'mo_id'\n uq_list = ['site_id', 'ts']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n ts TIMESTAMP,\n meas_type VARCHAR(7),\n weather VARCHAR,\n observer_name VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### marker_history ####################\n table_name = 'marker_history'\n pk_id = 'hist_id'\n uq_list = ['sm_id', 'ts', 'event']\n fk_dict = {'sm_id': {'ref_schema': 'spatial', 'ref_table': 'site_monitoring'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL,\n sm_id BIGINT,\n ts TIMESTAMP,\n event BOOLEAN,\n label_name VARCHAR,\n {}, {} {}\n );\n \"\"\"\n query += \"\"\" COMMENT ON TABLE {}.{} IS \n '0- rename\n 1- reposition'\n ;\"\"\".format(schema, table_name)\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### marker_data ####################\n table_name = 'marker_data'\n pk_id = 'data_id'\n uq_list = ['sm_id', 'mo_id']\n fk_dict = {'sm_id': {'ref_schema': 'spatial', 'ref_table': 'site_monitoring'},\n 'mo_id': {'ref_schema': 'temporal', 'ref_table': 'marker_observation'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL,\n mo_id BIGINT,\n sm_id BIGINT,\n measurement NUMERIC(5,1),\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### marker_alert ####################\n table_name = 'marker_alert'\n pk_id = 'alert_id'\n uq_list = ['data_id']\n fk_dict = {'data_id': {'ref_schema': 'temporal', 'ref_table': 'marker_data'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL,\n data_id BIGINT,\n displacement NUMERIC(4,1),\n time_delta FLOAT,\n alert_level SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### logger_model ####################\n table_name = 'logger_model'\n pk_id = 'model_id'\n uq_list = ['has_tilt', 'has_rain', 'has_piezo', 'has_soms', 'logger_type']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n has_tilt BOOLEAN,\n has_rain BOOLEAN,\n has_piezo BOOLEAN,\n has_soms BOOLEAN,\n logger_type SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### logger 
####################\n table_name = 'logger'\n pk_id = 'logger_id'\n uq_list = ['sm_id']\n fk_dict = {'sm_id': {'ref_schema': 'spatial', 'ref_table': 'site_monitoring'},\n 'model_id': {'ref_schema': 'temporal', 'ref_table': 'logger_model'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n sm_id BIGINT,\n model_id INTEGER,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n \n #################### logger_mobile ####################\n table_name = 'logger_mobile'\n pk_id = 'mobile_id'\n uq_list = ['logger_id', 'activated']\n fk_dict = {'logger_id': {'ref_schema': 'temporal', 'ref_table': 'logger'},\n 'gsm_id': {'ref_schema': 'comm', 'ref_table': 'gsm_module'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL,\n logger_id INTEGER,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n sim_num VARCHAR(12),\n gsm_id INTEGER,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n #################### EXECUTE QUERY TO CREATE TABLES ####################\n for index in create_dict.keys():\n dct = create_dict[index]\n schema = dct['schema']\n table_name = dct['table_name']\n query = dct['query']\n pk_id = dct['pk_id']\n uq_list = dct['uq_list']\n fk_dict = dct['fk_dict']\n if len(fk_dict.keys()) == 0:\n fk_constraint = ''\n else:\n fk_constraint_list = ['']\n for fk_id in fk_dict.keys():\n ref_schema = fk_dict.get(fk_id)['ref_schema']\n ref_table = fk_dict.get(fk_id)['ref_table']\n fk_part = fk_query.format(table_name, ref_table, fk_id,\n \"{}.{}\".format(ref_schema, ref_table),\n fk_id)\n fk_constraint_list.append(fk_part)\n fk_constraint = ', '.join(fk_constraint_list)\n \n query = query.format(schema, table_name, pk_id, \n pk_contraint.format(table_name, pk_id),\n uq_contraint.format(table_name, ', '.join(uq_list)),\n \"{}\".format(fk_constraint))\n qdb.execute(query)",
"def init_db():\n db.drop_all()\n db.create_all()\n seed_companies()\n seed_emission_reports()\n seed_reduction_targets()\n seed_milestones()",
"def example_data():\n\n # In case this is run more than once, empty out existing data\n EmployeeProject.query.delete()\n Employee.query.delete()\n Department.query.delete()\n Project.query.delete()\n\n # Add sample employees and departments\n df = Department(dept_code='fin', dept_name='Finance', phone='555-1000')\n dl = Department(dept_code='legal', dept_name='Legal', phone='555-2222')\n dm = Department(dept_code='mktg', dept_name='Marketing', phone='555-9999')\n\n leonard = Employee(name='Leonard', dept=dl)\n liz = Employee(name='Liz', dept=dl)\n maggie = Employee(name='Maggie', state='DC', dept=dm)\n nadine = Employee(name='Nadine')\n\n db.session.add_all([df, dl, dm, leonard, liz, maggie, nadine])\n db.session.commit()\n\n pc = Project(proj_code='car', proj_name='Design Car',\n assignments=[EmployeeProject(emp_id=liz.id, role='Chair'),\n EmployeeProject(emp_id=maggie.id)])\n ps = Project(proj_code='server', proj_name='Deploy Server',\n assignments=[EmployeeProject(emp_id=liz.id),\n EmployeeProject(emp_id=leonard.id, role='Auditor')])\n\n db.session.add_all([ps, pc])\n db.session.commit()",
"def prepare_database(self, waterscenario=None, trafficscenario=None):\n\n # Validate input\n if waterscenario:\n waterscenario = Path(waterscenario)\n assert waterscenario.exists(), 'Waterscenario file not found'\n\n BIVAS = pyBIVAS(self.BIVAS_database)\n df_trafficscenarios = BIVAS.trafficscenario_numberoftrips()\n\n\n # Do changes to database:\n con = sqlite3.connect(self.BIVAS_database)\n c = con.cursor()\n\n # Update waterscenario with given file\n if waterscenario:\n # Delete current water_scenario_values\n sql = \"DELETE FROM water_scenario_values WHERE 1\"\n c.execute(sql)\n\n sql = \"DELETE FROM water_scenarios WHERE 1\"\n c.execute(sql)\n\n # Write waterdata to database\n\n # Read waterscenario file\n df = pd.read_csv(waterscenario, header=0, index_col=None)\n df = df[['ArcID', 'SeasonID', 'WaterLevel__m', 'RateOfFlow__m3_s', 'WaterSpeed__m_s', 'WaterDepth__m']]\n df['WaterScenarioID'] = 1\n\n # Add new water_scenario\n df.to_sql('water_scenario_values', con,\n if_exists='append', index=False)\n\n # Rename water_scenario\n # waterscenario_name = waterscenario.stem\n # sql = \"\"\"UPDATE water_scenarios SET Description = \"{}\" WHERE ID = {}\"\"\".format(\n # waterscenario_name, waterscenario)\n # c.execute(sql)\n\n\n waterscenario_id = 1\n waterscenario_name = 'TEST waterscenario'\n waterscenario_type = 1\n sql = \"\"\"INSERT into water_scenarios VALUES ({}, '{}', {})\"\"\".format(\n waterscenario_id,\n waterscenario_name,\n waterscenario_type\n )\n c.execute(sql)\n\n # Remove water scenario. I'm simply updating all scenarios\n # Otherwise I should check the BranchSet structure\n sql = \"\"\"UPDATE parameters SET WaterScenarioID = 1 WHERE 1\"\"\"\n c.execute(sql)\n\n else:\n # Remove water scenario. I'm simply updating all scenarios\n # Otherwise I should check the BranchSet structure\n sql = \"\"\"UPDATE parameters SET WaterScenarioID = NULL WHERE 1\"\"\"\n c.execute(sql)\n\n # Set scenario name and description\n date_string = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n self.description = f'Date: {date_string}, Waterscenario: {waterscenario}, TrafficScenario: {trafficscenario},'\n\n sql = \"\"\"\n UPDATE scenarios\n SET Name = \"{}\",\n Description = \"{}\"\n WHERE ID = {}\n \"\"\".format(\n self.scenarioName, self.description, self.scenarioID)\n c.execute(sql)\n\n # Update traffic Scenario. I'm simply updating all scenarios\n # Otherwise I should check the BranchSet structure\n if trafficscenario:\n if isinstance(trafficscenario, int):\n sql = \"\"\"UPDATE parameters SET TrafficScenarioID = \"{}\" WHERE 1\"\"\".format(trafficscenario)\n c.execute(sql)\n else:\n trafficScenarioID = df_trafficscenarios.index[df_trafficscenarios['Description'] == trafficscenario][0]\n sql = \"\"\"UPDATE parameters SET TrafficScenarioID = \"{}\" WHERE 1\"\"\".format(trafficScenarioID)\n c.execute(sql)\n\n con.commit()\n con.close()\n\n logger.info('BIVAS database copied and updated')",
"def update_data(self):\n staff = Staff.objects.all()\n orgs = Organization.objects.all()\n depts = Department.objects.all()\n\n existing = self.all()\n if existing.count():\n existing.delete()\n\n if staff.count():\n for s in staff:\n record = CombinedTeledata(\n id=s.id,\n alpha=s.alpha,\n name=s.name,\n first_name=s.first_name,\n last_name=s.last_name,\n sort_name=s.sort_name,\n email=s.email,\n phone=s.phone,\n postal=s.postal,\n job_position=s.job_position,\n department=s.dept.name,\n dept_id=s.dept.id,\n organization=s.dept.org.name,\n org_id=s.dept.org.id,\n building=s.bldg.name,\n bldg_id=s.bldg.import_id,\n room=s.room,\n from_table='staff'\n )\n\n try:\n record.save(doing_import=True)\n record.keywords_combined.set(s.keywords.all())\n except Exception as e:\n logger.error(str(e))\n\n if orgs.count():\n for o in orgs:\n record = CombinedTeledata(\n id=o.id,\n name=o.name,\n sort_name=o.name,\n phone=o.phone,\n fax=o.fax,\n building=o.bldg.name,\n bldg_id=o.bldg.import_id,\n room=o.room,\n from_table='organizations'\n )\n\n try:\n record.save(doing_import=True)\n record.keywords_combined.set(o.keywords.all())\n except Exception as e:\n logger.error(str(e))\n\n if depts.count():\n for d in depts:\n record = CombinedTeledata(\n id=d.id,\n name=d.name,\n sort_name=d.name,\n phone=d.phone,\n fax=d.fax,\n organization=d.org.name,\n org_id=d.org.id,\n building=d.bldg.name,\n bldg_id=d.bldg.import_id,\n room=d.room,\n from_table='departments'\n )\n\n try:\n record.save(doing_import=True)\n record.keywords_combined.set(d.keywords.all())\n except Exception as e:\n logger.error(str(e))",
"def create_final_table(conn, county):\r\n for county in county:\r\n query = f\"SELECT name FROM sqlite_master WHERE type ='table' AND name NOT LIKE 'sqlite_%' AND name = '{county}'\"\r\n result = execute_query(conn, query)\r\n try:\r\n if len(result) == 0:\r\n query = f\"create table {county} as select * from {county}_stg;\"\r\n execute_query(conn, query)\r\n\r\n load_final_table(conn, county)\r\n except Exception as e:\r\n print(f\"This query {query} failed with exception {e}\")",
"def example_data():\n\n seed.add_sfpl_branches()\n seed.add_formats()\n\n book_1 = Book(title=\"Alanna: The First Adventure\", author=\"Tamora Pierce\")\n book_2 = Book(title=\"The Hitchhiker's Guide to the Galaxy\", author=\"Douglas Adams\")\n book_3 = Book(title=\"The Hobbit\", author=\"J.R.R. Tolkien\")\n db.session.add(book_1)\n db.session.add(book_2)\n db.session.add(book_3)\n\n esqg = User(first_name=\"Elizabeth\", last_name=\"Goodman\", email=\"[email protected]\", password=\"programmer\")\n db.session.add(esqg)\n db.session.commit()\n\n esqg_gr = GoodreadsUser(user_id=esqg.user_id, goodreads_id=ESQG)\n db.session.add(esqg_gr)\n\n my_mission = UserBranch(branch_code=\"miss\", user_id=esqg.user_id)\n db.session.add(my_mission)\n\n my_main = UserBranch(branch_code=\"main\", user_id=esqg.user_id)\n db.session.add(my_main)\n\n db.session.commit()"
] | [
"0.69008756",
"0.66442764",
"0.6618634",
"0.65720314",
"0.6367745",
"0.63610566",
"0.61599475",
"0.61476094",
"0.60915166",
"0.60889375",
"0.60289264",
"0.59944206",
"0.59331673",
"0.5864972",
"0.582192",
"0.5799883",
"0.5796334",
"0.5792097",
"0.5777224",
"0.5760808",
"0.5741017",
"0.5714303",
"0.5696844",
"0.5693156",
"0.5693014",
"0.5668316",
"0.5635157",
"0.56118464",
"0.55973214",
"0.55897576"
] | 0.71547 | 0 |
Populate locations data table. | def populate_locations(connection):
print('Populating locations...')
cursor = connection.cursor()
with open(get_data_path('locations', 'locations.json'), 'r', encoding='utf-8') as json_file:
locations = json.load(json_file)
for station_id, location in locations.items():
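            # Only stations that already exist in watercourse_stations get a location inserted and linked.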
cursor.execute(f'''SELECT id
FROM watercourse_stations
WHERE id = {station_id}''')
if len(cursor.fetchall()):
cursor.execute(f'''INSERT INTO locations(name, lat, lng)
VALUES ('{location['name']}', {location['lat']}, {location['lng']})''')
cursor.execute(f'''UPDATE watercourse_stations
SET location_id = {cursor.lastrowid}
WHERE id = {station_id}''') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_position_data(self):\n # populate 'Location' field randomly\n self.output['Location'] = np.random.choice(self.locations, self.obs)\n\n # clean up geodata data frame and create 'Position' attribute\n nc = self.geodata[['Lat', 'Lng', 'Elevation']].round(2)\n nc['Elevation'] = nc['Elevation'].astype(int)\n self.geodata['Position'] = nc.astype(\n str).apply(lambda x: ','.join(x), axis=1)\n self.geodata.drop(columns=['Lat', 'Lng', 'Elevation'], inplace=True)\n\n # update 'Position' column in output data frame\n left = self.output.set_index('Location') # set left index\n right = self.geodata.set_index('Location') # set right index\n self.output = left.loc[:, left.columns.union(right.columns)] # union\n self.output.update(right) # update self.output \"Position\" column\n self.output.reset_index(inplace=True)",
"def add_locations(self):\n for _ in range(0, self.num_locations):\n detector_id = self.generate_id()\n detector_direction = self.generate_direction()\n detector_point = self.generate_point()\n self.dataset[detector_id] = (detector_direction, detector_point)\n assert len(self.dataset) == self.num_locations",
"def get_all_locations(self):",
"def load_data():\n if _LOCATIONS_BY_ID:\n return _LOCATIONS_BY_NAME, _LOCATIONS_BY_ID\n\n # We need to read the locations in order of country -> admin level 1 -> admin level 2 -> city.\n # This is so that the higher resolution locations can look up the lower resolution locations\n # that they belong to, and compute the necessary fields.\n countries_by_code = _load_country_data(_DATA_FILES['country'])\n admin1_by_code = _load_admin1_data(_DATA_FILES['admin_1'], countries_by_code)\n admin2_by_code = _load_admin2_data(_DATA_FILES['admin_2'], countries_by_code, admin1_by_code)\n _load_city_data(_DATA_FILES['city'], countries_by_code, admin1_by_code, admin2_by_code)\n _add_alternate_names(_DATA_FILES['alt_wiki_names'])\n _add_estimated_importances(_DATA_FILES['estimated_importance'])\n\n return _LOCATIONS_BY_NAME, _LOCATIONS_BY_ID",
"def generate_test_locations(self):\n def generate_locations_for_organization(\n location_names, organization_name):\n item_dict = {}\n for name in location_names:\n item_dict['{}_{}'.format(name, organization_name)] = {\n 'name': name,\n 'organization': organization_name\n }\n return item_dict\n\n self.ls_o1_dict = \\\n generate_locations_for_organization(\n ['l1', 'l2', 'l3', 'l4', 'l5'], 'o1')\n\n self.ls_sub1_o1_dict = \\\n generate_locations_for_organization(\n ['l1', 'l2', 'l3', 'l4'], 'sub1_o1')\n\n self.ls_o2_dict = \\\n generate_locations_for_organization(['l1', 'l2', 'l3', 'l4'], 'o2')\n\n self.ls_sub1_o2_dict = \\\n generate_locations_for_organization(['l1', 'l2'], 'sub1_o2')\n\n # generate locations of org_3\n self.ls_o3_dict = \\\n generate_locations_for_organization(['l1', 'l2'], 'o3')\n\n # generate locations dictionary\n self.ls_dict = {\n **self.ls_o1_dict,\n **self.ls_sub1_o1_dict,\n **self.ls_o2_dict,\n **self.ls_sub1_o2_dict,\n **self.ls_o3_dict,\n }\n\n # generate locations in database\n self.locations = self.create_locations_from_data(\n self.ls_dict, self.orgs)",
"def populate_stops(self):\n stops = self.load_csv('stops.txt')\n stops = self.process_stops(stops)\n\n connection = db.connect()\n for stop in stops:\n try:\n connection.execute(schema.stops.insert(), stop)\n except DataError:\n print \"Missing data for stop: %s\" % (stop)",
"def create_locations(self, data):\n total_objects = len(data)\n parsed_objects = 0\n\n for object in data:\n # Get location title. 'name' val is available to all objects, but Building 'title'\n # and RegionalCampus 'description' are more descriptive. Use them if available.\n if hasattr(object, 'title'):\n title = object['title']\n elif hasattr(object, 'description'):\n title = object['description']\n else:\n title = object['name']\n\n # Get other data.\n mapurl = object['profile_link']\n import_id = object['id']\n\n if title:\n # Check to see if the location name, map url are too long\n if len(title) > 256:\n title = title[0:256]\n if len(mapurl) > 400:\n mapurl = mapurl[0:400]\n if len(import_id) > 256:\n import_id = import_id[0:256]\n\n # See if an existing location exists with the current object ID.\n # Update the existing location if it exists; else, save the new location\n try:\n old_location = Location.objects.get(import_id=import_id)\n except Exception as e:\n logging.debug('No existing location found for %s: %s. Creating new location...' % (title, e))\n # No existing matches found, or the matches were duplicate\n new_location = Location(title=title, url=mapurl, room='', import_id=import_id, reviewed=True)\n try:\n new_location.save()\n except Exception as e:\n logging.error('Unable to save new location %s: %s' % (title, str(e)))\n else:\n parsed_objects += 1\n logging.info('New location %s created.' % title)\n else:\n logging.debug('Existing location %s found with Import ID %s. Updating existing location...' % (title, import_id))\n old_location.title = title\n old_location.url = mapurl\n old_location.room = ''\n old_location.reviewed = True\n try:\n old_location.save()\n except Exception as e:\n logging.error('Unable to save existing location %s: %s' % (title, str(e)))\n else:\n parsed_objects += 1\n logging.info('Existing location %s with Import ID %s updated.' % (title, import_id))\n\n logging.info('Done. %s of %s available objects successfully imported.' % (parsed_objects, total_objects))",
"def add_locations(updatemedf):\n # List of unique identifiers to search in text\n uniquedf = pd.read_csv('uniquelist.csv', encoding=\"latin-1\")\n\n # Use lookup list to find text and look for institution matches, \n # once found append unique list of matching institution lookups\n df = add_univ_city(updatemedf, uniquedf)\n # Output this to a file for checking and adding more values as needed\n return df",
"def __init__(self, num_locations):\n self.dataset = {}\n self.num_locations = num_locations\n self.add_locations()",
"def test_bulk_locations(self):\n # do twice to check if it really updates\n lengths = []\n for i in range(2):\n file_path = os.path.join(os.path.dirname(__file__),\n self.testdata_folder,\n self.filename_locations)\n data = {\n 'bulk_upload' : open(file_path, 'rb'),\n }\n\n res = self.client.post(self.location_url, data)\n assert res.status_code == status.HTTP_201_CREATED, (\n responses.get(res.status_code, res.status_code), res.content)\n lengths.append(len(AdministrativeLocation.objects.all()))\n\n assert lengths[0] == lengths[1]\n\n file_path = os.path.join(os.path.dirname(__file__),\n self.testdata_folder,\n self.filename_locations_duplicates)\n data = {\n 'bulk_upload' : open(file_path, 'rb'),\n }\n\n res = self.client.post(self.location_url, data)\n assert res.status_code == status.HTTP_400_BAD_REQUEST",
"def get_locations():\n\n dtype = {'id': str,\\\n 'lat': float,\n 'lon': float,\n 'address': str,\n }\n\n try: \n locations = pd.read_csv('Data/kulkijat-mittauspisteet.csv', sep=',', dtype=dtype)\n except FileNotFoundError:\n print('\\nMittauspisteet sisältävää tiedostoa kulkijat-mittauspisteet.csv ei löytynyt.\\n')\n locations = pd.DataFrame()\n\n return locations",
"def _update_locations(self):\n raw_data = self._read_data()\n processed_data = self._process_data(raw_data) if self._process_data is not None else raw_data\n if processed_data:\n for k, v in processed_data.items():\n if k in self._locations.keys():\n self.predictors[k].update(*v)\n self._locations[k] = v\n for k, v in self._locations.items():\n if k not in processed_data:\n self._locations[k] = self.predictors[k].predict()\n else:\n self._locations = {k: self.predictors[k].predict() for k in self._locations.keys()}\n self._logger.debug(\"Locator updated locations\")",
"def set_locations():\n STATUS['locations']['monster'][0] = generate_random_coord(STATUS['grid_size'])\n STATUS['locations']['monster'][1] = generate_random_coord(STATUS['grid_size'])\n STATUS['locations']['weapon'][0] = generate_random_coord(STATUS['grid_size'])\n STATUS['locations']['weapon'][1] = generate_random_coord(STATUS['grid_size'])",
"def get_locations(self):\n try:\n output_json = {}\n total_locations = list(self.mongo_db_object.find_all(AppConfigurations.MONGO_DATABASE,\n AppConstants.LOCATION.MONGO_LOCATION_COLLECTION_NAME))\n output_json = total_locations\n return AppConstants.result_success_template(output_json)\n\n except Exception as e:\n print(\"Error while fetching the Location Data.\", str(e))",
"def load(self, locations):\n try:\n assert isinstance(locations, list)\n super(Arrivals, self).load({'locIDs': locations})\n except AssertionError:\n print(\"Locations must be a list\")",
"def _gen_locs_dbscan(sp, distance_metric, db):\n p = np.array([sp.geometry.x, sp.geometry.y]).transpose()\n if distance_metric == \"haversine\":\n p = np.deg2rad(p) # haversine distance metric assumes input is in rad\n labels = db.fit_predict(p)\n sp[\"location_id\"] = labels\n return sp",
"def load(self):\n return list(self.obj.locations_set.all())",
"def populate_contents(self):\n\n data_table = self.data_table\n world = self.world\n\n self.add_text_row('World Name', data_table.world_name_label.text())\n self.add_text_row('Coordinates', data_table.world_coords_label.text())\n self.add_text_row('World Type', data_table.world_type_label.text())\n if data_table.world_extra_label.text() != '':\n self.add_text_row('Extra Info', data_table.world_extra_label.text())\n self.add_text_row('Filename', world.base_filename)\n self.add_text_row('Size', '{}x{}'.format(*world.info.size))\n\n if len(world.info.dungeons) > 0:\n dungeons = self.add_text_row('Dungeons', '<br/>'.join(sorted(world.info.dungeons)))\n else:\n self.add_text_row('Dungeons', '-')\n\n if len(world.info.biomes) > 0:\n biomes = self.add_text_row('Biomes', '<br/>'.join(sorted(world.info.biomes)))\n else:\n self.add_text_row('Biomes', '-')",
"def populate_cities():\n if City.query.filter_by(name=CITIES[0]).first():\n return\n\n for city in CITIES:\n _add_city(city)",
"def create_data():\n # Locations\n data = {}\n num_vehicles = 20\n depot = 0\n locations = loc1\n demands = popn\n\n num_locations = len(locations)\n dist_matrix = {}\n\n for from_node in range(0,num_locations):\n dist_matrix[from_node] = {}\n\n for to_node in range(0,num_locations):\n dist_matrix[from_node][to_node] = (\n haversine(\n locations[from_node],[to_node])\n #locations[to_node],[from_node])\n \"\"\"\n data[\"distances\"] =dist_matrix\n data[\"num_locations\"] = len(dist_matrix)\n data[\"num_vehicles\"] = 6\n data[\"depot\"] = 0\n data[\"demands\"] = demands\n #data[\"vehicle_capacities\"] = capacities\n data[\"time_per_demand_unit\"] = 0.05\n return data\n \"\"\"\n return [ num_vehicles, depot, locations, dist_matrix]",
"def create_locations_from_data(self, data, orgs):\n item_dict = {}\n for (item_name, data) in data.items():\n item_dict[item_name] = \\\n Location(\n name=data['name'],\n organization=orgs.get(data['organization']))\n item_dict[item_name].save()\n return item_dict",
"def getAllLocation(table):\n\tlocs = []\n\n\tnum = len(table)\n\n\tfor i in range(num):\n\t\t# first field is the name\n\t\tloc = getLatAndLong(table[i][1])\n\n\t\tlocs.append(loc)\n\n\treturn locs",
"def get_all_locations():\n rs = run_query('''select * from zlrz_office_location''')\n return [] if rs is None else list(map(lambda t: Location(t[1], t[2], t[3], t[4], t[5], t[0]), rs))",
"def locations_fixture(location):\n return [location, _create_location()]",
"def build_polling_location_txt(self):\n self.base_df['address_line'] = self.base_df.apply(\n lambda row: self.get_address_line(row['index'], row['address1'], row['address2'], row['city'],\n row['state'], row['zip_code']), axis=1)\n\n self.base_df['directions'] = self.base_df.apply(\n lambda row: self.get_directions(), axis=1)\n #\n self.base_df['hours'] = self.base_df.apply(\n lambda row: self.get_hours(row['index'],row['start_time'], row['end_time']), axis=1)\n\n self.base_df['photo_uri'] = self.base_df.apply(\n lambda row: self.get_photo_uri(), axis=1)\n\n self.base_df['hours_open_id'] = self.base_df.apply(\n lambda row: self.create_hours_open_id(row['index'], row['address1'], row['address2'], row['city'],\n row['state'], row['zip_code']), axis=1)\n\n self.base_df['is_drop_box'] = self.base_df.apply(\n lambda row: self.is_drop_box(), axis=1)\n\n self.base_df['is_early_voting'] = self.base_df.apply(\n lambda row: self.is_early_voting(), axis=1)\n\n self.base_df['latitude'] = self.base_df.apply(\n lambda row: self.get_latitude(), axis=1)\n\n self.base_df['longitude'] = self.base_df.apply(\n lambda row: self.get_longitude(), axis=1)\n\n self.base_df['latlng_source'] = self.base_df.apply(\n lambda row: self.get_latlng_source(), axis=1)\n\n self.base_df['id'] = self.base_df.apply(\n lambda row: self.create_id(row['index'], row['ocd_division'],row['address1'], row['address2'],\n row['city'], row['state'], row['zip_code']), axis=1)\n\n return self.base_df",
"def process_location(pid, data_source, out_loc, start_date, end_date, debug=False):\n\n #query = client.query_data_points(page_size=PAGE_SIZE, source=pid)\n query = try_query(pid)\n\n location_query = try_filter(query, pid, 'pdk-location', start_date, end_date)\n \"\"\" location_query = query.filter(source=pid, \n generator_identifier='pdk-location',\n created__gte=start_date,\n created__lte=end_date).order_by('created')\n \"\"\"\n tot_count = location_query.count()\n count = 0\n frac = int(tot_count / 100)\n\n loc_df = pd.DataFrame()\n for point in location_query:\n point_df = pd.DataFrame.from_dict(point).iloc[0].to_frame().transpose()\n metadata_df = pd.Series(point['passive-data-metadata']).to_frame().transpose()\n # TODO check if ignoring errors is safe\n metadata_df = metadata_df.drop(['latitude', 'longitude'], axis='columns', errors=\"ignore\")\n point_df.reset_index(inplace=True, drop=True)\n point_df = pd.concat([metadata_df, point_df], axis=1, sort=True)\n \n point_df.drop('passive-data-metadata', axis='columns', inplace=True)\n missing_cols = [col for col in loc_df.columns.values if col not in point_df.columns.values]\n \n if len(missing_cols) > 0 and loc_df.shape[0] > 0:\n for col in missing_cols:\n point_df[col] = np.nan\n point_df = point_df[loc_df.columns]\n loc_df = loc_df.append(point_df)\n count += 1\n if debug and (count % frac == 0):\n print(\"{0:.2f}% complete\".format(float(count)/float(tot_count)*100))\n\n loc_df['pid'] = pid \n loc_df['data_source'] = data_source\n print(loc_df.shape)\n \n pickle.dump(loc_df, open(\"{}/pdk-location/{}.df\".format(out_loc, pid), 'wb'), -1)",
"def populate_stat(self, table):\n myrow = table.row\n # HDF5 doesn't handle unicode strings, so we need to convert to \n # *byte* strings, which we can put in the HDF5 file \n addy = numpy.zeros(len(self.address), \n dtype=(numpy.str, glob.nchar_address))\n for i in range(len(addy)):\n addy[i] = (self.address[i]).encode('utf8')\n\n myrow[\"address\"] = addy\n myrow[\"bike_stands\"] = self.bike_stands\n myrow[\"number\"] = self.number\n myrow[\"position\"] = self.position\n myrow.append()\n table.flush()",
"def __init__(self,\n locations: List['LocationOutput']) -> None:\n self.locations = locations",
"def test_locations(self):\n url = reverse(\"locations\", args=[00000])\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertTrue(isinstance(response.data, list))\n self.assertTrue(response.data) # list not empty\n\n location_data = response.data[0]\n data_keys = [\n \"title\",\n \"address\",\n \"address2\",\n \"city\",\n \"state\",\n \"postalCode\",\n \"distance\",\n \"hours\",\n \"phone\",\n \"geocode\",\n ]\n self.assertEqual(list(location_data.keys()), data_keys)",
"def populate(self):\n\n NUM_COUNTRIES = 2 # random.randint(1, 4)\n\n # find a suitable hex\n with Timer(\"Creating initial data\", debug=self.debug):\n\n for i in range(NUM_COUNTRIES):\n country, provinces, pops = create_country(self, self.map)\n country.determine_tax_policy()\n self.countries.append(country)"
] | [
"0.6048028",
"0.6013519",
"0.59421986",
"0.5901815",
"0.58811194",
"0.5729807",
"0.5710549",
"0.56738675",
"0.56535035",
"0.554302",
"0.5542974",
"0.55208075",
"0.5469795",
"0.54697716",
"0.5454492",
"0.54353935",
"0.5430382",
"0.542676",
"0.5424422",
"0.5422587",
"0.54031056",
"0.537971",
"0.5377886",
"0.5370541",
"0.53700954",
"0.53338283",
"0.5321584",
"0.5311861",
"0.5309858",
"0.52906114"
] | 0.6565873 | 0 |
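A minimal usage sketch of the insert-and-link pattern from the populate_locations document above, run against an in-memory SQLite database. The schema, sample station id and sample location values are illustrative assumptions, not part of the dataset, and bound parameters are used in place of the original f-string SQL.

import sqlite3

# Illustrative schema only -- the real project creates these tables elsewhere.
conn = sqlite3.connect(':memory:')
cur = conn.cursor()
cur.execute('CREATE TABLE locations(id INTEGER PRIMARY KEY, name TEXT, lat REAL, lng REAL)')
cur.execute('CREATE TABLE watercourse_stations(id INTEGER PRIMARY KEY, location_id INTEGER DEFAULT 0)')
cur.execute('INSERT INTO watercourse_stations(id) VALUES (1060)')

# Shape assumed for locations.json: station id -> {name, lat, lng}.
locations = {'1060': {'name': 'Example station', 'lat': 46.68, 'lng': 16.0}}

for station_id, location in locations.items():
    cur.execute('SELECT id FROM watercourse_stations WHERE id = ?', (int(station_id),))
    if cur.fetchall():
        # Bound parameters avoid quoting problems with names containing apostrophes.
        cur.execute('INSERT INTO locations(name, lat, lng) VALUES (?, ?, ?)',
                    (location['name'], location['lat'], location['lng']))
        cur.execute('UPDATE watercourse_stations SET location_id = ? WHERE id = ?',
                    (cur.lastrowid, int(station_id)))

conn.commit()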
Check if given forecast dictionary contains a numeric value with provided key. | def is_forecast_number(key, forecast):
return key in forecast and type(forecast[key]) in [float, int] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def moreThanOne(dict, key):\n\treturn key in dict and dict[key] > 0",
"def _has_numeric_strict(self) -> bool:\n return bool({'i', 'f'} & self._data.keys())",
"def contains_200(dictnr):\n contains = False\n for i in dictnr:\n if dictnr[i] == 200:\n contains = True\n print(contains)",
"def is_key(number):\n res = False\n if is_integer(number):\n if int(number) > 0:\n res = True\n return res",
"def _cast_to_number(self, key):\n q = DBSession.query(cast(self.db_value.value, Float)). \\\n join(self.db_tag). \\\n join(self.db_key). \\\n filter(self.db_key.key == key)\n try:\n q.all()\n return True\n except:\n return False",
"def hasValue(self, key):\n return self.has_key('__' + key)",
"def _has_science_data(data_dict, particle_class):\n return_value = False\n\n # Modified to make this check more efficient\n if len(particle_class.science_parameters) < len(data_dict):\n for key in particle_class.science_parameters:\n value = data_dict.get(key, None)\n if value is not None and not(isnan(float(value))):\n return_value = True\n break\n if particle_class._data_particle_type == 'glider_eng_telemetered':\n log.info(\"GliderParser._has_science_data failed: key=[%s] value=[%s]\", key, value)\n else:\n for key, value in data_dict.iteritems():\n if not (isnan(float(value))) and key in particle_class.science_parameters:\n return_value = True\n break\n if particle_class._data_particle_type == 'glider_eng_telemetered':\n log.info(\"GliderParser._has_science_data failed: key=[%s] value=[%s]\", key, value)\n\n return return_value",
"def _has_numeric_or_bool(self) -> bool:\n dtypes: Set[str] = set(self._data.keys())\n return 'i' in dtypes or 'f' in dtypes or 'b' in dtypes",
"def contains(self, key: int) -> bool:\n if key in self.d:\n return True\n else:\n return False",
"def is_number(value):\n try:\n float(value)\n return True\n except ValueError:\n return False",
"def _is_number(value):\n try:\n float(value)\n return True\n except (TypeError, ValueError):\n return False",
"def contains(self, key):\n if key in self.nums:\n return True\n return False",
"def contains_double_count(key, value, similarity_dict):\n if value in similarity_dict.keys():\n if key in similarity_dict[value]:\n return True\n return False",
"def data_dict_points(data_dict, feature):\n return len(filter(lambda k: isinstance(data_dict[k][feature],\n (int, float)), data_dict))",
"def is_number(self,val):\n try:\n float(val)\n return True\n except ValueError:\n return False",
"def is_zero_dict( dict ):\n has_any_features = False\n for key in dict:\n has_any_features = has_any_features or dict[key]\n\n return not has_any_features",
"def exists(field):\n try:\n float(field)\n return True\n except:\n return False",
"def contains(self, key):\n\t\tfor i in self.getBitArrayIndices(key):\n\t\t\tif self.ba[i] <= 0:\n\t\t\t\treturn False\n\t\treturn True",
"def anyMoreThanOne(dict, keys):\n\tfor key in keys:\n\t\tif key in dict and dict[key] > 0:\n\t\t\treturn True\n\treturn False",
"def values_are_pandas_numbers(values: List[str]):\n for v in values:\n try:\n float(v)\n except ValueError:\n return False\n return True",
"def __contains__(self, k) :\n return k in self.precision()",
"def contains_value(kv_json, value):\n if isinstance(kv_json, str):\n kv_dict = loads(kv_json)\n for key in kv_dict:\n if kv_dict[key] == value: # Found value in dictionary\n return True\n return False\n else:\n print(\"Provide A JSON Key Value String\")",
"def is_float(possible_number):\r\n try:\r\n float(possible_number)\r\n return True\r\n except ValueError:\r\n return False",
"def checkNaN(data_dict):\n for k, v in data_dict.iteritems():\n mark = True\n for feature, value in v.iteritems():\n if (value != 'NaN') and (feature != 'poi'):\n mark = False\n break\n if mark:\n print k\n print v['poi']",
"def _check_feature_by_keys(service_data=None, service_keys=None, ns_data=None, ns_keys=None):\n\n if service_data and not isinstance(service_data, Exception) and service_keys:\n if _is_keyval_greater_than_value(service_data, service_keys):\n return True\n\n if ns_data and ns_keys:\n for ns, nsval in ns_data.iteritems():\n if not nsval or isinstance(nsval, Exception):\n continue\n if _is_keyval_greater_than_value(nsval, ns_keys):\n return True\n\n return False",
"def contains_key(kv_json, key):\n if isinstance(kv_json, str):\n kv_dict = loads(kv_json)\n try:\n res = kv_dict[key]\n return True\n except KeyError:\n return False\n else:\n print(\"Provide A JSON Key Value String\")",
"def validateNumber(key, value):\n if value is None or isinstance(value, (int, float)) and not isinstance(value, bool):\n return None\n else:\n return {'error': 'invalid value: %s (%s), valid values number/null' % (value, pythonTypeToJSONType(value))}",
"def isnumber(x):\n try:\n float(x)\n return True\n except ValueError:\n return False",
"def haskey(featureVals, fkey):\n try:\n featureVals[fkey]\n except KeyError:\n return False\n\n #warn(HASKEYMSG % (fkey))\n return True",
"def contains(self, key: int) -> bool:\n return self._find_key(key, find_empty=False) >= 0"
] | [
"0.6130827",
"0.6060219",
"0.5976841",
"0.5809231",
"0.5789039",
"0.5752602",
"0.56971747",
"0.5649147",
"0.56462014",
"0.5644481",
"0.56440645",
"0.5638638",
"0.56340736",
"0.560666",
"0.5580076",
"0.55792755",
"0.55783355",
"0.55679023",
"0.55529094",
"0.5530956",
"0.5520167",
"0.55132616",
"0.5469083",
"0.54569",
"0.54385626",
"0.54303396",
"0.54266495",
"0.5414168",
"0.5401341",
"0.54001594"
] | 0.842359 | 0 |
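An illustrative check of the one-line is_forecast_number helper from the row above; the sample hourly-forecast values below are made up.

def is_forecast_number(key, forecast):
    return key in forecast and type(forecast[key]) in [float, int]

hourly = {'temperature': 3.4, 'uvIndex': 0, 'precipType': 'snow', 'cloudCover': None}

assert is_forecast_number('temperature', hourly)     # float value
assert is_forecast_number('uvIndex', hourly)         # int value
assert not is_forecast_number('cloudCover', hourly)  # None is not numeric
assert not is_forecast_number('humidity', hourly)    # key missing entirely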
Populate weather data tables. | def populate_weather(connection):
metadata = load_metadata('weather')
cursor = connection.cursor()
water_defs = get_water_definitions()
# Check if tables are already populated.
cursor.execute('SELECT count(*) FROM weather')
weather_count = cursor.fetchone()[0]
if weather_count:
print('Weather tables already populated!')
return
print('WEATHER:')
# Darksky data
for dir_name, location in metadata.items():
print(f'\tPopulating weather: "{location["name"]}".')
# Insert location.
cursor.execute(f'''INSERT INTO locations(name, lat, lng)
VALUES ('{location['name']}', {location['lat']}, {location['lng']})''')
location_id = cursor.lastrowid
# Set weather locations for watercourses/aquifers.
for water_body in [d['body'] for d in water_defs.values()]:
if water_body in location:
cursor.execute(f'''UPDATE {water_body}s
SET location_id = {location_id}
WHERE name IN ('{"','".join(location[water_body])}')''')
break
dir_path = get_data_path('weather', 'raw', dir_name)
for json_file_name in os.listdir(dir_path):
json_path = os.path.join(dir_path, json_file_name)
with open(json_path, 'r', encoding='utf-8') as json_file:
print(f'\t\tPopulating year: {json_file_name[0:-5]}')
year_forecasts = json.load(json_file)
for date, date_forecast in year_forecasts.items():
hourly_forecasts = date_forecast['hourly']
if not hourly_forecasts:
print(f'\t\tNo hourly forecasts for {date}!')
continue
daily_forecast = {
'location_id': location_id,
'time': date_forecast['time'],
'day_time': date_forecast['sunset_time'] - date_forecast['sunrise_time'],
'precipitation': 0,
'snow_accumulation': 0
}
# List of value names with `avg`, `min` and `max` values
value_names = {
'temperature': 'temperature',
'cloud_cover': 'cloudCover',
'dew_point': 'dewPoint',
'humidity': 'humidity',
'pressure': 'pressure',
'uv_index': 'uvIndex',
'precipitation_probability': 'precipProbability',
'precipitation_intensity': 'precipIntensity'
}
# Value name counters, which indicate how many times (out of 24)
# certain value appears in hourly data.
value_counts = {k: 0 for k in value_names.keys()}
for value_name in value_names.keys():
daily_forecast[f'{value_name}_avg'] = 0.0
daily_forecast[f'{value_name}_min'] = float('inf')
daily_forecast[f'{value_name}_max'] = float('-inf')
# Calculate daily forecast values from hourly forecasts.
for hourly_forecast in hourly_forecasts:
for value_name in value_names.keys():
orig_value_name = value_names[value_name]
if is_forecast_number(orig_value_name, hourly_forecast):
daily_forecast[f'{value_name}_avg'] += hourly_forecast[orig_value_name]
daily_forecast[f'{value_name}_min'] = min(
hourly_forecast[orig_value_name],
daily_forecast[f'{value_name}_min']
)
daily_forecast[f'{value_name}_max'] = max(
hourly_forecast[orig_value_name],
daily_forecast[f'{value_name}_max']
)
value_counts[value_name] += 1
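                    # Snow accumulation is summed directly; rainfall is estimated as hourly intensity weighted by probability.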
if is_forecast_number('precipAccumulation', hourly_forecast) \
and hourly_forecast['precipType'] == 'snow':
daily_forecast['snow_accumulation'] += hourly_forecast['precipAccumulation']
elif is_forecast_number('precipIntensity', hourly_forecast) \
and is_forecast_number('precipProbability', hourly_forecast):
daily_forecast['precipitation'] += \
hourly_forecast['precipIntensity'] * hourly_forecast['precipProbability']
for value_name, value_count in value_counts.items():
if value_count:
# Calculate average.
daily_forecast[f'{value_name}_avg'] = daily_forecast[f'{value_name}_avg'] / value_count
else:
# If value never appeared
daily_forecast[f'{value_name}_avg'] = 'NULL'
daily_forecast[f'{value_name}_min'] = 'NULL'
daily_forecast[f'{value_name}_max'] = 'NULL'
cursor.execute(f'''INSERT INTO weather({', '.join(daily_forecast.keys())})
VALUES ({', '.join([str(v) for v in daily_forecast.values()])})''')
# IOT data:
for location in SETTINGS['weather_locations_iot']:
print(f'\tPopulating weather: "{location["name"]}".')
# Insert location.
cursor.execute(f'''INSERT INTO locations(name, lat, lng)
VALUES ('{location['name']}', {location['lat']}, {location['lng']})''')
location_id = cursor.lastrowid
# Set weather locations for watercourses/aquifers.
for water_body in [d['body'] for d in water_defs.values()]:
if water_body in location:
cursor.execute(f'''UPDATE {water_body}s
SET location_id = {location_id}
WHERE name IN ('{"', '".join(location[water_body])}')''')
# Set locations for all stations on given water body to match its location.
cursor.execute(f'''SELECT id
FROM {water_body}s
WHERE location_id = {location_id}''')
ids = [row[0] for row in cursor.fetchall()]
if len(ids):
cursor.execute(f'''UPDATE {water_body}_stations
SET location_id = {location_id}
WHERE {water_body}_id IN ({', '.join([str(v) for v in ids])})''')
break
file_name = f'''{location['lat']}-{location['lng']}.json'''
json_path = get_data_path('weather', 'raw', file_name)
# If data file doesn't exist, download it first.
if not os.path.isfile(json_path):
            with open(json_path, 'wb') as file:
file.write(read_from_url(location['url'], decode=False))
with open(json_path, 'r', encoding='utf-8') as json_file:
row_names = {
"Sun_duration": "sun_duration",
"CloudCover": "cloud_cover_avg",
"Percipitation": "precipitation",
"New_snow_blanket": "snow_accumulation",
"Snow_blanket": "snow_depth",
"TemperatureAvg": "temperature_avg",
"TemperatureMin": "temperature_min",
"TemperatureMax": "temperature_max"
}
forecasts = json.load(json_file)
for forecast in forecasts:
f = {row_names[k]: forecast[k] for k in row_names.keys()}
f['location_id'] = location_id
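                # LastUpdatedEpoch is in milliseconds; store the timestamp in whole seconds.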
f['time'] = round(forecast['LastUpdatedEpoch'] / 1000)
cursor.execute(f'''INSERT INTO weather({', '.join(f.keys())})
VALUES ({', '.join([str(v) for v in f.values()])})''') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def populate_water_tables(connection):\n metadata = load_metadata('water')\n cursor = connection.cursor()\n\n # Check if tables are already populated.\n cursor.execute('SELECT count(*) FROM watercourses')\n watercourse_count = cursor.fetchone()[0]\n cursor.execute('SELECT count(*) FROM aquifers')\n aquifer_count = cursor.fetchone()[0]\n\n if watercourse_count and aquifer_count:\n print('Water tables already populated!')\n return\n\n station_data = get_station_data()\n\n for archive in metadata.keys():\n print(f'{archive}-water:'.upper())\n water_body = get_water_definitions(archive)['body']\n\n # 1. Populate watercourses/aquifers:\n stations = {}\n for water_body_name in metadata[archive].keys():\n print(f'\\tPopulating {water_body}: \"{water_body_name}\"')\n cursor.execute(f'''INSERT INTO {water_body}s(location_id, name)\n VALUES (0, '{water_body_name}')''')\n water_body_id = cursor.lastrowid\n\n # 2. Populate watercourse_stations/aquifer_stations:\n for station_id in metadata[archive][water_body_name]['stations']:\n station_name = clean_name(metadata[archive][water_body_name]['stations'][station_id]['name'])\n\n if station_id in stations:\n # Prefer watercourses/aquifer with more stations\n current_len = len(metadata[archive][water_body_name]['stations'])\n previous_len = len(metadata[archive][stations[station_id]]['stations'])\n\n if current_len < previous_len:\n print(f'\\t\\tStation already exists: {station_id} - \"{station_name}\" (\"{water_body_name}\")')\n continue\n else:\n cursor.execute(f'''DELETE \n FROM {water_body}_stations\n WHERE id = {station_id}''')\n print(f'\\t\\tRemoved station: {station_id} - \"{station_name}\" from \"{stations[station_id]}\")')\n\n stations[station_id] = water_body_name\n print(f'\\t\\tPopulating station: {station_id} - \"{station_name}\"')\n\n # Insert station location if station data exists.\n location_id = 0\n station_row = station_data.query(f'ŠIFRA == \"{station_id}\"')\n if not station_row.empty:\n index = station_row.index[0]\n lat = station_row.at[index, 'LAT']\n lng = station_row.at[index, 'LON']\n if not np.isnan(lat) and not np.isnan(lng):\n name = f\"{station_row.at[index, 'VODOMERNA POSTAJA']} ({station_row.at[index, 'VODOTOK']})\"\n cursor.execute(f'''INSERT INTO locations(name, lat, lng)\n VALUES ('{name}', {lat}, {lng})''')\n location_id = cursor.lastrowid\n\n # Insert station.\n cursor.execute(f'''INSERT INTO {water_body}_stations(id, {water_body}_id, location_id, name)\n VALUES ({station_id}, {water_body_id}, {location_id}, '{station_name}')''')\n\n # 3. Populate watercourse_measurements/aquifer_measurements:\n if not populate_water_measurements(cursor, archive, metadata[archive][water_body_name]['dir'],\n station_id):\n cursor.execute(f'''DELETE \n FROM {water_body}_stations\n WHERE id = {station_id}''')\n print(f'\\t\\tRemoved station with useless data: {station_id} - \"{station_name}\"')\n\n # Remove empty watercourses/aquifers.\n cursor.execute(f'''SELECT w.id, w.name\n FROM {water_body}s w\n WHERE NOT EXISTS (\n SELECT s.id \n FROM {water_body}_stations s \n WHERE w.id = s.{water_body}_id\n )''')\n\n for row in cursor.fetchall():\n cursor.execute(f'''DELETE \n FROM {water_body}s\n WHERE id = {row[0]}''')\n print(f'\\tRemoved empty {water_body}: \"{row[1]}\"')",
"def generate_weather_data(self):\n months = pd.to_datetime(self.output['Local Time']).dt.month\n self.output['Month'] = months # set month values for later joins\n\n # merge output data frame with historical data to get ranges\n keys = ['Location', 'Month']\n m = pd.merge(self.output, self.histdata, how='left',\n left_on=keys, right_on=keys)\n\n # uniformly select random pressure, temperature\n # and humidity values between the historical max and min ranges\n r = np.random.rand(m.shape[0])\n m['Temperature'] = ((m['Tmean_high'] - m['Tmean_low']\n ) * r + m['Tmean_low']).round(1)\n m['Pressure'] = ((m['Pmax'] - m['Pmin']) * r + m['Pmin']).round(1)\n m['Humidity'] = ((m['Hmax'] - m['Hmin']) * r + m['Hmin']).astype(int)\n\n # drop redundant columns and assign to output\n dcols = ['Month', 'Timezone', 'Pmax', 'Pmin',\n 'Hmax', 'Hmin', 'Tmean_high', 'Tmean_low']\n m.drop(columns=dcols, inplace=True)\n self.output = m",
"def init_and_update_observe_table(self):\n # print CHN_CITY_LIST_FILE\n location = ''\n id = ''\n f = open(CHN_CITY_LIST_FILE, 'r')\n for line in f.readlines():\n line_list = line.strip('\\n').split(':')\n location = line_list[0]\n id = line_list[1]\n pm = get_pm(location)\n # get current weather\n weather_dict = get_open_weather(id)\n if weather_dict not in ('', None, [], {}):\n if 'error' in pm or pm == False:\n weather_dict['aqi'] = '无数据'#'N/A'\n else:\n weather_dict['aqi'] = pm['quality'] + '(' + str(pm['aqi']) + ')'\n db_record = self.db.search_observe_record(str(id))\n # db_record = []\n now_date = get_local_format_time()\n if db_record != []:#update\n self.db.update_observe_data(weather_dict['ptime'],weather_dict['time'],now_date,weather_dict['WD'],weather_dict['WS'],weather_dict['SD'],weather_dict['weather'],weather_dict['img1'],weather_dict['img2'],weather_dict['temp'],weather_dict['temp1'],weather_dict['temp2'],weather_dict['aqi'],id)\n else:#insert\n self.db.insert_observe_data(id,weather_dict['city'],weather_dict['ptime'],weather_dict['time'],now_date,weather_dict['WD'],weather_dict['WS'],weather_dict['SD'],weather_dict['weather'],weather_dict['img1'],weather_dict['img2'],weather_dict['temp'],weather_dict['temp1'],weather_dict['temp2'],weather_dict['aqi'])\n f.close()\n return True",
"def insert_humans_staging(self):\n for year in range(1880, CURRENT_YEAR):\n self.load_wikidata(\"humans\", HUMANS_BY_YEAR_SPARQL_QUERY, INSERT_HUMAN_SQL_QUERY,\n INSERT_HUMAN_MAP_COLUMNS, year=year)",
"def populate_database(telescope_name, instrument_name):\n telescope = Telescope.objects.create(\n name=telescope_name, latitude=25.0, longitude=45.0)\n instrument = Instrument.objects.create(\n name=instrument_name, telescope=telescope)\n for year_int in (2012, 2013):\n for month_int in range(1, 13):\n for night_int in (1, monthrange(year_int, month_int)[1]):\n ut_date = date(year_int, month_int, night_int)\n night = Night.objects.create(\n ut_date=ut_date, instrument=instrument, observers='Smith')\n Exposure.objects.create(\n night=night, run_number=1, ut_start=time(10, 0, 0),\n exposed=20.0, ra=60.0, dec=30.0, object_exp=True)\n Exposure.objects.create(\n night=night, run_number=2, ut_start=time(11, 0, 0),\n exposed=30.0, ra=90.0, dec=0.0, object_exp=True)\n Exposure.objects.create(\n night=night, run_number=3, ut_start=time(12, 0, 0),\n exposed=40.0, ra=120.0, dec=-30.0, object_exp=False)",
"def get_data(table_name, end, num, start=None):\n if start == None:\n if table_name == \"days\": start = end - timedelta(days=num-1) \n if table_name == \"weeks\": start = end - timedelta(weeks=num-1) \n if table_name == \"months\": start = end - relativedelta(months=+num-1) \n if table_name == \"years\": start = end - relativedelta(years=+num-1) \n else: \n start = days.get_entry(table_name, start).date\n \n dates = []\n data = []\n weather = []\n density = []\n \n while start <= end:\n entry = days.get_entry(table_name, start)\n data.append(entry.sentiment)\n \n if table_name == \"days\": \n dates.append(entry.date.strftime(\"%B %d, %Y\"))\n start = start + timedelta(days=1)\n if table_name == \"weeks\": \n dates.append(entry.date.strftime(\"%B %d, %Y\"))\n start = start + timedelta(weeks=1) \n if table_name == \"months\": \n dates.append(entry.date.strftime(\"%B %Y\"))\n start = start + relativedelta(months=+1) \n if table_name == \"years\": \n dates.append(entry.date.strftime(\"%Y\"))\n start = start + relativedelta(years=+1) \n\n # 7/15/15 is the last entry in the current weather dictionary\n num_days = (min(start, date(2015,7,15)) - entry.date).days\n temp = {entry.date + timedelta(days=i): weather_dict[entry.date + timedelta(days=i)] for i in range(num_days)}\n weather.append(float(sum(temp.values()))/float(len(temp)))\n\n if density_dict != None:\n d = max(entry.date, date(2014,7,1))\n num_days = (min(start, date(2015,7,28)) - d).days\n rho = {d + timedelta(days=i): density_dict[d + timedelta(days=i)] for i in range(num_days)}\n density.append(float(sum(rho.values()))/float(len(rho)))\n\n return dates, data, weather, density",
"def setup_table(self):\n\n self.setup.create_basic_table_in_dev()\n self.setup.insert_random_records_into_dev()",
"def populate_db():\n\n populate_table(db, models.Department, departments_data)\n populate_table(db, models.Employee, employees_data)",
"def sql_functions(cities, weather):\n con = lite.connect(\"sql_database.db\")\n tables_tuple = (\"cities\", \"weather\")\n with con:\n #Generate tables in database:\n cur = con.cursor() #Get cursor object\n for table in tables_tuple:\n cur.execute(\"DROP TABLE IF EXISTS {0}\".format(table)) #Drop tables if they already exist.\n cur.execute(\"CREATE TABLE cities (name text, state text)\")\n cur.execute(\"CREATE TABLE weather (city text, year integer, warm_month text, cold_month text, average_high integer)\")\n #Populate tables in database:\n cur.executemany(\"INSERT INTO cities VALUES (?,?)\", cities)\n cur.executemany(\"INSERT INTO weather VALUES (?,?,?,?,?)\", weather)\n #Retrieve data from database:\n cur.execute(\"SELECT * FROM cities INNER JOIN weather ON city = name\")\n rows = cur.fetchall()\n cols = [desc[0] for desc in cur.description]\n output_dataframe = pd.DataFrame(rows, columns = cols)\n \n return output_dataframe",
"def create_tables(cxn):\n\tcursor = cxn.cursor()\n\tcursor.execute(\"DROP TABLE IF EXISTS WEATHER\")\n\tcursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS WEATHER(\n\t\tstate varchar(3),\n\t\tcity varchar (15),\n\t\tobs_date varchar(12),\n\t\thour int,\n\t\tminute int,\n\t\tcurr_temp float,\n\t\tunique(state, city, obs_date, hour, minute)\n\t\t)\"\"\")\n\tcursor.close()",
"def create_db(temp: list, rain: list, humidity: list, wind: list) -> dict:\r\n weather = {}\r\n for i in range(len(temp)):\r\n weather[i+1] = [temp[i], rain[i], humidity[i], wind[i]]\r\n return weather",
"def _setupWeather(self, w, config):\n wnames = ('cloud', 'seeing')\n if w not in wnames:\n raise Exception('w should be one of %s' %(wnames))\n filename = config['%s_datafile' %(w)]\n file = open(filename, 'r')\n # Also assume flat file contains only date / value in a space or tab separated file. \n self.dates[w] = []\n self.weather[w] = []\n # Read the data file.\n print '# Reading weather data file %s' %(filename)\n for line in file:\n if line.startswith('#') | line.startswith('!'):\n continue\n self.dates[w].append(line.split()[0])\n self.weather[w].append(line.split()[1])\n file.close()\n self.dates[w] = numpy.array(self.dates[w], float)\n self.weather[w] = numpy.array(self.weather[w], float)\n # Check the total amount of data (mostly for user awareness):\n print '# Read %d weather values from %s file. ' %(len(self.weather[w]), filename)\n # Check that weather data is monotonically increasing in time. \n if not(numpy.all(numpy.diff(self.dates[w]))):\n order = self.dates[w].argsort()\n self.weather[w] = self.weather[w][order]\n self.dates[w] = self.dates[w][order]\n # Get the total length of time included in this (seeing/cloud) file,\n # so that we can determine a wrap-around date if we need that.\n self.maxtime[w] = self.dates[w].max()\n return",
"def set_tables(self):\n with sql.connect('./{}.db'.format(self.name)) as conn:\n conn.execute(\"\"\"CREATE TABLE IF NOT EXISTS tweets(\n id INTEGER PRIMARY KEY,\n tweet_id INTEGER,\n insert_date TEXT,\n created_at TEXT,\n hashtag TEXT)\n \"\"\")\n\n conn.execute(\"\"\"CREATE TABLE tweet_peaks(\n peak_datetime TEXT NOT NULL,\n hashtag TEXT NOT NULL,\n time_frame TEXT,\n mean REAL,\n std REAL,\n sensibility REAL,\n freq_limit REAL,\n qt_tweets INTEGER,\n id TEXT PRIMARY KEY,\n probability REAL);\n \"\"\")",
"def load_up_initial_db(self, date_dict):\n df_tot = []\n for chunk in pd.read_sql_table(self.table, self.disk_engine, chunksize=10000, parse_dates=date_dict):\n df_tot.append(chunk)\n self.df = pd.concat(df_tot)",
"def import_weather(keys):\n # imports weather and cleans\n df_all_weather = get_weather_as_df(keys)\n return clean_weather_df(df_all_weather)",
"def update_weather(location_request, db):\n with open(expanduser(\"~/bin/my_utilities/config/darksky-key\")) as f:\n ds_key = f.readline().strip()\n current = []\n current_day = 0\n with forecast(ds_key, *location_request, units=\"uk2\") as location:\n raw = location['hourly']['data'][0]\n current.append(datetime.datetime.now().hour)\n current.append(day_relative_to_absolute(current_day))\n current.append(raw[\"temperature\"])\n current.append(raw[\"apparentTemperature\"])\n current.append(raw[\"precipIntensity\"])\n current.append(raw[\"precipProbability\"] * 100)\n current.append(raw[\"humidity\"] * 100)\n current.append(raw[\"dewPoint\"])\n current.append(raw[\"windSpeed\"])\n current.append(raw[\"windBearing\"])\n current.append(raw[\"windGust\"])\n current.append(raw[\"pressure\"])\n current.append(raw[\"cloudCover\"] * 100)\n current.append(raw[\"uvIndex\"])\n current.append(raw[\"visibility\"])\n current = format_list_for_db(current)\n\n columns = [\"hour\", \"day\", \"temp\", \"apptemp\", \"precipint\", \"precipprob\",\n \"humidity\", \"dewpoint\", \"windspeed\", \"windbearing\",\n \"windgust\", \"pressure\", \"cloudcover\", \"uvindex\", \"visibility\"]\n columns = format_list_for_db(columns)\n statement = f\"INSERT INTO WEATHER {columns} VALUES {current}\"\n print(statement)\n cursor = db.cursor()\n cursor.execute(statement)\n cursor.close()",
"def compute_aggregate_weather_data():\n\n # get a list of all the csv files names in the 'weather_data' directory\n files = get_all_csv_files_in_directory('weather_data')\n\n # Todo: if the number of csv files doesn't match the expected value, unzip remaining using the 'os' module\n\n if len(files) == 0:\n\n # Unzip all files in current directory and subdirectories\n print \"unzipping weather files...\"\n os.system(\"unzip 'weather_data/*.zip' -d weather_data\")\n\n\n # Try again to get files\n files = get_all_csv_files_in_directory('weather_data')\n\n # Throw exception if still missing csv files\n if len(files) == 0:\n raise ValueError(\"Missing weather data in csv format in the 'weather_data' directory\")\n\n # convert the list of csv file names to a list of corresponding DataFrames\n dallas_files = filter(lambda file_name : \"KDAL\" in file_name, files)\n houston_files = filter(lambda file_name : \"KHOU\" in file_name, files)\n san_antonio_files = filter(lambda file_name : \"KSAT\" in file_name, files)\n\n print \"Retrieved weather data files...\"\n print \"\\t# of Dallas weather files found: \", len(dallas_files)\n print \"\\t# of Houston weather files found: \", len(houston_files)\n print \"\\t# of San Antonio weather files found: \", len(san_antonio_files)\n\n dallas_dfs = map(lambda file_name: read_weather_data_from_csv(\"./weather_data/\" + file_name), dallas_files)\n houston_dfs = map(lambda file_name: read_weather_data_from_csv(\"./weather_data/\" + file_name), houston_files)\n san_antonio_dfs = map(lambda file_name: read_weather_data_from_csv(\"./weather_data/\" + file_name), san_antonio_files)\n\n dallas_df = pd.concat(dallas_dfs)\n houston_df = pd.concat(houston_dfs)\n san_antonio_df = pd.concat(san_antonio_dfs)\n\n print \"Aggregating all of the weather data...\"\n # fold the list of data frames into a single data frame\n aggregate_df = reduce(lambda df1, df2: pd.merge(df1, df2, on=\"Date\", how=\"outer\"), [dallas_df, houston_df, san_antonio_df]).sort_values(\"Date\")\n\n return aggregate_df",
"def populate_db():\n stdout.write('Emptying the tables...\\n')\n empty_tables()\n stdout.write('Populating Language records...\\n')\n populate_language()\n stdout.write('Populating Lemma, Wordform, and Definition records...\\n')\n populate_lexical()\n stdout.write('Populating ProperName records...\\n')\n populate_proper_names()",
"def populate_stops(self):\n stops = self.load_csv('stops.txt')\n stops = self.process_stops(stops)\n\n connection = db.connect()\n for stop in stops:\n try:\n connection.execute(schema.stops.insert(), stop)\n except DataError:\n print \"Missing data for stop: %s\" % (stop)",
"def get_weather_data():\n keys = ['1364038.csv',\n '1364041.csv',\n '1364042.csv',\n '1364043.csv',\n '1364044.csv',\n '1364046.csv',\n '1364047.csv',\n '1364048.csv',\n '1364051.csv',\n '1364052.csv',\n '1364053.csv',\n '1364054.csv',\n '1364055.csv',\n '1364058.csv',\n '1364059.csv',\n '1364060.csv',\n '1364061.csv',\n '1364062.csv',\n '1364063.csv',\n '1364064.csv',\n '1364066.csv']\n df_weather = import_weather(keys)\n df_weather_dist = df_weather[[\n 'LATITUDE', 'LONGITUDE', 'name']].drop_duplicates().reset_index()\n return df_weather, df_weather_dist",
"def collect(self, start_date=None, end_date=None):\n if start_date is None:\n start_date = self.default_start\n if end_date is None:\n end_date = self.default_end\n\n cur = self.conn.cursor()\n\n # Maximum return is 1000 entries\n num_days = 1000 // len(self.stations)\n # Maximum date-range is 1 year\n if num_days > 365:\n num_days = 365\n\n for interval in netzero.util.time_intervals(\n start_date, end_date, days=num_days\n ):\n netzero.util.print_status(\n \"Weather\",\n \"Collecting: {} to {}\".format(\n interval[0].strftime(\"%Y-%m-%d\"), interval[1].strftime(\"%Y-%m-%d\")\n ),\n )\n\n # TODO -- REMOVE ASSUMPTION THAT LEN(DATA) < LIMIT\n raw_data = self.query_api(interval[0], interval[1])\n\n if raw_data is None:\n print(\"ERROR QUERYING API\") # TODO exception here?\n continue\n\n for entry in raw_data.get(\"results\", []):\n # Insert the weather data to the table, to be averaged later\n date = datetime.datetime.strptime(\n entry[\"date\"], \"%Y-%m-%dT%H:%M:%S\"\n ).date()\n value = entry[\"value\"]\n station = entry[\"station\"]\n\n cur.execute(\n \"INSERT OR IGNORE INTO weather VALUES (?, ?, ?)\", (date, value, station)\n )\n\n self.conn.commit()\n\n cur.close()\n\n netzero.util.print_status(\"Weather\", \"Complete\", newline=True)",
"def setUpClass(cls):\n dt_index = pd.date_range(start=datetime(2019, 1, 1, 0, 1), periods=15,\n freq='1Min')\n\n # Create a temperature array with an average of 2.\n temp = [1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3]\n\n # Create ghi array with an average of 3.\n ghi = [2, 3, 4, 2, 3, 4, 2, 3, 4, 2, 3, 4, 2, 3, 4]\n\n # Create DataFrame.\n cls.weather_data = pd.DataFrame({'temperature': temp, 'ghi': ghi},\n index=dt_index)\n\n # Create expected data.\n dt_index_2 = pd.date_range(start=datetime(2019, 1, 1, 0, 15), periods=1,\n freq='15Min')\n cls.expected_data = pd.DataFrame({'temperature': [2], 'ghi': [3]},\n index=dt_index_2)",
"def read_weather(self):\n print \"Reading weather data from file\",self.datafile\n tab = ascii.read(self.datafile)\n \n # Fix 'T' values in precipitation column, which represent tiny\n # amounts of rain (not measurable)\n TINY_VALUE = '.005' # 0.005 is half the smallest measurable value\n rain = tab['PrecipitationIn']\n wbad = (rain == 'T')\n rain[wbad] = TINY_VALUE\n rain = numpy.array(rain).astype(\"float\")\n\n # Replace string version of precip with float version\n tab['PrecipIn'] = rain\n tab.remove_column('PrecipitationIn')\n\n self.table = tab",
"def update(self):\n if self.last_update and (\n self.last_update + timedelta(hours=1)\n > datetime.utcnow().replace(tzinfo=dt_util.UTC)\n ):\n return # Not time to update yet; data is only hourly\n\n for row in self.current_observations():\n if row.get(\"Station\") == self._station_id:\n api_fields = {\n col_heading: (standard_name, dtype)\n for standard_name, (\n _,\n _,\n _,\n col_heading,\n dtype,\n ) in SENSOR_TYPES.items()\n }\n self.data = {\n api_fields.get(col_heading)[0]: api_fields.get(col_heading)[1](\n v.replace(\",\", \".\")\n )\n for col_heading, v in row.items()\n if col_heading in api_fields and v\n }\n break\n else:\n raise ValueError(f\"No weather data for station {self._station_id}\")",
"def create_table(self):\n # Connect to database\n conn = sqlite3.connect(self)\n # Create a cursor\n c = conn.cursor()\n\n # Create a Table\n c.execute(\"\"\"CREATE TABLE weather (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n sensor text,\n location text,\n temperature real,\n description text,\n time text\n )\"\"\")\n # Commit our command\n conn.commit()\n # Close our connection\n conn.close()",
"def init_datasets(self, dataset_names, columns):\n for dataset_name in dataset_names:\n hdf5_dataset_name = self.schema.get(dataset_name)\n if hdf5_dataset_name is None:\n warnings.warn(\"Skipping %s (not in schema)\" % dataset_name)\n else:\n self[dataset_name] = tokio.timeseries.TimeSeries(dataset_name=hdf5_dataset_name,\n start=self.query_start,\n end=self.query_end_plusplus,\n timestep=self.timestep,\n num_columns=len(columns),\n column_names=columns,\n sort_hex=self.sort_hex)",
"def populate(self):\n\n NUM_COUNTRIES = 2 # random.randint(1, 4)\n\n # find a suitable hex\n with Timer(\"Creating initial data\", debug=self.debug):\n\n for i in range(NUM_COUNTRIES):\n country, provinces, pops = create_country(self, self.map)\n country.determine_tax_policy()\n self.countries.append(country)",
"def testWeatherFetch(self):\n\n timeCol = 'timestamp'\n rows = []\n for row in self.aggregator.rawData(dataType = 'weather',\n orderBy = [timeCol],\n timestampCol = timeCol,\n startDate = self.testStart,\n endDate = self.testEnd):\n rows.append(row)\n self.assertIsNotNone(rows, 'Rows are present.')",
"def populate_database(self):\n self.insert_products()\n self.insert_categories()\n self.insert_products_categories()\n self.insert_stores()\n self.insert_products_stores()",
"def create_all_tables(self):\n pass"
] | [
"0.6654514",
"0.66226166",
"0.6348557",
"0.632052",
"0.62991303",
"0.62082505",
"0.61876976",
"0.6142009",
"0.60627335",
"0.6002963",
"0.5983051",
"0.59750384",
"0.5964339",
"0.5926051",
"0.59136045",
"0.59097177",
"0.58774203",
"0.5861508",
"0.5857061",
"0.5843847",
"0.58242774",
"0.58109164",
"0.58074266",
"0.5801949",
"0.57935333",
"0.57928663",
"0.5791196",
"0.5760634",
"0.57488865",
"0.5742264"
] | 0.78376013 | 0 |
Helper function to construct multidimensional dictionaries, e.g. myhash = _makehash(); myhash[1][2] = 4; myhash[2][5][8] = 17 | def _makehash():
return defaultdict(_makehash) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def hashMap(self,arr):\r\n n = len(arr)\r\n dict1 = {}\r\n i = 1\r\n for i in range(n): \r\n if(i > 0): \r\n key=arr[i]\r\n value=arr[0]\r\n dict1[key] = value\r\n return dict1",
"def __init__(self):\n self.hashmap = [[[],[]] for _ in range(self.N)]",
"def boardtohashmap(board_2d: List[List[str]]) -> Dict[Tuple[int, int], Gridspace]:\n\n nrows, ncols = len(board_2d), len(board_2d[0])\n return {\n (r, c): Gridspace(r, c, board_2d[r][c], nrows, len(board_2d[r]))\n for r in range(nrows) for c in range(len(board_2d[r]))\n }",
"def generate_dict(length):\r\n primeDict = {}\r\n index = 2\r\n \r\n while (index < length):\r\n primeDict[index]=True\r\n index = index+1\r\n \r\n return primeDict",
"def _build_hash_table(arr: [str]):\n ht = {}\n for cur_str in arr:\n\n anagram = cur_str[::-1]\n if cur_str in ht.keys():\n # This string is an anagram of some previous\n # Increase anagram count for hash table item\n (original, orig_cnt, anag_cnt) = ht[cur_str]\n ht[cur_str] = (original, orig_cnt, anag_cnt + 1)\n elif anagram in ht.keys():\n # This string equals to some prevoius\n # Increase original count for hash table item\n (original, orig_cnt, anag_cnt) = ht[anagram]\n ht[anagram] = (original, orig_cnt+1, anag_cnt)\n else:\n # This string is new\n ht[anagram] = (cur_str, 1, 0)\n return ht",
"def _make_hashable(items):\n\n def convert(x):\n # Perform any conversions here to make a variable hashable\n if isinstance(x, np.ndarray):\n # Create an sha1 of the data, and throw in a string\n # and the shape.\n return ('__type_np.ndarray', x.shape,\n xxhash.xxh3_128_hexdigest(x))\n elif isinstance(x, (list, tuple)):\n return _make_hashable(x)\n elif isinstance(x, dict):\n return _make_hashable(sorted(x.items()))\n return x\n\n return tuple(map(convert, items))",
"def createdict(Matrix,List):\r\n n = len(List)\r\n #to get all possible combinations\r\n input_combns = list(itertools.combinations(range(0,n),2))\r\n d = defaultdict(dict)\r\n for x in input_combns:\r\n i,j = x\r\n p,q = List[i],List[j]\r\n d[p][q] = Matrix[i][j]\r\n return d",
"def generate_grid_dict(height, width):\n board = {}\n for i in range(height):\n for j in range(width):\n position = (i, j)\n board[position] = 0\n return board",
"def _hash(self) -> None:\r\n # for a unit cube there are 8 possible hashes\r\n # returns the tuple of with all 8 hashes\r\n\r\n self.hashes[\"aaa\"] = P[P[P[self.xi] + self.yi] + self.zi]\r\n self.hashes[\"aab\"] = P[P[P[self.xi] + self.yi] + self._inc(self.zi)]\r\n self.hashes[\"aba\"] = P[P[P[self.xi] + self._inc(self.yi)] + self.zi]\r\n self.hashes[\"abb\"] = P[P[P[self.xi] + self._inc(self.yi)] + self._inc(self.zi)]\r\n self.hashes[\"baa\"] = P[P[P[self._inc(self.xi)] + self.yi] + self.zi]\r\n self.hashes[\"bab\"] = P[P[P[self._inc(self.xi)] + self.yi] + self._inc(self.zi)]\r\n self.hashes[\"bba\"] = P[P[P[self._inc(self.xi)] + self._inc(self.yi)] + self.zi]\r\n self.hashes[\"bbb\"] = P[P[P[self._inc(self.xi)] + self._inc(self.yi)] + self._inc(self.zi)]",
"def initialize_d(d, square_sides, offset=0):\n return {key:[] for key in range(offset, square_sides ** 2 + offset)}",
"def fresh_hash(self):\n _h = defaultdict(lambda: 0)\n very_small = 0.000000000001\n for g in self.groups: _h[g] = { \"total\": very_small, \"var_all\": 0 }\n return _h",
"def make_dict(unused_s, unused_l, toks):\n result = {}\n key_value_pairs = chunks(toks, 2)\n for key_value_pair in key_value_pairs:\n result[key_value_pair[0]] = key_value_pair[1]\n return result",
"def create_dictionary():\n d = {}\n for y in range(HEIGHT):\n if (y % 2) != 0:\n pos = (10*y)+10\n else:\n pos =((10*y)-9)+10 \n for x in range(WIDTH):\n xy_tuple = (x,y)\n d[pos] = xy_tuple\n if (y % 2) != 0:\n pos = pos - 1\n else:\n pos = pos + 1\n \n return d",
"def create_dict(*args):\n output = {}\n idx = 0\n while idx < len(args):\n output[args[idx + 1]] = args[idx]\n idx += 2\n\n return output",
"def __init__(self,n):\n\t\tself._dictOut={}\n\t\tself._dictIn = {}\n\t\tfor i in range(n):\n\t\t\tself._dictOut[i]=[]\n\t\t\tself._dictIn[i] = []",
"def build_anagram_dict(word_gen, starting_dict={}, hash_fn=lambda x: tuple(sorted(x))):\n\n dict = starting_dict\n\n for word in word_gen:\n key = hash_fn(word)\n # Using dictionary as hashtable to eliminate duplicates (when reading from literature etc)\n word_list = dict.get(key, {})\n word_list[word] = None\n dict[key] = word_list\n\n return dict",
"def createMap(*values):\n\tresult = dict()\n\tfor i in range(0, len(values), 2):\n\t\tresult[values[i]] = values[i+1]\n\treturn result",
"def Dictionary_create_from(nMarkers, markerSize, baseDictionary):\n pass",
"def get_hash_map(init_addr):\n addr = init_addr\n hash_map = []\n for i in range(0, len(WIN_HASH), 2):\n pair = WIN_HASH[i:i+2]\n hash_map.append((addr, pair[1]))\n hash_map.append((addr+1, pair[0]))\n addr += 8\n\n return hash_map",
"def DictFunction2():\r\n print \"Create Second Dictionary\"\r\n NumberDict = dict(zip((i for i in range(16)), (hex(i) for i in range(16))))\r\n print NumberDict",
"def __init__(self, n):\n self._dictOut = {}\n self._dictIn = {}\n for i in range(n):\n self._dictOut[i] = []\n self._dictIn[i] = []",
"def Dictionary_create(nMarkers, markerSize):\n pass",
"def make_hash(o):\n\n if isinstance(o, (set, tuple, list)):\n\n return hash( tuple([make_hash(e) for e in o]) )\n\n elif not isinstance(o, dict):\n\n return hash(o)\n\n new_o = copy.deepcopy(o)\n for k, v in new_o.items():\n new_o[k] = make_hash(v)\n\n return hash(tuple(frozenset(sorted(new_o.items()))))",
"def create_pristine_board(size=100):\n board = defaultdict(dict)\n\n for i in xrange(1, size + 1):\n board[i] = {j: (j - i) for j in xrange(min(i + 1, size + 1), min(i + 7, size + 1))}\n\n return board",
"def construct(self):\n\n newSet = {}\n current_index = 0\n\n for key_1, value_1 in self._sets[self._currentSet].items():\n current_index += 1\n for key_2,value_2 in list(self._sets[self._currentSet].items())[current_index:]:\n # join the 2 tuples\n join = key_1 + key_2\n # remove duplicates\n join = tuple(set(join))\n # get combinations\n combined = tuple(combinations(join, self._currentSet+1))\n # sort combination\n combined = tuple(sorted(combined[0]))\n\n # append new combination to dict\n if len(combined) != 0 :\n newSet[combined] = 0\n\n self._currentSet += 1\n # append the new itemset in the sets dict \n self._sets[self._currentSet] = newSet",
"def __create_level_entries_dict__(self,\n tree_level_labels,\n tree_level_values,\n ):\n # | - create_level_entries_dict\n level_entries_dict = {}\n for index, variable in enumerate(tree_level_labels):\n level_entries_dict[variable] = tree_level_values[index]\n\n return(level_entries_dict)\n # __|",
"def test_hash_numpy():\r\n rnd = np.random.RandomState(0)\r\n arr1 = rnd.random_sample((10, 10))\r\n arr2 = arr1.copy()\r\n arr3 = arr2.copy()\r\n arr3[0] += 1\r\n obj_list = (arr1, arr2, arr3)\r\n for obj1 in obj_list:\r\n for obj2 in obj_list:\r\n yield nose.tools.assert_equal, hash(obj1) == hash(obj2), \\\r\n np.all(obj1 == obj2)\r\n\r\n d1 = {1: arr1, 2: arr1}\r\n d2 = {1: arr2, 2: arr2}\r\n yield nose.tools.assert_equal, hash(d1), hash(d2)\r\n\r\n d3 = {1: arr2, 2: arr3}\r\n yield nose.tools.assert_not_equal, hash(d1), hash(d3)\r\n\r\n yield nose.tools.assert_not_equal, hash(arr1), hash(arr1.T)",
"def _build_board(y_size, x_size, game_board):\n\n for y_coordinate in range(1, y_size + 1):\n for x_coordinate in range(1, x_size + 1):\n game_board[(x_coordinate, y_coordinate)] = {0: {}, 1: {}, 2: {}}",
"def _make_limb_dict():\n\n return {'left_arm_y': 10, 'right_arm_y': 13,\n 'left_arm_z': 11, 'right_arm_z': 14,\n 'left_leg_y': 4, 'right_leg_y': 7,\n 'left_leg_z': 5, 'right_leg_z': 8,\n 'hip_y': 2, 'hip_x': 1}",
"def make_dict(keys, values):\n\n return dict(zip(keys, values))"
] | [
"0.67796296",
"0.5938041",
"0.5885213",
"0.5752976",
"0.5734703",
"0.5709291",
"0.5704826",
"0.5652041",
"0.5644233",
"0.56247675",
"0.56185186",
"0.5611323",
"0.56072676",
"0.5539448",
"0.5488976",
"0.5445115",
"0.54388916",
"0.54211164",
"0.54074925",
"0.53994673",
"0.537734",
"0.5345715",
"0.53311265",
"0.53041047",
"0.5297536",
"0.52854407",
"0.52676785",
"0.5266973",
"0.5258118",
"0.5250899"
] | 0.6773854 | 1 |
Convert headers of fetched tickers to the same format for convenient data storage in the database. This method assumes that the parser's headers are configured properly (headers_dict); if one of the headers is missing from the config file, an exception is raised. | def convert_headers(self, tickers):
result = _makehash()
for pair_name, fetched_values_dict in list(tickers.items()):
for header, value in list(fetched_values_dict.items()):
result[pair_name][self.config['headers'][header]] = value
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _parse_headers(headers):\n\n headers_new = []\n # reformat column headers if needed\n for j, hd in enumerate(headers):\n # rename so always have T1/2 (s)\n if hd == \"T1/2 (num)\" or hd == \"T1/2 (seconds)\":\n hd = \"T1/2 (s)\"\n # for uncertainties, add previous column header to it\n if j > 0 and \"Unc\" in hd:\n hd = headers[j - 1] + \" \" + hd\n if \"Unc\" in hd and \"Unc.\" not in hd:\n hd = hd.replace(\"Unc\", \"Unc.\")\n # expand abbreviated headers\n if \"Energy\" in hd and \"Energy Level\" not in hd:\n hd = hd.replace(\"Energy\", \"Energy Level\")\n if \"Par. Elevel\" in hd:\n hd = hd.replace(\"Par. Elevel\", \"Parent Energy Level\")\n if \"Abund.\" in hd:\n hd = hd.replace(\"Abund.\", \"Abundance (%)\")\n if \"Ene.\" in hd:\n hd = hd.replace(\"Ene.\", \"Energy\")\n if \"Int.\" in hd:\n hd = hd.replace(\"Int.\", \"Intensity (%)\")\n if \"Dec\" in hd and \"Decay\" not in hd:\n hd = hd.replace(\"Dec\", \"Decay\")\n if \"Rad\" in hd and \"Radiation\" not in hd:\n hd = hd.replace(\"Rad\", \"Radiation\")\n if \"EP\" in hd:\n hd = hd.replace(\"EP\", \"Endpoint\")\n if \"Mass Exc\" in hd and \"Mass Excess\" not in hd:\n hd = hd.replace(\"Mass Exc\", \"Mass Excess\")\n headers_new.append(hd)\n if len(set(headers_new)) != len(headers_new):\n raise NNDCRequestError(\n \"Duplicate headers after parsing\\n\"\n + f' Original headers: \"{headers}\"\\n'\n + f' Parsed headers: \"{headers_new}\"'\n )\n return headers_new",
"def _unpack_headers(self, headers):\n return dict((k,v[0]) for (k,v) in headers.getAllRawHeaders())",
"def fill_headers(self, headers):\n self.headers = {h[0]: h[1] for h in headers}",
"def _parse_headers(raw_headers: List[str]) -> Dict[str, str]:\n headers: Dict[str, str] = {}\n for header in raw_headers:\n name = header[: header.find(\":\")].strip()\n value = header[header.find(\":\") + 1 :].strip()\n headers[name.lower()] = value\n\n return headers",
"def _parse_headers(headers):\n try:\n return dict(header.split(\":\") for header in headers)\n except:\n raise ValueError(\"Invalid headers %s\" % headers)",
"def _headers(self, headers_dict):\n return Headers(dict((k,[v]) for (k,v) in headers_dict.items()))",
"def manage_headers(dem_header_file, header_paths):\n dem_header = parse_dem_header(dem_header_file)\n # find param files containing filename dates\n if len(header_paths) == 2:\n headers = [parse_epoch_header(hp) for hp in header_paths]\n combined_header = combine_headers(headers[0], headers[1], dem_header)\n else:\n # probably have DEM or incidence file\n combined_header = dem_header\n combined_header[ifc.DATA_TYPE] = ifc.DEM\n\n return combined_header",
"def __parseHeaders(headers):\n global __all_headers\n if headers and len(headers) > 0:\n for header in headers:\n name = header.getElementsByTagName(\"name\")[0].childNodes[0].data\n value = header.getElementsByTagName(\"value\")[0].childNodes[0].data\n __addHeader(name, value)\n #print(__all_headers)",
"def _ToTuples(headers):\n all_headers = []\n for line in headers:\n if line[0] in '\\t ':\n if not all_headers:\n logging.warning(\n 'Unexpected response header continuation line [%s]', line)\n continue\n name, value = all_headers.pop()\n value += '\\n ' + line.strip()\n else:\n name_value = RealHttpFetch._GetHeaderNameValue(line)\n if not name_value:\n logging.warning(\n 'Response header in wrong format [%s]', line)\n continue\n name, value = name_value # pylint: disable=unpacking-non-sequence\n all_headers.append((name, value))\n return all_headers",
"def updateheader(self, headerlist=[], http_s_obj=None):\n header = {}\n for headerparam in headerlist:\n key_value = headerparam.split(\":\", 1)\n if len(key_value) == 2:\n try:\n key = key_value[0]\n value = key_value[1].strip()\n header.update({key: value})\n if http_s_obj:\n if http_s_obj.header.get(key):\n http_s_obj.header.update({key: value})\n except Exception:\n continue\n return header",
"def _build_headers(self):\n headers = {}\n headers.update(self.data_sources)\n headers.update(self.seasons)\n headers.update(self.region)\n headers.update(self.subregions)\n return headers",
"def test_normalize_headers():\n headers = [\n 'AllocationTransferAgencyIdentifier', 'BeginningPeriodOfAvailability', 'flex_mycol', 'FLEX_ANOTHER'\n ]\n mapping = {'allocationtransferagencyidentifier': 'ata', 'beginningperiodofavailability': 'boa'}\n\n result = csvReader.normalize_headers(headers, False, mapping)\n assert list(result) == [\n 'allocationtransferagencyidentifier', 'beginningperiodofavailability', 'flex_mycol', 'flex_another'\n ]\n result = csvReader.normalize_headers(headers, True, mapping)\n assert list(result) == ['ata', 'boa', 'flex_mycol', 'flex_another']",
"def _make_headers_df(headers_response):\n\n headers_df = util.make_dataframe(headers_response)\n headers_df = headers_df[\n [\"text\", \"column_index_begin\", \"column_index_end\", \"row_index_begin\", \"row_index_end\", \"cell_id\",\n \"text_normalized\"]]\n return headers_df",
"def _headers(self) -> Mapping[str, str]:\n return {}",
"def _normalize_headers(self):\n self.ncookies=dict((k.lower(), v) for k, v in self.request.cookies.iteritems())\n self.nheaders=dict((k.lower(), v) for k, v in self.request.headers.iteritems())",
"def scrub_headers(headers):\n if isinstance(headers, dict):\n headers = headers.items()\n headers = [\n (parse_header_string(key), parse_header_string(val))\n for (key, val) in headers\n ]\n if not logger_settings.get('redact_sensitive_headers', True):\n return dict(headers)\n if logger_settings.get('reveal_sensitive_prefix', 16) < 0:\n logger_settings['reveal_sensitive_prefix'] = 16\n return {key: safe_value(key, val) for (key, val) in headers}",
"def headers_raw_to_dict(headers_raw):\n\n if headers_raw is None:\n return None\n headers = headers_raw.splitlines()\n headers_tuples = [header.split(':', 1) for header in headers]\n\n result_dict = {}\n for header_item in headers_tuples:\n if not len(header_item) == 2:\n continue\n\n item_key = header_item[0].strip()\n item_value = header_item[1].strip()\n result_dict[item_key] = item_value\n\n return result_dict",
"def my_normalize(self, headers):\n ret = normalize(self, headers)\n if 'authorization' in ret:\n ret['Authorization'] = ret.pop('authorization')\n return ret",
"def parse_header(self):",
"def set_headers(self, headers):\n self.headers = headers\n process_headers(self)\n self.character_encoding = self.parsed_headers.get(\n 'content-type', (None, {})\n )[1].get('charset', 'utf-8') # default isn't UTF-8, but oh well",
"def decode_header(header):\n new_header = {}\n\n for item in header:\n split = item.split('\\t')\n new_header[split[0].replace(':', '')] = split[1].replace(\"\\r\\n\", \"\")\n\n return new_header",
"def from_headers(self, headers):\n try:\n # First IP address is the one of the client\n ip = headers['X_FORWARDED_FOR'].split(',')[0].strip()\n except KeyError:\n ip = headers.get('REMOTE_ADDR')\n\n if ip:\n # Double-check if the address has a valid format\n if re.match(r'^[\\d+]{1,3}\\.[\\d+]{1,3}\\.[\\d+]{1,3}\\.[\\d+]{1,3}$',\n ip, re.I):\n ip = None\n\n # Exclude private IP address ranges\n if re.match(r'^(?:127\\.0\\.0\\.1|10\\.|192\\.168\\.|172\\.(?:1[6-9]|2[0-9]|3[0-1])\\.)', ip):\n ip = None\n\n self.ip_address = ip\n\n self.user_agent = headers.get('HTTP_USER_AGENT')\n\n if 'HTTP_ACCEPT_LANGUAGE' in headers:\n parsed_locales = []\n res = re.findall(\n r'(^|\\s*,\\s*)([a-zA-Z]{1,8}(-[a-zA-Z]{1,8})*)\\s*(;\\s*q\\s*=\\s*(1(\\.0{0,3})?|0(\\.[0-9]{0,3})))?', \n headers['HTTP_ACCEPT_LANGUAGE'], re.I)\n for r in res:\n name = r[1].replace('-', '_')\n value = 1 if not r[4] else r[4]\n parsed_locales += [(name, value)]\n\n self.locale = sorted(parsed_locales, key=lambda x: x[1],\n reverse=True)[0][0]\n\n return self",
"def filter_headers(self, header):\n if header == \"Ticker symbol\":\n return \"symbol\"\n elif header == \"GICS Sector\":\n return \"sector\"\n elif header == \"Security\":\n return \"name\"\n elif header == \"GICS Sub Industry\":\n return \"industry\"\n else:\n return header",
"def _parse_rate_limit_headers(headers):\n limit = int(headers[\"X-RateLimit-Limit\"])\n remaining = int(headers[\"X-RateLimit-Remaining\"])\n reset_at_utc = int(headers[\"X-RateLimit-Reset\"])\n return {\n \"limit\": limit,\n \"used\": limit - remaining,\n \"remaining\": remaining,\n \"reset_at_utc\": reset_at_utc,\n \"reset_in_sec\": reset_at_utc - round(time.time()),\n \"last_update\": round(time.time())\n }",
"def generate_header_dic(self, header_strings):\n headers = dict()\n\n for header_values in header_strings:\n header_list = header_values.split(':')\n headers[header_list[0]] = header_list[1]\n return headers",
"def headers(self) -> dict:\n raise NotImplementedError # pragma: no cover",
"def _headercorrected(hdr):\n # COM*** -> COMMENT\n i = 1\n while 'COM%03d' % i in hdr:\n value = hdr['COM%03d' % i]\n comment = hdr.cards['COM%03d' % i].comment\n hdr['COMMENT'] = '[%s] %s' % (comment, value)\n del hdr['COM%03d' % i]\n i += 1\n # HIST*** -> HISTORY\n i = 1\n while 'HIST%03d' % i in hdr:\n value = hdr['HIST%03d' % i]\n comment = hdr.cards['HIST%03d' % i].comment\n hdr['HISTORY'] = '%s (%s)' % (value, comment)\n del hdr['HIST%03d' % i]\n i += 1\n # ORIGIN -> FROM\n if 'ORIGIN' in hdr.keys():\n hdr.rename_keyword('ORIGIN', 'FROM')\n if 'ORIGIN_V' in hdr.keys():\n hdr.rename_keyword('ORIGIN_V', 'FROM_V')\n # SOURCE_V -> FORMAT\n if 'SOURCE_V' in hdr.keys():\n hdr.rename_keyword('SOURCE_V', 'FORMAT')\n # SRC_VERS -> SRC_V\n if 'SRC_VERS' in hdr.keys():\n hdr.rename_keyword('SRC_VERS', 'SRC_V')",
"def missing_header_fields():\n auth_token = get_auth_token()\n\n headers = '{\"Host\": \"$host\",\"Date\": \"DATE\",'\n headers += '\"Accept-Encoding\": \"gzip\",'\n headers += '\"X-Auth-Token\": \"$token\"}'\n headers = string.Template(headers)\n\n return headers.substitute(host=CFG.host, token=auth_token)",
"def get_header(header_row):\n header = {}\n header['station'], c1, c2, c3, date, time, tz = header_row.split()\n header['short_model'] = c1\n header['model'] = f'{c1} {c2} {c3}' \n header['runtime'] = dateutil.parser.parse(f'{date} {time} {tz}')\n return header",
"def _split_headers(headers):\n amz_headers = {}\n reg_headers = {}\n for cur in headers:\n if cur.lower().startswith('x-amz-'):\n amz_headers[cur] = headers[cur]\n else:\n reg_headers[cur] = headers[cur]\n return (amz_headers, reg_headers)"
] | [
"0.6760923",
"0.6707738",
"0.66021603",
"0.6435935",
"0.62480223",
"0.62002486",
"0.6085462",
"0.60607",
"0.605833",
"0.6038865",
"0.5981734",
"0.5961042",
"0.5954765",
"0.59422773",
"0.5939061",
"0.5936525",
"0.5867014",
"0.5862857",
"0.5862484",
"0.586054",
"0.5848212",
"0.57505476",
"0.57465124",
"0.57360345",
"0.5724249",
"0.5721163",
"0.57163894",
"0.57017416",
"0.57010096",
"0.56939006"
] | 0.7734185 | 0 |
Calculate the similarity between two CTRDMs based on Cosine Similarity | def cosinesimilarity_cal(CTRDM1, CTRDM2):
# get number of conditions
n_cons = np.shape(CTRDM1)[0]
# calculate the number of value above the diagonal in RDM
n = n_cons * (n_cons - 1)
# initialize two vectors to store the values above the diagnal of two RDMs
v1 = np.zeros([n], dtype=np.float64)
v2 = np.zeros([n], dtype=np.float64)
# assignment
nn = 0
for i in range(n_cons):
for j in range(n_cons):
if i != j:
v1[nn] = CTRDM1[i, j]
v2[nn] = CTRDM2[i, j]
nn = nn + 1
# calculate the Cosine Similarity
V1 = np.mat(v1)
V2 = np.mat(v2)
num = float(V1 * V2.T)
denom = np.linalg.norm(V1) * np.linalg.norm(V2)
cos = num / denom
similarity = 0.5 + 0.5 * cos
return similarity | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cosine_similarity(v1, v2):\n # Cosine Sim:\n # Get the words that both have in common\n\n v1words = set(v1.keys())\n v2words = set(v2.keys())\n\n numerator_words = v1words.intersection(v2words)\n\n # Multiply and sum those counts\n numerator = 0.0\n for word in numerator_words:\n numerator += v1[word] * v2[word]\n\n\n # Divide by the sqrt of the product of the sum of the squares of the counts\n denominator = math.sqrt(math.magnitude(list(v1.values())) * math.magnitude(list(v2.values())))\n\n return numerator/denominator",
"def cosine_similarity(v1, v2):\n return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))",
"def cosineSimilarity(dict1,dict2):\n product1 = 0.0\n product2 = 0.0\n for key in dict1.keys():\n product1 += (dict1[key] * dict1[key])\n for key in dict2.keys():\n product2 += (dict2[key] * dict2[key])\n product1 = math.sqrt(product1)\n product2 = math.sqrt(product2)\n fenmu = product1 * product2\n fenzi = 0.0\n for key in dict1.keys():\n if key in dict2:\n fenzi += (dict1[key] * dict2[key])\n cosSim = fenzi / fenmu\n return cosSim",
"def get_cosine_sim(self):\r\n return CosineSimilarity().calculate_similarity(self.tweets)",
"def get_cosine_similarity(doc1, doc2):\n count_vectorizer = CountVectorizer(stop_words='english')\n sparse_matrix = count_vectorizer.fit_transform(raw_documents=[doc1, doc2])\n dtm = sparse_matrix.todense()\n df_dtm = pd.DataFrame(data=dtm, \n columns=count_vectorizer.get_feature_names(), \n index=['doc1', 'doc2'])\n similarity_matrix = cosine_similarity(df_dtm, df_dtm)\n similarity_score = round(similarity_matrix[0][1], 6)\n return similarity_score",
"def cosine_similarity(self, v1: np.ndarray, v2: np.ndarray) -> float:\n return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))\n # return cosine_similarity(v1, v2)[0][0]",
"def cosin_sim_pairs(a, b):\n wordsA = set(a.keys())\n wordsB = set(b.keys())\n inter = wordsA.intersection(wordsB)\n if(len(inter) == 0):\n return 0.0\n aa, bb, ab = 0, 0, 0\n for k in inter:\n aa += a[k] ** 2\n bb += b[k] ** 2\n ab += a[k] * b[k]\n for k in wordsA - inter:\n aa += a[k] ** 2\n for k in wordsB - inter:\n bb += b[k] ** 2\n return ab / float(math.sqrt(aa) * math.sqrt(bb))",
"def cosine_similarity(v1, v2):\n sim = np.sum(v1*v2)/np.sqrt(np.sum(v1**2))/np.sqrt(np.sum(v2**2))\n return sim",
"def cosine_similarity(a, b):\n cs = dot_product(a, b)/(norm(a) * norm(b))\n return cs",
"def cosine_similarity(v1: Vector, v2: Vector) -> float:\n return dot_product(v1, v2) / (vector_len(v1) * vector_len(v2))",
"def calculate_cosine_similarity(self):\n tfidf_matrix = self.calculate_tfidf()\n\n cosine_similarity = linear_kernel(tfidf_matrix, tfidf_matrix) # Cosine similarity matrix calculation\n\n return cosine_similarity",
"def cosine_similarity(vec1, vec2) -> float:\n return np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))",
"def compute_cosine_similarity(self):\n cos_matrix = []\n for i in range(len(self.train_vec)):\n val = self.vec1 * self.train_vec[i]\n cos_matrix.append(val[0])\n out = np.argmax(cos_matrix)\n print(self.train_output[out])",
"def cosine_similarity(v1, v2):\n v1_len = 0\n v2_len = 0\n dot_product = 0\n\n for context_id, count in v1.items():\n v1_len += count ** 2\n if context_id in v2:\n dot_product += count*v2[context_id]\n for count in v2.values():\n v2_len += count ** 2\n\n v1_len = math.sqrt(v1_len)\n v2_len = math.sqrt(v2_len)\n return dot_product/(v1_len * v2_len)",
"def cosine_similarity(x1, x2, dim=1, eps=1e-8):\n w12 = torch.sum(x1 * x2, dim)\n w1 = torch.norm(x1, 2, dim)\n w2 = torch.norm(x2, 2, dim)\n return (w12 / (w1 * w2).clamp(min=eps)).squeeze()",
"def compute_cosine_sim(vec1, vec2):\n numer = np.dot(vec1.reshape((300,)), vec2.reshape((300,)))\n denom = np.sqrt(np.sum(np.square(vec1.reshape(300, )))) * np.sqrt(\n np.sum(np.square(vec2.reshape(300, ))))\n\n similarity = numer / denom\n\n return similarity",
"def cosine_similarity(x1, x2, dim=1, eps=1e-8):\r\n w12 = torch.sum(x1 * x2, dim)\r\n w1 = torch.norm(x1, 2, dim)\r\n w2 = torch.norm(x2, 2, dim)\r\n return (w12 / (w1 * w2).clamp(min=eps)).squeeze()",
"def cosine_similarity(cls, vec_a, vec_b):\n return np.dot(vec_a, vec_b) / \\\n (np.linalg.norm(vec_a) * np.linalg.norm(vec_b))",
"def cosine_similarity(self, x, y):\n return np.dot(x, y) / (np.linalg.norm(x) * np.linalg.norm(y))",
"def cosine_similarity(self, source_doc, input_doc):\n vectorizer = self.vectorizer or TfidfVectorizer(tokenizer=PlagiarismDetector.tokenize_and_stem, stop_words='english')\n tfidf = vectorizer.fit_transform([source_doc, input_doc])\n return ((tfidf * tfidf.T).A)[0, 1]",
"def cosine_similarity(vec_x, vec_y):\n sim_prod = 0.0\n len_x = 0\n len_y = 0\n\n for ngram in vec_x:\n len_x += vec_x[ngram] ** 2\n\n for ngram in vec_y:\n len_y += vec_y[ngram] ** 2\n\n len_x = math.sqrt(len_x)\n len_y = math.sqrt(len_y)\n\n for ngram in vec_x:\n if ngram in vec_y:\n sim_prod += vec_x[ngram] * vec_y[ngram]\n\n return sim_prod / (len_x * len_y)",
"def test_cossim(self):\n metrics = SimilarityMetrics()\n test1 = metrics.cosine_similarity(np.asarray([1,1]),np.asarray([-1,1]))\n np.testing.assert_almost_equal(test1,0.0)\n\n test2 = metrics.cosine_similarity(np.asarray([1,-1]),np.asarray([-1,1]))\n np.testing.assert_almost_equal(test2,-1.0)\n\n test3 = metrics.cosine_similarity(np.asarray([1,1]),np.asarray([1,1]))\n np.testing.assert_almost_equal(test3,1.0)",
"def cosine_dist(d1, d2):\n suma=0\n for x in d1:\n if x in d2:\n suma+=(d1[x]*d2[x])\n sqrt1=0\n sqrt2=0\n for i in d1:\n sqrt1+=math.pow(d1[i],2)\n for i in d2:\n sqrt2+=math.pow(d2[i],2)\n return 1-suma/(math.sqrt(sqrt1)*math.sqrt(sqrt2))",
"def cosine_similarity_tensorflow(tf_word_representation_A, tf_words_representation_B):\n a_normalized = tf.nn.l2_normalize(tf_word_representation_A, axis=-1)\n b_normalized = tf.nn.l2_normalize(tf_words_representation_B, axis=-1)\n similarity = tf.reduce_sum(\n tf.multiply(a_normalized, b_normalized), \n axis=-1\n )\n \n return similarity",
"def pairwise_cosine_similarity(x, y):\n x = torch.div(x, torch.sqrt(torch.max(torch.sum(x ** 2), 1e-12)))\n y = torch.div(y, torch.sqrt(torch.max(torch.sum(y ** 2), 1e-12)))\n return torch.mm(x, torch.transpose(y, 1, 0))",
"def content_similarity(self, movie1, movie2):\n v1, v2 = self.get_tfidf(movie1), self.get_tfidf(movie2)\n return self.cosine_similarity(v1, v2)",
"def test_cosine_similarity_compiled():\n vector1 = np.array([1, 1, 0, 0])\n vector2 = np.array([1, 1, 1, 1])\n score11 = cosine_similarity(vector1, vector1)\n score12 = cosine_similarity(vector1, vector2)\n score22 = cosine_similarity(vector2, vector2)\n\n assert score12 == 2 / np.sqrt(2 * 4), \"Expected different score.\"\n assert score11 == score22 == 1.0, \"Expected different score.\"",
"def cosine_similarity(a, b):\n\n numerator = tf.reduce_sum(tf.multiply(a, b), axis=1)\n denominator = tf.multiply(tf.norm(a, axis=1), tf.norm(b, axis=1))\n cos_similarity = numerator/denominator\n return cos_similarity",
"def CosineSimilarity(test_vec, source_vecs):\n cos_dist = 0\n for source_vec in source_vecs:\n cos_dist += FacePredictor.findCosineDistance(test_vec, source_vec)\n return cos_dist / len(source_vecs)",
"def getGloveoCosineSimilarity(question1, question2):\n questions = [question1, question2]\n\n ## for the sentences we need to get the count vectors\n vec = CountVectorizer(max_features=5000, stop_words=None,binary=True)\n count_vectors = vec.fit_transform(questions)\n\n ## get the vocabulary of words from the questions\n vocab_index = vec.vocabulary_\n\n ## get the index of the words and embeddings\n index_word = {v:k for k, v in vocab_index.items()}\n\n ## get the question vectors\n question_vectors = np.zeros((count_vectors.shape[0], 300))\n\n ## iterate through count vectors for each word get the embeddings\n ## for each embedding, we will then average by the number of words\n ## this will be then used for cosine similarity\n for i in range(count_vectors.shape[0]):\n row = count_vectors[i, :].toarray()\n word_ids = np.where(row > 0)[1]\n word_counts = row[:, word_ids][0]\n numWords = np.sum(word_counts)\n\n ## if there are no words, continue\n if numWords == 0:\n continue\n\n ## initialize the word embeddings to 0\n word_embeddings = np.zeros((word_ids.shape[0], 300))\n\n ## update the word embeddings\n for j in range(word_ids.shape[0]):\n word_id = word_ids[j]\n word_embeddings[j, :] = word_counts[j] * gloveDict[index_word[word_id]]\n question_vectors[i, :] = np.sum(word_embeddings, axis=0) / numWords\n\n return(cosine_similarity(question_vectors[0], question_vectors[1])[0][0])"
] | [
"0.7754487",
"0.7676584",
"0.7633492",
"0.76006675",
"0.7589376",
"0.7576636",
"0.75592244",
"0.75373095",
"0.7522885",
"0.746666",
"0.74491453",
"0.7418762",
"0.7398554",
"0.73664916",
"0.7334395",
"0.7312988",
"0.7309643",
"0.7292841",
"0.72754246",
"0.7240708",
"0.72226435",
"0.7130961",
"0.7123031",
"0.70562273",
"0.70501834",
"0.70479953",
"0.7040821",
"0.7039914",
"0.70334846",
"0.70334363"
] | 0.80601525 | 0 |
Adds basic_vector to the basic vectors. If there are at least 3 arrays in _basic_vectors, then adds a new array to _feature_vectors. This added array is composed of the basic vector and its first 2 central derivatives. basic_vector must be the array returned by the MFCC. | def build_feature_vector(self, basic_vector):
basic_vector = basic_vector - np.mean(basic_vector)
self._basic_vectors.append(basic_vector)
if len(self._basic_vectors) > 2:
#if there are at least 3 basic vectors we can calculate the central derivative for the vector before this one
first_derivative = (basic_vector - self._basic_vectors[-3])/(2*self.seconds_to_next_vector)
second_derivative = (basic_vector - 2*self._basic_vectors[-2] + self._basic_vectors[-3])/(self.seconds_to_next_vector**2)
feature_vector = np.concatenate((basic_vector, first_derivative, second_derivative))
self._feature_vectors.append(feature_vector) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_vector(self, doc_name, add_cnt, new_docvec):\n \n # determine the weight of the merging pieces\n old_weight = float(self.vector_cnt) / (self.vector_cnt + add_cnt)\n new_weight = float(add_cnt) / (self.vector_cnt + add_cnt)\n \n if len(self.name) == 0:\n self.name = doc_name\n else:\n self.name += \", %s\" % doc_name\n \n # computes magnitude as it goes.\n self.length = 0\n \n # reduce weight of values already in vector\n for key in self.centroid_vector.keys():\n if key in new_docvec: # if is in both vectors!\n \n oldvalue = float(self.centroid_vector[key]) * old_weight\n newvalue = float(new_docvec[key]) * new_weight\n value = oldvalue + newvalue\n \n self.centroid_vector[key] = value\n self.length += (value * value) # magnitude\n \n # so when we go through to add in all the missing ones we won't \n # have excess.\n del new_docvec[key]\n else: # if it is strictly in the old vector\n \n oldvalue = float(self.centroid_vector[key]) * old_weight\n self.centroid_vector[key] = oldvalue\n self.length += (oldvalue * oldvalue) # magnitude\n \n # add new values to vector\n for key, value in new_docvec.iteritems():\n # we don't so we'll have to create a new value with the weight of \n # the added vector\n value = float(value) * new_weight\n self.centroid_vector[key] = value\n self.length += (value * value)\n\n self.vector_cnt += add_cnt\n\n # calculate magnitude\n self.length = sqrt(self.length)",
"def feature_vector(features, vector):\n clean_features = set(features)\n new_features_vector = featurize(vector,clean_features)\n return new_features_vector",
"def _add_support_vectors(self, x: np.ndarray, y: np.ndarray) -> None:\n\n n_vectors = x.shape[0]\n\n self.support_vectors = np.vstack([self.support_vectors, x])\n self.alpha = np.append(self.alpha, np.zeros(n_vectors))\n self.target = np.append(self.target, y)\n\n new_kernel_values = self._kernel(x, self.support_vectors)\n\n self.kernel_mx = np.vstack([self.kernel_mx, new_kernel_values[:, :-n_vectors]])\n self.kernel_mx = np.hstack([self.kernel_mx, new_kernel_values.T])\n\n gradient = y - new_kernel_values.dot(self.alpha)\n self.gradient = np.append(self.gradient, gradient)\n\n a = y * self.c\n a[a > 0] = 0\n self.a = np.append(self.a, a)\n\n b = y * self.c\n b[b < 0] = 0\n self.b = np.append(self.b, b)",
"def append(self, vector):\n self._vectors.append(Vec2(*vector))",
"def create_feature_vector(self, files=[], name=\"\"):\n\n if( len(files)==0 ):\n return\n\n epsilon = 1e-8\n set = []\n\n #iterating all files obtaining the significant data to compute the feature vectors\n for file in files:\n\n #reading the csv files and keeping the first 3 columns (x,y,time)\n file_data = pd.read_csv(file)\n file_data = file_data.to_numpy()\n data = np.zeros((file_data.shape[0],7))\n data[:,0:3] = file_data[:,0:3]\n\n #computing the other interesting features\n angle = np.arctan(data[:,1]/(data[:,0]+epsilon))\n velocity = np.sqrt( np.square(data[:,1]) + np.square(data[:,0]) )\n log_curvature = np.log10( velocity/(angle+epsilon) )\n acceleration = np.sqrt( np.square(velocity) + np.square(velocity*angle) )\n\n #assigning the new computed features\n data[:,3] = angle\n data[:,4] = velocity\n data[:,5] = log_curvature\n data[:,6] = acceleration\n\n #normalizing the data\n data = self.normalization(data)\n set.append(data)\n\n return set",
"def feature_vector1(self, feature_vector1):\n\n self._feature_vector1 = feature_vector1",
"def add(self, featVect, label):\n if label in self.labelToNum:\n l = self.labelToNum[label]\n else:\n l = len(self.numToLabel)\n self.numToLabel.append(label)\n self.labelToNum[label] = l\n \n self.blocks.append((featVect.reshape((1,featVect.shape[0])).astype(numpy.double),[l]))",
"def augment_feature_vector(X):\n column_of_ones = np.zeros([len(X), 1]) + 1\n\n return np.hstack((column_of_ones, X))",
"def __add__(self, other):\n return Vector([c1 + c2 for (c1, c2) in zip(self.components, other.components)])",
"def apply_new_basis(new_base, vector_array):\n return np.dot(new_base, vector_array).T",
"def register_vectors(self, vectors):\n\n self.vectors.extend(vectors)",
"def add(first, other):\n if isinstance(first,FreeCAD.Vector) and isinstance(other,FreeCAD.Vector):\n return FreeCAD.Vector(first.x+other.x, first.y+other.y, first.z+other.z)",
"def add(self, vector):\n self.x += vector.x\n self.y += vector.y",
"def concatenation(self, StdVectorFst other):\n cdef StdVectorFst result = self.copy()\n result.concatenate(other)\n return result",
"def __add__(self, vector):\n return self.translated(vector)",
"def feature_vector2(self, feature_vector2):\n\n self._feature_vector2 = feature_vector2",
"def project_vectors(self, vectors):\n X = vectors - self._mean\n return np.dot(X, self.components.T)",
"def vector_add(v, w):\n\treturn [v_i + w_i for v_i, w_i in zip(v, w)]",
"def add_basic_block(self, basic_block):\n self.basic_blocks.append(basic_block)\n basic_block.function = self",
"def _update_feature_vec(fvec, word, tag_ngram):",
"def _make_feature_vec(self, word_list):\n\n # Pre-initialize an empty numpy array (for speed)\n feature_vec = np.zeros((self.num_features,), dtype=\"float32\")\n\n # index2word is a list that contains the names of the words in\n # the model's vocabulary. Convert it to a set, for speed.\n index2word_set = set(self.w2v_model.index2word)\n\n # Loop over each word in the word_list and, if it is in the model's\n # vocabulary, add its feature vector to the total\n nwords = 0\n for word in word_list:\n # NOTE: Careful there, if all words are in caps in the article,\n # this function will return nan values and blow up the forest.\n word = word.lower()\n if word in index2word_set:\n nwords += 1\n feature_vec = np.add(feature_vec, self.w2v_model[word])\n\n # Divide the result by the number of words to get the average\n feature_vec = np.divide(feature_vec, nwords)\n return feature_vec",
"def get_feature_vector(self, feature_idxs):\n feat_vec = np.zeros(len(feature_idxs))\n if hasattr(self, 'generic'):\n generics = self.generic\n else:\n generics = dict()\n for feature in self.features:\n key = generics.get(feature, feature)\n if key in feature_idxs:\n feat_vec[feature_idxs[key]] = self.features[feature]\n return feat_vec",
"def to_basic_block(self):\n return _add_vector_swig.add_vector_2_cpp_sptr_to_basic_block(self)",
"def get_feature_vector(cc, img, quiet=False):\n savefilename = config.get_classifier_featvect_name(cc.d.images[img]) \n if os.path.isfile(savefilename):\n print 'load feat_vect %s'%(cc.d.images[img].name)\n feat_vect = cPickle.load(open(savefilename,'r'))\n else:\n feat_vect = compute_feature_vector(cc, img, quiet=quiet)\n cPickle.dump(feat_vect, open(savefilename,'w'))\n return feat_vect",
"def add_feature(x, x1):\n if x is None:\n x = x1\n else:\n x = np.concatenate((x, x1), axis=1)\n return x",
"def __init__(self, feature_vectors):\n # Initialize key variables\n (rows, _) = feature_vectors.shape\n\n # Append a column of ones to array\n ones = np.ones((rows, 1))\n kessler_array = np.hstack((ones, feature_vectors))\n\n # Make array available to rest of class\n self.data = kessler_array.tolist()",
"def __add__(self, other):\n # other is a scalar\n if isinstance(other, (int, float, complex, Fraction)) and not isinstance(other, bool):\n return Vector([i + other for i in self.data], self.column)\n # other is a Vector\n elif isinstance(other, Vector):\n if len(self.data) != len(other):\n raise Exception('Vectors are not of equal length')\n elif self.column != other.column:\n raise Exception('Vectors are not of equal orientation')\n else:\n return Vector([self.data[i] + other.data[i] for i in range(len(self.data))], self.column)\n # other is not a scalar or a Vector\n else:\n raise Exception('Argument is not a number or a Vector') from TypeError",
"def __add__(self, other):\n if len( self) != len(other):\n raise ValueError('Dimensions must match.')\n result = Vector(len(self))\n for i in range(len(self)):\n result[i] = self[i] + other[i]\n return result",
"def processFeature(prevWord, word, vector):\n \n # We add feature whether it exists or not\n unigram, exists = vector.getUnigram(prevWord)\n if not exists:\n vector.addUnigram(prevWord)\n \n \n bigram, exists = vector.getBigram(prevWord, word)\n if not exists:\n vector.addBigram(prevWord, word)",
"def add_vectors(u, v): #11.22.5\r\n new_vector = []\r\n \"\"\"Because they have same length so we\r\n should take advantage from this one\"\"\"\r\n for i in range(len(u)):\r\n m = u[i] + v[i] # Get their value of i index at the same time!\r\n new_vector.append(m)\r\n return new_vector"
] | [
"0.5917165",
"0.5735873",
"0.57002974",
"0.5500693",
"0.54043525",
"0.5337707",
"0.5288758",
"0.5286068",
"0.5255515",
"0.5167239",
"0.51532346",
"0.5146452",
"0.5016166",
"0.5007243",
"0.49999866",
"0.4954787",
"0.49387354",
"0.49348387",
"0.4906069",
"0.48973984",
"0.48947722",
"0.48843622",
"0.48737946",
"0.48659736",
"0.48557988",
"0.4851156",
"0.48325557",
"0.48302197",
"0.48273098",
"0.48233554"
] | 0.8391323 | 0 |
If there is at least one feature vector then returns it, else returns None | def get_last_feature_vectors(self):
if len(self._feature_vectors):
return self._feature_vectors[-1]
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_training(feature_path): \n features = np.loadtxt(feature_path)\n feature_size = features.shape[1] -1 \n features_in = features[:,0:feature_size]\n features_out = features[:,-1]\n #features_out = np.array(map(lambda x: x if x else 0, features_out_unnorm))\n return features_in, features_out",
"def get_feature_by_name(self, feature_name):\n feature_index = self.feature_name_index.get(feature_name,-1)\n if feature_index > -1:\n return self.features[feature_index]\n else:\n logger.error(\"{} does not exist!\".format(feature_name))\n return None",
"def get_feature_vector(self, board):\n return self.hot_one(board)\n # return self.get_tesauro_feature_vector(self, board)",
"def _get_features(task, features, model, similarity_strategy=None):\n X = []\n langs = analysis_utils.get_langs_for_task(task)\n for feature in features:\n if feature != \"size\":\n # this is a nested array\n X_feature = analysis_utils.load_lang2vec_vectors(task=task, features=feature)\n if X_feature is None:\n #continue\n return None\n if similarity_strategy != \"-\":\n # We start with similarities to english\n X_feature = [[sim] for sim in analysis_utils.compute_similarities_of_lang_vecs(X_feature, strategy=similarity_strategy)]\n elif feature == \"size\" and model == \"xlmr\":\n # this is an array, we put it in a list\n X_feature = [[size] for size in analysis_utils.xlmr_input_corpus_sizes(langs)]\n elif feature == \"size\" and model == \"mbert\":\n X_feature = [[size] for size in analysis_utils.mbert_input_corpus_sizes(langs)]\n else:\n raise ValueError()\n # we now have a feature vector for a single feature or feature set\n if len(X) == 0:\n X = np.array(X_feature)\n else:\n X = np.concatenate((X,np.array(X_feature)), axis=1)\n if len(X) == 0:\n return None\n return np.array(X, dtype=float)",
"def featureByName(self, name):\n for feature in self.features:\n if feature.name == name:\n return feature\n return None",
"def get_features(self):\n if not self.exposes_features:\n return None\n\n return self._last_features",
"def __getitem__(self, feat):\n # We perform the test for presence explicitly, to maintain a consistent\n # notion of len(self). If we just returned self.features[k], the\n # defaultdict self.features could self.update(k=float()), thus\n # extending self's length by one.\n return self.features[feat] if feat in self.features else 0.",
"def feature_vector(features, vector):\n clean_features = set(features)\n new_features_vector = featurize(vector,clean_features)\n return new_features_vector",
"def _get_feature(self, layer_name):\n if (\n layer_name in self.config[\"layers\"]\n and \"feature\" in self.config[\"layers\"][layer_name]\n ):\n return self.config[\"layers\"][layer_name][\"feature\"]\n else:\n return 0",
"def getFeature(self, featureName):\n # loop through all the existing features\n for feature in self.features:\n # when we have a match with the name\n if featureName == feature.name:\n # return the value in the solution\n return feature\n # feature doesn't exist\n return None",
"def get_vector(self, token):\n try:\n idx = self.token_to_idx[token]\n except KeyError:\n print(\"Input token <{}> is not in the model. Will return None type vector\".format(token))\n return None\n return self.embeddings_mat[idx]",
"def feature_set(self) -> Optional[pulumi.Input['OrganizationFeatureSet']]:\n return pulumi.get(self, \"feature_set\")",
"def feature():\n pass",
"def get_vector(self) -> Optional[List[_Score]]:\n\n if len(self._vector) is 0:\n return None\n else:\n return self._vector",
"def get_feature(self, feature: FeatureName) -> FeatureInfo:\n if feature not in self.feature_map:\n state = FeatureState.Unsupported\n else:\n state = self.feature_map[feature]\n return FeatureInfo(state=state)",
"def get_features(words, vectors):\n result = [vectors.loc[word].values for word in words if word in df_keys.values.reshape(-1)]\n if result:\n return np.stack(result)\n return None",
"def filter_feature(feature, typ, value):\n return value is None or feature.__getattribute__(typ) == value",
"def parse_optional_vector(x, dtype=None):\n if x == 'none':\n return None\n else:\n return parse_vector(x, dtype)",
"def get_feature_vector(cc, img, quiet=False):\n savefilename = config.get_classifier_featvect_name(cc.d.images[img]) \n if os.path.isfile(savefilename):\n print 'load feat_vect %s'%(cc.d.images[img].name)\n feat_vect = cPickle.load(open(savefilename,'r'))\n else:\n feat_vect = compute_feature_vector(cc, img, quiet=quiet)\n cPickle.dump(feat_vect, open(savefilename,'w'))\n return feat_vect",
"def _get_relevant_features(self, X):\n if self.only_binary_features:\n feature_mask = which_columns_are_binary(X)\n else:\n feature_mask = np.ones(X.shape[1], dtype=bool)\n return feature_mask",
"def has_feature(self, feature):\n features = self.features\n if features is None:\n return False\n \n return feature in features",
"def extract_single_feature_vect(gray):\n\n hist, hog_img = skimHOG(gray)\n reduced = reduce_single_vector_dimension(hist)\n\n return reduced, hog_img",
"def get(self, name):\n try:\n return(self._d_features[name])\n except:\n log.error(\"Can't get feature '%s'\" % name)\n return",
"def select_features(vec):\n return sorted(vec, key=vec.get, reverse=True)[\n : min(len(vec), FEATURE_LENGTH)\n ]",
"def features(self) -> List[np.ndarray]:\n return None",
"def processFeature(prevWord, word, vector):\n \n # We add feature whether it exists or not\n unigram, exists = vector.getUnigram(prevWord)\n if not exists:\n vector.addUnigram(prevWord)\n \n \n bigram, exists = vector.getBigram(prevWord, word)\n if not exists:\n vector.addBigram(prevWord, word)",
"def features_size(self) -> int:\n return len(self.data[0].features) if len(self.data) > 0 and self.data[0].features is not None else None",
"def feature(self):\n return self._feature",
"def feature(self):\n return self._feature",
"def GetVectorArticleInput(dico_vector_input, features):\n features_left = set(features) - set(dico_vector_input.keys())\n if len(features_left) > 0:\n sentence = \"Some features aren't in the dict:\\n\"\n raise MyException(sentence + \"{}\".format(features_left))\n vector_art = []\n other_features = ['abstract', 'syn', 'exergue', 'title', 'secTitle']\n other_features += ['subTitle', 'supTitle']\n for feature in features:\n if feature == 'nbSign':\n if dico_vector_input['nbSign'] == 0:\n print(\"NbSign == 0 l.176 - GetVectorArticleInput\")\n vector_art.append(dico_vector_input[feature])\n else:\n vector_art.append(dico_vector_input[feature])\n # Conversion des variables en indicatrices\n # Normalement plus la peine, comme déjà fait auparavant\n elif feature in other_features:\n if dico_vector_input[feature] > 0:\n vector_art.append(1)\n else:\n vector_art.append(0)\n else:\n vector_art.append(dico_vector_input[feature])\n return (dico_vector_input['melodyId'], np.array([vector_art]))"
] | [
"0.6076066",
"0.6039801",
"0.60045683",
"0.5997051",
"0.59677297",
"0.5927279",
"0.5924938",
"0.592038",
"0.58969414",
"0.58928514",
"0.5883369",
"0.5868851",
"0.5810611",
"0.57908976",
"0.57573485",
"0.5748689",
"0.57457215",
"0.5690704",
"0.5680142",
"0.5675147",
"0.5661629",
"0.5642336",
"0.5623844",
"0.5604885",
"0.5604279",
"0.56018674",
"0.5579186",
"0.5577575",
"0.5577575",
"0.55653566"
] | 0.6274125 | 0 |
Function used for marking deducted late check-in requests. | def action_payslip_done(self):
for recd in self.late_check_in_ids:
recd.state = 'deducted'
return super(PayslipLateCheckIn, self).action_payslip_done() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def loan(self):",
"def checkin(self):\n folio = self.folio_id\n if folio.payment_deposits <= 0:\n raise UserError(_(\"\"\"No record of security deposit found on folio {}\n \"\"\".format(folio.name)))\n if folio.state != 'on_queue':\n raise UserError(_(\n 'Folio {} is not yet to be processed'.format(self.folio_id.name)))\n hours, minutes = decimal_to_time(self.env.user.company_id.checkin_hour)\n can_check_in = datetime.combine(\n date.today(), tm(hours, minutes)) < datetime.now()\n if not can_check_in:\n raise UserError(\n 'Guest(s) cannot be checked in earlier than {}'.format(\n self.env.user.company_id.checkin_hour))\n if self.folio_id.room_id.occupy():\n self.folio_id.write({'state': 'checkin'})",
"def checkin(self, checkin):\n\n self._checkin = checkin",
"def Daysleftverification():\n pass",
"def leave_request_decline(self, token, **kwargs):\n cr, uid, context = self._get_cr_uid_context()\n res = self._check_leave_request(\n cr, uid, request, token, context=context\n )\n if isinstance(res, http.Response):\n return res\n if res:\n res.signal_workflow('refuse')\n if res.state == 'refuse':\n return request.website.render(\n \"tk_hr_approve_request.leave_request_refused\"\n )",
"def loan(self):\n self.rent_date = datetime.datetime.now()\n self.back_date = datetime.datetime.now() + datetime.timedelta(14)\n self.book.loan()\n self.book.save()\n self.save()",
"def check_leave_request_holiday(self, cr, uid, att, context=None):\n if att:\n # check have overtime yet?\n att_name = datetime.strptime(att.name, DEFAULT_SERVER_DATETIME_FORMAT)\n param_obj = self.pool.get('ir.config_parameter') \n max_early = param_obj.get_param(cr, uid, 'maximum_early_minutes', default=60)\n max_late = param_obj.get_param(cr, uid, 'maximum_late_minutes', default=60)\n try:\n max_early = int (max_early)\n max_late = int (max_late)\n except:\n raise except_osv(_(\"Warning !\"),_(\"maximum_early_minutes or maximum_late_minutes in config parameter is incorrect\"))\n \n time_early = att_name + timedelta(minutes = max_early)\n time_late = att_name - timedelta(minutes = max_late)\n \n overtime_obj = self.pool.get('hr.overtime')\n overtime_confirmed_ids = overtime_obj.search(cr, uid, [('employee_id', '=', att.employee_id.id),\n ('mode', '=', 'by_employee'),\n ('name', '=', att.day_tz),\n ('datetime_start', '<=', time_early.strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ('datetime_stop', '>=', time_late.strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ('state', 'in', ['confirmed'])\n ])\n if overtime_confirmed_ids:\n return False\n \n public_holiday_obj = self.pool.get('trobz.hr.public.holidays')\n public_holiday_ids = public_holiday_obj.search(cr, uid, [('date', '=', att.day_tz), ('state', '=', 'approved')], context=context)\n if public_holiday_ids:\n return True\n sql = '''\n SELECT line.first_date_type, line.first_date, line.last_date_type, line.last_date\n FROM hr_holidays_line line JOIN hr_holidays h ON line.holiday_id = h.id\n WHERE h.employee_id = %d\n AND line.first_date <= '%s' AND line.last_date >= '%s'\n AND h.state = 'validate'\n '''% (att.employee_id.id, att.day_tz, att.day_tz)\n cr.execute(sql)\n for leave in cr.fetchall():\n if att.action == 'sign_out':\n afternoon = datetime.strptime(att.name_tz, DEFAULT_SERVER_DATETIME_FORMAT).hour >= 13\n else:\n afternoon = datetime.strptime(att.name_tz, DEFAULT_SERVER_DATETIME_FORMAT).hour >= 12\n if att.day_tz == leave[1]:\n if leave[0] == 'afternoon' and afternoon:\n return True\n if leave[0] == 'morning' and not afternoon:\n return True\n if leave[0] == 'full':\n return True\n if att.day_tz == leave[3]:\n if leave[2] == 'afternoon' and afternoon:\n return True\n if leave[2] == 'morning' and not afternoon:\n return True\n if leave[2] == 'full':\n return True\n if datetime.strptime(att.day_tz, '%Y-%m-%d') > datetime.strptime(leave[1], '%Y-%m-%d')\\\n and datetime.strptime(att.day_tz, '%Y-%m-%d') < datetime.strptime(leave[3], '%Y-%m-%d'):\n return True\n return False",
"def _check_leave_request(self, cr, uid, request, token, context=None):\n holidays_obj = request.registry['hr.holidays']\n holidays_ids = holidays_obj.search(cr, uid, [\n ('token', '=', token)\n ])\n\n if len(holidays_ids) == 0:\n return request.website.render(\n \"tk_hr_approve_request.leave_request_not_found\"\n )\n\n _id = holidays_ids[0] if len(holidays_ids) else None\n if _id:\n leave_request = holidays_obj.browse(\n cr, uid, _id, context=context\n )\n return leave_request",
"def _tick(self):\n\t\tself.pay_tax()\n\t\tself.inhabitant_check()\n\t\tself.level_check()",
"def _check_approval_update(self, state):\n\t\tcurrent_employee = self.env['hr.employee'].search([('user_id', '=', self.env.uid)], limit=1)\n\t\t# is_officer = self.env.user.has_group('hr_holidays.group_hr_holidays_user')\n\t\tis_manager = self.env.user.has_group('hr_holidays.group_hr_holidays_manager')\n\t\tfor holiday in self:\n\t\t\tval_type = holiday.holiday_status_id.validation_type\n\t\t\tif state == 'confirm':\n\t\t\t\tcontinue\n\n\t\t\tif state == 'draft':\n\t\t\t\tif holiday.employee_id != current_employee and not is_manager:\n\t\t\t\t\traise UserError(_('Only a Leave Manager can reset other people leaves.'))\n\t\t\t\tcontinue\n\n\t\t\t# if not is_officer:\n\t\t\t# \traise UserError(_('Only a Leave Officer or Manager can approve or refuse leave requests.'))\n\n\t\t\t# if is_officer:\n\t\t\t# \t# use ir.rule based first access check: department, members, ... (see security.xml)\n\t\t\tholiday.check_access_rule('write')\n\n\t\t\tif holiday.employee_id == current_employee and not is_manager:\n\t\t\t\traise UserError(_('Only a Leave Manager can approve its own requests.'))\n\n\t\t\tif (state == 'validate1' and val_type == 'both') or (state == 'validate' and val_type == 'manager'):\n\t\t\t\tmanager = holiday.employee_id.parent_id or holiday.employee_id.department_id.manager_id\n\t\t\t\tif (manager and manager != current_employee) and not self.env.user.has_group('hr_holidays.group_hr_holidays_manager'):\n\t\t\t\t\traise UserError(_('You must be either %s\\'s manager or Leave manager to approve this leave') % (holiday.employee_id.name))\n\n\t\t\tif state == 'validate' and val_type == 'both':\n\t\t\t\tif not self.env.user.has_group('hr_holidays.group_hr_holidays_manager'):\n\t\t\t\t\traise UserError(_('Only an Leave Manager can apply the second approval on leave requests.'))",
"def check_absent_pre_date(self, cr, uid, att, context=None):\n if att:\n # check employee absent pre date\n pre_att_ids = self.search(cr, uid, [('employee_id', '=', att.employee_id.id), \n ('name', '<', att.name), \n ('action', 'in', ('sign_in', 'sign_out'))], \n limit=1)\n param_obj = self.pool.get('ir.config_parameter')\n working_hour_obj = self.pool.get('hr.payroll.working.hour')\n max_early = param_obj.get_param(cr, uid, 'maximum_early_minutes', default=60)\n max_late = param_obj.get_param(cr, uid, 'maximum_late_minutes', default=60)\n trobz_base_obj = self.pool.get('trobz.base')\n att_name = datetime.strptime(att.name_tz, DEFAULT_SERVER_DATETIME_FORMAT)\n try:\n max_early = int (max_early)\n max_late = int (max_late)\n except:\n raise except_osv(_(\"Warning !\"),_(\"maximum_early_minutes or maximum_late_minutes in config parameter is incorrect\"))\n \n time_late = att_name - timedelta(minutes = max_late)\n \n working_hour_ids=[] #Payroll Working Hours (Only read working PWH, Not Leave or Overtime PWH) \n if not pre_att_ids:\n working_hour_ids = working_hour_obj.search(cr, uid, [('employee_id', '=', att.employee_id.id),\n ('expected_end', '<', time_late.strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ('plan_line_id', '!=', False)\n ], \n context=context)\n else:\n pre_time_early = self.read(cr, uid, pre_att_ids[0], ['name_tz'], context=context)['name_tz']\n time_start_early = datetime.strptime(pre_time_early, DEFAULT_SERVER_DATETIME_FORMAT) + timedelta(minutes = max_early)\n working_hour_ids = working_hour_obj.search(cr, uid, [('employee_id', '=', att.employee_id.id),\n ('expected_start', '>', time_start_early.strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ('expected_end', '<', time_late.strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ('plan_line_id', '!=', False)\n ], context=context, order='date DESC')\n if not working_hour_ids:\n return False\n else:\n for working in working_hour_obj.browse(cr, uid, working_hour_ids, context=context):\n # check public holiday\n holiday_ids = self.pool.get('trobz.hr.public.holidays').search(cr, uid, [('date','=', working.date)], context=context) \n if holiday_ids:\n return False\n # full\n sql = '''\n SELECT line.id\n FROM hr_holidays_line line JOIN hr_holidays h ON line.holiday_id = h.id\n WHERE h.employee_id = %d\n AND line.first_date < '%s' AND line.last_date > '%s'\n AND h.state = 'validate'\n '''% (working.employee_id.id, working.date, working.date)\n cr.execute(sql)\n if cr.fetchall():\n continue\n else:\n sql = False\n expected_start = trobz_base_obj.convert_from_utc_to_current_timezone(cr, uid, working.expected_start, False, DEFAULT_SERVER_DATETIME_FORMAT, False, context=context)\n time_start = expected_start.hour\n expected_end = trobz_base_obj.convert_from_utc_to_current_timezone(cr, uid, working.expected_end, False, DEFAULT_SERVER_DATETIME_FORMAT, False, context=context)\n time_end = expected_end.hour\n # wh afternoon\n if time_start >= 12 and time_end >=12:\n sql = '''\n SELECT line.id\n FROM hr_holidays_line line JOIN hr_holidays h ON line.holiday_id = h.id\n WHERE h.employee_id = %d\n AND (line.first_date = '%s' OR line.last_date = '%s')\n AND h.state = 'validate'\n AND (line.last_date_type = 'afternoon' OR line.first_date_type = 'afternoon')\n '''% (working.employee_id.id, working.date, working.date)\n # wh morning\n elif time_start < 12 and time_end <= 12:\n sql = '''\n SELECT line.id\n FROM hr_holidays_line line JOIN hr_holidays h ON line.holiday_id = h.id\n WHERE h.employee_id = %d\n AND (line.first_date = '%s' OR line.last_date = '%s')\n 
AND h.state = 'validate'\n AND (line.last_date_type = 'morning' OR line.first_date_type = 'morning')\n '''% (working.employee_id.id, working.date, working.date)\n \n if sql:\n cr.execute(sql)\n if cr.fetchall():\n continue\n # wh full\n sql = '''\n SELECT line.id\n FROM hr_holidays_line line JOIN hr_holidays h ON line.holiday_id = h.id\n WHERE h.employee_id = %d\n AND (line.first_date = '%s' OR line.last_date = '%s')\n AND h.state = 'validate'\n AND (line.last_date_type = 'full' OR line.first_date_type = 'full')\n '''% (working.employee_id.id, working.date, working.date)\n cr.execute(sql)\n res = cr.fetchall()\n if res or (time_late >= expected_start and time_late <= expected_end):\n continue\n return True\n return False",
"def awaiting_payment(self):",
"def already_spent_redemption() -> UnsuccessfulRedemption:\n return UnsuccessfulRedemption(\"double-spend\")",
"def set_in_check(self, state):\n\n self._in_check = state",
"def mdm_checkin(request):\n data = json.loads(request.body)\n laptop = get_object_or_404(Laptop, api_key_hash=sha256(data['APIKey'].encode('utf-8')).hexdigest(),\n mdm_enrolled=True)\n system_profiles = []\n user_profiles = []\n system_profiles_remove = []\n user_profiles_remove = []\n password = None\n\n for record in InstallationRecord.objects.filter(device=laptop, profile__isnull=False, version=\"RM\", active=True):\n profile = record.profile\n if profile.scope == 'System':\n system_profiles_remove.append(profile.pk)\n else:\n user_profiles_remove.append(profile.pk)\n password = settings.MDM_PASS\n\n for profile in laptop.pending.all():\n if profile.pk not in system_profiles_remove and profile.pk not in user_profiles_remove:\n if profile.scope == 'System':\n system_profiles.append(profile.pk)\n else:\n user_profiles.append(profile.pk)\n\n if len(system_profiles) > 0 or len(user_profiles) > 0 or len(system_profiles_remove) > 0 or \\\n len(user_profiles_remove) > 0:\n response_data = {\"status\": 100, \"system_profiles\": system_profiles, \"user_profiles\": user_profiles,\n \"system_profiles_remove\": system_profiles_remove, \"user_profiles_remove\": user_profiles_remove,\n \"removal_password\": password, \"password\": laptop.admin_password}\n else:\n response_data = {\"status\": 200}\n laptop.last_checkin = timezone.now()\n laptop.last_ip = data['networkIP']\n laptop.save()\n return JsonResponse(response_data)",
"def tickets(number, day, premium_seating):\n #fill in your code here. \n return 0.0",
"def use(self):\n if self.flag:\n if self.credit < self.price_of_trip:\n return \"Your credit is not enough, please increase your credit\"\n else:\n self.credit -= self.price_of_trip\n return \"Done\"\n else:\n return \"Sorry, your card has expired.\"",
"def landlord_button_deposite_received(self):\n payment_id = False\n acc_pay_form = self.env.ref(\n 'account.view_account_payment_form')\n account_jrnl_obj = self.env['account.journal'].search(\n [('type', '=', 'sale')], limit=1)\n payment_obj = self.env['account.payment']\n payment_method_id = self.env.ref(\n 'account.account_payment_method_manual_in')\n for tenancy_rec in self:\n if tenancy_rec.acc_pay_dep_rec_id and \\\n tenancy_rec.acc_pay_dep_rec_id.id:\n return {\n 'view_type': 'form',\n 'view_id': acc_pay_form.id,\n 'view_mode': 'form',\n 'res_model': 'account.payment',\n 'res_id': tenancy_rec.acc_pay_dep_rec_id.id,\n 'type': 'ir.actions.act_window',\n 'target': 'current',\n 'context': self._context,\n }\n if tenancy_rec.deposit == 0.00:\n raise Warning(_('Please Enter Deposit amount.'))\n if tenancy_rec.deposit < 0.00:\n raise Warning(\n _('The deposit amount must be strictly positive.'))\n vals = {\n 'partner_id': tenancy_rec.property_owner_id.parent_id.id,\n 'partner_type': 'customer',\n 'journal_id': account_jrnl_obj.id,\n 'payment_type': 'inbound',\n 'communication': 'Deposit Received',\n 'tenancy_id': tenancy_rec.id,\n 'amount': tenancy_rec.deposit,\n 'property_id': tenancy_rec.property_id.id,\n 'payment_method_id': payment_method_id.id\n }\n payment_id = payment_obj.create(vals)\n return {\n 'view_mode': 'form',\n 'view_id': acc_pay_form.id,\n 'view_type': 'form',\n 'res_id': payment_id and payment_id.id,\n 'res_model': 'account.payment',\n 'type': 'ir.actions.act_window',\n 'nodestroy': True,\n 'target': 'current',\n 'domain': '[]',\n 'context': {\n 'close_after_process': True,\n }\n }",
"async def legsubmit(self, ctx):\n\n new_value = await self.toggle_dm_setting(ctx.author.id, \"leg_session_submit\")\n\n if new_value:\n message = f\":white_check_mark: You will now receive DMs when you are a member of the \" \\\n f\"{self.bot.mk.LEGISLATURE_CABINET_NAME} \" \\\n f\"and someone submits a Bill or Motion. \" \\\n f\"Note that you will never get a DM when a member of the \" \\\n f\"{self.bot.mk.LEGISLATURE_CABINET_NAME} is the one submitting.\"\n else:\n message = f\":white_check_mark: You will no longer receive DMs when you are a member of the \" \\\n f\"{self.bot.mk.LEGISLATURE_CABINET_NAME} and someone submits a Bill or Motion.\"\n\n await ctx.send(message)",
"def checkbalance(self, dt):\n return",
"def pre_approve(self, cr, uid, ids, context={}):\n \tfor voucher in self.browse(cr, uid, ids, context=context):\n \t if not voucher.department_id.analytic_account_id:\n \t raise osv.except_osv(_('Configration Check!'), _(\"Please add cost center for your department!\"))\n \t periods = self.pool.get('account.period').search(cr, uid, [('date_start','<=',voucher.date),('date_stop','>=',voucher.date),('company_id','=',voucher.company_id.id)], context=context)\n\n\n res=0.0\n if voucher.purpose:\n if not voucher.purpose.account_id: raise osv.except_osv(_('Warning!'), _('Please configure account for this purpose!')) \n voucher_line = {\n \t\t'voucher_id': voucher.id,\n \t\t'partner_id': voucher.partner_id.id,\n \t\t'untax_amount': voucher.amount,\n \t\t'amount': voucher.amount,\n 'name': voucher.narration,\n \t\t'type': 'dr',\n \t\t'account_analytic_id': voucher.department_id.analytic_account_id and voucher.department_id.analytic_account_id.id,\n 'account_id': voucher.purpose.account_id.id,\n \t }\n new_amount = res and res or voucher.amount \n voucher_line.update({'amount':new_amount,'untax_amount':new_amount})\n \t if voucher.line_ids :\n for line in voucher.line_ids:\n \t\t self.pool.get('account.voucher.line').write(cr, uid, line.id, {\n \t\t'voucher_id': voucher.id,\n \t\t'partner_id': voucher.partner_id.id,\n \t\t'untax_amount': res or line.amount,\n \t\t'amount': line.amount,\n 'name': voucher.narration,\n \t\t'type': 'dr',\n \t\t'account_analytic_id': line.account_analytic_id and line.account_analytic_id.id or voucher.department_id.analytic_account_id.id,\n 'account_id': voucher.purpose.account_id.id or line.account_id.id,\n \t }, context=context)\n \t else:\n\n \t\t new_voucher_line = self.pool.get('account.voucher.line').create(cr, uid, voucher_line, context=context)\n context.update({'purchase':True})\n self.create_budget_confirmation(cr, uid, [voucher.id], context)\n \tself.write(cr, uid, ids,{'state': 'preapprove','type':'purchase','ratification':True}, context=context)\n #cxt = context.copy()\n #cxt.update({'type':'ratification'})\n if not super(account_voucher, self).create_budget_confirmation(cr, uid, ids, context=context):\n self.write(cr, uid, ids, {'state': 'approved'}, context=context)\n\n \t'''self.write(cr, uid, ids, {'state': 'preapprove'})\n if not super(account_voucher, self).create_budget_confirmation(cr, uid, ids, context=context):\n self.write(cr, uid, ids, {'state': 'approve','type':'purchase','ratification':True}, context=context)'''\n return True",
"def pay_off_fully(balance, annualInterestRate):\n\n #variable assignment\n currentBalance = balance\n monthlyInterestRate = annualInterestRate/12",
"def ToggleApprovalTracker(self, event):\n pass",
"def _lend(self, \n\t\t\t borrower, \n\t\t\t asked_value):\n\t\tif self.strategy == 1:\n\t\t\tloan_value = min(self.stock, asked_value)\n\t\t\tself.stock -= loan_value\n\t\t\tdebt_link = DebtLink(self, borrower, loan_value * (1.0 + self.interest_rate))\n\t\t\tself.loans.append(debt_link)\n\t\t\tborrower.debt_link = debt_link\n\t\t\treturn loan_value\n\t\telse: return 0.0",
"def out_chky(uid):\n\tx = db.checkouts_today(uid)\n\tif x == None: return False\n\telse: return True",
"async def passing(self, check, *, note=None):\n return await self.mark(check, \"passing\", note=note)",
"def setInDownTime(self, downtime):\n self.adParams['GLIDEIN_In_Downtime'] = str(downtime)",
"def check_to_Done(self,cr,uid,ids,context=None):\n\n for rec in self.browse(cr, uid, ids, context):\n if not rec.maintenance_id: \n raise osv.except_osv(_('ValidateError'), _(\"There Is NO maintenace request refrence to this accident.\"))\n return False\n return True",
"def evaluate_cancellation_pending_due_to_non_pay(self, date_cursor=None):\n pass",
"def on_update(self):\n if self.get('update_request') and not self.is_pending_approval():\n if self.is_revert:\n self.set_as_reverted()\n else:\n self.set_as_success()"
] | [
"0.56058526",
"0.557811",
"0.5529027",
"0.5488796",
"0.52010155",
"0.5192642",
"0.516891",
"0.51390547",
"0.5094012",
"0.50648564",
"0.5062245",
"0.5061174",
"0.49786085",
"0.49332282",
"0.4931753",
"0.49173915",
"0.4892089",
"0.4874537",
"0.48676074",
"0.4837274",
"0.48028046",
"0.47757152",
"0.47741687",
"0.47650734",
"0.4737783",
"0.47371477",
"0.47341275",
"0.4727474",
"0.47264427",
"0.4719434"
] | 0.5966537 | 0 |
Decode next layer protocol. | def _decode_next_layer(self, *args, **kwargs): # pylint: disable=signature-differs
raise UnsupportedCall(f"'{self.__class__.__name__}' object has no attribute '_decode_next_layer'") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _decode_next_layer(self, dict_, length=None):\n # make next layer protocol name\n proto = str(self._prot or 'Raw').lower()\n\n # make BytesIO from frame package data\n bytes_ = io.BytesIO(self._file.read(dict_['len']))\n info, protochain = self._import_next_layer(bytes_, length)\n\n # write info and protocol chain into dict\n self._protos = ProtoChain(self._prot, protochain)\n dict_[proto] = info\n dict_['protocols'] = self._protos.chain\n return dict_",
"def decode(data):\n raise NotImplementedError",
"def decode(data): #@NoSelf",
"def _dinamic_decode(self):\n raise NotImplementedError",
"def _DecodeFn():\n _, decode_dict = self._model.ConstructDecodeGraph(\n input_batch=inp_instance.TpuDequeueBatch())\n self.decode_nm = py_utils.NestedMap(decode_dict)\n return self.decode_nm.Flatten()",
"def _DecodeStep():\n _, decode_dict = self._model.ConstructDecodeGraph()\n self.decode_nm = py_utils.NestedMap(decode_dict)\n return [self._OutfeedEnqueue(decode_dict)]",
"def decode(self, code):\n raise NotImplementedError",
"def decoder(self):\n pass",
"def handle_decode(self, encoded_data):\n \n config.COD_PROMPT = config.DEC_PROMPT\n print config.DEC_PROMPT + \" decoding...\"\n \n # while there is another decoder, run each item through the next decoder\n data = encoded_data\n success = False\n for decoder in self.decoder_list:\n current_decoder = decoder()\n success, data = self.recursive_decoder(current_decoder.decode, data)\n if not success:\n break\n print config.DEC_PROMPT + \"%s decoded to '%s'\" % ( current_decoder.name(),data)\n return success, data",
"def _decode(self):\n \n self.version = int(data_to_hex_str(self.packet[0])[2])\n self.header_len = int(data_to_hex_str(self.packet[0])[3]) * 4\n self.type_of_service = data_to_hex_str(self.packet[1:2])\n self.total_len = int(data_to_hex_str(self.packet[2:4]), 16)\n self.id = data_to_hex_str(self.packet[4:6])\n \n #parse the flags fields(reservedbit, don't fragment, more fragment)\n if ((ord(self.packet[6]) & (1 << 7)) != 0):\n self.flags_reservedbit = 1\n else:\n self.flags_reservedbit = 0\n #endof if\n \n if ((ord(self.packet[6]) & (1 << 6)) != 0):\n self.flags_dont_fragment = 1\n else:\n self.flags_dont_fragment = 0\n #endof if\n \n if ((ord(self.packet[6]) & (1 << 5)) != 0):\n self.flags_more_fragment = 1\n else:\n self.flags_more_fragment = 0\n #endof if\n \n #parse the offset field(in packet[6:7]): 00011111 & packet[6] (to filter flags) -->> get packet[6:7] in hex_str\n #tmp = str(31 & ord(self.packet[6]))\n self.fragment_offset = int(data_to_hex_str(self.packet[6:8]), 16)\n if (self.fragment_offset >= (1 << 13)):\n #take away the flags fields: 00011111 11111111 & self.fragment_offset\n self.fragment_offset = self.fragment_offset & ((1 << 13) - 1) \n \n self.TTL = ord(self.packet[8])\n self.protocol = IPPROTO[ord(self.packet[9])]\n self.header_checksum = data_to_hex_str(self.packet[10:12])\n \n self.src = str(ord(self.packet[12])) + '.' + str(ord(self.packet[13])) + '.' + \\\n str(ord(self.packet[14])) + '.' + str(ord(self.packet[15]))\n self.dst = str(ord(self.packet[16])) + '.' + str(ord(self.packet[17])) + '.' + \\\n str(ord(self.packet[18])) + '.' + str(ord(self.packet[19]))\n \n if (self.header_len > 20):\n self.opt_paddings = self.packet[20 : (self.header_len)]",
"def decode(decode_format):\n return output_from_decode",
"def decode(self):\n for layer in self.layers:\n layer.decode()",
"def decode(self):\n for layer in self.layers:\n layer.decode()",
"def decode(self, z):\n raise NotImplementedError",
"def decode(cls, data):\n # Message (PHYPayload) must be at least 1 byte\n if len(data) < 1:\n raise DecodeError()\n # Decode the MAC Header\n mhdr = MACHeader.decode(data[0])\n # Decode the Message\n if mhdr.mtype == JOIN_REQUEST:\n return JoinRequestMessage.decode(mhdr, data)\n elif mhdr.mtype == UN_DATA_UP or mhdr.mtype == CO_DATA_UP:\n return MACDataUplinkMessage.decode(mhdr, data)\n else:\n return None",
"def decode(self, encoded):",
"def _define_decoder(self):\n raise NotImplementedError",
"def decode(self, data: bytes) -> bytes:\n ...",
"def decode(self): # pragma: no cover\n pass",
"def decode(self):\n if self.ciphered:\n msg = self.result \n self.result = ''\n else:\n msg = self.msg\n try:\n self.result = self.doDecode(msg,self.shift)\n except Exception as e:\n raise CipherError(\"decoding failure {}.\".format(e))\n self.ciphered = False\n return self.result",
"def doDecode(self):\n raise CipherError(\"override this funct and return the decoded msg\")",
"def _decode_end(_fp):\n return 0",
"def test_decode(self):\n pass # TODO(tlarsen)",
"def decode(self,m):\n raise NotImplementedError('subclasses must override decode()!')",
"def bdecode(f):\n\tbtype = TYPES[f.read(1)]\n\tif btype is not None:\n\t\tf.seek(-1, SEEK_CUR)\n\t\treturn DECODERS[btype](f)\n\telse: #Used in dicts and lists to designate an end\n\t\treturn None",
"def _recv_protocol(self):\n if not self._protocol_recv:\n try:\n data = self._read_bytes(1, timeout=1.0)\n if len(data) == 0:\n self.close()\n raise PipeClosed()\n peer_protocol = struct.unpack('>B', data)[0]\n self._protocol = min(self._protocol or pickle.HIGHEST_PROTOCOL, peer_protocol)\n self._protocol_recv = True\n self._serializer = _PickleSerializer(self._protocol)\n except (OSError, socket.error):\n self.close()\n raise PipeClosed()",
"def decode(cls, data):\n if len(data) == 0:\n return None\n cid = struct.unpack('B', data[0])[0]\n if cid == LINKCHECKREQ:\n return LinkCheckReq.decode(data)\n elif cid == LINKADRANS:\n return LinkADRAns.decode(data)\n # TODO\n #elif cid == DUTYCYCLEANS:\n # return DutyCycleReq.decode(data)\n #elif cid == RXPARAMSETUPANS:\n # return RxParamSetupReq.decode(data)\n #elif cid == DEVSTATUSANS:\n # return DevStatusReq.decode(data)\n #elif cid == NEWCHANNELANS:\n # return NewChannelReq.decode(data)\n #elif cid == RXTIMINGSETUPANS:\n # return RxTimingSetupReq.decode(data)\n else:\n return None",
"def decode(self, s):",
"def decode(self, s):",
"def greedy_decode(self, z):\r\n\r\n raise NotImplementedError"
] | [
"0.75996035",
"0.6549479",
"0.65207684",
"0.6393973",
"0.6335503",
"0.6324604",
"0.6315522",
"0.62680393",
"0.62302494",
"0.6203099",
"0.61799216",
"0.6160706",
"0.6160706",
"0.6118852",
"0.6044695",
"0.6001132",
"0.5993506",
"0.5988984",
"0.59845906",
"0.59568155",
"0.5906239",
"0.5903562",
"0.5901664",
"0.58828473",
"0.5867668",
"0.5863176",
"0.58596295",
"0.583821",
"0.583821",
"0.58307487"
] | 0.67941993 | 1 |
Import next layer extractor. | def _import_next_layer(self, *args, **kwargs): # pylint: disable=signature-differs
raise UnsupportedCall(f"'{self.__class__.__name__}' object has no attribute '_import_next_layer'") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _import_next_layer(self, file_, length):\n if self._prot == 'Ethernet':\n from .link import Ethernet as Protocol\n elif self._prot == 'IPv4':\n from .internet import IPv4 as Protocol\n elif self._prot == 'IPv6':\n from .internet import IPv6 as Protocol\n else:\n data = file_.read(*[length]) or None\n return data, None\n next_ = Protocol(file_, length)\n return next_.info, next_.protochain",
"def set_next_layer(self, layer):\n self._next_layer = layer",
"def extractor(self):\n \n if self._extractor is None:\n if self.extractor_type == '29v2':\n self._extractor = self.build_extractor_29layers_v2(name='extract29v2', block=self._res_block, layers=[1, 2, 3, 4])\n elif self.extractor_type == '29':\n self._extractor = self.build_extractor_29layers(name='extract29', block=self._res_block, layers=[1, 2, 3, 4])\n elif self.extractor_type == '9':\n self._extractor = self.build_extractor_9layers(name='extract9')\n \n if self.extractor_weights is not None:\n self._extractor.load_weights(self.extractor_weights)\n \n return self._extractor",
"def handle(self, configuration_options=[{'index': 0}], *args, **kwargs):\n layers = self.import_file(configuration_options=configuration_options)\n\n for layer, config in layers:\n config['handler_results'] = self.run_import_handlers(layer, config)\n\n return layers",
"def _imported_functions(self):\n\n i = 0\n while 1:\n thunk = obj.Object('_IMAGE_THUNK_DATA',\n offset = self.obj_parent.DllBase + self.OriginalFirstThunk +\n i * self.obj_vm.profile.get_obj_size('_IMAGE_THUNK_DATA'),\n vm = self.obj_native_vm)\n\n # We've reached the end when the element is zero \n if thunk == None or thunk.AddressOfData == 0:\n break\n\n o = obj.NoneObject(\"Ordinal not accessible?\")\n n = obj.NoneObject(\"Imported by ordinal?\")\n f = obj.NoneObject(\"FirstThunk not accessible\")\n\n # If the highest bit (32 for x86 and 64 for x64) is set, the function is \n # imported by ordinal and the lowest 16-bits contain the ordinal value. \n # Otherwise, the lowest bits (0-31 for x86 and 0-63 for x64) contain an \n # RVA to an _IMAGE_IMPORT_BY_NAME struct. \n if thunk.OrdinalBit == 1:\n o = thunk.Ordinal & 0xFFFF\n else:\n iibn = obj.Object(\"_IMAGE_IMPORT_BY_NAME\",\n offset = self.obj_parent.DllBase +\n thunk.AddressOfData,\n vm = self.obj_native_vm)\n o = iibn.Hint\n n = iibn.Name\n\n # See if the import is bound (i.e. resolved)\n first_thunk = obj.Object('_IMAGE_THUNK_DATA',\n offset = self.obj_parent.DllBase + self.FirstThunk +\n i * self.obj_vm.profile.get_obj_size('_IMAGE_THUNK_DATA'),\n vm = self.obj_native_vm)\n if first_thunk:\n f = first_thunk.Function.v()\n\n yield o, f, str(n or '')\n i += 1",
"def import_file(self, *args, **kwargs):\n filename = self.file\n self.completed_layers = []\n err = GdalErrorHandler()\n gdal.PushErrorHandler(err.handler)\n gdal.UseExceptions()\n configuration_options = kwargs.get('configuration_options', [{'index': 0}])\n\n # Configuration options should be a list at this point since the importer can process multiple layers in a\n # single import\n if isinstance(configuration_options, dict):\n configuration_options = [configuration_options]\n\n data, inspector = self.open_source_datastore(filename, *args, **kwargs)\n\n datastore_layers = inspector.describe_fields()\n\n if len(datastore_layers) == 0:\n logger.debug('No Dataset found')\n\n layers_info = []\n\n # Add index for any layers configured by name\n for layer_configuration in configuration_options:\n if 'layer_name' in layer_configuration:\n lookup = 'layer_name'\n elif 'index' in layer_configuration:\n lookup = 'index'\n else:\n lookup = None\n logger.debug('could not find lookup')\n continue\n\n for datastore_layer in datastore_layers:\n if datastore_layer.get(lookup) == layer_configuration.get(lookup):\n layer_configuration.update(datastore_layer)\n layers_info.append(layer_configuration)\n\n for layer_options in layers_info:\n if layer_options['raster']:\n \"\"\"\n File is a raster, we need to convert into optimized GeoTiff\n and skip any further testing or loading into target_store\n \"\"\"\n # Increment filename to make sure target doesn't exists\n filedir, filebase = os.path.split(filename)\n outfile = '%s.tif' % os.path.splitext(filebase)[0]\n fileout = increment_filename(os.path.join(RASTER_FILES, outfile))\n raster_import(layer_options['path'], fileout)\n self.completed_layers.append([fileout, layer_options])\n else:\n target_file, _ = self.open_target_datastore(self.target_store)\n target_create_options = []\n\n # Prevent numeric field overflow for shapefiles https://trac.osgeo.org/gdal/ticket/5241\n if target_file.GetDriver().GetName() == 'PostgreSQL':\n target_create_options.append('PRECISION=NO')\n\n layer_options['modified_fields'] = {}\n layer = data.GetLayer(layer_options.get('index'))\n layer_name = layer_options.get('name', layer.GetName().lower())\n layer_type = self.get_layer_type(layer, data)\n srs = layer.GetSpatialRef()\n\n if layer_name.lower() == 'ogrgeojson':\n try:\n layer_name = os.path.splitext(os.path.basename(filename))[0].lower()\n except IndexError:\n pass\n\n layer_name = launder(str(layer_name))\n\n # default the layer to 4326 if a spatial reference is not provided\n if not srs:\n srs = osr.SpatialReference()\n srs.ImportFromEPSG(4326)\n\n # pass the srs authority code to handlers\n if srs.AutoIdentifyEPSG() == 0:\n layer_options['srs'] = '{0}:{1}'.format(srs.GetAuthorityName(None), srs.GetAuthorityCode(None))\n\n n = 0\n while True:\n n += 1\n try:\n target_layer = self.create_target_dataset(target_file, layer_name, srs, layer_type,\n options=target_create_options)\n except RuntimeError as e:\n # logger.exception('exception in creating target dataset')\n # the layer already exists in the target store, increment the name\n if 'Use the layer creation option OVERWRITE=YES to replace it.' 
in e.message:\n layer_name = increment(layer_name)\n\n # try 100 times to increment then break\n if n >= 100:\n break\n\n continue\n else:\n raise e\n break\n\n # adding fields to new layer\n layer_definition = ogr.Feature(layer.GetLayerDefn())\n source_fid = None\n\n wkb_field = 0\n\n for i in range(layer_definition.GetFieldCount()):\n\n field_def = layer_definition.GetFieldDefnRef(i)\n\n if field_def.GetName() == target_layer.GetFIDColumn() and field_def.GetType() != 0:\n field_def.SetType(0)\n\n if field_def.GetName() != 'wkb_geometry':\n target_layer.CreateField(field_def)\n new_name = target_layer.GetLayerDefn().GetFieldDefn(i - wkb_field).GetName()\n old_name = field_def.GetName()\n\n if new_name != old_name:\n layer_options['modified_fields'][old_name] = new_name\n\n if old_name == target_layer.GetFIDColumn() and not layer.GetFIDColumn():\n source_fid = i\n else:\n wkb_field = 1\n\n if wkb_field is not 0:\n layer.SetIgnoredFields(['wkb_geometry'])\n\n for i in range(0, layer.GetFeatureCount()):\n feature = layer.GetFeature(i)\n\n if feature and feature.geometry():\n\n if not layer.GetFIDColumn():\n feature.SetFID(-1)\n\n if feature.geometry().GetGeometryType() != target_layer.GetGeomType() and \\\n target_layer.GetGeomType() in range(4, 7):\n\n conversion_function = ogr.ForceToMultiPolygon\n\n if target_layer.GetGeomType() == 5:\n conversion_function = ogr.ForceToMultiLineString\n\n elif target_layer.GetGeomType() == 4:\n conversion_function = ogr.ForceToMultiPoint\n\n geom = ogr.CreateGeometryFromWkb(feature.geometry().ExportToWkb())\n feature.SetGeometry(conversion_function(geom))\n\n if source_fid is not None:\n feature.SetFID(feature.GetField(source_fid))\n\n try:\n target_layer.CreateFeature(feature)\n\n except:\n for field in range(0, feature.GetFieldCount()):\n if feature.GetFieldType(field) == ogr.OFTString:\n try:\n feature.GetField(field).decode('utf8')\n except UnicodeDecodeError:\n feature.SetField(field, decode(feature.GetField(field)))\n except AttributeError:\n continue\n try:\n target_layer.CreateFeature(feature)\n except err as e:\n logger.error('Create feature failed: {0}'.format(gdal.GetLastErrorMsg()))\n raise e\n\n self.completed_layers.append([target_layer.GetName(), layer_options])\n\n return self.completed_layers",
"def add_layer(self, layer):\n assert isinstance(layer, torch.nn.Module)\n setattr(self, 'layer'+str(self._layer_counter), layer)\n self._layer_counter += 1\n # layer indexing : layer 0 is closest to input",
"def importer():\n pass",
"def zoo_import(name, head=''):\n net = gz.get_model(name, pretrained=True)\n export_block(head + name, net, preprocess=True)",
"def import_ops(self):\n if self.is_training:\n self.lr = tf.get_collection_ref(\"lr\")[0]\n self.new_lr = tf.get_collection_ref(\"new_lr\")[0]\n self.lr_update = tf.get_collection_ref(\"lr_update\")[0]\n\n self.cost = tf.get_collection_ref(util.with_prefix(self.name, \"cost\"))[0]\n self.initial_state = util.import_state_tuples(\n self.initial_state, self.initial_state_name, self.name)\n self.final_state = util.import_state_tuples(\n self.final_state, self.final_state_name, self.name)",
"def get_feature_extractor():\n net = alexnet(pretrained=False)\n net.load_state_dict(model_zoo.load_url(model_urls['alexnet'], \n model_dir=model_urls['local']))\n\n feature_extractor = nn.Sequential(*list(net.classifier.children())[:-1])\n net.classifier = feature_extractor\n net.eval()\n return net",
"def _init_layers(self):\n self._init_predictor()\n if self.use_edge_fusion:\n self._init_edge_module()",
"def add_layers(self, layers):\n\n existing_layers = self.layers\n assert len(existing_layers) > 0\n for layer in layers:\n assert layer.get_mlp() is None\n layer.set_mlp(self)\n layer.set_input_space(existing_layers[-1].get_output_space())\n existing_layers.append(layer)\n assert layer.layer_name not in self.layer_names\n self.layer_names.add(layer.layer_name)",
"def import_forward(self):\n self.import_property('OG')\n self.import_property('IBU')\n self.import_property('ABV')\n self.import_property('SRM')",
"def add_layer(self, full_path, delimiter=\"::\"):\n if self.find_layer_from_fullpath(full_path):\n return self.find_layer_from_fullpath(full_path)\n else:\n # Cumulative List Split\n # Using accumulate() + join()\n temp = full_path.split(delimiter)\n res = list(accumulate(temp, lambda x, y: delimiter.join([x, y])))\n parent_layer = Layer()\n for part in res:\n if self.find_layer_from_fullpath(part):\n parent_layer = self.find_layer_from_fullpath(part)\n continue\n else:\n *parent_name, name = part.split(delimiter)\n _layer = Layer() # Create Layer\n _layer.Name = name # Set Layer Name\n if parent_layer:\n _layer.ParentLayerId = parent_layer.Id # Set parent Id\n self._file3dm.Layers.Add(_layer) # Add Layer\n _layer = self._file3dm.Layers.FindName(name, parent_layer.Id)\n\n # set parent layer to this layer (for next iter)\n parent_layer = _layer\n # Sets Layer as class attr\n setattr(UmiLayers, _layer.FullPath, _layer)\n return _layer",
"def set_next(self, next_layer):\n self.next_layer = next_layer",
"def layer_from_name(layer_name):\n if layer_name in _layer_name_cache:\n return _layer_name_cache[layer_name]\n layer_names = layer_name.split('.')\n layer_module, module_layer_name = layer_names[:-1], layer_names[-1]\n module_name = '.'.join(layer_module)\n module = import_name(module_name)\n try:\n return getattr(module, module_layer_name)\n except AttributeError:\n # the default error is very uninformative:\n # AttributeError: 'module' object has no attribute 'DemoLayer'\n # it doesn't say *which* module\n raise AttributeError('module %r has no attribute %r'\n % (module_name, module_layer_name))",
"def _decode_next_layer(self, *args, **kwargs): # pylint: disable=signature-differs\n raise UnsupportedCall(f\"'{self.__class__.__name__}' object has no attribute '_decode_next_layer'\")",
"def _decode_next_layer(self, dict_, length=None):\n # make next layer protocol name\n proto = str(self._prot or 'Raw').lower()\n\n # make BytesIO from frame package data\n bytes_ = io.BytesIO(self._file.read(dict_['len']))\n info, protochain = self._import_next_layer(bytes_, length)\n\n # write info and protocol chain into dict\n self._protos = ProtoChain(self._prot, protochain)\n dict_[proto] = info\n dict_['protocols'] = self._protos.chain\n return dict_",
"def _analyse_stmt_Import(self, statement: ast.Import, *, next: CFNode) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)",
"def connect_layers(self):\n if not self.check():\n msg = \"Failed to check neural network.\"\n print(msg)\n logging.error(msg)\n return\n\n # 1. set input layer\n pre_layer = self.input_layer\n for layer in self.hidden_layers:\n layer.set_input_layer(pre_layer)\n pre_layer = layer\n self.output_layer.set_input_layer(pre_layer)\n\n # 2. set output layer\n next_layer = self.output_layer\n for layer in reversed(self.hidden_layers):\n layer.set_next_layer(next_layer)\n next_layer = layer\n self.input_layer.set_next_layer(next_layer)\n\n # 3. call layer init\n self.input_layer.init()\n for layer in self.hidden_layers:\n layer.init()\n self.output_layer.init()\n\n return",
"def run(layers):",
"def forward(self, curr_layer):\n if self.cin == self.cout and self.stride == 1:\n return self.path(curr_layer) + curr_layer\n else:\n return self.path(curr_layer)",
"def forward(self, curr_layer):\n if self.cin == self.cout and self.stride == 1:\n return self.path(curr_layer) + curr_layer\n else:\n return self.path(curr_layer)",
"def forward(self, curr_layer):\n if self.cin == self.cout and self.stride == 1:\n return self.path(curr_layer) + curr_layer\n else:\n return self.path(curr_layer)",
"def infer_layer(f):\n return layer_host",
"def import_module(self, location, name):",
"async def async_step_import(self, import_data: dict[str, str]) -> FlowResult:\n import_source = import_data.pop(\"import_source\")\n if import_source == \"geography_by_coords\":\n return await self.async_step_geography_by_coords(import_data)\n return await self.async_step_geography_by_name(import_data)",
"def _post_install(dir_):\n scapy_locations = get_scapy_locations(get_site_packages())\n for scapy_location in scapy_locations:\n scapy_config = os.path.join(scapy_location, \"config.py\")\n processing_layer_list = False\n for line in fileinput.input(scapy_config, inplace=1, backup=\".bak\"):\n if line.strip().startswith(\"load_layers\"):\n print(line, end=\"\")\n processing_layer_list = True\n else:\n if processing_layer_list and line.strip().endswith(\"]\"):\n # TODO, consider single quote strings, and consider lonely\n # ] characters\n last_quote = line.rfind(\"\\\"\")\n if last_quote > 0 and \"http2\" not in line:\n print(\"%s, \\\"http2\\\" ]\" % line[\n :last_quote + 1], end=\"\")\n processing_layer_list = False\n else:\n print(line)\n processing_layer_list = False\n else:\n print(line, end=\"\")",
"def forward(self, input):\n return self.layers(input)"
] | [
"0.6111195",
"0.54837346",
"0.54835594",
"0.5450349",
"0.5450116",
"0.54399455",
"0.5409996",
"0.5303996",
"0.52164537",
"0.52136886",
"0.516096",
"0.51069194",
"0.5101633",
"0.5099417",
"0.5078807",
"0.5042868",
"0.4991108",
"0.49878004",
"0.4973067",
"0.49313796",
"0.49146825",
"0.48918876",
"0.48812124",
"0.48812124",
"0.48812124",
"0.4869186",
"0.4861221",
"0.4856358",
"0.48529756",
"0.4848147"
] | 0.66877997 | 0 |
Build QA data dict from the nights | def build_data(self):
from desiutil.io import combine_dicts
# Loop on exposures
odict = {}
for qanight in self.qa_nights:
for qaexp in qanight.qa_exps:
# Get the exposure dict
idict = write_qa_exposure('foo', qaexp, ret_dict=True)
odict = combine_dicts(odict, idict)
# Finish
self.data = odict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_data(self, inroot=None):\n self.data = {}\n # Load\n for night in self.mexp_dict.keys():\n qaNight = QA_Night(night, specprod_dir=self.specprod_dir, qaprod_dir=self.qaprod_dir)\n qaNight.load_data()\n #\n self.data[night] = qaNight.data[night]",
"def raw_data() -> Dict:\n return {\"neighbourhood\":\"Buttes-Montmartre\",\"room_type\":\"Entire home/apt\",\"minimum_nights\":1.555,\"mois\":2,\"voyageurs\":2.5,\"chambres\":1,\"lits\":1,\"salle_de_bains\":1}",
"def to_dict(self) -> Dict[str, Any]:\n return {\n \"1Q\": {\n \"{}\".format(qs.id): {\n \"f1QRB\": qs.f1QRB,\n \"f1QRB_std_err\": qs.f1QRB_std_err,\n \"f1Q_simultaneous_RB\": qs.f1Q_simultaneous_RB,\n \"f1Q_simultaneous_RB_std_err\": qs.f1Q_simultaneous_RB_std_err,\n \"fRO\": qs.fRO,\n \"T1\": qs.T1,\n \"T2\": qs.T2,\n \"fActiveReset\": qs.fActiveReset,\n }\n for qs in self.qubits_specs\n },\n \"2Q\": {\n \"{}-{}\".format(*es.targets): {\n \"fBellState\": es.fBellState,\n \"fCZ\": es.fCZ,\n \"fCZ_std_err\": es.fCZ_std_err,\n \"fCPHASE\": es.fCPHASE,\n \"fCPHASE_std_err\": es.fCPHASE_std_err,\n \"fXY\": es.fXY,\n \"fXY_std_err\": es.fXY_std_err,\n \"fISWAP\": es.fISWAP,\n \"fISWAP_std_err\": es.fISWAP_std_err,\n }\n for es in self.edges_specs\n },\n }",
"def _build_results(self):\n results = {}\n cols = []\n for pol in POLLUTANTS:\n for adj in ADJUSTMENTS:\n cols.append(get_rate_column(pol, adjustment=adj, generated=False))\n cols.append(get_column(pol, adjustment=adj))\n cols.append(\"net_consumed_mwh\")\n for ba in self.regions:\n results[ba] = pd.DataFrame(\n index=self.generation.index, columns=cols, dtype=np.float64\n )\n return results",
"def nutrition_data(self):\n data = dict()\n\n # get required data, generally from nutrient fields but some special cases\n data['cost'] = self.best_price\n data['grams'] = settings.STANDARD_WEIGHT # data stored per KG or 100g\n if self.serving:\n data['grams_serve'] = self.serving # optional serving size\n for k in settings.NUTRITION_DATA_ITEMS_BASIC:\n data[k] = getattr(self,k)\n\n return add_nutrition_ratios(data) # generate ratios and values from above",
"def _build_contest_kwargs(self, row, sheet):\n\n jurisdiction = str(\n sheet.cell(\n rowx=row,\n colx=self.jurisdiction_index).value).strip()\n\n return {\n 'office': sheet.cell(\n rowx=row,\n colx=self.contest_index).value.strip(),\n 'jurisdiction': jurisdiction\n }",
"def _aiida_ndb_qp(self, data ):\n pdata = ArrayData()\n pdata.set_array('Eo', numpy.array(data['Eo']))\n pdata.set_array('E_minus_Eo', numpy.array(data['E-Eo']))\n pdata.set_array('Z', numpy.array(data['Z']))\n pdata.set_array('qp_table', numpy.array(data['qp_table']))\n try:\n pdata.set_array('So', numpy.array(data['So']))\n except KeyError:\n pass\n return pdata",
"def test1():\n print( 'testing state data processing...')\n fname = \"HPI_PO_state.txt\"\n data = indexTools.read_state_house_price_data( \"data/\" + fname )\n\n answer = dict()\n answer[\"HPI_PO_state.txt 1993 1\"] = [('UT', 117.69), ('OR', 116.94)]\n answer[\"HPI_PO_state.txt 1993 3\"] = [('UT', 128.49), ('CO', 125.16)]\n answer[\"HPI_PO_state.txt 1993 None\"] = [('UT', 125.77499999999999), ('CO', 122.3775)]\n answer[\"HPI_PO_state.txt 1997 1\"] = [('OR', 162.61), ('MT', 162.09)]\n answer[\"HPI_PO_state.txt 1997 3\"] = [('OR', 166.34), ('CO', 162.8)]\n answer[\"HPI_PO_state.txt 1997 None\"] = [('OR', 164.875), ('MT', 162.20499999999998)]\n answer[\"HPI_PO_state.txt 2010 1\"] = [('MT', 298.92), ('WY', 281.91)]\n answer[\"HPI_PO_state.txt 2010 3\"] = [('MT', 293.55), ('WY', 281.33)]\n answer[\"HPI_PO_state.txt 2010 None\"] = [('MT', 292.9875), ('WY', 281.6325)]\n\n for year in [ 1993, 1997, 2010]:\n for qtr in [ 1, 3, None]:\n\n if qtr != None:\n results = periodRanking.quarter_data( data, year, qtr )\n else:\n results = periodRanking.annual_data( indexTools.annualize( data), year )\n key = fname + \" \" + str(year) + \" \" + str(qtr) \n #print( key )\n #if key in answer:\n print( fname, year, qtr, \":\", ( results[1:3] == answer[ key] ))\n #else:\n # print( fname, year, qtr, \":\", \"incorrect\", results[1:3] )\n return",
"def extractData(self) -> Dict[str, str]:\n if self.hasScore():\n result = {}\n result[\"date\"] = self.getMatchDate()\n result[\"score\"] = self.getMatchScore()\n for team in self.TEAMS:\n result[\"team_\" + team] = self.getSquadName(team)\n\n return result\n return None",
"def serialize(self):\n return{\n # 'date': self.date,\n 'q1': self.q1,\n 'q2': self.q2,\n 'q3': self.q3,\n 'q4': self.q4,\n 'finalscore': self.finalscore,\n 'id': self.id,\n }",
"def process_question(qu):\n\n ## global ranking\n rank_info = {}\n rank_info_k = [\"viewcount\",\"score\",\"favoritecount\"]\n for k in rank_info_k:\n rank_info[k] = int(qu[k])\n qu.pop(k,None)\n\n rank_info[\"creationdate\"] = qu[\"creationdate\"]\n\n if qu[\"acceptedanswer\"]:\n qu[\"acceptedanswer\"] = list(qu[\"acceptedanswer\"])\n else:\n qu[\"acceptedanswer\"] = []\n\n qu.pop('comments',None) # discard comments, maybe add back later\n qu[\"rank_info\"] = rank_info\n\n return qu",
"def make_params(query: str = \"bangladesh floods\", site: str = \"www.thedailystar.net\", date_start: str = '1/1/2020',\n date_end: str = '1/1/2021', num_results: int = 100, paper: str = 'theDailyStar') -> (dict,dict):\n query_r = {\n 'query': query,\n 'paper': paper,\n 'date_range': [date_start, date_end]\n }\n params = {\n \"engine\": \"google\",\n \"q\": \"{} site:{}\".format(query, site),\n \"google_domain\": \"google.com\",\n \"gl\": \"bd\",\n \"hl\": \"en\",\n \"tbm\": \"nws\",\n 'filter':'0',\n \"num\": num_results,\n \"tbs\": \"cdr:1,cd_min:{},cd_max:{}\".format(date_start, date_end),\n \"api_key\": os.getenv('SERPAPI_KEY')\n }\n return query_r, params",
"def __init_q_values(self, game_state):\n encoded_game_state = self.__encode_state(game_state)\n if encoded_game_state in self.q_values:\n return\n self.q_values[encoded_game_state] = {}\n for free_seat in self.__get_free_seats(game_state):\n self.q_values[encoded_game_state][free_seat] = (self.INITIAL_STATE_VALUE, 0)",
"def _setData(self):\n #offset = datetime.timedelta(prefs.getNoOfDaysBeforeQuestionSchedule())\n date_formatter = date.getLocaleFormatter(self.request, \"date\", \"long\")\n def _q_data_item(q):\n item = {}\n item[\"qid\"]= \"q_%s\" % q.question_id\n if q.question_number:\n item[\"subject\"] = u\"Q %s %s\" % (q.question_number, q.short_name)\n else:\n item[\"subject\"] = q.short_name\n item[\"title\"] = q.short_name\n item[\"result_item_class\"] = \"workflow-state-%s\" % q.status\n item[\"url\"] = url.set_url_context(\"questions/obj-%s\" % q.question_id)\n item[\"status\"] = misc.get_wf_state(q)\n item[\"status_date\"] = date_formatter.format(q.status_date)\n item[\"owner\"] = \"%s %s\" %(q.owner.first_name, q.owner.last_name)\n item[\"type\"] = _(q.type)\n item[\"to\"] = q.ministry.short_name\n return item\n self._data = [ _q_data_item(question) for question in self.query.all() ]",
"def as_dict(self):\n\n data = {}\n data['text'] = self.question\n data['tier'] = self._get_points(int(self.game_round), int(self.tier))\n try:\n data['source'] = self.source\n except AttributeError:\n data['source'] = False\n print self.question\n print self.answers\n data['answers'] = [\n {'text': answer[False]} if answer.has_key(False) \\\n else {'text': answer[True], 'right': True} \\\n for answer in self.answers\n ]\n if hasattr(self, 'media'):\n def gen_questions():\n q_data = {}\n for f in self.media['question']:\n q_data[self.__type_by_extension(\n os.path.sep.join(os.path.join([self.media_path, f]))\n )] = os.sep.join([self.web_root, f])\n return q_data\n def gen_explanation():\n \"\"\"Sorry, hacky. Quick fix required only 1st element is taken\"\"\"\n f = self.media['explanation'][0]\n k = self.__type_by_extension(os.path.sep.join(\n os.path.join([self.media_path, f])))\n v = [os.sep.join([self.web_root, expl]) \\\n for expl in self.media['explanation']]\n if v:\n v = v[0]\n else:\n v = \"\"\n return {'explanation': {k: v}}\n #): os.sep.join([self.web_root, f])\n\n #[os.sep.join([self.web_root, expl]) \\\n # for expl in self.media['explanation']]}\n def k_not_found():\n raise KeyError(\"Media keyword not found\")\n\n for k in self.media.keys():\n m_data = dict(\n question = gen_questions,\n explanation= gen_explanation,\n k_not_found = \"lambda x: pass\",\n ).get(k, 'k_not_found')()\n for key, value in m_data.items():\n data[key] = value\n return data",
"def build_response_dict(self):\n return {\n \"release\": self.settings['bookstore'][\"release\"],\n \"features\": self.settings['bookstore'][\"features\"],\n }",
"def fill_testing_dates(self):\r\n \r\n now = datetime.now()\r\n month = now.strftime('%m')\r\n year = now.year \r\n most_recent_date = '{}-{}-01'.format(year, month)\r\n self.testing_dates[1] = {'cv_start': '1972-01-01', \r\n 'cv_end': '1975-12-01', \r\n 'pred_start': '1976-01-01',\r\n 'pred_end': '1981-07-01'}\r\n self.testing_dates[2] = {'cv_start': '1976-01-01', \r\n 'cv_end': '1981-07-01', \r\n 'pred_start': '1981-08-01',\r\n 'pred_end': '1983-07-01'}\r\n self.testing_dates[3] = {'cv_start': '1976-01-01', \r\n 'cv_end': '1983-07-01', \r\n 'pred_start': '1983-08-01',\r\n 'pred_end': '1992-12-01'}\r\n self.testing_dates[4] = {'cv_start': '1983-08-01', \r\n 'cv_end': '1992-12-01', \r\n 'pred_start': '1993-01-01',\r\n 'pred_end': '2003-07-01'}\r\n self.testing_dates[5] = {'cv_start': '1993-01-01', \r\n 'cv_end': '2003-07-01', \r\n 'pred_start': '2003-08-01',\r\n 'pred_end': '2010-09-01'}\r\n self.testing_dates[6] = {'cv_start': '2003-08-01', \r\n 'cv_end': '2010-09-01', \r\n 'pred_start': '2010-10-01',\r\n 'pred_end': '2021-07-01'}\r\n self.testing_dates[7] = {'cv_start': '2010-10-01', \r\n 'cv_end': '2021-07-01', \r\n 'pred_start': '2021-08-01',\r\n 'pred_end': most_recent_date}",
"def create_dicts_for_results(dict_all_embeddings, dict_mission, our_initial, n):\r\n keys_ours, keys_state_of_the_art = divide_to_keys(dict_all_embeddings)\r\n keys = list(dict_all_embeddings.keys())\r\n\r\n list_dicts = []\r\n\r\n for key in keys:\r\n if key in keys_ours:\r\n embd_algo = dict_all_embeddings[key][1]\r\n regression = dict_all_embeddings[key][0]\r\n initial = our_initial\r\n else:\r\n embd_algo = key\r\n regression = \"\"\r\n initial = [n]\r\n t = round(dict_all_embeddings[key][2], 3)\r\n dict_results_by_arr = dict_mission[key]\r\n ratio_arr = list(dict_results_by_arr.keys())\r\n for r in ratio_arr:\r\n all_micro = dict_results_by_arr[r][0]\r\n all_macro = dict_results_by_arr[r][1]\r\n all_auc = dict_results_by_arr[r][3]\r\n for i in range(len(initial)):\r\n std_micro, std_macro, std_auc = calculate_std(r, i, dict_mission, keys_ours, keys_state_of_the_art)\r\n if key in keys_ours:\r\n t = round(dict_all_embeddings[key][8][i])\r\n initial_size = initial[i]\r\n test_ratio = r\r\n micro_f1 = float(round(all_micro[i], 3))\r\n macro_f1 = float(round(all_macro[i], 3))\r\n auc = float(round(all_auc[i], 3))\r\n if key in keys_state_of_the_art:\r\n initial_size = \"\"\r\n dict_results = {\"initial size\": initial_size, \"embed algo\": embd_algo, \"regression\": regression,\r\n \"test\": test_ratio, \"micro-f1\": str(micro_f1)+\"+-\"+std_micro,\r\n \"macro-f1\": str(macro_f1)+\"+-\"+std_macro, \"auc\": str(auc)+\"+-\"+std_auc, \"time\": t}\r\n list_dicts.append(dict_results)\r\n return list_dicts",
"def initDictionary(bands):\r\n for x in bands:\r\n d[\"{}\".format(x)] = {ProdCost: [], AlbumSales: []}",
"def dataExtract(queryResults):\n days = ['MondayCollect',\n 'TuesdayCollect',\n 'WednesdayCollect',\n 'ThursdayCollect',\n 'FridayCollect',\n 'SaturdayCollect',\n 'SundayCollect']\n\n #counting the instances of bin collections\n parkCount = 0\n roadingCount = 0\n otherCount = 0\n\n #output totals of bin collections\n parkOutput = []\n roadingOutput = []\n otherOutput = []\n \n #iterate over each day\n for day in days:\n \n #iterate over the number of bins\n for i in range(len(queryResults)):\n \n #check if the bin was collected on the day...\n if str(queryResults[i]['attributes'][day]).strip().lower() == 'yes':\n \n #unknown formatting issue with the data, these lines fix it\n strResult = str(queryResults[i]['attributes']['Owner'])\n strResultForm = strResult.lower().strip()\n \n #update the counts if True\n if strResultForm == 'roading':\n roadingCount += 1\n elif strResultForm == 'parks':\n parkCount += 1\n elif strResultForm == 'private':\n otherCount += 1\n else:\n otherCount +=1\n\n #print \"Day: {} \\nparkCount: {} \\nroadingCount: {} \\notherCount: {} \\n\\n\".format(day,parkCount,roadingCount,otherCount)\n \n parkOutput.append(parkCount)\n roadingOutput.append(roadingCount)\n otherOutput.append(otherCount)\n \n parkCount = 0\n roadingCount =0\n otherCount =0\n \n return parkOutput,roadingOutput,otherOutput",
"def qasmCircuitResults(self):\n returnedDictionary={}\n self.circutDrawing = self.draw()\n self.blochSpheres=self.separatedBlochSpheres()\n returnedDictionary[\"wires\"]=self.num_qubits\n returnedDictionary[\"probabilities\"] = self.separatedProbabilities()\n #returnedDictionary[\"blochSpheres\"] = self.separatedBlochSpheres()\n returnedDictionary[\"diracNotation\"] = self.diracNotation()\n returnedDictionary['chart'] = self.graph()\n returnedDictionary[\"link\"] = \"\"\n #returnedDictionary[\"qasmRows\"] = np.transpose(cols).tolist()\n \n if self.API_TOKEN != \"\":\n returnedDictionary[\"link\"] = self.runOnIBMQ()\n \n return returnedDictionary",
"def get_soup_general_data(soup):\n data_dict = {}\n\n name = soup.find(class_='product_title')\n if name:\n data_dict['name_of_game'] = name.h1.text\n\n pub = soup.find('li', class_='summary_detail publisher')\n if pub:\n data_dict['publisher'] = pub.a.text.strip()\n\n rel_date = soup.find('li', class_='summary_detail release_data')\n if rel_date:\n rel_date = rel_date.find('span', class_='data')\n if rel_date:\n data_dict['release_date'] = rel_date.text.strip()\n\n num_p = soup.find(\"li\", class_=\"summary_detail product_players\")\n if num_p:\n data_dict['num_players'] = num_p.find(class_=\"data\").text\n\n genres = soup.find(\"li\", class_='summary_detail product_genre')\n if genres:\n genres = genres.find_all('span', class_='data')\n data_dict['genres'] = [genre.text for genre in genres]\n\n age = soup.find(\"li\", class_=\"summary_detail product_rating\")\n if age:\n data_dict['age_rating'] = age.find('span', class_=\"data\").text\n\n return data_dict",
"def extract_games(self) -> Dict[int, Dict[str, Any]]:\n optadocument = self._get_doc()\n attr = assertget(optadocument, '@attributes')\n matchdata = assertget(optadocument, 'MatchData')\n matches = {}\n for match in matchdata:\n matchattr = assertget(match, '@attributes')\n matchinfo = assertget(match, 'MatchInfo')\n matchinfoattr = assertget(matchinfo, '@attributes')\n game_id = int(assertget(matchattr, 'uID')[1:])\n matches[game_id] = dict(\n # Fields required by the base schema\n game_id=game_id,\n competition_id=int(assertget(attr, 'competition_id')),\n season_id=int(assertget(attr, 'season_id')),\n game_day=int(assertget(matchinfoattr, 'MatchDay')),\n game_date=datetime.strptime(assertget(matchinfo, 'Date'), '%Y-%m-%d %H:%M:%S'),\n # home_team_id=see below,\n # away_team_id=see below,\n # Optional fields\n # home_score=see below,\n # away_score=see below,\n # duration=?\n # referee=?\n # venue=?,\n # attendance=?\n # home_manager=?\n # away_manager=?\n )\n teamdata = assertget(match, 'TeamData')\n for team in teamdata:\n teamattr = assertget(team, '@attributes')\n side = assertget(teamattr, 'Side')\n teamid = assertget(teamattr, 'TeamRef')\n score = assertget(teamattr, 'Score')\n if side == 'Home':\n matches[game_id]['home_team_id'] = int(teamid[1:])\n matches[game_id]['home_score'] = int(score)\n else:\n matches[game_id]['away_team_id'] = int(teamid[1:])\n matches[game_id]['away_score'] = int(score)\n return matches",
"def get_faulty_scenario_data():\n return [\n {\n \"population_count\": 100,\n \"county\": \"oxford\",\n \"season\": \"cold_month\",\n \"year\": 2017,\n },\n {\n \"population_count\": 150,\n \"county\": \"oxford\",\n \"season\": \"spring_month\",\n \"year\": 2017,\n },\n {\n \"population_count\": 200,\n \"county\": \"oxford\",\n \"season\": \"hot_month\",\n \"year\": 2017,\n },\n {\n \"population_count\": 210,\n \"county\": \"oxford\",\n \"season\": \"fall_month\",\n \"year\": 2017,\n },\n ]",
"def get_county() -> Dict:\n model = get_data_model()\n\n chart_ids = {\n \"cases\": \"Eq6Es\",\n \"deaths\": \"bSxdG\",\n \"age\": \"zSHDs\",\n \"gender\": \"FEciW\",\n \"race_eth\": \"aBeEd\",\n \"tests\": \"7sHQq\",\n }\n # The time series data for negative tests is gone, so I've just scraped positive test data using the new chart referenced above.\n\n with MarinDashboardPage() as page:\n model['name'] = \"Marin County\"\n model['update_time'] = datetime.now(tz=timezone.utc).isoformat()\n model[\"meta_from_baypd\"] = \"\"\n model['source_url'] = page.url\n model['meta_from_source'] = get_chart_meta(page, chart_ids.values())\n\n model[\"series\"][\"cases\"] = get_series_data(page, chart_ids[\"cases\"], ['Date', 'Total Cases', 'Total Recovered*'], \"cumul_cases\", 'Total Cases', 'cases')\n model[\"series\"][\"deaths\"] = get_series_data(page, chart_ids[\"deaths\"], ['Event Date', 'Total Hospitalizations', 'Total Deaths'], \"cumul_deaths\", 'Total Deaths', 'deaths', date_column='Event Date')\n\n model[\"series\"][\"tests\"] = get_test_series(page, chart_ids[\"tests\"])\n model[\"case_totals\"][\"age_group\"], model[\"death_totals\"][\"age_group\"] = get_breakdown_age(page, chart_ids[\"age\"])\n model[\"case_totals\"][\"gender\"], model[\"death_totals\"][\"gender\"] = get_breakdown_gender(page, chart_ids[\"gender\"])\n model[\"case_totals\"][\"race_eth\"], model[\"death_totals\"][\"race_eth\"] = get_breakdown_race_eth(page, chart_ids[\"race_eth\"])\n\n return model",
"def prepare_data_with_warehouse(self,from_date,to_date,warehouses,all_products):\n data_dict = {}\n stock_quant_obj=self.env['stock.quant']\n for warehouse in warehouses:\n all_locations = self.get_all_locations(warehouse)\n if not all_locations:\n continue\n \n #here we are finding the opening stock for these we are using base query\n #of inventory at date v10\n result = self.get_product_qty(all_locations,from_date)\n qty_dict = dict((x,y) for x, y in result)\n \n for product in all_products:\n last_sales = ''\n qty_purchase_in_duration = 0\n qty_sales_in_duration = 0\n last_purchase_date = ''\n scrap_location_qty = 0\n adjusted_qty_in_duration = 0\n warehouse_out_qty = 0\n warehouse_in_qty = 0\n# here from result of inventory at date we are seaching for specific product.\n opening_product_qty = qty_dict.get(product.id)\n\n #finding last sales qty\n last_sales = self.find_last_sales_qty(from_date,to_date,warehouse,all_locations,product)\n #finding last purchase date of product\n last_purchase_date = self.find_last_purchase_date(from_date,to_date,all_locations,product)\n #fiding date purchase qty in duration for specific product\n qty_purchase_in_duration = self.find_purchase_qty_in_duration(from_date,to_date,all_locations,product)\n #fiding scrap qty of precific product\n scrap_location_qty = self.find_scap_location_qty(from_date,to_date,product,all_locations)\n #finding sales qty in duration\n qty_sales_in_duration = self.find_sale_qty_in_duration(from_date,to_date,warehouse,all_locations,product)\n #fidning adjusted qty in duration\n adjusted_qty_in_duration = self.find_adjusted_qty_in_duration(from_date, to_date, product, all_locations)\n \n dest_location_lst = self.get_other_wahouse_locations(warehouse)\n \n if any(all_locations) and any(dest_location_lst):\n #fidning warehouse in qty \n warehouse_in_qty = self.find_warehouse_transer_in_qty(product, all_locations, dest_location_lst,from_date,to_date)\n #fidning warehouse out qty for specific product.\n warehouse_out_qty = self.find_warehouse_transer_out_qty(product, all_locations, dest_location_lst,from_date,to_date)\n \n if warehouse_out_qty:\n warehouse_out_qty = warehouse_out_qty and warehouse_out_qty[0][0] or ''\n if warehouse_in_qty:\n warehouse_in_qty = warehouse_in_qty and warehouse_in_qty[0][0] or ''\n \n if adjusted_qty_in_duration:\n adjusted_qty_in_duration = adjusted_qty_in_duration and adjusted_qty_in_duration[0][0] or '' \n if scrap_location_qty:\n scrap_location_qty = scrap_location_qty and scrap_location_qty[0][0] or ''\n \n # if qty_sales_in_duration:\n # qty_sales_in_duration = qty_sales_in_duration and qty_sales_in_duration[0][0] or ''\n # if qty_purchase_in_duration:\n # qty_purchase_in_duration = qty_purchase_in_duration[0][0] or ''\n if last_sales:\n last_sales = datetime.strptime(last_sales and last_sales[0][0], '%Y-%m-%d %H:%M:%S').strftime('%d-%m-%Y') or ''\n \n if last_purchase_date:\n last_purchase_date = datetime.strptime(last_purchase_date and last_purchase_date[0][0], '%Y-%m-%d %H:%M:%S').strftime('%d-%m-%Y') or ''\n \n if data_dict.has_key(warehouse.id):\n data_lst=data_dict.get(warehouse.id)\n data_lst.append({'product':product,'sku':product.default_code or '','name':product.name,\n 'Cost':product.standard_price or '','sales_price':product.lst_price or '',\n 'opening_qty':opening_product_qty or 0,'last_sales':last_sales or '',\n 'last_purchase_date':last_purchase_date or '','qty_purchase_in_duration':qty_purchase_in_duration or 0,\n 'qty_sales_in_duration': qty_sales_in_duration or 
0,'scrap_location_qty':scrap_location_qty or 0,\n 'adjusted_qty_in_duration':adjusted_qty_in_duration or 0\n ,'warehouse_in_qty':warehouse_in_qty or 0,\n 'warehouse_out_qty':warehouse_out_qty or 0 \n })\n data_dict.update({warehouse.id:data_lst})\n continue\n data_dict.update({warehouse.id:[{'product':product,'sku':product.default_code or '','name':product.name,\n 'Cost':product.standard_price or '','sales_price':product.lst_price or '',\n 'opening_qty':opening_product_qty or 0,\n 'last_sales':last_sales or '','last_purchase_date':last_purchase_date or '',\n 'qty_purchase_in_duration':qty_purchase_in_duration or 0,\n 'qty_sales_in_duration': qty_sales_in_duration or 0,\n 'scrap_location_qty':scrap_location_qty or 0,\n 'adjusted_qty_in_duration':adjusted_qty_in_duration or 0,\n 'warehouse_in_qty':warehouse_in_qty or 0,\n 'warehouse_out_qty':warehouse_out_qty or 0\n }]})\n return data_dict",
"def _build_data(self):\n licence_types = [('all', 'All')] + [(lt.pk, lt.display_name) for lt in LicenceType.objects.all()]\n data = {\n 'applications': {\n 'columnDefinitions': [],\n 'filters': {\n 'licenceType': {\n 'values': licence_types,\n },\n 'status': {\n 'values': [],\n }\n },\n 'ajax': {\n 'url': ''\n }\n },\n 'licences': {\n 'columnDefinitions': [],\n 'filters': {\n 'licenceType': {\n 'values': licence_types,\n },\n },\n 'ajax': {\n 'url': ''\n }\n },\n 'returns': {\n 'columnDefinitions': [],\n 'filters': {\n 'licenceType': {\n 'values': licence_types,\n },\n },\n 'ajax': {\n 'url': ''\n }\n }\n }\n return data",
"def __init__(self, data, team_criteria):\n self.data = data\n self.athletes = dl.get_athletes(data)\n self.data_engine = dict()\n for squad in dl.get_squads(data):\n self.data_engine[squad[\"id\"]] = {\n \"team_criteria\": deepcopy(team_criteria),\n \"team_members\": list()}",
"def get_day_query_info():\n all_rt_heat_metric_list = get_rt_day_query_count()\n day_query_rt_dict = {}\n for each_rt in all_rt_heat_metric_list:\n query_list = []\n dataset_id = each_rt[\"key\"]\n query_count = each_rt[\"doc_count\"]\n for each_appcode in each_rt[\"app_count\"][\"buckets\"]:\n app_code = each_appcode[\"key\"]\n app_query_count = each_appcode[\"doc_count\"]\n for each_day in each_appcode[\"agg_by_day\"][\"buckets\"]:\n timestamp = each_day[\"key\"] / 1000\n time_str = each_day[\"key_as_string\"]\n day_query_count = each_day[\"doc_count\"]\n query_list.append(\n {\n \"dataset_id\": dataset_id,\n \"app_code\": app_code,\n \"timestamp\": timestamp,\n \"time_str\": time_str,\n \"day_query_count\": day_query_count,\n \"app_query_count\": app_query_count,\n }\n )\n day_query_rt_dict[dataset_id] = {\n \"query_list\": query_list,\n \"query_count\": query_count,\n }\n # 有查询量的rt\n day_query_rt_list = list(day_query_rt_dict.keys())\n return day_query_rt_dict, day_query_rt_list",
"def prepare_data_with_location(self,from_date,to_date,locations,all_products):\n data_dict = {}\n stock_quant_obj=self.env['stock.quant']\n for loc in locations:\n all_locations = self.get_all_locations(warehouse=False, location=loc)\n if not all_locations:\n continue\n #here we are finding the opening stock for these we are using base query\n #of inventory at date v10\n result = self.get_product_qty(all_locations,from_date)\n qty_dict = dict((x,y) for x, y in result)\n \n for product in all_products:\n last_sales = ''\n qty_purchase_in_duration = 0\n qty_sales_in_duration = 0\n last_purchase_date = ''\n scrap_location_qty = 0\n adjusted_qty_in_duration = 0\n warehouse_out_qty = 0\n warehouse_in_qty = 0\n# here from result of inventory at date we are seaching for specific product.\n opening_product_qty = qty_dict.get(product.id)\n\n #finding last sales qty\n last_sales = self.find_last_sales_qty(from_date,to_date,False,all_locations,product)\n #finding last purchase date of product\n last_purchase_date = self.find_last_purchase_date(from_date,to_date,all_locations,product)\n #fiding date purchase qty in duration for specific product\n qty_purchase_in_duration = self.find_purchase_qty_in_duration(from_date,to_date,all_locations,product)\n #fiding scrap qty of precific product\n scrap_location_qty = self.find_scap_location_qty(from_date,to_date,product,all_locations)\n #finding sales qty in duration\n qty_sales_in_duration = self.find_sale_qty_in_duration(from_date,to_date,False,all_locations,product)\n #fidning adjusted qty in duration\n adjusted_qty_in_duration = self.find_adjusted_qty_in_duration(from_date, to_date, product, all_locations)\n\n # dest_location_lst = self.get_other_wahouse_locations(warehouse)\n \n # if any(all_locations) and any(dest_location_lst):\n # #fidning warehouse in qty \n # warehouse_in_qty = self.find_warehouse_transer_in_qty(product, all_locations, dest_location_lst,from_date,to_date)\n # #fidning warehouse out qty for specific product.\n # warehouse_out_qty = self.find_warehouse_transer_out_qty(product, all_locations, dest_location_lst,from_date,to_date)\n \n # if warehouse_out_qty:\n # warehouse_out_qty = warehouse_out_qty and warehouse_out_qty[0][0] or ''\n # if warehouse_in_qty:\n # warehouse_in_qty = warehouse_in_qty and warehouse_in_qty[0][0] or ''\n \n if adjusted_qty_in_duration:\n adjusted_qty_in_duration = adjusted_qty_in_duration and adjusted_qty_in_duration[0][0] or '' \n if scrap_location_qty:\n scrap_location_qty = scrap_location_qty and scrap_location_qty[0][0] or ''\n \n # if qty_sales_in_duration:\n # qty_sales_in_duration = qty_sales_in_duration and qty_sales_in_duration[0][0] or ''\n # if qty_purchase_in_duration:\n # qty_purchase_in_duration = qty_purchase_in_duration or ''\n if last_sales:\n last_sales = datetime.strptime(last_sales and last_sales[0][0], '%Y-%m-%d %H:%M:%S').strftime('%d-%m-%Y') or ''\n \n if last_purchase_date:\n last_purchase_date = datetime.strptime(last_purchase_date and last_purchase_date[0][0], '%Y-%m-%d %H:%M:%S').strftime('%d-%m-%Y') or ''\n \n if data_dict.has_key(loc.id):\n data_lst=data_dict.get(loc.id)\n data_lst.append({'product':product,'sku':product.default_code or '','name':product.name,\n 'Cost':product.standard_price or '','sales_price':product.lst_price or '',\n 'opening_qty':opening_product_qty or 0,'last_sales':last_sales or '',\n 'last_purchase_date':last_purchase_date or '','qty_purchase_in_duration':qty_purchase_in_duration or 0,\n 'qty_sales_in_duration': qty_sales_in_duration or 
0,'scrap_location_qty':scrap_location_qty or 0,\n 'adjusted_qty_in_duration':adjusted_qty_in_duration or 0\n ,'warehouse_in_qty':warehouse_in_qty or 0,\n 'warehouse_out_qty':warehouse_out_qty or 0 \n })\n data_dict.update({loc.id:data_lst})\n continue\n data_dict.update({loc.id:[{'product':product,'sku':product.default_code or '','name':product.name,\n 'Cost':product.standard_price or '','sales_price':product.lst_price or '',\n 'opening_qty':opening_product_qty or 0,\n 'last_sales':last_sales or '','last_purchase_date':last_purchase_date or '',\n 'qty_purchase_in_duration':qty_purchase_in_duration or 0,\n 'qty_sales_in_duration': qty_sales_in_duration or 0,\n 'scrap_location_qty':scrap_location_qty or 0,\n 'adjusted_qty_in_duration':adjusted_qty_in_duration or 0,\n 'warehouse_in_qty':warehouse_in_qty or 0,\n 'warehouse_out_qty':warehouse_out_qty or 0\n }]})\n return data_dict"
] | [
"0.5826197",
"0.55371296",
"0.5417445",
"0.5337191",
"0.52416694",
"0.51694846",
"0.5151711",
"0.5091198",
"0.5085503",
"0.5074048",
"0.5069187",
"0.5063415",
"0.50576264",
"0.5021445",
"0.50159806",
"0.5006675",
"0.4991931",
"0.49876162",
"0.49839446",
"0.49812287",
"0.49771714",
"0.4976073",
"0.49660462",
"0.4920108",
"0.49189234",
"0.49144468",
"0.49057344",
"0.49013743",
"0.48851863",
"0.48794577"
] | 0.6585092 | 0 |
Test case for add_or_update_case | def test_add_or_update_case(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_update_case(self):\n pass",
"def test_update_one(self):\n pass",
"def test_update_record(self):\n pass",
"def test_update(self):\n pass",
"def test_update(self):\n pass",
"def test_update(self):\n pass",
"def test_update_scenario(self):\n pass",
"def test_add_or_update_state_for_none_state_key(self):\n def test_update_value(name, value):\n return f'{name}-{value}'\n\n state_manager = ActorStateManager(self._fake_actor)\n has_value, val = _run(state_manager.try_get_state('state1'))\n self.assertTrue(has_value)\n self.assertEqual('value1', val)\n\n val = _run(state_manager.add_or_update_state('state1', 'value1', test_update_value))\n self.assertEqual('state1-value1', val)",
"def test_add_or_update_state_for_state_in_storage(self):\n def test_update_value(name, value):\n return f'{name}-{value}'\n\n state_manager = ActorStateManager(self._fake_actor)\n state_change_tracker = state_manager._get_contextual_state_tracker()\n val = _run(state_manager.add_or_update_state('state1', 'value1', test_update_value))\n self.assertEqual('state1-value1', val)\n state = state_change_tracker['state1']\n self.assertEqual(StateChangeKind.update, state.change_kind)",
"def test_update_update_has_a_value(self):\n self.Person.drop_collection()\n\n author = self.Person.objects.create(name=\"Test User\")\n\n with pytest.raises(OperationError):\n self.Person.objects(pk=author.pk).update({})\n\n with pytest.raises(OperationError):\n self.Person.objects(pk=author.pk).update_one({})",
"def test_update(self):\n # this is tested graphically, as it is UI\n pass",
"def test_add_or_update_state_for_new_state(self):\n def test_update_value(name, value):\n return f'{name}-{value}'\n\n state_manager = ActorStateManager(self._fake_actor)\n state_change_tracker = state_manager._get_contextual_state_tracker()\n val = _run(state_manager.add_or_update_state('state1', 'value1', test_update_value))\n self.assertEqual('value1', val)\n state = state_change_tracker['state1']\n self.assertEqual(StateChangeKind.add, state.change_kind)",
"def test_update_collection(self):\n pass",
"def test_update_with_no_matches(test_store, andy, pandy, candy):\n n_updated = test_store.update(fields={\"age\": 15}, name=\"Mark\")\n assert n_updated == 0\n\n items = list(test_store.get_by())\n assert len(items) == 3\n assert andy in items\n assert pandy in items\n assert candy in items",
"def test_update(test_store, andy, pandy, candy):\n n_updated = test_store.update(fields={\"age\": 15}, name=\"Candy\")\n assert n_updated == 1\n items = list(test_store.get_by())\n\n candy.age = 15\n assert andy in items\n assert pandy in items\n assert candy in items",
"def test_put_db_fail(self):\n test_data = {\n 'first_name': 'new_first_name',\n 'last_name': 'new_last_name'\n }\n with mock.patch('user_profile.models.UserProfile.update') as update:\n update.return_value = False\n response = self.client.put(self.url, json.dumps(test_data), content_type='application/json')\n self.assertEquals(response.status_code, 400)",
"def test_update_occurrence(self):\n pass",
"def test_update_or_create_with_zero(self):\n\n d = {'name': 'winboat', 'some_number': 0}\n e = twsu.update_or_create(self.DBTestCls1, d)\n self.session.flush()\n eq_(e.some_number, 0)",
"def test_update9(self):\n pass",
"def test_add_item_at_using_put(self):\n pass",
"def test_update_rule(self):\n pass",
"def test_user_update_request(self):\n pass",
"def test_update(app):\n\n assert False",
"def test_update_twice_same_result():\n starting_db = create_db(STARTING_DB_INPUT)\n actual: dict = o_obj.update_object_in_db(\n starting_db,\n \"some_uid\",\n INP\n )\n actual2: dict = o_obj.update_object_in_db(\n starting_db,\n \"some_uid\",\n INP\n )\n assert actual == EXPECTED == actual2",
"def test_update_on_unique_field_raises(test_store):\n\n with pytest.raises(NotImplementedError):\n test_store.update(fields={\"name\": \"Andy\"})",
"def test_update(self):\n self.my_task.key = self.task_storage.add(self.my_task)\n\n self.my_task.title = 'foo'\n key = self.task_storage.update(self.my_task)\n new_task = self.task_storage.find(key)\n\n self.assertEqual(self.my_task, new_task)",
"def add_or_update(self, answer):\n if self.exists(answer):\n self.update(answer)\n else:\n self.add(answer)",
"def test_update_no_match(self):\n self.my_task.key = self.task_storage.add(self.my_task)\n\n self.task_storage.delete(self.my_task.key)\n\n self.my_task.title = 'foo'\n\n self.key = self.task_storage.update(self.my_task)\n\n self.assertIsNone(self.key)",
"def test_simple_patches(self):\n payload = json.dumps([\n {\"op\": \"add\", \"path\": \"/name\", \"value\": \"New name\"},\n {\"op\": \"copy\", \"from\": \"/email\", \"path\": \"/username\"},\n {\"op\": \"replace\", \"path\": \"/subscriber\", \"value\": True}\n ])\n result = patch_item(self.valid_users[0], payload)\n user = Users.query.filter_by(UserID=self.valid_users[0]).first_or_404()\n self.assertEqual(\"New name\", result[\"name\"])\n self.assertEqual(\"[email protected]\", result[\"username\"])\n self.assertEqual(True, result[\"subscriber\"])\n self.assertEqual(\"New name\", user.Name)\n self.assertEqual(\"[email protected]\", user.Username)\n self.assertEqual(True, user.Subscriber)\n self.assertNotEqual(None, user.Updated) # Should update automatically",
"def test_update_user(self):\n pass"
] | [
"0.79237324",
"0.7314126",
"0.7272837",
"0.72685695",
"0.72685695",
"0.72685695",
"0.712033",
"0.69949913",
"0.6982817",
"0.69368035",
"0.69113",
"0.68479264",
"0.68319446",
"0.6824832",
"0.6721132",
"0.6690393",
"0.6686279",
"0.66511667",
"0.6622248",
"0.6599744",
"0.6579878",
"0.6571779",
"0.6547084",
"0.6512169",
"0.6478061",
"0.6466246",
"0.64359325",
"0.6411041",
"0.63484573",
"0.63103485"
] | 0.90939504 | 0 |
Test case for delete_case | def test_delete_case(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_delete_run(self):\n pass",
"def test_delete(self):\n pass",
"def test_delete1(self):\n pass",
"def test_delete_record(self):\n pass",
"def test_CovidCase_delete(self):\n # setting up by creating and saving the the database\n del_Covid = self.create_CovidCase()\n del_Covid.save()\n del_id = del_Covid.id\n # we are going to delete by calling the delete function\n del_deleted = CovidCase.objects.get(id=del_id)\n del_deleted.delete()\n\n self.assertNotIn(del_Covid, CovidCase.objects.all())",
"def test_delete7(self):\n pass",
"def test_delete_records(self):\n pass",
"def test_delete_goal(self):\n pass",
"def test_delete_item_using_delete(self):\n pass",
"def test_delete_occurrence(self):\n pass",
"def test_delete_rule(self):\n pass",
"def test_delete_activity(self):\n pass",
"def test_delete(self):\r\n course = CourseFactory.create(org='edX', course='999')\r\n with self.assertRaises(ValueError):\r\n tabs.primitive_delete(course, 0)\r\n with self.assertRaises(ValueError):\r\n tabs.primitive_delete(course, 1)\r\n with self.assertRaises(IndexError):\r\n tabs.primitive_delete(course, 6)\r\n tabs.primitive_delete(course, 2)\r\n self.assertFalse({u'type': u'textbooks'} in course.tabs)\r\n # Check that discussion has shifted up\r\n self.assertEquals(course.tabs[2], {'type': 'discussion', 'name': 'Discussion'})",
"def test_delete_risk_profile_using_delete(self):\n pass",
"def test_delete_boat(self):\n pass",
"def delete():",
"def test_delete(self):\n SampleTemplate.create(self.metadata, self.new_study)\n SampleTemplate.delete(2)\n obs = self.conn_handler.execute_fetchall(\n \"SELECT * FROM qiita.required_sample_info WHERE study_id=2\")\n exp = []\n self.assertEqual(obs, exp)\n obs = self.conn_handler.execute_fetchall(\n \"SELECT * FROM qiita.study_sample_columns WHERE study_id=2\")\n exp = []\n self.assertEqual(obs, exp)\n with self.assertRaises(QiitaDBExecutionError):\n self.conn_handler.execute_fetchall(\n \"SELECT * FROM qiita.sample_2\")",
"def test_delete(self):\n scenario = factories.Scenario(config='', status=Scenario.Status.INACTIVE)\n scenario.delete()\n self.assertEqual(scenario.status, Scenario.Status.INACTIVE)",
"def test_delete_identity(self):\n pass",
"def test_delete(self):\n self.assertTrue(self.run_function(\"group.add\", [self._group]))\n\n # correct functionality\n self.assertTrue(self.run_function(\"group.delete\", [self._group]))\n\n # group does not exist\n self.assertFalse(self.run_function(\"group.delete\", [self._no_group]))",
"def test_delete(self) -> None:\n\n expected = False\n actual = self.helper.set_name(self.test_name).exists()\n\n self.assertEqual(expected, actual)\n\n self.helper.set_value(\"Hello, World!\")\n\n expected = True\n actual = self.helper.exists()\n\n self.assertEqual(expected, actual)\n\n self.helper.delete()\n\n expected = False\n actual = self.helper.exists()\n\n self.assertEqual(expected, actual)",
"def test_deleting_a_segment(self):\n pass",
"def test_delete(self):\n # add a task\n self.add(title=\"Sample task doing\", description=\"for sample\", state=\"doing\")\n task = Task.query.filter_by(title='Sample task doing').first()\n\n # delete\n self.delete(id=task.id)\n task = Task.query.filter_by(title='Sample task doing').first()\n self.assertIsNone(task)",
"def test_delete(self):\n person = Person('test_person_b')\n person.delete()\n with database() as db:\n results = db.query(\"SELECT * FROM persons WHERE person_name = 'test_person_b'\")\n self.assertEqual(results, [])",
"def test_delete_category(self):\n pass",
"def test_delete_category(self):\n self.add_success(self.test_data['pants'])\n self.delete_success('pants')",
"def test_delete_cases_with_dry_run(cli_runner, base_context, base_store: Store, helpers, caplog):\n # GIVEN a database with a case\n case_obj = helpers.add_case(base_store)\n case_id = case_obj.internal_id\n sample = helpers.add_sample(base_store)\n helpers.add_relationship(store=base_store, case=case_obj, sample=sample)\n\n # WHEN deleting a case\n caplog.set_level(logging.DEBUG)\n cli_runner.invoke(\n delete_cases,\n [\"--sample-identifier\", \"name\", sample.name, \"--dry-run\"],\n obj=base_context,\n )\n\n # THEN it should not have been deleted\n assert \"Cases (that will NOT be deleted due to --dry-run):\" in caplog.text\n assert case_id in caplog.text",
"def test_delete_activity_template(self):\n pass",
"def delete(self):\n ...",
"def test_delete_cloud(self):\n pass"
] | [
"0.85045195",
"0.83174175",
"0.8187092",
"0.81214935",
"0.7901589",
"0.78933346",
"0.7868206",
"0.7762365",
"0.7726762",
"0.77229226",
"0.7537628",
"0.74579",
"0.73829216",
"0.735922",
"0.7357545",
"0.7353939",
"0.73434424",
"0.7334559",
"0.7333317",
"0.7319243",
"0.72404516",
"0.7239504",
"0.72335696",
"0.7200652",
"0.718772",
"0.71629775",
"0.7159546",
"0.7159288",
"0.71318567",
"0.7113472"
] | 0.94501704 | 0 |