Dataset schema (column name, type, length range):
query            string    lengths 9 to 9.05k
document         string    lengths 10 to 222k
metadata         dict
negatives        sequence  length 30
negative_scores  sequence  length 30
document_score   string    lengths 4 to 10
document_rank    string    2 classes
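Each row pairs a natural-language query (a function docstring) with the code document that implements it, plus 30 mined negative snippets and their similarity scores. A minimal sketch of loading and inspecting a row with the Hugging Face datasets library follows; the repository id below is a placeholder, not the actual dataset name:

from datasets import load_dataset

# "org/code-search-triplets" is a placeholder repository id, not the real one.
ds = load_dataset("org/code-search-triplets", split="train")

row = ds[0]
print(row["query"][:80])            # natural-language docstring
print(row["document"][:80])         # the matching code snippet
print(len(row["negatives"]))        # 30 mined negative code snippets
print(len(row["negative_scores"]))  # 30 scores aligned with the negatives
print(row["document_score"], row["document_rank"])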
Reopens the Bloomberg connection. Function is called when the 'Restart Bloomberg Connection' button from the pricer frame is clicked.
def reOpenConnection(self):
    self.blptsAnalytics.closeSession()
    self.blptsAnalytics = None
    self.bbgstreamBIDEM.closeSubscription()
    self.bbgstreamBIDEM = None
    self.streamWatcherBID = None
    self.streamWatcherAnalytics = None
    self.blptsPriceOnly.closeSession()
    self.streamWatcherPriceOnly = None
    self.firstPass()
    self.startUpdates()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def restart(self):\r\n self._safe_close()\r\n self._stopped.clear()\r\n self.reconnect()", "def REBpowerup(self):\n #specific to REB1\n self.cabac_reset()\n\n self.load_sequencer()\n #sets the default sequencer clock states to 0\n self.fpga.send_function(0, fpga0.Function( name=\"default state\", timelengths={0: 2, 1: 0}, outputs={0: 0, 1: 0}))\n\n print(\"REB ready to connect to CCD\")", "def bcp_reset(self):\n self.machine.bcp.transport.send_to_all_clients(\"reset\")", "def reconnect(self):\n self.close()\n self.connect()", "def reset(self):\n self.reconnect()", "def REBshutdown(self):\n pass", "def restart(self):", "def reconnecting(self) -> bool:", "def reload():\n xd = display.XKCDDisplayService()\n if xd.is_running():\n click.echo(\"gracefully reloading changes\")\n xd.send_signal(signal.SIGHUP)\n else:\n click.echo(\"xkcd service not running\")", "def restart(self):\r\n pass", "def maintainConnection():\n return RoboCaller().call(\"maintainConnection\", \"void\")", "def restart(self):\n self.state ='active'\n if self.budget <= 0:\n return self.leave()\n self.cards =BJCards()\n self.bet_amount =0", "def restart(self):\n pass", "def restart(self) -> None:", "def reconnect(self):\n self.close()\n self._db = self._pool.get_connection()", "def reconnect(self):\n raise NotImplementedError()", "def reconnect(self):\n self.should_reconnect = True\n self.stop()", "def hard_reset(self):\n self.close()\n self.open()", "def _on_connection_close(self):\n print(\"connection was closed, reconnecting...\")\n self.connect()", "def close(self):\n self.log.debug('RFSwitch - in RFSwitch close()')\n # Add code here to be executed only when the resource is closed.\n print(\"Calling RFSwitch:close\")", "def acquire_restart(self):\n self.bus.write('ACQ:STATE RUN')", "async def attempt_reconnect(self):\n await deploy.reconnect()", "def _restart(self):\n pass", "def restart(self):\n self.gui_block.set()", "def lz_restart_pondering(self):\n self.lz_wrapper.restart_ponder()", "def reconnect(self):\n self.__connection.reconnect()", "def reinit(self,*args):\n print \"Finding and testing board...\"\n try:\n if self.__instrumentID.value != 0: # if a handle to the board is known, reinitialize, i.e. close the access first \n self.__acqiris_QuantroDLL1.FinishApplication(self.__instrumentID,c_bool(True))\n self.__acqiris_QuantroDLL1.FindDevices(byref(self.__instrumentID),byref(self.__numInstruments),c_bool(True))\n self.__acqiris_QuantroDLL1.NbrChannels(self.__instrumentID,byref(self.__nbrOfChannels),c_bool(True))\n self.__acqiris_QuantroDLL1.NbrADCBits(self.__instrumentID,byref(self.__nbrADCBits),c_bool(True))\n self.Temperature()\n print \"OK\" \n except:\n print \"Error when trying to access Acqiris board.\"", "def reopen():", "def reconect(self):\n self.connection.reset_connection()\n return self.connection", "def close(self): \n\t\tself.connection = None" ]
[ "0.6018849", "0.5963336", "0.5805716", "0.5769722", "0.5753302", "0.5612181", "0.555544", "0.549498", "0.5492464", "0.54829496", "0.54354995", "0.5419979", "0.53779286", "0.5329917", "0.5328147", "0.5313881", "0.5282361", "0.52603227", "0.5255711", "0.5238575", "0.5236348", "0.5235073", "0.5209213", "0.5206247", "0.5204471", "0.5189871", "0.5182568", "0.5172463", "0.5168195", "0.5160797" ]
0.7195131
0
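The metadata block of each row declares a triplet objective over (query, document, negatives), so a row can be expanded into (anchor, positive, negative) training examples. A minimal sketch, assuming the row is a plain dict with the fields shown in the schema above; the score-margin filter is an assumption for illustration, not part of the dataset:

def row_to_triplets(row, margin=0.05):
    # Anchor is the docstring query, positive is the matching code document.
    anchor = row["query"]
    positive = row["document"]
    pos_score = float(row["document_score"])
    triplets = []
    for neg, neg_score in zip(row["negatives"], row["negative_scores"]):
        # Drop near-positives: negatives scoring within `margin` of the
        # positive's score are treated as likely false negatives (assumed heuristic).
        if neg_score < pos_score - margin:
            triplets.append((anchor, positive, neg))
    return triplets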
Refreshes the swap rates. Function is called when the 'Refresh Rates' button from the pricer menu is clicked.
def refreshSwapRates(self):
    self.firstPass()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_refresh_button_cicked_(self):\n for slider in self.sliders:\n slider.refresh()", "def refresh(self):\n pass", "def refresh(self):\n pass", "def refresh(self):\n self._refresh_method()", "def refresh(self) -> None:\n pass", "def refresh(self) -> None:\n pass", "def refresh(self) -> None:\n pass", "def refresh(self):\n self.Refresh()", "def Refresh(self):\n pass", "def refresh(self):\n self.__refresh()", "def update(self):\n self.rate = self.exchange.latest()", "def refresh(self):\n\n self._refreshed_on = time.time() * 1000", "def _refresh(self):\n self._need_display_update = True\n self._update()", "def refresh(self):\n\t\tself.driver.refresh()", "def refreshCredit(self):\n self.mainmenu.updateCR()", "def refresh(self):\n raise NotImplementedError", "def refresh(self):\n raise NotImplementedError", "def refresh(self):\n raise NotImplementedError(\"To be implemented\")", "def update(self) -> None:\n self.data.update()\n self._state = round(self.data.rate[\"rates\"][self._target], 3)", "def update(self):\n if self._refreshed_at is None or (\n self._refreshed_at + self._refresh_rate <= datetime.datetime.now()):\n\n self.run()", "def rDataChanged(self):\n\n self._queues.uResolutionTab.refreshData()\n self._layerManager.updateReviewLayer()", "def rates(self, rates):\n\n self._rates = rates", "def rates(self, rates):\n\n self._rates = rates", "def _refreshActionTriggeredSlot(self):\r\n \r\n self._controller.model.refresh(self._controller.model.activeIndex)", "def update_exchange_rates():\n try:\n from djmoney.contrib.exchange.models import Rate\n\n from common.settings import currency_code_default, currency_codes\n from InvenTree.exchange import InvenTreeExchange\n except AppRegistryNotReady: # pragma: no cover\n # Apps not yet loaded!\n logger.info(\"Could not perform 'update_exchange_rates' - App registry not ready\")\n return\n except Exception: # pragma: no cover\n # Other error?\n return\n\n backend = InvenTreeExchange()\n base = currency_code_default()\n logger.info(f\"Updating exchange rates using base currency '{base}'\")\n\n try:\n backend.update_rates(base_currency=base)\n\n # Remove any exchange rates which are not in the provided currencies\n Rate.objects.filter(backend=\"InvenTreeExchange\").exclude(currency__in=currency_codes()).delete()\n except OperationalError:\n logger.warning(\"Could not update exchange rates - database not ready\")\n except Exception as e: # pragma: no cover\n logger.error(f\"Error updating exchange rates: {e} ({type(e)})\")", "def update_rates(request):\n if request.method == 'GET':\n obj = requests.get(request_address).json()\n curr_inst = Currencies()\n curr_inst.timestamp = obj['timestamp']\n curr_inst.usd = obj['rates']['USD']\n curr_inst.eur = obj['rates']['EUR']\n curr_inst.czk = obj['rates']['CZK']\n curr_inst.pln = obj['rates']['PLN']\n curr_inst.save()\n serializer = CurrenciesSerializer(curr_inst)\n return Response(serializer.data)", "def handle_rates_response(self, rates):\n\n if rates.rates_id not in self.modules:\n return\n\n counter = self.modules[rates.rates_id]\n\n # update cache\n lvap = RUNTIME.lvaps[counter.lvap]\n lvap.rates = {x[0]: x[1] for x in rates.rates}\n\n # update this object\n counter.rates = {x[0]: x[1] for x in rates.rates}\n\n # call callback\n handle_callback(counter, counter)", "def set_default_refresh_rate(self, rate: int) -> None:\n self._update_thread.update_global_refresh_rate(rate)", "def refresh(self):\n\n self.driver.implicitly_wait(5)\n self.driver.refresh()", "def 
refresh_all(self):\n\t\t\n\t\tself.symbolsList.set_datasource(self.source)\n\t\tself.symbolsList.refresh()\n\t\t\n\t\tself.plotFrame.set_datasource(self.source)\n\t\tself.plotFrame.refresh()" ]
[ "0.6369983", "0.6226615", "0.6226615", "0.6216588", "0.61472285", "0.61472285", "0.61472285", "0.61254907", "0.6106044", "0.608328", "0.6068694", "0.59617007", "0.59551466", "0.58658063", "0.58497584", "0.58342487", "0.58342487", "0.5820157", "0.5804136", "0.5759256", "0.5748559", "0.5734647", "0.5734647", "0.5723842", "0.57054555", "0.5673152", "0.56665295", "0.56552243", "0.5633878", "0.5627296" ]
0.77100295
0
Fill historical prices and ratings. Function is called when the pricer menu first launches.
def fillHistoricalPricesAndRating(self):
    time_start = time.time()
    self.buildPriceHistory()
    savepath = TEMPPATH + 'bondhistoryrating.csv'
    #If bondhistoryratingUAT.csv doesn't exist, download data and write file.
    cols = ['SNP', 'MDY', 'FTC', 'P1D', 'P1W', 'P1M', 'Y1D', 'Y1W', 'Y1M', 'ACCRUED', 'D2CPN', 'SAVG', 'ISP1D', 'ISP1W', 'ISP1M', 'RISK_MID', 'PRINCIPAL_FACTOR', 'SIZE']
    if not (os.path.exists(savepath)) or datetime.datetime.fromtimestamp(os.path.getmtime(savepath)).date() < datetime.datetime.today().date():
        isins = self.df['ISIN'] + BBGHand + ' Corp'
        isins = list(isins.astype(str))
        ##
        flds = ['RTG_SP', 'RTG_MOODY', 'RTG_FITCH', 'INT_ACC', 'DAYS_TO_NEXT_COUPON', 'YRS_TO_SHORTEST_AVG_LIFE', 'RISK_MID', 'PRINCIPAL_FACTOR', 'AMT_OUTSTANDING']
        out = blpapiwrapper.simpleReferenceDataRequest(pandas.Series((self.df['ISIN'] + ' Corp').values, index=self.df.index).to_dict(), flds)[flds]
        #loop
        for f in flds:
            self.df[bbgToBdmDic[f]] = out[f]
        self.df['RISK_MID'].fillna(0, inplace=True)
        ##
        self.df.drop(['P1D', 'P1W', 'P1M', 'Y1D', 'Y1W', 'Y1M', 'ISP1D', 'ISP1W', 'ISP1M'], axis=1, inplace=True)
        dbPriceHistory = pandas.read_csv(PHPATH + 'dbPriceHistory.csv', index_col=0)
        dbYieldHistory = pandas.read_csv(PHPATH + 'dbYieldHistory.csv', index_col=0)
        dbSpreadHistory = pandas.read_csv(PHPATH + 'dbSpreadHistory.csv', index_col=0)
        hdt = []
        if self.dtYesterday.strftime('%Y%m%d') in dbPriceHistory.columns:
            hdt.append(self.dtYesterday.strftime('%Y%m%d'))
        else:
            self.df['P1D'] = pandas.np.nan
            self.df['Y1D'] = pandas.np.nan
            self.df['ISP1D'] = pandas.np.nan
        if self.dtLastWeek.strftime('%Y%m%d') in dbPriceHistory.columns:
            hdt.append(self.dtLastWeek.strftime('%Y%m%d'))
        else:
            self.df['P1W'] = pandas.np.nan
            self.df['Y1W'] = pandas.np.nan
            self.df['ISP1W'] = pandas.np.nan
        if self.dtLastMonth.strftime('%Y%m%d') in dbPriceHistory.columns:
            hdt.append(self.dtLastMonth.strftime('%Y%m%d'))
        else:
            self.df['P1M'] = pandas.np.nan
            self.df['Y1M'] = pandas.np.nan
            self.df['ISP1M'] = pandas.np.nan
        ohdt = [self.dtYesterday.strftime('%Y%m%d'), self.dtLastWeek.strftime('%Y%m%d'), self.dtLastMonth.strftime('%Y%m%d')]
        self.df = self.df.join(dbPriceHistory[hdt], on='ISIN')
        self.df.rename(columns={ohdt[0]: 'P1D', ohdt[1]: 'P1W', ohdt[2]: 'P1M'}, inplace=True)
        self.df = self.df.join(dbYieldHistory[hdt], on='ISIN')
        self.df.rename(columns={ohdt[0]: 'Y1D', ohdt[1]: 'Y1W', ohdt[2]: 'Y1M'}, inplace=True)
        self.df = self.df.join(dbSpreadHistory[hdt], on='ISIN')
        self.df.rename(columns={ohdt[0]: 'ISP1D', ohdt[1]: 'ISP1W', ohdt[2]: 'ISP1M'}, inplace=True)
        self.df[cols].to_csv(savepath)
        self.df['ACCRUED'] = self.df['ACCRUED'].apply(lambda x: '{:,.2f}'.format(float(x)))
        self.df['D2CPN'].fillna(-1, inplace=True)
        self.df['D2CPN'] = self.df['D2CPN'].astype(int)
        self.df[['RISK_MID', 'PRINCIPAL_FACTOR', 'SIZE']] = self.df[['RISK_MID', 'PRINCIPAL_FACTOR', 'SIZE']].astype(float)
        self.df[['SNP', 'MDY', 'FTC']] = self.df[['SNP', 'MDY', 'FTC']].fillna('NA')  # ,'ACCRUED','D2CPN'
        self.df[['SNP', 'MDY', 'FTC', 'ACCRUED']] = self.df[['SNP', 'MDY', 'FTC', 'ACCRUED']].astype(str)
    #Otherwise, load and read from file.
    else:
        print 'Found existing file from today'
        df = pandas.read_csv(savepath, index_col=0)
        self.df[cols] = df[cols]
        self.df[['RISK_MID', 'PRINCIPAL_FACTOR', 'SIZE', 'SAVG', 'ISP1D', 'ISP1W', 'ISP1M']] = self.df[['RISK_MID', 'PRINCIPAL_FACTOR', 'SIZE', 'SAVG', 'ISP1D', 'ISP1W', 'ISP1M']].astype(float)
        self.df[['SNP', 'MDY', 'FTC']] = self.df[['SNP', 'MDY', 'FTC']].astype(str)
        self.df['ACCRUED'].fillna(-1, inplace=True)  #HACK SO NEXT LINE DOESN'T BLOW UP - WE DON'T WANT TO PUT 0 THERE!
        self.df['ACCRUED'] = self.df['ACCRUED'].astype(float)
        self.df['ACCRUED'] = self.df['ACCRUED'].apply(lambda x: '{:,.2f}'.format(float(x)))
        self.df['D2CPN'].fillna(-1, inplace=True)  #HACK SO NEXT LINE DOESN'T BLOW UP - WE DON'T WANT TO PUT 0 THERE!
        self.df['D2CPN'] = self.df['D2CPN'].astype(int)
    print 'History fetched in: ' + str(int(time.time() - time_start)) + ' seconds.'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_current_prices(self) -> None:\n strategy = cast(Strategy, self.context.strategy)\n eth_price = strategy.contract_status.get(\n \"priceprovider_get_latest_answer\", None\n )\n btc_price = strategy.contract_status.get(\n \"btcpriceprovider_get_latest_answer\", None\n )\n\n if eth_price is None or btc_price is None:\n self.context.logger.info(\"No price to store.....\")\n return\n self._current_price = {\n ETH: self._get_price(eth_price),\n BTC: self._get_price(btc_price),\n }", "def update_historical_data():\n print('updating historical data')\n for sp in SupplyPoint.objects.filter(supplypointwarehouserecord__isnull=True).exclude(type__code=SupplyPointCodes.ZONE):\n update_historical_data_for_supply_point(sp)", "def set_hist_price(self, start_date, end_date):\n fin = YahooFinancials(self.ticker + '=X')\n prices = fin.get_historical_price_data(start_date, end_date, 'daily')\n price_per_day = {}\n for value in prices[self.ticker + '=X']['prices']:\n price_per_day[value['date']] = value['close']\n self.hist = price_per_day", "def simulate_future_prices(self, market_names, fixing_dates, observation_date, path_count, calibration_params):", "def _update_quote(self):\n # If this is the first quote or a price is outside current price ladder,\n # reset the price ladder.\n if (self._quotes_row == 0 or (\n self._quotes_df.loc[self._quotes_row, 'ask_price'] > \\\n self._price_ladder[0] + .5 * self._config['tick_size']) or (\n self._quotes_df.loc[self._quotes_row, 'bid_price'] < \\\n self._price_ladder[-1] - .5 * self._config['tick_size'])):\n max_price = (self._quotes_df.loc[self._quotes_row, 'ask_price'] +\n self._config['tick_size'] * np.floor(\n (self._config['row_count'] - 1) / 2))\n self._price_ladder = np.linspace(\n max_price,\n max_price - (\n self._config['row_count'] - 1) * self._config['tick_size'],\n self._config['row_count'])\n self._price_ladder_df.iloc[:, [0, 1, 3, 4]] = ''\n self._price_ladder_df.iloc[:, 2] = [self._config[\n 'price_format'].format(x) for x in self._price_ladder]\n\n # Populate price ladder dataframe and update table cells.\n for i in range(self._config['row_count']):\n if math.isclose(self._price_ladder[i],\n self._quotes_df.loc[self._quotes_row, 'ask_price']):\n self._price_ladder_df.iloc[i, 3] = str(\n self._quotes_df.loc[self._quotes_row, 'ask_size'])\n else:\n self._price_ladder_df.iloc[i, 3] = ''\n if math.isclose(self._price_ladder[i],\n self._quotes_df.loc[self._quotes_row, 'bid_price']):\n self._price_ladder_df.iloc[i, 1] = str(\n self._quotes_df.loc[self._quotes_row, 'bid_size'])\n else:\n self._price_ladder_df.iloc[i, 1] = ''\n\n # Print this quote row and update counter.\n print(self._quotes_df.iloc[self._quotes_row, ].values)\n self._quotes_row += 1", "def __populate_historical_trade_data(self):\n\n trade_data = self.__transactions.pivot_table(\n index=\"Date\",\n columns=[\"Ticker\"],\n values=[\n \"Quantity\",\n \"Investment\",\n ],\n aggfunc={\"Quantity\": np.sum, \"Investment\": np.sum},\n )\n\n # Make historical prices columns a multi-index. 
This helps the merging.\n self.portfolio_historical_prices.columns = pd.MultiIndex.from_product(\n [[\"Close\"], self.portfolio_historical_prices.columns]\n )\n\n trade_data = pd.merge(\n trade_data,\n self.portfolio_historical_prices,\n how=\"outer\",\n left_index=True,\n right_index=True,\n )\n\n trade_data[\"Close\"] = trade_data[\"Close\"].fillna(method=\"ffill\")\n trade_data.fillna(0, inplace=True)\n\n trade_data[\"Quantity\"] = trade_data[\"Quantity\"].cumsum()\n trade_data[\"Investment\"] = trade_data[\"Investment\"].cumsum()\n trade_data[\"Investment\", \"Total\"] = trade_data[\"Investment\"].sum(axis=1)\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Investment delta\"], self.tickers_list + [\"Total\"]]\n )\n ] = (trade_data[\"Investment\"].diff(periods=1).fillna(trade_data[\"Investment\"]))\n\n # End Value = Quantity * Close\n trade_data[pd.MultiIndex.from_product([[\"End Value\"], self.tickers_list])] = (\n trade_data[\"Quantity\"][self.tickers_list]\n * trade_data[\"Close\"][self.tickers_list]\n )\n\n trade_data.loc[:, (\"End Value\", \"Total\")] = trade_data[\"End Value\"][\n self.tickers_list\n ].sum(axis=1)\n\n # Initial Value = Previous End Value + Investment changes\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Initial Value\"], self.tickers_list + [\"Total\"]]\n )\n ] = 0\n\n trade_data[\"Initial Value\"] = trade_data[\"End Value\"].shift(1) + trade_data[\n \"Investment\"\n ].diff(periods=1)\n\n # Set first day Initial Value as the Investment (NaNs break first period)\n for t in self.tickers_list + [\"Total\"]:\n trade_data.at[trade_data.index[0], (\"Initial Value\", t)] = trade_data.iloc[\n 0\n ][\"Investment\"][t]\n\n trade_data = trade_data.reindex(\n columns=[\n \"Quantity\",\n \"Investment\",\n \"Investment delta\",\n \"Close\",\n \"Initial Value\",\n \"End Value\",\n ],\n level=0,\n )\n self.historical_trade_data = trade_data", "def refresh_lines(self):\n for line_data in self._data_lines:\n line = BasketLine.from_dict(self, line_data)\n pricing_context = PricingContext(shop=self.shop, customer=self.customer, supplier=line.supplier)\n line.cache_info(pricing_context)\n self._add_or_replace_line(line)", "def refreshSwapRates(self):\r\n self.firstPass()", "def fill_data_product(self):\n self.product.fill_data_product(self.list_products, self.mycursor, self.my_database)", "def update_data(self):\n data, meta_data = ts.get_daily(symbol=self.stock_ticker, outputsize='full')\n self.data = data\n self.meta_data = meta_data", "def generate_portfolio_data(self):\n self.__load_portfolio_historical_prices()\n self.__populate_historical_trade_data()\n self.__calculate_portfolio_returns()\n self.__calculate_portfolio_performance()", "def update_at_init(self, price, dt):\n for name, feature in self._d_features.items():\n feature.update_at_init(price, dt)", "def _initialize(self):\n for s in Subsidiary.all():\n self.__create_stock(s)\n self.get_stock()", "def updateUI(self):\n\n try:\n # Getting the values selected by the user\n from_ = self.from_currency.currentText()\n to = self.to_currency.currentText()\n from_amt = Decimal(self.getMostRecentRelevantRate(self.rates[from_]))\n to_amt = Decimal(self.getMostRecentRelevantRate(self.rates[to]))\n amt = Decimal(self.from_amount.value())\n\n # Calculating the new conversion value\n amount = (to_amt / from_amt) * amt\n self.to_amount.setText('%.02f' % amount)\n\n # Getting the dates selected by the user\n self.from_date = self.from_calendar.selectedDate().toPyDate()\n self.to_date = self.to_calendar.selectedDate().toPyDate()\n\n 
# Updating the graph only if something in relationship with it changes\n if self.last_clicked != 'amount':\n # Update the dates selected according to the user selection if the user selects a negative range\n if self.to_date < self.from_date:\n if self.last_clicked == 'from':\n date = self.from_calendar.selectedDate()\n self.to_calendar.setSelectedDate(date)\n self.to_date = date.toPyDate()\n else:\n date = self.to_calendar.selectedDate()\n self.from_calendar.setSelectedDate(date)\n self.from_date = date.toPyDate()\n\n # Getting and calculating the currencies rates according to the range selected by the user\n from_rates = self.getRatesInRange(self.rates[from_])\n to_rates = self.getRatesInRange(self.rates[to])\n conv_rates = self.getConvRates(from_rates, to_rates)\n\n # Getting the number of days included in the range\n nb_days = (self.to_date - self.from_date).days + 1\n date_range = range(0, nb_days)\n\n # Clearing the graph and the legend\n self.rates_plot.clear()\n self.legend.scene().removeItem(self.legend)\n self.legend = self.rates_plot.addLegend()\n\n # Updating the graph with our new values\n self.rates_plot.setXRange(0, nb_days)\n self.rates_plot.setYRange(0, max(from_rates + to_rates + conv_rates))\n self.rates_plot.plot(date_range, from_rates, pen='b', symbol='x', symbolPen='b', symbolBrush=0.2, name=from_)\n self.rates_plot.plot(date_range, to_rates, pen='r', symbol='o', symbolPen='r', symbolBrush=0.2, name=to)\n self.rates_plot.plot(date_range, conv_rates, pen='g', symbol='+', symbolPen='g', symbolBrush=0.2, name='conversion rate')\n except Exception as e:\n print('Failed to update UI')\n print(e)", "def refresh(self):\n lastDate = max(etf.data.index[-1] for etf in self.etfs.values())\n for etf in self.etfs.values():\n isLastDayMissing = etf.data.index[-1] < lastDate\n if isLastDayMissing and not etf.sold():\n lastDay = pd.DataFrame([etf.data.iloc[-1]], columns=etf.data.columns, index=[lastDate])\n etf.data = etf.data.append(lastDay)\n etf.calculateStats()\n # Get Profit/Loss series\n p_l = pd.DataFrame()\n for name, etf in self.etfs.items():\n p_l[name] = etf.data['Profit/Loss']\n p_l.fillna(method='ffill', inplace=True)\n self.data['Profit/Loss'] = p_l.sum(axis=1)\n\n # Get Invested amount seires\n inv = pd.DataFrame()\n for name, etf in self.etfs.items():\n inv[name] = etf.data['Invested']\n if etf.sold():\n inv.loc[etf.sell_date:,name] = -etf.profit_loss()\n inv.fillna(method='ffill', inplace=True)\n self.data['Invested'] = inv.sum(axis=1)\n\n self.data['Profit/Loss%'] = self.data['Profit/Loss'] / self.data['Invested'] * 100 # Calculates the Profit/Loss (%)\n self.data['Value'] = round(self.data['Invested'] + self.data['Profit/Loss'], 2)\n self.data['Gains'] = self.data['Profit/Loss'] - self.data['Profit/Loss'].shift(1)\n self.data['Gains%'] = self.data['Gains'] / self.data['Value'].shift(1) * 100", "def populate(self, myEmpireDict, mySystemDict):\n self.myEmpireDict = myEmpireDict\n self.mySystemDict = mySystemDict\n \n # disable buttons\n self.btnAddMarketOrder.disable()\n self.btnCancelOrder.disable()\n self.clearBidData()\n \n # load resources\n try:\n (myAvgAL, myAvgEC, myAvgIA) = self.getAvgMarketCosts()\n myMarketOrders = self.buildMarketOrdersData()\n except:\n # this allows for testing panel outside game\n (myAvgAL, myAvgEC, myAvgIA) = (10.10100, 20.10100, 30.123120)\n myMarketOrders = self.testDict\n \n # system Resources\n self.lblTotalAL.setText('%.1f' % myAvgAL)\n self.lblTotalEC.setText('%.1f' % myAvgEC)\n self.lblTotalIA.setText('%.1f' % myAvgIA)\n \n # 
trade route list\n self.populateListbox(self.lstOrders, myMarketOrders)", "def Update(self, prices):\n \n if self._bdaqmid != None:\n # check that we got new prices for this market this tick.\n if (self._bdaqmid in prices[const.BDAQID] and\n self._bfmid in prices[const.BFID]):\n self._bdaqsels = [prices[const.BDAQID][self._bdaqmid][i]\n for i in [s.id for s in self._bdaqsels]]\n self._bfsels = [prices[const.BFID][self._bfmid][i]\n for i in [s.id for s in self._bfsels]]\n\n # call the listener functions.\n self.UpdateViews()", "def __init__(self):\n\n super().__init__()\n\n self.rates = dict()\n self.currencies = list()\n self.getData() # Fetch the data from the csv online file\n\n # Initialization of the currencies choice dropdown boxes\n self.from_currency = QComboBox()\n self.from_currency.addItems(self.currencies)\n self.to_currency = QComboBox()\n self.to_currency.addItems(self.currencies)\n\n self.from_amount = QDoubleSpinBox()\n self.from_amount.setRange(0.01, 10000000.00)\n self.from_amount.setValue(1.00)\n self.to_amount = QLabel('1.00')\n self.from_currency_label = QLabel('From Currency:')\n self.to_currency_label = QLabel('To Currency:')\n self.from_amount_label = QLabel('Amount to convert:')\n self.to_amount_label = QLabel('Result of conversion based on most recent rates:')\n\n self.from_calendar = QCalendarWidget()\n self.to_calendar = QCalendarWidget()\n self.rates_plot = pg.PlotWidget()\n self.from_date = QDate()\n self.to_date = QDate()\n self.last_clicked = \"\"\n\n hint_font = QFont()\n hint_font.setItalic(True)\n self.graph_hint = QLabel('Hint: you can interact with the graph using your mouse')\n self.graph_hint.setFont(hint_font)\n\n\n self.initUI()", "def refreshCredit(self):\n self.mainmenu.updateCR()", "def reqData(self):\r\n #self.reqGlobalCancel()\r\n #self.add_historical(\"Stock('TSLA', 'SMART', 'USD')\")\r\n #self.add_historical(\"Stock('IBM', 'SMART', 'USD')\")\r\n #self.add_historical(\"Stock('MSFT', 'SMART', 'USD')\")\r\n self.add_historical(\"Stock('FB', 'SMART', 'USD')\")", "def run(self, max_risk=0, min_return=0, num=0, init_holdings=None):\n if not self.dates:\n self.dates = ['2010-01-01', '2012-12-31']\n self.load_data()\n\n num_months = len(self.df_all)\n first_purchase = True\n result = {}\n baseline_result = {}\n self.baseline_values = [0]\n self.update_values = [0]\n months = []\n\n # Define dataframe to save output data \n headers = ['Date', 'Value'] + self.stocks + ['Variance', 'Returns']\n self.opt_results_df = pd.DataFrame(columns=headers)\n row = []\n\n self.price_df = pd.DataFrame(columns=self.stocks)\n\n # Initialize the plot\n plt.ylim(ymax = 1.5*self.budget, ymin = -1.5*self.budget)\n plt.xticks(list(range(0, num_months, 2)), \n self.df_baseline.index.strftime('%b')[::2], rotation='vertical')\n plt.locator_params(axis='x', nbins=num_months/2)\n plt.plot(list(range(0, num_months)), [0]*(num_months), \n color='red', label=\"Break-even\", linewidth=0.5)\n\n for i in range(3, num_months):\n\n # Look at just the data up to the current month\n df = self.df_all.iloc[0:i+1,:].copy()\n baseline_df_current = self.df_baseline.iloc[0:i+1,:]\n print(\"\\nDate:\", df.last_valid_index())\n months.append(df.last_valid_index().date()) \n\n if first_purchase:\n budget = self.budget\n initial_budget = self.budget\n baseline_shares = (budget / baseline_df_current.iloc[-1])\n baseline_result = {self.baseline[0]: baseline_shares} \n else:\n # Compute profit of current portfolio\n budget = sum([df.iloc[-1][s]*result['stocks'][s] for s in self.stocks]) \n 
self.update_values.append(budget - initial_budget)\n\n # Compute profit of fund portfolio\n fund_value = sum([baseline_df_current.iloc[-1][s]*baseline_result[s] \n for s in self.baseline]) \n self.baseline_values.append(fund_value - initial_budget)\n\n self.budget = budget \n\n self.load_data(df=df)\n\n self.price_df.loc[i-2] = list(self.price.values)\n\n # Output for user on command-line and plot\n update_values = np.array(self.update_values, dtype=object)\n baseline_values = np.array(self.baseline_values, dtype=object)\n plt.plot(range(3, i+1), update_values, \n color='blue', label=\"Optimized portfolio\")\n plt.plot(range(3, i+1), baseline_values, \n color='gray', label=\"Fund portfolio\", linewidth=0.5)\n \n if first_purchase:\n plt.legend(loc=\"lower left\")\n plt.title(\"Start: {start}, End: {end}\".format\\\n (start=self.df_all.first_valid_index().date(), \n end=self.df_all.last_valid_index().date()))\n\n plt.savefig(\"portfolio.png\")\n plt.pause(0.05)\n \n # Making solve run\n if self.model_type == 'DQM':\n print(f\"\\nMulti-Period DQM Run...\")\n \n self.build_dqm()\n self.solution['DQM'] = self.solve_dqm()\n result = self.solution['DQM']\n else:\n print(f\"\\nMulti-Period CQM Run...\")\n\n # Set budget to 0 to enforce that portfolio is self-financing \n if self.t_cost and not first_purchase:\n self.budget = 0 \n\n self.solution['CQM'] = self.solve_cqm(max_risk=max_risk, \n min_return=min_return,\n init_holdings=init_holdings)\n result = self.solution['CQM']\n init_holdings = result['stocks']\n\n # Print results to command-line\n value = sum([self.price[s]*result['stocks'][s] for s in self.stocks])\n returns = result['return']\n variance = result['risk'] \n\n row = [months[-1].strftime('%Y-%m-%d'), value] + \\\n [result['stocks'][s] for s in self.stocks] + \\\n [variance, returns] \n self.opt_results_df.loc[i-2] = row \n \n first_purchase = False\n\n print(self.opt_results_df)\n print(f'\\nRun completed.\\n')\n\n plt.savefig(\"portfolio.png\")\n plt.show(block=False)", "def Update(self):\n print(f\"Updating {self.name} from yfinance API...\")\n import yfinance as yf\n import datetime\n stock = yf.Ticker(self._symbol)\n if (self.name == None or self.name == self.symbol) and stock.info is not None:\n if \"shortName\" in stock.info:\n self.name = stock.info['shortName']\n yhistory = stock.history(period=\"max\")\n print(yhistory)\n\n dividends = []\n for date, row in yhistory.iterrows():\n dividend_today = row['Dividends']\n dividends.append((date, dividend_today))\n if dividend_today != 0.:\n while date - dividends[0][0] > datetime.timedelta(days=360):\n dividends.remove(dividends[0])\n else:\n while date - dividends[0][0] > datetime.timedelta(days=370):\n dividends.remove(dividends[0])\n\n annualDividend = 0.\n for dividend in dividends:\n annualDividend += dividend[1]\n \n self.AddSnapshot(price=row['Open'], date=date, dividend=dividend_today, annualDividend=annualDividend)\n #self.AddSnapshot(price=row['Close'], date=date, annualDividend=annualDividend)\n\n try:\n self.short_percent_of_float = stock.info['shortPercentOfFloat']\n except(KeyError):\n self.short_percent_of_float = 0.\n try:\n self.pe_ratio = stock.info['forwardPE']\n except(KeyError, TypeError):\n self.pe_ratio = float('inf')\n\n print(f\"History for {self.name} updated.\")", "def update(self):\n\n self.stats = statistics.get()\n self.ticker = exchangerates.get_ticker()", "def get_prices(self):\n pass", "def update_price_signals(self, monthly_data, time_series_data):\n if self.combined_market:\n try:\n fr_price = 
time_series_data.loc[:, 'FR Price ($/kW)']\n except KeyError:\n pass\n else:\n self.p_regu = np.divide(fr_price, 2)\n self.p_regd = np.divide(fr_price, 2)\n\n try:\n self.price = time_series_data.loc[:, 'DA Price ($/kWh)']\n except KeyError:\n pass\n else:\n try:\n self.p_regu = time_series_data.loc[:, 'Reg Up Price ($/kW)']\n except KeyError:\n pass\n\n try:\n self.p_regd = time_series_data.loc[:, 'Reg Down Price ($/kW)']\n except KeyError:\n pass\n\n try:\n self.price = time_series_data.loc[:, 'DA Price ($/kWh)']\n except KeyError:\n pass", "def _get_book_prices(self):\n for k in self.orders.keys():\n if self.orders[k].type == 'ask':\n self.ask_prices.append(self.orders[k].price)\n self.ask_snapshot[k] = self.orders[k]\n elif self.orders[k].type == 'bid':\n self.bid_prices.append(self.orders[k].price)\n self.bid_snapshot[k] = self.orders[k]\n # Sorting and removing dubbing\n self.ask_prices = list(dict.fromkeys(sorted(self.ask_prices)))\n self.bid_prices = list(dict.fromkeys(sorted(self.bid_prices, reverse=True)))", "def initialize(context):\n context.stocks = {symbol(\"TMF\"): 0.2, symbol(\"UJB\"): 0.2, symbol(\"TQQQ\"): 0.6}\n\n context.target_leverage = 1\n\n schedule_function(\n rebalance, date_rules.every_day(), time_rules.market_open(minutes=11)\n )", "def update_book(self):\n while self.lowest_sell is not None and self.highest_buy is not None and self.lowest_sell <= self.highest_buy:\n sell = self.sell_levels[self.lowest_sell].head_order\n buy = self.buy_levels[self.highest_buy].head_order\n self.execute_trade(sell, buy)", "def prices_available(self, prices_available):\n\n self._prices_available = prices_available", "def populate_price_algorithm_information(self, user_price_scale, user_max_price, user_desired_price,\n user_min_price):\n self.max_price = user_max_price\n self.min_price = user_min_price\n self.desired_price = user_desired_price\n self.price_user_scale_factor = user_price_scale" ]
[ "0.60714465", "0.5873897", "0.5567629", "0.5556391", "0.5556155", "0.54767716", "0.54149306", "0.5359882", "0.5354679", "0.5351185", "0.5349947", "0.5340057", "0.5323976", "0.52928245", "0.5278397", "0.52684623", "0.52457297", "0.5233734", "0.522753", "0.5223621", "0.5218184", "0.52028084", "0.5183737", "0.5180919", "0.5161882", "0.51478076", "0.51352644", "0.5131186", "0.5121903", "0.51101285" ]
0.6407883
0
Check if a test value is within permissive relative difference from refval. Returns a boolean.
def _isInAllowedRange( self, testval, refval, reltol=1.e-2 ):
    denom = refval
    if refval == 0:
        if testval == 0:
            return True
        else:
            denom = testval
    rdiff = (testval-refval)/denom
    del denom,testval,refval
    return (abs(rdiff) <= reltol)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def within_value(v1, v2):\n percentage = 0.1\n error_allowed = percentage * v1\n high = v1 + error_allowed\n low = v1 - error_allowed\n\n return low <= v2 <= high", "def sanity_check(self):\n res = True\n res = res and self.detected\n res = res and np.sum(self.diffs) < 30000 # experimental value\n return res", "def fp_eq(x: float, y: float) -> bool:\n return fabs(x-y) < 10**-12", "def _check_within_tolerance(value, tolerance):\n return tf.norm(tensor=value, ord=np.inf) <= tolerance", "def check_compare(change, reference_value):\n rounded_change = round(change, 2)\n compare_values(reference_value, rounded_change)", "def is_almost_active(self,\n env\n ):\n if not hasattr(self, \"tolerance\") or self.tolerance is None:\n return False\n c_value = self.get_value(env)\n flag = np.any(np.greater(c_value + self.tolerance, 0.))\n return bool(flag)", "def check_initial_confidence(self): # pragma: no cover\n if self.test_type != 'perf':\n return True\n\n if self.required_initial_confidence is None:\n return True # pragma: no cover\n\n # TODO(robertocn): Remove all uses of \"confidence\".\n if self.dummy_initial_confidence is not None:\n self.initial_confidence = float(\n self.dummy_initial_confidence)\n if (float(self.initial_confidence) <\n float(self.required_initial_confidence)):\n self._set_insufficient_confidence_warning()\n return False\n return True\n\n if self.dummy_builds:\n dummy_result = self.good_rev.values != self.bad_rev.values\n if not dummy_result:\n self._set_insufficient_confidence_warning()\n return dummy_result\n\n with self.api.m.step.nest('Re-testing reference range'):\n expiration_time = time.time() + REGRESSION_CHECK_TIMEOUT\n while time.time() < expiration_time:\n if len(self.good_rev.values) >= 5 and len(self.bad_rev.values) >= 5:\n if self.significantly_different(self.good_rev.values,\n self.bad_rev.values):\n return True\n if len(self.good_rev.values) == len(self.bad_rev.values):\n revision_to_retest = self.last_tested_revision\n else:\n revision_to_retest = min(self.good_rev, self.bad_rev,\n key=lambda x: len(x.values))\n if len(revision_to_retest.values) < MAX_REQUIRED_SAMPLES:\n revision_to_retest.retest()\n else:\n break\n self._set_insufficient_confidence_warning()\n return False", "def checkFloat(comment, value, expected, tol=1e-10, update=True):\n if np.isnan(value) and np.isnan(expected):\n res = True\n elif np.isnan(value) or np.isnan(expected):\n res = False\n else:\n res = abs(value - expected) <= tol\n if update:\n if not res:\n print(\"checking float\",comment,'|',value,\"!=\",expected)\n results[\"fail\"] += 1\n else:\n results[\"pass\"] += 1\n return res", "def check_performance(self):\n self.lg.debug('Checking performance.')\n avg_up = (sum(self.results_up)) / len(self.results_up)\n avg_down = (sum(self.results_down)) / len(self.results_down)\n if (\n avg_up < self.tolerance * self.up or\n avg_down < self.tolerance * self.down\n ):\n self.bad_performance = True\n else:\n self.bad_performance = False", "def compare2float_relative(x_base, y_check, relative_error):\n value_x = float(x_base)\n value_y = float(y_check)\n return ((abs(value_x - value_y)) / (abs(value_x))) <= relative_error", "def _check_approx_fixed_point(V_current, V_previous, tol):\n\n # Compute the sup norm between `V_current` and `V_previous`\n sup_norm = np.max(np.abs(V_current - V_previous))\n\n # Algorithm termination condition\n fp = sup_norm <= tol\n\n return fp, sup_norm", "def validate(self, value: Union[int, float]) -> bool:\n if self.left_boundary['open']:\n if 
self.left_boundary['value'] >= value:\n return False\n else:\n if self.left_boundary['value'] > value:\n return False\n if self.right_boundary['open']:\n if value >= self.right_boundary['value']:\n return False\n else:\n if value > self.right_boundary['value']:\n return False\n return True", "def checkSanity(self, valuePreviouslySet):\n firstGet = self._pfwClient.get(self._paramPath)\n\n try:\n returnValue = Decimal(firstGet)\n except ValueError:\n print(\"ERROR: Can't convert %s to a decimal\" % firstGet)\n return firstGet, False\n\n upperAllowedValue = Decimal(valuePreviouslySet) + (Decimal(self._quantum) / Decimal(2))\n lowerAllowedValue = Decimal(valuePreviouslySet) - (Decimal(self._quantum) / Decimal(2))\n\n if not (lowerAllowedValue <= returnValue <= upperAllowedValue):\n print('%s <= %s <= %s is not true' %\n (lowerAllowedValue, returnValue, upperAllowedValue))\n return firstGet, False\n\n return firstGet, True", "def supports(self, x):\n return 0.0 < x", "def supports(self, x):\n return 0.0 < x", "def checkPointInLampsReach(self, p):\n v1 = XYPoint(self.Lime.x - self.Red.x, self.Lime.y - self.Red.y)\n v2 = XYPoint(self.Blue.x - self.Red.x, self.Blue.y - self.Red.y)\n\n q = XYPoint(p.x - self.Red.x, p.y - self.Red.y)\n s = self.crossProduct(q, v2) / self.crossProduct(v1, v2)\n t = self.crossProduct(v1, q) / self.crossProduct(v1, v2)\n\n return (s >= 0.0) and (t >= 0.0) and (s + t <= 1.0)", "def ok(self, point):\n [x1, x2, x3, x4, x5, x6] = point.decisions\n if x1 + x2 -2 < 0:\n return False\n if 6 - x1 - x2 < 0:\n return False\n if 2 - x2 + x1 < 0:\n return False\n if 2 - x1 + 3*x2 < 0:\n return False\n if 4 - (x3 - 3)**2 - x4 < 0:\n return False\n if (x5 - 3)**3 + x6 - 4 < 0:\n return False\n for i, d in enumerate(point.decisions):\n if d < self.decisions[i].low or d > self.decisions[i].high:\n print i, d, self.decisions[i].low, self.decisions[i].high\n return False\n return True", "def verify(self, parameter_map):\n first_contextual_note = parameter_map[self.note_one]\n second_contextual_note = parameter_map[self.note_two]\n if first_contextual_note.note is None or second_contextual_note.note is None:\n return False\n\n diff = second_contextual_note.note.diatonic_pitch.chromatic_distance - \\\n first_contextual_note.note.diatonic_pitch.chromatic_distance\n total_distance = self.up_interval.chromatic_distance + self.down_interval.chromatic_distance\n return abs(diff) <= total_distance", "def close_to_exceeding(self) -> bool:\n mean = self.current / self.num_cuts\n if self.max_frames is not None:\n return self.current + mean > self.max_frames\n if self.max_samples is not None:\n return self.current + mean > self.max_samples\n if self.max_duration is not None:\n return self.current + mean > self.max_duration\n return False", "def is_valid(self, value: Union[float, int]) -> bool:\n if self.min is not None:\n if self.include_min:\n if value < self.min:\n return False\n else:\n if value <= self.min:\n return False\n\n if self.max is not None:\n if self.include_max:\n if value > self.max:\n return False\n else:\n if value >= self.max:\n return False\n\n if self.step is None:\n return True\n\n if self.min is not None:\n value -= self.min\n return (value % self.step) == 0", "def isGE(self, a : float, b : float) -> bool:\n return (a >= b - self.tol * max(abs(a), abs(b), 1.0)) #and (a >= b - 0.1)", "def test_if_between(a, b, test_val):\n if a < b:\n return a <= test_val <= b\n else:\n return b <= test_val <= a", "def withinPercent(val1, val2, percent = 1.):\n if (val1 == np.nan) | (val2 == 
np.nan) :\n print(\"One of your values is NOT A NUMBER\")\n lowval = np.min(np.array([val1, val2]))\n meanval = np.mean(np.array([val1, val2]))\n absDif = np.abs(np.subtract(val1, val2))\n percentDif = np.abs(100* (absDif/lowval))\n within_percent_bool = percentDif <= percent\n return within_percent_bool, percentDif", "def is_violated(self,\n env\n ):\n c_value = self.get_value(env)\n flag = np.any(np.greater(c_value, 0.))\n return bool(flag)", "def evaluate_stopping_condition(self, current_value: Union[float, int, np.float64, np.ndarray]):\n\n if self.__reference_value is not None:\n\n if type(current_value) in [float, int, np.float64]:\n if not self.__smaller_value_required:\n if not self.__equal_required:\n return current_value > self.__reference_value\n else:\n return current_value >= self.__reference_value\n else:\n if not self.__equal_required:\n return current_value < self.__reference_value\n else:\n return current_value <= self.__reference_value\n\n elif type(current_value) == np.ndarray:\n if not self.__smaller_value_required:\n if not self.__equal_required:\n return (current_value > self.__reference_value).all()\n else:\n return (current_value >= self.__reference_value).all()\n else:\n if not self.__equal_required:\n return (current_value < self.__reference_value).all()\n else:\n return (current_value <= self.__reference_value).all()\n\n else:\n raise NotImplementedError\n\n else:\n return False", "def __call__(self): # run test\n\n try: # Check if any errors were raised during calling of self.func\n return abs(self.func(*self.args, **self.kwargs) - self.res) < self._tolerance\n\n except IndexError:\n return False", "def within_delta(dt1, dt2, delta):\n delta = abs(delta)\n difference = dt1 - dt2\n return -delta <= difference <= delta", "def arecloseenough(x1, x2):\n\n if abs(x1 - x2) <= VERYSMALL:\n return True\n \n return False", "def assertAlmostEqualAbsolute(self, value: float, ref_value: float):\n delta = TestGuesses.__tolerance_percent__ * np.abs(ref_value)\n self.assertAlmostEqual(value, ref_value, delta=delta)", "def deviation_ok(norm, value, epsilon):\n deviation = abs(norm-value)/norm\n # print(abs(d-epsilon))\n return deviation <= epsilon" ]
[ "0.6399123", "0.62219507", "0.60484093", "0.60059714", "0.59347", "0.59186506", "0.590363", "0.5893858", "0.58391315", "0.58225876", "0.58073807", "0.5764574", "0.5754584", "0.5754552", "0.5754552", "0.5742861", "0.5693243", "0.56759703", "0.5661897", "0.5655538", "0.5655186", "0.5647076", "0.56451255", "0.56436485", "0.5626147", "0.5589876", "0.55692273", "0.55685395", "0.5552875", "0.55327076" ]
0.76101834
0
Convert input to a list. If input is None, this method simply returns None.
def _to_list( self, input ):
    import numpy
    listtypes = (list, tuple, numpy.ndarray)
    if input == None:
        return None
    elif type(input) in listtypes:
        return list(input)
    else:
        return [input]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _convert_to_list(self, input_argument):\n if type(input_argument) is not list:\n input_argument = [input_argument]\n return input_argument", "def _MakeList(input):\n if len(input) == 0:\n raise ValueError(\n 'input cannot be empty.')\n elif len(input) == 1:\n output = input[0]\n if not isinstance(output, list):\n output = [output]\n else:\n output = list(input)\n return output", "def str2list(input):\n if isinstance(input, str):\n return [input]\n\n else:\n return input", "def convert_to_list(item):\n return item if item is None or isinstance(item, list) else [item]", "def makelist(input):\n if isinstance(input, list) or isinstance(input, np.array):\n output = input\n else:\n output = [input]\n return output", "def tolist(x):\n return x if isinstance(x, list) else [x]", "def to_list(x):\n if isinstance(x, list):\n return x\n return [x]", "def default_to_list(value):\n if not isinstance(value, list) and value is not None:\n value = [value]\n elif value is None:\n value = []\n\n return value", "def ensure_list(self, x):\n return x if isinstance(x, list) else [x]", "def as_list(self):\n return self._flattened_inputs", "def coerce_to_list(obj) -> list:\n if obj is None:\n return []\n elif isinstance(obj, list):\n return obj\n else:\n return [obj]", "def _inputs_to_list(self, inputs: InputsType) -> list:\n if isinstance(inputs, str):\n backend = get_file_backend(inputs)\n if hasattr(backend, 'isdir') and isdir(inputs):\n # Backends like HttpsBackend do not implement `isdir`, so only\n # those backends that implement `isdir` could accept the inputs\n # as a directory\n filename_list = list_dir_or_file(inputs, list_dir=False)\n inputs = [\n join_path(inputs, filename) for filename in filename_list\n ]\n\n if not isinstance(inputs, (list, tuple)):\n inputs = [inputs]\n\n return list(inputs)", "def _init_as_list(arg):\n if arg is None:\n return []\n elif isinstance(arg, str):\n return [arg]\n else:\n return arg", "def _as_list(value):\n if not isinstance(value, list):\n value = [value]\n return value", "def _to_list(obj):\n if not isinstance(obj, list):\n return [obj]\n else:\n return obj", "def as_list(arg):\n if _is_list(arg):\n return arg\n return [arg]", "def controlled_list(input_list):\n output_list = input_list\n\n if not isinstance(input_list, list):\n\n dummy_list = []\n dummy_list.append(input_list)\n output_list = dummy_list\n print('Converting')\n print('Before return')\n print(output_list)\n return output_list", "def aslist(something):\n return something if isinstance(something, list) else [something]", "def to_list(x):\n import collections\n if not isinstance(x, collections.Iterable) or isinstance(x, str):\n x = [x]\n return x", "def safelist(listable):\n if type(listable) == str:\n return [listable]\n else:\n return listable.tolist()", "def _param_to_list(param: OptionalConfigUnitList) -> List[\"ConfigUnit\"]:\n if param is None:\n return []\n if isinstance(param, list):\n return param.copy()\n return [param]", "def convert_to_list(element):\r\n return list(element)", "def to_list(value):\n ret_val = copy.deepcopy(value)\n whitelist = (list, tuple)\n if ret_val is not None:\n ret_val = list(ret_val) if isinstance(ret_val, whitelist) else [value]\n return ret_val", "def to_list(value):\n if hasattr(value, '__iter__') and not isinstance(value, str):\n return list(value)\n return [value]", "def cast_to_list(value):\n if isinstance(value, str):\n value = cast_to_jdict(value)\n assert isinstance(value, list)\n return value\n elif hasattr(value, 'tolist'): # meant for numpy arrays\n 
# what other potential attributes to check for?\n return value.tolist()\n else:\n return list(\n value\n ) # will work with set, tuple, and other iterables (not recursively though: just level 0)", "def list_cast(inputs, dst_type):\n return iter_cast(inputs, dst_type, return_type=list)", "def tolist(self) -> List[T]:\n if isinstance(self.array, list):\n return self.array\n return list(self.array)", "def _input_lyrs(self):\n input_lyrs = None\n if hasattr(self, 'input_lyrs'):\n _input_lyrs = self.input_lyrs\n if isinstance(_input_lyrs, list) and len(_input_lyrs) == 1:\n input_lyrs = _input_lyrs[0]\n elif _input_lyrs.__class__.__name__ == \"ListWrapper\" and len(_input_lyrs) == 1:\n input_lyrs = _input_lyrs[0]\n else:\n input_lyrs = _input_lyrs\n\n return input_lyrs", "def force_list(object):\n try:\n return list(object)\n except TypeError:\n return [object]", "def to_python(self, value):\n # Return an empty list if no input was given.\n if not value:\n return []\n return value.split(',')" ]
[ "0.7752635", "0.73784405", "0.7284458", "0.7166048", "0.7078194", "0.7020485", "0.6989503", "0.689384", "0.6810329", "0.676979", "0.67141014", "0.6616923", "0.6545607", "0.6530351", "0.6525318", "0.6469693", "0.64340585", "0.6415135", "0.6366174", "0.632168", "0.62872195", "0.62218064", "0.61655223", "0.6125493", "0.61247873", "0.60765207", "0.6064188", "0.6056793", "0.60491544", "0.6046471" ]
0.89297014
0
Check if FFT used in sinusoidal baselining properly handles flag info. Checking is done by comparing the baseline fitting results from two input data, defined as 'infile_spk' and 'infile_int'. 'infile_spk' has six spiky features in its spectra at ch 2, 22, 42, 62, 82, and 97, and channels around these spikes (namely, 0-4, 20-24, 40-44, 60-64, 80-84, and 95-99) are flagged, while the other one 'infile_int' has
def testFlagFFT(self):
    mode = "list"
    infile_spk = self.infile_02spk
    outfile_spk = self.outroot+"_flagFFT_spk.asap"
    result = sdbaseline(infile=infile_spk,maskmode=mode,outfile=outfile_spk,blfunc='sinusoid',fftthresh='top3')
    infile_int = self.infile_02int
    outfile_int = self.outroot+"_flagFFT_int.asap"
    result = sdbaseline(infile=infile_int,maskmode=mode,outfile=outfile_int,blfunc='sinusoid',fftthresh='top3')
    bsuffix = "_blparam.txt"
    self._compareCoefficients(outfile_spk+bsuffix, outfile_int+bsuffix)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def processFile(filename,length = 256,q=1,fs_in=8000,divide=4,plot=False):\n length = length*divide\n #fs = sample rate, sound = multichannel sound signal\n try:\n fs1, sound = wavfile.read(filename)\n except ValueError:\n print(str(filename) + ' failed to process')\n return 'failed'\n if fs1 != fs_in:\n raise ValueError('Sampling rate should be ' + str(fs_in) + ' for: ' + filename)\n sig1 = sound[:0] #left channel\n pre_emphasis = 0.97\n sig1 = np.append(sig1[0], sig1[1:] - pre_emphasis * sig1[:-1])\n\n \n fs2, sig2 = downsample(sig1,fs1,q)\n N2 = len(sig2)\n sig3 = sig2[N2//2-length:N2//2+length]\n #print(len(sig3))\n\n FFT = abs(scipy.fft(sig3))\n FFT_side = FFT[range(len(FFT)//2)]\n #freqs = scipy.fftpack.fftfreq(sig3.size, 1/fs2)\n #plt.plot(freqs,FFT)\n if len(FFT_side) != length:\n print('ERROR MESSAGE DETAILS')\n print('filename: ' + filename)\n print('length = ' + str(length))\n print('fs_in = ' + str(fs_in))\n print('q = ' + str(q))\n print('divide = ' + str(divide))\n total_time = len(sig1)/fs1\n print('total_time = ' + str(total_time))\n print('Please check: length < total_time*fs//(2*q)')\n print('Check: ' + str(length) + ' < ' + str(total_time*fs1//(2*q)))\n raise ValueError('Length FFT_side != length: ' + str(len(FFT_side)) + ' != ' + str(length))\n \n \n FFT_log = []\n # normalize FFT\n for value in FFT_side:\n value = np.log(value)\n FFT_log.append(value)\n max_val = getMax(FFT_log)[1]\n FFT_norm = []\n for value in FFT_log:\n FFT_norm.append(value/max_val)\n \n \n FFT_side = np.array(FFT_norm)\n FFT_divided = FFT_side[range(length//divide)]\n #plot = True\n if plot == True:\n freqs = scipy.fftpack.fftfreq(sig3.size, 1/fs2)\n freqs_divided = np.array(freqs[range(len(FFT_divided))])\n plt.plot(freqs_divided,FFT_divided) # plotting the complete fft spectrum\n plt.show()\n \n return FFT_divided", "def _cutoff(xdata, ydata, btype, fs, ff):\r\n try:\r\n# print ff\r\n if ff != None:\r\n nPts = int(1./(((xdata.max()-xdata.min())/xdata.shape[0])*(ff/10.)))\r\n else:\r\n nPts = 0\r\n if nPts%2 == 0:\r\n nPts = nPts + 1\r\n if nPts < xdata.shape[0]:\r\n nPts = xdata.shape[0]\r\n# print nPts\r\n window = np.hanning(ydata.shape[0])\r\n freq = FourierFrequency(xdata, nPts)\r\n index = np.argsort(freq)\r\n tdf = FourierTransform(ydata*window, nPts)\r\n tdf = abs(tdf)\r\n pp = _maxima(tdf[index], freq[index], lookahead = 1)\r\n# mm = _minima(tdf[index], freq[index], lookahead=1)\r\n pp, hh = np.array(np.array(pp).T[0]), np.array(np.array(pp).T[1])\r\n# mm = np.array(np.array(mm).T[0])#, np.array(np.array(mm).T[1])\r\n ind = np.where(pp == min(abs(pp)))[0][0]\r\n ind2 = np.where(hh == max(hh[(ind+1):]))[0][0]\r\n for u, i in enumerate(freq):\r\n if i > abs(pp[ind2])*1.5 or i < -abs(pp[ind2])*1.5 or (i < abs(pp[ind2])/2. and i > -abs(pp[ind2])/2.) 
or (tdf[u] > hh[ind2]*1.05): #(abs(i) < abs(mm[indmin])) or \r\n tdf[u] = 0.\r\n def lor2(x, A0, x0, gamma0):\r\n return A0*(1/np.pi)*(gamma0/2)/((x-x0)**2+(gamma0/2)**2)+A0*(1/np.pi)*(gamma0/2)/((x+x0)**2+(gamma0/2)**2)\r\n lmod2 = lmf.Model(lor2)\r\n lmod2.make_params()\r\n lmod2.set_param_hint('A0', value=max(tdf), min=max(tdf)/1000.)\r\n lmod2.set_param_hint('x0', value=abs(pp[ind2]), min=0.)\r\n lmod2.set_param_hint('gamma0', value=1., min=0.)\r\n result2 = lmod2.fit(tdf[index], x=freq[index])\r\n# print result2.values.get('x0'), result2.values.get('gamma0')\r\n if btype=='high':\r\n if result2.values.get('x0')-result2.values.get('gamma0') > 0.:\r\n# print \"frequency: \", result2.values.get('x0')-result2.values.get('gamma0')\r\n if hh[ind2] != max(hh[(ind+1):]):\r\n print \"False\", \" maximum\", \"\\n\", \"\\n\", \"\\n\"\r\n return result2.values.get('x0')-result2.values.get('gamma0')\r\n else:\r\n# print \"failed: 0\"\r\n return 0.\r\n elif btype=='low':\r\n return result2.values.get('x0')+result2.values.get('gamma0')\r\n except Exception:\r\n pass\r\n finally:\r\n pass", "def analyze_data(inputFileList, **kwargs):\n OBSKEY = 'OBSTYPE'\n MTKEY = 'MTFLAG'\n SCNKEY = 'SCAN_TYP'\n FILKEY = 'FILTER'\n FILKEY1 = 'FILTER1'\n FILKEY2 = 'FILTER2'\n APKEY = 'APERTURE'\n TARKEY = 'TARGNAME'\n EXPKEY = 'EXPTIME'\n FGSKEY = 'FGSLOCK'\n CHINKEY = 'CHINJECT'\n\n acsFiltNameList = [FILKEY1, FILKEY2]\n\n catalog = None # Astrometric catalog used for alignment\n catalogSources = 0 # Number of astrometric catalog sources determined based upon coordinate overlap with image WCS\n foundSources = 0 # Number of sources detected in images\n matchSources = 0 # Number of sources cross matched between astrometric catalog and detected in image\n rms_x = -1.0\n rms_y = -1.0\n rms_ra = -1.0\n rms_dec = -1.0\n chisq_x = -1.0\n chisq_y = -1.0\n completed = False # If true, there was no exception and the processing completed all logic\n dateObs = None # Human readable date\n mjdutc = -1.0 # MJD UTC start of exposure\n fgslock = None\n processMsg = None\n status = 9999\n compromised = 0\n headerletFile = None\n\n fit_rms = -1.0\n total_rms = -1.0\n datasetKey = -1.0\n\n namesArray = ('imageName', 'instrument', 'detector', 'filter', 'aperture', 'obstype',\n 'subarray', 'dateObs', 'mjdutc', 'doProcess', 'processMsg', 'catalog', 'foundSources',\n 'catalogSources','matchSources', 'rms_x', 'rms_y', 'rms_ra', 'rms_dec', 'completed',\n 'fit_rms', 'total_rms', 'datasetKey', 'status', 'headerletFile')\n dataType = ('S20', 'S20', 'S20', 'S20', 'S20', 'S20', 'b', 'S20', 'f8', 'b', 'S30',\n 'S20', 'i4', 'i4', 'i4', 'f8', 'f8', 'f8', 'f8', 'b', 'f8', 'f8', 'i8', 'i4', 'S30')\n\n # Create an astropy table\n outputTable = Table(names=namesArray,dtype=dataType)\n\n # Loop over the list of images to determine viability for alignment processing\n #\n # Capture the data characteristics before any evaluation so the information is\n # available for the output table regardless of which keyword is used to \n # to determine the data is not viable for alignment.\n\n for inputFile in inputFileList:\n\n header_hdu = 0\n header_data = getheader(inputFile, header_hdu)\n\n # Keywords to use potentially for downstream analysis\n instrume = (header_data['INSTRUME']).upper()\n detector = (header_data['DETECTOR']).upper()\n subarray = header_data['SUBARRAY']\n dateObs = header_data['DATE-OBS']\n mjdutc = header_data['EXPSTART']\n\n # Obtain keyword values for analysis of viability\n obstype = (header_data[OBSKEY]).upper()\n mtflag = 
(header_data[MTKEY]).upper()\n \n scan_typ = ''\n if instrume == 'WFC3':\n scan_typ = (header_data[SCNKEY]).upper()\n\n sfilter = ''\n if instrume == 'WFC3':\n sfilter = (header_data[FILKEY]).upper()\n # Concatenate the two ACS filter names together with an underscore\n # If the filter name is blank, skip it\n if instrume == 'ACS':\n for filtname in acsFiltNameList:\n\n # The filter keyword value could be zero or more blank spaces \n # Strip off any leading or trailing blanks\n if len(header_data[filtname].upper().strip()) > 0:\n\n # If the current filter variable already has some content,\n # need to append an underscore before adding more text\n if len(sfilter) > 0:\n sfilter += '_'\n sfilter += header_data[filtname].upper().strip()\n\n aperture = (header_data[APKEY]).upper()\n targname = (header_data[TARKEY]).upper()\n exptime = header_data[EXPKEY]\n fgslock = (header_data[FGSKEY]).upper()\n\n chinject = 'NONE'\n if instrume == 'WFC3' and detector == 'UVIS':\n chinject = (header_data[CHINKEY]).upper()\n\n # Determine if the image has one of these conditions. The routine\n # will exit processing upon the first satisfied condition.\n\n noProcKey = None\n noProcValue = None\n doProcess = True\n # Imaging vs spectroscopic or coronagraphic\n if obstype != 'IMAGING':\n noProcKey = OBSKEY\n noProcValue = obstype \n\n # Moving target\n elif mtflag == 'T':\n noProcKey = MTKEY\n noProcValue = mtflag \n\n # Bostrophidon without or with dwell (WFC3 only)\n elif any ([scan_typ == 'C', scan_typ == 'D']):\n noProcKey = SCNKEY\n noProcValue = scan_typ\n\n # Filter which does not begin with: 'F'(F###), 'C'(CLEAR), 'N'(N/A), and is not blank\n # The sfilter variable may be the concatenation of two filters (F160_CLEAR)\n elif sfilter[0] != 'F' and sfilter[0] != '' and sfilter[0] != 'C' and sfilter[0] != 'N': \n noProcKey = FILKEY\n noProcValue = sfilter\n\n elif '_' in sfilter:\n pos = sfilter.index('_')\n pos += 1\n\n if sfilter[pos] != 'F' and sfilter[pos] != '' and sfilter[pos] != 'C' and sfilter[pos] != 'N': \n noProcKey = FILKEY\n noProcValue = sfilter\n\n # Ramp, polarizer, grism, or prism \n elif any (x in aperture for x in ['RAMP', 'POL', 'GRISM', '-REF', 'PRISM']):\n noProcKey = APKEY\n noProcValue = aperture \n\n # Calibration target\n elif any (x in targname for x in ['DARK', 'TUNG', 'BIAS', 'FLAT', 'DEUT', 'EARTH-CAL']):\n noProcKey = TARKEY\n noProcValue = targname\n\n # Exposure time of effectively zero\n elif math.isclose(exptime, 0.0, abs_tol=1e-5):\n noProcKey = EXPKEY\n noProcValue = exptime \n\n # Commanded FGS lock\n elif any (x in fgslock for x in ['GY', 'COARSE']):\n noProcKey = FGSKEY\n noProcValue = fgslock\n\n # Charge injection mode\n elif chinject != 'NONE':\n noProcKey = CHINKEY\n noProcValue = chinject\n\n # If noProcKey is set to a keyword, then this image has been found to not be viable for\n # alignment purposes.\n if (noProcKey is not None):\n if (noProcKey != FGSKEY):\n doProcess = False\n msgType = Messages.NOPROC.value\n else:\n msgType = Messages.WARN.value\n\n processMsg = noProcKey + '=' + str(noProcValue)\n\n # Issue message to log file for this data indicating no processing to be done or \n # processing should be allowed, but there may be some issue with the result (e.g., \n # GYROS mode so some drift)\n generate_msg(inputFile, msgType, noProcKey, noProcValue)\n\n # Populate a row of the table\n outputTable.add_row([inputFile, instrume, detector, sfilter, aperture, obstype,\n subarray, dateObs, mjdutc, doProcess, processMsg, catalog, \n foundSources, 
catalogSources, matchSources, rms_x, rms_y, \n rms_ra, rms_dec, completed, fit_rms, total_rms, datasetKey,\n status, headerletFile])\n #outputTable.pprint(max_width=-1)\n\n return(outputTable)", "def read_inversion_info(file_dic):\n #print_file_test = open('file_test.txt','w')\n\n if not ( check_inversion_files(file_dic) ):\n print 'error(read_inversion_info): problem with lenstool file names'\n return 0\n \n file_generate_arcs = file_dic['file_generate_arcs']\n info_input_lens = fc.extract_second_identifiers( file_generate_arcs, \\\n 'potential' )\n#-------------------------------------------------------------------------------\n\n file_source = file_dic['file_source']\n info_src = np.loadtxt(file_source, unpack=False)\n if len(info_src) == 8 and np.isscalar(info_src[0]):\n #FIXME - check if the second condition is all we need\n info_src = [info_src]\n#-------------------------------------------------------------------------------\n\n file_make_inversion = file_dic['file_make_inversion']\n info_fited_param = fc.extract_second_identifiers( file_make_inversion, \\\n 'limit' )\n info_forme = fc.extract_parameter(file_make_inversion, 'forme')[0][0]\n\n#-------------------------------------------------------------------------------\n\n file_best_fit = file_dic['file_best_fit']\n info_best_fit = fc.extract_second_identifiers( file_best_fit, \\\n 'potentiel' )\n\n info_xi2 = fc.extract_parameter(file_best_fit, '#Chi2pos:')\n\n#-------------------------------------------------------------------------------\n file_chires = file_dic['file_chires']\n\n info_chires = extract_parameter(file_chires, '0')\n rmss_mean = [0.0, 0.0]\n rmsi_mean = [0.0, 0.0]\n for i in info_chires:\n if i[0] != 'A':\n rmss_mean[0] = rmss_mean[0] + float(i[7])\n rmss_mean[1] = rmss_mean[1] + 1.0\n \n rmsi_mean[0] = rmsi_mean[0] + float(i[8])\n rmsi_mean[1] = rmsi_mean[1] + 1.0\n\n rmss_mean = rmss_mean[0]/rmss_mean[1]\n rmsi_mean = rmsi_mean[0]/rmsi_mean[1]\n#-------------------------------------------------------------------------------\n out_dict = { 'xi2' : float(info_xi2[0][0]), \\\n 'best_fit_lens' : info_best_fit, \\\n 'rmsi_mean' : rmsi_mean, \\\n 'rmss_mean' : rmss_mean, \\\n 'fited_parameters' : info_fited_param[0].keys(), \\\n 'input_lens' : info_input_lens[len(info_input_lens) - 1], \\\n 'forme' : info_forme \\\n }\n #for i in out_dict.keys():\n # print i, out_dict[i]\n return out_dict", "def fbank(signal,samplerate=16000,winlen=0.025,winstep=0.01,\n nfilt=26,nfft=512,lowfreq=0,highfreq=None,preemph=0.97): \n highfreq= highfreq or samplerate/2\n print \"preemph %s\"%(preemph)\n signal = sigproc.preemphasis(signal,preemph)\n frames = sigproc.framesig(signal, winlen*samplerate, winstep*samplerate)\n matchframes(frames[0], frames[1])\n pspec = sigproc.powspec(frames,nfft)\n energy = pylab.sum(pspec,1) # this stores the total energy in each frame\n energy = pylab.where(energy == 0, pylab.finfo(float).eps, energy) # if energy is zero, we get problems with log\n fb = get_filterbanks(nfilt, nfft, samplerate, lowfreq, highfreq)\n print \"len(fb) %s\"%(len(fb))\n colour = \"k-\"\n for i in range(len(fb)):\n if colour == \"k-\":\n colour = \"r-\"\n else:\n colour = \"k-\"\n startedplot = False\n midpoint = 0\n for j in range(len(fb[i])):\n if fb[i][j] > 0:\n if startedplot == False:\n startedplot = j\n if j > 0:\n pylab.plot([j-1, j], [fb[i][j-1], fb[i][j]], colour)\n if fb[i][j] == 1.0:\n midpoint = j\n else:\n if not startedplot == False:\n pylab.plot([j-1, j], [fb[i][j-1], 0], colour)\n try:\n print \"slope to midpoint 
%.3f, slope from midpoint %.3f\"%(1.0/float(midpoint-startedplot), 1.0/float(midpoint-j+1))\n except:\n pass\n break\n pylab.show()\n feat = pylab.dot(pspec, fb.T) # compute the filterbank energies\n feat = pylab.where(feat == 0, pylab.finfo(float).eps, feat) # if feat is zero, we get problems with log\n return feat, energy", "def detect_sample(sinogram, sino_type=\"180\"):\n check = True\n if not (sino_type == \"180\" or sino_type == \"360\"):\n raise ValueError(\"!!! Use only one of two options: '180' or '360'!!!\")\n if sino_type == \"180\":\n sinogram = 1.0 * np.vstack((sinogram, np.fliplr(sinogram)))\n sino_fft = np.abs(fft.fftshift(fft.fft2(sinogram)))\n (nrow, ncol) = sino_fft.shape\n ycenter = nrow // 2\n xcenter = ncol // 2\n radi = min(20, int(np.ceil(0.05 * min(ycenter, xcenter))))\n sino_fft = sino_fft[:ycenter - radi, :xcenter - radi]\n size = min(30, int(np.ceil(0.02 * min(nrow, ncol))))\n sino_smooth = ndi.gaussian_filter(sino_fft, size)\n (nrow1, _) = sino_smooth.shape\n row = int(0.8 * nrow1)\n list_check = np.zeros(nrow1 - row, dtype=np.float32)\n pos = np.argmax(sino_smooth[row])\n for i in np.arange(row, nrow1):\n pos1 = np.argmax(sino_smooth[i])\n if pos1 > pos:\n list_check[i - row] = 1.0\n ratio = (np.sum(list_check) / len(list_check))\n if ratio < 0.4:\n check = False\n return check", "def rfi_flag(data, freqs=None):\n \n masks = Masks()\n\n # Record any nans\n masks.add(\"nan_mask\", np.isnan(data))\n\n if params.do_sum_threshold:\n try:\n to_flag\n except NameError:\n bpass = estimate_bandpass(data)\n to_flag = data / bpass\n \n to_flag.mask = sum_threshold(to_flag)\n to_flag.mask = flag_fraction(to_flag)\n to_flag.mask = flag_window(to_flag)\n\n masks.add(\"sum_threshold_mask\", to_flag.mask)\n \n if params.do_sigma_clip:\n try:\n to_flag\n except NameError:\n bpass = estimate_bandpass(data)\n to_flag = data / bpass\n \n to_flag.mask = clip2(to_flag)\n to_flag.mask = flag_fraction(to_flag)\n to_flag.mask = flag_window(to_flag)\n\n masks.add(\"clip_mask\", to_flag.mask)\n \n if params.do_dtv_flagging and freqs is not None:\n try:\n to_flag\n except NameError:\n bpass = estimate_bandpass(data)\n to_flag = data / bpass\n \n to_flag.mask, dtv_times = do_dtv_flagging2(to_flag, freqs) \n\n masks.add(\"dtv_mask\", to_flag.mask)\n\n dtv_times_mask = np.zeros_like(data, dtype=np.bool)\n dtv_times_mask[dtv_times] = True\t\n\tmasks.add(\"dtv_times_mask\", dtv_times_mask)\n masks.dtv_tms = dtv_times\n \n\n return masks", "def check(self, data_input, debug_flag):\n self.results = [ [], [], [], [], [], [], [], [], [], False, [] ]\n _temp = 0\n _result = {}\n _wave1_t = data_input[0][0]\n _wave2_t = data_input[0][1]\n _wave3_t = data_input[0][2]\n _wave4_t = data_input[0][3]\n _wave5_t = data_input[0][4] \n _wave1_p = data_input[1][0]\n _wave2_p = data_input[1][1]\n _wave3_p = data_input[1][2]\n _wave4_p = data_input[1][3]\n _wave5_p = data_input[1][4]\n\n #Step1: 2 vs 1\n #Step1.1: time_analaysis\n _result = {} \n _result[str(\"2_1_t\")] = EW_fibonacci.check_fibratio(_wave2_t, _wave1_t)\n self.results[0].append(_result)\n \n #Step1.2: price_analaysis\n _result = {} \n _result[str(\"2_1_p\")] = EW_fibonacci.check_fibratio(_wave2_p, _wave1_p)\n self.results[0].append(_result)\n\n\n #Step2: 3 vs 1\n #Step2.1: time_analaysis\n _result = {} \n _result[str(\"3_1_t\")] = EW_fibonacci.check_fibratio(_wave3_t, _wave1_t)\n self.results[1].append(_result)\n \n #Step2.2: price_analaysis\n _result = {} \n _result[str(\"3_1_p\")] = EW_fibonacci.check_fibratio(_wave3_p, _wave1_p)\n 
self.results[1].append(_result)\n \n\n #Step3: 3 vs 2\n #Step3.1: time_analaysis\n _result = {} \n _result[str(\"3_2_t\")] = EW_fibonacci.check_fibratio(_wave3_t, _wave2_t)\n self.results[2].append(_result)\n \n #Step3.2: price_analaysis\n _result = {} \n _result[str(\"3_2_p\")] = EW_fibonacci.check_fibratio(_wave3_p, _wave2_p)\n self.results[2].append(_result) \n\n\n #Step4: 4 vs 2\n #Step4.1: time_analaysis\n _result = {} \n _result[str(\"4_2_t\")] = EW_fibonacci.check_fibratio(_wave4_t, _wave2_t)\n self.results[3].append(_result)\n \n #Step4.2: price_analaysis\n _result = {} \n _result[str(\"4_2_p\")] = EW_fibonacci.check_fibratio(_wave4_p, _wave2_p)\n self.results[3].append(_result) \n\n #Step5: 4 vs 3\n #Step5.1: time_analaysis\n _result = {} \n _result[str(\"4_3_t\")] = EW_fibonacci.check_fibratio(_wave4_t, _wave3_t)\n self.results[4].append(_result)\n \n #Step5.2: price_analaysis \n _result = {} \n _result[str(\"4_3_p\")] = EW_fibonacci.check_fibratio(_wave4_p, _wave3_p)\n self.results[4].append(_result)\n\n\n #Step6: 5 vs 1\n #Step6.1: time_analaysis\n _result = {} \n _result[str(\"5_1_t\")] = EW_fibonacci.check_fibratio(_wave5_t, _wave1_t)\n self.results[5].append(_result)\n \n #Step6.2: price_analaysis\n _result = {} \n _result[str(\"5_1_p\")] = EW_fibonacci.check_fibratio(_wave5_p, _wave1_p)\n self.results[5].append(_result) \n\n #Step7: 5 vs 3\n #Step7.1: time_analaysis\n _result = {} \n _result[str(\"5_3_t\")] = EW_fibonacci.check_fibratio(_wave5_t, _wave3_t)\n self.results[6].append(_result)\n \n #Step7.2: price_analaysis\n _result = {} \n _result[str(\"5_3_p\")] = EW_fibonacci.check_fibratio(_wave5_p, _wave3_p)\n self.results[6].append(_result)\n \n\n #Step8: 5 vs 0-3\n #Step8.1: time_analaysis\n _result = {} \n _result[str(\"5_0-3_t\")] = EW_fibonacci.check_fibratio(_wave5_t, (_wave1_t + _wave2_t +_wave3_t))\n self.results[7].append(_result) \n \n #Step8.2: price_analaysis\n _result = {} \n _result[str(\"5_0-3_p\")] = EW_fibonacci.check_fibratio(_wave5_p, (_wave1_p - _wave2_p +_wave3_p))\n self.results[7].append(_result)\n\n \n #Step9: 5 vs 4\n #Step9.1: time_analaysis\n _result = {} \n _result[str(\"5_4_t\")] = EW_fibonacci.check_fibratio(_wave5_t, _wave4_t)\n self.results[8].append(_result)\n \n #Step9.2: price_analaysis \n _result = {} \n _result[str(\"5_4_p\")] = EW_fibonacci.check_fibratio(_wave5_p, _wave4_p)\n self.results[8].append(_result)\n \n\n #Step10: Check if this impulse is valid or not\n self.results[9], self.results[10] = self.check_type(data_input, debug_flag)\n\n\n #Step11: return the results\n return self.results", "def check(self, data_input, debug_flag):\n self.results = [ [], [], [], [], [], [], [], [], [], False, [] ]\n _temp = 0\n _result = {}\n _wave1_t = data_input[0][0]\n _wave2_t = data_input[0][1]\n _wave3_t = data_input[0][2]\n _wave4_t = data_input[0][3]\n _wave5_t = data_input[0][4] \n _wave1_p = data_input[1][0]\n _wave2_p = data_input[1][1]\n _wave3_p = data_input[1][2]\n _wave4_p = data_input[1][3]\n _wave5_p = data_input[1][4]\n\n #Step1: 2 vs 1\n #Step1.1: time_analaysis\n _result = {} \n _result[str(\"2_1_t\")] = EW_fibonacci.check_fibratio(_wave2_t, _wave1_t)\n self.results[0].append(_result)\n \n #Step1.2: price_analaysis\n _result = {} \n _result[str(\"2_1_p\")] = EW_fibonacci.check_fibratio(_wave2_p, _wave1_p)\n self.results[0].append(_result)\n\n\n #Step2: 3 vs 1\n #Step2.1: time_analaysis\n _result = {} \n _result[str(\"3_1_t\")] = EW_fibonacci.check_fibratio(_wave3_t, _wave1_t)\n self.results[1].append(_result)\n \n #Step2.2: 
price_analaysis\n _result = {} \n _result[str(\"3_1_p\")] = EW_fibonacci.check_fibratio(_wave3_p, _wave1_p)\n self.results[1].append(_result)\n \n\n #Step3: 3 vs 2\n #Step3.1: time_analaysis\n _result = {} \n _result[str(\"3_2_t\")] = EW_fibonacci.check_fibratio(_wave3_t, _wave2_t)\n self.results[2].append(_result)\n \n #Step3.2: price_analaysis\n _result = {} \n _result[str(\"3_2_p\")] = EW_fibonacci.check_fibratio(_wave3_p, _wave2_p)\n self.results[2].append(_result) \n\n\n #Step4: 4 vs 2\n #Step4.1: time_analaysis\n _result = {} \n _result[str(\"4_2_t\")] = EW_fibonacci.check_fibratio(_wave4_t, _wave2_t)\n self.results[3].append(_result)\n \n #Step4.2: price_analaysis\n _result = {} \n _result[str(\"4_2_p\")] = EW_fibonacci.check_fibratio(_wave4_p, _wave2_p)\n self.results[3].append(_result) \n\n #Step5: 4 vs 3\n #Step5.1: time_analaysis\n _result = {} \n _result[str(\"4_3_t\")] = EW_fibonacci.check_fibratio(_wave4_t, _wave3_t)\n self.results[4].append(_result)\n \n #Step5.2: price_analaysis \n _result = {} \n _result[str(\"4_3_p\")] = EW_fibonacci.check_fibratio(_wave4_p, _wave3_p)\n self.results[4].append(_result)\n\n\n #Step6: 5 vs 1\n #Step6.1: time_analaysis\n _result = {} \n _result[str(\"5_1_t\")] = EW_fibonacci.check_fibratio(_wave5_t, _wave1_t)\n self.results[5].append(_result)\n \n #Step6.2: price_analaysis\n _result = {} \n _result[str(\"5_1_p\")] = EW_fibonacci.check_fibratio(_wave5_p, _wave1_p)\n self.results[5].append(_result) \n\n #Step7: 5 vs 3\n #Step7.1: time_analaysis\n _result = {} \n _result[str(\"5_3_t\")] = EW_fibonacci.check_fibratio(_wave5_t, _wave3_t)\n self.results[6].append(_result)\n \n #Step7.2: price_analaysis\n _result = {} \n _result[str(\"5_3_p\")] = EW_fibonacci.check_fibratio(_wave5_p, _wave3_p)\n self.results[6].append(_result)\n \n\n #Step8: 5 vs 0-3\n #Step8.1: time_analaysis\n _result = {} \n _result[str(\"5_0-3_t\")] = EW_fibonacci.check_fibratio(_wave5_t, (_wave1_t + _wave2_t +_wave3_t))\n self.results[7].append(_result) \n \n #Step8.2: price_analaysis\n _result = {} \n _result[str(\"5_0-3_p\")] = EW_fibonacci.check_fibratio(_wave5_p, (_wave1_p - _wave2_p +_wave3_p))\n self.results[7].append(_result)\n\n \n #Step9: 5 vs 4\n #Step9.1: time_analaysis\n _result = {} \n _result[str(\"5_4_t\")] = EW_fibonacci.check_fibratio(_wave5_t, _wave4_t)\n self.results[8].append(_result)\n \n #Step9.2: price_analaysis \n _result = {} \n _result[str(\"5_4_p\")] = EW_fibonacci.check_fibratio(_wave5_p, _wave4_p)\n self.results[8].append(_result)\n \n\n #Step10: Check if this impulse is valid or not\n self.results[9], self.results[10] = self.check_type(data_input, debug_flag)\n\n\n #Step11: return the results\n return self.results", "def check(self, data_input, debug_flag):\n self.results = [ [], [], [], [], [], [], [], [], [], False, [] ]\n _temp = 0\n _result = {}\n _wave1_t = data_input[0][0]\n _wave2_t = data_input[0][1]\n _wave3_t = data_input[0][2]\n _wave4_t = data_input[0][3]\n _wave5_t = data_input[0][4] \n _wave1_p = data_input[1][0]\n _wave2_p = data_input[1][1]\n _wave3_p = data_input[1][2]\n _wave4_p = data_input[1][3]\n _wave5_p = data_input[1][4]\n\n #Step1: 2 vs 1\n #Step1.1: time_analaysis\n _result = {} \n _result[str(\"2_1_t\")] = EW_fibonacci.check_fibratio(_wave2_t, _wave1_t)\n self.results[0].append(_result)\n \n #Step1.2: price_analaysis\n _result = {} \n _result[str(\"2_1_p\")] = EW_fibonacci.check_fibratio(_wave2_p, _wave1_p)\n self.results[0].append(_result)\n\n\n #Step2: 3 vs 1\n #Step2.1: time_analaysis\n _result = {} \n 
_result[str(\"3_1_t\")] = EW_fibonacci.check_fibratio(_wave3_t, _wave1_t)\n self.results[1].append(_result)\n \n #Step2.2: price_analaysis\n _result = {} \n _result[str(\"3_1_p\")] = EW_fibonacci.check_fibratio(_wave3_p, _wave1_p)\n self.results[1].append(_result)\n \n\n #Step3: 3 vs 2\n #Step3.1: time_analaysis\n _result = {} \n _result[str(\"3_2_t\")] = EW_fibonacci.check_fibratio(_wave3_t, _wave2_t)\n self.results[2].append(_result)\n \n #Step3.2: price_analaysis\n _result = {} \n _result[str(\"3_2_p\")] = EW_fibonacci.check_fibratio(_wave3_p, _wave2_p)\n self.results[2].append(_result) \n\n\n #Step4: 4 vs 2\n #Step4.1: time_analaysis\n _result = {} \n _result[str(\"4_2_t\")] = EW_fibonacci.check_fibratio(_wave4_t, _wave2_t)\n self.results[3].append(_result)\n \n #Step4.2: price_analaysis\n _result = {} \n _result[str(\"4_2_p\")] = EW_fibonacci.check_fibratio(_wave4_p, _wave2_p)\n self.results[3].append(_result) \n\n #Step5: 4 vs 3\n #Step5.1: time_analaysis\n _result = {} \n _result[str(\"4_3_t\")] = EW_fibonacci.check_fibratio(_wave4_t, _wave3_t)\n self.results[4].append(_result)\n \n #Step5.2: price_analaysis \n _result = {} \n _result[str(\"4_3_p\")] = EW_fibonacci.check_fibratio(_wave4_p, _wave3_p)\n self.results[4].append(_result)\n\n\n #Step6: 5 vs 1\n #Step6.1: time_analaysis\n _result = {} \n _result[str(\"5_1_t\")] = EW_fibonacci.check_fibratio(_wave5_t, _wave1_t)\n self.results[5].append(_result)\n \n #Step6.2: price_analaysis\n _result = {} \n _result[str(\"5_1_p\")] = EW_fibonacci.check_fibratio(_wave5_p, _wave1_p)\n self.results[5].append(_result) \n\n #Step7: 5 vs 3\n #Step7.1: time_analaysis\n _result = {} \n _result[str(\"5_3_t\")] = EW_fibonacci.check_fibratio(_wave5_t, _wave3_t)\n self.results[6].append(_result)\n \n #Step7.2: price_analaysis\n _result = {} \n _result[str(\"5_3_p\")] = EW_fibonacci.check_fibratio(_wave5_p, _wave3_p)\n self.results[6].append(_result)\n \n\n #Step8: 5 vs 0-3\n #Step8.1: time_analaysis\n _result = {} \n _result[str(\"5_0-3_t\")] = EW_fibonacci.check_fibratio(_wave5_t, (_wave1_t + _wave2_t +_wave3_t))\n self.results[7].append(_result) \n \n #Step8.2: price_analaysis\n _result = {} \n _result[str(\"5_0-3_p\")] = EW_fibonacci.check_fibratio(_wave5_p, (_wave1_p - _wave2_p +_wave3_p))\n self.results[7].append(_result)\n\n \n #Step9: 5 vs 4\n #Step9.1: time_analaysis\n _result = {} \n _result[str(\"5_4_t\")] = EW_fibonacci.check_fibratio(_wave5_t, _wave4_t)\n self.results[8].append(_result)\n \n #Step9.2: price_analaysis \n _result = {} \n _result[str(\"5_4_p\")] = EW_fibonacci.check_fibratio(_wave5_p, _wave4_p)\n self.results[8].append(_result)\n \n\n #Step10: Check if this impulse is valid or not\n self.results[9], self.results[10] = self.check_type(data_input, debug_flag)\n\n\n #Step11: return the results\n return self.results", "def check(self, data_input, debug_flag):\n self.results = [ [], [], [], False, [] ]\n _result = {}\n _wave1_t = data_input[0][0]\n _wave2_t = data_input[0][1]\n _wave3_t = data_input[0][2] \n _wave1_p = data_input[1][0]\n _wave2_p = data_input[1][1]\n _wave3_p = data_input[1][2]\n\n #Step1: b vs a\n #Step1.1: time_analaysis\n _result = {} \n _result[str(\"b_a_t\")] = EW_fibonacci.check_fibratio(_wave2_t, _wave1_t)\n self.results[0].append(_result)\n\n #Step1.2: price_analaysis\n _result = {} \n _result[str(\"b_a_p\")] = EW_fibonacci.check_fibratio(_wave2_p, _wave1_p)\n self.results[0].append(_result)\n\n\n #Step2: c vs b\n #Step2.1: time_analaysis\n _result = {} \n _result[str(\"c_b_t\")] = 
EW_fibonacci.check_fibratio(_wave3_t, _wave2_t)\n self.results[1].append(_result)\n\n #Step2.2: price_analaysis\n _result = {} \n _result[str(\"c_b_p\")] = EW_fibonacci.check_fibratio(_wave3_p, _wave2_p)\n self.results[1].append(_result)\n\n \n #Step3: c vs a\n #Step3.1: time_analaysis\n _result = {} \n _result[str(\"c_a_t\")] = EW_fibonacci.check_fibratio(_wave3_t, _wave1_t)\n self.results[2].append(_result)\n\n #Step3.2: price_analaysis\n _result = {} \n _result[str(\"c_a_p\")] = EW_fibonacci.check_fibratio(_wave3_p, _wave1_p)\n self.results[2].append(_result)\n\n\n #Step4: Check if this a-b-c is valid or not and which pattern can be chosen\n self.results[3], self.results[4] = self.check_type(data_input, debug_flag)\n\n\n #Step5: return the results\n return self.results", "def _process_utterance(lf0_dir, mgc_dir, bap_dir, cmp_dir, linear_dir, basename, wav_path, text, hparams):\n\n\tif hparams.trim_silence:\n\t\ttar_wavfile = wav_path[:-4] + \"_trim.wav\"\n\t\tprint(\"raw wav path:%s\" % wav_path)\n\t\twav_raw, fs = sf.read(wav_path)\n\t\twav_trim = audio.trim_silence(wav_raw, hparams)\n\t\tsf.write(tar_wavfile, wav_trim, fs)\n\n\t\twav_path = tar_wavfile\n\n\tnFFTHalf, alpha, bap_dim = audio.get_config(hparams.sample_rate)\n\n\tmcsize = hparams.num_mgc - 1\n\n\tfilename = basename #os.path.basename(wav_path).split(\".\")[0]\n\n\tprint('extract feats for %s' % wav_path)\n\n\t# extract f0,sp,ap\n\tos.system(\"analysis %s %s/%s.f0 %s/%s.sp %s/%s.bapd\" %\n\t\t\t\t (wav_path, lf0_dir, filename,\n\t\t\t\t mgc_dir, filename, bap_dir, filename)) # get float64???\n\n # interpolate f0\n\tf0 = np.fromfile(\"%s/%s.f0\" % (lf0_dir, filename),dtype=np.float64)\n\tcontinuous_f0 = interp1d(f0, kind=\"slinear\")\n\tcontinuous_f0.tofile(\"%s/%s.f0c\" % (lf0_dir, filename))\n\n\t# convert f0 to lf0\n\tos.system(\"x2x +da %s/%s.f0c > %s/%s.f0a\" % (lf0_dir, filename, lf0_dir, filename))\n\tos.system(\"x2x +af %s/%s.f0a | sopr -magic 0.0 -LN -MAGIC -1.0E+10 > %s/%s.lf0\" % (\n\t\tlf0_dir, filename, lf0_dir, filename))\n\n\t# convert sp to mgc\n\tos.system(\"x2x +df %s/%s.sp | sopr -R -m 32768.0 | \"\n\t\t\t \"mcep -a %f -m %d -l %d -e 1.0E-8 -j 0 -f 0.0 -q 3 \"\n\t\t\t \"> %s/%s.mgc\" % (mgc_dir, filename, alpha, mcsize, nFFTHalf, mgc_dir, filename))\n\n\t# convert ap to bap\n\tos.system(\"x2x +df %s/%s.bapd > %s/%s.bap\" %\n\t\t\t (bap_dir, filename, bap_dir, filename))\n\n\t# merge mgc,lf0 and bap to cmp\n\tos.system(\"merge +f -s 0 -l 1 -L %d %s/%s.mgc < %s/%s.lf0 > %s/%s.ml\" %\n\t\t\t((mcsize+1), mgc_dir, filename, lf0_dir, filename, cmp_dir, filename))\n\tos.system(\"merge +f -s 0 -l %d -L %d %s/%s.ml < %s/%s.bap > %s/%s.cmp\" %\n\t\t\t(bap_dim, (mcsize+2), cmp_dir, filename, bap_dir, filename, cmp_dir, filename))\n\n\t#if mel_frames > hparams.max_mel_frames and hparams.clip_mels_length:\n\t#\treturn None\n\n\t#Compute the linear scale spectrogram from the wav\n\twav = audio.load_wav(wav_path, hparams.sample_rate)\n\tlinear_spectrogram = audio.linearspectrogram(wav, hparams).astype(np.float32)\n\tlinear_frames = linear_spectrogram.shape[1]\n\n\t#sanity check\n\t#assert linear_frames == mel_frames\n\n\tlf0 = np.fromfile(\"%s/%s.lf0\" % (lf0_dir, filename), dtype=np.float32)\n\tmgc = np.fromfile(\"%s/%s.mgc\" % (mgc_dir, filename), dtype=np.float32)\n\tbap = np.fromfile(\"%s/%s.bap\" % (bap_dir, filename), dtype=np.float32)\n\tcmp = np.fromfile(\"%s/%s.cmp\" % (cmp_dir, filename), dtype=np.float32)\n\n\tcmp_dim = mcsize + 1 + 1 + bap_dim\n\tcmp_frames = cmp.shape[0] / 
cmp_dim\n\t#print(f0[:100])\n\t#print(continuous_f0[:100])\n\tprint(lf0.shape)\n\tprint(continuous_f0.shape)\n\tprint(mgc.shape)\n\tprint(bap.shape)\n\tprint(cmp_frames)\n\tprint(continuous_f0.dtype)\n\tprint(mgc.dtype)\n\tprint(bap.dtype)\n\tassert (mgc.shape[0]/(mcsize+1)) == (continuous_f0.shape[0]/1) == (bap.shape[0]/bap_dim) == cmp_frames\n\tassert cmp_dim == hparams.num_mels\n\t#assert len(out) >= cmp_frames * audio.get_hop_size(hparams)\n\n\t#time resolution adjustement\n\t#ensure length of raw audio is multiple of hop size so that we can use\n\t#transposed convolution to upsample\n\t#out = out[:mel_frames * audio.get_hop_size(hparams)]\n\t#assert len(out) % audio.get_hop_size(hparams) == 0\n\t#time_steps = len(out)\n\n\t# Write the spectrogram and audio to disk\n\t#audio_filename = 'audio-{}.npy'.format(index)\n\tcmp_mat = cmp.reshape(-1, cmp_dim)\n\tcmp_filename = 'cmp-{}.npy'.format(basename)\n\tlinear_filename = 'linear-{}.npy'.format(basename)\n\t#np.save(os.path.join(wav_dir, audio_filename), out.astype(out_dtype), allow_pickle=False)\n\tnp.save(os.path.join(cmp_dir, cmp_filename), cmp_mat, allow_pickle=False)\n\tnp.save(os.path.join(linear_dir, linear_filename), linear_spectrogram.T, allow_pickle=False)\n\t# Return a tuple describing this training example\n\treturn (cmp_filename, linear_filename, cmp_frames, text)", "def detect_freqs(self):\n n_fft_bins = self._config[\"audio_config\"][\"N_FFT_BINS\"]\n channel_avgs = []\n differences = []\n \n for i in range(n_fft_bins):\n channel_avgs.append(sum(self.freq_channels[i])/len(self.freq_channels[i]))\n differences.append(((self.freq_channels[i][0]-channel_avgs[i])*100)//channel_avgs[i])\n for i in [\"beat\", \"low\", \"mid\", \"high\"]:\n if any(differences[j] >= self.min_percent_diff[i]\\\n and self.freq_channels[j][0] >= self.min_detect_amplitude[i]\\\n for j in range(*self.detection_ranges[i]))\\\n and (time.time() - self.prev_freq_detects[i] > 0.2)\\\n and len(self.freq_channels[0]) == self.freq_channel_history:\n self.prev_freq_detects[i] = time.time()\n self.current_freq_detects[i] = True\n else:\n self.current_freq_detects[i] = False", "def FFT_brickwallLPF(filename,cutoff,wout=True,plot=True):\n start=time.time()\n n, data, data_dB,sr,ch=inputwav(filename)\n print('Applying FFT...')\n W=np.zeros((n,2))\n W[:,0]=1#blackman(n)\n W[:,1]=1#blackman(n)\n yfreq=rfft(data*W,axis=0)\n xfreq=np.linspace(0,sr/(2.0),n//1)\n yfreqBLPF=np.zeros((n,ch))\n yfreqBLPF[0:n,:]=yfreq\n print('Applying brickwall at '+str(cutoff)+' Hz...')\n yfreqBLPF[n:np.searchsorted(xfreq,cutoff):-1,:]=0.0\n data_filtered=(irfft(yfreqBLPF,axis=0))\n if wout==True:\n print('Exporting...')\n sf.write(filename[0:len(filename)-4]+'_brickwallLPF.wav',data_filtered,sr,'PCM_16')\n if plot==True:\n print('Plotting...')\n py.close()\n fig, (ax1, ax2) = py.subplots(nrows=2)\n ax1.semilogx(xfreq,20*np.log10(abs(yfreq[0:n//1,:]+.0001)),'k-',lw=0.5)\n ax1.semilogx(xfreq,20*np.log10(abs(yfreqBLPF[0:n//1,:]+.0001)),'m-',lw=0.1)\n ax1.set_xlabel('Frequency (Hz)')\n ax1.set_ylabel('Amplitude (dB)')\n ax2.plot(data,'k-',label='Raw')\n ax2.plot(data_filtered,'m-',lw=1,label='Filtered')\n ax2.set_xlim(0,10000)\n ax2.set_ylim(-1,1)\n ax2.set_ylabel('Amplitude (Norm Bits)')\n ax2.set_xlabel('Samples')\n ax2.legend(loc=2,frameon=False,ncol=2)\n print('Done!')\n end=time.time()\n elapsed=(end-start)\n print('Completed in '+str(elapsed)+' seconds.')\n return data_filtered", "def prep(self):\n print\n print 'Filtering rawdata to data as masked array...'\n# using 0 as flag\n# 
self.data = n.ma.masked_array(self.rawdata[:self.nints,:, self.chans,:], self.rawdata[:self.nints,:, self.chans,:] == 0j)\n# using standard flags\n self.data = n.ma.masked_array(self.rawdata[:self.nints,:, self.chans,:], self.flags[:self.nints,:, self.chans,:] == 0) # mask of True for flagged data (flags=0 in tpipe, which is flags=False in Miriad and flags=True in MS)\n self.dataph = (self.data.mean(axis=3).mean(axis=1)).real #dataph is summed and detected to form TP beam at phase center, multi-pol\n self.min = self.dataph.min()\n self.max = self.dataph.max()\n print 'Shape of data:'\n print self.data.shape\n print 'Dataph min, max:'\n print self.min, self.max\n\n self.freq = self.freq_orig[self.chans]\n\n # set up ur tracks (lol)\n self.dmtrack0 = {}\n self.twidths = {}\n for dmbin in xrange(len(self.dmarr)):\n self.dmtrack0[dmbin] = self.dmtrack(self.dmarr[dmbin],0) # track crosses high-freq channel in first integration\n self.twidths[dmbin] = 0\n for k in self.dmtrack0[dmbin][1]:\n self.twidths[dmbin] = max(self.twidths[dmbin], len(n.where(n.array(self.dmtrack0[dmbin][1]) == k)[0]))\n\n print 'Track width in time: '\n for dmbin in self.twidths:\n print 'DM=%.1f, twidth=%d. Iteration could step by %d/2.' % (self.dmarr[dmbin], self.twidths[dmbin], self.twidths[dmbin])", "def ha(sf,sfn,mX,pX,params,verbose=[],onlySelected=False,hc=-2,div=8,L=30,fs=44100,gt=[]):\r\n \r\n M,N,H,B = params\r\n \r\n idx = candidSelection(sf,t=0.025,hw=25) \r\n idx = np.concatenate((np.zeros(1),idx,np.array([sf.shape[0]])))\r\n idx_orig = idx.copy()\r\n mask = np.ones(idx.shape)\r\n mask[0]=0\r\n mask[-1]=0\r\n errors = np.zeros(mX.shape[0])\r\n scores = np.zeros(idx.shape)\r\n freqs = []\r\n \r\n tFlag = False\r\n vFlag = False # flag to enable prints and plots\r\n \r\n rms = np.sum(mX,axis=1)\r\n rms = rms-np.mean(rms)\r\n rms = rms/np.max(rms)\r\n rms = savgol_filter(rms,3,1)\r\n \r\n rms_t = -0.1\r\n \r\n # sending every onset candidate to harmonic analysis\r\n for i in range(len(idx)-2,0,-1):\r\n \r\n if onlySelected:\r\n if idx[i] not in verbose:\r\n continue\r\n \r\n b = int((idx[i]-(10240/H)) if (idx[i]>(idx[i-1]+(10240/H))) else idx[i-1])\r\n e = int((idx[i]+(10240/H)) if (idx[i]<(idx[i+1]-(10240/H))) else idx[i+1])\r\n \r\n \r\n if np.mean(rms[int(idx[i]):int(idx[i])+50])<rms_t:\r\n continue\r\n \r\n onst = int(idx[i]-b)\r\n pmX = np.copy(mX[b:e])\r\n \r\n\r\n if idx[i] in verbose:\r\n print(\"\\nOnset candidate:\")\r\n print(\"onset frame: %d\" %idx[i])\r\n print(\"sf onset number: %d\" %i)\r\n vFlag = True\r\n y = MRStftSynth(pmX,pX[b:e],M,H,B)\r\n print(\"synthesized sound\")\r\n ipd.display(ipd.Audio(data=y, rate=fs))\r\n \r\n if vFlag:\r\n print(\"STFT around candidate\")\r\n plt.pcolormesh(np.arange(pmX.shape[0]), np.arange(pmX.shape[1]), np.transpose(pmX))\r\n plt.show()\r\n \r\n print(\"filtered spectral flux\")\r\n plt.plot(sf[b:e])\r\n plt.show()\r\n print(\"raw spectral flux\")\r\n plt.plot(sfn[b:e])\r\n plt.show()\r\n \r\n allErrors,allf0s,pmXv = f0detection(pmX,pX[b:e],sfn[b:e],-100,10,onst,vFlag,hc,div,params,fs,tFlag)\r\n\r\n aL = np.min((e-idx[i]/2,L)) \r\n segments = getSegments(allf0s,allErrors,onst,pmX,vFlag)\r\n scores[i],freq,segmentScores = harmonicScore(segments,aL,vFlag,tFlag)\r\n freqs.append(freq)\r\n \r\n if scores[i]<1: # prevent rejected candidates from creating boundary for adjacent onset\r\n idx[i] = sf.shape[0]\r\n \r\n if vFlag:\r\n print(\"Score for this onset: %d\" %scores[i])\r\n \r\n if tFlag and scores[i]<1:\r\n pred_time = np.abs(idx[i]*(H/fs))\r\n closest_gt_ind 
= np.argmin(pred_time-gt)[0]\r\n if np.abs(gt[closest_gt_ind]-pred_time)<0.05:\r\n if score[i]>1:\r\n tp.append[idx[i]]\r\n if score[i]<1:\r\n fn.append[idx[i]]\r\n \r\n print(\"STFT around onset\")\r\n plt.pcolormesh(np.arange(pmX.shape[0]), np.arange(pmX.shape[1]), np.transpose(pmX))\r\n plt.show()\r\n \r\n y = MRStftSynth(pmXv,pX,M,H,B)\r\n ipd.display(ipd.Audio(data=y, rate=fs))\r\n \r\n plt.pcolormesh(np.arange(pmXv.shape[0]), np.arange(pmXv.shape[1]), np.transpose(pmXv))\r\n plt.show()\r\n\r\n vFlag = False\r\n tFlag = False\r\n \r\n avg = np.mean(scores)\r\n mask[scores<1] = 0\r\n result = idx_orig[mask==1]\r\n return idx_orig[1:-1],result,freqs,scores[1:-1]", "def automatic_checking(files):\n for i in range(10):\n fft_checking(files[i])", "def has_smaller_psf_fits(fp1, fp2, mode='quick', fracdiff_threshold=0.1):\n\n\tpsf1 = fits.getdata(fp1)\n\tpsf2 = fits.getdata(fp2)\n\n\treturn has_smaller_psf(psf1, psf2, mode=mode, fracdiff_threshold=fracdiff_threshold)", "def tofPreproc(evt, type, key, outkey=None):\n \n if outkey is None:\n outkey = \"corrected - \" + key\n tof_trace = evt[type][key].data\n\n tof_trace_inverted = tof_trace * -1\n #Find photon peak\n tof_peak_threshold = np.std(tof_trace_inverted[:pre_pp_index])*5\n\n all_peak_x = np.where(tof_trace_inverted>(np.median(tof_trace_inverted[:pre_pp_index])+tof_peak_threshold))[0]\n any_peaks = all_peak_x.size >= 2\n if any_peaks:\n print all_peak_x \n diff_x = all_peak_x[1:] - all_peak_x[:-1]\n end_peak = all_peak_x[np.where(diff_x > 1)[0]]\n photon_peak_end = end_peak[0] + 1\n photon_peak_start = all_peak_x[0]\n \n \t #Inverted and baseline corrected Tof signal\n base_line = np.median(tof_trace_inverted[:photon_peak_start])\n \n base_std = np.std(tof_trace_inverted[:photon_peak_start])\n \t\n corrected_tof = (tof_trace_inverted-base_line)[photon_peak_end:]\n add_record(evt['analysis'], 'analysis', 'Corrected ToF (base line)', corrected_tof)\n \n \t #Convert to M/Q\n Hpeak = np.argmax(corrected_tof[:hpeak_region])\n new_x = (np.arange(len(corrected_tof)) / float(Hpeak))**2. 
\n add_record(evt['analysis'], 'analysis', 'M/Q', new_x)", "def HD_input_snfit_data(self):\n\n dico = cPickle.load(open(SUGAR_parameter_pkl))\n self.read_snfit_results()\n self.read_meta()\n Filtre = np.array([True]*len(self.sn_name))\n self.zcmb = []\n self.z_err = []\n for j in range(len(self.sn_name)):\n if self.sn_name[j] in dico.keys() and self.sn_name[j] :\n\n for i in range (len(self.meta_sn_name_list)):\n if self.sn_name[j] == self.meta_sn_name_list[i]:\n \n self.z_err.append(self.meta_zhl_err[i])\n self.zcmb.append(self.meta_zcmb[i])\n if np.abs(self.x1[j] - self.meta_x1[i]) > 0.001:\n print 'problem with %s include in sample but difference between snfit and meta'%(self.sn_name[j])\n else:\n Filtre[j] = False\n\n for p in dico.keys():\n if p not in self.sn_name:\n print p\n \n self.x1 = self.x1[Filtre]\n self.x1_err = self.x1_err[Filtre] \n self.c = self.c[Filtre]\n self.c_err = self.c_err[Filtre]\n self.mb = self.mb[Filtre]\n self.mb_err = self.mb_err[Filtre]\n self.cov_x0_x1 = self.cov_x0_x1[Filtre]\n self.cov_x0_c = self.cov_x0_c[Filtre]\n self.cov_x1_c = self.cov_x1_c[Filtre]\n self.cov_mb_x1 = self.cov_mb_x1[Filtre]\n self.cov_mb_c = self.cov_mb_c[Filtre]\n self.z = self.z[Filtre]\n self.zcmb = np.array(self.zcmb)\n self.z_err = np.array(self.z_err)\n\n self.cov_y = np.zeros((len(self.mb)*3,len(self.mb)*3))\n\n for i in range (len(self.mb)):\n self.cov_y[i*3,i*3] = self.mb_err[i]**2\n self.cov_y[i*3+ 1,i*3+ 1] = self.x1_err[i]**2\n \n self.cov_y[i*3+ 2,i*3+ 2] = self.c_err[i]**2\n self.cov_y[i*3+ 0,i*3+ 1] = self.cov_mb_x1[i]\n self.cov_y[i*3+ 1,i*3+ 0] = self.cov_mb_x1[i]\n self.cov_y[i*3+ 0,i*3+ 2] = self.cov_mb_c[i]\n self.cov_y[i*3+ 2,i*3+ 0] = self.cov_mb_c[i]\n self.cov_y[i*3+ 1,i*3+ 2] = self.cov_x1_c[i] \n self.cov_y[i*3+ 2,i*3+ 1] = self.cov_x1_c[i] \n \n self.salt_parm = np.array([self.mb,self.x1,self.c]).T\n# print len(self.salt_parm), len(self.cov_y), len(self.z), len(self.zcmb)\n# return self.salt_parm, self.cov_y, self.z, self.meta_zcmb, self.meta_zhl_err, self.sn_name, self.meta_idr\n return self.salt_parm, self.cov_y, self.z, self.zcmb, self.z_err", "def test_sff_corrector():\n # The following csv file, provided by Vanderburg and Johnson\n # at https://www.cfa.harvard.edu/~avanderb/k2/ep60021426.html,\n # contains the results of applying SFF to EPIC 60021426.\n fn = get_pkg_data_filename('./data/ep60021426alldiagnostics.csv')\n data = np.genfromtxt(fn, delimiter=',', skip_header=1)\n mask = data[:, -2] == 0 # indicates whether the thrusters were on or off\n time = data[:, 0]\n raw_flux = data[:, 1]\n corrected_flux = data[:, 2]\n centroid_col = data[:, 3]\n centroid_row = data[:, 4]\n arclength = data[:, 5]\n correction = data[:, 6]\n\n lc = LightCurve(time=time, flux=raw_flux)\n sff = SFFCorrector(lc)\n corrected_lc = sff.correct(centroid_col=centroid_col,\n centroid_row=centroid_row,\n niters=1, windows=1)\n # do hidden plots execute smoothly?\n sff._plot_rotated_centroids()\n sff._plot_normflux_arclength()\n\n # the factor self.bspline(time-time[0]) accounts for\n # the long term trend which is divided out in order to get a \"flat\"\n # lightcurve.\n assert_almost_equal(corrected_lc.flux*sff.bspline(time),\n corrected_flux, decimal=3)\n assert_array_equal(time, corrected_lc.time)\n # the factor of 4 below accounts for the conversion\n # between pixel units to arcseconds\n assert_almost_equal(4*sff.s, arclength, decimal=2)\n assert_almost_equal(sff.interp(sff.s), correction, decimal=3)\n\n # test using KeplerLightCurve interface\n klc = 
KeplerLightCurve(time=time, flux=raw_flux, centroid_col=centroid_col,\n centroid_row=centroid_row)\n klc = klc.correct(niters=1, windows=1)\n sff = klc.corrector\n\n assert_almost_equal(klc.flux*sff.bspline(time),\n corrected_flux, decimal=3)\n assert_almost_equal(4*sff.s, arclength, decimal=2)\n assert_almost_equal(sff.interp(sff.s), correction, decimal=3)\n assert_array_equal(time, klc.time)", "def FFT_brickwallHPF(filename,cutoff,wout=True,plot=True):\n n, data, data_dB,sr,ch=inputwav(filename)\n print('Applying FFT...')\n yfreq=rfft(data,axis=0)\n xfreq=np.linspace(0,sr/(2.0),n)\n yfreqBHPF=np.zeros((n,ch))\n yfreqBHPF[0:n,:]=yfreq\n print('Applying brickwall at '+str(cutoff)+' Hz...')\n yfreqBHPF[0:np.searchsorted(xfreq,cutoff),:]=0.0\n data_filtered=(irfft(yfreqBHPF,axis=0))\n if wout==True:\n print('Exporting...')\n sf.write(filename[0:len(filename)-4]+'_brickwallHPF.wav',data_filtered,sr,'PCM_16')\n if plot==True:\n print('Plotting...')\n py.close()\n fig, (ax1, ax2) = py.subplots(nrows=2)\n ax1.semilogx(xfreq,20*np.log10(abs(yfreq[0:n,0]+.0001)),'k-',lw=0.5)\n ax1.semilogx(xfreq,20*np.log10(abs(yfreqBHPF[0:n//1,0]+.0001)),'m-',lw=0.1)\n ax1.set_xlabel('Frequency (Hz)')\n ax1.set_ylabel('Amplitude')\n ax2.plot(data,'k-',label='Raw')\n ax2.plot(data_filtered,'m-',label='Filtered')\n ax2.set_xlim(0,10000)\n ax2.set_ylim(-1,1)\n ax2.set_ylabel('Amplitude (Norm Bits)')\n ax2.set_xlabel('Samples')\n ax2.legend(loc=2)\n print('Done!')\n return data_filtered", "def spectral_check(self, ):\r\n a, b = self.dfa, self.dfm.copy()\r\n b['ts_a']=a.ts\r\n b['flux_a'] = a.flux\r\n b['dflux'] = (b.flux-b.flux_a)/b.flux_unc\r\n b['eflux100_a'] = a.eflux100\r\n b['deflux'] = (b.eflux100-b.eflux100_a)/b.eflux100_unc\r\n b['pindex_a'] = a.pindex\r\n b['gdelta'] = (b.pindex-b.pindex_a)/b.pindex_unc\r\n self.dfm = b # since copy\r\n\r\n fig,axx = plt.subplots(1,2, figsize=(10,5), sharey=True)\r\n hkw = dict(bins=np.linspace(-5,5,51), histtype='step', lw=2, density=True)\r\n\r\n cut = (b.ts>50) & ~pd.isnull(b.deflux) & ~pd.isnull(b.gdelta) &\\\r\n (b.modelname==\"LogParabola\") & (b.pindex<3) & (b.pindex>0.5) &\\\r\n (b.e0>500) &(b.eflux100_unc>0) &(b.pindex_unc>0)\r\n self.check_total = sum(cut)\r\n for ax, title, val in zip(axx.flatten(), ['Energy Flux', 'Spectral index'], [b.deflux, b.gdelta]): \r\n\r\n df=val[cut]\r\n ax.hist(df.clip(-5,5), label='mean {:5.2f}\\nstd {:5.2f}'.format(df.mean(),df.std()), **hkw);\r\n ax.grid(alpha=0.5); \r\n x=np.linspace(-4,4)\r\n ax.plot(x, stats.norm.pdf(x), '--g' );\r\n ax.set(xlabel='normalized fit deviation', title=title, )\r\n ax.legend(loc='upper left',prop=dict(family='monospace'))\r\n fig.suptitle('Normalized devations of fit from model', fontsize=16);\r\n\r\n return fig", "def load_yaafedata(params, \n n_learn_frames=2000,\n use_custom_stft=False):\n\n audio_file_path = getoptions(params, 'location', '/sons/voxforge/data/Learn/')\n # if no number specified, use n_learn_frames\n n_frames = getoptions(params, 'n_frames', n_learn_frames)\n sr = getoptions(params, 'sr', 16000)\n sigma_noise = getoptions(params, 'sigma', 0.0)\n random_seed = getoptions(params, 'shuffle', 1001)\n features = getoptions(params, 'features', [])\n wintime = getoptions(params, 'wintime', 0.032)\n steptime = getoptions(params, 'steptime', 0.008)\n startpoint = getoptions(params, 'startpoint', 0)\n forbid_list = getoptions(params, 'forbidden_names', [])\n mfnpf = getoptions(params, 'frame_num_per_file', 3000)\n# wintime = float(win_size)/float(sr)\n# steptime = float(step_size)/float(sr)\n 
\n win_size = int(wintime*sr)\n step_size = int(steptime*sr)\n# print wintime, steptime, win_size, step_size\n # apply sub_routine to all the files until a condition is met\n n_frames_reached = 0\n\n all_file_paths = get_filepaths(audio_file_path,\n random_seed,\n forbid_list = forbid_list)\n file_index = 0\n\n specseq = []\n featseq = []\n dataseq = []\n n_files_used = 0\n\n while (n_frames_reached < n_frames):\n file_index = file_index + 1\n filepath = all_file_paths[file_index]\n n_files_used = n_files_used + 1\n\n [loc_magSTFT, loc_Feats, locDatas] = load_data_one_audio_file(\n filepath, sr,\n wintime=wintime,\n steptime=steptime,\n max_frame_num_per_file=mfnpf,\n sigma_noise=sigma_noise,\n startpoint = startpoint,\n features=features)\n# if get_data:\n# [loc_magSTFT, loc_Feats, locDatas] = load_data_one_file_melspec(filepath, sr, sigma_noise, params);\n# Data = [Data , locDatas'];\n# else\n# [loc_magSTFT, loc_Feats, ~] = load_data_one_file_melspec(filepath, sr, sigma_noise, params);\n# end\n if not use_custom_stft:\n specseq.append(loc_magSTFT)\n else:\n specseq.append(np.abs(get_stft(locDatas,\n wsize=win_size,\n tstep=step_size,\n sigma = sigma_noise)).T)\n# print wintime, steptime, win_size, step_size\n# print loc_magSTFT.shape\n# print specseq[-1].shape\n# print locDatas.shape\n featseq.append(loc_Feats)\n dataseq.append(locDatas)\n \n n_frames_reached += min(loc_magSTFT.shape[0], loc_Feats.shape[0])\n print n_frames_reached\n \n Spectrums = np.vstack(specseq)\n Features = np.vstack(featseq)\n Data = np.hstack(dataseq)\n\n n_frames_reached = min(n_frames_reached, n_frames)\n Spectrums = Spectrums[0:n_frames_reached,:]\n Features = Features[0:n_frames_reached,:]\n used_files = all_file_paths[0:n_files_used]\n\n return Features, Spectrums, n_frames_reached, Data, used_files", "def test_input_flux_file():\n # Generate an input file\n flux_input_file = tstutils.data_path('test.flux')\n if os.path.isfile(flux_input_file):\n os.remove(flux_input_file)\n\n cfg_lines = ['[fluxcalib]']\n cfg_lines += [' extinct_correct = False # Set to True if your SENSFUNC derived with the UVIS algorithm\\n']\n cfg_lines += ['# Please add your SENSFUNC file name below before running pypeit_flux_calib']\n\n # These files need to be in tests/files/\n data = Table()\n data['filename'] = ['spec1d_cN20170331S0216-pisco_GNIRS_20170331T085412.181.fits',\n 'spec1d_cN20170331S0217-pisco_GNIRS_20170331T085933.097.fits']\n data['sensfile'] = 'sens_cN20170331S0206-HIP62745_GNIRS_20170331T083351.681.fits'\n # \n paths = [tstutils.data_path('')]\n\n fluxFile = inputfiles.FluxFile(config=cfg_lines, \n file_paths=paths,\n data_table=data)\n # Write\n fluxFile.write(flux_input_file)\n\n # Read\n fluxFile2 = inputfiles.FluxFile.from_file(flux_input_file)\n assert np.all(fluxFile2.data['filename'] == data['filename'])\n\n # Test path\n assert fluxFile2.file_paths[0] == paths[0]\n assert fluxFile2.filenames[0] == os.path.join(paths[0], data['filename'][0])\n\n # #################\n # Tickle the other ways to do sensfiles\n data3 = Table()\n data3['filename'] = ['spec1d_cN20170331S0216-pisco_GNIRS_20170331T085412.181.fits',\n 'spec1d_cN20170331S0217-pisco_GNIRS_20170331T085933.097.fits']\n data3['sensfile'] = ['sens_cN20170331S0206-HIP62745_GNIRS_20170331T083351.681.fits',\n '']\n\n fluxFile3 = inputfiles.FluxFile(config=cfg_lines, \n file_paths=paths,\n data_table=data3)\n assert fluxFile3.sensfiles[1] == os.path.join(paths[0], data['sensfile'][0])\n \n data4 = Table()\n data4['filename'] = 
['spec1d_cN20170331S0216-pisco_GNIRS_20170331T085412.181.fits',\n 'spec1d_cN20170331S0217-pisco_GNIRS_20170331T085933.097.fits']\n data4['sensfile'] = ''\n\n fluxFile4 = inputfiles.FluxFile(config=cfg_lines, \n file_paths=paths,\n data_table=data4)\n assert len(fluxFile4.sensfiles) == 0\n\n # Clean up\n os.remove(flux_input_file)", "def test_fstatisticsAndReshapedSpectrum(self):\n data = _load_mtdata('v22_174_series.dat.gz')\n # Calculate the spectra.\n spec, freq, jackknife, fstatistics, _ = mtspec(\n data, 4930., 3.5, nfft=312, number_of_tapers=5, statistics=True,\n rshape=0, fcrit=0.9)\n # No NaNs are supposed to be in the output.\n self.assertEqual(np.isnan(spec).any(), False)\n self.assertEqual(np.isnan(freq).any(), False)\n self.assertEqual(np.isnan(jackknife).any(), False)\n self.assertEqual(np.isnan(fstatistics).any(), False)\n # Load the good data.\n datafile = os.path.join(os.path.dirname(__file__), 'data',\n 'fstatistics.npz')\n record = np.load(datafile)\n spec2 = record['spec']\n jackknife2 = record['jackknife']\n fstatistics2 = record['fstatistics']\n freq2 = np.arange(157) * 6.50127447e-07\n # Compare.\n np.testing.assert_almost_equal(freq, freq2)\n np.testing.assert_almost_equal(spec / spec, spec2 / spec)\n np.testing.assert_almost_equal(jackknife / jackknife,\n jackknife2 / jackknife, 5)\n np.testing.assert_almost_equal(fstatistics / fstatistics,\n fstatistics2 / fstatistics, 5)", "def test_filter_sff_file(self):\r\n\r\n try:\r\n fh = open(self.tiny_test)\r\n except IOError:\r\n self.fail(\r\n \"Could not open test file %s. Skipping test\" %\r\n self.tiny_test)\r\n\r\n # With no filters all flowgram should be in out file\r\n flowgrams, header = lazy_parse_sff_handle(fh)\r\n filter_list = []\r\n fd, out_file_name = mkstemp(\r\n prefix=\"test_filter_sff_file\",\r\n suffix=\".sff.txt\")\r\n close(fd)\r\n out_fh = open(out_file_name, \"w\")\r\n l = filter_sff_file(flowgrams, header, filter_list, out_fh)\r\n remove(out_file_name)\r\n fh.close()\r\n self.assertEqual(l, 114)\r\n\r\n # With good filters some should survive\r\n fh = open(self.tiny_test)\r\n flowgrams, header = lazy_parse_sff_handle(fh)\r\n filter_list = [lambda f:within_length(f, 100, 300)]\r\n fd, out_file_name = mkstemp(\r\n prefix=\"test_filter_sff_file\",\r\n suffix=\".sff.txt\")\r\n close(fd)\r\n out_fh = open(out_file_name, \"w\")\r\n l = filter_sff_file(flowgrams, header, filter_list, out_fh)\r\n remove(out_file_name)\r\n fh.close()\r\n self.assertEqual(l, 112)\r\n\r\n # With strong filters nothing should be in\r\n fh = open(self.tiny_test)\r\n flowgrams, header = lazy_parse_sff_handle(fh)\r\n filter_list = [lambda f:within_length(f, 0, 0)]\r\n fd, out_file_name = mkstemp(\r\n prefix=\"test_filter_sff_file\",\r\n suffix=\".sff.txt\")\r\n close(fd)\r\n out_fh = open(out_file_name, \"w\")\r\n l = filter_sff_file(flowgrams, header, filter_list, out_fh)\r\n remove(out_file_name)\r\n self.assertEqual(l, 0)", "def set_sff_trimpoints_with_sfftools(\r\n sff_dir, technical_lengths, sffinfo_path='sffinfo', sfffile_path='sfffile',\r\n debug=False):\r\n if not (exists(sffinfo_path) or which(sffinfo_path)):\r\n raise ApplicationNotFoundError(\r\n 'sffinfo executable not found. Is it installed and in your $PATH?')\r\n if not (exists(sfffile_path) or which(sfffile_path)):\r\n raise ApplicationNotFoundError(\r\n 'sfffile executable not found. 
Is it installed and in your $PATH?')\r\n\r\n for lib_id, sff_fp in get_per_lib_sff_fps(sff_dir):\r\n try:\r\n readlength = technical_lengths[lib_id]\r\n except KeyError:\r\n continue\r\n\r\n sffinfo_args = [sffinfo_path, '-s', sff_fp]\r\n if debug:\r\n print \"Running sffinfo command %s\" % sffinfo_args\r\n sffinfo_output_file = TemporaryFile()\r\n check_call(sffinfo_args, stdout=sffinfo_output_file)\r\n sffinfo_output_file.seek(0)\r\n\r\n seqlengths = {}\r\n for line in sffinfo_output_file:\r\n if line.startswith('>'):\r\n fields = line[1:].split()\r\n seq_len = fields[1].split('=')[1]\r\n seqlengths[fields[0]] = seq_len\r\n\r\n trim_fp = sff_fp + '.trim'\r\n trim_file = open(trim_fp, 'w')\r\n for id_, length in seqlengths.items():\r\n curr_length = int(seqlengths[id_])\r\n # Sfftools use 1-based index\r\n left_trim = readlength + 1\r\n # Key sequence not included in FASTA length\r\n right_trim = curr_length + 4\r\n if curr_length > left_trim:\r\n trim_file.write(\r\n \"%s\\t%s\\t%s\\n\" % (id_, left_trim, right_trim))\r\n else:\r\n stderr.write(\r\n 'Rejected read %s with trim points %s and %s (orig '\r\n 'length %s)' % (id_, left_trim, curr_length, length))\r\n trim_file.close()\r\n\r\n trimmed_sff_fp = sff_fp + '.trimmed'\r\n sfffile_args = [\r\n sfffile_path, '-t', trim_fp, '-o', trimmed_sff_fp, sff_fp]\r\n if debug:\r\n print \"Running sfffile command:\", sfffile_args\r\n check_call(sfffile_args, stdout=open(devnull, 'w'))\r\n remove(sff_fp)\r\n rename(trimmed_sff_fp, sff_fp)", "def HPF(filename,cutoff,Q=1,wout=True,plot=True):\n start=time.time()\n n, data, data_dB,sr,ch=inputwav(filename)\n b, a = butter(Q,cutoff/sr,btype='high')\n data_filtered=lfilter(b,a,data,axis=0)\n print('Applying FFT...')\n if wout==True:\n print('Exporting...')\n sf.write(filename[0:len(filename)-4]+'_HPF.wav',data_filtered,sr,'PCM_16')\n if plot==True:\n print('Plotting...')\n py.close()\n w, h = freqz(b,a,worN=16384)\n fig, (ax1, ax2) = py.subplots(nrows=2)\n ax1.semilogx(0.5*sr*w/np.pi,abs(h),'k--')\n ax1.set_title('Fiter Frequency Response')\n ax1.set_xlabel('Frequency (Hz)')\n ax1.set_ylabel('Rel. 
Amplitude')\n ax1.grid()\n ax1.set_ylim(0,1.1)\n ax1.set_xlim(1,20000)\n ax2.plot(data,'k-',label='Raw data')\n ax2.plot(data_filtered,'m-',lw=1,label='Filtered data')\n ax2.set_xlim(0,10000)\n ax2.set_ylim(-1,1)\n ax2.set_ylabel('Amplitude (Norm Bits)')\n ax2.set_xlabel('Samples')\n ax2.legend(loc=2,frameon=False,ncol=2)\n py.subplots_adjust(hspace=0.35) \n print('Done!')\n end=time.time()\n elapsed=int(1000*(end-start))\n print('...............................')\n print('Completed in '+str(elapsed)+' milliseconds.')\n return data_filtered", "def FFT_brickwallBR(filename,start,stop,wout=True,plot=True):\n n, data, data_dB,sr,ch=inputwav(filename)\n print('Applying FFT...')\n yfreq=fft(data,axis=0)\n xfreq=np.linspace(0,sr/(2.0),n//2)\n yfreqBHPF=np.zeros((n,ch),dtype=complex)\n yfreqBHPF[0:n,:]=yfreq\n print('Applying brickwall centered at '+str((start+stop)/2)+' Hz...')\n yfreqBHPF[np.searchsorted(xfreq,start):np.searchsorted(xfreq,stop),:]=0.00001\n data_filtered=(ifft(yfreqBHPF,axis=0))\n if wout==True:\n print('Exporting...')\n sf.write(filename[0:len(filename)-4]+'_brickwallHPF.wav',data_filtered.real,sr,'PCM_16')\n if plot==True:\n print('Plotting...')\n py.close()\n fig, (ax1, ax2) = py.subplots(nrows=2)\n ax1.semilogx(xfreq,20*np.log10(abs(yfreq[0:n//2])),'k-',lw=0.5)\n ax1.semilogx(xfreq,20*np.log10(abs(yfreqBHPF[0:n//2,0])),'m-',lw=0.1)\n ax1.set_xlabel('Frequency (Hz)')\n ax1.set_ylabel('Amplitude (dB)')\n ax2.plot(data,'k-')\n ax2.plot(data_filtered,'m-')\n ax2.set_xlim(0,1000)\n # ax2.set_ylim(-1,1)\n ax2.set_ylabel('Amplitude (Norm Bits)')\n ax2.set_xlabel('Samples')\n ax2.legend(loc=2)\n print('Done!')\n return data_filtered" ]
[ "0.6024711", "0.587461", "0.577335", "0.57703745", "0.5720756", "0.5682307", "0.5646616", "0.5549602", "0.5549602", "0.5549602", "0.55184007", "0.55042326", "0.54917777", "0.5479666", "0.54760754", "0.547309", "0.54530597", "0.54324853", "0.5427759", "0.542531", "0.5405512", "0.540514", "0.53970903", "0.5393065", "0.5390705", "0.538965", "0.53874385", "0.538267", "0.5367972", "0.5365353" ]
0.77132446
0
Tries to delete ``filename`` and ignores any error that is raised.
def safe_delete(self, filename): try: os.remove(filename) except OSError: pass
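The snippet above is a minimal sketch of the ignore-errors deletion idiom; rewritten below as a standalone module-level helper (an assumption: the record shows it as a class method) so it can be run directly. The ``scratch.tmp`` file name in the demo is made up. On Python 3 the same effect can also be obtained with ``contextlib.suppress(OSError)``::

    import os

    def safe_delete(filename):
        """Delete ``filename`` and silently ignore any OSError (e.g. a missing file)."""
        try:
            os.remove(filename)
        except OSError:
            pass

    if __name__ == "__main__":
        # Create a throwaway file, then delete it twice: the second call is a no-op.
        with open("scratch.tmp", "w") as fh:
            fh.write("temporary data")
        safe_delete("scratch.tmp")   # file removed
        safe_delete("scratch.tmp")   # already gone; the OSError is swallowed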
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def safe_delete(filename):\r\n try:\r\n os.unlink(filename)\r\n except OSError as e:\r\n if e.errno != errno.ENOENT:\r\n raise", "def _delete(filename):\n return os.remove(filename)", "def delete_file(filename):\n if os.path.isfile(filename):\n return os.remove(filename)", "def delete_temp_file(filename):\n try:\n os.remove(filename)\n except OSError as e:\n if e.errno != errno.ENOENT:\n raise e", "def rm_file(filename):\n try:\n os.unlink(filename)\n except FileNotFoundError:\n pass", "def remove( filename ):\n try:\n os.remove( filename )\n except OSError as e:\n if e.errno != errno.ENOENT:\n raise e", "def safely_remove_file(filename):\n\tprint(\"Removing {0}\".format(filename))\n\ttry:\n\t\tos.remove(filename)\n\texcept OSError:\n\t\tprint(\"Unable to remove {0}\".format(filename))\n\tprint(\"File removed.\")", "def silentremove(filename):\n try:\n os.remove(filename)\n except OSError as e:\n if e.errno != errno.ENOENT: # errno.ENOENT : no such file or directory\n logging.debug('Error removing file: ' + filename)\n raise # re-raise exception if a different error occured", "def silent_remove(filename):\n try:\n os.remove(filename)\n except OSError as err:\n if err.errno != errno.ENOENT: # errno.ENOENT = no such file or directory\n raise # re-raise exception if a different error occurred", "def remove_file(filename, fail_ok=True):\n try:\n os.remove(filename)\n except OSError as ose:\n if fail_ok is False:\n raise AssertionError(f\"Cannot remove {filename}: {str(ose)} {ose}\")", "def silent_remove(filename):\n print(\"{:s} will be destroyed if it exists\".format(filename))\n try:\n os.remove(filename)\n except OSError as e:\n if e.errno != errno.ENOENT: # errno.ENOENT = no such file or directory\n raise # re-raise exception if a different error occured", "def delete(self, filename):\n pass", "def delete_file(filename):\n\tprint client.file_delete(filename)", "def remove_file_if_exists(filename):\n try:\n os.remove(filename)\n except OSError as exc:\n if exc.errno != errno.ENOENT: # errno.ENOENT = no such file or directory\n raise", "def delete(self, filename):\n raise NotImplementedError", "def _removeFile(self, filename):\n try:\n #delete the output file\n os.remove(filename)\n except:\n #print (\"Failed to remove the file: \" + filename)\n pass", "def unlink_silent(filename):\n try:\n _os.unlink(filename)\n except OSError, e:\n if e.errno != _errno.ENOENT:\n raise", "def delete(self, filename, **kw):\n\n file_path = os.path.join(self.storage_path, filename)\n\n try:\n os.remove(file_path)\n except OSError:\n pass", "def deleteSingleFile(filename):\n os.popen('rm {}'.format(filename))", "def delete(self, filename):\n try:\n self.storage.delete(filename)\n return {\n 'filename': filename,\n 'message': 'The file was deleted with success'\n }\n except FileNotFoundError:\n abort(404, message='File %s does not exist' % filename)\n except BaseException:\n abort(500, message='Failed to delete the file ' + filename)\n LOGGER.error('A generic exception has occurred.', exc_info=True)", "def no_problem_unlink(file):\n\n try:\n os.unlink(file)\n except:\n # Nothing to delete?... 
No problem\n pass", "def remove_file(filename, verbose=True):\r\n if verbose:\r\n LOG.info('Deleting file %s' % os.path.relpath(filename, BASE_DIR))\r\n if not os.path.exists(filename):\r\n LOG.warn(\"File does not exist: %s\" % os.path.relpath(filename, BASE_DIR))\r\n else:\r\n os.remove(filename)", "def rm(file_name):\n if os.path.isfile(file_name):\n flag = os.remove(file_name)\n else:\n return \"error\"", "def _delete(self, remote_filename):\n\n file_id = self.get_file_id(remote_filename)\n if file_id is None:\n raise BackendException(\n 'File \"%s\" cannot be deleted: it does not exist' % (\n remote_filename))\n response = self.http_client.put(self.metadata_url + 'trash/' + file_id)\n response.raise_for_status()\n del self.names_to_ids[remote_filename]", "def test_file_deleted(self):\n try:\n with get_temp_file() as (fd, name):\n os.unlink(name)\n except Exception as err:\n self.fail('Failed with exception \"{}\"'.format(err))", "def _delete_binary(self, filename):\n\t\ttry:\n\t\t\tvalidation.required(filename, 'filename')\n\t\texcept errors.ValidationError, ex:\n\t\t\treturn utils.return_deferred_error(ex.value)\n\n\t\tself.log.debug(\"_delete_binary(%s)\" % filename)\n\n\t\t@stack\n\t\tdef do_delete(void):\n\t\t\tif os.path.exists(filename):\n\t\t\t\tself.log.debug(\"file [%s] exists...deleting\" % filename)\n\t\t\t\ttry:\n\t\t\t\t\tos.unlink(filename)\n\t\t\t\texcept Exception, ex:\n\t\t\t\t\tself.log.warning(\"Unable to delete [%s] - %s\" % (filename, ex))\n\t\t\t\t\traise errors.APIError(ex)\n\t\t\telse:\n\t\t\t\tself.log.debug(\"file [%s] doesn't exist...\" % filename)\n\t\t\treturn file\n\n\t\td = Deferred()\n\t\td.addCallback(do_delete)\n\t\td.addCallback(lambda _: (0, _))\n\t\td.addErrback(lambda _: (-1, _.getErrorMessage()))\n\t\td.callback(0)\n\t\treturn d", "def delete_file(filename: str):\n\t\tif filename == \"ALL\":\n\t\t\tfor file in os.listdir(\"data/music/\"):\n\t\t\t\tdeleted = False\n\t\t\t\twhile not deleted:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tos.remove(f\"data/music/{file}\")\n\t\t\t\t\t\tdeleted = True\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\tprint(\"Not removed, waiting 1 second...\")\n\t\t\t\t\t\tasyncio.sleep(1)\n\t\telse:\n\t\t\tprint(\"File--: \", filename)", "def delete_file(self, filename):\n if not filename in self.files:\n raise IOError('File %s Not Found' % filename)\n\n for nodename in self.files[filename]:\n node = self.datanodes[nodename]\n node.delete_file(filename)\n del self.files[filename]\n logging.info('file %s deleted' % filename)", "def delete_file(self, filename: str, directory: str = 'gcodes') -> Dict:\n raise NotImplementedError", "def delete(filename):\n storeapps = APP.config[\"storage\"]\n extension = os.path.basename(filename).split(\".\")[-1].upper()\n dirname = \".\".join(os.path.basename(filename).split(\".\")[:-1])\n directory = os.path.join(storeapps, extension, dirname)\n\n try:\n directory = directory.encode(\"utf-8\")\n except UnicodeDecodeError:\n pass\n\n if os.path.isdir(directory):\n shutil.rmtree(directory)\n if os.path.isdir(directory):\n return \"Unable to remove application (check server logs): %s\" % (filename), 500\n return \"Removed: %s\" % (filename), 200\n\n return \"File not found: %s\" % (filename), 404" ]
[ "0.8534043", "0.8300577", "0.82324356", "0.81359273", "0.8114149", "0.8065215", "0.7914509", "0.78162354", "0.78063285", "0.7795391", "0.77448577", "0.769363", "0.7657995", "0.7628127", "0.76126134", "0.75135005", "0.74589443", "0.7451056", "0.7368061", "0.73465884", "0.72446775", "0.711326", "0.7058171", "0.70345336", "0.70337814", "0.70135236", "0.701345", "0.7008649", "0.69824517", "0.6956453" ]
0.84430987
1
Generates the API documentation for all of the packages/modules/classes/functions. Sphinx doesn't automatically generate the documentation for the API. This calls sphinx-apidoc which will create the API .rst files and dump them in the source directory. It is expected that one of the TOC directives calls out to the created API directory.
def generate_api_docs(self): if self.API_OUTPUT_DIR: args = [ # Put documentation for each module on its own page '-e', # don't create the "modules.rst" file (the table of contents # file) as this is already provided by the package's main rst # file. '-T', # Overwrite existing files '--force', '-o', self.API_OUTPUT_DIR, # the package to generate docs from self.PROJECT_DIR ] excludes = [ os.path.join(self.PROJECT_DIR, p) if not os.path.isabs(p) else p for p in self.API_EXCLUDE_DIRS ] apidoc.main(args + excludes)
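For context, a minimal sketch of invoking the same sphinx-apidoc flags outside the class, assuming Sphinx >= 1.7 (where ``sphinx.ext.apidoc.main`` accepts an argv-style list of CLI arguments); the ``src/mypackage`` and ``docs/api`` paths and the ``tests`` exclude are placeholder assumptions, not values taken from the record::

    import os
    from sphinx.ext.apidoc import main as apidoc_main

    def build_api_rst(package_dir, api_output_dir, exclude_dirs=()):
        """Generate one .rst file per module of ``package_dir`` under ``api_output_dir``."""
        argv = [
            "-e",        # put documentation for each module on its own page
            "-T",        # do not create the modules.rst table-of-contents file
            "--force",   # overwrite existing .rst files
            "-o", api_output_dir,
            package_dir,
        ]
        # Exclusion patterns are passed as extra positional arguments after the package path.
        argv += [os.path.join(package_dir, d) for d in exclude_dirs]
        apidoc_main(argv)

    if __name__ == "__main__":
        build_api_rst("src/mypackage", "docs/api", exclude_dirs=("tests",))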
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_documentation(self):\n self.generate_api_docs()\n build.main([\n self.SOURCE_DIR,\n self.BUILD_DIR,\n ])", "def write_api_docs(self, outdir):\r\n if not os.path.exists(outdir):\r\n os.mkdir(outdir)\r\n # compose list of modules\r\n modules = self.discover_modules()\r\n self.write_modules_api(modules,outdir)", "def generate_html(repo_dir, package_dir, module):\n apidir = os.path.join(repo_dir, 'doc', 'api')\n print(f\"Generating {module} API docs in {apidir!r}\")\n if subprocess.call(['sphinx-apidoc', '-Tef', '-o', apidir,\n os.path.join(package_dir, module),\n os.path.join(package_dir, module, 'test'),\n os.path.join(package_dir, module, 'scripts')]):\n raise RuntimeError(f'API doc generation failed for {module}')", "def generate_apidoc_patches(self):\n base_path = self.paths[\"api_doc_dir\"]\n from django_swagger_utils.core.utils.mk_dirs import MkDirs\n MkDirs().mk_dir_if_not_exits(file_name=base_path + \"/\")\n\n from django_swagger_utils.apidoc_gen.generators.patch_generator import PatchGenerator\n\n patch_generator = PatchGenerator(self.app_name, self.parser, self.paths, base_path)\n # generating api docs\n patch_generator.generate_json_patch()", "def doc(self):\n from distutils.dir_util import copy_tree\n\n def copy_tree_checker(src, dst):\n \"\"\"Wrap copy_tree to avoid pydoit error.\"\"\"\n copy_tree(src, dst)\n return True\n\n return {\n \"actions\": [\n (create_dir, [\"build/doc/source\"]),\n (copy_tree_checker, [\"docs\", \"build/doc/source\"]),\n TaskCreator.get_sphinx() + \"-apidoc -o build/doc/source --force --separate --module-first \" + self.project_name_sc,\n TaskCreator.get_sphinx() + \"-build -j auto -n build/doc/source build/doc/html\"\n ],\n \"verbosity\": 2\n }", "def apiDocs():\n\treturn render_template('apiDocs.html')", "def main():\n # We know that qidoc build will set the correct cwd\n qibuild_dir = \"..\"\n qibuild_dir = os.path.abspath(qibuild_dir)\n this_file = __file__\n this_dir = os.path.dirname(this_file)\n cmake_api = os.path.join(this_dir, \"../source/advanced/cmake/api\")\n cmake_api = os.path.abspath(cmake_api)\n if not os.path.exists(cmake_api):\n os.makedirs(cmake_api)\n qibuild_cmake = os.path.join(qibuild_dir, \"cmake\", \"qibuild\")\n for filename in DOCUMENTED_FILES:\n cmake_file = os.path.join(qibuild_cmake, filename + \".cmake\")\n rst_file = os.path.join(cmake_api, filename + \".rst\")\n gen_cmake_doc(cmake_file, rst_file)", "def build_python_api_main(outputdir, components):\n mainrst_filename = Path(outputdir, 'index.rst')\n # list documented (python) packages\n docpython_dir = Path(outputdir, 'python')\n packages = [f for f in docpython_dir.glob('*')]\n packages = [p.name for p in packages]\n # (ugly) trick to print components in the expected order.\n pack = {}\n for p in components:\n for pname in packages:\n if pname.count(p) > 0:\n pack[pname] = components[p]\n packages = [p[0] for p in sorted(pack.items(), key=operator.itemgetter(1))] \n\n if len(packages) > 0:\n with open(mainrst_filename, 'a') as f:\n # label = '.. _siconos_python_reference:\\n\\n\\n'\n title = 'Siconos Python API reference'\n title += '\\n' + len(title) * '#' + '\\n\\n'\n title += 'This is the documentation of '\n title += '`python <https://www.python.org/>`_ '\n title += 'interface to Siconos.\\n\\n\\n'\n header = '.. 
toctree::\\n :maxdepth:3\\n\\n'\n f.write(title)\n f.write(header)\n for p in packages:\n if p in modules_docs:\n title = p.replace('_','.') + ': ' + modules_docs[p]\n directive = title + ' <python/' + p + '/autodoc>\\n'\n else:\n directive = 'python/' + p + '/autodoc\\n\\n'\n directive = textwrap.indent(directive, ' ')\n f.write(directive)\n f.write('\\n')", "def docs():\n sh('sphinx-build -W -b html docs docs/_build/html')", "def generate_all_api_documents(\n directory_uri=DIRECTORY_URI,\n doc_destination_dir=BASE,\n artifact_destination_dir=DISCOVERY_DOC_DIR,\n):\n api_directory = collections.defaultdict(list)\n http = build_http()\n resp, content = http.request(directory_uri)\n if resp.status == 200:\n directory = json.loads(content)[\"items\"]\n for api in directory:\n document_api(\n api[\"name\"],\n api[\"version\"],\n api[\"discoveryRestUrl\"],\n doc_destination_dir,\n artifact_destination_dir,\n )\n api_directory[api[\"name\"]].append(api[\"version\"])\n\n # sort by api name and version number\n for api in api_directory:\n api_directory[api] = sorted(api_directory[api])\n api_directory = collections.OrderedDict(\n sorted(api_directory.items(), key=lambda x: x[0])\n )\n\n markdown = []\n for api, versions in api_directory.items():\n markdown.append(\"## %s\" % api)\n for version in versions:\n markdown.append(\n \"* [%s](http://googleapis.github.io/google-api-python-client/docs/dyn/%s_%s.html)\"\n % (version, api, safe_version(version))\n )\n markdown.append(\"\\n\")\n\n with open(doc_destination_dir / \"index.md\", \"w\") as f:\n markdown = \"\\n\".join(markdown)\n f.write(markdown)\n\n else:\n sys.exit(\"Failed to load the discovery document.\")", "def generate_all(module, outputpath):\n mod = _get_module(module)\n out = outputpath + \"/\" + module + \"_pyapi.rst\"\n # Generate a summary page for the module's API\n with open(out, \"w\") as rst_file:\n rst_file.write(_generate_module_summary_rst(module))\n # Generate supporting pages for the module\n _generate_rst_for_all_classes(module, outputpath)\n _generate_rst_for_all_enums(module, outputpath)\n _generate_rst_for_all_exceptions(module, outputpath)\n _generate_rst_for_all_functions(module, outputpath)\n\n # Now recurse into any submodule and generate all for them too.\n for submod in iter_modules(mod.__path__):\n if submod.ispkg:\n generate_all(module + \".\" + submod.name, outputpath)", "def build_docs(session):\n envbindir = session.bin\n session.install(\"-e\", \".[all,docs]\")\n with session.chdir(\"docs/\"):\n session.run(\n \"sphinx-autobuild\",\n \"-j\",\n \"auto\",\n \"--open-browser\",\n \"-qT\",\n \".\",\n f\"{envbindir}/../tmp/html\",\n )", "def run_autoapi(app):\n if not app.config.autoapi_dirs:\n raise ExtensionError(\"You must configure an autoapi_dirs setting\")\n\n if app.config.autoapi_include_summaries is not None:\n warnings.warn(\n \"autoapi_include_summaries has been replaced by \"\n \"the show-module-summary AutoAPI option\\n\",\n RemovedInAutoAPI3Warning,\n )\n if app.config.autoapi_include_summaries:\n app.config.autoapi_options.append(\"show-module-summary\")\n\n # Make sure the paths are full\n normalised_dirs = _normalise_autoapi_dirs(app.config.autoapi_dirs, app.srcdir)\n for _dir in normalised_dirs:\n if not os.path.exists(_dir):\n raise ExtensionError(\n f\"AutoAPI Directory `{_dir}` not found. 
\"\n \"Please check your `autoapi_dirs` setting.\"\n )\n\n normalized_root = os.path.normpath(\n os.path.join(app.srcdir, app.config.autoapi_root)\n )\n url_root = os.path.join(\"/\", app.config.autoapi_root)\n\n template_dir = app.config.autoapi_template_dir\n if template_dir and not os.path.isabs(template_dir):\n if not os.path.isdir(template_dir):\n template_dir = os.path.join(app.srcdir, app.config.autoapi_template_dir)\n elif app.srcdir != os.getcwd():\n warnings.warn(\n \"autoapi_template_dir will be expected to be \"\n \"relative to the Sphinx source directory instead of \"\n \"relative to where sphinx-build is run\\n\",\n RemovedInAutoAPI3Warning,\n )\n sphinx_mapper_obj = PythonSphinxMapper(\n app, template_dir=template_dir, url_root=url_root\n )\n\n if app.config.autoapi_file_patterns:\n file_patterns = app.config.autoapi_file_patterns\n else:\n file_patterns = _DEFAULT_FILE_PATTERNS\n\n if app.config.autoapi_ignore:\n ignore_patterns = app.config.autoapi_ignore\n else:\n ignore_patterns = _DEFAULT_IGNORE_PATTERNS\n\n if \".rst\" in app.config.source_suffix:\n out_suffix = \".rst\"\n elif \".txt\" in app.config.source_suffix:\n out_suffix = \".txt\"\n else:\n # Fallback to first suffix listed\n out_suffix = next(iter(app.config.source_suffix))\n\n if sphinx_mapper_obj.load(\n patterns=file_patterns, dirs=normalised_dirs, ignore=ignore_patterns\n ):\n sphinx_mapper_obj.map(options=app.config.autoapi_options)\n\n if app.config.autoapi_generate_api_docs:\n sphinx_mapper_obj.output_rst(root=normalized_root, source_suffix=out_suffix)", "def docs(session):\n session.install('-rrequirements-dev.txt')\n session.install('-e', '.')\n run_sphinx(session)", "def generate(self):\n\n # Write Doxyfile\n doxyfile_content = DOXYFILE_TEMPLATE.format(\n name=\"wurfapi\",\n output_path=self.output_path,\n source_path=\" \".join(self.source_paths),\n recursive=\"YES\" if self.recursive else \"NO\",\n extra=\"\",\n )\n\n doxyfile_path = os.path.join(self.output_path, \"Doxyfile\")\n with open(doxyfile_path, \"w\") as doxyfile:\n\n doxyfile.write(doxyfile_content)\n\n # @todo: Doxygen generates a bunch of warnings. We should\n # propagate these somehow - if you want to know what\n # has not been documented etc.\n result = self.runner.run(\n command=self.doxygen_executable + \" Doxyfile\", cwd=self.output_path\n )\n\n # Doxygen reports warnings on stderr. 
So if we have some output\n # there raise it.\n self._suppress_incorrect_warnings(result.stderr)\n\n if result.stderr.output and self.warnings_as_error:\n raise wurfapi.doxygen_error.DoxygenError(result.stderr.output)\n\n # The Doxygen XML is written to the 'xml' subfolder of the\n # output directory\n return os.path.join(self.output_path, \"xml\")", "def run():\n build_no_documentation()\n build_sphinx_build()\n #build_sphinx_pdf()\n build_graphviz_files()", "def generate_patch_build(self, domain):\n # TODO change name of def\n base_path = self.paths[\"api_doc_dir\"]\n self.generate_apidoc_patches()\n from django_swagger_utils.apidoc_gen.generators.patch_generator import PatchGenerator\n patch_generator = PatchGenerator(self.app_name, self.parser, self.paths, base_path)\n patch_generator.filter_for_deleted_apis()\n\n process = subprocess.Popen(['which', 'apidoc'], stdout=subprocess.PIPE)\n\n output = process.communicate()[0]\n if output:\n\n with open(self.paths[\"base_dir\"] + \"/apidoc.json\", 'w') as outfile:\n apidoc_content = {\"url\": \"https://ib-backend-dev.apigateway.in\",\n \"version\": \"0.0.1\",\n \"description\": \"\",\n \"name\": \"iBHubs_backend API Documentation\",\n \"title\": \"iBHubs_backend Documenation\"}\n json.dump(apidoc_content, outfile, indent=4)\n # by default we assume user is working at no specific branch so we fix\n # url to default above url as above , then we check if any specific parametr is given\n # and replace url with required url\n if domain != '' and domain:\n with open(self.paths[\"apidoc\"]) as src_json:\n apidoc_content = json.load(src_json)\n apidoc_content['url'] = \"https://\" + domain\n with open(self.paths[\"apidoc\"], 'w') as outfile:\n json.dump(apidoc_content, outfile, indent=4)\n try:\n os.mkdir(\"docs\")\n except OSError:\n pass\n # the below command is responsible for creating docs\n process = subprocess.Popen(['apidoc', '-i', self.base_dir,\n '-o', os.path.join(self.base_dir, 'docs'),\n '-e', 'django_swagger_utils/*',\n '-e', 'static/*',\n ], stdout=subprocess.PIPE)\n print process.communicate()[0]\n ################################################\n # hosting apidoc\n ################################################\n # obtaining the path of static folder of django-swagger-utils\n # django_swagger_utils_path = os.path.abspath(os.path.join(os.path.dirname(__file__), \"../..\"))\n # static_folder_path = os.path.join(django_swagger_utils_path, \"static\")\n # import shutil\n # # create a folder apidoc , delete if previously exists\n # if os.path.exists(os.path.join(static_folder_path, \"apidoc\")):\n # shutil.rmtree(os.path.join(static_folder_path, \"apidoc\"))\n # apidoc_path = os.path.join(static_folder_path, \"apidoc\")\n #\n # os.mkdir(apidoc_path)\n\n # from distutils.dir_util import copy_tree\n # copydocs from docs to apidoc in swagger utils\n # try:\n # copy_tree(os.path.join(self.base_dir, 'docs'), apidoc_path)\n # except Exception as err:\n # print err\n\n # browse to localhost:<port>/static/apidoc/index.html\n\n else:\n raise CommandError(\"Help: Install apidoc: [ sudo npm install -g apidoc ]\")", "def generate_docs(root_dir, session):\n ...", "def generate(self, api):\n for namespace in api.namespaces.values():\n # One module per namespace is created. 
The module takes the name\n # of the namespace.\n with self.output_to_relative_path('{}.py'.format(namespace.name)):\n self._generate_namespace_module(namespace)", "def main():\n\n return redirect('/apidocs')", "def buildDocumentation():\n helptext = 'usage: build_doc.py <output format> <type of documentation>' \\\n '\\n - html: for html output' \\\n '\\n - pdf: for pdf output' \\\n '\\n\\n - all: complete documentation' \\\n '\\n - dev: only developer documentation' \\\n '\\n - user: only user documentation'\n if len(sys.argv) != 3:\n print helptext\n sys.exit(1)\n\n if sys.argv[1] not in ['pdf', 'html']:\n print helptext\n sys.exit(1)\n if sys.argv[2] not in ['all', 'dev', 'user']:\n print helptext\n sys.exit(1)\n\n copyfile('docs/index_%s.rst.template' % sys.argv[2], 'index.rst') # copy main file into root directory\n os.system('sphinx-build -b %s -c docs -D master_doc=index . docs/output/%s/%s' % (sys.argv[1], sys.argv[1], sys.argv[2]))\n os.remove('index.rst') # delete config file from root directory", "def generate_docs(self) -> List[Path]:\n outputs = []\n for file in self.files:\n if (stem := file.stem) == \"__init__\":\n # We might have more than one __init__.py file depending on package structure and these files shouldn't\n # contain methods, so we don't want to convert them\n continue\n\n if not (doc := get_doc(file)):\n continue # No docstring returned, skip this file\n doc = doc[33:] # First 33 characters are not required for our docs\n\n # Write the output we've generated to a file\n (output := self.directory / f\"{stem}.md\").write_text(generate_header(stem) + doc)\n outputs.append(output)\n return outputs", "def docs(command, warn_is_error=False, options=\"\"):\n print(\n \"\"\"\nRunning Sphinx to test the docs building\n========================================\n\"\"\"\n )\n o = \"-W \" if warn_is_error else \"\"\n if \"-W\" in options:\n options = options.replace(\"-W\", \"\")\n options = options + \" \" + o\n shutil.rmtree(\"docs/_build\", ignore_errors=True)\n shutil.rmtree(\"docs/api\", ignore_errors=True)\n shutil.rmtree(\"docs/code_reference/api\", ignore_errors=True)\n shutil.rmtree(\"docs/jupyter_execute\", ignore_errors=True)\n shutil.rmtree(\"docs/examples/default_config.yaml\", ignore_errors=True)\n command.run(\"python -m boa.config --output-path docs/examples/default_config.yaml\", echo=True, pty=POSIX)\n command.run(f\"sphinx-build {options} -b html docs docs/_build\", echo=True, pty=POSIX)", "def _write_member_documentation_pages(\n documenter: sphinx.ext.autodoc.Documenter):\n for entry in _get_documenter_members(documenter):\n if entry.is_inherited:\n continue\n if (entry.overload and entry.overload.overload_id and\n re.fullmatch('[0-9]+', entry.overload.overload_id)):\n logger.warning('Unspecified overload id: %s', entry.object_name)\n member_rst_path = os.path.join(documenter.env.app.srcdir, 'python', 'api',\n entry.page_name + '.rst')\n objtype = entry.documenter.objtype\n member_content = ''\n if objtype == 'class':\n member_content += ':duplicate-local-toc:\\n\\n'\n member_content += sphinx_utils.format_directive(\n 'tensorstore-python-apidoc',\n options=dict(\n fullname=entry.full_name,\n objtype=objtype,\n importname=entry.import_name,\n objectdescription=True,\n subscript=entry.subscript,\n overload=cast(ParsedOverload, entry.overload).overload_id,\n ),\n )\n pathlib.Path(member_rst_path).write_text(member_content)\n _write_member_documentation_pages(entry.documenter)", "def main(*, build, subdir, description, supports_modules=False,\n 
supports_quick=False):\n parser = argparse.ArgumentParser(description=description)\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\n \"--serve\", action='store_true',\n help=\"Serve the documentation on the given PORT for easy preview.\")\n group.add_argument(\n \"--out_dir\", type=str, metavar=\"DIR\",\n help=\"Generate the documentation to the given output directory.\"\n \" The DIR must be an absolute path.\"\n \" If DIR already exists, then it must be empty.\"\n \" (For regression testing, the DIR can be the magic value <test>,\"\n \" in which case a $TEST_TMPDIR subdir will be used.)\")\n parser.add_argument(\n \"--port\", type=int, metavar=\"PORT\", default=8000,\n help=\"Use a non-default PORT when serving for preview.\")\n parser.add_argument(\n \"--verbose\", action=\"store_true\",\n help=\"Echo detailed commands, progress, etc. to the console\")\n if supports_modules:\n parser.add_argument(\n \"module\", nargs=\"*\",\n help=\"Limit the generated documentation to only these modules and \"\n \"their children. When none are provided, all will be generated. \"\n \"For example, specify drake.math or drake/math for the C++ \"\n \"module, or pydrake.math or pydrake/math for the Python module.\")\n if supports_quick:\n parser.add_argument(\n \"--quick\", action=\"store_true\", default=False,\n help=\"Omit from the output items that are slow to generate. \"\n \"This yields a faster preview, but the output will be incomplete.\")\n args = parser.parse_args()\n if args.verbose:\n global _verbose\n _verbose = True\n curried_build = build\n if supports_modules:\n canonicalized_modules = [\n x.replace('/', '.')\n for x in args.module\n ]\n curried_build = functools.partial(\n curried_build, modules=canonicalized_modules)\n if supports_quick:\n curried_build = functools.partial(\n curried_build, quick=args.quick)\n if args.out_dir is None:\n assert args.serve\n _do_preview(build=curried_build, subdir=subdir, port=args.port)\n else:\n _do_generate(build=curried_build, out_dir=args.out_dir,\n on_error=parser.error)", "def build_docs(open_docs):\n python_call(\"pip\", [\"install\", \"src/[docs]\"])\n python_call(\"pip\", [\"install\", \"-r\", \"src/requirements.txt\"])\n python_call(\n \"ipykernel\", [\"install\", \"--user\", \"--name=za_covid_map\"]\n )\n shutil.rmtree(\"docs/build\", ignore_errors=True)\n call(\n [\n \"sphinx-apidoc\",\n \"--module-first\",\n \"-o\",\n \"docs/source\",\n \"src/za_covid_map\",\n ]\n )\n call([\"sphinx-build\", \"-M\", \"html\", \"docs/source\", \"docs/build\", \"-a\"])\n if open_docs:\n docs_page = (Path.cwd() / \"docs\" / \"build\" / \"html\" / \"index.html\").as_uri()\n secho(\"Opening {}\".format(docs_page))\n webbrowser.open(docs_page)", "def do_docs(self, path):\n print(\"scaraping documentation\")\n for p in path.glob(\"**/*\"):\n if p.is_file():\n parts = p.relative_to(path).parts\n if parts[-1].endswith(\"rst\"):\n data = tsparse(p.read_bytes())\n blob = DocBlob()\n blob.arbitrary = data\n blob.content = {}\n\n blob.ordered_sections = []\n blob.item_file = None\n blob.item_line = None\n blob.item_type = None\n blob.aliases = []\n blob.example_section_data = Section()\n blob.see_also = []\n blob.signature = None\n blob.references = None\n blob.refs = []\n\n self.docs[parts] = json.dumps(blob.to_json(), indent=2)\n else:\n pass\n # data = p.read_bytes()", "def _generate_rst_files_for_modules() -> None:\n docs_dir = os.path.abspath(os.path.dirname(__file__))\n module_rst_save_dir = os.path.join(docs_dir, 'api_reference')\n # gather up 
modules to generate rst files for\n document_modules = _modules_to_rst()\n\n # rip out types that are duplicated in top-level composer module\n composer_imported_types = []\n for name in composer.__all__:\n obj = composer.__dict__[name]\n if not isinstance(obj, types.ModuleType):\n composer_imported_types.append(obj)\n\n document_modules = sorted(document_modules, key=lambda x: x.__name__)\n os.makedirs(module_rst_save_dir, exist_ok=True)\n for module in document_modules:\n saveas = os.path.join(module_rst_save_dir, f'{module.__name__}.rst')\n print(f'Generating rst file {saveas} for module: {module.__name__}')\n\n # avoid duplicate entries in docs. We add torch's _LRScheduler to\n # types, so we get a ``WARNING: duplicate object description`` if we\n # don't exclude it\n exclude_members = [torch.optim.lr_scheduler._LRScheduler]\n if module is not composer:\n exclude_members += composer_imported_types\n\n content = _auto_rst_for_module(module, exclude_members=exclude_members)\n\n with open(saveas, 'w') as f:\n f.write(content)", "def html():\n builtdocs = path(\"docs\") / options.sphinx.builddir / \"html\"\n destdir = path(PACKAGE) / \"docs\"\n destdir.rmtree()\n builtdocs.move(destdir)", "def run(self):\n # Loading yaml\n self.load_yaml()\n\n # Print paths from schema\n section_title = '**API Paths**'\n self.write(section_title)\n self.write('=' * len(section_title))\n self.print_paths()\n\n # Print models\n section_title = '**Schemas Description**'\n self.write(section_title)\n self.write('=' * len(section_title))\n self.print_schemas()\n\n # Render by sphinx\n node = nodes.section()\n node.document = self.state.document\n nested_parse_with_titles(self.state, self.__view_list, node)\n return node.children" ]
[ "0.8099668", "0.79043376", "0.75185376", "0.73260605", "0.7320983", "0.71930486", "0.7081947", "0.70498407", "0.69733393", "0.697248", "0.68476886", "0.6820365", "0.6783848", "0.6733729", "0.6585713", "0.657623", "0.6568642", "0.656522", "0.6563635", "0.6527915", "0.65139586", "0.650261", "0.64585096", "0.6448331", "0.6401936", "0.6397832", "0.63838387", "0.6368053", "0.6360185", "0.63020295" ]
0.8630611
0
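The document in the record above drives sphinx.ext.apidoc programmatically rather than shelling out to sphinx-apidoc. A minimal, self-contained sketch of that invocation pattern follows; the DocsBuilder class name, the directory paths, and the exclude list are illustrative assumptions, not values taken from the record.

import os
from sphinx.ext import apidoc  # Sphinx >= 1.7 exposes apidoc.main(argv)


class DocsBuilder:
    # Hypothetical configuration values, chosen only for illustration.
    PROJECT_DIR = os.path.abspath("mypackage")
    API_OUTPUT_DIR = os.path.abspath("docs/api")
    API_EXCLUDE_DIRS = ["tests", "examples"]

    def generate_api_docs(self):
        if self.API_OUTPUT_DIR:
            # Same flags as in the record: one page per module (-e), skip the
            # modules.rst table of contents (-T), and overwrite existing files.
            args = ['-e', '-T', '--force', '-o', self.API_OUTPUT_DIR, self.PROJECT_DIR]
            # Exclude patterns are passed as absolute paths after the package path.
            excludes = [
                os.path.join(self.PROJECT_DIR, p) if not os.path.isabs(p) else p
                for p in self.API_EXCLUDE_DIRS
            ]
            apidoc.main(args + excludes)


if __name__ == "__main__":
    DocsBuilder().generate_api_docs()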
Attempts to clean all of the files found in ``self.FILES_TO_CLEAN``. Ignores all errors.
def try_clean(self):
    for f in self.FILES_TO_CLEAN:
        if not os.path.exists(f):
            continue
        if os.path.isdir(f):
            # don't care on error
            shutil.rmtree(f, onerror=lambda *x, **y: None)
        else:
            self.safe_delete(f)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean():\n clean_files()", "def cleanup_files(self):\n\n self.backup_files()\n self.delete_files()", "def clean(self):\n if self.verbosity:\n self.header(\"Cleaning data files\")\n\n tsv_list = os.listdir(self.tsv_dir)\n\n if self.resume_mode:\n # get finished clean command logs of last update\n prev_cleaned = [\n x.file_name + '.TSV'\n for x in self.log_record.called.filter(\n command='cleancalaccessrawfile',\n finish_datetime__isnull=False\n )\n ]\n self.log(\"{} files already cleaned.\".format(len(prev_cleaned)))\n # remove these from tsv_list\n tsv_list = [x for x in tsv_list if x not in prev_cleaned]\n\n # Loop through all the files in the source directory\n if self.verbosity:\n tsv_list = progress.bar(tsv_list)\n for name in tsv_list:\n call_command(\n \"cleancalaccessrawfile\",\n name,\n verbosity=self.verbosity,\n keep_files=self.keep_files,\n )", "def cleanUp(self):\r\n remove_files(self._db_files_to_remove, error_on_missing=False)", "def _cleanup_files(self):\n\n for root, dirs, files in os.walk(self.build_directory):\n dirs_to_delete = [\n Path(root).joinpath(x) for x in dirs if x == '__pycache__'\n ]\n files_to_delete = [\n Path(root).joinpath(x) for x in files if Path(x).suffix == '.pyc'\n ]\n for d in dirs_to_delete:\n logger.info('Deleting: %s', d)\n shutil.rmtree(d)\n for f in files_to_delete:\n logger.info('Deleting: %s', f)\n f.unlink()", "def clean_files(self):\n self.filenames.clear()", "def _clean_files(self, in_subdirectory=False):\n files = self._file_explorer.ls()\n if not in_subdirectory:\n LOG.info(f\"Cleaning {len(files)} file(s) on the device\")\n for file_ in files:\n try:\n self._file_explorer.rm(file_)\n except Exception as e:\n # Try to explore subdirectory\n LOG.info(f\"Attempting to clean directory {file_}\")\n self._file_explorer.cd(file_)\n self._clean_files(in_subdirectory=True)\n if in_subdirectory:\n self._file_explorer.cd('..')\n else:\n LOG.info(\"Done cleaning FS\")", "def clean_data():\n for clean_file in clean_files:\n file_list = [f for f in os.listdir(\".\") if f.endswith(clean_file)]\n for f in file_list:\n os.remove(f)", "def clean(files):\n\tfor file in files:\n\t\ttry:\n\t\t\tos.remove(file)\n\t\texcept Exception as e:\n\t\t\tprint(e)", "def run(self):\n super(CleanUp, self).run()\n\n for dir_ in CleanUp.CLEANFOLDERS:\n if exists(dir_):\n print(\"Removing: {}\".format(dir_))\n if not self.dry_run and exists(dir_):\n rmtree(dir_)\n\n for dir_ in CleanUp.CLEANFOLDERSRECURSIVE:\n for pdir in self.dfind(dir_, \".\"):\n print(\"Remove folder {}\".format(pdir))\n rmtree(pdir)\n\n for fil_ in CleanUp.CLEANFILESRECURSIVE:\n for pfil in self.ffind(fil_, \".\"):\n print(\"Remove file {}\".format(pfil))\n os.unlink(pfil)", "def cleanup(self):\n\n # check if the directory exists\n if not os.path.exists(self.path):\n return\n\n # check if the directory is a directory\n if not os.path.isdir(self.path):\n return\n\n # loop over content of directory and remove it\n for the_file in os.listdir(self.path):\n file_path = os.path.join(self.path, the_file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n except Exception as e:\n pass", "def clean(self):\n files = ['CHG', 'CHGCAR', 'POSCAR', 'INCAR', 'CONTCAR',\n 'DOSCAR', 'EIGENVAL', 'IBZKPT', 'KPOINTS', 'OSZICAR',\n 'OUTCAR', 'PCDAT', 'POTCAR', 'vasprun.xml',\n 'WAVECAR', 'XDATCAR', 'PROCAR', 'ase-sort.dat',\n 'LOCPOT', 'AECCAR0', 'AECCAR1', 'AECCAR2',\n 'WAVECAR.GTO', 'vasp.out', 'vasp.err']\n for f in files:\n try:\n 
os.remove(f)\n except OSError:\n pass", "def clean(self):\n cleaning_targets = [\n (self.build_dir, ['.ba', '.bo']),\n (self.sim_dir, ['.cxx', '.h', '.o']),\n (self.verilog_dir, ['.v']),\n (self.info_dir, [])]\n # This function should delete:\n # *.ba, *.bo from build_dir\n # *.cxx, *.h, *.o from sim_dir\n # *.v from verilog_dir\n # ? from info_dir\n # sim_exe\n for path, extensions in cleaning_targets:\n for name in os.listdir(path):\n if os.path.splitext(name)[1].lower() in extensions:\n os.remove(os.path.join(path, name))\n try:\n os.rmdir(path)\n except OSError:\n # ignore errors\n pass\n try:\n os.remove(self.sim_exe)\n except OSError:\n # ignore errors\n pass", "def clean(self):\n original_dir = os.getcwd()\n os.chdir(self.output)\n\n # Clear out directory\n file_list = os.listdir(self.output)\n\n for afile in file_list:\n if not afile.endswith('.gitignore'):\n path = os.path.join(self.output, afile)\n if os.path.isdir(path):\n rmtree(path)\n else:\n os.remove(path)\n os.chdir(original_dir)", "def _clean_workdir(self):\n\t\ttoremove = [self._get_config_filepath(), self._get_params_filepath(), self._get_conv_filepath(), self._get_psf_filepath()]\n\t\tfor filepath in toremove:\n\t\t\tif os.path.exists(filepath):\t\n\t\t\t\tlogger.debug(\"Removing existing file %s...\" % (filepath))\n\t\t\t\tos.remove(filepath)", "def cleanup(self):\r\n for f in [i for d in self.data.values() for i in d[\"filenames\"]]:\r\n try:\r\n os.unlink(f)\r\n except Exception: pass\r\n self.Destroy()", "def _clean_files(self):\n if self.delfiles & 1:\n ProcUtils.remove(self.okm)\n if self.delfiles & 2:\n ProcUtils.remove(self.hkm)\n if self.delfiles & 4:\n ProcUtils.remove(self.qkm)\n if self.delfiles & 8:\n ProcUtils.remove(self.obc)\n\n if self.log is False:\n ProcUtils.remove(self.pcf_file)\n base = os.path.basename(self.okm)\n ProcUtils.remove(os.path.join(self.dirs['run'],\n '.'.join(['LogReport', base])))\n ProcUtils.remove(os.path.join(self.dirs['run'],\n '.'.join(['LogStatus', base])))\n ProcUtils.remove(os.path.join(self.dirs['run'],\n '.'.join(['LogUser', base])))", "def _remove_files(self):\n if hasattr(self, 'files'):\n for file in self.files:\n if os.path.exists(file):\n os.remove(file)\n\n self._remove_changes()\n self._remove_temporary_files()", "def doCleanups(self):\r\n result = self._resultForDoCleanups\r\n ok = True\r\n while self._cleanups:\r\n function, args, kwargs = self._cleanups.pop(-1)\r\n try:\r\n function(*args, **kwargs)\r\n except Exception:\r\n ok = False\r\n result.addError(self, sys.exc_info())\r\n return ok", "def do_clean(VERBOSE=0):\r\n thisdir = os.getcwd()\r\n tempdir = \"TEMP\"\r\n\r\n if os.path.isdir(os.path.join(thisdir, \"TEMP\")):\r\n tempdir = os.path.join(thisdir, \"TEMP\")\r\n elif os.path.isdir(os.path.join(thisdir, \"..\", \"TEMP\")):\r\n tempdir = os.path.join(thisdir, \"..\", \"TEMP\")\r\n\r\n if os.path.isdir(tempdir):\r\n os.chdir(tempdir)\r\n if VERBOSE > 0:\r\n print \" cleaning temporary directory '%s'\" % tempdir\r\n filestogo = glob.glob(\"*.*\")\r\n gonecount = 0\r\n for fg in filestogo:\r\n try:\r\n os.remove(fg)\r\n gonecount = gonecount +1\r\n except:\r\n if VERBOSE > 0:\r\n print \" !!! 
COULDN@T DELETE FILE '%s'\" % fg\r\n else:\r\n pass\r\n if VERBOSE > 0:\r\n print \" Deleted %s files\" % gonecount\r\n print\r\n os.chdir(thisdir)", "def clean_outputs(self) -> None:\n\n def _delete_if_not_none(fn: Optional[str]) -> None:\n if fn is not None:\n Path(fn).unlink()\n\n _delete_if_not_none(self.config[\"LOG_FILE\"])\n\n for file_ in self.exporter.get_all_files():\n file_.unlink()", "def finalize(self):\n if not self.inputs.clean_workdir:\n return\n cleaned_calcs = []\n for calculation in self.ctx.calculations:\n try:\n # noinspection PyProtectedMember\n calculation.outputs.remote_folder._clean()\n cleaned_calcs.append(calculation)\n except ValueError as ex:\n self.logger.warning(\"Exception caught while cleaning remote folders: {}\".format(ex))\n if cleaned_calcs:\n self.report('Cleaned remote folders of calculations: {}'.format(' '.join(map(str, cleaned_calcs))))", "def cleanup(self, job_dirs=None):\n if not self.args.cleanup:\n return\n self.args.logger.info(\"Begin intermediate file cleanup\")\n # Remove intermediate working directory files\n filetypes = [constants.INPUT_FEATURES_FILENAME.format(self.args.output_dir, \"*\"),\n constants.OUTPUT_FEATURES_FILENAME.format(self.args.output_dir, \"*\"),\n constants.RESULTS_FILENAME.format(self.args.output_dir, \"*\")]\n for filetype in filetypes:\n for filename in glob.glob(filetype):\n try:\n os.remove(filename)\n except OSError as error:\n self.args.logger.warning(f\"Cleanup: unable to remove {filename}: {error}\")\n if job_dirs: # Remove condor job directories\n for job_dir in job_dirs:\n try:\n shutil.rmtree(job_dir)\n except OSError as error:\n self.args.logger.warning(f\"Cleanup: unable to remove {job_dir}: {error}\")\n self.args.logger.info(\"End intermediate file cleanup\")", "def clean(ctx):\n header(clean.__doc__)\n with ctx.cd(ROOT):\n for pattern in CLEAN_PATTERNS:\n info(\"Removing {0}\", pattern)\n ctx.run(\"rm -rf {0}\".format(pattern))", "def cleanUp(self):\n self.dirMonitor.stop()\n self.filesList.cleanUp()", "def clean(self):\n print(\"Cleaning outputs in %s\" % self.args.output)\n files = glob.glob(self.args.output + \"*.pkl\")\n for f in files:\n if os.path.exists(f):\n os.remove(f)", "def actionClean():\n result = NO_ERROR\n\n Logger.printStartActionMessage(ACTION_CLEAN)\n #Init cleanup logger\n Cleanup.init()\n\n for action in Settings.cleanupOptions['actions']:\n if action == 'cleanOutput':\n for target in Settings.cleanupOptions['targets']:\n for platform in Settings.cleanupOptions['platforms']:\n for cpu in Settings.cleanupOptions['cpus']:\n for configuration in Settings.cleanupOptions['configurations']:\n #Clean up output folders for specific target, platform, cpu and configuration\n result = Cleanup.run(action, target, platform, cpu, configuration)\n else:\n #Perform other cleanup acrions that are not dependent of target ...\n result = Cleanup.run(action)\n if result == NO_ERROR:\n Logger.printEndActionMessage(ACTION_CLEAN)\n else:\n Logger.printEndActionMessage('Cleanup failed!',ColoredFormatter.RED)\n System.stopExecution(result)", "def do_cleanup_cruft(self): # pragma: no cover\n\n for cruft in self.get_cruft():\n cruft.cleanup()\n self.post_cleanup()", "def clean(self):\n \n with current_context() as ctx:\n project_outputs = ctx.get('current.project_outputs')\n if project_outputs is not None:\n if self._project in project_outputs:\n del project_outputs[self._project]\n \n path = self.path\n if os.path.isfile(path):\n args = [self.command, '-f', path, '-t', 'clean', '-g']\n try:\n check_call(args)\n 
except CalledProcessError as ex:\n return ex.returncode\n self.remove()\n return 0", "def clean(self):\n clean_list = [\n position\n for position in os.listdir()\n if os.path.isfile(position) and not position.startswith(\".\")\n ]\n self.move_files(clean_list)" ]
[ "0.757727", "0.74859875", "0.7146801", "0.7119696", "0.7087362", "0.7058787", "0.7051271", "0.7023708", "0.69623536", "0.6948941", "0.69370013", "0.6927504", "0.6906198", "0.679786", "0.6780564", "0.67707026", "0.67506415", "0.6735639", "0.6723798", "0.66588336", "0.6654853", "0.6628376", "0.66037434", "0.65863776", "0.65527534", "0.6552191", "0.6544146", "0.6533419", "0.65294236", "0.6527092" ]
0.85913986
0
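A runnable sketch of the cleanup pattern from the record above. The FILES_TO_CLEAN entries and the safe_delete helper are assumptions added only so the snippet is self-contained; the real values would come from the builder's configuration.

import os
import shutil


class DocsBuilder:
    # Hypothetical clean targets; purely illustrative.
    FILES_TO_CLEAN = ["docs/_build", "docs/api", ".doctrees"]

    def safe_delete(self, path):
        # Remove a single file, swallowing any error ("ignores all errors").
        try:
            os.remove(path)
        except OSError:
            pass

    def try_clean(self):
        for f in self.FILES_TO_CLEAN:
            if not os.path.exists(f):
                continue
            if os.path.isdir(f):
                # don't care on error
                shutil.rmtree(f, onerror=lambda *x, **y: None)
            else:
                self.safe_delete(f)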
Gathers all command line arguments and then builds the docs. This performs command line parsing and stores the known flags (those added with ``self.add_argument()``) into ``self.args`` and all leftover unknown args into ``self.argv`` (see
def build(self, argv=None):
    if argv is None:
        argv = sys.argv
    self.setup_default_arguments()
    self.args, self.argv = self.parser.parse_known_args(argv)
    if self.args.clean:
        self.try_clean()
    self.pre_build_hook()
    self.generate_documentation()
    self.post_build_hook()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_args(self):\n parser = argparse.ArgumentParser(description='Build PDF documentation')\n parser.add_argument('config', help='YAML config file')\n parser.add_argument('-f', '--fast', help='Do not update toc',\n action='store_true', default=False)\n parser.add_argument('-p', '--pandoc', help='Only pandoc, no latex',\n action='store_true', default=False)\n parser.add_argument('-n', '--nocache', help='Disable cache',\n action='store_true', default=False)\n parser.add_argument('-v', '--verbose', help='Enables verbose output; '\n 'repeat up to three time for more verbose output',\n action='count', default=0)\n\n self.args = parser.parse_args()", "def setup_docs(self):\n for arg in self.args:\n self.log.debug(\"Processing arg %s\" % arg)\n if isinstance(arg, dexy.doc.Doc) or isinstance(arg, dexy.doc.PatternDoc):\n doc = arg\n\n elif isinstance(arg, list):\n if not isinstance(arg[0], basestring):\n raise Exception(\"First arg %s should be a string\" % arg[0])\n if not isinstance(arg[1], dict):\n raise Exception(\"Second arg %s should be a dict\" % arg[1])\n\n if not \"*\" in arg[0]:\n doc = dexy.doc.Doc(arg[0], **arg[1])\n else:\n # This is a pattern doc or real doc TODO better way to verify?\n doc = dexy.doc.PatternDoc(arg[0], **arg[1])\n\n elif isinstance(arg, basestring):\n doc = dexy.doc.PatternDoc(arg)\n\n else:\n raise Exception(\"unknown arg type %s for arg %s\" % (arg.__class__.__name__, arg))\n\n doc.wrapper = self\n doc.setup()\n\n self.docs.append(doc)", "def get_cmd_line_args():\n parser = argparse.ArgumentParser(\n description=DOC, formatter_class=argparse.RawTextHelpFormatter)\n\n # Required arguments\n required = parser.add_argument_group(\"Required arguments\")\n required.add_argument(\n \"--fastq-regex\", required=True, type=is_fastq_regex, metavar=\"<regex>\",\n help=\n \"Snakemake regex used to infer the FASTQ files to process and the \"\n \"related wildcards: {sample} (mandatory), {lane} (optional) and \"\n \"{end} (mandatory if paired-end sequencing), e.g. \"\n \"/path/to/data/{sample}/{sample}_{ignore1}_{lane}_{end}_001.fastq.gz\"\n )\n required.add_argument(\n \"--outdir\", required=True, metavar=\"<dir>\", help=\"Output directory.\"\n )\n required.add_argument(\n \"--ref-build\", required=True, metavar=\"<version>\", help=\n \"Reference genome build, e.g. hg19 or mm10. Assuming the existence of \"\n \"the 3 following files in <--ref-genome-dir>: <ref build>.fa \"\n \"<ref build>.fa.fai and <ref build>.fa.dict\"\n )\n required.add_argument(\n \"--ref-genome-dir\", metavar=\"<dir>\", help=\n \"Bisulfite reference genome directory, including '<ref build>.fa', \"\n \"'<ref build>.fa.fai', '<ref build>.fa.dict' and the \"\n \"'Bisulfite_Genome' directory created by running the \"\n \"'bismark_genome_preparation' script. See README.md documentation.\"\n )\n\n\n # Optional general arguments\n optional = parser.add_argument_group(\"Optional\")\n optional.add_argument(\n \"--single-end\", action=\"store_false\", dest=\"paired_end\", help=\n \"By default paired-end sequencing is assumed, for single-end set this \"\n \"flag.\"\n )\n optional.add_argument(\n \"--rrbs\", action=\"store_true\", help=\n \"For Reduced Representation Bisulfite Sequencing (RRBS) set this flag.\"\n )\n optional.add_argument(\n \"--no-deduplication\", action=\"store_false\",\n dest=\"use_bismark_deduplicate\", help=\n \"Set this flag to not apply Bismark BAM deduplication. The deduplica\"\n \"tion removes reads with similar start/end positions on a given chromo\"\n \"some. 
It is not a valid PCR correction for RRBS or amplicon data. \"\n \"The deduplication is not applied if the --rrbs flag is set.\"\n )\n optional.add_argument(\n \"--non-directional-library\", action=\"store_false\",\n dest=\"directional_library\", help=\n \"By default the library is assumed to be directional, if not set this \"\n \"flag. See Bismark documentation for more information.\"\n )\n optional.add_argument(\n \"--target-bed\", type=is_file, metavar=\"<path>\", help=\n \"For targeted sequencing, the path to the BED file listing the regions\"\n \" targeted. Used only for read coverage computation. If no BED is pro\"\n \"vided the coverage will be computed on the whole reference genome.\"\n )\n optional.add_argument(\n \"--target-kit\", metavar=\"<name>\", help=\n \"For targeted sequencing, the name of the kit used to target to be \"\n \"reported in the preprocessing report. Does not affect processing.\"\n )\n optional.add_argument(\n \"--phred\", type=int, choices={33, 64}, default=DEFAULT_OF[\"phred\"],\n metavar=\"<33|64>\", help=\n \"Base quality encoding of input FASTQ files: 33|64, by default %i.\"\n % DEFAULT_OF[\"phred\"]\n )\n optional.add_argument(\n \"--r1-id\", default=DEFAULT_OF[\"r1_id\"], metavar=\"<ID>\", help=\n \"Case-insensitive ID used to identify R1 (forward) reads in paired-end\"\n \" sequencing, by default '%s'.\" % DEFAULT_OF[\"r1_id\"]\n )\n optional.add_argument(\n \"--r2-id\", default=DEFAULT_OF[\"r2_id\"], metavar=\"<ID>\", help=\n \"Case-insensitive ID used to identify R2 (reverse) reads in paired-end\"\n \" sequencing, by default '%s'.\" % DEFAULT_OF[\"r2_id\"]\n )\n optional.add_argument(\n \"--read-length\", type=int, metavar=\"<int>\", help=\n \"Length of reads (e.g. 150) to write in the HTML report. \"\n \"Does not affect the processing.\"\n )\n\n # Optional FastQC arguments\n fastqc = parser.add_argument_group(\"FastQC optional\")\n optional.add_argument(\n \"--fastqc-threads\", type=int, metavar=\"<int>\",\n default=DEFAULT_OF[\"fastqc\"][\"threads\"], help=\n \"FastQC '--threads' argument, by default %i.\"\n % DEFAULT_OF[\"fastqc\"][\"threads\"]\n )\n\n # Optional Trim Galore arguments\n trim_galore = parser.add_argument_group(\"Trim Galore optional\")\n ADAPTERS_URL = (\n \"https://support.illumina.com/bulletins/2016/12/what-sequences-do-i\"\n \"-use-for-adapter-trimming.html\")\n trim_galore.add_argument(\n \"--adapter-r1\", metavar=\"<sequence>\", help=\n \"Trim Galore '--adapter' argument: adapter sequence to be trimmed off \"\n \"read 1. Common sequences: %s\" % ADAPTERS_URL\n )\n trim_galore.add_argument(\n \"--adapter-r2\", metavar=\"<sequence>\", help=\n \"Trim Galore '--adapter2' argument: adapter sequence to be trimmed \"\n \"off read 2. 
Common sequences: %s\" % ADAPTERS_URL\n )\n trim_galore.add_argument(\n \"--quality\", type=int, default=DEFAULT_OF[\"trim_galore\"][\"quality\"],\n metavar=\"<int>\", help=\n \"Trim Galore '--quality' argument, by default %i.\"\n % DEFAULT_OF[\"trim_galore\"][\"quality\"]\n )\n trim_galore.add_argument(\n \"--stringency\", type=int, metavar=\"<int>\",\n default=DEFAULT_OF[\"trim_galore\"][\"stringency\"], help=\n \"Trim Galore '--stringency' argument: overlap with adapter sequence \"\n \"required to trim, by default %i (very stringent).\"\n % DEFAULT_OF[\"trim_galore\"][\"stringency\"]\n )\n trim_galore.add_argument(\n \"--min-length\", type=int, metavar=\"<int>\",\n default=DEFAULT_OF[\"trim_galore\"][\"min_length\"], help=\n \"Trim Galore '--length' argument: minimum read length after trimming \"\n \"otherwise removed, by default %i.\"\n % DEFAULT_OF[\"trim_galore\"][\"min_length\"]\n )\n trim_galore.add_argument(\n \"--error-rate\", type=float, metavar=\"<float>\",\n default=DEFAULT_OF[\"trim_galore\"][\"error_rate\"], help=\n \"Trim Galore '-e' argument: maximum allowed error rate with the \"\n \"matching region, by default {}\"\n .format(DEFAULT_OF[\"trim_galore\"][\"error_rate\"])\n )\n trim_galore.add_argument(\n \"--max-n\", type=int, metavar=\"<int>\", help=\n \"Trim Galore '--max_n' argument: Maximum number of 'N's in a read \"\n \"otherwise removed. By default not applied.\"\n )\n trim_galore.add_argument(\n \"--trim-n\", action=\"store_true\", help=\n \"Trim Galore '--trim-n' argument: remove 'N's from ends of the read.\"\n )\n trim_galore.add_argument(\n \"--clip-r1-5p\", type=int, metavar=\"<int>\", help=\n \"Trim Galore '--clip_R1' argument: remove basepairs from 5' end of \"\n \"read 1. Useful if there is a methylation bias at this end.\"\n )\n trim_galore.add_argument(\n \"--clip-r2-5p\", type=int, metavar=\"<int>\", help=\n \"Trim Galore '--clip_R2' argument: remove basepairs from 5' end of \"\n \"read 2. Useful if there is a methylation bias at this end.\"\n )\n trim_galore.add_argument(\n \"--clip-r1-3p\", type=int, metavar=\"<int>\", help=\n \"Trim Galore '--three_prime_clip_R1' argument: remove basepairs from \"\n \"3' end of read 1. Useful if there is a methylation bias at this end.\"\n )\n trim_galore.add_argument(\n \"--clip-r2-3p\", type=int, metavar=\"<int>\", help=\n \"Trim Galore '--three_prime_clip_R2' argument: remove basepairs from \"\n \"3' end of read 2. 
Useful if there is a methylation bias at this end.\"\n )\n\n # Optional Bismark tools arguments\n bismark = parser.add_argument_group(\"Bismark optional\")\n bismark.add_argument(\n \"--seed-mismatch\", type=int, choices=[0, 1], metavar=\"<0|1>\",\n default=DEFAULT_OF[\"bismark_bowtie2\"][\"seed_mismatch\"], help=\n \"Maximum number of mismatch allowed in a seed alignment: 0|1, \"\n \"by default %i.\" % DEFAULT_OF[\"bismark_bowtie2\"][\"seed_mismatch\"]\n )\n bismark.add_argument(\n \"--bowtie2-threads\", type=int, metavar=\"<int>\",\n default=DEFAULT_OF[\"bismark_bowtie2\"][\"threads\"], help=\n \"Bowtie2 '--threads' argument, by default %i\"\n % DEFAULT_OF[\"bismark_bowtie2\"][\"threads\"])\n bismark.add_argument(\n \"--meth-extract-threads\", type=int, metavar=\"<int>\",\n default=DEFAULT_OF[\"bismark_meth_extract\"][\"threads\"], help=\n \"bismark_methylation_extractor '--multicore' argument, by default %i.\"\n % DEFAULT_OF[\"bismark_meth_extract\"][\"threads\"]\n )\n\n # Optional Picard arguments\n picard = parser.add_argument_group(\"Picard optional\")\n picard.add_argument(\n \"--picard-jvm-args\", default=DEFAULT_OF[\"picard\"][\"jvm_args\"],\n metavar=\"<args>\", help=\n \"Java virtual machine arguments, e.g. to control starting and maximum \"\n \"heap size when running Picard, by default '%s'.\"\n % DEFAULT_OF[\"picard\"][\"jvm_args\"]\n )\n\n # Optional Samtools arguments\n samtools = parser.add_argument_group(\"Samtools optional\")\n samtools.add_argument(\n \"--samtools-threads\", type=int, metavar=\"<int>\",\n default=DEFAULT_OF[\"samtools\"][\"threads\"], help=\n \"Samtools '--threads' argument, by default %i\"\n % DEFAULT_OF[\"samtools\"][\"threads\"])\n\n # Parse the command line\n args = parser.parse_args()\n\n # For paired-end sequencing, check that the '{end}' wildcard is provided\n if args.paired_end is True and \"{end}\" not in args.fastq_regex:\n raise ValueError(\n \"The wildcard '{end}' is required in --fastq-regex argument when \"\n \"working with paired-end sequencing.\")\n\n # Set 'use_bismark_deduplicate' to False if RRBS data\n if args.rrbs is True:\n args.use_bismark_deduplicate = False\n\n # Check reference genome directory\n is_ref_genome_dir(args.ref_genome_dir, args.ref_build)\n\n # Convert the argparse object to a dict mapping <arg name> -> <val>\n kwargs = vars(args)\n\n return kwargs", "def build_args():\n parser = argparse.ArgumentParser(description='Validates, edits, or creates a 22 XML file')\n subparsers = parser.add_subparsers(help='sub-command help')\n \n add_branch_parser(subparsers)\n add_edit_parser(subparsers)\n add_finalize_parser(subparsers)\n add_grade_parser(subparsers)\n add_new_parser(subparsers)\n add_validate_parser(subparsers)\n add_validate_document_parser(subparsers)\n \n return parser.parse_args()", "def parse_args():\n # Define what commandline arguments can be accepted\n parser = argparse.ArgumentParser()\n parser.add_argument(Flags.CSV_DIR,metavar=\"CSV_DIRECTORY\", type=check_str_is_dir,\n help=\"Source directory containing Digikey CSV files\")\n parser.add_argument(Flags.PDF_DIR,metavar=\"PDF_DIRECTORY\", type=check_str_is_dir,\n help=\"Directory to save the PDF datasheets to\")\n parser.add_argument('--csv_pages', dest=Flags.CSV_PAGES,metavar=\"NUM_PAGES\", type=int, default=1,\n help=\"How many 500-row pages to download from Digikey (default 1)\")\n parser.add_argument('--fv_code', dest=Flags.FV_CODE,metavar=\"FV_CODE\", default='ffe002af', #op-amp\n help=\"The FV code of the part family on Digikey (default op-amps)\")\n 
parser.add_argument('--encrypted', dest=Flags.KEEP_ENCRYPTED, action='store_true', default=False, help=\"Do not filter encrypted PDFs\")\n parser.add_argument('--skip_csv', dest=Flags.SKIP_CSV_DL, action='store_true', default=False, help=\"Do not redownload the CSV.\")\n parser.add_argument('--skip_pdf', dest=Flags.SKIP_PDF_DL, action='store_true', default=False, help=\"Do not redownload the PDFs.\")\n parser.add_argument('--ocr', dest=Flags.KEEP_OCR, action='store_true', default=False, help=\"Do not filter PDFs that need OCR\")\n parser.add_argument('--duplicates', dest=Flags.KEEP_DUPLICATES, action='store_true', default=False, help=\"Do not filter duplicate PDFs (NOT IMPLEMENTED)\")\n parser.add_argument('--version', action='version', version='%(prog)s 0.0.0')\n args = vars(parser.parse_args())\n\n # TODO (lwhsiao): We should also add option to automatically select a parameterized\n # number of files and organize as train/test/dev\n\n Flags.parsed_args = args\n return args", "def cli_arguments():\n\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n usage=f\"\\n{Color.DETAIL}pdforce.py [-p <pdf>] [-w <wordlist>] [-e <encoding>] [-o <output>] [-c] [-h/--help]{Color.END}\",\n description=f\"{Color.EMPHASIS}{TITLE}\\nLightweight PDF password cracker. USE FOR LEGAL INTENTS ONLY.{Color.END}\",\n epilog=f\"{Color.EMPHASIS}Made by @poponealex - https://github.com/poponealex{Color.END}\",\n )\n\n parser.add_argument(\n \"-p\",\n \"--pdf\",\n type=str,\n help=f\"{Color.INFORMATION}Path to the pdf file.{Color.END}\",\n action=\"store\",\n default=\"\",\n )\n\n parser.add_argument(\n \"-w\",\n \"--wordlist\",\n type=str,\n help=f\"{Color.INFORMATION}Path to the wordlist.{Color.END}\",\n action=\"store\",\n default=\"\",\n )\n\n parser.add_argument(\n \"-e\",\n \"--encoding\",\n type=str,\n help=f\"{Color.INFORMATION}Specify an encoding for the wordlist (https://docs.python.org/3/library/codecs.html#standard-encodings). The default encoding is platform dependent. Use 'iso8859_1' for rockyou. 
{Color.END}\",\n action=\"store\",\n default=None,\n )\n\n parser.add_argument(\n \"-o\",\n \"--output\",\n help=f\"{Color.INFORMATION}Output the cracked password to a new file.{Color.END}\",\n action=\"store\",\n )\n\n parser.add_argument(\n \"-c\",\n \"--copy\",\n help=f\"{Color.INFORMATION}Copy the password to the clipboard.{Color.END}\",\n action=\"store_true\",\n )\n\n return parser.parse_args()", "def add_options(cls, parser):\n\n group = parser.add_argument_group(\"Transform/Docify\")\n group.add(\n \"--doc_length\",\n \"-doc_length\",\n type=int,\n default=200,\n help=\"Number of tokens per doc.\",\n )\n group.add(\n \"--max_context\",\n \"-max_context\",\n type=int,\n default=1,\n help=\"Max context segments.\",\n )", "def set_args() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser( # type: argparse.ArgumentParser\n description=r'''\n -----------------------------------\n < Pull DNA barcodes from FASTQ files >\n -----------------------------------\n /\n \\ ______/ V`-, /\n } /~~\n /_)^ --,r'\n |b |b\n ''',\n formatter_class=argparse.RawDescriptionHelpFormatter,\n add_help=False\n )\n # Arguments for verbosity and logging\n parser.add_argument( # Verbosity\n '-v',\n '--verbosity',\n dest='verbosity',\n type=str.lower,\n choices=_VERBOSITY_LEVELS,\n default=_VERBOSITY_DEFAULT,\n required=False,\n metavar='verbosity',\n help=\"Set the verbosity level, choose from '%s'; defaults to '%s'\" % (\"', '\".join(_VERBOSITY_LEVELS), _VERBOSITY_DEFAULT)\n )\n parser.add_argument( # Number of cores\n '--parallel',\n dest='num_cores',\n type=_num_cores,\n const=None,\n default=1,\n nargs='?',\n required=False,\n metavar='num jobs',\n help=\"Run %(prog)s in parallel; if passed, can optionally specify the number of jobs to run at once\"\n )\n parser.add_argument( # Output directory\n '-o',\n '--output-directory',\n dest='outdirectory',\n type=str,\n default=_OUTDIR_DEFAULT,\n required=False,\n metavar='output directory',\n help=\"Choose where all output files are to be stored; defaults to '%s'\" % _OUTDIR_DEFAULT\n )\n # Input arguments\n inputs = parser.add_argument_group(\n title='input arguments',\n description='Provide inputs for %(prog)s'\n )\n inputs.add_argument( # Forward FASTQ\n '-f',\n '--forward-fastq',\n dest='forward',\n type=str,\n default=None,\n required=True,\n metavar='FORWARD FASTQ',\n help=\"Provide a filepath for the forward/single FASTQ file\"\n )\n inputs.add_argument( # Reverse FASTQ\n '-r',\n '--reverse-fastq',\n dest='reverse',\n type=str,\n default=None,\n required=False,\n metavar='REVERSE FASTQ',\n help=\"Provide a filepath for the optional reverse FASTQ file\"\n )\n inputs.add_argument( # Sample sheet\n '-s',\n '--sample-sheet',\n dest='sample_sheet',\n type=str,\n default=None,\n required=True,\n metavar='SAMPLE SHEET',\n help=\"Provide a filepath for the sample sheet\"\n )\n inputs.add_argument( # Barcodes file\n '-b',\n '--barcodes',\n dest='barcodes',\n type=str,\n required=True,\n default=None,\n metavar='BARCODES',\n help=\"Provide a filepath for the barcodes CSV file\"\n )\n barcodes = parser.add_argument_group(\n title='barcode options',\n description=\"Set parameters for barcode demultiplexing\"\n )\n barcodes.add_argument( # Number of errors allowed\n '-e',\n '--error',\n dest='error',\n type=int,\n default=_ERROR_DEFAULT,\n required=False,\n metavar='ERROR',\n help=\"This is how many mismatches in the barcode we allowed before rejecting, defaults to %s\" % _ERROR_DEFAULT\n )\n return parser", "def __init__(self):\n\n # Set-up the parser 
object.\n self.parser = argparse.ArgumentParser(description='Boolean query documents using inverted index', formatter_class=RawTextHelpFormatter)\n\n # Option to load the desired documents from a CSV file.\n self.parser.add_argument('documents', type=str, help='''Constructs an inverted index from a CSV file. Required format: One document per line. First line is discarded.\n<id>\\\\t<url>\\\\t<pub_date>\\\\t<title>\\\\t<document_content>\\\\n\n ''')\n\n self.parser.add_argument('-q',metavar='query', type=str, help='Boolean query', action='append')", "def parse_args(self):\n #-----------------------------------------------------------------------\n #This code is based on code from the KR Toolkit by Christian Muise\n #URL: http://code.google.com/p/krtoolkit/\n try:\n argv, opts, flags = sys.argv[1:], {}, []\n while argv:\n if argv[0][0:2] == '--':\n flags.append(argv[0])\n argv = argv[1:]\n elif argv[0][0] == '-':\n opts[argv[0]] = argv[1]\n argv = argv[2:]\n else:\n raise InputException(\"Badly constructed arg: \" +argv[0])\n except IndexError:\n raise InputException(\"Badly constructed arg: \" + argv[0])\n #-----------------------------------------------------------------------\n for flag in flags:\n if flag in self.program_flags:\n vars(self)[self.program_flags[flag].var_name] = True\n if self.program_flags[flag].function:\n self.program_flags[flag].function(self)\n else:\n raise InputException(\"Invalid flag: \" + flag)\n \n if not self.quiet:\n min_width = max(len('Flags:'),\n max(map(lambda x : len(x.description),\n self.program_args.itervalues()))) + 1\n if len(flags) == 0:\n print \"{:<{}} {}\".format('Flags:', min_width,'<None>')\n else:\n print \"{:<{}} {}\".format('Flags:', min_width,\n ', '.join(filter(lambda f : f in flags,\n self.program_flags)))\n \n for arg in opts:\n if arg not in self.program_args:\n raise InputException(\"Invalid arg: \" + arg)\n \n for arg in self.program_arg_order:\n arg_def = self.program_args[arg]\n if arg not in opts:\n if arg_def.needed:\n raise InputException(\"Error needed arg is missing: \" + arg)\n vars(self)[arg_def.var_name] = arg_def.default_value\n else:\n if arg_def.validator == None:\n vars(self)[arg_def.var_name] = opts[arg]\n else:\n vars(self)[arg_def.var_name] = arg_def.validator(opts[arg],\n arg_def.validator_args)\n if not self.quiet:\n print \"{:<{}} {}\".format(arg_def.description + ':', min_width,\n vars(self)[arg_def.var_name])", "def buildDocumentation():\n helptext = 'usage: build_doc.py <output format> <type of documentation>' \\\n '\\n - html: for html output' \\\n '\\n - pdf: for pdf output' \\\n '\\n\\n - all: complete documentation' \\\n '\\n - dev: only developer documentation' \\\n '\\n - user: only user documentation'\n if len(sys.argv) != 3:\n print helptext\n sys.exit(1)\n\n if sys.argv[1] not in ['pdf', 'html']:\n print helptext\n sys.exit(1)\n if sys.argv[2] not in ['all', 'dev', 'user']:\n print helptext\n sys.exit(1)\n\n copyfile('docs/index_%s.rst.template' % sys.argv[2], 'index.rst') # copy main file into root directory\n os.system('sphinx-build -b %s -c docs -D master_doc=index . 
docs/output/%s/%s' % (sys.argv[1], sys.argv[1], sys.argv[2]))\n os.remove('index.rst') # delete config file from root directory", "def command_line_arguments():\n\n try:\n parser = argparse.ArgumentParser(description='Log Handler/Cleaner/Copier for Idemia DocAuth')\n\n # Add required arguments.\n parser.add_argument('action', choices=['clean', 'download'], type=str, help='clean or download')\n\n # Parse the arguments\n args = parser.parse_args()\n\n return args\n\n except Exception as err:\n print(err)\n return", "def add_args(self): \n self.parser.add_argument('-u', '--username',\n default=None,\n help='the username for mongoDB (Default: None)')\n\n self.parser.add_argument('-p', '--password',\n default=None,\n help='the password for mongoDB (Default: None)')\n\n self.parser.add_argument('-d', '--database',\n default='grits',\n help='the database for mongoDB (Default: grits)')\n\n self.parser.add_argument('-m', '--mongohost',\n default='localhost',\n help='the hostname for mongoDB (Default: localhost)')\n\n self.parser.add_argument('-f', '--force', \n action='store_true',\n help='do not require confirmation to create indexes (Default: False)')", "def build_args(self, parser):\n raise NotImplementedError('build_args() must be implemented')", "def build_argparser(self):\n firstletters = ''\n for name, (categ, rest) in self.data.items():\n firstletters += name[0]\n\n self.argparser = argparse.ArgumentParser(\n usage='m3 x {} [arguments]'.format(self.name))\n\n for name, (categ, rest) in self.data.items():\n argargs = {}\n if rest.get('help'):\n argargs['help'] = rest['help']\n if rest.get('type') == 'flag':\n argargs['action'] = 'store_true'\n argargs['required'] = False\n elif 'default' not in rest:\n argargs['required'] = True\n if firstletters.count(name[0]) == 1:\n self.argparser.add_argument('-' + name[0],\n '--' + name, **argargs) # noqa: T484\n else:\n self.argparser.add_argument('--' + name, **argargs) # noqa:T484", "def get_command_line_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'docx_path', help=\"path to directory containing docx files\")\n parser.add_argument(\n '-sn', '--save_name', help=\"name of the directory containing the marked text files\")\n args = parser.parse_args()\n\n return args", "def setup_parser(self):\n parser = argparse.ArgumentParser(description=DESCRIPTION)\n parser.add_argument('words', metavar='W', nargs='+', help=POSITIONAL_HELP)\n parser.add_argument('-a','--any', dest=\"search_funct\", action=\"store_const\", \n const='any', default='all', help=SEARCH_HELP)\n parser.add_argument('-o','--only-id', action='store_true', help=ID_HELP)\n parser.add_argument('-u', '--update', action='store_true', help=UPDATE_HELP)\n return parser", "def _ParseCommandArguments():\n arg_parser = argparse.ArgumentParser()\n arg_parser.usage = __doc__\n\n arg_parser.add_argument('--download-dir',\n type=str,\n required=True,\n help='Directory into which corpora are downloaded.')\n arg_parser.add_argument('--build-dir',\n required=True,\n type=str,\n help='Directory where fuzzers were built.')\n args = arg_parser.parse_args()\n return args", "def libdoc_cli(arguments):\n LibDoc().execute_cli(arguments)", "def prepare_argparser():\n usage = \"\"\"\n %(prog)s index path [options] To create pre-calculated storage for the path given\n %(prog)s reform query [options] To reformulate the query\n \"\"\"\n description = \"%(prog)s -- Reformulates search queries using code training set\"\n\n optparser = argparse.ArgumentParser(prog=\"reformulation\", 
description=description, usage=usage)\n\n group = optparser.add_argument_group(\"index\",\n \"index creates pre-calculated storage for futher reformulation.\")\n group.add_argument(\"-r\", \"--recursive\", dest=\"recursive\", action=\"store_true\",\n help=\"Recursive scan of folders to index. DEFAULT: False\",\n default=False)\n\n group.add_argument(\"-d\", \"--dump\", dest=\"dump\", action=\"store_true\",\n help=\"Dumps corpus to a file and loads it by word2vec. DEFAULT: False\",\n default=False)\n\n group.add_argument(\"-s\", \"--skip\", dest=\"skip\", action=\"store_true\",\n help=\"Skips processing and takes existing corpus. DEFAULT: False\",\n default=False)\n\n groupReform = optparser.add_argument_group(\"reform\",\n \"reformulates te query using the index.\")\n\n groupReform.add_argument(\"-a\", \"--all\", dest=\"all\", action=\"store_true\", default=False,\n help=\"Reformulate using all ASTs (not public API only). DEFAULT: false\")\n\n optparser.add_argument(\"-i\", \"--interactive\", dest=\"interactive\", action=\"store_true\",\n help=\"Interactive mode for reformulation. DEFAULT: False\",\n default=False)\n optparser.add_argument(\"-f\", \"--folder\", dest=\"index_folder\",\n help=\"Index folder. DEFAULT: index\",\n default=\"index\")\n\n optparser.add_argument(\"-n\", \"--name\", dest=\"index_name\",\n help=\"Index name. DEFAULT: index\",\n default=\"index\")\n\n # optparser.add_argument(\"-h\", \"--help\", action=\"help\", help=\"show this help message and exit.\")\n optparser.add_argument(\"command\", help=\"command to run [index|reform]\")\n optparser.add_argument(\"arg\", help=\"Command argument [path|query]\")\n optparser.add_argument(\"-l\", \"--lang\", dest=\"lang\",\n help=\"Language. DEFAULT: Java\",\n default=\"Java\")\n optparser.add_argument(\"-m\", \"--methods\", dest=\"methods\", action=\"store_true\",\n help=\"Use methods instead of classes for reformulation. DEFAULT: False\",\n default=False)\n optparser.add_argument(\"--verbose\", dest=\"verbose\", type=int, default=2,\n help=\"Set verbose level. 0: only show critical message, 1: show additional warning message, 2: show process information, 3: show debug messages. DEFAULT:2\")\n\n return optparser", "def main(*, build, subdir, description, supports_modules=False,\n supports_quick=False):\n parser = argparse.ArgumentParser(description=description)\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\n \"--serve\", action='store_true',\n help=\"Serve the documentation on the given PORT for easy preview.\")\n group.add_argument(\n \"--out_dir\", type=str, metavar=\"DIR\",\n help=\"Generate the documentation to the given output directory.\"\n \" The DIR must be an absolute path.\"\n \" If DIR already exists, then it must be empty.\"\n \" (For regression testing, the DIR can be the magic value <test>,\"\n \" in which case a $TEST_TMPDIR subdir will be used.)\")\n parser.add_argument(\n \"--port\", type=int, metavar=\"PORT\", default=8000,\n help=\"Use a non-default PORT when serving for preview.\")\n parser.add_argument(\n \"--verbose\", action=\"store_true\",\n help=\"Echo detailed commands, progress, etc. to the console\")\n if supports_modules:\n parser.add_argument(\n \"module\", nargs=\"*\",\n help=\"Limit the generated documentation to only these modules and \"\n \"their children. When none are provided, all will be generated. 
\"\n \"For example, specify drake.math or drake/math for the C++ \"\n \"module, or pydrake.math or pydrake/math for the Python module.\")\n if supports_quick:\n parser.add_argument(\n \"--quick\", action=\"store_true\", default=False,\n help=\"Omit from the output items that are slow to generate. \"\n \"This yields a faster preview, but the output will be incomplete.\")\n args = parser.parse_args()\n if args.verbose:\n global _verbose\n _verbose = True\n curried_build = build\n if supports_modules:\n canonicalized_modules = [\n x.replace('/', '.')\n for x in args.module\n ]\n curried_build = functools.partial(\n curried_build, modules=canonicalized_modules)\n if supports_quick:\n curried_build = functools.partial(\n curried_build, quick=args.quick)\n if args.out_dir is None:\n assert args.serve\n _do_preview(build=curried_build, subdir=subdir, port=args.port)\n else:\n _do_generate(build=curried_build, out_dir=args.out_dir,\n on_error=parser.error)", "def func_update_arguments(self, arg_raw ):\n\n arg_raw.prog = \"DISCASM\"\n arg_raw.description = \"extracts genome-aligned discordant and unmapped reads, and de novo assembles them\"\n\n arg_raw.add_argument(\"--chimeric_junctions\", dest=\"chimeric_junctions\", required=True, help=\"STAR Chimeric.out.junction file\")\n arg_raw.add_argument(\"--aligned_bam\", dest=\"aligned_bam_filename\", required=False, help=\"aligned bam file from your favorite rna-seq alignment tool\")\n arg_raw.add_argument(\"--left_fq\", dest=\"left_fq_filename\", required=True, help=\"left fastq file\")\n arg_raw.add_argument(\"--right_fq\", dest=\"right_fq_filename\", required=True, help=\"right fastq file\")\n arg_raw.add_argument(\"--out_dir\", dest=\"str_out_dir\", required=True, help=\"output directory\")\n arg_raw.add_argument(\"--denovo_assembler\", dest=\"denovo_assembler\", required=True, help=\"de novo assembly method: Trinity|Oases|OasesMultiK\")\n arg_raw.add_argument(\"--add_trinity_params\", dest=\"add_trinity_params\", required=False, help=\"any additional parameters to pass on to Trinity if Trinity is the chosen assembler.\")\n arg_raw.add_argument(\"--normalize_reads\", default=False, action='store_true', help='perform in silico normalization prior to de novo assembly')\n\n return(arg_raw)", "def xlslisp_compile_argdoc():\n\n doc = __main__.__doc__\n\n prog = doc.strip().splitlines()[0].split()[1]\n description = list(_ for _ in doc.strip().splitlines() if _)[1]\n epilog_at = doc.index(\"dependencies:\")\n epilog = doc[epilog_at:]\n\n parser = argparse.ArgumentParser(\n prog=prog,\n description=description,\n add_help=True,\n formatter_class=argparse.RawTextHelpFormatter,\n epilog=epilog,\n )\n\n parser.add_argument(\"file\", metavar=\"FILE\", help=\"the xlsx file to read\")\n parser.add_argument(\"-f\", \"--force\", action=\"count\", help=\"ask less questions\")\n\n exit_unless_main_doc_eq(parser)\n\n return parser", "def _input_args(self, args: List[str]):\n assert self._call is None, f\"You need to specify all inputs before calling `{self._call}`\"\n assert isinstance(args, list), f\"{args} is a {type(args)}, expected a list of strings!\"\n assert len(args) > 0, f\"Expected a non-empty argument list!\"\n assert all(isinstance(a, str) for a in args), f\"Expected a list of strings, not {[type(a) for a in args]}!\"\n # all arguments could potentially be filenames that we write to, so let's just add them\n self._write_files |= set(args)\n # add dummy argument zero\n args = [\"\"] + args\n # allocate args in memory\n arg_strings = [self._str(a, \"arg\") 
for a in args]\n # allocate a pointer array for argv\n self.data += [f\"argv: .word \" + \" \".join(\"0\" for _ in range(len(args)))]\n # load argc and argv\n self._args += [\"\", \"# argument count in a0\", f\"li a0, {len(args)}\"]\n self._args += [\"\", \"# load pointers to argument strings into argv\", f\"la a1, argv\"]\n for ii, aa in enumerate(arg_strings):\n self._args += [f\"la t1, {aa}\", f\"sw t1, {ii * 4}(a1)\"]", "def parse_arguments():\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--accessions\", help=\"A json file with old/new family mapppings\")\n parser.add_argument(\"--add-header\", help=\"Print descriptive header\",\n action=\"store_true\", default=False)\n parser.add_argument(\"--add-links\", help=\"Creates hyperlinks to available Rfam html content\",\n action=\"store_true\", default=False)\n return parser", "def prepare_arg_parser():\n\tparser = argparse.ArgumentParser(\n\t\tdescription=\"\"\"Ajout des ponctuations réelles dans un xml de \n\t\t refbibs (NB lent: ~ 2 doc/s sur 1 thread)\"\"\",\n\t\tusage=\"\"\"ragreage.py \n\t\t -x ech/tei.xml/oup_Human_Molecular_Genetics_ddp278.xml\n\t\t -p ech/pdf/oup_Human_Molecular_Genetics_ddp278.pdf\n\t\t -m [bibzone|biblines|bibfields|authornames]\"\"\",\n\t\tepilog=\"- © 2014-15 Inist-CNRS (ISTEX) romain.loth at inist.fr -\"\n\t\t)\n\t\n\t\n\tparser.add_argument('-x','--xmlin',\n\t\tmetavar='path/to/xmlfile',\n\t\thelp=\"\"\"\n\t\tpath to a TEI.xml with citations in <biblStruct> xml format \n\t\t(perhaps to be created from native XML by a call like \n\t\t`saxonb-xslt -xsl:tools/Pub2TEI/Stylesheets/Publishers.xsl\n\t\t-s:exemples_RONI_1513/rsc_1992_C3_C39920001646.xml`)'\"\"\",\n\t\ttype=str,\n\t\trequired=True,\n\t\taction='store')\n\t\t\n\t\n\t\n\tparser.add_argument('-p','--pdfin',\n\t\tmetavar='path/to/pdffile',\n\t\thelp=\"\"\"path to a pdf file of the same text, for attempted\n\t\t pdftottext and citation regexp match\"\"\",\n\t\ttype=str,\n\t\tdefault=None , # cf juste en dessous\n\t\taction='store')\n\t\n\tparser.add_argument('-t','--txtin',\n\t\tmetavar='path/to/txtfile',\n\t\thelp=\"\"\"pdfin can be replaced by a path to a txt flow.\n\t\tThis input text must be very close to the xml content\n\t\t(or segment thereof, in accordance with a chosen -m type)\"\"\",\n\t\ttype=str,\n\t\tdefault=None , # cf juste en dessous\n\t\taction='store')\n\t\t\n\t\n\t\n\tparser.add_argument('-m','--model-type',\n\t\tmetavar='name-of-model',\n\t\thelp=\"\"\"format output as a valid tei's 'listBibl' (default)\n\t\t or tailored to a Grobid crf model pseudotei input among:\n\t\t {'bibzone', 'biblines', 'bibfields', 'authornames'}\"\"\",\n\t\ttype=str,\n\t\tdefault='listBibl' ,\n\t\taction='store')\n\t\n\t\n\tparser.add_argument('-d','--debug',\n\t\tmetavar=1,\n\t\ttype=int,\n\t\thelp='logging level for debug info in [0-3]',\n\t\tdefault=0,\n\t\taction='store')\n\t\n\t\t\n\tparser.add_argument('-r', '--remainder',\n\t\tdest='mask',\n\t\thelp='show mask after matches instead of normal output',\n\t\taction='store_true')\n\t\n\treturn parser", "def parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('-u', '--urls_dirpath', type=unicode)\n parser.add_argument('-r', '--resources_dir', type=unicode)\n parser.add_argument('-t', '--total_docs', type=int)\n parser.add_argument('-m', '--mapping', type=unicode,\n help='File with the yago to lkif mapping')\n\n return parser.parse_args()", "def build_arg_parser():\n\n main = ArgumentParser(description='AMFinder command-line arguments.',\n allow_abbrev=False,\n 
formatter_class=RawTextHelpFormatter)\n\n subparsers = main.add_subparsers(dest='run_mode', required=True,\n help='action to be performed.')\n\n _ = training_subparser(subparsers)\n _ = prediction_subparser(subparsers)\n _ = diagnostic_subparser(subparsers)\n\n return main", "def _prepare(self):\n # Customize commandline arguments\n parser = argparse.ArgumentParser()\n self.initArgumentParser(parser, defaults=self.default_binding_overrides)\n self.__options = parser.parse_args()\n self.__bindings.update(args_util.parser_args_to_bindings(self.__options))\n\n self.start_logging()", "def __init__(self, *args, **kwargs):\n self.docs_command = DocsCommandContext()\n command_callback = self.docs_command.command_callback\n super().__init__(name=COMMAND_NAME, help=HELP_TEXT, callback=command_callback)" ]
[ "0.69166946", "0.6655604", "0.65268964", "0.63026917", "0.6240775", "0.6204276", "0.616624", "0.6138024", "0.60846776", "0.60733485", "0.60671383", "0.604279", "0.6033722", "0.60330325", "0.60208917", "0.5987356", "0.598559", "0.59679663", "0.59497654", "0.5935443", "0.59330064", "0.59020364", "0.58893657", "0.58749276", "0.5869787", "0.5864109", "0.5861234", "0.5844351", "0.583736", "0.5836456" ]
0.67095804
1
Read csv to list
def read_csv_to_list(csv_path):
    with open(csv_path, newline="") as f:
        reader = csv.reader(f)
        data = list(reader)
    return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_csv():", "def read_csv(csv_file):\r\n with open(csv_file, \"r\") as files:\r\n data = csv.reader(files)\r\n return list(data)", "def read_csv(csv_file_path):\n res = [] #list\n # f = open(csv_file_path) #read file\n with open(csv_file_path,\"r\") as f:", "def read_file_to_list(input_file):\n with open(input_file) as csvfile:\n csv_rows = csv.reader(csvfile)\n\n data = []\n for row in csv_rows:\n data.append(row)\n\n return data", "def load_csv(csv_path):\n with open(csv_path, newline='') as csv_file:\n csv_reader = csv.reader(csv_file)\n csv_list = [row for row in csv_reader]\n return csv_list", "def csv_to_list(csv_file, delimiter=','):\n with open(csv_file, 'r') as csv_con:\n reader = csv.reader(csv_con, delimiter=delimiter)\n return list(reader)", "def _read_csv(cls, input_file, quotechar=None):\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\n return list(csv.reader(f, delimiter=\",\", quotechar=quotechar))", "def _read_csv(cls, input_file, quotechar=None):\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\n return list(csv.reader(f, delimiter=\",\", quotechar=quotechar))", "def read_csv_to_list(in_file, headless=True, delim='\\t'):\n ret_list=list()\n with open(in_file,'r') as csv_file:\n my_reader = csv.reader(csv_file, delimiter=delim) \n if headless:\n next(my_reader)\n for row in my_reader:\n ret_list.append(list(row))\n return(ret_list)", "def read_csv(filename):\n # Implement this function\n file = open(filename)\n wrapper = csv.reader(file)\n result = []\n for rpos in wrapper: \n result = result + [rpos]\n file.close() \n return result", "def readCSVIntoList(link):\r\n \r\n tweets_file = csv.reader(open(link, \"rt\", encoding=\"cp1252\"))\r\n tweets_list = list(tweets_file) \r\n\r\n return tweets_list", "def load_data_from_csv(csv_file):\n list=[]\n\n with open(csv_file) as csv_1:\n csv_out = csv.reader(csv_1) \n next(csv_out)\n for rows in csv_out: \n if len(rows) != 0:\n list.append([rows[0],int(rows[1]),int(rows[2])])\n \n return (list)", "def _read_csv(cls, input_file, quotechar=None):\r\n with open(input_file, \"r\", encoding=\"utf-8\") as f:\r\n reader = csv.reader(f, delimiter=\",\", quotechar=quotechar)\r\n lines = []\r\n for line in reader:\r\n if sys.version_info[0] == 2:\r\n line = list(unicode(cell, 'utf-8') for cell in line)\r\n lines.append(line)\r\n return lines", "def listparse(csvfilename):\r\n output = []\r\n with open(csvfilename, 'r', newline = '') as csvfile:\r\n csvreader = csv.reader(csvfile, skipinitialspace = True)\r\n for row in csvreader:\r\n output.append(row)\r\n return output", "def _read_csv(cls, input_file, quotechar=None):\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\n return list(csv.reader(f, delimiter=\"\\n\", quotechar=quotechar))", "def readCSV(self):\n\n content = []\n with open(self.filename) as file:\n sn = csv.Sniffer()\n sn.preferred = [self.delimiter]\n try:\n dialect = sn.sniff(file.read(1024))\n except csv.Error:\n if not file.endswith(\"csv\"):\n self.delimiter = \"\\t\"\n file.seek(0)\n reader = csv.reader(file, delimiter=self.delimiter)\n dialect = reader.dialect\n file.seek(0)\n reader = csv.reader(file, dialect)\n rownr = 0\n\n for row in reader:\n\n if rownr == 0:\n header = row\n else:\n # print(row)\n content.append(row)\n rownr += 1\n\n file.close()\n\n return content.copy()", "def read_csv(filename):\n with open(filename) as csv:\n return [csv_line.strip().split(',') for csv_line in csv]", "def read_csv(file_name):\n final_list = []\n reader = csv.reader(open(file_name, 
'rb'), delimiter=',')\n for x in reader:\n final_list.append(x)\n return final_list", "def parse_csv2list_upload(file_name):\n with open(file_name) as f:\n records = csv.reader(f)\n csv_list = [[j.strip() for j in record] for record in records]\n return csv_list", "def read_csv(path):\n csv_data =[]\n \n with open(path, 'r') as csv_file:\n csv_read = csv.reader(csv_file, dialect='excel')\n for row in csv_read:\n csv_data.append(row)\n\n return(csv_data)", "def read_from_csv(file):\n with open(file) as f:\n next(f)\n data = []\n for line in csv.reader(f, delimiter='\\t'):\n data.append(list(line))\n return data", "def readCSV(self, csvFileName):\n\tdata = []\n\twith open(csvFileName) as csvFile:\n\t\treader = csv.reader(csvFile)\n\t\tfor row in reader:\n\t\t\tdata.append(row)\n\treturn data", "def read_csv(path):\r\n output = []\r\n for row in csv.DictReader(open(path)):\r\n output.append(row)\r\n return output", "def csvread(infile):\n out_csv = []\n errors = []\n index = -1\n p = LineParser(csv=True)\n for line in infile:\n index += 1\n try:\n values = p.feed(line)\n except ListQuoteError as e:\n values = []\n e.line = line\n e.index = index\n errors.append(e)\n #\n out_csv.append(values)\n #\n if errors:\n e = CSVError(\"Parsing CSV failed. See 'errors' attribute.\")\n e.csv = out_csv\n e.errors = errors\n raise e\n return out_csv", "def read_from_csv(self, csv_file):\n data = []\n with codecs.open(csv_file, 'r', encoding='utf-8') as csvfile:\n header = None\n for i, line in enumerate(csvfile):\n line_split = [x.strip() for x in line.split(\"|\")]\n line_data = [x for x in line_split if len(x) > 0]\n if i == 0:\n header = line_data\n else:\n entry = {}\n for i,datum in enumerate(line_data):\n entry[header[i]] = datum\n data.append(entry)\n print \"Loaded %d entries from %s\" % (len(data), csv_file)\n return data", "def read_csv(path):\r\n data = []\r\n csv_file = open(path)\r\n for row in csv.DictReader(csv_file):\r\n data.append(row)\r\n csv_file.close() \r\n return data", "def read_sample_csv(self):\n f = open('sample.csv')\n lines = f.readline()\n fields = lines.split(',')\n fieldnames_lst = [i.strip() for i in fields]\n f.close()\n return fieldnames_lst", "def open_csv(file):\n\n\ttmp = [] # initialise the list\n\twith open(file, 'r') as f:\n\t\treader = csv.reader(f)\n\t\tfor row in reader:\n\t\t\ttmp.append(row) # add row to list\n\n\treturn tmp", "def read_csv(csvfilename):\n rows = []\n with open(csvfilename, encoding='utf-8') as csvfile:\n file_reader = csv.reader(csvfile)\n for row in file_reader:\n rows.append(row)\n return rows", "def read_csv(path):\n output = []\n for row in csv.DictReader(open(path)):\n output.append(row)\n return output" ]
[ "0.7983896", "0.7966335", "0.782238", "0.7816782", "0.7676543", "0.7659375", "0.759841", "0.759841", "0.7496229", "0.74867773", "0.74808073", "0.74660474", "0.7411088", "0.7405299", "0.7381975", "0.73708904", "0.7361922", "0.73237014", "0.73150474", "0.7302132", "0.72732013", "0.7268923", "0.72669244", "0.72514164", "0.7249331", "0.72493184", "0.72483593", "0.72421044", "0.7225781", "0.72196686" ]
0.8154642
0
Saves labels to csv
def save_labels_to_disk(labels: list, label_path: str):
    with open(label_path, "w") as result_file:
        wr = csv.writer(result_file, dialect="excel")
        wr.writerows(labels)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def labels2csv(labels, csv_path):\n with open(csv_path, \"w\") as file:\n file.write(\"id,label\\n\")\n for i, label in enumerate(labels):\n file.write(\"{},{}\\n\".format(i, label))", "def write_csv_label(labels, csv_file):\n with open(csv_file, 'w') as f:\n writer = csv.writer(f)\n for key, value in labels.items():\n writer.writerow([key, value])", "def write_labels_txt(labels: pd.DataFrame, path: str):\n\n # If the file containing the labels already exist, delete it\n if os.path.isfile(path):\n print('\\nA labels file already exists at {}, deleting it...'.format(path))\n os.remove(path)\n\n # Write the names of the labels on a txt\n labels.to_csv(path, header=None, index=None, sep=' ', mode='a')\n\n print('\\nThe labels file has been written at', path)", "def save_csv(self, filename): # DONE\n self.data.to_csv(filename)", "def SaveLabels(filepath, labels):\n # 1) Create a string with all the text to be stored\n text = '\\n'.join(labels)\n\n # 2) Open the datafile and save the text\n with open(filepath, 'w') as outfile:\n outfile.write(text)", "def save_ndarray_to_csv(docs_array, labels_array, csv_file):\n processed_array = np.vstack([labels_array, docs_array]).T\n df = pd.DataFrame(data=processed_array)\n df.to_csv(csv_file, index=False, header=['label', 'text'])", "def write_csv(image_names, image_classes, filename):\n with open(filename, 'w', newline='') as file:\n writer = csv.writer(file)\n writer.writerow(['filename', 'label'])\n writer.writerows(zip(image_names, image_classes))", "def save_to_csv(self):\n path = partial(os.path.join, 'datasets')\n save_name = self.name.lower().replace(' ', '_')\n self.df['values'].sum(axis=1).to_csv(path('{0}_values.csv'.format(save_name)))\n self.df['allocations'].to_csv(path('{0}_allocations.csv'.format(save_name)))\n self.df['returns'].to_csv(path('{0}_returns.csv'.format(save_name)))\n self.trades.to_csv(path('{0}_trades.csv'.format(save_name)))", "def save_csv(self, filename: str, type='n', **args):\n if type == 'n':\n df = self.export_nodes()\n else:\n df = self.export_edges()\n df.to_csv(filename, index=False)", "def saveCSV(self):\n filename=tkFileDialog.asksaveasfilename(defaultextension='.csv',\n initialdir=os.getcwd(),\n filetypes=[(\"csv\",\"*.csv\"),(\"All files\",\"*.*\")])\n if not filename:\n return\n for m in self.matrices:\n matrix = self.matrices[m] \n if matrix != None: \n c=matrix.csvRepresentation()\n f=open(filename,'w')\n f.write(c)\n f.close()\n return", "def write_labels():\n with open('../data/labels.txt', 'w') as labels_file:\n labels = generate_labels()\n labels_file.write('\\n'.join(labels))", "def _write_labels(self, labels: List[str], labels_path: Path):\n labels_path.write_text(escape_line_delimited_texts(labels))", "def _write_labels(self, labels: List[str], labels_path: Path):\n labels_path.write_text(escape_line_delimited_texts(labels))", "def writeFeatures(features, labels, output_filename):\n\twith open(output_filename, 'w') as csvfile:\n\t fieldnames = features[0].keys()\n\t fieldnames.append('label')\n\t writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n\t writer.writeheader()\n\t for i in range(len(features)):\n\t \tfeatures[i]['label'] = labels[i]\n\t \twriter.writerow(features[i])\n\n\treturn", "def write_labels_file(labels_to_class_names, dataset_dir,\n filename='labels.txt'):\n labels_path = os.path.join(dataset_dir, filename)\n with open(labels_path, 'w') as f:\n for label in labels_to_class_names:\n class_name = labels_to_class_names[label]\n f.write('%d:%s\\n' % (label, class_name))", 
"def __save_datasets(self):\n self.train.to_csv('{}/{}/{}'.format(path_to_train_set, img_format, 'train.csv'))\n self.valid.to_csv('{}/{}/{}'.format(path_to_valid_set, img_format, 'valid.csv'))\n self.test.to_csv('{}/{}/{}'.format(path_to_test_set, img_format, 'test.csv'))", "def write_label_file(labels_to_class_names, dataset_dir, filename='labels.txt'):\n labels_filename = os.path.join(dataset_dir, filename)\n with tf.gfile.Open(labels_filename, 'w') as f:\n for label in labels_to_class_names:\n class_name = labels_to_class_names[label]\n f.write('%d:%s\\n' % (label, class_name))", "def _write_input(\n self, X: List[str], labels: Optional[List[List[str]]], input_path: Path\n ):\n df = pd.DataFrame({\"Text\": X})\n\n if labels is not None:\n df[\"Label\"] = labels\n\n df.to_csv(input_path, sep=\"\\t\", index=False)", "def _write_input(\n self,\n X: List[str],\n labels: Optional[Union[List[str], List[List[str]]]],\n input_path: Path,\n ):\n df = pd.DataFrame({\"Text\": X})\n\n if labels is not None:\n df[\"Label\"] = labels\n\n df.to_csv(input_path, sep=\"\\t\", index=False)", "def write_label_file(labels_to_class_names, labels_filename):\n with tf.gfile.Open(labels_filename, \"w\") as f:\n for label in labels_to_class_names:\n class_name = labels_to_class_names[label]\n f.write('%d:%s\\n'%(label, class_name))", "def save(self, cat):\n with open(self.filename_csv, 'w', newline='') as csvfile:\n csv_file = csv.writer(csvfile, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n csv_file.writerow(['name', 'id', 'parent_id'])\n for row in cat:\n csv_file.writerow(row)", "def _save_data(self):\n self.data.to_csv('data/c&le/{}'.format(self.name))", "def output_classLabel_to_txt(save_path):\n file_obj = open(save_path,'w')\n length = len(class_label)\n for i in range(0,length):\n line = '%d:%s'%(i,class_label[i])\n file_obj.writelines(line+'\\n')\n return True", "def export_labels(self, export_dir: str, label_filename: str = 'labels.txt'):\n if not tf.io.gfile.exists(export_dir):\n tf.io.gfile.makedirs(export_dir)\n\n label_filepath = os.path.join(export_dir, label_filename)\n tf.compat.v1.logging.info('Saving labels in %s', label_filepath)\n with tf.io.gfile.GFile(label_filepath, 'w') as f:\n f.write('\\n'.join(self._label_names))", "def generate_labels_csv(csv_location, *args):\n os.chdir(csv_location) # Navigate into the right directory\n\n # Initilize and open the Labels csv\n with open('labels.csv', mode='w') as csv_file:\n fieldnames = ['Frame_ID', 'Class']\n writer = csv.DictWriter(csv_file, fieldnames=fieldnames)\n writer.writeheader()\n\n count = 0\n label = 0\n for classes in args:\n # Write into the CSV the frames with their associated class\n for i in range(count, classes):\n writer.writerow({'Frame_ID': 'frame' + str(i) + '.jpg', 'Class': label})\n\n # Increment label and count\n count = classes\n label += 1", "def save(self, data, outpath):\n data.to_csv(outpath)", "def create_csv_submission(ids, y_pred, name):\n # negative class has to be labelled -1 on AIcrowd\n y_pred[y_pred == 0] = -1\n\n with open(name, 'w') as csvfile:\n fieldnames = ['Id', 'Prediction']\n writer = csv.DictWriter(csvfile, delimiter=\",\", fieldnames=fieldnames)\n writer.writeheader()\n for r1, r2 in zip(ids, y_pred):\n writer.writerow({'Id':int(r1),'Prediction':int(r2)})", "def create_output_csv(labels, filename):\n\n keyframe_ind = [labels[i] != labels[i-1] for i, val in enumerate(labels)]\n keyframe_idxs = [i for i, val in enumerate(keyframe_ind) if val==True]\n keyframe_filenames = [\"%06d\" % (i+1) 
+ \".jpg\" for i, val in enumerate(keyframe_ind) if val==True]\n keyframe_scenes = labels[keyframe_idxs]\n keyframe_scenes_ascii = [string.ascii_lowercase[i] for i in keyframe_scenes]\n result = pd.DataFrame([keyframe_filenames, keyframe_scenes_ascii]).transpose()\n result.columns = ['keyframe', 'scene id']\n filepath = os.getcwd()\n result.to_csv(filepath + '/' + filename)", "def export_label_index_dict(label_index_dict):\r\n\tcsv_file = open('output.csv', 'w')\r\n\twriter = csv.writer(csv_file)\r\n\trow = ''\r\n\theader = 'Type,Prediction\\n'\r\n\tcsv_file.write(header)\r\n\tfor key in label_index_dict.keys():\r\n\t\trow = key + ',' + label_index_dict[key] + '\\n'\r\n\t\tcsv_file.write(row)", "def create_csv_file(data_root, output_file):\r\n image_folder = data_root + \"/\" + \"training_set\"\r\n label_folder = data_root + \"/\" + \"training_set_label\"\r\n filenames = os.listdir(label_folder)\r\n filenames = [item for item in filenames if item[0] != '.']\r\n file_list = []\r\n for filename in filenames:\r\n image_name = \"training_set\" + \"/\" + filename.replace(\"_seg.\", \".\")\r\n label_name = \"training_set_label\" + \"/\" + filename\r\n file_list.append([image_name, label_name])\r\n \r\n with open(output_file, mode='w') as csv_file:\r\n csv_writer = csv.writer(csv_file, delimiter=',', \r\n quotechar='\"',quoting=csv.QUOTE_MINIMAL)\r\n csv_writer.writerow([\"image\", \"label\"])\r\n for item in file_list:\r\n csv_writer.writerow(item)" ]
[ "0.8146048", "0.79251695", "0.7332245", "0.70488673", "0.69873244", "0.6764667", "0.6763731", "0.67252517", "0.67002916", "0.6674826", "0.66690266", "0.66482043", "0.66482043", "0.66221464", "0.661912", "0.6589544", "0.6589139", "0.65413326", "0.6503416", "0.6473352", "0.6437085", "0.64347327", "0.641439", "0.6390464", "0.6378247", "0.6372916", "0.63645226", "0.63565266", "0.6326758", "0.63209575" ]
0.80636126
1
Splits the train set into test images
def split_test_train(train_folder_path, train_labels, test_folder, n_test_images): os.makedirs(test_folder, exist_ok=True) data = read_csv_to_list(train_labels) # Prepare test labels and move images to new folder labels = [] for img in data[1:n_test_images]: # Input and new image paths # print(type(train_folder_path),type(img[0])) img_path = train_folder_path / (img[0] + ".dcm") new_img_path = test_folder / (img[0] + ".dcm") if Path(img_path).exists(): # there can be several annotations per image shutil.move(img_path, new_img_path) labels.append(img) # Prepare train labels. Removes duplicate as we dont need them. train_labels = [] img_list_names = [] for idx, label in enumerate(data[n_test_images + 1 :]): if (label[0] in img_list_names) and (idx != 0): continue img_list_names.append(label[0]) train_labels.append(label) # labels.insert(0, data[0]) # train_labels.insert(0, data[0]) return train_labels, labels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_datasets(img_lst):\n num = len(img_lst)\n\n idx = np.random.permutation(num)\n train_lst = np.array(img_lst)[idx[:int(num * .8)]] # 80/20 split\n validation_lst = np.array(img_lst)[idx[int(num * .8):int(num * .9)]]\n test_lst = np.array(img_lst)[idx[int(num * .9):]]\n return train_lst, validation_lst, test_lst", "def split_dataset(self, test_size=0.20):\n\t\t(self.training_data, self.test_data, self.training_labels, self.test_labels) = train_test_split(self.training_data, self.training_labels, test_size=test_size)", "def _split_train_test(samples, test_shots=1):\n train_set = list(samples)\n test_set = []\n labels = set(item[1] for item in train_set)\n for _ in range(test_shots):\n for label in labels:\n for i, item in enumerate(train_set):\n if item[1] == label:\n del train_set[i]\n test_set.append(item)\n break\n if len(test_set) < len(labels) * test_shots:\n raise IndexError('not enough examples of each class for test set')\n return train_set, test_set", "def _train_validation_test_split(self, images, centroids):\n \n # Get test images from images list. Test images are images for which a centroid mask does NOT exist\n test = []\n filtered_images = []\n \n # split images in test and train/val set\n for image in images:\n if image not in centroids:\n test.append(image)\n else:\n filtered_images.append(image)\n \n # Generate train/validation split based on remaining images and centroids\n train, validation = train_test_split(filtered_images, train_size=self.split, random_state=self.seed)\n \n return train, validation, test", "def train_test_split_data(aligned_image_lists, raw_image_lists, test_ratio ,validation = True ):\n\n assert len(aligned_image_lists) == len(raw_image_lists), \"images have different size\"\n mask = list(range(len(aligned_image_lists)))\n mask_train, mask_test = train_test_split(mask, test_size= test_ratio, shuffle=True)\n\n aligned_lists_train = [aligned_image_lists[i] for i in mask_train]\n aligned_lists_test = [aligned_image_lists[i] for i in mask_test]\n\n raw_lists_train = [raw_image_lists[i] for i in mask_train] \n raw_lists_test = [raw_image_lists[i] for i in mask_test] \n\n return [aligned_lists_train, aligned_lists_test, raw_lists_train, raw_lists_test]", "def split_test_and_train_data(self, test_size=0.3, view=0):\n X_train, X_test, y_train, y_test = train_test_split(self.data[view], self.class_data, test_size=test_size)\n return X_train, X_test, y_train, y_test", "def _divide_into_test_train(\n self, test_size: int, train_size: int\n ) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame]:\n X_train, X_test, y_train, y_test = train_test_split(\n self.df.iloc[:, :-1],\n self.df.iloc[:, -1],\n test_size=test_size,\n train_size=train_size,\n )\n return X_train, X_test, y_train, y_test", "def split_data_into_train_and_test(raw_training_data):\n train_set, test_set = train_test_split(raw_training_data, test_size=0.2, random_state=42)\n return train_set, test_set", "def split_data(data, test_size):\r\n ntest = int(round(len(data) * (1 - test_size)))+1\r\n \r\n train, test = data[:ntest], data[ntest:]\r\n \r\n return train,test", "def test_train_split(X, y, test_size):\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=42)\n print(f\"You have {len(X_train)} training colors and {len(X_test)} test colors - test_size: {test_size*100}.\")\n return X_train, X_test, y_train, y_test", "def get_train_test_lists(dataset_path, classes=('glare_small', 'normal'), test_size=0.25):\n image_set = []\n label_set = 
[]\n for cls in classes:\n dir = os.path.join(dataset_path, cls)\n img_list = glob.glob(dir + '/*.png')\n img_list.extend(glob.glob(dir + '/*.jpg'))\n label = None\n if cls == 'glare_small' or cls == 'glare':\n label = 1\n if cls == 'normal':\n label = 0\n\n labels = list(itertools.repeat(label, len(img_list)))\n image_set.extend(img_list)\n label_set.extend(labels)\n X_train, X_test, y_train, y_test = train_test_split(image_set, label_set, test_size=test_size, random_state=42)\n return X_train, X_test, y_train, y_test", "def train_test_split(x, y, test_pct):\n data = zip(x, y)\n train, test = split_data(data, 1 - test_pct)\n x_train, y_train = zip(*train)\n x_test, y_test = zip(*test)\n return x_train, y_train, x_test, y_test", "def train_test_split(df, test_size=0.3):\r\n # split df here\r\n train_size = int(df.shape[0] * (1 - test_size))\r\n test_size = df.shape[0] - train_size\r\n train = df[:train_size]\r\n test = df[train_size:]\r\n\r\n return train, test # return the train and test datasets\r", "def train_test_split(x, y, test_pct):\n\tdata = zip(x,y)\n\ttrain, test = split_data(data, 1 - test_pct)\n\tx_train, y_train = zip(*train)\n\tx_test, y_test = zip(*test)\n\treturn x_train, y_train, x_test, y_test", "def split_data_by_image(self, test_fraction=0.5):\n image_id = BaseModel.get_image_id(self.inputs)\n test_idx = np.random.random(image_id.max()+1) <= test_fraction\n\n # Low image count edge case (mostly just for testing purposes)\n if True not in test_idx:\n test_idx[0] = True\n elif False not in test_idx:\n test_idx[0] = False\n \n test_idx = test_idx[image_id]\n if BaseModel.is_laue(self.inputs):\n train, test = self.split_laue_data_by_mask(test_idx)\n else:\n train, test = self.split_mono_data_by_mask(test_idx)\n\n #return self.get_tf_dataset(train), self.get_tf_dataset(test)\n return train, test", "def train_test_split(ratings):\n test = set(range(len(ratings))[::1000])\n train = sorted(set(range(len(ratings))) - test)\n test = sorted(test)\n return ratings.iloc[train], ratings.iloc[test]", "def test_train_split(X, y, test_size=0.2):\n idx = 0\n length_of_X = len(X)\n y_test = []\n X_test = []\n \n while idx < length_of_X*test_size:\n random_number_gen = np.random.randint(low=0, high=len(X))\n y_test.append(y[random_number_gen])\n X_test.append(X[random_number_gen])\n X = np.delete(X, random_number_gen, axis=0)\n y = np.delete(y, random_number_gen, axis=0)\n idx += 1\n return X, np.array(X_test), y, np.array(y_test)", "def train_test_split(df, test_size=0.1):\n ntrn = int(round(len(df) * (1 - test_size)))\n\n X_train, y_train = _load_data(df.iloc[0:ntrn])\n X_test, y_test = _load_data(df.iloc[ntrn:])\n\n return (X_train, y_train), (X_test, y_test)", "def split_train_test(df_train, labels):\n n_train = np.shape(df_train)[0]\n X = {'train': [], 'holdout': []} # features\n Y = {'train': [], 'holdout': []} # labels\n p10 = int(0.1 * n_train)\n X['holdout'] = df_train.iloc[-p10:]\n Y['holdout'] = labels[-p10:]\n X['train'] = df_train.iloc[:(n_train - p10)]\n Y['train'] = labels[:(n_train - p10)]\n return X, Y", "def __split_dataset(self):\n self.train, self.valid, _, _ = train_test_split(self.data, self.data, test_size=0.2)\n self.valid, self.test, _, _ = train_test_split(self.valid, self.valid, test_size=0.5)", "def train_test_split(dataset, split):\r\n train = list()\r\n train_size = split * len(dataset)\r\n dataset_copy = list(dataset) \r\n while len(train) < train_size:\r\n index = randrange(len(dataset_copy))\r\n train.append(dataset_copy.pop(index))\r\n return train, 
dataset_copy", "def split_train_test(data, test_ratio):\n shuffled_index = np.random.permutation(len(data))\n test_set_size = int(len(data) * test_ratio)\n\n train_indices = shuffled_index[test_set_size:]\n test_indices = shuffled_index[:test_set_size]\n\n return data.iloc[train_indices], data.iloc[test_indices]", "def data_split():\n x_train = np.load(os.path.join(svhn.DEFAULT_PREPROCESSED_DATA_FOLDER, svhn.TRAIN, \"images.npy\"))\n y_train = np.load(os.path.join(svhn.DEFAULT_PREPROCESSED_DATA_FOLDER, svhn.TRAIN, \"label.npy\"))\n x_test = np.load(os.path.join(svhn.DEFAULT_PREPROCESSED_DATA_FOLDER, svhn.TEST, \"images.npy\"))\n y_test = np.load(os.path.join(svhn.DEFAULT_PREPROCESSED_DATA_FOLDER, svhn.TEST, \"label.npy\"))\n\n print(x_train.shape)\n print(x_test.shape)\n\n img_rows, img_cols = x_train.shape[1], x_train.shape[2]\n num_classes = 10 # starts with 1 not 0\n\n y_test1 = y_test.reshape((y_test.shape[0],))\n y_test1 = [y - 1 for y in y_test1]\n\n y_train1 = y_train.reshape((y_train.shape[0],))\n y_train1 = [y - 1 for y in y_train1]\n\n input_shape = (img_rows, img_cols, 3)\n\n X_train = x_train.astype('float32')\n X_test = x_test.astype('float32')\n\n X_train /= 255\n X_test /= 255\n print('x_train shape:', X_train.shape)\n print(X_train.shape[0], 'train samples')\n print(X_test.shape[0], 'test samples')\n\n # convert class vectors to binary class matrices\n y_train2 = keras.utils.to_categorical(y_train1, num_classes)\n y_test2 = keras.utils.to_categorical(y_test1, num_classes)\n\n y_train2 = y_train2.astype('int32')\n y_test2 = y_test2.astype('int32')\n\n print(\n \"after process: X train shape: {}, X test shape: {}, y train shape: {}, y test shape: {}\".format(x_train.shape,\n x_test.shape,\n y_train2.shape,\n y_test2.shape))\n return input_shape, X_train, X_test, y_train2, y_test2", "def split_data(dataset, test_size=0.5):\n shuffled_data = np.random.RandomState(seed=721).permutation(dataset)\n train_set = shuffled_data[: int(len(dataset) * (1 - test_size)), :]\n test_set = shuffled_data[int(len(dataset) * (1 - test_size)):, :]\n return train_set, test_set", "def create_train_test_sets(conform_shape=True, indi_proportion=0.50, incl_group_imgs=True):\r\n X_train_indi, y_train_indi = build_dataframe('Individual_Training_Images',\r\n img_input_shape, conform_shape=conform_shape)\r\n X_test_indi, y_test_indi = build_dataframe('Individual_Test_Images',\r\n img_input_shape, conform_shape=conform_shape)\r\n \r\n X_train_group, y_train_group = build_dataframe('Group_Training_Images',\r\n img_input_shape, conform_shape=conform_shape)\r\n X_test_group, y_test_group = build_dataframe('Group_Test_Images',\r\n img_input_shape, conform_shape=conform_shape)\r\n \r\n X_train_indi, y_train_indi = subsample_dataframe(X_train_indi, y_train_indi,indi_proportion)\r\n \r\n if incl_group_imgs:\r\n X_train = np.concatenate([X_train_indi,X_train_group])\r\n y_train = np.concatenate([y_train_indi,y_train_group])\r\n else: \r\n X_train = X_train_indi.copy()\r\n y_train = y_train_indi.copy()\r\n\r\n return X_train, y_train, X_test_indi, y_test_indi, X_test_group, y_test_group", "def split_dataset(dataset, test_size):\n train_data = dataset.skip(test_size).shuffle(SHUFFLE_BUFFER_SIZE)\n train_data = train_data.padded_batch(BATCH_SIZE)\n \n test_data = dataset.take(test_size)\n test_data = test_data.padded_batch(BATCH_SIZE)\n \n return train_data, test_data", "def split_data(test_data, split_ratio):\n split_index = int(split_ratio * len(test_data))\n \n # randomly permute the values in place\n 
random.shuffle(test_data)\n \n # take slices of the determined size\n training_set = copy.copy(test_data[:split_index])\n test_data = copy.copy(test_data[split_index:])\n\n return training_set, test_data", "def split_train_test(ratings):\r\n ratings = ratings.sample(frac=1).reset_index(drop=True)\r\n train_user_list = []\r\n train_item_list = []\r\n train_rating_list = []\r\n test_user_list = []\r\n test_item_list = []\r\n test_rating_list = []\r\n user_pool = set(ratings['userId'].unique())\r\n for idx in user_pool:\r\n flag = 0\r\n items = ratings[ratings['userId']==idx][['itemId','rating']]\r\n for i, row in items.iterrows():\r\n if flag == 0:\r\n test_user_list.append(int(idx))\r\n test_item_list.append(int(row['itemId']))\r\n test_rating_list.append(row['rating'])\r\n flag = 1\r\n else:\r\n train_user_list.append(int(idx))\r\n train_item_list.append(int(row['itemId']))\r\n train_rating_list.append(row['rating'])\r\n\r\n train = pd.DataFrame({'userId': train_user_list, 'itemId': train_item_list, 'rating': train_rating_list}, columns=['userId', 'itemId', 'rating'])\r\n test = pd.DataFrame({'userId': test_user_list, 'itemId': test_item_list, 'rating': test_rating_list}, columns=['userId', 'itemId', 'rating'])\r\n return [train, test]\r\n \r\n\r\n \r\n #train, test = train_test_split(ratings, test_size=0.1, shuffle=True)\r\n #return [train, test]\r", "def train_test_split(df):\n training_size = int(len(df) * .67)\n test_size = int(len(df) - training_size)\n train, test = df[0:training_size], df[training_size:len(df)]\n return train, test", "def split_data(images, labels):\n images, labels = shuffle_data_pair(images, labels)\n\n num_covid_points = sum(map(lambda label: label == 0, labels))\n\n # Calculate split\n num_test = int(num_covid_points * 0.1)\n num_covid_train = num_covid_points - num_test * 2\n num_other_train = int(num_covid_train * 1.1)\n\n # (train, validate, test) points added\n num_points_added = [\n [0, 0, 0], # COVID-19\n [0, 0, 0], # Viral pneumonia\n [0, 0, 0] # Normal\n ]\n\n # Datasets\n images_train = []\n labels_train = []\n images_validate = []\n labels_validate = []\n images_test = []\n labels_test = []\n\n # Add images and labels to datasets\n notifier.send(\" Adding images and labels to dataset...\")\n for i, label in enumerate(labels):\n print(f\" Point: {i} / {len(labels)}\")\n completed_labels = [False, False, False] # Enough of label added\n if all(completed_labels):\n break\n for j in range(3): # 0: COVID-19, 1: Viral pneumonia, 2: Normal\n if completed_labels[j]:\n continue\n if label == j:\n # Add training data\n can_add_training = False\n if j == 0: # COVID-19\n if num_points_added[j][0] < num_covid_train:\n can_add_training = True\n num_points_added[j][0] += 1\n elif num_points_added[j][0] < num_other_train: # Not COVID-19\n can_add_training = True\n num_points_added[j][0] += 1\n if can_add_training:\n images_train.append(images[i])\n labels_train.append(labels[i])\n break\n\n # Add validation data\n if num_points_added[j][1] < num_test:\n num_points_added[j][1] += 1\n images_validate.append(images[i])\n labels_validate.append(labels[i])\n break\n\n # Add testing data\n if num_points_added[j][2] < num_test:\n num_points_added[j][2] += 1\n images_test.append(images[i])\n labels_test.append(labels[i])\n break\n\n # Point couldn't be added anywhere: label is complete\n completed_labels[j] = True\n break\n\n # Shuffle all data\n notifier.send(\" Shuffling data...\")\n images_train, labels_train = shuffle_data_pair(\n images_train, labels_train\n )\n 
images_validate, labels_validate = shuffle_data_pair(\n images_validate, labels_validate\n )\n images_test, labels_test = shuffle_data_pair(\n images_test, labels_test\n )\n\n if PLOT_LABELS:\n # Plot data frequencies\n plt.hist(labels, bins=3)\n plt.title(\"Labels\")\n\n plt.hist(labels_train, bins=3)\n plt.title(\"Train Labels\")\n\n plt.hist(labels_validate, bins=3)\n plt.title(\"Validate Labels\")\n\n plt.hist(labels_test, bins=3)\n plt.title(\"Test Labels\")\n\n plt.show()\n\n # Make labels categorical\n notifier.send(\" Making labels categorical: train...\")\n labels_train = tf.keras.utils.to_categorical(labels_train)\n notifier.send(\" Making labels categorical: validate...\")\n labels_validate = tf.keras.utils.to_categorical(labels_validate)\n notifier.send(\" Making labels categorical: test...\")\n labels_test = tf.keras.utils.to_categorical(labels_test)\n\n notifier.send(\" Converting data to NumPy arrays...\")\n return \\\n np.array(images_train), np.array(images_validate), np.array(images_test), \\\n np.array(labels_train), np.array(labels_validate), np.array(labels_test)" ]
[ "0.74955", "0.7494549", "0.7489278", "0.74131435", "0.73808104", "0.7314455", "0.7261903", "0.7215054", "0.7191714", "0.7166882", "0.71620023", "0.7145183", "0.71408004", "0.7076274", "0.7059303", "0.70584756", "0.70197546", "0.70191765", "0.69856054", "0.69731814", "0.6963965", "0.6962351", "0.6954356", "0.6944877", "0.69151044", "0.68783563", "0.6866853", "0.6864538", "0.68640494", "0.686051" ]
0.7677867
0
Generate human readable tool test reports. Creates reports in various formats (HTML, text, markdown) from the structured test output (tool_test_output.json).
def cli(ctx, path, **kwds):
    if not os.path.exists(path):
        io.error("Failed to tool test json file at %s" % path)
        return 1
    test_data = StructuredData(path)
    handle_reports(ctx, test_data.structured_data, kwds)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(execution_type = \"Native\", qmetry_data = {}):\r\n try:\r\n prog = dellunit.TestProgram(LOGTIME, execution_type, qmetry_data)\r\n jsonfile = ('report_%s.json'%logtime if (prog.timestamp_report == 1) else 'report.json')\r\n htmlfile = ('report_%s.html'%logtime if (prog.timestamp_report == 1) else 'report.html')\r\n JSON_FILE = os.path.abspath(os.path.join('logs', jsonfile))\r\n REPORT_FILE = os.path.abspath(os.path.join('logs', htmlfile))\r\n\r\n invalid_tests_json = []\r\n for test in prog.invalid_tests:\r\n test_json = {}\r\n test_json[\"testcase\"] = test\r\n test_json[\"invalid\"] = True\r\n test_json[\"status\"] = \"error\"\r\n test_json[\"description\"] = util.getTestcaseInformation(test)[\"Testcase Name\"]\r\n invalid_tests_json.append(test_json)\r\n\r\n except:\r\n sys.stdout == sys.__stdout__\r\n sys.stderr == sys.__stderr__\r\n logging.exception('Fatal error during test, skipping report generation')\r\n os.chdir(CURRENT_DIR)\r\n sys.exit(1)\r\n\r\n with open(JSON_FILE, 'r') as outfile_read:\r\n data = json.load(outfile_read)\r\n with open(JSON_FILE, 'w') as outfile_write:\r\n data[\"results\"][0][\"tests\"].extend(invalid_tests_json)\r\n json.dump(data, outfile_write)\r\n logging.info('Creating the report.html file')\r\n report.render_report(JSON_FILE, REPORT_FILE)\r\n os.chdir(CURRENT_DIR)", "def test_json_report(self):\n self._test_reports_helper({\"--json-report\": \"\"}, [\"report.json\"])", "def create_test_report(test_generator, test_history):\n \n df_res_labels = create_res_labels_df(test_generator, test_history)\n \n print_metric_to_console = False\n lvls=['']\n \n metrics_dict = {}\n \n n_samples = df_res_labels.shape[0]\n print('.'*50)\n print('showing test metrics for {} samples'.format(n_samples))\n print('`'*50)\n \n lvl_metrics_dict = {}\n for lvl in lvls:\n y_tr = df_res_labels['y_true' + lvl]\n y_pre = df_res_labels['y_pred' + lvl] \n \n lvl_metrics_dict = {}\n \n # Macro / Micro Driven Metrics\n for avg in ['macro', 'micro']:\n \n met_name = 'precision' + ('_'+ avg) \n res = metrics.precision_score(y_tr, y_pre, average=avg)\n lvl_metrics_dict[met_name] = res\n \n met_name = 'f1' + ('_'+ avg) \n res = metrics.f1_score(y_tr, y_pre, average=avg)\n lvl_metrics_dict[met_name] = res\n \n met_name = 'recall' + ('_'+ avg) \n res = metrics.recall_score(y_tr, y_pre, average=avg)\n lvl_metrics_dict[met_name] = res\n \n met_name = 'accuracy' \n res = metrics.accuracy_score(y_tr, y_pre)\n lvl_metrics_dict[met_name] = res\n \n metrics_dict[lvl] = lvl_metrics_dict\n \n df_test_results = pd.DataFrame(metrics_dict).sort_values(by=lvls, ascending=False)\n df_test_results=df_test_results.reindex(columns=lvls)\n \n print(df_test_results)\n print('- '*70)\n \n plot_confusion_matrix(df_res_labels)\n \n return df_res_labels", "def _generate_report(self):\n total_duration = 0.0\n total_nb_tests = 0\n total_nb_success = 0\n nb_modules = 0\n payload = []\n\n res_table = prettytable.PrettyTable(\n padding_width=2,\n field_names=['Module', 'Duration', 'nb. 
Test Run', 'Success'])\n res_table.align['Module'] = \"l\"\n res_table.align['Duration'] = \"r\"\n res_table.align['Success'] = \"r\"\n\n # for each scenario we draw a row for the table\n for item in self.summary:\n if item['task_status'] is True:\n nb_modules += 1\n total_duration += item['overall_duration']\n total_nb_tests += item['nb_tests']\n total_nb_success += item['nb_success']\n try:\n success_avg = 100 * item['nb_success'] / item['nb_tests']\n except ZeroDivisionError:\n success_avg = 0\n success_str = f\"{success_avg:0.2f}%\"\n duration_str = time.strftime(\"%H:%M:%S\",\n time.gmtime(item['overall_duration']))\n res_table.add_row([item['test_name'], duration_str,\n item['nb_tests'], success_str])\n payload.append({'module': item['test_name'],\n 'details': {'duration': item['overall_duration'],\n 'nb tests': item['nb_tests'],\n 'success rate': success_str,\n 'success': item['success'],\n 'failures': item['failures']}})\n\n total_duration_str = time.strftime(\"%H:%M:%S\",\n time.gmtime(total_duration))\n try:\n self.result = 100 * total_nb_success / total_nb_tests\n except ZeroDivisionError:\n self.result = 100\n success_rate = f\"{self.result:0.2f}\"\n success_rate_str = str(success_rate) + '%'\n res_table.add_row([\"\", \"\", \"\", \"\"])\n res_table.add_row([\"TOTAL:\", total_duration_str, total_nb_tests,\n success_rate_str])\n\n LOGGER.info(\"Rally Summary Report:\\n\\n%s\\n\", res_table.get_string())\n LOGGER.info(\"Rally '%s' success_rate is %s%% in %s/%s modules\",\n self.case_name, success_rate, nb_modules,\n len(self.summary))\n self.details['summary'] = {'duration': total_duration,\n 'nb tests': total_nb_tests,\n 'nb success': success_rate}\n self.details[\"modules\"] = payload", "def test_gettesttools_html(self):\n pass", "def test_basic_usage(self):\n self._test_reports_helper({}, [\"report.txt\"])", "def create_test_summary(args, TEST_RESULTS):\n logging.error(\"Creating test summary report...\")\n\n try:\n test_summary = \"Performance Metrics of {APP} Application Tested from this PR\\n\".format(APP=args.bundle_id)\n test_summary += \"---------------------------------------------------------------\\n\"\n\n for element in TEST_RESULTS:\n if element != LAUNCHES:\n test_summary += \"> {KEY}: {VALUE}\".format(KEY=element, VALUE=TEST_RESULTS[element])\n if element == INSTALL_LAUNCH_DURATION:\n if int(TEST_RESULTS[INSTALL_LAUNCH_DURATION]) > args.duration_limit:\n test_summary += \"ms :x:\\n\"\n else:\n test_summary += \"ms :white_check_mark:\\n\"\n\n if element == INSTALL_MEMORY_USAGE:\n if int(TEST_RESULTS[INSTALL_MEMORY_USAGE]) > args.memory_limit:\n test_summary += \"MB :x:\\n\"\n else:\n test_summary += \"MB :white_check_mark:\\n\"\n\n if element == APP_SIZE:\n if int(TEST_RESULTS[APP_SIZE]) > args.size_limit:\n test_summary += \"MB :x:\\n\"\n else:\n test_summary += \"MB :white_check_mark:\\n\"\n test_summary += \"---------------------------------------------------------------\\n\"\n\n for element in TEST_RESULTS[LAUNCHES]:\n test_summary += \"> DEVICE: {DEVICE} | LAUNCH TYPE: {LAUNCH_TYPE} | \".format(DEVICE=element[DEVICE], LAUNCH_TYPE=element[LAUNCH_TYPE])\n test_summary += \"DURATION: {DURATION}ms \".format(DURATION=element[LAUNCH_DURATION])\n if int(element[LAUNCH_DURATION]) > args.duration_limit:\n test_summary += \" :x: | \"\n else:\n test_summary += \" :white_check_mark: | \"\n\n test_summary += \"MEMORY USAGE: {MEMORY_USAGE}MB \".format(MEMORY_USAGE=element[MEMORY_USAGE])\n if int(element[MEMORY_USAGE]) > args.memory_limit:\n test_summary += \" :x:\\n\"\n 
else:\n test_summary += \" :white_check_mark:\\n\"\n test_summary += \"----------------------------------------------------\\n\"\n\n except Exception as e:\n logging.error(\"Creating test summary failed with error '{ERROR}'\".format(ERROR=e))\n return None\n\n logging.info(test_summary)\n return test_summary", "def print_analysis(self,version,results,tests,test_details,test_order,\n output_dir,diffs_only):\n def format_result(r):\n return '%s %s' % (r.outcome,r.get_cause())\n\n main_template = makolookup.get_template(\"main.mako\")\n detail_template = makolookup.get_template(\"detail.mako\")\n\n f = open(os.path.join(output_dir,'index.html'),'w')\n try:\n f.write(main_template.render(version=version,results=results,tests=tests,\n test_details=test_details,test_order=test_order,\n time2datetime=time2datetime))\n finally:\n f.close()\n\n for test_id,test_detail in test_details.items():\n #print ('Detail: %s' % test_id)\n f = open(os.path.join(output_dir,test_id+'.html'),'w')\n try:\n f.write(detail_template.render(version=version,test_id=test_id,\n test_detail=test_detail,diffs_only=diffs_only))\n except:\n f.write(\"Error while processing output.\")\n finally:\n f.close()", "def main(reports: str, output: str) -> None:\n json_reports = {\n path: json.loads(path.read_text(encoding=\"utf-8\"))\n for path in pathlib.Path(reports).resolve().glob(\"*.json\")\n }\n\n cpython_results: t.Dict[str, t.Dict[str, t.Any]] = {}\n mopsa_results: t.Dict[str, t.Any] = {}\n lambda_py_results: t.Dict[str, t.Any] = {}\n sos_python_results: t.Dict[str, t.Any] = {}\n\n for report in json_reports.values():\n if report[\"type\"] == \"CPython\":\n cpython_results[report[\"version\"]] = report[\"results\"]\n elif report[\"type\"] == \"Lambda-Py\":\n lambda_py_results = report[\"results\"]\n elif report[\"type\"] == \"Mopsa\":\n mopsa_results = report[\"results\"]\n else:\n assert report[\"type\"] == \"SOS\"\n sos_python_results = report[\"results\"]\n\n cpython_versions = list(cpython_results.keys())\n cpython_versions.sort()\n\n assert cpython_results, \"results for CPython are missing\"\n assert lambda_py_results, \"results for Lambda-Py are missing\"\n assert mopsa_results, \"results for Mopsa are missing\"\n assert sos_python_results, \"results for SOS Python are missing\"\n\n num_columns = 1 + len(cpython_results) + 1 + 1 + 3\n\n lines: t.List[str] = [\n f\"\\\\begin{{longtable}}{{|r|{len(cpython_results) * 'c'}|c|c|ccc|}}\\\\hline\",\n f\"& \\\\multicolumn{{{len(cpython_results)}}}{{c|}}{{CPython}}\",\n \"& $\\\\lambda_\\\\pi$ & \\\\Mopsa & \\\\multicolumn{3}{c|}{SOS Python} \\\\\\\\\",\n f\"& {'&'.join(cpython_versions)} & & & & Time $[s]$ & Transitions \\\\\\\\\",\n \"\\\\hline\\\\endhead\",\n ]\n\n columns: t.List[str] = [\"\"]\n\n sos_python_times: t.List[float] = []\n sos_python_transitions: t.List[int] = []\n\n for test in programs.all_tests:\n columns.clear()\n columns.append(f\"{latex_escape(test.name)}\")\n\n for version in cpython_versions:\n columns.append(get_mark(cpython_results[version][test.identifier]).value)\n\n columns.append(get_mark(lambda_py_results[test.identifier]).value)\n columns.append(get_mark(mopsa_results[test.identifier]).value)\n\n sos_result = sos_python_results[test.identifier]\n columns.append(get_mark(sos_result).value)\n if sos_result[\"returncode\"] == 0:\n columns.append(f\"${sos_result['execution_time']:.2f}$\")\n columns.append(f\"${sos_result['transitions']:,}$\".replace(\",\", \"\\\\,\"))\n sos_python_times.append(sos_result[\"execution_time\"])\n 
sos_python_transitions.append(sos_result[\"transitions\"])\n else:\n columns.append(r\"\\textcolor{gray}{--}\")\n columns.append(r\"\\textcolor{gray}{--}\")\n\n lines.append(\"&\".join(columns))\n lines.append(\"\\\\\\\\\")\n\n lines.append(\"\\\\hline \\\\hline\")\n\n cpython_statistics = {\n version: count_marks(results) for version, results in cpython_results.items()\n }\n lambda_py_statistics = count_marks(lambda_py_results)\n mopsa_statistics = count_marks(mopsa_results)\n sos_python_statistics = count_marks(sos_python_results)\n\n for mark in Mark:\n columns.clear()\n columns.append(mark.value)\n for version in cpython_versions:\n columns.append(str(cpython_statistics[version][mark]))\n columns.append(str(lambda_py_statistics[mark]))\n columns.append(str(mopsa_statistics[mark]))\n columns.append(\n f\"\\\\multicolumn{{3}}{{c|}}{{{str(sos_python_statistics[mark])}}}\"\n )\n lines.append(\"&\".join(columns))\n lines.append(\"\\\\\\\\\")\n lines.append(\"\\\\hline\")\n lines.append(\n f\"\\\\multicolumn{{{num_columns}}}{{c}}\"\n \"{\\\\rule{0pt}{1.2\\\\normalbaselineskip}Generated by \\\\texttt{evaluation/latexify.py}.}\"\n )\n lines.append(\"\\\\end{longtable}\")\n\n pathlib.Path(output).write_text(\"\\n\".join(lines), encoding=\"utf-8\")\n\n num_k_python_tests = sum(1 for test in programs.all_tests if test.is_k_python)\n num_lambda_py_tests = sum(1 for test in programs.all_tests if test.is_lambda_py)\n print(\"Total number of tests:\", len(programs.all_tests))\n print(\"K Python tests:\", num_k_python_tests)\n print(\"Lambda-Py tests:\", num_lambda_py_tests)\n print(\n \"Our tests:\", len(programs.all_tests) - num_k_python_tests - num_lambda_py_tests\n )\n print(f\"Median Time: {statistics.median(sos_python_times)}\")\n print(f\"Median Transitions: {statistics.median(sos_python_transitions)}\")\n print(f\"AVG Time: {statistics.mean(sos_python_times)}\")\n print(f\"AVG Transitions: {statistics.mean(sos_python_transitions)}\")", "def test_manager_basic_generate_report():\n report_path = os.path.join(REPORT_DIR, \"basic_report.json\")\n httpretty.register_uri(\n httpretty.GET, \"http://test.com/test\", body=json.dumps({\"foo\": \"bar\"})\n )\n httpretty.register_uri(httpretty.GET, \"http://test.com/bar\")\n\n result = spintest(\n [\"http://test.com\"],\n [\n {\"method\": \"GET\", \"route\": \"/test\", \"output\": \"test\"},\n {\"method\": \"GET\", \"route\": \"/{{ test['foo'] }}\"},\n ],\n generate_report=report_path,\n )\n\n assert True is result\n\n assert True is os.path.isfile(report_path)\n\n assert True is validate_report(report_path)", "def _analyze_make_output(self,\n jenkins_build_url=None,\n test_reports_dir=None,\n jstest_reports_file=None,\n lint_reports_file=None,\n dry_run=False):\n if jenkins_build_url is None:\n jenkins_build_url = 'http://www.example.com/'\n if test_reports_dir is None:\n test_reports_dir = self.reports_dir\n if jstest_reports_file is None:\n jstest_reports_file = os.path.join(self.tmpdir,\n 'jstest_output.txt')\n if lint_reports_file is None:\n lint_reports_file = os.path.join(self.tmpdir,\n 'lint_errors.txt')\n\n return analyze_make_output.main(jenkins_build_url, test_reports_dir,\n jstest_reports_file, lint_reports_file,\n None, dry_run)", "def output_test():\n\toutput_comparison_page(TEST_EVENT_LIST, TEST_COMPARISON_PAGE_FILEPATH)", "def test(self):\n cmd = TaskCreator.get_pytest() + \" \"\n options = ('-vv '\n '-n auto '\n '--ignore=tests/experiments/ '\n '--html=build/tests/html/tests.html '\n '--junitxml=build/tests/xml/tests.xml '\n '--cov ' + 
self.project_name_sc + ' '\n '--cov-report term '\n '--cov-report html:build/tests/coverage/html '\n '--cov-report xml:build/tests/coverage/xml/coverage.xml ')\n return {\n \"actions\": [cmd + options],\n \"verbosity\": 2\n }", "def _process_test_suite(self, logfile):\r\n\r\n print '***' * 10\r\n print 'Output will be generated in folder {}\\n'.format(self.output_dir_path)\r\n\r\n command = 'robot --outputdir {0} -r {1}_report.html -l {1}_log.html -o {1}_output.xml {1}.robot'.format(\r\n self.output_dir_path, self.name)\r\n\r\n return self._run_command(command, logfile)", "def generate_report():\n if not os.path.isdir(samples_path):\n print(\"Couldn't find glTF-Sample-Models/2.0/\")\n print(\"Get it by running `git submodule update --init --recursive`\")\n print(\"See README.md for more instructions\")\n print(\"Tests did not run\")\n sys.exit(1)\n\n # Print Blender version for debugging\n try:\n subprocess.run(['blender', '--version'], check=True)\n except:\n print(\"Check that Blender is installed!\")\n raise\n\n print()\n\n # We're going to try to run Blender in a clean-ish environment for\n # testing. We don't want to use whatever the user has installed for\n # the glTF importer addon because it might be old, etc; we want to\n # be sure we're using the current state of ../io_scene_gltf. So\n # create a new directory to use as scripts/addons/, symlink our\n # addon into it, and tell Blender to use that.\n with tempfile.TemporaryDirectory() as scripts_dir:\n addons_dir = os.path.join(scripts_dir, 'addons')\n os.mkdir(addons_dir)\n blender_addon_path = os.path.join(addons_dir, 'io_scene_gltf')\n os.symlink(src=src_addon_dir, dst=blender_addon_path)\n\n env = os.environ.copy()\n env['BLENDER_USER_SCRIPTS'] = scripts_dir\n #TODO Should we worry about BLENDER_SYSTEM_SCRIPTS, etc?\n\n subprocess.run(\n [\n 'blender',\n '--background', # run UI-less\n '--factory-startup', # factory settings\n '--addons', 'io_scene_gltf', # enable the addon\n '--python', test_script # run the test script\n ],\n env=env,\n check=True\n )", "def cli(argv):\r\n args = get_args(argv)\r\n verbosity = \"summary\"\r\n if args.verbose:\r\n verbosity = \"report\"\r\n report = evaluate(args.design, verbosity)\r\n print json.dumps(report, indent=4)", "def Main(argv):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--main',\n dest='main',\n default=False,\n action='store_true',\n help='Generate report only for main waterfall '\n 'builders.')\n parser.add_argument(\n '--rotating',\n dest='rotating',\n default=False,\n action='store_true',\n help='Generate report only for rotating builders.')\n parser.add_argument(\n '--failures_report',\n dest='failures_report',\n default=False,\n action='store_true',\n help='Only generate the failures section of the report.')\n parser.add_argument(\n '--omit_failures',\n dest='omit_failures',\n default=False,\n action='store_true',\n help='Do not generate the failures section of the report.')\n parser.add_argument(\n '--no_update',\n dest='no_update',\n default=False,\n action='store_true',\n help='Run reports, but do not update the data files.')\n parser.add_argument(\n '--date',\n dest='date',\n default=0,\n type=int,\n help='The date YYYYMMDD of waterfall report.')\n\n options = parser.parse_args(argv)\n\n if not ValidOptions(parser, options):\n return 1\n\n main_only = options.main\n rotating_only = options.rotating\n failures_report = options.failures_report\n omit_failures = options.omit_failures\n date = options.date\n\n test_data_dict = dict()\n failure_dict = 
dict()\n\n prod_access = CheckProdAccess()\n if not prod_access:\n print('ERROR: Please run prodaccess first.')\n return\n\n with open('%s/waterfall-test-data.json' % DATA_DIR, 'r') as input_file:\n test_data_dict = json.load(input_file)\n\n with open('%s/test-failure-data.json' % DATA_DIR, 'r') as fp:\n failure_dict = json.load(fp)\n\n builds = GetBuilds(date)\n\n waterfall_report_dict = dict()\n rotating_report_dict = dict()\n int_date = 0\n for test_desc in TESTS:\n test, test_family = test_desc\n for build in builds:\n (builder, buildnum) = build\n if test.startswith('kernel') and 'llvm' in builder:\n continue\n if 'x86' in builder and not test.startswith('bvt'):\n continue\n target, build_link = DownloadLogFile(builder, buildnum, test, test_family)\n\n if os.path.exists(target):\n test_summary, report_date, board, tmp_date, color = ParseLogFile(\n target, test_data_dict, failure_dict, test, builder, buildnum,\n build_link)\n\n if tmp_date != 0:\n int_date = tmp_date\n\n if builder in ROTATING_BUILDERS:\n UpdateReport(rotating_report_dict, builder, test, report_date,\n build_link, test_summary, board, color)\n else:\n UpdateReport(waterfall_report_dict, builder, test, report_date,\n build_link, test_summary, board, color)\n\n PruneOldFailures(failure_dict, int_date)\n\n if waterfall_report_dict and not rotating_only and not failures_report:\n main_report = GenerateWaterfallReport(waterfall_report_dict, failure_dict,\n 'main', int_date, omit_failures)\n EmailReport(main_report, 'Main', format_date(int_date))\n shutil.copy(main_report, ARCHIVE_DIR)\n if rotating_report_dict and not main_only and not failures_report:\n rotating_report = GenerateWaterfallReport(rotating_report_dict,\n failure_dict, 'rotating',\n int_date, omit_failures)\n EmailReport(rotating_report, 'Rotating', format_date(int_date))\n shutil.copy(rotating_report, ARCHIVE_DIR)\n\n if failures_report:\n failures_report = GenerateFailuresReport(failure_dict, int_date)\n EmailReport(failures_report, 'Failures', format_date(int_date))\n shutil.copy(failures_report, ARCHIVE_DIR)\n\n if not options.no_update:\n with open('%s/waterfall-test-data.json' % DATA_DIR, 'w') as out_file:\n json.dump(test_data_dict, out_file, indent=2)\n\n with open('%s/test-failure-data.json' % DATA_DIR, 'w') as out_file:\n json.dump(failure_dict, out_file, indent=2)\n\n UpdateBuilds(builds)", "def generate_report():\n\n # Fetch the top 3 most viewed articles and number of views and print them\n articles_query = get_articles_query()\n popular_articles = execute_query(articles_query)\n print_top_articles(popular_articles)\n\n # Fetch the most popular authors and print them\n authors_query = get_authors_query()\n popular_authors = execute_query(authors_query)\n print_authors(popular_authors)\n\n # Print the days when there were more than 1% errors in HTTP requests\n errors_query = get_errorData_query()\n error_data = execute_query(errors_query)\n print_error_data(error_data)", "def _generate_report(self):\n\n _LOG.info(\"Generating the HTML report.\")\n\n # Make sure the output directory exists.\n try:\n self.outdir.mkdir(parents=True, exist_ok=True)\n except OSError as err:\n raise Error(f\"failed to create directory '{self.outdir}': {err}\")\n\n raw_stats_paths, descr_paths = self._copy_raw_data()\n\n # Find the styles and templates paths.\n templdir = FSHelpers.search_for_app_data(\"wult\", Path(\"templates\"),\n pathdescr=\"HTML report Jinja2 templates\")\n csspath = FSHelpers.search_for_app_data(\"wult\", Path(\"css/style.css\"),\n pathdescr=\"HTML 
report CSS file\")\n\n # Copy the styles file to the output directory.\n dstpath = self.outdir.joinpath(\"style.css\")\n try:\n shutil.copyfile(csspath, dstpath)\n except OSError as err:\n raise Error(f\"failed to copy CSS file from '{csspath}' to '{dstpath}':\\n{err}\")\n\n # The summary table is only included into the main HTML page.\n sum_tbl = self._prepare_summary_table(raw_stats_paths, descr_paths)\n links_tbl = self._prepare_links_table()\n\n # Each column name gets its own HTML page.\n for colname, pinfos in self._pinfos.items():\n stats_tbl = self._prepare_stats_table(pinfos)\n\n # Render the template.\n jenv = Jinja2.build_jenv(templdir, trim_blocks=True, lstrip_blocks=True)\n jenv.globals[\"stats_tbl\"] = stats_tbl\n jenv.globals[\"pinfos\"] = pinfos\n jenv.globals[\"colname\"] = colname\n jenv.globals[\"title_descr\"] = self.title_descr\n jenv.globals[\"toolname\"] = self._refinfo[\"toolname\"]\n\n if sum_tbl:\n jenv.globals[\"sum_tbl\"] = sum_tbl\n jenv.globals[\"links_tbl\"] = links_tbl\n templfile = outfile = \"index.html\"\n sum_tbl = None\n else:\n templfile = \"metric.html\"\n outfile = links_tbl[colname][\"fname\"]\n\n Jinja2.render_template(jenv, Path(templfile), outfile=self.outdir.joinpath(outfile))", "def run_test(self):\n\n # populate *_ps sets\n self.enter_project_file()\n\n # populate *_dir sets\n self.enter_directories()\n\n # The files in the directories makes up the largest possible set of files\n self.result_files = self.result_files_dir\n self.design_files = self.design_files_dir\n self.design_space_files = self.design_space_files_dir\n self.test_bench_files = self.test_bench_files_dir\n\n # populate *_ms sets\n self.enter_meta_results_file()\n\n # populate *_OK sets\n self.check_analysis_status()\n\n df = {'design_files_dir' : list(self.design_files_dir),'design_files_pr' : list(self.design_files_pr),\n 'design_files_ms' : list(self.design_files_ms), 'design_files_OK' : list(self.design_files_OK)}\n\n ds = {'design_space_files_dir' : list(self.design_space_files_dir),\n 'design_space_files_pr' : list(self.design_space_files_pr)}\n\n rs = {'result_files_dir' : list(self.result_files_dir), 'result_files_ms' : list(self.result_files_ms),\n 'result_files_OK' : list(self.result_files_OK)}\n\n tb = {'test_bench_files_dir' : list(self.test_bench_files_dir),\n 'test_bench_files_ms' : list(self.test_bench_files_ms)}\n\n srl = SummaryReportsLinks(self.result_files_dir)\n\n lf = {'files_linked_from_sum_reps' : srl.get_files(),\n 'folders_linked_from_sum_reps' : srl.get_folders()}\n\n # 'test_bench_files_pr' : list(self.test_bench_files_pr),\n \n json_test = {'design_files' : df, 'design_space_files' : ds, 'result_files' : rs,\n 'test_bench_files' : tb, 'stat_files' : self.stat_files,\n 'files_linked_from_sum_reps' : lf}\n\n with open('test_run.json','wb') as f_out:\n json.dump(json_test, f_out, indent=4)", "def summary(args, json_logs, all_logs):\n all_data = process_logs(json_logs)\n if args.debug:\n print(json.dumps(all_data))\n\n buf = io.StringIO()\n\n start = args.start.strftime(\"%m/%d/%Y %H:%M:%S\")\n end = args.end.strftime(\"%m/%d/%Y %H:%M:%S\")\n print(f\"Test Summary Report: {start} -> {end}\\n\", file=buf)\n\n # Calculate test failures per day/scenario\n all_days = {} # dict of per-scenario counts\n days = {} # dict of per-day -> per-scenario counts and test names\n top_failed = {} # dict of per-test failure counts\n top_flakes = {} # dict of per-test flake counts\n for day in sorted(all_data.keys()):\n days[day] = {}\n for scenario in 
sorted(all_data[day].keys()):\n if scenario not in all_days:\n all_days[scenario] = {\"success\": 0, \"missing\": 0, \"failed\": 0, \"flakes\": 0}\n\n # Figure out how many were successful, failed, or were flakes\n success, missing, failed, flakes = check_tests(all_data[day][scenario])\n\n days[day][scenario] = {\n \"success\": len(success),\n \"missing\": len(missing),\n \"failed\": len(failed),\n \"flakes\": len(flakes),\n \"missing-tests\": missing,\n \"failed-tests\": failed,\n \"flaky-tests\": flakes}\n all_days[scenario][\"success\"] += len(success)\n all_days[scenario][\"missing\"] += len(missing)\n all_days[scenario][\"failed\"] += len(failed)\n all_days[scenario][\"flakes\"] += len(flakes)\n\n for n in failed:\n top_failed[n] = top_failed.get(n, 0) + 1\n\n for n in flakes:\n top_flakes[n] = top_flakes.get(n, 0) + 1\n\n\n # Summary of tests per scenario\n print(\"Weekly summary\", file=buf)\n print(\"==============\", file=buf)\n for scenario in sorted(all_days.keys()):\n success = all_days[scenario][\"success\"]\n missing = all_days[scenario][\"missing\"]\n failed = all_days[scenario][\"failed\"]\n flakes = all_days[scenario][\"flakes\"]\n\n print(f\"{scenario}: Ran {success+failed+missing} tests. {success} passed, {failed} failed, {missing} missing, {flakes} flakes.\", file=buf)\n print(\"\\n\", file=buf)\n\n print(\"Top 5 failed tests for the week\", file=buf)\n for n in sorted((n for n in top_failed), key=lambda x: top_failed[x], reverse=True)[:5]:\n print(f\" {n} - {top_failed[n]}\", file=buf)\n print(\"\\n\", file=buf)\n\n print(\"Top 5 flaky tests for the week\", file=buf)\n for n in sorted((n for n in top_flakes), key=lambda x: top_flakes[x], reverse=True)[:5]:\n print(f\" {n} - {top_flakes[n]}\", file=buf)\n print(\"\\n\", file=buf)\n\n # Print daily stats\n for day in sorted(days.keys()):\n print(datetime.strptime(day, \"%Y%m%d\").strftime(\"%m/%d/%Y\"), file=buf)\n for scenario in sorted(days[day].keys()):\n s = days[day][scenario]\n success = s[\"success\"]\n missing = s[\"missing\"]\n failed = s[\"failed\"]\n total = success + failed + missing\n flakes = s[\"flakes\"]\n print(f\" {scenario} (Ran {total}, {success} passed, {failed} failed, {missing} missing. {flakes} flakes) :\", file=buf)\n if s[\"missing-tests\"]:\n print(\" Missing:\", file=buf)\n for n in s[\"missing-tests\"]:\n print(f\" {n}\", file=buf)\n if s[\"failed-tests\"]:\n print(\" Failed:\", file=buf)\n for n in sorted(s[\"failed-tests\"].keys()):\n print(f\" {n}\", file=buf)\n if s[\"flaky-tests\"]:\n print(\" Flakes:\", file=buf)\n for n in sorted(s[\"flaky-tests\"].keys()):\n print(f\" {n}\", file=buf)\n print(\"\\n\", file=buf)\n\n # Print the failure details for each scenario, on each day.\n for scenario in sorted(all_days.keys()):\n success = all_days[scenario][\"success\"]\n failed = all_days[scenario][\"failed\"]\n flakes = all_days[scenario][\"flakes\"]\n\n msg = f\"{scenario}: Ran {success+failed} tests. 
{success} passed, {failed} failed, {flakes} flakes.\"\n print(\"=\" * len(msg), file=buf)\n print(msg, file=buf)\n print(\"=\" * len(msg), file=buf)\n\n if args.flake_details:\n print(\"Failed test details\", file=buf)\n print(\"-------------------\", file=buf)\n print_test_details(scenario, days, \"failed-tests\", buf)\n\n if args.flake_details:\n print(\"\\nFlake test details\", file=buf)\n print(\"-------------------\", file=buf)\n print_test_details(scenario, days, \"flaky-tests\", buf)\n\n print(\"\\n\", file=buf)\n\n\n # Save the logs for the failures and flakes if a path is specified\n try:\n if args.archive_logs:\n archive_test_logs(days, args.archive_logs, all_logs)\n except RuntimeError as e:\n print(f\"\\nERROR: Problem archiving test logs - {e}\", file=buf)\n\n return buf.getvalue()", "def task_render_report():\n target = 'report.pdf'\n dep = 'report.md'\n return {\n 'file_dep': [dep],\n 'targets': [target],\n 'actions': [\n f\"pandoc -t latex -o {target} {dep}\"\n ],\n 'clean': True\n }", "def pytest_runtest_makereport(item):\n global itr\n pytest_html = item.config.pluginmanager.getplugin('html')\n outcome = yield\n report = outcome.get_result()\n extra = getattr(report, 'extra', [])\n report.description = str(item.function.__doc__)\n report.function = item.function.__doc__\n report_directory = os.path.dirname(item.config.option.htmlpath)\n\n if report.when == 'call' or report.when == \"setup\":\n extra.append(pytest_html.extras.url(MyConfigFiles.PCN_AppURL))\n xfail = hasattr(report, 'wasxfail')\n #print(\"Xfaile details::\", xfail)\n if (report.skipped and xfail) or (report.failed and not xfail): # or report.outcome:\n #print(\"Report . Node ID::\", report.nodeid)\n file_name = report.nodeid.replace(\"QA/TestCases/\", '\\\\')\n file_name = file_name.replace(\"::\", \"_\") + \".png\"\n _capture_screenshot(file_name)\n extra.append(pytest_html.extras.html('<div>Log description</div>'))\n if file_name:\n html = '<div><img src=\"%s\" alt=\"screenshot\" style=\"width:304px;height:228px;\" ' \\\n 'onclick=\"window.open(this.src)\" align=\"right\"/></div>' % file_name\n extra.append(pytest_html.extras.html(html))\n print(\"Inside IF--HTML\", file_name)\n # movfiletodir(file_name)\n report.extra = extra", "def generate_report():\n if os.path.isdir(\"build/coverage\"):\n shutil.rmtree(\"build/coverage\")\n commands = '''\nscons -uij32 --optimization=coverage controller/cplusplus_test\nlcov --base-directory build/coverage --directory build/coverage -c -o build/coverage/controller_test.info\ngenhtml -o build/coverage/controller/test_coverage -t test --num-spaces 4 build/coverage/controller_test.info\n'''\n for cmd in commands.splitlines():\n cmd_args = cmd.split()\n if (len(cmd_args) == 0):\n continue\n cmd = cmd_args[0]\n cmd_path = find_executable(cmd)\n if not cmd_path:\n continue\n pid = os.fork()\n if pid == 0:\n # Avoid stdout buffering by execing command into child process.\n os.execv(cmd_path, cmd_args)\n os.waitpid(pid, 0)", "def testOutput(self):\n global base_dir\n\n base_dir = tempfile.mkdtemp()\n if not os.path.isdir(base_dir):\n os.mkdir(base_dir)\n build = builder.Builder(self.toolchains, base_dir, None, 1, 2,\n checkout=False, show_unknown=False)\n build.do_make = self.Make\n board_selected = self.boards.GetSelectedDict()\n\n build.BuildBoards(self.commits, board_selected, keep_outputs=False,\n verbose=False)\n lines = terminal.GetPrintTestLines()\n count = 0\n for line in lines:\n if line.text.strip():\n count += 1\n\n # We should get two starting messages, then an 
update for every commit\n # built.\n self.assertEqual(count, len(commits) * len(boards) + 2)\n build.SetDisplayOptions(show_errors=True);\n build.ShowSummary(self.commits, board_selected)\n #terminal.EchoPrintTestLines()\n lines = terminal.GetPrintTestLines()\n self.assertEqual(lines[0].text, '01: %s' % commits[0][1])\n self.assertEqual(lines[1].text, '02: %s' % commits[1][1])\n\n # We expect all archs to fail\n col = terminal.Color()\n self.assertSummary(lines[2].text, 'sandbox', '+', ['board4'])\n self.assertSummary(lines[3].text, 'arm', '+', ['board1'])\n self.assertSummary(lines[4].text, 'powerpc', '+', ['board2', 'board3'])\n\n # Now we should have the compiler warning\n self.assertEqual(lines[5].text, 'w+%s' %\n errors[0].rstrip().replace('\\n', '\\nw+'))\n self.assertEqual(lines[5].colour, col.MAGENTA)\n\n self.assertEqual(lines[6].text, '03: %s' % commits[2][1])\n self.assertSummary(lines[7].text, 'sandbox', '+', ['board4'])\n self.assertSummary(lines[8].text, 'arm', '', ['board1'], ok=True)\n self.assertSummary(lines[9].text, 'powerpc', '+', ['board2', 'board3'])\n\n # Compiler error\n self.assertEqual(lines[10].text, '+%s' %\n errors[1].rstrip().replace('\\n', '\\n+'))\n\n self.assertEqual(lines[11].text, '04: %s' % commits[3][1])\n self.assertSummary(lines[12].text, 'sandbox', '', ['board4'], ok=True)\n self.assertSummary(lines[13].text, 'powerpc', '', ['board2', 'board3'],\n ok=True)\n\n # Compile error fixed\n self.assertEqual(lines[14].text, '-%s' %\n errors[1].rstrip().replace('\\n', '\\n-'))\n self.assertEqual(lines[14].colour, col.GREEN)\n\n self.assertEqual(lines[15].text, 'w+%s' %\n errors[2].rstrip().replace('\\n', '\\nw+'))\n self.assertEqual(lines[15].colour, col.MAGENTA)\n\n self.assertEqual(lines[16].text, '05: %s' % commits[4][1])\n self.assertSummary(lines[17].text, 'sandbox', '+', ['board4'])\n self.assertSummary(lines[18].text, 'powerpc', '', ['board3'], ok=True)\n\n # The second line of errors[3] is a duplicate, so buildman will drop it\n expect = errors[3].rstrip().split('\\n')\n expect = [expect[0]] + expect[2:]\n self.assertEqual(lines[19].text, '+%s' %\n '\\n'.join(expect).replace('\\n', '\\n+'))\n\n self.assertEqual(lines[20].text, 'w-%s' %\n errors[2].rstrip().replace('\\n', '\\nw-'))\n\n self.assertEqual(lines[21].text, '06: %s' % commits[5][1])\n self.assertSummary(lines[22].text, 'sandbox', '', ['board4'], ok=True)\n\n # The second line of errors[3] is a duplicate, so buildman will drop it\n expect = errors[3].rstrip().split('\\n')\n expect = [expect[0]] + expect[2:]\n self.assertEqual(lines[23].text, '-%s' %\n '\\n'.join(expect).replace('\\n', '\\n-'))\n\n self.assertEqual(lines[24].text, 'w-%s' %\n errors[0].rstrip().replace('\\n', '\\nw-'))\n\n self.assertEqual(lines[25].text, '07: %s' % commits[6][1])\n self.assertSummary(lines[26].text, 'sandbox', '+', ['board4'])\n\n # Pick out the correct error lines\n expect_str = errors[4].rstrip().replace('%(basedir)s', '').split('\\n')\n expect = expect_str[3:8] + [expect_str[-1]]\n self.assertEqual(lines[27].text, '+%s' %\n '\\n'.join(expect).replace('\\n', '\\n+'))\n\n # Now the warnings lines\n expect = [expect_str[0]] + expect_str[10:12] + [expect_str[9]]\n self.assertEqual(lines[28].text, 'w+%s' %\n '\\n'.join(expect).replace('\\n', '\\nw+'))\n\n self.assertEqual(len(lines), 29)\n shutil.rmtree(base_dir)", "def print_report():\n with open(report_path) as f:\n report = json.load(f)\n\n tests = report['tests']\n\n num_passed = 0\n num_failed = 0\n failures = []\n ok = '\\033[32m' + 'ok' + '\\033[0m' # 
green 'ok'\n failed = '\\033[31m' + 'FAILED' + '\\033[0m' # red 'FAILED'\n for test in tests:\n name = os.path.relpath(test['filename'], samples_path)\n print('import', name, '... ', end='')\n if test['result'] == 'PASSED':\n print(ok, \"(%.4f s)\" % test['timeElapsed'])\n num_passed += 1\n else:\n print(failed)\n print(test['error'])\n num_failed += 1\n failures.append(name)\n\n if failures:\n print('\\nfailures:')\n for name in failures:\n print(' ', name)\n\n result = ok if num_failed == 0 else failed\n print(\n '\\ntest result: %s. %d passed; %d failed\\n' %\n (result, num_passed, num_failed)\n )\n\n exit_code = 0 if num_failed == 0 else 3\n sys.exit(exit_code)", "def pretty_results(test_results, show_failures_only):\n\n\t_results = '__________________________________________________________________________________\\n\\n'\n\t_results += '\t\t::::::: MXUnit Test Results ::::::: \\n'\n\ttests = json.loads(test_results)\n\t \n\tpassed = len( [ x for x in tests if x['TESTSTATUS']=='Passed'] )\n\tfailed = len( [ x for x in tests if x['TESTSTATUS']=='Failed'] )\n\terrors = len( [ x for x in tests if x['TESTSTATUS']=='Error'] )\n\t\n\ttotal_time = sum([ float(x['TIME']) for x in tests ])\n\t_results += '\t\tPassed: %s, Failed: %s, Errors: %s, Time: %sms\\n' % (passed,failed,errors,total_time)\n\t_results += '\t\tDate: %s\\n' % (datetime.datetime.now().strftime(\"%A, %B %d, %Y, %I:%M %p\"))\n\t_results += '__________________________________________________________________________________\\n\\n'\n\n\tif show_failures_only:\n\t\t_results += '\t\t\t\t *** Showing Failures Only *** \\n\\n'\n\t\ttests = [ _test for _test in tests if _test['TESTSTATUS']=='Failed' ]\n\n\tfor test in tests:\n\t\t_results += '\t%s.%s (%s) %sms\\n' % (test['COMPONENT'], test['TESTNAME'], test['TESTSTATUS'], test['TIME'] ) \n\t\t\n\t\tif( test['DEBUG'] ):\n\t\t\t_debug = test['DEBUG']\n\t\t\ti=0\n\t\t\tfor var in _debug:\n\t\t\t\tprint '%s = %s' % ( var, _debug[i] )\n\t\t\t\tif 'var' in var:\n\t\t\t\t\tvar_val = var['var']\n\t\t\t\telif 'VAR' in var:\n\t\t\t\t\tvar_val = var['VAR']\n\t\t\t\telse:\n\t\t\t\t\tvar_val = None\n\n\t\t\t\tif var_val != None:\n\t\t\t\t\t_results += \"\t\tDebug: \t%s \\n \" % var_val\n\n\t\tif( test['TESTSTATUS'] in ('Failed','Error') ):\n\t\t\t_results += '\t\tMessage: %s\\n' % test['ERROR']['Message']\n\t\t\t_results += '\t\tStackTrace: {\\n%s\\t\\t\\n\\t\\t}\\n' % pretty_print_stacktrace(test['ERROR']['StackTrace']) \n\n\t\t\n\t\t\t\n\t\t_results += '\\n|--------------------------------------------------------------------------------\\n'\n\t\n\t_results += '\\n__________________________________________________________________________________\\n\\n'\n\t_results += 'Test results: Passed=%s, Failed=%s, Errors=%s\\n' % (passed,failed,errors)\n\treturn _results", "def GenerateWaterfallReport(report_dict, fail_dict, waterfall_type, date,\n omit_failures):\n\n filename = 'waterfall_report.%s_waterfall.%s.txt' % (waterfall_type, date)\n\n date_string = ''\n date_list = report_dict['date']\n num_dates = len(date_list)\n i = 0\n for d in date_list:\n date_string += d\n if i < num_dates - 1:\n date_string += ', '\n i += 1\n\n if waterfall_type == 'main':\n report_list = WATERFALL_BUILDERS\n else:\n report_list = report_dict.keys()\n\n with open(filename, 'w') as out_file:\n # Write Report Header\n out_file.write('\\nStatus of %s Waterfall Builds from %s\\n\\n' %\n (waterfall_type, date_string))\n out_file.write(' '\n ' kernel kernel\\n')\n out_file.write(' Build bvt- bvt-cq '\n 'toolchain- security 
daily daily\\n')\n out_file.write(' status inline '\n ' tests regression benchmarks\\n')\n out_file.write(' [P/ F/ DR]* [P/ F /DR]* '\n '[P/ F/ DR]* [P/ F/ DR]* [P/ F/ DR]* [P/ F/ DR]*\\n\\n')\n\n # Write daily waterfall status section.\n for i in range(0, len(report_list)):\n builder = report_list[i]\n if builder == 'date':\n continue\n\n if builder not in report_dict:\n out_file.write('Unable to find information for %s.\\n\\n' % builder)\n continue\n\n build_dict = report_dict[builder]\n status = build_dict.get('build_status', 'bad')\n inline = build_dict.get('bvt-inline', '[??/ ?? /??]')\n cq = build_dict.get('bvt-cq', '[??/ ?? /??]')\n inline_color = build_dict.get('bvt-inline-color', '')\n cq_color = build_dict.get('bvt-cq-color', '')\n if 'x86' not in builder:\n toolchain = build_dict.get('toolchain-tests', '[??/ ?? /??]')\n security = build_dict.get('security', '[??/ ?? /??]')\n toolchain_color = build_dict.get('toolchain-tests-color', '')\n security_color = build_dict.get('security-color', '')\n if 'gcc' in builder:\n regression = build_dict.get('kernel_daily_regression', '[??/ ?? /??]')\n bench = build_dict.get('kernel_daily_benchmarks', '[??/ ?? /??]')\n regression_color = build_dict.get('kernel_daily_regression-color', '')\n bench_color = build_dict.get('kernel_daily_benchmarks-color', '')\n out_file.write(' %6s %6s'\n ' %6s %6s %6s %6s\\n' %\n (inline_color, cq_color, toolchain_color,\n security_color, regression_color, bench_color))\n out_file.write('%25s %3s %s %s %s %s %s %s\\n' % (builder, status,\n inline, cq,\n toolchain, security,\n regression, bench))\n else:\n out_file.write(' %6s %6s'\n ' %6s %6s\\n' % (inline_color, cq_color,\n toolchain_color,\n security_color))\n out_file.write('%25s %3s %s %s %s %s\\n' % (builder, status, inline,\n cq, toolchain, security))\n else:\n out_file.write(' %6s %6s\\n' %\n (inline_color, cq_color))\n out_file.write('%25s %3s %s %s\\n' % (builder, status, inline, cq))\n if 'build_link' in build_dict:\n out_file.write('%s\\n\\n' % build_dict['build_link'])\n\n out_file.write('\\n\\n*P = Number of tests in suite that Passed; F = '\n 'Number of tests in suite that Failed; DR = Number of tests'\n ' in suite that Didn\\'t Run.\\n')\n\n if omit_failures:\n print('Report generated in %s.' % filename)\n return filename\n\n # Write failure report section.\n out_file.write('\\n\\nSummary of Test Failures as of %s\\n\\n' % date_string)\n\n # We want to sort the errors and output them in order of the ones that occur\n # most often. So we have to collect the data about all of them, then sort\n # it.\n error_groups = []\n for suite in fail_dict:\n suite_dict = fail_dict[suite]\n if suite_dict:\n for test in suite_dict:\n test_dict = suite_dict[test]\n for err_msg in test_dict:\n err_list = test_dict[err_msg]\n sorted_list = sorted(err_list, key=lambda x: x[0], reverse=True)\n err_group = [len(sorted_list), suite, test, err_msg, sorted_list]\n error_groups.append(err_group)\n\n # Sort the errors by the number of errors of each type. 
Then output them in\n # order.\n sorted_errors = sorted(error_groups, key=lambda x: x[0], reverse=True)\n for i in range(0, len(sorted_errors)):\n err_group = sorted_errors[i]\n suite = err_group[1]\n test = err_group[2]\n err_msg = err_group[3]\n err_list = err_group[4]\n out_file.write('Suite: %s\\n' % suite)\n out_file.write(' %s (%d failures)\\n' % (test, len(err_list)))\n out_file.write(' (%s)\\n' % err_msg)\n for i in range(0, len(err_list)):\n err = err_list[i]\n out_file.write(' %s, %s, %s\\n' % (format_date(err[0]), err[1],\n err[2]))\n out_file.write('\\n')\n\n print('Report generated in %s.' % filename)\n return filename", "def reports_cli():", "def cli(ctx, **kwds):\n invalid = _validate_kwds(kwds)\n if invalid:\n ctx.exit(invalid)\n tool_description = tool_builder.build(**kwds)\n tool_builder.write_tool_description(ctx, tool_description, **kwds)" ]
[ "0.6395984", "0.63704044", "0.63287634", "0.6298978", "0.62303543", "0.62125254", "0.6170875", "0.6157971", "0.61302793", "0.6079714", "0.6063422", "0.60530716", "0.60408163", "0.60160196", "0.59748095", "0.5972561", "0.59694135", "0.59077525", "0.58958656", "0.58577126", "0.5854289", "0.585339", "0.5835953", "0.58258796", "0.5796938", "0.5753807", "0.5750584", "0.57431257", "0.57311785", "0.57274693" ]
0.6617242
0
Load a dataset into the Dataset object from the self.input_file
def _load_data(self): # This allows a simulated dataset to use the same constructor. if self.input_file is None: return logging.info(f"Loading data from file {self.input_file}") # Load the dataset. if os.path.isdir(self.input_file): self.data = get_matrix_from_mtx(self.input_file) else: self.data = get_matrix_from_h5(self.input_file)
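A minimal sketch of the same directory-versus-file dispatch, assuming scanpy's 10x readers as stand-ins for the `get_matrix_from_mtx` / `get_matrix_from_h5` helpers that the snippet above relies on (those helpers are not shown here, and the function name `load_counts` is hypothetical):

    import os
    import logging
    import scanpy as sc  # assumed dependency, used in place of the row's own helpers

    def load_counts(input_file):
        # Hypothetical standalone version of the loading step sketched above.
        if input_file is None:
            return None  # a simulated dataset can skip file loading entirely
        logging.info(f"Loading data from file {input_file}")
        if os.path.isdir(input_file):
            # Directory input: 10x-style Matrix Market output (matrix.mtx plus barcodes/features).
            adata = sc.read_10x_mtx(input_file)
        else:
            # Single-file input: CellRanger-style HDF5.
            adata = sc.read_10x_h5(input_file)
        return adata.X  # sparse count matrix, barcodes x genes

With real CellBender-style helpers the return value would differ (a dict of matrix plus barcode/gene arrays rather than a bare sparse matrix); the sketch only shows the dispatch itself.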
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_dataset(self):\n # Get all the files in the directory\n file_list = self.get_file_list()\n\n # Concatenate the data corresponding to a list of files\n data = self.concatenate_file_data(file_list)\n\n # Shuffle the data and create the training and the validation datasets\n data = self.shuffle_data_dictionary(data)\n self.training_dataset, self.validation_dataset = self.split_data_into_training_and_validation(data)", "def _load(self, dataset):\n raise NotImplementedError('Loader {} does not support loading datasets.'.format(self.type()))", "def load(\n self,\n input_context: Optional[tf.distribute.InputContext] = None\n ) -> tf.data.Dataset:\n pass", "def load_dataset(file_path):\n return Dataset.load(file_path)", "def load():\n filepath = dirname(abspath(__file__))\n##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####\n data = recfromtxt(open(filepath + '/spector.csv',\"rb\"), delimiter=\" \",\n names=True, dtype=float, usecols=(1,2,3,4))\n names = list(data.dtype.names)\n endog = array(data[names[3]], dtype=float)\n endog_name = names[3]\n exog = column_stack(data[i] for i in names[:3]).astype(float)\n exog_name = names[:3]\n dataset = Dataset(data=data, names=names, endog=endog, exog=exog,\n endog_name = endog_name, exog_name=exog_name)\n return dataset", "def load(self, dataset, model_dir):\n raise NotImplementedError", "def load_data(path_dataset):\n data = read_txt(path_dataset)[1:]\n return preprocess_data(data)", "def load_file(self, dset_type):\r\n path = './data/{0}.{1}'.format(self.name, dset_type)\r\n try:\r\n file_contents = np.genfromtxt(path, missing_values=0, skip_header=0,\r\n dtype=int, delimiter=\",\")\r\n self.labels[dset_type] = file_contents[:, 0]\r\n self.examples[dset_type] = file_contents[:, 1:]\r\n\r\n except RuntimeError:\r\n print('ERROR: Unable to load file ''{0}''. Check path and try again.'.format(path))", "def load_data(self, dataset, dataset_name):\n with open(dataset, \"r\", encoding=\"utf-8\") as f:\n self.data = json.load(f)\n self.dataset_name = dataset_name", "def load(self, path):\n\n missing_files = self.check_for_missing_files(path)\n\n if missing_files is not None:\n raise IOError('Invalid dataset of type {}: files {} not found at {}'.format(self.type(), ' '.join(missing_files), path))\n\n loading_dataset = dataset.Dataset(path, loader=self)\n\n self._load(loading_dataset)\n\n return loading_dataset", "def load_dataset(self, f_handle):\n # The file handle might be None in which case an exception is raised\n if f_handle is None:\n raise ValueError('unknown file')\n # Expects a file in a supported tabular data format.\n if not f_handle.is_tabular:\n raise ValueError('cannot create dataset from file \\'' + f_handle.name + '\\'')\n # Open the file as a csv file. Expects that the first row contains the\n # column names. 
Read dataset schema and dataset rows into two separate\n # lists.\n columns = []\n rows = []\n with f_handle.open() as csvfile:\n reader = csv.reader(csvfile, delimiter=f_handle.delimiter)\n for col_name in next(reader):\n columns.append(\n DatasetColumn(\n identifier=len(columns),\n name=col_name.strip()\n )\n )\n for row in reader:\n values = [cast(v.strip()) for v in row]\n rows.append(DatasetRow(identifier=len(rows), values=values))\n # Get unique identifier and create subfolder for the new dataset\n identifier = get_unique_identifier()\n dataset_dir = self.get_dataset_dir(identifier)\n os.makedirs(dataset_dir)\n # Write rows to data file\n data_file = os.path.join(dataset_dir, DATA_FILE)\n DefaultJsonDatasetReader(data_file).write(rows)\n # Create dataset an write descriptor to file\n dataset = FileSystemDatasetHandle(\n identifier=identifier,\n columns=columns,\n data_file=data_file,\n row_count=len(rows),\n max_row_id=len(rows) - 1\n )\n dataset.to_file(\n descriptor_file=os.path.join(dataset_dir, DESCRIPTOR_FILE)\n )\n return dataset", "def load():\n filepath = dirname(abspath(__file__))\n data = recfromtxt(filepath + '/scotvote.csv', delimiter=\",\",\n names=True, dtype=float, usecols=(1,2,3,4,5,6,7,8))\n names = list(data.dtype.names)\n endog = array(data[names[0]], dtype=float)\n endog_name = names[0]\n exog = column_stack(data[i] for i in names[1:]).astype(float)\n exog_name = names[1:]\n dataset = Dataset(data=data, names=names, endog=endog, exog=exog,\n endog_name = endog_name, exog_name=exog_name)\n return dataset", "def load_dataset(\n self,\n ):\n with xr.open_dataset(self._filepath) as fdata:\n out = fdata.assign_coords({\n 'nCells': np.arange(fdata.dims['nCells']),\n })\n if self.time is not None:\n out = out.assign_coords({\n 'Time': self.time,\n })\n if 'nVertLevels' in fdata.dims:\n out = out.assign_coords({\n 'nVertLevels': np.arange(fdata.dims['nVertLevels']),\n })\n if 'nVertLevelsP1' in fdata.dims:\n out = out.assign_coords({\n 'nVertLevelsP1': np.arange(fdata.dims['nVertLevelsP1']),\n })\n if 'nEdges' in fdata.dims:\n out = out.assign_coords({\n 'nEdges': np.arange(fdata.dims['nEdges']),\n })\n if 'nVertices' in fdata.dims:\n out = out.assign_coords({\n 'nVertices': np.arange(fdata.dims['nVertices']),\n })\n if 'nVertLevelsLES' in fdata.dims:\n out = out.assign_coords({\n 'nVertLevelsLES': np.arange(fdata.dims['nVertLevelsLES']),\n })\n return out", "def load_input(_input_filename=None):\n if _input_filename == SAMPLE_DATA:\n # Load Sample\n _input_filename = os.path.join(LOCAL, 'samples', _input_filename)\n # Validar path de entrada:\n if not os.path.exists(_input_filename):\n print ('No es posible localizar el archivo: {}.'.format(os.path.basename(_input_filename)))\n with open(_input_filename, 'rb') as tmp_f:\n tmp_lines = tmp_f.readlines()\n if len(tmp_lines) > 0:\n csv_headers = tmp_lines[0].replace('\\n', '').replace(' ', '').split(',')\n try:\n return pd.read_csv(_input_filename, skipinitialspace=True, usecols=csv_headers)\n except:\n pass", "def _load_dataset(self, data_path, augmentation, batch_size):\n if path.split(data_path)[1] == \"\":\n # Deal with edge case where there's a \"/\" at the end of the path.\n data_path = path.split(data_path)[0]\n\n if path.split(data_path)[1].endswith(\"training\"):\n dataset_name = \"training dataset\"\n else:\n dataset_name = \"validation dataset\"\n\n start_time = time.time()\n self._update_status(\"Loading {}.\".format(dataset_name))\n\n\n dataset = MapillaryDataset(data_path, augmentation, self.iaa)\n data_loader 
= DataLoader(dataset,\n batch_size,\n shuffle=True)\n\n self._update_status(\"{} loaded. ({} ms)\".format(\n dataset_name.capitalize(),\n int((time.time() - start_time) * 1000)))\n\n return data_loader", "def loadData(self, file):\n self.data = batchImport(file, self.ps)", "def read_data(feature_file, label_file):", "def prepare_dataset(fpath):\n raise NotImplementedError", "def load(self, file):\n with open(file) as file:\n self.dataset = [line.strip() for line in file]\n\n return self.dataset", "def load_data(self):\n with open('data/fordTrain.csv') as f:\n data = csv.reader(f, delimiter=',')\n train = [x for i, x in enumerate(data) if i > 0] \n # Extract features and target variable separately\n trainx = [x[3:] for x in train]\n trainy = [x[2] for x in train]\n\n with open('data/fordTest.csv') as f:\n data = csv.reader(f, delimiter=',')\n testx = [x[3:] for i, x in enumerate(data) if i > 0] \n\n with open('data/Solution.csv') as f:\n data = csv.reader(f, delimiter=',')\n testy = [x[2] for i, x in enumerate(data) if i > 0] \n\n # Extract features and target variable, convert to numpy array\n trainx = np.asarray(trainx, dtype=np.float32)\n trainy = np.asarray(trainy, dtype=np.int8)\n testx = np.asarray(testx, dtype=np.float32)\n testy = np.asarray(testy, dtype=np.int8)\n\n # Return training and test sets\n trainSet = Dataset(trainx, trainy)\n testSet = Dataset(testx, testy)\n return trainSet, testSet", "def load(self):\r\n\r\n #Open the dataset read only using GDAL\r\n dataset = gdal.Open(self.inputds, gdal.GA_ReadOnly)\r\n \r\n return dataset\r\n \r\n\r\n #print \"Failed to open %s. Is it a GDAL supported format?\" %(self.inputds)\r", "def load(self, filename) :\r\n \r\n # determine filename\r\n dir = os.path.dirname(__file__)\r\n f = os.path.join(dir, '..', 'data', filename)\r\n \r\n # load data\r\n with open(f, 'r') as fid :\r\n data = np.loadtxt(fid, delimiter=\",\")\r\n \r\n # separate features and labels\r\n self.X = data[:,:-1]\r\n self.y = data[:,-1]", "def load(self, filename) :\n\n # determine filename\n dir = os.path.dirname('__file__')\n f = os.path.join(dir, '..', 'data', filename)\n\n # load data\n with open(f, 'r') as fid :\n data = np.loadtxt(fid, delimiter=\",\")\n\n # separate features and labels\n self.X = data[:,:-1]\n self.y = data[:,-1]", "def _read_dataset(self, dataset_path):\n dataset = pd.read_pickle(dataset_path)\n return dataset", "def load(self, input):", "def import_data(path_dataset):\n\n X=np.array(pd.read_hdf(path_dataset))\n\n print('Data set shape:',np.shape(X))\n print('#####################################')\n \n return X", "def data_input(self):\n path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'data'))\n if not os.path.isfile('{0}/{1}.csv'.format(path, self.data_file)):\n print 'Error: Dataset file is not exist.'\n exit()\n # Uplead Dataset.csv file.\n f = open('{0}/{1}.csv'.format(path, self.data_file), 'r')\n print 'Now uploading dataset File.....'\n f = list(f)\n # The Dataset contains heading, number of lines - heading\n self.number_of_VOCs = sum(1 for row in f)-1\n # Count number of columns, last column's value is empty, that is why -1.\n self.number_of_columns = len(f[0].split(',')) -1\n self.first_m_z = int(f[0].split(',')[3]) # find the first m/z value.\n self.last_m_z = int(f[0].split(',')[-2]) # find the last m/z value.\n print 'dataset includes ', self.number_of_VOCs, 'VOCs in all samples '\n print ('dataset includes ', self.number_of_columns, ' Columns, ',\n 'm/z values start from ', self.first_m_z,\n 'and end ', 
self.last_m_z)\n # Create a matrix with a shape of (number_of_VOCs X number_of_columns) filled with zeros.\n self.dataset = np.zeros((self.number_of_VOCs,\n self.number_of_columns))\n for line in range(1, len(f)):\n if int(float(f[line].strip().split(',')[0])) not in self.loaded_samples:\n self.loaded_samples.append(int(float(f[line].strip().split(',')[0])))\n for column in range(self.number_of_columns):\n self.dataset[line-1][column] = int(float(f[line].strip().split(',')[column]))", "def load_dataset(file_name, model_ver):\n\n print 'Loading dataset ...'\n\n if model_ver == 'dmspline':\n file_path = join(DATA_DIR, file_name)\n id_field = Field(sequential=False, use_vocab=False, dtype=torch.int)\n text_field = Field(pad_token=None, tokenize=_tokenize_str)\n attr_field = Field(sequential=False, use_vocab=False, dtype=torch.float)\n standardzed_attr_field = Field(sequential=False, use_vocab=False, dtype=torch.float)\n\n dataset = TabularDataset(\n path=file_path,\n format='csv',\n fields=[('attr', attr_field), ('id', id_field), ('standardized_attr', standardzed_attr_field), ('text', text_field)],\n skip_header=True)\n\n else:\n file_path = join(DATA_DIR, file_name)\n id_field = Field(sequential=False, use_vocab=False, dtype=torch.int)\n text_field = Field(pad_token=None, tokenize=_tokenize_str)\n\n dataset = TabularDataset(\n path=file_path,\n format='csv',\n fields=[('id', id_field), ('text', text_field)],\n skip_header=True)\n\n text_field.build_vocab(dataset, min_freq=10)\n return dataset", "def load_datapair(self, ds):\n raise NotImplementedError(\"Define this in your derived checker class\")", "def load_dataset(self, split='train'):\n path = self.args.data\n if not os.path.exists(path):\n raise FileNotFoundError(\n \"Dataset not found: ({})\".format(path)\n )\n\n files = os.listdir(path) if os.path.isdir(path) else [path]\n files = [f for f in files if split in f]\n assert len(files) > 0\n\n self.datasets[split] = CombineBertData(files)\n\n\n \"\"\"\n dataset = data_utils.load_indexed_dataset(\n split_path, self.dictionary, self.args.dataset_impl, combine=combine\n )\n if dataset is None:\n raise FileNotFoundError(\n \"Dataset not found: {} ({})\".format(split, split_path)\n )\n\n dataset = TokenBlockDataset(\n dataset,\n dataset.sizes,\n self.args.tokens_per_sample,\n pad=self.dictionary.pad(),\n eos=self.dictionary.eos(),\n break_mode=self.args.sample_break_mode,\n include_targets=True,\n )\n\n add_eos_for_other_targets = (\n self.args.sample_break_mode is not None\n and self.args.sample_break_mode != \"none\"\n )\n\n self.datasets[split] = MonolingualDataset(\n dataset,\n dataset.sizes,\n self.dictionary,\n self.output_dictionary,\n add_eos_for_other_targets=add_eos_for_other_targets,\n shuffle=True,\n targets=self.targets,\n add_bos_token=self.args.add_bos_token,\n )\n \"\"\"" ]
[ "0.70277363", "0.69790083", "0.6945343", "0.6936825", "0.6823342", "0.6752945", "0.6737796", "0.67372847", "0.6703015", "0.6688588", "0.66557074", "0.65531415", "0.6552921", "0.64937913", "0.64900523", "0.64452887", "0.64417726", "0.6441511", "0.6436873", "0.64123297", "0.640442", "0.6401931", "0.6385811", "0.6378019", "0.6376438", "0.6368928", "0.6368306", "0.6366822", "0.6363562", "0.6351259" ]
0.7132409
0
Trim the dataset for inference, choosing barcodes and genes to use. Sets the values of self.analyzed_barcode_inds, and self.empty_barcode_inds, which are used throughout training.
def _trim_dataset_for_analysis(self, low_UMI_count_cutoff: int = 30, num_transition_barcodes: Union[int, None] = 7000, gene_blacklist: List[int] = []): logging.info("Trimming dataset for inference.") # Get data matrix and barcode order that sorts barcodes by UMI count. matrix = self.data['matrix'] umi_counts = np.array(matrix.sum(axis=1)).squeeze() umi_count_order = np.argsort(umi_counts)[::-1] # Initially set the default to be the whole dataset. self.analyzed_barcode_inds = np.arange(start=0, stop=matrix.shape[0]) self.analyzed_gene_inds = np.arange(start=0, stop=matrix.shape[1]) # Expected cells must not exceed nonzero count barcodes. num_nonzero_barcodes = np.sum(umi_counts > 0).item() n_cells = min(self.priors['n_cells'], num_nonzero_barcodes) try: # Choose which genes to use based on their having nonzero counts. # (All barcodes must be included so that inference can generalize.) gene_counts_per_barcode = np.array(matrix.sum(axis=0)).squeeze() self.analyzed_gene_inds = np.where(gene_counts_per_barcode > 0)[0].astype(dtype=int) if len(gene_blacklist) > 0: # Ensure genes on the blacklist are excluded. self.analyzed_gene_inds = np.array([g for g in self.analyzed_gene_inds if g not in gene_blacklist]) except IndexError: logging.warning("Something went wrong trying to trim genes.") # Estimate priors on cell size and 'empty' droplet size. self.priors['cell_counts'], self.priors['empty_counts'] = \ get_d_priors_from_dataset(self) # After gene trimming # If running the simple model, just use the expected cells, no more. if self.model_name == "simple": self.analyzed_barcode_inds = np.array(umi_count_order[:n_cells], dtype=int) # If not using the simple model, include empty droplets. else: try: # Get the cell barcodes. cell_barcodes = umi_count_order[:n_cells] # Set the low UMI count cutoff to be the greater of either # the user input value, or an empirically-derived value. empirical_low_UMI = int(self.priors['empty_counts'] * 0.8) low_UMI_count_cutoff = max(low_UMI_count_cutoff, empirical_low_UMI) logging.info(f"Excluding barcodes with counts below " f"{low_UMI_count_cutoff}") # See how many barcodes there are to work with total. num_barcodes_above_umi_cutoff = \ np.sum(umi_counts > low_UMI_count_cutoff).item() # Get a number of transition-region barcodes. num = min(num_transition_barcodes, num_barcodes_above_umi_cutoff - cell_barcodes.size) num = max(0, num) transition_barcodes = umi_count_order[n_cells: (n_cells + num)] # Use the cell barcodes and transition barcodes for analysis. self.analyzed_barcode_inds = np.concatenate(( cell_barcodes, transition_barcodes)).astype(dtype=int) # Identify probable empty droplet barcodes. if num < num_transition_barcodes: # This means we already used all the barcodes. empty_droplet_barcodes = np.array([]) else: # Decide which empty barcodes to include. 
empty_droplet_sorted_barcode_inds = \ np.arange(n_cells + num, num_barcodes_above_umi_cutoff, dtype=int) # The entire range # empty_droplet_sorted_barcode_inds = \ # np.arange(n_cells + num, # min(num_barcodes_above_umi_cutoff # - cell_barcodes.size - num, # n_cells + num + num_empty_droplets), # dtype=int) empty_droplet_barcodes = \ umi_count_order[empty_droplet_sorted_barcode_inds] self.empty_barcode_inds = empty_droplet_barcodes.astype(dtype=int) logging.info(f"Using {cell_barcodes.size} probable cell barcodes, " f"plus an additional {transition_barcodes.size} barcodes, " f"and {empty_droplet_barcodes.size} empty droplets.") except IndexError: logging.warning("Something went wrong trying to trim barcodes.") self.is_trimmed = True
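As a rough illustration of the barcode-ranking logic described in this snippet (the function name and exact bookkeeping are illustrative, not taken from the row): barcodes are sorted by total UMI count, the top `n_cells` are kept as probable cells, the next block as transition-region barcodes, and the remaining barcodes above the low-count cutoff are treated as empty droplets.

    import numpy as np

    def split_barcodes(umi_counts, n_cells, num_transition, low_umi_cutoff):
        # Rank barcodes from highest to lowest total UMI count.
        order = np.argsort(umi_counts)[::-1]
        num_above_cutoff = int(np.sum(umi_counts > low_umi_cutoff))
        cells = order[:n_cells]
        transition = order[n_cells:n_cells + num_transition]
        # Everything else above the cutoff counts as a probable empty droplet.
        empties = order[n_cells + num_transition:num_above_cutoff]
        analyzed = np.concatenate((cells, transition)).astype(int)
        return analyzed, empties.astype(int)

Here `analyzed` plays the role of the analyzed barcode indices and `empties` the empty-droplet indices; prior estimation, gene trimming, and the blacklist handling from the row above are deliberately left out.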
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trim(self, trim_samples):\n n = len(self.timestamps)\n self.timestamps = self.timestamps[:n - trim_samples]\n self.labels = self.labels[:n - trim_samples]\n self.emg = [x[:n - trim_samples] for x in self.emg]\n self.accel = [x[:n - trim_samples] for x in self.accel]\n self.gyro = [x[:n - trim_samples] for x in self.gyro]\n self.orient = [x[:n - trim_samples] for x in self.orient]", "def _trimRecords(self):\n self.highpassrecords.resize(self.nhighpassrecords, refcheck=False)\n self.lowpassrecords.resize(self.nlowpassrecords, refcheck=False)\n self.digitalsvalrecords.resize(self.ndigitalsvalrecords, refcheck=False)\n # cleanup by deleting any struct arrays of len 0\n for recname in ('highpassrecords', 'lowpassrecords', 'digitalsvalrecords'):\n if len(self.__getattribute__(recname)) == 0:\n self.__delattr__(recname)", "def test_preprocess_variable_length_barcodes(self):\r\n\r\n # Should discard all reads due to sequence length being too short\r\n\r\n fasta_files = [self.sample_fasta_file]\r\n qual_files = [self.sample_qual_file]\r\n mapping_file = self.sample_mapping_file_var_length\r\n barcode_type = \"variable_length\"\r\n min_seq_len = 200\r\n max_seq_len = 1000\r\n min_qual_score = 25\r\n starting_ix = 1\r\n keep_primer = False\r\n max_ambig = 0\r\n max_primer_mm = 1\r\n trim_seq_len = True\r\n dir_prefix = self.output_dir\r\n max_bc_errors = 2\r\n max_homopolymer = 4\r\n retain_unassigned_reads = False\r\n keep_barcode = False\r\n attempt_bc_correction = True\r\n qual_score_window = 0\r\n disable_primer_check = False\r\n reverse_primers = 'disable'\r\n record_qual_scores = False\r\n discard_bad_windows = False\r\n median_length_filtering = None\r\n added_demultiplex_field = None\r\n\r\n preprocess(fasta_files,\r\n qual_files,\r\n mapping_file,\r\n barcode_type,\r\n min_seq_len,\r\n max_seq_len,\r\n min_qual_score,\r\n starting_ix,\r\n keep_primer,\r\n max_ambig,\r\n max_primer_mm,\r\n trim_seq_len,\r\n dir_prefix,\r\n max_bc_errors,\r\n max_homopolymer,\r\n retain_unassigned_reads,\r\n keep_barcode,\r\n attempt_bc_correction,\r\n qual_score_window,\r\n disable_primer_check,\r\n reverse_primers,\r\n record_qual_scores,\r\n discard_bad_windows,\r\n median_length_filtering,\r\n added_demultiplex_field)\r\n\r\n output_seqs = open(dir_prefix + \"seqs.fna\", \"U\")\r\n output_log = open(dir_prefix + \"split_library_log.txt\", \"U\")\r\n output_histograms = open(dir_prefix + \"histograms.txt\", \"U\")\r\n\r\n actual_seqs = [line for line in output_seqs]\r\n actual_log = [line for line in output_log]\r\n actual_histograms = [line for line in output_histograms]\r\n\r\n expected_seqs = []\r\n expected_log = [\r\n 'Number raw input seqs\\t6\\n',\r\n '\\n',\r\n 'Length outside bounds of 200 and 1000\\t6\\n',\r\n 'Num ambiguous bases exceeds limit of 0\\t0\\n',\r\n 'Missing Qual Score\\t0\\n',\r\n 'Mean qual score below minimum of 25\\t0\\n',\r\n 'Max homopolymer run exceeds limit of 4\\t0\\n',\r\n 'Num mismatches in primer exceeds limit of 1: 0\\n',\r\n '\\n',\r\n 'Sequence length details for all sequences passing quality filters:\\n',\r\n 'No sequences passed quality filters for writing.\\n',\r\n '\\n',\r\n 'Barcodes corrected/not\\t0/0\\n',\r\n 'Uncorrected barcodes will not be written to the output fasta file.\\n',\r\n 'Corrected barcodes will be written with the appropriate barcode category.\\n',\r\n 'Corrected but unassigned sequences will not be written unless --retain_unassigned_reads is enabled.\\n',\r\n '\\n',\r\n 'Total valid barcodes that are not in mapping file\\t0\\n',\r\n 
'Sequences associated with valid barcodes that are not in the mapping file will not be written.\\n',\r\n '\\n',\r\n 'Barcodes in mapping file\\n',\r\n 'Sample\\tSequence Count\\tBarcode\\n',\r\n 's2\\t0\\tAGAGTCCTGAGC\\n',\r\n 's1\\t0\\tACACATGTCTA\\n',\r\n 's3\\t0\\tAACTGTGCGTACG\\n',\r\n '\\n',\r\n 'Total number seqs written\\t0']\r\n expected_histograms = [\r\n '# bins raw sequence lengths, length of sequences that pass quality filters before processing, and lengths of sequences that pass quality filters post processing.\\n',\r\n 'Length\\tRaw\\tBefore\\tAfter\\n',\r\n '20\\t2\\t0\\t0\\n',\r\n '30\\t4\\t0\\t0']\r\n\r\n self.assertEqual(actual_seqs, expected_seqs)\r\n self.assertEqual(actual_log, expected_log)\r\n self.assertEqual(actual_histograms, expected_histograms)", "def _clean_data(self, dataset):\n dataset.dropna(inplace=True)\n # Problem: handle missing data (in a different way), noisy data, inconsistent data", "def reset(self, dataset):\n assert dataset, 'Groundtruth should not be empty.'\n assert isinstance(dataset,\n dict), 'annotation file format {} not supported'.format(\n type(dataset))\n self.anns, self.cats, self.imgs = dict(), dict(), dict()\n self.dataset = copy.deepcopy(dataset)\n self.createIndex()", "def _common_preprocess(self, data):\n\n data = data.drop('id', axis=1) \n data = data.drop(['17', '488', 'B01AF', 'H01AB'], axis=1, errors='ignore')\n\n # drop age outliers\n idx = data[(data['age'] > 99)].index\n data = data.drop(idx)\n\n # drop rows with CKD\n idx = data[((data['585'] != 0) | (data['586'] != 0)) &\n (data['ckd'] == 0)].index\n data = data.drop(idx)\n data = data.drop(['585', '586'], axis=1)\n\n return data", "def trim_srna_sample(data):\n in_file = data[\"files\"][0]\n names = data[\"rgnames\"]['sample']\n work_dir = os.path.join(dd.get_work_dir(data), \"trimmed\")\n out_dir = os.path.join(work_dir, names)\n utils.safe_makedir(out_dir)\n out_file = replace_directory(append_stem(in_file, \".clean\"), out_dir)\n trim_reads = data[\"config\"][\"algorithm\"].get(\"trim_reads\", True)\n if utils.file_exists(out_file):\n data[\"clean_fastq\"] = out_file\n data[\"collapse\"] = _collapse(data[\"clean_fastq\"])\n data[\"size_stats\"] = _summary(data['collapse'])\n return [[data]]\n\n adapter = dd.get_adapters(data)\n if trim_reads and not adapter and error_dnapi:\n raise ValueError(error_dnapi)\n adapters = adapter if adapter else _dnapi_prediction(in_file)\n times = \"\" if len(adapters) == 1 else \"--times %s\" % len(adapters)\n if trim_reads and adapters:\n adapter_cmd = \" \".join(map(lambda x: \"-a \" + x, adapters))\n out_noadapter_file = replace_directory(append_stem(in_file, \".fragments\"), out_dir)\n out_short_file = replace_directory(append_stem(in_file, \".short\"), out_dir)\n log_out = os.path.join(out_dir, \"%s.log\" % names)\n atropos = _get_atropos()\n options = \" \".join(data.get('resources', {}).get('atropos', {}).get(\"options\", \"\"))\n cores = (\"--threads %s\" % dd.get_num_cores(data) if dd.get_num_cores(data) > 1 else \"\")\n if \" \".join(data.get('resources', {}).get('cutadapt', {}).get(\"options\", \"\")):\n raise ValueError(\"Atropos is now used, but cutadapt options found in YAML file.\"\n \"See https://atropos.readthedocs.io/en/latest/\")\n cmd = _cmd_atropos()\n if not utils.file_exists(out_file):\n with file_transaction(out_file) as tx_out_file:\n do.run(cmd.format(**locals()), \"remove adapter for %s\" % names)\n if utils.file_exists(log_out):\n content = open(log_out).read().replace(out_short_file, names)\n open(log_out, 
'w').write(content)\n if options:\n in_file = append_stem(tx_out_file, \".tmp\")\n utils.move_safe(tx_out_file, in_file)\n cmd = \"{atropos} {cores} {options} -se {in_file} -o {tx_out_file} -m 17\"\n do.run(cmd.format(**locals()), \"cutadapt with this %s for %s\" %(options, names))\n else:\n if not trim_reads:\n logger.debug(\"Skip trimming for: %s\" % names)\n elif not adapters:\n logger.info(\"No adapter founds in %s, this is an issue related\"\n \" to no small RNA enrichment in your sample.\" % names)\n symlink_plus(in_file, out_file)\n data[\"clean_fastq\"] = out_file\n data[\"collapse\"] = _collapse(data[\"clean_fastq\"])\n data[\"size_stats\"] = _summary(data['collapse'])\n return [[data]]", "def clean_data(self):\n data_clean = []\n for item in self.data:\n if int(item[2]) >= self.seq_length and int(item[2]) <= self.max_frames:# and item[1] in self.classes:\n data_clean.append(item)\n\n return data_clean", "def clean(self):\n for i in range(len(self.asteroid_type) - 1, -1, -1):\n x, y = self.get_coords(self.asteroid_type[i])\n if x < -self.gap:\n self.del_asteroid(i)", "def check_barcode_lens(self):\n barcode_lens = set(map(len, self.settings['barcodes']))\n if 1 != len(barcode_lens):\n raise ValueError('all barcodes must be the same length')\n self.barcode_len = barcode_lens.pop()\n self.settings['barcode_len'] = self.barcode_len", "def test_preprocess(self):\r\n\r\n # Should discard all reads due to sequence length being too short\r\n\r\n fasta_files = [self.sample_fasta_file]\r\n qual_files = [self.sample_qual_file]\r\n mapping_file = self.sample_mapping_file\r\n barcode_type = \"golay_12\"\r\n min_seq_len = 200\r\n max_seq_len = 1000\r\n min_qual_score = 25\r\n starting_ix = 1\r\n keep_primer = False\r\n max_ambig = 0\r\n max_primer_mm = 1\r\n trim_seq_len = True\r\n dir_prefix = self.output_dir\r\n max_bc_errors = 2\r\n max_homopolymer = 4\r\n retain_unassigned_reads = False\r\n keep_barcode = False\r\n attempt_bc_correction = True\r\n qual_score_window = 0\r\n disable_primer_check = False\r\n reverse_primers = 'disable'\r\n record_qual_scores = False\r\n discard_bad_windows = False\r\n median_length_filtering = None\r\n added_demultiplex_field = None\r\n\r\n preprocess(fasta_files,\r\n qual_files,\r\n mapping_file,\r\n barcode_type,\r\n min_seq_len,\r\n max_seq_len,\r\n min_qual_score,\r\n starting_ix,\r\n keep_primer,\r\n max_ambig,\r\n max_primer_mm,\r\n trim_seq_len,\r\n dir_prefix,\r\n max_bc_errors,\r\n max_homopolymer,\r\n retain_unassigned_reads,\r\n keep_barcode,\r\n attempt_bc_correction,\r\n qual_score_window,\r\n disable_primer_check,\r\n reverse_primers,\r\n record_qual_scores,\r\n discard_bad_windows,\r\n median_length_filtering,\r\n added_demultiplex_field)\r\n\r\n output_seqs = open(dir_prefix + \"seqs.fna\", \"U\")\r\n output_log = open(dir_prefix + \"split_library_log.txt\", \"U\")\r\n output_histograms = open(dir_prefix + \"histograms.txt\", \"U\")\r\n\r\n actual_seqs = [line for line in output_seqs]\r\n actual_log = [line for line in output_log]\r\n actual_histograms = [line for line in output_histograms]\r\n\r\n expected_seqs = []\r\n expected_log = [\r\n 'Number raw input seqs\\t6\\n',\r\n '\\n',\r\n 'Length outside bounds of 200 and 1000\\t6\\n',\r\n 'Num ambiguous bases exceeds limit of 0\\t0\\n',\r\n 'Missing Qual Score\\t0\\n',\r\n 'Mean qual score below minimum of 25\\t0\\n',\r\n 'Max homopolymer run exceeds limit of 4\\t0\\n',\r\n 'Num mismatches in primer exceeds limit of 1: 0\\n',\r\n '\\n',\r\n 'Sequence length details for all sequences passing 
quality filters:\\n',\r\n 'No sequences passed quality filters for writing.\\n',\r\n '\\n',\r\n 'Barcodes corrected/not\\t0/0\\n',\r\n 'Uncorrected barcodes will not be written to the output fasta file.\\n',\r\n 'Corrected barcodes will be written with the appropriate barcode category.\\n',\r\n 'Corrected but unassigned sequences will not be written unless --retain_unassigned_reads is enabled.\\n',\r\n '\\n',\r\n 'Total valid barcodes that are not in mapping file\\t0\\n',\r\n 'Sequences associated with valid barcodes that are not in the mapping file will not be written.\\n',\r\n '\\n',\r\n 'Barcodes in mapping file\\n',\r\n 'Sample\\tSequence Count\\tBarcode\\n',\r\n 's2\\t0\\tAGAGTCCTGAGC\\n',\r\n 's1\\t0\\tACACATGTCTAC\\n',\r\n 's3\\t0\\tAACTGTGCGTAC\\n',\r\n '\\n',\r\n 'Total number seqs written\\t0']\r\n expected_histograms = [\r\n '# bins raw sequence lengths, length of sequences that pass quality filters before processing, and lengths of sequences that pass quality filters post processing.\\n',\r\n 'Length\\tRaw\\tBefore\\tAfter\\n',\r\n '20\\t2\\t0\\t0\\n',\r\n '30\\t4\\t0\\t0']\r\n\r\n self.assertEqual(actual_seqs, expected_seqs)\r\n self.assertEqual(actual_log, expected_log)\r\n self.assertEqual(actual_histograms, expected_histograms)\r\n\r\n # With minimal length at 5, should retain 4 sequences\r\n\r\n fasta_files = [self.sample_fasta_file]\r\n qual_files = [self.sample_qual_file]\r\n mapping_file = self.sample_mapping_file\r\n barcode_type = \"golay_12\"\r\n min_seq_len = 5\r\n max_seq_len = 1000\r\n min_qual_score = 25\r\n starting_ix = 1\r\n keep_primer = False\r\n max_ambig = 0\r\n max_primer_mm = 0\r\n trim_seq_len = False\r\n dir_prefix = self.output_dir\r\n max_bc_errors = 2\r\n max_homopolymer = 4\r\n retain_unassigned_reads = False\r\n keep_barcode = False\r\n attempt_bc_correction = True\r\n qual_score_window = 0\r\n disable_primer_check = False\r\n reverse_primers = 'disable'\r\n record_qual_scores = False\r\n discard_bad_windows = False\r\n median_length_filtering = None\r\n added_demultiplex_field = None\r\n\r\n preprocess(fasta_files,\r\n qual_files,\r\n mapping_file,\r\n barcode_type,\r\n min_seq_len,\r\n max_seq_len,\r\n min_qual_score,\r\n starting_ix,\r\n keep_primer,\r\n max_ambig,\r\n max_primer_mm,\r\n trim_seq_len,\r\n dir_prefix,\r\n max_bc_errors,\r\n max_homopolymer,\r\n retain_unassigned_reads,\r\n keep_barcode,\r\n attempt_bc_correction,\r\n qual_score_window,\r\n disable_primer_check,\r\n reverse_primers,\r\n record_qual_scores,\r\n discard_bad_windows,\r\n median_length_filtering,\r\n added_demultiplex_field)\r\n\r\n output_seqs = open(dir_prefix + \"seqs.fna\", \"U\")\r\n output_log = open(dir_prefix + \"split_library_log.txt\", \"U\")\r\n output_histograms = open(dir_prefix + \"histograms.txt\", \"U\")\r\n\r\n actual_seqs = [line for line in output_seqs]\r\n actual_log = [line for line in output_log]\r\n actual_histograms = [line for line in output_histograms]\r\n\r\n expected_seqs = [\r\n '>s1_1 a orig_bc=ACACATGTCTAC new_bc=ACACATGTCTAC bc_diffs=0\\n',\r\n 'CCCTTATATATATAT\\n',\r\n '>s2_2 b orig_bc=AGAGTCCTGAGC new_bc=AGAGTCCTGAGC bc_diffs=0\\n',\r\n 'CCCTTTCCA\\n',\r\n '>s3_3 c orig_bc=AACTGTGCGTAC new_bc=AACTGTGCGTAC bc_diffs=0\\n',\r\n 'AACCGGCCGGTT\\n',\r\n '>s1_4 d orig_bc=ACTCATGTCTAC new_bc=ACACATGTCTAC bc_diffs=1\\n',\r\n 'CCCTTACTATATAT\\n']\r\n expected_log = [\r\n 'Number raw input seqs\\t6\\n',\r\n '\\n',\r\n 'Length outside bounds of 5 and 1000\\t0\\n',\r\n 'Num ambiguous bases exceeds limit of 0\\t0\\n',\r\n 'Missing Qual 
Score\\t0\\n',\r\n 'Mean qual score below minimum of 25\\t0\\n',\r\n 'Max homopolymer run exceeds limit of 4\\t0\\n',\r\n 'Num mismatches in primer exceeds limit of 0: 2\\n',\r\n '\\n',\r\n 'Sequence length details for all sequences passing quality filters:\\n',\r\n 'Raw len min/max/avg\\t29.0/35.0/32.5\\n',\r\n 'Wrote len min/max/avg\\t9.0/15.0/12.5\\n',\r\n '\\n',\r\n 'Barcodes corrected/not\\t1/0\\n',\r\n 'Uncorrected barcodes will not be written to the output fasta file.\\n',\r\n 'Corrected barcodes will be written with the appropriate barcode category.\\n',\r\n 'Corrected but unassigned sequences will not be written unless --retain_unassigned_reads is enabled.\\n',\r\n '\\n',\r\n 'Total valid barcodes that are not in mapping file\\t0\\n',\r\n 'Sequences associated with valid barcodes that are not in the mapping file will not be written.\\n',\r\n '\\n',\r\n 'Barcodes in mapping file\\n',\r\n 'Num Samples\\t3\\n',\r\n 'Sample ct min/max/mean: 1 / 2 / 1.33\\n',\r\n 'Sample\\tSequence Count\\tBarcode\\n',\r\n 's1\\t2\\tACACATGTCTAC\\n',\r\n 's2\\t1\\tAGAGTCCTGAGC\\n',\r\n 's3\\t1\\tAACTGTGCGTAC\\n',\r\n '\\n',\r\n 'Total number seqs written\\t4']\r\n expected_histograms = [\r\n '# bins raw sequence lengths, length of sequences that pass quality filters before processing, and lengths of sequences that pass quality filters post processing.\\n',\r\n 'Length\\tRaw\\tBefore\\tAfter\\n',\r\n '0\\t0\\t0\\t1\\n',\r\n '10\\t0\\t0\\t3\\n',\r\n '20\\t2\\t1\\t0\\n',\r\n '30\\t4\\t3\\t0']\r\n\r\n self.assertEqual(actual_seqs, expected_seqs)\r\n self.assertEqual(actual_log, expected_log)\r\n self.assertEqual(actual_histograms, expected_histograms)\r\n\r\n # Added sliding window should discard read \"b\"\r\n\r\n fasta_files = [self.sample_fasta_file]\r\n qual_files = [self.sample_qual_file]\r\n mapping_file = self.sample_mapping_file\r\n barcode_type = \"golay_12\"\r\n min_seq_len = 5\r\n max_seq_len = 1000\r\n min_qual_score = 22\r\n starting_ix = 1\r\n keep_primer = False\r\n max_ambig = 0\r\n max_primer_mm = 0\r\n trim_seq_len = False\r\n dir_prefix = self.output_dir\r\n max_bc_errors = 2\r\n max_homopolymer = 4\r\n retain_unassigned_reads = False\r\n keep_barcode = False\r\n attempt_bc_correction = True\r\n qual_score_window = 3\r\n disable_primer_check = False\r\n reverse_primers = 'disable'\r\n record_qual_scores = False\r\n discard_bad_windows = True\r\n median_length_filtering = None\r\n added_demultiplex_field = None\r\n reverse_primer_mismatches = 0\r\n\r\n preprocess(fasta_files,\r\n qual_files,\r\n mapping_file,\r\n barcode_type,\r\n min_seq_len,\r\n max_seq_len,\r\n min_qual_score,\r\n starting_ix,\r\n keep_primer,\r\n max_ambig,\r\n max_primer_mm,\r\n trim_seq_len,\r\n dir_prefix,\r\n max_bc_errors,\r\n max_homopolymer,\r\n retain_unassigned_reads,\r\n keep_barcode,\r\n attempt_bc_correction,\r\n qual_score_window,\r\n disable_primer_check,\r\n reverse_primers,\r\n reverse_primer_mismatches,\r\n record_qual_scores,\r\n discard_bad_windows,\r\n median_length_filtering,\r\n added_demultiplex_field)\r\n\r\n output_seqs = open(dir_prefix + \"seqs.fna\", \"U\")\r\n output_log = open(dir_prefix + \"split_library_log.txt\", \"U\")\r\n output_histograms = open(dir_prefix + \"histograms.txt\", \"U\")\r\n\r\n actual_seqs = [line for line in output_seqs]\r\n actual_log = [line for line in output_log]\r\n actual_histograms = [line for line in output_histograms]\r\n\r\n expected_seqs = [\r\n '>s1_1 a orig_bc=ACACATGTCTAC new_bc=ACACATGTCTAC bc_diffs=0\\n',\r\n 'CCCTTATATATATAT\\n',\r\n '>s3_2 c 
orig_bc=AACTGTGCGTAC new_bc=AACTGTGCGTAC bc_diffs=0\\n',\r\n 'AACCGGCCGGTT\\n',\r\n '>s1_3 d orig_bc=ACTCATGTCTAC new_bc=ACACATGTCTAC bc_diffs=1\\n',\r\n 'CCCTTACTATATAT\\n']\r\n expected_log = [\r\n 'Number raw input seqs\\t6\\n',\r\n '\\n',\r\n 'Length outside bounds of 5 and 1000\\t0\\n',\r\n 'Num ambiguous bases exceeds limit of 0\\t0\\n',\r\n 'Missing Qual Score\\t0\\n',\r\n 'Mean qual score below minimum of 22\\t0\\n',\r\n 'Max homopolymer run exceeds limit of 4\\t0\\n',\r\n 'Num mismatches in primer exceeds limit of 0: 2\\n',\r\n '\\n',\r\n 'Size of quality score window, in base pairs: 3\\n',\r\n 'Number of sequences where a low quality score window was detected: 1\\n',\r\n 'Sequences with a low quality score were not written, -g option enabled.\\n',\r\n '\\n',\r\n 'Sequence length details for all sequences passing quality filters:\\n',\r\n 'Raw len min/max/avg\\t32.0/35.0/33.7\\n',\r\n 'Wrote len min/max/avg\\t12.0/15.0/13.7\\n',\r\n '\\n',\r\n 'Barcodes corrected/not\\t1/0\\n',\r\n 'Uncorrected barcodes will not be written to the output fasta file.\\n',\r\n 'Corrected barcodes will be written with the appropriate barcode category.\\n',\r\n 'Corrected but unassigned sequences will not be written unless --retain_unassigned_reads is enabled.\\n',\r\n '\\n',\r\n 'Total valid barcodes that are not in mapping file\\t0\\n',\r\n 'Sequences associated with valid barcodes that are not in the mapping file will not be written.\\n',\r\n '\\n',\r\n 'Barcodes in mapping file\\n',\r\n 'Num Samples\\t2\\n',\r\n 'Sample ct min/max/mean: 1 / 2 / 1.50\\n',\r\n 'Sample\\tSequence Count\\tBarcode\\n',\r\n 's1\\t2\\tACACATGTCTAC\\n',\r\n 's3\\t1\\tAACTGTGCGTAC\\n',\r\n 's2\\t0\\tAGAGTCCTGAGC\\n',\r\n '\\n',\r\n 'Total number seqs written\\t3']\r\n expected_histograms = [\r\n '# bins raw sequence lengths, length of sequences that pass quality filters before processing, and lengths of sequences that pass quality filters post processing.\\n',\r\n 'Length\\tRaw\\tBefore\\tAfter\\n',\r\n '10\\t0\\t0\\t3\\n',\r\n '20\\t2\\t0\\t0\\n',\r\n '30\\t4\\t3\\t0']\r\n\r\n self.assertEqual(actual_seqs, expected_seqs)\r\n self.assertEqual(actual_log, expected_log)\r\n self.assertEqual(actual_histograms, expected_histograms)", "def trim_alt(ds, data_vars=[\"Altitude_m\", \"Counts\", \"Temperature_C\"]):\n\n if \"trim_method\" in ds.attrs:\n trm_list = ds.attrs[\"trim_method\"]\n\n if not isinstance(trm_list, list): # make sure it is a list before looping\n trm_list = [trm_list]\n\n for trm_meth in trm_list:\n if trm_meth.lower() == \"altitude\":\n print(\"Trimming using altitude data\")\n altitude = ds[\n \"Altitude_m\"\n ] # need to use atltitude values before starting trimming\n for var in data_vars:\n ds[var] = ds[var].where(~(altitude < ds.attrs[\"Deadzone_m\"]))\n ds[var] = ds[var].where(~(altitude > ds.attrs[\"Range_m\"]))\n print(f\"Trimming {var}\")\n\n histtext = \"Trimmed altimeter data using Altimeter_m = 0.\"\n\n ds = utils.insert_history(ds, histtext)\n\n elif trm_meth.lower() == \"bin range\":\n print(\"Trimming using good_bins of %s\" % str(ds.attrs[\"good_bins\"]))\n if \"bins\" in ds.coords:\n # trim coordinate bins\n ds = ds.isel(\n bins=slice(ds.attrs[\"good_bins\"][0], ds.attrs[\"good_bins\"][1])\n )\n # reset Bin_count attribute\n ds.attrs[\"Bin_count\"] = (\n ds.attrs[\"good_bins\"][1] - ds.attrs[\"good_bins\"][0]\n )\n\n histtext = (\n \"Removed extra bins from altimeter data using good_bins attribute.\"\n )\n\n ds = utils.insert_history(ds, histtext)\n\n else:\n print(\"Did not trim altimeter 
data\")\n\n return ds", "def trim(self, start, stop=None):\n if stop is None:\n stop = self.data.shape[0]\n\n start = max(start, 0)\n stop = min(stop, self.data.shape[0])\n self.data = self.data.iloc[start:stop,:]", "def preprocess(self):\n \n file_name_list = os.listdir(self.image_dir)\n random.seed(1234)\n random.shuffle(file_name_list)\n \n for i,d in enumerate(self.domains):\n self.attr2idx[d]=i \n\n for i, file_name in enumerate(file_name_list):\n if (file_name.startswith('X_')):\n continue\n \n parts = file_name.split(\"-\")\n label = int(parts[0])\n if label not in self.domains:\n continue\n img_name = file_name\n\n count=self.get_sample_count(label)\n if count<self.valid_set_size:\n # create holdout set on the fly\n utils.copy_file(self.image_dir,self.valid_set_dir,img_name)\n else:\n self.dataset.append([img_name, self.attr2idx[label]])\n \n self.increment_sample_count(label)\n\n print(\"Sample count per domain: \"+str(self.sample_count)+\" (including holdout set, holdout size per domain is: \"+str(self.valid_set_size)+\")\")\n print('Finished preprocessing the dataset...')", "def test_trim_fasta(self):\r\n expected = [\"\"\">HWUSI-EAS552R_0357:8:1:10040:6364#0/1\r\nGACGAG\r\n\"\"\",\r\n \"\"\">HWUSI-EAS552R_0357:8:1:10184:6365#0/1\r\nGTCTGA\r\n\"\"\"]\r\n\r\n self.assertEqual(list(trim_fasta(self.fasta_barcodes, 6)), expected)", "def preprocess(self):\n filtered_data = pd.read_csv(self.input)\n\n if self.config.getboolean(\"filterMissingsInGenes\"):\n # first filter out the genes that have more missings than threshold\n filtered_data = self.filterMissings(self.config[\"threshold\"], filtered_data)\n if self.config.getboolean(\"filterMissingsInSamples\"):\n # second transpose matrix and filter out samples that have more missings than threshold\n filtered_samples = self.filterMissings(self.config[\"threshold\"], filtered_data.T)\n filtered_data = filtered_samples.T\n\n # transpose back into original orientation and save\n filePrefix = self.input.split(\"/\")[-1].split(\".\")[\n 0] # split path by / to receive filename, split filename by . 
to receive filename without ending\n filename = self.output + filePrefix + \"_filtered.csv\"\n filtered_data.to_csv(filename, index=False)\n return filename", "def trim(self, start, end):\n for _var in self.inputs:\n _var.trim(start, end)", "def _reset(self) -> None:\n self.images = []\n self.activations = []\n self.labels = []\n self.preds = []\n self.n_found = 0", "def trim(args) :\n from trimmer import trim_reads\n trim_reads(args)", "def strip_barcodes(input_file, wanted_set):\n file_name = os.path.splitext(os.path.basename(input_file))[0]\n with open(file_name + \"_adapters_removed.fasta\", \"w\") as out:\n for record in SeqIO.parse(input_file, \"fasta\"):\n match = re.search(r'\\S*:', record.id)\n if match:\n correct = match.group().rstrip(\":\")\n else:\n correct = str(record.id)\n SEQ = str(record.seq)\n if correct in wanted_set:\n out.write(\">\" + correct + \"\\n\" + SEQ + \"\\n\")", "def preprocess_dataset(dataset=None, remove_missing=60, remove_empty_rows=True):\n print('feature size before dropping:{}'.format(dataset.shape[1]))\n dataset_after_drop = dataset.dropna(thresh=dataset.shape[0]*remove_missing/100, how='all',axis=1)\n print('feature size after dropping:{}'.format(dataset_after_drop.shape[1]))\n print('row size before dropping:{}'.format(dataset_after_drop.shape[0]))\n if remove_empty_rows is True:\n df_final = dataset_after_drop.dropna(inplace=False).reset_index (drop=True)\n print('row size after dropping:{}'.format(df_final.shape[0]))\n print('---------------')\n print('final shape:{}'.format(df_final.shape))\n return df_final\n else:\n return dataset_after_drop", "def reset(self):\n self.items = np.arange(self.ratings.shape[1])", "def clear_annotation(self):\n\n self.xValues = []\n self.yValues = []\n self.colors = []\n\n self.stop_video()", "def _clean_results(self):\n\t\tif self.file_type == \"Automobile\":\n\t\t\tcols = [\"Year\", \"Mileage\", \"Price\"]\n\t\t\tself.data.Mileage.replace([',', 'mi.', 'nan', ' '], '', regex=True, inplace=True) # Fix mileage column\n\t\t\tself.data.Price.replace([',', '\\$'], '', regex=True, inplace=True) # Always fix price column (, and $ removed)\n\t\t\tself.data[cols] = self.data[cols].apply(pd.to_numeric, errors='coerce') # Coerces errors into NaN values\n\t\t\tself.data.drop(self.data[self.data.Year < 2000].index, inplace=True) # Remove cars made before 2000\n\t\t\tself.data.drop(self.data[self.data.Price > 30000].index, inplace=True) # Remove cars over $30,000\n\t\t\tself.data.drop(self.data[(self.data.Mileage < 1000) | (self.data.Mileage > 300000)].index, inplace=True) # Remove cars with over 300,000 miles\n\t\t\tself.data['Age'] = 2018 - self.data['Year'] # Change years to Age\n\t\telif self.file_type == \"Apartment\":\n\t\t\tself.data.Area.replace(['ft2'], '', regex=True, inplace=True) # Remove ft2 from square footage column\n\t\t\tself.data.Price.replace([',', '\\$'], '', regex=True, inplace=True) # Always fix price column (, and $ removed)\n\t\telse:\n\t\t\tself.data['Street'], self.data['City'], self.data['State'] = self.data['Address'].str.split(',', 2).str\n\t\t\tdel self.data.Address\n\t\t\tself.data.drop(self.data[self.data.Price > 1000000].index, inplace=True) # Remove houses worth more than $1 million\n\n\t\tself.data.replace('^\\s*$', np.nan, regex=True, inplace=True) # Replace all empty values with np.NaN\n\t\tself.data = self.data.dropna(axis=1, how='all') # Remove Null Columns\n\t\tself.data = self.data.apply(pd.to_numeric, errors='ignore') # Coerces errors into NaN values", "def _untrain(self):\n # untrain 
the mapper\n if self.__mapper is not None:\n self.__mapper.untrain()\n # let base class untrain as well\n super(MappedClassifier, self)._untrain()", "def get_count_matrix_all_barcodes(self) -> sp.csr.csr_matrix:\n\n if self.is_trimmed:\n\n # Return the count matrix for selected barcodes and genes.\n trimmed_bc_matrix = self.data['matrix'].tocsc()\n trimmed_matrix = trimmed_bc_matrix[:, self.analyzed_gene_inds].tocsr()\n\n # Apply transformation to the count data.\n return self.transformation.transform(trimmed_matrix)\n\n else:\n logging.warning(\"Using full count matrix, without any trimming. \"\n \"Could be slow.\")\n\n # Apply transformation to the count data.\n return self.transformation.transform(self.data['matrix'])", "def trim(self):\n result = library.MagickTrimImage(self.wand)\n if not result:\n self.raise_exception()", "def trim_features():\n pass", "def reset(self):\n self.baseline = None\n self.cut = None\n self.manual_push = 0", "def trim_data(data, attributes):\n return data.drop(attributes, axis=1)" ]
[ "0.5931867", "0.5751522", "0.56039447", "0.55723774", "0.5401302", "0.5370132", "0.5366945", "0.53542274", "0.5294477", "0.52587545", "0.5251565", "0.52389866", "0.52253807", "0.5208275", "0.51975644", "0.5188658", "0.514943", "0.51444215", "0.51317275", "0.51254904", "0.5110015", "0.51077676", "0.5104215", "0.5094967", "0.507826", "0.50532675", "0.503051", "0.5026695", "0.5019436", "0.5009204" ]
0.7511484
0
Estimate relevant priors, populating fields in the self.priors dict.
def _estimate_priors(self): # Estimate the log UMI count turning point between cells and 'empties'. self.priors['log_counts_crossover'] = \ np.mean(np.log1p([self.priors['cell_counts'], self.priors['empty_counts']])).item() # Estimate prior for the scale param of LogNormal for d. if self.model_name != "simple": self.priors['d_std'] = (np.log1p(self.priors['cell_counts']) - self.priors['log_counts_crossover']) / 5 else: self.priors['d_std'] = 0.2 # This is a reasonable prior in log space. # Priors for models that include empty droplets: if self.model_name != "simple": # Estimate fraction of trimmed dataset that contains cells. # cell_prob = self.priors['n_cells'] / self.analyzed_barcode_inds.size cell_prob = (1 - self.fraction_empties) \ * (self.priors['n_cells'] / self.analyzed_barcode_inds.size) self.priors['cell_prob'] = cell_prob assert cell_prob > 0, f"Fraction of trimmed dataset " \ f"containing cells should be > 0, " \ f"but is {cell_prob}." assert cell_prob <= 1, f"Fraction of trimmed dataset " \ f"containing cells should be at most 1, " \ f"but is {cell_prob}." # Turn cell probability into logit. self.priors['cell_logit'] = np.log(cell_prob / (1 - cell_prob)).item() # Estimate the ambient gene expression profile. self.priors['chi_ambient'], self.priors['chi_bar'] = \ estimate_chi_from_dataset(self)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_priors(self, prior_U, method='inverse'):\n if self.Pchance is None:\n raise IOError(\"Set Pchance before calling this method\")\n\n # TODO -- Move this into Bayesian\n if prior_U < 0.:\n self.prior_U = np.product(self.candidates['P_c'])\n else:\n self.prior_U = prior_U\n\n # Raw priors\n self.raw_prior_Oi = bayesian.raw_prior_Oi(method, self.candidates[self.filter].values,\n Pchance=self.Pchance,\n half_light=self.candidates.half_light.values)\n\n # Normalize\n self.prior_Oi = bayesian.renorm_priors(self.raw_prior_Oi, self.prior_U)\n\n # Add to table\n self.candidates['P_O'] = self.prior_Oi", "def make_priors(self):\r\n if self.last_img_size != (self.target_size, self.target_size):\r\n prior_data = []\r\n\r\n for conv_w, conv_h, scale in zip(self.conv_ws, self.conv_hs, self.scales):\r\n for i in range(conv_h):\r\n for j in range(conv_w):\r\n # +0.5 because priors are in center-size notation\r\n cx = (j + 0.5) / conv_w\r\n cy = (i + 0.5) / conv_h\r\n\r\n for ar in self.aspect_ratios:\r\n ar = np.sqrt(ar)\r\n\r\n w = scale * ar / self.target_size\r\n h = scale / ar / self.target_size\r\n\r\n # This is for backward compatability with a bug where I made everything square by accident\r\n h = w\r\n\r\n prior_data += [cx, cy, w, h]\r\n\r\n self.priors = np.array(prior_data).reshape(-1, 4)\r\n self.last_img_size = (self.target_size, self.target_size)\r\n return self.priors", "def prep(self):\n \n # create a dict with prior probabilities\n self.row_priors = [0.0]*len(self.rows)\n self.feature_priors = dict()\n \n # denominator is given by reference priors\n denominator = sum(self.column_priors)\n # null_feature_prior is used when feature is not observed at all\n # this is set up to scale with features, i.e. arbitrarily adding\n # child features into an ontology should not skew sums over repr.\n null_feature_prior = 1/max(denominator, float(len(self.rows)))\n \n for rowname, rowindex in self.rows.items(): \n numerator = 0\n for colname, colindex in self.columns.items(): \n colprior = self.column_priors[colindex]\n numerator += self.data[colindex][rowindex]*colprior\n if numerator == 0:\n numerator = null_feature_prior \n self.row_priors[rowindex] = float(numerator)/denominator\n self.feature_priors[rowname] = self.row_priors[rowindex]\n\n return self", "def priors(self):\n\n return self._priors", "def calculate_priors(trainingLabels):\r\n sum = 0\r\n priors = {}\r\n totalSamples = len(trainingLabels)\r\n classes = set(trainingLabels)\r\n for cls in classes:\r\n numCls = len(filter(lambda x: x == cls, trainingLabels))\r\n sum += numCls\r\n priors[cls] = float(numCls) / float(totalSamples)\r\n \r\n # Sanity check: valid partitioning\r\n assert(sum == totalSamples)\r\n\r\n return priors", "def _get_model_priors(self):\n if self._alpha_model_priors:\n return self._alpha_model_priors\n # sample the variables from their corresponding distributions\n params = self._get_prior_params()\n self._alpha_model_priors = self._params2probs(params)\n return self._alpha_model_priors", "def sort_priors(self):\n return", "def set_priors(parnames, limits, linenames, vsyst, nssps=1):\n priors = {}\n for parname in parnames:\n name = parname.split(\"_\")[0]\n if name in limits: #all the CvD ssp parameters\n vmin, vmax = limits[name]\n# print(parname,vmin,vmax)\n delta = vmax - vmin\n priors[parname] = stats.uniform(loc=vmin, scale=delta)\n elif parname in vsyst:\n priors[parname] = stats.norm(loc=vsyst[parname], scale=500)\n elif parname == \"eta\": #what does eta do?\n priors[\"eta\"] = stats.uniform(loc=1., 
scale=19)#uniform distribution in range [1,19]\n elif parname == \"nu\": #what does nu do?\n priors[\"nu\"] = stats.uniform(loc=2, scale=20)#uniform distribution in range [2,20]\n elif parname == \"sigma\":\n priors[\"sigma\"] = stats.uniform(loc=50, scale=300)#obtains the uniform distribution on [loc, loc + scale]. i.e. uniform in range [50,300]\n elif parname == \"sigma_gas\":\n priors[parname] = stats.uniform(loc=50, scale=100)#uniform between [50,100]km/s\n elif name == \"w\":\n priors[parname] = stats.uniform(loc=0, scale=1)#weights uniform between 0 and 1\n elif name in linenames:\n# priors[parname] = stats.expon(loc=0, scale=0.5)#favors low values>~0; make even stronger by decreasing scale. \n priors[parname] = stats.expon(loc=0, scale=0.2)#favors low values>~0; make even stronger by decreasing scale. \n elif name in [\"pred\", \"pblue\"]:\n porder = int(parname.split(\"_\")[1])\n if porder == 0:\n mu, sd = 1 / nssps, 1\n a, b = (0 - mu) / sd, (np.infty - mu) / sd\n priors[parname] = stats.truncnorm(a, b, mu, sd)\n else:\n priors[parname] = stats.norm(0, 0.05)\n else:\n print(f\"parameter without prior: {parname}\")\n return priors", "def set_priors(self,alpha,beta):\n\n\t\tassert type(alpha) == float and type(beta) == float\n\n\t\tself.alpha = alpha\n\t\tself.beta = beta", "def __init__(self):\n super(PriProb, self).__init__()\n # initialize R: distribute R_TOTAL reward points in J_avi locations randomly\n # self.r preserved for debugging, no real use in the script\n self.r = np.array(ad.randint_upto_sum(R_TOTAL, J_avi)).astype(NP_DTYPE)\n\n # expand self.r from J_avi locations to J locations using is_avi\n self.r_exp = np.zeros((J), dtype=NP_DTYPE)\n self.r_exp[np.nonzero(is_avi.cpu().numpy())] = self.r\n\n #normalizedR = ad.normalize(self.r_exp, using_max=False)\n self.R = nn.Parameter(torch.from_numpy(self.r_exp))", "def set_priors(self,alpha):\n\n\t\tassert type(alpha) == float\n\t\tself.alpha = alpha", "def __init__(self, priors, ids, row_priors=None): \n\n # mappings from feature ids to indexes\n self.rows = dict()\n # mapping from indexes to feature ids\n self.row_names = tuple(ids)\n # array and dict with row priors\n self.row_priors = [1.0] * len(ids)\n self.feature_priors = dict()\n for index, feature in enumerate(self.row_names):\n self.rows[feature] = index\n if row_priors is not None:\n self.row_priors[index] = row_priors[feature]\n self.feature_priors[feature] = row_priors[feature]\n else:\n self.feature_priors[feature] = 1.0\n \n # mappings from reference ids to indexes, reverse, and priors\n self.columns = dict()\n self.column_names = [None] * len(priors)\n self.column_priors = [1.0] * len(priors)\n self.reference_priors = priors.copy()\n for refname in priors.keys():\n index = len(self.columns)\n self.columns[refname] = index\n self.column_names[index] = refname\n self.column_priors[index] = priors[refname]\n \n # data store as nested arrays\n # first index is reference index, second is feature index \n self.data = [None] * len(self.columns)\n for _ in range(len(self.columns)):\n self.data[_] = [0.0] * len(self.rows)\n \n # map to ontology parents\n self.parents = None\n \n # cache for finding positive parents during FP inference calculations\n self.cache = dict()\n self.temp = Counter()", "def prior_of_priors(self, tt):\n for i in xrange(self.n_params): \n try: \n p_theta *= self.param_obj.prior()[i].pdf(tt[i]) \n\n except UnboundLocalError: \n p_theta = self.param_obj.prior()[i].pdf(tt[i]) \n\n return p_theta", "def set_priors(parnames, limits, vsyst=0, 
nssps=1):\n priors = {}\n for parname in parnames:\n name = parname.split(\"_\")[0]\n if name in limits:\n vmin, vmax = limits[name]\n delta = vmax - vmin\n priors[parname] = stats.uniform(loc=vmin, scale=delta)\n elif parname == \"Vsyst\":\n priors[parname] = stats.norm(loc=vsyst, scale=500)\n elif parname == \"eta\":\n priors[\"eta\"] = stats.uniform(loc=1, scale=10)\n elif parname == \"nu\":\n priors[\"nu\"] = stats.uniform(loc=2, scale=20)\n elif parname == \"sigma\":\n priors[\"sigma\"] = stats.uniform(loc=50, scale=300)\n elif name == \"w\":\n priors[parname] = stats.uniform(loc=0, scale=1)\n elif name == \"p\":\n porder = int(parname.split(\"_\")[1])\n if porder == 0:\n mu, sd = np.sqrt(2 * nssps), 1\n a, b = (0 - mu) / sd, (np.infty - mu) / sd\n priors[parname] = stats.truncnorm(a, b, mu, sd)\n else:\n priors[parname] = stats.norm(0, 0.1)\n else:\n raise ValueError(f\"Parameter without prior: {parname}\")\n return priors", "def init_probability_dict(self):\n for x in xrange(0,10):\n self.class_probabilities[x] = self.init_probability_2d()", "def _get_guide_priors(self):\n if not self._alpha_guide_prior_params:\n # create initial parameters\n params = self._get_prior_params()\n # register all parameters in pyro\n for p, v in iteritems(params):\n pyro.param(p, v)\n self._alpha_guide_prior_params = dict(\n self._param_store.named_parameters()\n )\n else:\n # register all parameters in pyro\n for p, v in iteritems(self._alpha_guide_prior_params):\n pyro.param(p, v)\n return self._params2probs(self._alpha_guide_prior_params)", "def __call__(self, relsSortedByScores, qrelDict):\n result = 0.\n postQty = len(qrelDict)\n\n pos = 0\n for i, rel in enumerate(relsSortedByScores):\n if rel > RELEVANCE_THRESHOLD:\n pos += 1.\n result += pos / (i + 1.)\n\n return result / postQty", "def score(self, tree):\n probas = [self.prod_freq.get(prod, 0.5) /\n self.source_freq.get(prod.source, 1)\n for prod in tree]\n logger.debug(\"Scoring {0}:\".format(tree))\n for prod, proba in zip(tree, probas):\n logger.debug(\n \"\\t- {0} {1}({2})\"\n .format(prod, round(proba, 3), round(log(proba, 10), 3)))\n log_proba = sum(log(proba, 10) for proba in probas)\n return {\n 'log_proba': log_proba,\n 'productions': len(probas)\n }", "def reprime(self):\n self.__primed = 1", "def _updateInitialProbabilities(self): \n N = self.N\n K = self.K\n\n for i in range(1,self.K+1):\n s = 0\n updated_prob = 0\n for n in range(1,self.N+1):\n s = s+1\n updated_prob = updated_prob + self.posterior_state_trellis[n][(1,i)]\n self.state_initial_prob[i] = (updated_prob/s)", "def update_probabilities(self):\n self.probabilities = self.pheromones**self.EXP_PH * self.mcv**self.EXP_MCV", "def update_precision_priors(self,\n precision_mat, variable_mat,\n prec_alpha, prec_beta):\n new_priors = {'alpha': (np.zeros(precision_mat['alpha'].shape)\n + prec_alpha + 0.5),\n 'beta': (prec_beta\n + 0.5 * get_square_gauss(variable_mat))}\n\n return new_priors", "def propose(self):\n\n p = type(self)(self.n, alpha=self.alpha)\n\n return p, p.compute_prior() - self.compute_prior()", "def _compute_sensitivities(self, context):\n _logger.info(\"calling _compute_sensitivities.\")\n cached_id = np.random.randint(1000)\n if self.start_epoch == context.epoch_id:\n sensitivities_file = self.sensitivities_file\n else:\n sensitivities_file = self.sensitivities_file + \".epoch\" + str(\n context.epoch_id)\n sensitivities = self._load_sensitivities(sensitivities_file)\n\n for param in context.eval_graph.all_parameters():\n if not re.match(self.pruned_params, 
param.name()):\n continue\n if param.name() not in sensitivities:\n sensitivities[param.name()] = {\n 'pruned_percent': [],\n 'loss': [],\n 'size': param.shape()[0]\n }\n\n metric = None\n\n for param in sensitivities.keys():\n ratio = self.delta_rate\n while ratio < 1:\n ratio = round(ratio, 2)\n if ratio in sensitivities[param]['pruned_percent']:\n _logger.debug('{}, {} has computed.'.format(param, ratio))\n ratio += self.delta_rate\n continue\n if metric is None:\n metric = self._eval_graph(context, self.eval_rate,\n cached_id)\n\n param_backup = {}\n # prune parameter by ratio\n self._prune_parameters(\n context.eval_graph,\n context.scope, [param], [ratio],\n context.place,\n lazy=True,\n param_backup=param_backup)\n self.pruned_list[0]\n # get accuracy after pruning and update self.sensitivities\n pruned_metric = self._eval_graph(context, self.eval_rate,\n cached_id)\n loss = metric - pruned_metric\n _logger.info(\"pruned param: {}; {}; loss={}\".format(\n param, ratio, loss))\n for brother in self.pruned_list[0]:\n if re.match(self.pruned_params, brother):\n if brother not in sensitivities:\n sensitivities[brother] = {\n 'pruned_percent': [],\n 'loss': []\n }\n sensitivities[brother]['pruned_percent'].append(ratio)\n sensitivities[brother]['loss'].append(loss)\n\n self._save_sensitivities(sensitivities, sensitivities_file)\n\n # restore pruned parameters\n for param_name in param_backup.keys():\n param_t = context.scope.find_var(param_name).get_tensor()\n param_t.set(self.param_backup[param_name], context.place)\n\n# pruned_metric = self._eval_graph(context)\n\n ratio += self.delta_rate\n return sensitivities", "def calc_priors(categories, data):\n counts = np.zeros(categories)\n for val in range(categories):\n counts[val] = np.count_nonzero(data.labels == val)\n return counts / len(data.labels)", "def pred_proba(self, X, times, presorted_times=False):\n results = Parallel(n_jobs=self.n_jobs)(\n delayed(_predict_tree)(self.trees[tree_idx], 'surv', X, times,\n presorted_times)\n for tree_idx in range(self.n_estimators))\n return functools.reduce(lambda x, y: x + y, results) \\\n / self.n_estimators", "def _generate_p(self):\n self._values, weights = zip(*self._weights.items())\n cumsum = list(itertools.accumulate(weights))\n total = cumsum[-1]\n self._p = [i / total for i in cumsum]", "def calc_par_clf(irr_values):\n par_values = [calculate_par(round(float(v))) for v in irr_values]\n total_count = len(par_values)\n count = [0, 0, 0]\n for v in par_values:\n if v <= 3.13:\n count[0] += 1\n elif v <= 5.48:\n count[1] += 1\n else:\n count[2] += 1\n\n # convert to %\n count = [round(c * 100 / total_count, 2) for c in count]\n\n return par_values, {'low': count[0], 'medium': count[1], 'high': count[2]}", "def probabilities(self):\n raise NotImplementedError", "def aPriori(self) -> dict:\n \n return self.probIn" ]
[ "0.6760685", "0.6671473", "0.6519258", "0.6318768", "0.62852085", "0.62144035", "0.6048549", "0.60283566", "0.5965118", "0.59049845", "0.5749101", "0.56592935", "0.560779", "0.560301", "0.55439323", "0.5540224", "0.5506283", "0.54675066", "0.54665715", "0.5448176", "0.5443485", "0.54426235", "0.5423064", "0.54130113", "0.5400026", "0.5386656", "0.53734744", "0.53590393", "0.5339299", "0.5337552" ]
0.75927603
0
Get the count matrix, trimmed if trimming has occurred.
def get_count_matrix(self) -> sp.csr.csr_matrix: if self.is_trimmed: # Return the count matrix for selected barcodes and genes. trimmed_bc_matrix = self.data['matrix'][self.analyzed_barcode_inds, :].tocsc() trimmed_matrix = trimmed_bc_matrix[:, self.analyzed_gene_inds].tocsr() # Apply transformation to the count data. return self.transformation.transform(trimmed_matrix) else: logging.warning("Using full count matrix, without any trimming. " "Could be slow.") # Apply transformation to the count data. return self.transformation.transform(self.data['matrix'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_count_matrix_empties(self) -> sp.csr.csr_matrix:\n\n if self.is_trimmed:\n\n # Return the count matrix for selected barcodes and genes.\n trimmed_bc_matrix = self.data['matrix'][self.empty_barcode_inds,\n :].tocsc()\n trimmed_matrix = trimmed_bc_matrix[:, self.analyzed_gene_inds].tocsr()\n\n # Apply transformation to the count data.\n return self.transformation.transform(trimmed_matrix)\n\n else:\n logging.error(\"Trying to get empty count matrix without trimmed data.\")\n\n # Apply transformation to the count data.\n return self.transformation.transform(self.data['matrix'])", "def get_count_matrix_all_barcodes(self) -> sp.csr.csr_matrix:\n\n if self.is_trimmed:\n\n # Return the count matrix for selected barcodes and genes.\n trimmed_bc_matrix = self.data['matrix'].tocsc()\n trimmed_matrix = trimmed_bc_matrix[:, self.analyzed_gene_inds].tocsr()\n\n # Apply transformation to the count data.\n return self.transformation.transform(trimmed_matrix)\n\n else:\n logging.warning(\"Using full count matrix, without any trimming. \"\n \"Could be slow.\")\n\n # Apply transformation to the count data.\n return self.transformation.transform(self.data['matrix'])", "def test_cmatrix_reduction(self):\n cmat = np.array([[1,4,5,0,0],\n [1,4,5,0,0],\n [6,2,2,0,0],\n [0,0,0,3,7],\n [0,0,0,7,3]])\n reduced_matrix = get_connected_count_matrix(cmat)\n difference = reduced_matrix - np.array([[1, 4, 5], [1, 4, 5], [6, 2, 2]])\n self.assertTrue(np.sum(difference) == 0)", "def trim(self):\n for i in range(len(self)):\n if self[i] != TRIT_ZERO:\n return self.__class__(self[i:])\n return self.__class__([])", "def trim_whitespace(matrix, details, min_gap):\r\n if details == -1:\r\n row = matrix[0, ]\r\n else:\r\n row = matrix[matrix.shape[0] - 1, ]\r\n\r\n min_left = np.argmin(row)\r\n min_right = np.argmin(row[::-1])\r\n\r\n if min_left > min_gap:\r\n matrix = matrix[:, min_left - min_gap:]\r\n\r\n if min_right > min_gap:\r\n matrix = matrix[:, 0:len(row) - (min_right - min_gap)]\r\n\r\n return matrix", "def raw_counts(self):\n return np.array([[1, 2], [3, 4], [5, 6]])", "def reformat_countTable(\n self,analysis_id_I=None,sna2experimentID_I=None,\n sna2sns_I=None):\n if self.countTable: countTable = self.countTable[:];\n else: countTable = [];\n\n countTable_flat = self.reformat_countOrFPKMTable(\n countOrFPKMTable_I=countTable,\n analysis_id_I=analysis_id_I,\n sna2experimentID_I=sna2experimentID_I,\n sna2sns_I=sna2sns_I,\n count_or_FPKM = 'count');\n return countTable_flat;", "def counts_compressed(self):\n return [[i, int(count)] for (i, count) in enumerate(self.counts) if count > 0]", "def clear(self):\n self.counts = [0] * len(self.values)\n if HAS_NUMPY:\n self.counts = numpy.array(self.counts)", "def _fetch_count_metrics_and_clear(self):\n with self._count_rlock:\n count_metrics = self._count_metrics\n self._count_metrics = defaultdict(int)\n\n return count_metrics", "def raw_counts(self):\n return np.array([1, 2, 3])", "def raw_counts(self):\n return np.array([1, 2, 3])", "def get_counts(self):\n counts = [0, 0]\n for i in range(self._num_rows):\n for j in range(self._num_cols):\n if self._board[i][j] == \"B\":\n counts[0] += 1\n elif self._board[i][j] == \"W\":\n counts[1] += 1\n return counts", "def trim(args) :\n from trimmer import trim_reads\n trim_reads(args)", "def convert_to_measure_matrix(self):\n self.M = copy.deepcopy(self.m)\n\n for row in range(self.num_states):\n accum = 0\n for col in range(self.num_states):\n accum += self.m[row][col]\n self.M[row][col] = accum\n \n # pprint(self.m) \n 
# pprint(self.M)", "def __len__(self):\n return len(np.where(np.logical_not(self.data.mask))[0])", "def count(self) -> Tuple[groupable, pdarray]:\n repMsg = generic_msg(\n cmd=\"countReduction\",\n args={\"segments\": cast(pdarray, self.segments), \"size\": self.length},\n )\n self.logger.debug(repMsg)\n return self.unique_keys, create_pdarray(repMsg)", "def __calc_new_status0(self, removed_cells):\n # iterate through all removed cells. move up cell down and remove empty\n # cols\n result = deepcopy(self.status)\n # if len(result) != 0:\n # colsLen = len(result[0])\n for i in removed_cells:\n for j in range(i[0], -1, -1):\n if j == 0:\n result[j][i[1]] = ''\n else:\n result[j][i[1]] = result[j - 1][i[1]]\n\n # rotate the matrix and find empty lines, remove them and rotate back\n result = [i for i in zip(*result) if set(i) != {''}]\n result = zip(*result)\n result = [list(i) for i in result]\n\n #fill empty line\n # for i in range(0, len(result)):\n # if len(result[i]) < colsLen:\n # for j in range(0, colsLen-len(result[i])):\n # result[i].append('')\n return result", "def empty_matrix(self):\r\n\r\n return [[0 for i in range(len(self.s2)+1)] for j in range(len(self.s1)+1)]", "def get_random_smoothing_matrix(counts, width=3):\n st = time.time()\n num_obs = len(counts)\n s_matrix = []\n lns = []\n if type(width) != list:\n width = [(width, 2*width)]\n for ni in range(num_obs):\n for _w, nrs in width:\n p = np.ones(len(counts))\n p[ni] = 0\n if counts[ni] >= _w:\n region = get_region(ni, counts, 1, p)\n s_matrix.append(region)\n for nr in range(nrs):\n region = get_region(ni, counts, _w, p)\n s_matrix.append(region)\n lns.append(len(np.nonzero(region)[0]))\n for nr in range(nrs):\n region = get_region(ni, counts, _w, p)\n s_matrix.append(region)\n lns.append(len(np.nonzero(region)[0]))\n S = np.stack(s_matrix, axis=0).astype(np.float32)\n assert np.alltrue(S.sum(axis=-1)>0) \n return torch.from_numpy(S)", "def matrix_dim(CT):\r\n if CT[0]==0 and CT[-1]==0:\r\n return 2\r\n elif CT[0]!=0 and CT[-1]!=0:\r\n return 4", "def get_score_matrix(self) -> int:", "def action_space(self):\n return np.argwhere(self.mat==0).astype(np.int32)", "def reduce_mini(minigrid):\n row = []\n for i in range(3):\n for j in range(3):\n row.append(minigrid[i][j])\n for i in range(9):\n if len(row[i]) == 1:\n for j in range(9):\n if i != j:\n if row[i] in row[j]:\n chunks = row[j].split(row[i])\n row[j] = chunks[0] + chunks[1]\n\n count_dict = {}\n for i in range(9):\n for char in row[i]:\n if char in count_dict:\n count_dict[char] = \"X\"\n else:\n count_dict[char] = i\n\n for key in count_dict:\n if count_dict[key] != \"X\":\n row[count_dict[key]] = key\n\n for i in range(3):\n for j in range(3):\n minigrid[i][j] = row[(i*3)+j]\n\n return minigrid", "def trim_dataset(mat, batch_size):\n no_of_rows_drop = mat.shape[0]%batch_size\n print(\"number of rows dropped\", no_of_rows_drop)\n if(no_of_rows_drop > 0):\n return mat[:-no_of_rows_drop]\n else:\n return mat", "def count_matrix_largest(self, effective=False):\n return self.count_matrix(connected_set=0, effective=effective)", "def global_score_count(self):\n score_count_mat = np.ndarray((5,10))\n\n for n in range(0,5):\n score_count = [\n np.count_nonzero(self.__data[...,n*5:n*5+5] == score)\n for score\n in np.arange(10)+1\n ]\n\n score_count_mat[n,...] 
= score_count\n\n return score_count_mat", "def flat_dim(self):\n return np.sum([c.flat_dim for c in self.spaces])", "def calc_empty(self):\n empty = 0\n for x in range(0, self.tot_col):\n for y in range(1, self.tot_rows + 1):\n if self.file_list[y][x] == '':\n empty += 1\n #print(csv_list[y][x] + ' %s %s' % (x, y))\n return empty", "def sparse_counts_map(self):\n if self.hpx._ipix is None:\n flatarray = self.data.flattern()\n else:\n flatarray = self.expanded_counts_map()\n nz = flatarray.nonzero()[0]\n data_out = flatarray[nz]\n return (nz, data_out)" ]
[ "0.7883857", "0.695453", "0.5581138", "0.5525962", "0.5487828", "0.54187196", "0.53689903", "0.5367731", "0.5279583", "0.52316165", "0.5052875", "0.5052875", "0.5050538", "0.5036658", "0.50318664", "0.498184", "0.49812087", "0.49420154", "0.49123126", "0.4906234", "0.4897239", "0.48877054", "0.48851556", "0.48798853", "0.48761392", "0.48558673", "0.48523542", "0.48217982", "0.48191327", "0.47942656" ]
0.7683302
1
Get the count matrix for empty drops, trimmed if trimming has occurred.
def get_count_matrix_empties(self) -> sp.csr.csr_matrix: if self.is_trimmed: # Return the count matrix for selected barcodes and genes. trimmed_bc_matrix = self.data['matrix'][self.empty_barcode_inds, :].tocsc() trimmed_matrix = trimmed_bc_matrix[:, self.analyzed_gene_inds].tocsr() # Apply transformation to the count data. return self.transformation.transform(trimmed_matrix) else: logging.error("Trying to get empty count matrix without trimmed data.") # Apply transformation to the count data. return self.transformation.transform(self.data['matrix'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_count_matrix(self) -> sp.csr.csr_matrix:\n\n if self.is_trimmed:\n\n # Return the count matrix for selected barcodes and genes.\n trimmed_bc_matrix = self.data['matrix'][self.analyzed_barcode_inds,\n :].tocsc()\n trimmed_matrix = trimmed_bc_matrix[:, self.analyzed_gene_inds].tocsr()\n\n # Apply transformation to the count data.\n return self.transformation.transform(trimmed_matrix)\n\n else:\n logging.warning(\"Using full count matrix, without any trimming. \"\n \"Could be slow.\")\n\n # Apply transformation to the count data.\n return self.transformation.transform(self.data['matrix'])", "def get_count_matrix_all_barcodes(self) -> sp.csr.csr_matrix:\n\n if self.is_trimmed:\n\n # Return the count matrix for selected barcodes and genes.\n trimmed_bc_matrix = self.data['matrix'].tocsc()\n trimmed_matrix = trimmed_bc_matrix[:, self.analyzed_gene_inds].tocsr()\n\n # Apply transformation to the count data.\n return self.transformation.transform(trimmed_matrix)\n\n else:\n logging.warning(\"Using full count matrix, without any trimming. \"\n \"Could be slow.\")\n\n # Apply transformation to the count data.\n return self.transformation.transform(self.data['matrix'])", "def calc_empty(self):\n empty = 0\n for x in range(0, self.tot_col):\n for y in range(1, self.tot_rows + 1):\n if self.file_list[y][x] == '':\n empty += 1\n #print(csv_list[y][x] + ' %s %s' % (x, y))\n return empty", "def num_empty(self):\n count = 0\n for i in self.__buckets:\n if i.size() == 0:\n count += 1\n return count", "def __len__(self):\n return len(np.where(np.logical_not(self.data.mask))[0])", "def NumberOfEmptySpots(self):\n\n return np.count_nonzero(self.state == EMPTY)", "def empty(self):\n return [cell for cell in self.compact if not cell.peg]", "def count_num_empty_tiles_not_masked(subgrid):\n\n\tnum_empty_tiles_not_masked = 0\n\tfor tile in subgrid:\n\t\tif tile == MaskedTile.EMPTY:\n\t\t\tnum_empty_tiles_not_masked += 1\n\n\treturn num_empty_tiles_not_masked", "def getNumCleanedTiles(self):\n\t\tr = 0\n\t\tfor i in self.tiles:\n\t\t\tif i.isClean(): r += 1\n\t\treturn r", "def trim(self):\n for i in range(len(self)):\n if self[i] != TRIT_ZERO:\n return self.__class__(self[i:])\n return self.__class__([])", "def getNumCleanedTiles(self):\n counter = 0\n for tile in self.tiles:\n if self.tiles[tile] == 'clean':\n counter += 1\n return counter", "def clear(self):\n self.counts = [0] * len(self.values)\n if HAS_NUMPY:\n self.counts = numpy.array(self.counts)", "def counts_compressed(self):\n return [[i, int(count)] for (i, count) in enumerate(self.counts) if count > 0]", "def test_cmatrix_reduction(self):\n cmat = np.array([[1,4,5,0,0],\n [1,4,5,0,0],\n [6,2,2,0,0],\n [0,0,0,3,7],\n [0,0,0,7,3]])\n reduced_matrix = get_connected_count_matrix(cmat)\n difference = reduced_matrix - np.array([[1, 4, 5], [1, 4, 5], [6, 2, 2]])\n self.assertTrue(np.sum(difference) == 0)", "def prune_empty(self):\n prev_count = self.count_deleted()\n if not self.args.keep_empty_dirs:\n for _, e in self.contents.items():\n e.prune_empty()\n return self.count_deleted() - prev_count", "def empty_matrix(self):\r\n\r\n return [[0 for i in range(len(self.s2)+1)] for j in range(len(self.s1)+1)]", "def test_reduce_null_matrix_is_empty(self):\n original = pd.read_csv(NULL_FILENAME, index_col=0, header=0)\n full_reduced = entropy_reduce_position_matrix(\n original,\n 1,\n trivial_metric\n )\n self.assertEqual(full_reduced.shape[0], original.shape[0])\n self.assertEqual(full_reduced.shape[1], 0)", "def get_empty_cells(board):\n empty_cells = 
[idx for idx, e in enumerate(board) if e == ' ']\n return empty_cells", "def getNumCleanedTiles(self):\n return len(self.clean_tiles)", "def get_nan_counts(data, cols, null_col_suffix=''):\n nulls_df = pd.DataFrame(pd.isnull(data[cols]).sum())\n nulls_df.columns = ['null_counts'+null_col_suffix]\n nulls_df['feature'] = nulls_df.index\n nulls_df.reset_index(inplace=True, drop=True)\n return nulls_df", "def nnz(self):\n t = self.get_MSC()\n return len(np.unique(t['masks']))", "def get_counts(self):\n counts = [0, 0]\n for i in range(self._num_rows):\n for j in range(self._num_cols):\n if self._board[i][j] == \"B\":\n counts[0] += 1\n elif self._board[i][j] == \"W\":\n counts[1] += 1\n return counts", "def find_blank(bd):\n count = 0\n for num in bd:\n if num == \" \":\n return count\n else:\n count += 1", "def nancnt_nb(a):\n out = np.empty(a.shape[1], dtype=np.float_)\n for col in range(a.shape[1]):\n out[col] = np.sum(~np.isnan(a[:, col]))\n return out", "def trim_zeros(array):\n multislice = []\n for i in range(array.ndim):\n sum_axes = tuple(j for j in range(array.ndim) if j is not i)\n edges = np.where(np.sum(array, axis=sum_axes) > 0)\n if edges[0].size == 0:\n return np.array([], dtype=array.dtype)\n low = edges[0][0]\n high = edges[0][-1]\n multislice.append(slice(low, high+1, 1))\n return array[tuple(multislice)]", "def observed_species(counts):\n return (counts!=0).sum()", "def __calc_new_status0(self, removed_cells):\n # iterate through all removed cells. move up cell down and remove empty\n # cols\n result = deepcopy(self.status)\n # if len(result) != 0:\n # colsLen = len(result[0])\n for i in removed_cells:\n for j in range(i[0], -1, -1):\n if j == 0:\n result[j][i[1]] = ''\n else:\n result[j][i[1]] = result[j - 1][i[1]]\n\n # rotate the matrix and find empty lines, remove them and rotate back\n result = [i for i in zip(*result) if set(i) != {''}]\n result = zip(*result)\n result = [list(i) for i in result]\n\n #fill empty line\n # for i in range(0, len(result)):\n # if len(result[i]) < colsLen:\n # for j in range(0, colsLen-len(result[i])):\n # result[i].append('')\n return result", "def check_empty_table(spark, df):\n return df.count()", "def get_empty_columns(\n dc_input: deepconsensus_pb2.DeepConsensusInput) -> List[int]:\n columns_to_remove = []\n for i in range(len(dc_input.subreads[0].bases)):\n all_internal_gaps = True\n for subread in dc_input.subreads:\n if subread.bases[i] != dc_constants.GAP_OR_PAD:\n all_internal_gaps = False\n break\n if all_internal_gaps:\n columns_to_remove.append(i)\n return columns_to_remove", "def columns_count(self):\n if self.value.count != 0:\n return len(self.value[0])\n else:\n return 0" ]
[ "0.6858056", "0.657635", "0.6126679", "0.56833136", "0.55564344", "0.5550837", "0.553804", "0.54982156", "0.54872674", "0.5481251", "0.5453243", "0.5403643", "0.5371395", "0.53526986", "0.5337087", "0.53238875", "0.5268101", "0.5231699", "0.5193867", "0.51901627", "0.51793355", "0.51591235", "0.5146519", "0.51456183", "0.5140205", "0.5139628", "0.51349884", "0.5131888", "0.5131441", "0.5129061" ]
0.7904562
0
Get the count matrix, trimming only genes, not barcodes.
def get_count_matrix_all_barcodes(self) -> sp.csr.csr_matrix: if self.is_trimmed: # Return the count matrix for selected barcodes and genes. trimmed_bc_matrix = self.data['matrix'].tocsc() trimmed_matrix = trimmed_bc_matrix[:, self.analyzed_gene_inds].tocsr() # Apply transformation to the count data. return self.transformation.transform(trimmed_matrix) else: logging.warning("Using full count matrix, without any trimming. " "Could be slow.") # Apply transformation to the count data. return self.transformation.transform(self.data['matrix'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_count_matrix(self) -> sp.csr.csr_matrix:\n\n if self.is_trimmed:\n\n # Return the count matrix for selected barcodes and genes.\n trimmed_bc_matrix = self.data['matrix'][self.analyzed_barcode_inds,\n :].tocsc()\n trimmed_matrix = trimmed_bc_matrix[:, self.analyzed_gene_inds].tocsr()\n\n # Apply transformation to the count data.\n return self.transformation.transform(trimmed_matrix)\n\n else:\n logging.warning(\"Using full count matrix, without any trimming. \"\n \"Could be slow.\")\n\n # Apply transformation to the count data.\n return self.transformation.transform(self.data['matrix'])", "def get_count_matrix_empties(self) -> sp.csr.csr_matrix:\n\n if self.is_trimmed:\n\n # Return the count matrix for selected barcodes and genes.\n trimmed_bc_matrix = self.data['matrix'][self.empty_barcode_inds,\n :].tocsc()\n trimmed_matrix = trimmed_bc_matrix[:, self.analyzed_gene_inds].tocsr()\n\n # Apply transformation to the count data.\n return self.transformation.transform(trimmed_matrix)\n\n else:\n logging.error(\"Trying to get empty count matrix without trimmed data.\")\n\n # Apply transformation to the count data.\n return self.transformation.transform(self.data['matrix'])", "def raw_counts(self):\n return np.array([[1, 2], [3, 4], [5, 6]])", "def raw_counts(self):\n return np.array([1, 2, 3])", "def raw_counts(self):\n return np.array([1, 2, 3])", "def counts_compressed(self):\n return [[i, int(count)] for (i, count) in enumerate(self.counts) if count > 0]", "def count_nucleotides(mat):\n\n final_counts = np.ones((4, mat.shape[1]))\n\n for i in range(len(mat[0, :])):\n cur_nucleotides = np.ones((4, 1))\n a_count = 0\n c_count = 0\n g_count = 0\n t_count = 0\n for j in range(len(mat[:, 0])):\n if mat[j, i] == 'A':\n a_count = a_count + 1\n elif mat[j, i] == 'C':\n c_count = c_count + 1\n elif mat[j, i] == 'G':\n g_count = g_count + 1\n elif mat[j, i] == 'T':\n t_count = t_count + 1\n cur_nucleotides = np.array([a_count, c_count, g_count, t_count])\n final_counts[:, i] = cur_nucleotides\n return final_counts", "def create_count_matrix(filename, output_dir):\n\n import os\n import json\n\n word_tag_output = \"tag_word_count.json\"\n bigram_matrix_name = \"bigram_count.json\"\n unigram_matrix_name = \"unigram_count.json\"\n trigram_matrix_name = \"trigram_count.json\"\n\n sub_dir = os.path.join(output_dir, \"count_matrix/\")\n if not os.path.exists(sub_dir):\n os.mkdir(sub_dir)\n\n word_tag_matrix = get_tag_word_matrix(filename)\n with open(sub_dir + word_tag_output, \"w\") as f:\n json.dump(word_tag_matrix, f)\n\n unigram_matrix = get_tag_n_gram(n=1, filename=filename)\n with open(sub_dir + unigram_matrix_name, \"w\") as f:\n json.dump(unigram_matrix, f)\n\n bigram_matrix = get_tag_n_gram(n=2, filename=filename)\n with open(sub_dir + bigram_matrix_name, \"w\") as f:\n json.dump(bigram_matrix, f)\n\n trigram_matrix = get_tag_n_gram(n=3, filename=filename)\n with open(sub_dir + trigram_matrix_name, \"w\") as f:\n json.dump(trigram_matrix, f)", "def get_counts(self):\n counts = [0, 0]\n for i in range(self._num_rows):\n for j in range(self._num_cols):\n if self._board[i][j] == \"B\":\n counts[0] += 1\n elif self._board[i][j] == \"W\":\n counts[1] += 1\n return counts", "def global_score_count(self):\n score_count_mat = np.ndarray((5,10))\n\n for n in range(0,5):\n score_count = [\n np.count_nonzero(self.__data[...,n*5:n*5+5] == score)\n for score\n in np.arange(10)+1\n ]\n\n score_count_mat[n,...] 
= score_count\n\n return score_count_mat", "def get_score_matrix(self) -> int:", "def count_pegs(self):\r\n count = 0\r\n\r\n for i in range(0, len(self.matrix)):\r\n for j in range(0, len(self.matrix[i])):\r\n if self.matrix[i][j] == \"1\":\r\n count += 1\r\n\r\n return count", "def count(self):\n return sum([self.bits[x][y] for x in range(self.n_rows)\n for y in range(self.n_columns)])", "def cellranger_counts(fname, genome=\"matrix\"):\n with tables.open_file(fname, \"r\") as f:\n try:\n group = f.get_node(f.root, genome)\n except tables.NoSuchNodeError:\n print(\"That genome does not exist in this file.\")\n return None\n gene_ids = getattr(group, \"features/id\").read()\n barcodes = getattr(group, \"barcodes\").read()\n data = getattr(group, \"data\").read()\n indices = getattr(group, \"indices\").read()\n indptr = getattr(group, \"indptr\").read()\n shape = getattr(group, \"shape\").read()\n\n matrix = sp_sparse.csc_matrix((data, indices, indptr), shape=shape)\n gene_ids = np.array([x.decode() for x in gene_ids])\n barcodes = np.array([x.decode().replace(\"-1\", \"\") for x in barcodes])\n\n return CellRangerCounts(matrix, gene_ids, barcodes)", "def count_matrix(datasets, labels):\n \n fn = lambda fd, axis: fd.shape[0]\n return fn_matrix(datasets, fn, axes = None, label = labels)", "def create_count_map(self) -> Dict[int, int]:\n res: Dict[int, int] = {}\n for sequence_data in self.model.values():\n sequence_data: NGramsSequence = cast(NGramsSequence, sequence_data)\n for count in sequence_data.next_count.values():\n count: int = cast(int, count)\n if count not in res:\n res[count] = 0\n res[count] += 1\n self.count_map = res\n logger.success('created count map')\n return res", "def expanded_counts_map(self):\n if self.hpx._ipix is None:\n return self.counts\n\n output = np.zeros(\n (self.counts.shape[0], self.hpx._maxpix), self.counts.dtype)\n for i in range(self.counts.shape[0]):\n output[i][self.hpx._ipix] = self.counts[i]\n return output", "def sparse_counts_map(self):\n if self.hpx._ipix is None:\n flatarray = self.data.flattern()\n else:\n flatarray = self.expanded_counts_map()\n nz = flatarray.nonzero()[0]\n data_out = flatarray[nz]\n return (nz, data_out)", "def count_matrix(pb_seq):\n assert_same_size(pb_seq)\n pb_count = numpy.zeros((len(pb_seq[0]), len(NAMES)))\n for seq in pb_seq:\n for idx, block in enumerate(seq):\n if block in NAMES:\n pb_count[idx, NAMES.index(block)] += 1.0\n elif block not in [\"Z\", \"z\"]:\n raise InvalidBlockError(block=block)\n return pb_count", "def getCounts(self):\n ret = [0]*len(self.numToLabel)\n for block in self.blocks:\n for label in block[1]: ret[label] += 1\n return ret", "def to_countvectors(self):\n if hasattr(self, \"ifp\"):\n df = self.to_dataframe()\n return to_countvectors(df)\n raise AttributeError(\"Please use the `run` method before\")", "def create_matrix(data, discrete, prop, cutoff, nfeatures):\n y = np.zeros(len(data))\n \n count = 0 \n for i in range (len(data)):\n if data[i][nfeatures+prop]>cutoff:\n y[i]=1\n count += 1\n else:\n y[i]=0\n \n if discrete==False:\n y[i]=data[i][nfeatures+prop]\n \n x = data[:, 0:nfeatures]\n \n \n print (\"Number of good designs \"+str(count)+\" out of total \"+str(len(y)))\n return x, y", "def reformat_countTable(\n self,analysis_id_I=None,sna2experimentID_I=None,\n sna2sns_I=None):\n if self.countTable: countTable = self.countTable[:];\n else: countTable = [];\n\n countTable_flat = self.reformat_countOrFPKMTable(\n countOrFPKMTable_I=countTable,\n analysis_id_I=analysis_id_I,\n 
sna2experimentID_I=sna2experimentID_I,\n sna2sns_I=sna2sns_I,\n count_or_FPKM = 'count');\n return countTable_flat;", "def get_marble_count(self):", "def get_roi_counts(self):\n counts = [[roi.counts for roi in group.rois] for group in self.roi_groups]\n return counts", "def calculate_2mer_freq(counts_file):\n count_matrix = dict()\n\n with open(counts_file, \"r\", newline=\"\") as handle:\n records = csv.reader(handle, delimiter=\"\\t\")\n next(records)\n for row in records:\n nuc1 = str(row[0])\n nuc2 = str(row[1])\n count = int(row[2])\n\n left = \"x{}\".format(nuc2)\n right = \"{}x\".format(nuc1)\n\n count_matrix.setdefault(nuc1, dict())[left] = count\n count_matrix.setdefault(nuc2, dict())[right] = count\n\n lines = \"\"\n header = \"\"\n for ref, d in count_matrix.items():\n lines += ref\n for other in sorted(d.keys()):\n lines += \"\\t\" + str(d[other])\n lines += \"\\n\"\n header = \"x\\t{}\\n\".format(\"\\t\".join(sorted(d.keys())))\n print(header + lines)", "def kmer_count(self,size):\n if size == 1:\n return ['A','T','C','G']\n else:\n result = []\n for seq in Analyze_DNA_Sequence.kmer_count(self,size-1):\n for base in ['A','T','C','G']:\n result.append(seq+base)\n return result", "def as_counts_array(counts):\n if isinstance(counts, (Mapping, MappingView)):\n return numpy.fromiter(counts.values(), dtype=int)\n if isinstance(counts, (GeneratorType, map, filter)):\n return numpy.fromiter(counts, dtype=int)\n return numpy.asarray(counts)", "def count():", "def test_expand_counts(self):\n c = array([2,0,1,2])\n self.assertEqual(expand_counts(c), array([0,0,2,3,3]))" ]
[ "0.7951889", "0.76719147", "0.65599716", "0.6110267", "0.6110267", "0.60370463", "0.6031134", "0.57888055", "0.5784263", "0.57638943", "0.56966496", "0.569527", "0.5555884", "0.555468", "0.55300796", "0.5523527", "0.5501671", "0.54715586", "0.5467564", "0.54530025", "0.5442834", "0.54399854", "0.5389863", "0.5347347", "0.5345584", "0.5338662", "0.52626693", "0.52571034", "0.5242332", "0.5173542" ]
0.80284107
0
Load a count matrix from an mtx directory from CellRanger's output.
def get_matrix_from_mtx(filedir: str) -> Dict[str, Union[sp.csr.csr_matrix, List[np.ndarray], np.ndarray]]: assert os.path.isdir(filedir), "The directory {filedir} is not accessible." # Read in the count matrix using scipy. count_matrix = io.mmread(os.path.join(filedir, 'matrix.mtx')).tocsr().transpose() # Read in gene names. gene_names = np.genfromtxt(fname=os.path.join(filedir, "genes.tsv"), delimiter="\t", skip_header=0, dtype='<U50')[:, 1].squeeze() # second column # Read in barcode names. barcodes = np.genfromtxt(fname=os.path.join(filedir, "barcodes.tsv"), delimiter="\t", skip_header=0, dtype='<U20') # Issue warnings if necessary, based on dimensions matching. if count_matrix.shape[1] != len(gene_names): logging.warning(f"Number of gene names in {filedir}/genes.tsv does not " f"match the number expected from the count matrix.") if count_matrix.shape[0] != len(barcodes): logging.warning(f"Number of barcodes in {filedir}/barcodes.tsv does not " f"match the number expected from the count matrix.") return {'matrix': count_matrix, 'gene_names': gene_names, 'barcodes': barcodes}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cellranger_counts(fname, genome=\"matrix\"):\n with tables.open_file(fname, \"r\") as f:\n try:\n group = f.get_node(f.root, genome)\n except tables.NoSuchNodeError:\n print(\"That genome does not exist in this file.\")\n return None\n gene_ids = getattr(group, \"features/id\").read()\n barcodes = getattr(group, \"barcodes\").read()\n data = getattr(group, \"data\").read()\n indices = getattr(group, \"indices\").read()\n indptr = getattr(group, \"indptr\").read()\n shape = getattr(group, \"shape\").read()\n\n matrix = sp_sparse.csc_matrix((data, indices, indptr), shape=shape)\n gene_ids = np.array([x.decode() for x in gene_ids])\n barcodes = np.array([x.decode().replace(\"-1\", \"\") for x in barcodes])\n\n return CellRangerCounts(matrix, gene_ids, barcodes)", "def init_matrix_count(self):\n if not self.corpus :\n print(\"Error! viterbi::transition_matrix\")\n return False\n\n file_corpus=open(self.corpus,'r')\n file_corpus_contents=file_corpus.readline().split()\n self.__corpus_contents=file_corpus_contents\n #initialize transition\n word1=file_corpus_contents[0]\n self.transition_matrix_count[\"^\"]={}\n self.transition_matrix_count[\"^\"][self.getTag(word1)]=1\n self.addEmissionEntry(word1)\n\n for word2 in file_corpus_contents[1:]:\n self.addTransitionEntry(word1,word2)\n self.addEmissionEntry(word2)\n word1=word2", "def loadmm(filepath):\n X = mmread(filepath)\n return fast_sparse_matrix(X)", "def create_count_matrix(filename, output_dir):\n\n import os\n import json\n\n word_tag_output = \"tag_word_count.json\"\n bigram_matrix_name = \"bigram_count.json\"\n unigram_matrix_name = \"unigram_count.json\"\n trigram_matrix_name = \"trigram_count.json\"\n\n sub_dir = os.path.join(output_dir, \"count_matrix/\")\n if not os.path.exists(sub_dir):\n os.mkdir(sub_dir)\n\n word_tag_matrix = get_tag_word_matrix(filename)\n with open(sub_dir + word_tag_output, \"w\") as f:\n json.dump(word_tag_matrix, f)\n\n unigram_matrix = get_tag_n_gram(n=1, filename=filename)\n with open(sub_dir + unigram_matrix_name, \"w\") as f:\n json.dump(unigram_matrix, f)\n\n bigram_matrix = get_tag_n_gram(n=2, filename=filename)\n with open(sub_dir + bigram_matrix_name, \"w\") as f:\n json.dump(bigram_matrix, f)\n\n trigram_matrix = get_tag_n_gram(n=3, filename=filename)\n with open(sub_dir + trigram_matrix_name, \"w\") as f:\n json.dump(trigram_matrix, f)", "def load(self, path):\n self.matrix = np.loadtxt(path, dtype=float)\n self.rank = len(self.matrix)", "def fileCounter(directory):", "def load_counts(filename, lengths=None, base=None):\n n = None\n if lengths is not None:\n n = lengths.sum()\n shape = (n, n)\n else:\n shape = None\n # This is the interaction count files\n dataframe = pd.read_csv(filename, sep=\"\\t\", comment=\"#\", header=None)\n row, col, data = dataframe.as_matrix().T\n\n # If there are NAs remove them\n mask = np.isnan(data)\n if np.any(mask):\n warnings.warn(\n \"NAs detected in %s. 
\"\n \"Removing NAs and replacing with 0.\" % filename)\n row = row[np.invert(mask)]\n col = col[np.invert(mask)]\n data = data[np.invert(mask)]\n\n # XXX We need to deal with the fact that we should not duplicate entries\n # for the diagonal.\n # XXX what if n doesn't exist?\n if base is not None:\n if base not in [0, 1]:\n raise ValueError(\"indices should start either at 0 or 1\")\n col -= base\n row -= base\n else:\n warnings.warn(\n \"Attempting to guess whether counts are 0 or 1 based\")\n\n if (col.min() >= 1 and row.min() >= 1) and \\\n ((n is None) or (col.max() == n)):\n # This is a hack to deal with the fact that sometimes, the files\n # are indexed at 1 and not 0\n\n col -= 1\n row -= 1\n\n if shape is None:\n n = max(col.max(), row.max()) + 1\n shape = (n, n)\n\n data = data.astype(float)\n counts = sparse.coo_matrix((data, (row, col)), shape=shape)\n return counts", "def fromfile(self, path):\n\t\tdata = filetools.read_data(path)\n\t\tprint \"File read: %i lines\" % len(data)\n\t\tself.build_matrix(data)", "def count(train_dir):\r\n path = train_dir\r\n count = 0\r\n for fn in os.listdir(path): #fn 表示的是文件名\r\n count = count + 1\r\n return count", "def _setcounter():\n fname = os.path.basename(camera.status.lastfile)\n tname = fname.split('.')[0]\n i = len(tname)-1\n if i > -1:\n while tname[i].isdigit() and i>-1:\n i = i - 1\n nname = fname[:-4]\n bname = tname[:i+1]\n for file in glob.glob('/data/counters/'+bname+'[0-9][0-9][0-9].cntr'):\n os.remove(file)\n for file in glob.glob('/data/counters/'+bname+'[0-9][0-9][0-9][0-9].cntr'):\n os.remove(file)\n f = open('/data/counters/'+nname+'cntr','w')\n f.close()", "def imagesMatrix(path,imageSize = 10304,byteorder = '>'):\n listing = os.listdir(path)\n listing.sort()\n count = 0\n docFiles = []\n for infile in listing:\n count = count + 1\n docFiles.append(infile)\n matrix = np.zeros((imageSize,count))\n for i in range(len(listing)):\n matrix[:,i]=np.asarray(read_pgm(join(path,listing[i]),byteorder)).reshape(-1)\n return matrix,listing", "def read_samples(self,dir):\n expected=[]\n xmatrix=[]\n for root,dirs,files in os.walk(dir):\n for file in files:\n with open(os.path.join(root,file),\"r\") as auto:\n expected.append(int(auto.readline().strip('\\n')))\n a=[]\n for line in auto:\n a.append([int(n) for n in line.strip('\\n').split(' ')])\n xmatrix.append(a)\n return np.asarray(xmatrix),expected", "def matrix_readin(suffix='full'):\n\n ## Define arrays for covariance matrices\n c2s = np.zeros([2, 2, n_bins, n_bins])\n c3s = np.zeros([2, 2, 2, n_bins, n_bins])\n c4s = np.zeros([2, 2, 2, 2, n_bins, n_bins])\n ## Normalization arrays for covariance matrices\n n2s = np.zeros([2, 2])\n n3s = np.zeros([2, 2, 2])\n n4s = np.zeros([2, 2, 2, 2])\n\n for ii in range(len(I1)):\n index4 = \"%d%d,%d%d\" % (I1[ii], I2[ii], I3[ii], I4[ii])\n index3 = \"%d,%d%d\" % (I2[ii], I1[ii], I3[ii])\n index2 = \"%d%d\" % (I1[ii], I2[ii])\n\n j1, j2, j3,j4=I1[ii]-1, I2[ii]-1, I3[ii]-1, I4[ii]-1 # internal indexing\n\n # Define input files\n file_root_all = os.path.join(file_root, 'CovMatricesAll/')\n\n if suffix=='full':\n counts_file = file_root_all + 'total_counts_n%d_m%d_%s.txt' % (n, m, index4)\n # Load total number of counts\n try:\n total_counts=np.loadtxt(counts_file)\n print(\"Reading in integral components for C_{%s}, which used %.2e pairs, %.2e triples and %.2e quads of particles\" % (index4,total_counts[0], total_counts[1], total_counts[2]))\n except (FileNotFoundError, IOError): pass\n else:\n pass\n #print(\"Reading in integral components for 
C_{%s}, iteration %s\"%(index4,suffix))\n\n # Load full integrals\n c2 = np.diag(np.loadtxt(file_root_all + 'c2_n%d_m%d_%s_%s.txt' % (n, m, index2, suffix))[skip_bins:])\n c3 = np.loadtxt(file_root_all + 'c3_n%d_m%d_%s_%s.txt' % (n, m, index3, suffix))[skip_bins:, skip_bins:]\n c4 = np.loadtxt(file_root_all + 'c4_n%d_m%d_%s_%s.txt' % (n, m, index4, suffix))[skip_bins:, skip_bins:]\n\n # Now save components\n c2s[j1, j2] += c2\n n2s[j1, j2] += 1\n c3s[j2, j1, j3] += c3\n n3s[j2, j1, j3] += 1\n # will deal with c4s/n4s later\n\n # c2 symmetry - indices interchanged, ensures matrix symmetry if they are equal\n c2s[j2, j1] += c2\n n2s[j2, j1] += 1\n\n # c3 symmetry - last two indices interchanged, ensures matrix symmetry if they are equal\n c3s[j2, j3, j1] += c3.T\n n3s[j2, j3, j1] += 1\n \n # All symmetries possible for c4 without transpositions\n permutations4 = ((j1, j2, j3, j4), # original\n (j2, j1, j3, j4), # first two indices interchanged\n (j1, j2, j4, j3), # last two indices interchanged\n (j2, j1, j4, j3), # first and last two indices interchanged at the same time\n )\n \n for (i1, i2, i3, i4) in permutations4:\n c4s[i1, i2, i3, i4] += c4\n n4s[i1, i2, i3, i4] += 1\n # now swap indices and transpose\n c4s[i3, i4, i1, i2] += c4.T\n n4s[i3, i4, i1, i2] += 1\n \n # normalize the covariances\n c2s /= n2s[:, :, None, None]\n c3s /= n3s[:, :, :, None, None]\n c4s /= n4s[:, :, :, :, None, None]\n\n def construct_fields(j1, j2, j3, j4, alpha1, alpha2):\n # Reconstruct the full field for given input fields and rescaling parameters\n\n # Create kronecker deltas\n d_xw = (j1 == j4)\n d_xz = (j1 == j3)\n d_yw = (j2 == j4)\n d_yz = (j2 == j3)\n\n full = c4s[j1, j2, j3, j4] + 0.25 * alpha1 * (d_xw * c3s[j1, j2, j3] + d_xz * c3s[j1, j2, j4]) + 0.25 * alpha2 * (d_yw * c3s[j2, j1, j3] + d_yz * c3s[j2, j1, j4]) + 0.5 * alpha1 * alpha2 * (d_xw * d_yz + d_xz * d_yw) * c2s[j1, j2]\n return full\n\n # Index in ordering (P_11,P_12,P_22)\n cov_indices = [[0, 0], [0, 1], [1, 1]]\n\n c_tot = np.zeros([3, 3, n_bins, n_bins]) # array with each individual covariance accessible\n c_comb = np.zeros([3*n_bins, 3*n_bins]) # full array suitable for inversion\n\n for j1 in range(3):\n ind1, ind2 = cov_indices[j1]\n alpha1, alpha2 = alpha_best[[ind1, ind2]]\n for j2 in range(3):\n ind3,ind4 = cov_indices[j2]\n tmp = construct_fields(ind1, ind2, ind3, ind4, alpha1, alpha2)\n c_tot[j1, j2] = tmp\n c_comb[j1*n_bins:(j1+1)*n_bins, j2*n_bins:(j2+1)*n_bins] = tmp\n\n return c_tot, 0.5*(c_comb+c_comb.T) # add all remaining symmetries", "def load_scores_ba_dir( dir ):\n return FileBinnedArrayDir( dir )", "def map_count(filename):\n f = open(filename, \"r+\")\n buf = mmap.mmap(f.fileno(), 0)\n lines = 0\n readline = buf.readline\n while readline():\n lines += 1\n return lines", "def updateCounts(self):\n found = False\n fileName = \"counts\"\n if not os.access(fileName, os.F_OK):\n try:\n TFH = open(fileName, \"w\")\n TFH.close()\n except IOError as inst: # @UnusedVariable\n self.logIt(__name__ + \".updateCounts(): Unable to open \" + fileName + \" for write.\" + \" => \" + str(\n inst.errno) + \":\" + str(inst.strerror) + \"\\n\")\n raise\n\n self.logIt(__name__ + \".updateCounts(): fileName=\" + fileName + \"\\n\")\n try:\n FH = open(fileName, \"rb+\")\n # FH = posixfile.open(fileName, \"rb+\") # posixfile has been deprecated.\n # FH.lock('w|')\n data = None\n while 1:\n data = str(FH.readline())\n if data is None or data == \"\": break\n data = re.sub(\"\\n\", \"\", data)\n self.debug(__name__ + \".updateCounts(): data is \" 
+ str(data) + \"\\n\")\n ms = str(self.msgNum) + \"=\"\n self.debug(__name__ + \".updateCounts(): ms is\" + str(ms) + \"\\n\")\n if re.search(ms, data):\n found = True\n self.debug(__name__ + \".updateCounts(): DEBUG0.5\\n\")\n break\n self.debug(__name__ + \".updateCounts(): DEBUG1\\n\")\n if data and found:\n self.debug(__name__ + \".updateCounts(): DEBUG2\\n\")\n eloc = FH.tell()\n self.debug(__name__ + \".updateCounts(): eloc=\" + str(eloc) + \"\\n\")\n sloc = eloc - len(data) - 1\n self.debug(__name__ + \".updateCounts(): sloc=\" + str(sloc) + \"\\n\")\n FH.seek(sloc, os.SEEK_SET)\n cloc = FH.tell()\n self.debug(__name__ + \".updateCounts(): cloc=\" + str(cloc) + \"\\n\")\n myList = list()\n myList = data.split('=')\n icount = int(myList[1]) + 1\n FH.write(str(self.msgNum) + \"=\" + str(icount) + \"\\n\")\n else:\n self.debug(__name__ + \".updateCounts(): DEBUG3\\n\")\n FH.write(str(self.msgNum) + \"=1\" + \"\\n\")\n FH.lock('u')\n FH.close()\n except IOError as inst: # @UnusedVariable\n pass\n # self.logIt( __name__ + \".updateCounts(): Unable to open \" + fileName + \" for write.\" + \" => \" + str( inst.errno ) + \":\" + str( inst.strerror ) + \"\\n\" )\n # Endtry", "def read_matricies():\n\tmatrix_arr = []\n\tcurr_dirpath = os.getcwd()\n\tfor subdir, dirs, files in os.walk(curr_dirpath + '/Example Matrices'):\n\t\tfor curr_file in files:\n\t\t\tcurr_filepath = curr_dirpath + '/Example Matrices/' + curr_file\n\t\t\twith open(curr_filepath, 'r') as open_file:\n\t\t\t\tfor line in open_file:\n\t\t\t\t\tif len(line) > 0:\n\t\t\t\t\t\tcurr_matrix = np.matrix(line)\n\t\t\t\t\t\tmatrix_arr.append(curr_matrix)\n\treturn matrix_arr", "def build_matrix(path_screen, nmols, list_models):\r\n df = pd.DataFrame(columns=list_models, index=nmols)\r\n ntarget = os.path.split(path_screen)[1]\r\n df.index.name = ntarget\r\n df = df.fillna(0)\r\n for num_db in os.listdir(path_screen):\r\n for ff in os.listdir(os.path.join(path_screen, num_db)):\r\n if ff.split('.')[0] in list_models:\r\n pscreenfile = os.path.join(path_screen, num_db, ff)\r\n match_compounds = [int(mol.strip()) for mol in open(pscreenfile).readlines()]\r\n for compound in match_compounds:\r\n df.at[compound, ff.split('.')[0]] = 1\r\n df = df.fillna(0)\r\n return df", "def produce_mirna_single_counts_table(count_files, table_file):\n merge_count_tables(count_files, table_file, \".dedup.single.bam.mirbase_counts.txt\")", "def load_input(path):\n counts = defaultdict(int)\n if not os.path.exists(mode+'indices.p'):\n root = '/'.join(path.split('/')[0:-1])\n all_paths = [root+'/'+x for x in os.listdir(root)] #'/'.join(path.split('/')[0:-1]))\n else:\n all_paths = [path]\n \n for path in all_paths:\n print(path)\n with open(path) as f:\n if mode == 'word':\n words = tokenize(f.read())\n else:\n words = f.read()\n\n for word in words:\n counts[word] += 1 \n\n words = [x for x in words if len(x) > 0]\n return words, counts", "def load_data():\n\n training_files_dir = \"digits/trainingDigits\"\n training_files = os.listdir(training_files_dir)\n file_num = len(training_files)\n hw_labels = []\n\n training_mat = zeros((file_num, 32 * 32))\n for i in xrange(file_num):\n filename = training_files[i]\n file_label = int((filename.split(\".\")[0]).split(\"_\")[0])\n hw_labels.append(file_label)\n training_mat[i, :] = img2vector(training_files_dir + '/' + filename)\n\n return training_mat, hw_labels", "def count_labels(labels_path):\n counts = np.zeros(4)\n with open(labels_path, 'r') as f:\n for line in f:\n line = int(line.split()[1]) - 1\n 
counts[line] += 1\n\n return counts", "def load_initial_tec_num(matrix: list):\n print(\"Start loading...\")\n\n # delete all existed records\n models.TecCore.objects.all().delete()\n # row index\n index = len(matrix)\n for row in matrix[1:]:\n tec_id = row[0]\n common_part_name = row[1].strip()\n mgo = row[2].strip()\n\n t = models.TecCore(\n tec_id=tec_id,\n common_part_name=common_part_name.upper(), # upper case\n mgo_part_name_list=mgo.upper()\n )\n\n # save models\n t.save()\n\n # return loaded row number\n return index", "def gather_counts(directory):\n counts_un = defaultdict(int)\n counts_bi = defaultdict(int)\n counts_tri = defaultdict(int)\n prev_prev = \"<s>\"\n prev = \"<s>\"\n for filename in os.listdir(f\"./{directory}\"):\n if \".DS_Store\" in filename:\n continue\n with open(f\"./{directory}/{filename}\", \"r\") as f:\n for line in f:\n line = line.strip()\n if len(line) == 0:\n continue\n counts_un[line+\"\\n\"] += 1\n counts_bi[prev+\"\\n\"+line+\"\\n\"] += 1\n counts_tri[prev_prev+\"\\n\"+prev+\"\\n\"+line+\"\\n\"] += 1\n prev_prev = prev\n prev = line\n counts_un[\"</s>\\n\"] += 2\n counts_bi[\"</s>\\n</s>\\n\"] += 1\n counts_bi[prev+\"\\n\"+\"</s>\\n\"] += 1\n counts_tri[prev_prev+\"\\n\"+prev+\"\\n\" + \"</s>\\n\"] += 1\n counts_tri[prev+\"\\n</s>\\n</s>\\n\"] += 1\n return counts_un, counts_bi, counts_tri", "def read_file(path_file):\n with open(path_file, 'r') as f:\n L = f.readlines()\n if len(L[0]) == 9:\n #Y file\n matrix = np.zeros(len(L)-1)\n for index, l in enumerate(L):\n if index > 0:\n matrix[index-1] = 2*int(l.split(',')[1])-1\n elif len(L[0]) == 7:\n #X file\n matrix = np.chararray((len(L)-1,100))\n for index, l in enumerate(L):\n if index > 0:\n matrix[index-1,:] = list(l.split(',')[1][:-2])\n elif len(L[0]) > 100:\n #X_mat100 file\n matrix = np.zeros((len(L),100))\n for index, l in enumerate(L):\n matrix[index, :] = list(map(float, l.split(\" \")))\n else:\n assert('ERROR')\n return(matrix)", "def load_matrix(self, src_dir, key_word=\"funneled\"):\r\n X = []\r\n Y = []\r\n label = 0\r\n for root, dirs, files in os.walk(src_dir):\r\n if files != []:\r\n for file in files:\r\n if key_word in file:\r\n img = cv2.imread(os.path.join(root, file), cv2.IMREAD_GRAYSCALE)\r\n min_value = np.min(img)\r\n max_value = np.max(img)\r\n X.append((img.flatten() - min_value)/(max_value - min_value)) # Normalize the data to [0, 1]\r\n Y.append(label)\r\n label +=1\r\n \r\n return dict(X = np.asarray(X), \r\n Y = np.asarray(Y))", "def to_id_matrix(f):\n if path.isfile(\"data/\"+f+\".npy\"):\n return 0\n \n # Get a worker number to position the progress bar\n global idxQueue\n thr_idx = idxQueue.get()\n\n setproctitle(f\"RNANet statistics.py Worker {thr_idx+1} to_id_matrix({f})\")\n\n if not path.isfile(f\"{path_to_seq_data}/realigned/{f}_3d_only.stk\"):\n warn(f\"File not found: {path_to_seq_data}/realigned/{f}_3d_only.stk\")\n align = AlignIO.read(f\"{path_to_seq_data}/realigned/{f}_3d_only.stk\", \"stockholm\")\n names = [ x.id for x in align if '[' in x.id ]\n del align\n \n pbar = tqdm(total = len(names)*(len(names)-1)*0.5, position=thr_idx+1, desc=f\"Worker {thr_idx+1}: {f} idty matrix\", unit=\"comparisons\", leave=False)\n pbar.update(0)\n \n # Prepare the job\n process = subprocess.Popen(shlex.split(f\"esl-alipid --rna --noheader --informat stockholm {path_to_seq_data}/realigned/{f}_3d_only.stk\"), stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n id_matrix = np.zeros((len(names), len(names)))\n cnt = 0\n while not cnt or process.poll() is None:\n output = 
process.stdout.read()\n if output:\n lines = output.strip().split(b'\\n')\n for l in lines:\n cnt += 1\n line = l.split()\n s1 = line[0].decode('utf-8')\n s2 = line[1].decode('utf-8')\n score = line[2].decode('utf-8')\n id1 = names.index(s1)\n id2 = names.index(s2)\n id_matrix[id1, id2] = float(score)\n pbar.update(1)\n if cnt != len(names)*(len(names)-1)*0.5:\n warn(f\"{f} got {cnt} updates on {len(names)*(len(names)-1)*0.5}\")\n if process.poll() != 0:\n l = process.stderr.read().strip().split(b'\\n')\n warn(\"\\n\".join([ line.decode('utf-8') for line in l ]))\n pbar.close()\n\n np.save(\"data/\"+f+\".npy\", id_matrix)\n\n idxQueue.put(thr_idx) # replace the thread index in the queue\n setproctitle(f\"RNANet statistics.py Worker {thr_idx+1} finished\")\n return 0", "def load_confusion_matrix(self):\n self.add_mat = self.load_data_file('confusion_matrix/add.txt')\n self.sub_mat = self.load_data_file('confusion_matrix/sub.txt')\n self.del_mat = self.load_data_file('confusion_matrix/del.txt')\n self.rev_mat = self.load_data_file('confusion_matrix/rev.txt')\n print(\n \"[INFO] Load confusion matrix completed: \\n\" +\n \"[INFO] confusion matrix add_mat: \" + str(self.add_mat) + \"\\n\" +\n \"[INFO] confusion matrix sub_mat: \" + str(self.sub_mat) + \"\\n\" +\n \"[INFO] confusion matrix del_mat: \" + str(self.del_mat) + \"\\n\" +\n \"[INFO] confusion matrix rev_mat: \" + str(self.rev_mat))", "def loader(self, cfile):\n try:\n for i in open(cfile, 'r+').read().splitlines():\n print(i)\n self.RAM.append(i)\n return len(self.RAM)\n except:\n print(cfile + \" not found. System terminating...\")\n sys.exit()", "def read_countfiles(sample_pattern):\n # TODO(jsh): do we really need that assumption anymore?\n def get_sample(countfile):\n base = os.path.basename(countfile).split('.')[0]\n frame = pd.read_csv(countfile, sep='\\t', names=['variant', 'raw'])\n frame.raw = frame.raw.astype('int')\n sample = base.split('_')[0]\n if sample.startswith('t'):\n alts = list()\n for tube in ['a', 'b', 'c']:\n alias = tube + sample[1:]\n aliased = frame.copy()\n aliased['sample'] = alias\n alts.append(aliased)\n return pd.concat(alts, axis='index')\n else:\n frame['sample'] = sample\n return frame\n samples = [get_sample(countfile) for countfile in glob.glob(sample_pattern)]\n grid = pd.concat(samples, axis='index')\n grid.reset_index(drop=True, inplace=True)\n return grid" ]
[ "0.5822121", "0.5723406", "0.55659884", "0.55165875", "0.5495147", "0.5473045", "0.5368484", "0.52734065", "0.5208827", "0.51845884", "0.5170246", "0.51477873", "0.50873554", "0.50840425", "0.50825506", "0.50770015", "0.5061036", "0.50164026", "0.5006129", "0.500322", "0.5000912", "0.49752858", "0.49667957", "0.4942529", "0.49419224", "0.49215472", "0.49145326", "0.49144137", "0.4878073", "0.4872725" ]
0.6061012
0
Load a count matrix from an h5 file from CellRanger's output. The file needs to be a _raw_gene_bc_matrices_h5.h5 file. This function returns a dictionary that includes the count matrix, the gene names (which correspond to columns of the count matrix), and the barcodes (which correspond to rows of the count matrix). This function works for CellRanger v2 and v3 HDF5 formats.
def get_matrix_from_h5(filename: str) -> Dict[str, Union[sp.csr.csr_matrix, List[np.ndarray], np.ndarray]]: # try: with tables.open_file(filename, 'r') as f: # Initialize empty lists. gene_names = [] csc_list = [] barcodes = None # For CellRanger v2, each group in the table (other than root) # contains a genome, so walk through the groups to get data for each genome. # For v3, there is only the 'matrix' group for group in f.walk_groups(): try: # Read in data for this genome, and put it into a # scipy.sparse.csc.csc_matrix barcodes = getattr(group, 'barcodes').read() data = getattr(group, 'data').read() indices = getattr(group, 'indices').read() indptr = getattr(group, 'indptr').read() shape = getattr(group, 'shape').read() csc_list.append(sp.csc_matrix((data, indices, indptr), shape=shape)) # Code for v2 try: gene_names.extend(getattr(group, 'gene_names').read()) except tables.NoSuchNodeError: # This exists in case the file is CellRanger v3 pass # Code for v3 try: # Read in 'feature' information feature_group = f.get_node(group, 'features') feature_types = getattr(feature_group, 'feature_type').read() feature_names = getattr(feature_group, 'name').read() # The only 'feature' we want is 'Gene Expression' is_gene_expression = (feature_types == b'Gene Expression') gene_names.extend(feature_names[is_gene_expression]) # Excise other 'features' from the count matrix gene_feature_inds = np.where(is_gene_expression)[0] csc_list[-1] = csc_list[-1][gene_feature_inds, :] except tables.NoSuchNodeError: # This exists in case the file is CellRanger v2 pass except tables.NoSuchNodeError: # This exists to bypass the root node, which has no data. pass # Put the data from all genomes together (for v2 datasets). count_matrix = sp.vstack(csc_list, format='csc') count_matrix = count_matrix.transpose().tocsr() # Issue warnings if necessary, based on dimensions matching. if count_matrix.shape[1] != len(gene_names): logging.warning(f"Number of gene names in {filename} does not match " f"the number expected from the count matrix.") if count_matrix.shape[0] != len(barcodes): logging.warning(f"Number of barcodes in {filename} does not match " f"the number expected from the count matrix.") return {'matrix': count_matrix, 'gene_names': np.array(gene_names), 'barcodes': np.array(barcodes)} # In order to batch files, this exception is now caught in command_line.py # except IOError: # # # Cannot read input file. Terminate. # logging.error("Unable to open file '%s'" % filename) # sys.exit(IOError)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_matrix_to_h5(output_file: str,\n gene_names: np.ndarray,\n barcodes: np.ndarray,\n inferred_count_matrix: sp.csc.csc_matrix,\n cell_barcode_inds: Union[np.ndarray, None] = None,\n ambient_expression: Union[np.ndarray, None] = None,\n rho: Union[np.ndarray, None] = None,\n phi: Union[np.ndarray, None] = None,\n z: Union[np.ndarray, None] = None,\n d: Union[np.ndarray, None] = None,\n p: Union[np.ndarray, None] = None,\n loss: Union[Dict, None] = None) -> bool:\n\n assert isinstance(inferred_count_matrix,\n sp.csc_matrix), \"The count matrix must be csc_matrix \" \\\n \"format in order to write to HDF5.\"\n\n assert gene_names.size == inferred_count_matrix.shape[1], \\\n \"The number of gene names must match the number of columns in the count\" \\\n \"matrix.\"\n\n assert barcodes.size == inferred_count_matrix.shape[0], \\\n \"The number of barcodes must match the number of rows in the count\" \\\n \"matrix.\"\n\n # This reverses the role of rows and columns, to match CellRanger format.\n inferred_count_matrix = inferred_count_matrix.transpose().tocsc()\n\n # Write to output file.\n try:\n with tables.open_file(output_file, \"w\",\n title=\"Background-subtracted UMI counts\") as f:\n\n # Create the group where data will be stored.\n group = f.create_group(\"/\", \"background_removed\",\n \"Counts after background correction\")\n\n # Create arrays within that group for barcodes and gene_names.\n f.create_array(group, \"gene_names\", gene_names)\n f.create_array(group, \"genes\", np.arange(gene_names.size)) # For compatibility, added post PR\n f.create_array(group, \"barcodes\", barcodes)\n\n # Create arrays to store the count data.\n f.create_array(group, \"data\", inferred_count_matrix.data)\n f.create_array(group, \"indices\", inferred_count_matrix.indices)\n f.create_array(group, \"indptr\", inferred_count_matrix.indptr)\n f.create_array(group, \"shape\", inferred_count_matrix.shape)\n\n # Store background gene expression, barcode_inds, z, d, and p.\n if cell_barcode_inds is not None:\n f.create_array(group, \"barcode_indices_for_latents\",\n cell_barcode_inds)\n if ambient_expression is not None:\n f.create_array(group, \"ambient_expression\", ambient_expression)\n if z is not None:\n f.create_array(group, \"latent_gene_encoding\", z)\n if d is not None:\n f.create_array(group, \"latent_scale\", d)\n if p is not None:\n f.create_array(group, \"latent_cell_probability\", p)\n if rho is not None:\n f.create_array(group, \"contamination_fraction_params\", rho)\n if phi is not None:\n f.create_array(group, \"overdispersion_params\", phi)\n if loss is not None:\n f.create_array(group, \"training_elbo_per_epoch\",\n np.array(loss['train']['elbo']))\n\n logging.info(f\"Succeeded in writing output to file {output_file}\")\n\n return True\n\n except Exception:\n logging.warning(f\"Encountered an error writing output to file \"\n f\"{output_file}. 
\"\n \"Output may be incomplete.\")\n\n return False", "def cellranger_counts(fname, genome=\"matrix\"):\n with tables.open_file(fname, \"r\") as f:\n try:\n group = f.get_node(f.root, genome)\n except tables.NoSuchNodeError:\n print(\"That genome does not exist in this file.\")\n return None\n gene_ids = getattr(group, \"features/id\").read()\n barcodes = getattr(group, \"barcodes\").read()\n data = getattr(group, \"data\").read()\n indices = getattr(group, \"indices\").read()\n indptr = getattr(group, \"indptr\").read()\n shape = getattr(group, \"shape\").read()\n\n matrix = sp_sparse.csc_matrix((data, indices, indptr), shape=shape)\n gene_ids = np.array([x.decode() for x in gene_ids])\n barcodes = np.array([x.decode().replace(\"-1\", \"\") for x in barcodes])\n\n return CellRangerCounts(matrix, gene_ids, barcodes)", "def get_matrix_from_mtx(filedir: str) -> Dict[str,\n Union[sp.csr.csr_matrix,\n List[np.ndarray],\n np.ndarray]]:\n\n assert os.path.isdir(filedir), \"The directory {filedir} is not accessible.\"\n\n # Read in the count matrix using scipy.\n count_matrix = io.mmread(os.path.join(filedir,\n 'matrix.mtx')).tocsr().transpose()\n\n # Read in gene names.\n gene_names = np.genfromtxt(fname=os.path.join(filedir, \"genes.tsv\"),\n delimiter=\"\\t\", skip_header=0,\n dtype='<U50')[:, 1].squeeze() # second column\n\n # Read in barcode names.\n barcodes = np.genfromtxt(fname=os.path.join(filedir, \"barcodes.tsv\"),\n delimiter=\"\\t\", skip_header=0, dtype='<U20')\n\n # Issue warnings if necessary, based on dimensions matching.\n if count_matrix.shape[1] != len(gene_names):\n logging.warning(f\"Number of gene names in {filedir}/genes.tsv does not \"\n f\"match the number expected from the count matrix.\")\n if count_matrix.shape[0] != len(barcodes):\n logging.warning(f\"Number of barcodes in {filedir}/barcodes.tsv does not \"\n f\"match the number expected from the count matrix.\")\n\n return {'matrix': count_matrix,\n 'gene_names': gene_names,\n 'barcodes': barcodes}", "def read_counts_file(path):\n labels_filename = path\n with open(labels_filename, 'rb') as f:\n lines = f.read().decode()\n lines = lines.split('\\n')\n lines = filter(None, lines)\n\n labels_to_counts = {}\n for line in lines:\n index = line.index(':')\n labels_to_counts[line[:index]] = int(line[index+1:])\n return labels_to_counts", "def count_barcodes(metrics_file):\n\n barcodes = pd.read_csv(metrics_file, sep=\"\\t\", header=0, names=[\"barcode\", \"randomer\", \"count\"])\n return Counter(dict(barcodes.groupby(\"barcode\")['count'].sum().iteritems()))", "def get_count_matrix_all_barcodes(self) -> sp.csr.csr_matrix:\n\n if self.is_trimmed:\n\n # Return the count matrix for selected barcodes and genes.\n trimmed_bc_matrix = self.data['matrix'].tocsc()\n trimmed_matrix = trimmed_bc_matrix[:, self.analyzed_gene_inds].tocsr()\n\n # Apply transformation to the count data.\n return self.transformation.transform(trimmed_matrix)\n\n else:\n logging.warning(\"Using full count matrix, without any trimming. \"\n \"Could be slow.\")\n\n # Apply transformation to the count data.\n return self.transformation.transform(self.data['matrix'])", "def block_level_distribution_file( file ):\n import h5py\n import numpy as np\n\n # open the h5 wabbit file\n fid = h5py.File(file,'r')\n\n # read treecode table\n b = fid['block_treecode'][:]\n treecode = np.array(b, dtype=float)\n\n # close file\n fid.close()\n\n # number of blocks\n Nb = treecode.shape[0]\n\n # min/max level. 
required to allocate list!\n jmin, jmax = get_max_min_level( treecode )\n counter = np.zeros(jmax+1)\n\n # fetch level for each block and count\n for i in range(Nb):\n J = treecode_level(treecode[i,:])\n counter[J] += 1\n\n return counter", "def load_h5(fname: str, path: str='/') -> dict:\n try:\n with h5py.File(fname, 'r') as f:\n dataMap = recursively_load_dict_contents_from_group(f, path)\n except IOError as e:\n print(f\"Cannot open HDF5 file {fname}\")\n print(f\"IOError: {e}\")\n\n return dataMap", "def calculate_2mer_freq(counts_file):\n count_matrix = dict()\n\n with open(counts_file, \"r\", newline=\"\") as handle:\n records = csv.reader(handle, delimiter=\"\\t\")\n next(records)\n for row in records:\n nuc1 = str(row[0])\n nuc2 = str(row[1])\n count = int(row[2])\n\n left = \"x{}\".format(nuc2)\n right = \"{}x\".format(nuc1)\n\n count_matrix.setdefault(nuc1, dict())[left] = count\n count_matrix.setdefault(nuc2, dict())[right] = count\n\n lines = \"\"\n header = \"\"\n for ref, d in count_matrix.items():\n lines += ref\n for other in sorted(d.keys()):\n lines += \"\\t\" + str(d[other])\n lines += \"\\n\"\n header = \"x\\t{}\\n\".format(\"\\t\".join(sorted(d.keys())))\n print(header + lines)", "def create_count_matrix(filename, output_dir):\n\n import os\n import json\n\n word_tag_output = \"tag_word_count.json\"\n bigram_matrix_name = \"bigram_count.json\"\n unigram_matrix_name = \"unigram_count.json\"\n trigram_matrix_name = \"trigram_count.json\"\n\n sub_dir = os.path.join(output_dir, \"count_matrix/\")\n if not os.path.exists(sub_dir):\n os.mkdir(sub_dir)\n\n word_tag_matrix = get_tag_word_matrix(filename)\n with open(sub_dir + word_tag_output, \"w\") as f:\n json.dump(word_tag_matrix, f)\n\n unigram_matrix = get_tag_n_gram(n=1, filename=filename)\n with open(sub_dir + unigram_matrix_name, \"w\") as f:\n json.dump(unigram_matrix, f)\n\n bigram_matrix = get_tag_n_gram(n=2, filename=filename)\n with open(sub_dir + bigram_matrix_name, \"w\") as f:\n json.dump(bigram_matrix, f)\n\n trigram_matrix = get_tag_n_gram(n=3, filename=filename)\n with open(sub_dir + trigram_matrix_name, \"w\") as f:\n json.dump(trigram_matrix, f)", "def parseGroupsFileToDictOfCounts(groups_file):\n return parseGroupsFileToDict(groups_file, \"counts\")", "def generate_transition_bigram_counts(file_path):\r\n\ttransition_bigram_counts = dict()\r\n\twith open(file_path) as f:\r\n\t\tline_count = 0\r\n\t\tfor line in f:\r\n\t\t\tline_count+=1\r\n\t\t\tif line_count%3 != 0:\r\n\t\t\t\tcontinue\r\n\t\t\ttag_set = [\"<START>\"] + line.lower().split()\r\n\t\t\ti = 1\r\n\t\t\twhile(i<len(tag_set)):\r\n\t\t\t\ttag_tuple = (tag_set[i-1], tag_set[i])\r\n\t\t\t\tif tag_tuple not in transition_bigram_counts:\r\n\t\t\t\t\ttransition_bigram_counts[tag_tuple] = 1\r\n\t\t\t\telse:\r\n\t\t\t\t\ttransition_bigram_counts[tag_tuple]+=1\r\n\t\t\t\ti+=1\r\n\treturn transition_bigram_counts", "def loadh5(fname, path='/data'):\n fp = open_read(fname)\n slab = fp.get_node(path)\n mat = slab.read()\n fp.close()\n return mat", "def load_counts(filename, lengths=None, base=None):\n n = None\n if lengths is not None:\n n = lengths.sum()\n shape = (n, n)\n else:\n shape = None\n # This is the interaction count files\n dataframe = pd.read_csv(filename, sep=\"\\t\", comment=\"#\", header=None)\n row, col, data = dataframe.as_matrix().T\n\n # If there are NAs remove them\n mask = np.isnan(data)\n if np.any(mask):\n warnings.warn(\n \"NAs detected in %s. 
\"\n \"Removing NAs and replacing with 0.\" % filename)\n row = row[np.invert(mask)]\n col = col[np.invert(mask)]\n data = data[np.invert(mask)]\n\n # XXX We need to deal with the fact that we should not duplicate entries\n # for the diagonal.\n # XXX what if n doesn't exist?\n if base is not None:\n if base not in [0, 1]:\n raise ValueError(\"indices should start either at 0 or 1\")\n col -= base\n row -= base\n else:\n warnings.warn(\n \"Attempting to guess whether counts are 0 or 1 based\")\n\n if (col.min() >= 1 and row.min() >= 1) and \\\n ((n is None) or (col.max() == n)):\n # This is a hack to deal with the fact that sometimes, the files\n # are indexed at 1 and not 0\n\n col -= 1\n row -= 1\n\n if shape is None:\n n = max(col.max(), row.max()) + 1\n shape = (n, n)\n\n data = data.astype(float)\n counts = sparse.coo_matrix((data, (row, col)), shape=shape)\n return counts", "def get_frame():\n\tall_frames = {}\n\tkeys = get_utt()\n\tfor i, matrix_id in enumerate(mfcc_h5.read(keys)):\t\n\t\tmatrix = np.asarray(matrix_id)\t\n\t\tall_frames[keys[i]] = matrix \n\t\n\treturn all_frames", "def _load_mock_bgs_mxxl_file_hdf5(filename):\n f = h5py.File(filename)\n ra = f[\"Data/ra\"][...].astype('f8') % 360.0\n dec = f[\"Data/dec\"][...].astype('f8')\n SDSSr_true = f[\"Data/app_mag\"][...].astype('f8')\n zred = f[\"Data/z_obs\"][...].astype('f8')\n f.close()\n\n return {'RA':ra, 'DEC':dec, 'Z': zred ,\n 'SDSSr_true':SDSSr_true}", "def read_h5(self):\n infile = h5py.File(self.inf_name,'r')\n\n vardict = self.labdict\n #store data with the correct labels\n for k in infile['plasma/1d'].keys():\n try:\n vardict[k] = infile[self.labdict[k]].value\n except:\n vardict[k] = []\n\n vardict['a_ions']=infile['/plasma/anum'].value\n vardict['znum']=infile['/plasma/znum'].value\n \n\n self.rho_in = vardict['rho']\n self._rho_vol = infile['distributions/rhoDist/abscissae/dim1'].value[1:]\n self._volumes = infile['distributions/rhoDist/shellVolume'].value\n self.nrho_in = np.size(self.rho_in)\n\n if vardict['a_ions'][0]!='/':\n self.nspec = len(vardict['a_ions'])\n else:\n self.nspec = vardict['ni'].shape[1]\n print(\"Number of ions: \", self.nspec)\n if len(vardict['a_ions'])!=len(vardict['znum']):\n print(\"ERROR! 
array of A and Z don't have the same length\")\n\n self.A = vardict['a_ions']\n self.Z = vardict['znum']\n self.nion = self.nspec\n \n self.te_in = vardict['te'][:]\n self.ne_in = vardict['ne'][:]\n self.ti_in = vardict['ti'][:]\n ni1_in = vardict['ni'][:,0]\n self.ni_in = np.zeros((self.nion, self.nrho_in),dtype=float)\n self.ni_in[0,:] = ni1_in\n if self.nion==2:\n ni2_in = vardict['ni'][:,1]\n self.ni_in[1,:] = ni2_in\n elif self.nion==3:\n ni2_in = vardict['ni'][:,1]\n ni3_in = vardict['ni'][:,2]\n self.ni_in[1,:] = ni2_in\n self.ni_in[2,:] = ni3_in\n\n try:\n self.vt_in = vardict['vtor']\n except:\n self.vt_in = np.zeros(self.nrho_in,dtype=float)\n\n try:\n self.zeff_in = vardict['zeff'][:]\n except:\n self.zeff_in = np.zeros(self.nrho_in,dtype=float)\n\n self.ni = np.zeros((self.nion, self.nrho),dtype = float)\n self.spline()", "def get_all_counts(filename):\r\n column_keys, get_data = get_csv(filename)\r\n all_counts_dict = {}\r\n for key in column_keys[1:]:\r\n all_counts_dict[key] = {}\r\n\r\n for i,(k,v) in enumerate(get_data()):\r\n for key in column_keys[1:]:\r\n column = column_keys[1:].index(key)\r\n x = v[column]\r\n all_counts_dict[key][x] = all_counts_dict[key].get(x, 0) + 1\r\n return all_counts_dict", "def load_h5(fname, surfmap=True):\n filenames = glob.glob(fname)\n print(\"Files found: {}\".format(filenames))\n fin = h5py.File(filenames[0])\n meas = fin['measurement0'] # Wavefront data located in 'measurement0'\n opdsets = meas['genraw']\n wvl = opdsets.attrs['wavelength'][:]\n wvl = float(wvl[:-3])\n # Get the x pixel spacing\n try:\n iscale = float(opdsets.attrs['xpix'][:-3])\n except TypeError:\n iscale = 0.0\n print(\"No Calibration Dimensioning Found in H5 file\")\n # Return either surface map or fringe map\n if surfmap is True:\n data = np.asarray(opdsets['data'])\n data[data > 1e10] = np.nan # Eliminates \"bad\" data sets to NAN\n data *= wvl * mask_data(filenames[0])\n else:\n data = np.asarray(meas['reserve_interferogram']['frame4']['data'])\n return data, wvl, iscale", "def count_entries2(csv_file,c_size,colname):\n \n # Initialize an empty dictionary: counts_dict\n counts_dict = {}\n\n # Iterate over the file chunk by chunk\n for chunk in pd.read_csv(csv_file, chunksize=c_size):\n\n # Iterate over the column in DataFrame\n for entry in chunk[colname]:\n if entry in counts_dict.keys():\n counts_dict[entry] += 1\n else:\n counts_dict[entry] = 1\n\n # Return counts_dict\n return counts_dict", "def make_dict_database(file_name):\n f = h5py.File(file_name,'r')\n\n num_games = len(f.keys())\n db = {'input' : np.zeros((num_games, 181)), 'output' : np.zeros((num_games, 2))}\n for i, g in enumerate(f.keys()):\n db['input'][i] = f[g]['input'][:]\n db['output'][i] = f[g]['win_probs'][:]\n \n return db", "def parse_gcov_file(gcov_file):\n count = {}\n with open(gcov_file) as fh:\n for line in fh:\n tag, value = line.split(':')\n if tag == 'file':\n src_file = value.rstrip()\n elif tag == 'lcount':\n line_num, exec_count = value.split(',')\n count[int(line_num)] = int(exec_count)\n\n return src_file, count", "def parse_mixcr_table(filepath):\n tbl = pd.read_csv(filepath, sep='\\t')\n out = {}\n for _, row in tbl.iterrows():\n motifs = get_binding_motifs(row['aaSeqImputedCDR3'])\n for kind, motif_counts in motifs.items():\n for motif, count in motif_counts.items():\n for mykind in [kind, 'all_types']:\n key = (mykind, motif)\n if key not in out:\n out[key] = {\n 'num_unique_seqs': 0,\n 'num_clones': 0,\n 'num_unique_occurences': 0,\n 'num_clonal_occurences': 0,\n }\n 
out[key]['num_unique_seqs'] += 1\n out[key]['num_clones'] += row['cloneCount']\n out[key]['num_unique_occurences'] += count\n out[key]['num_clonal_occurences'] += count * row['cloneCount']\n return out", "def open_h5meta(filepath):\n data = dict()\n h5meta_content = read_h5meta(filepath)\n for file in h5meta_content[\"filelist\"]:\n data[file] = read_detector_data(file)\n\n return data", "def parse_histograms(filepath):\n with open(filepath, 'r') as f:\n lines = f.readlines()\n # Define regular expressions matching (sub-)headers and data lines\n th1f_exp = re.compile(r'^TH1F\\|.+')\n header_exp = re.compile(r'^TH1F\\|(.+?)\\|B(?:R|F)/U2(.+?)\\|.+? mass \\(GeV\\)\\|?')\n subheader_exp = re.compile(r'^\\s*?(\\d+?),\\s*(\\d+?\\.\\d+?),\\s*(\\d+\\.\\d+)\\s*$')\n data_exp = re.compile(r'^\\s*(\\d+?)\\s*,\\s*(\\d+\\.\\d+)\\s*$')\n # Locate beginning of each histogram\n header_line_idx = [i for i in range(len(lines)) if th1f_exp.match(lines[i]) is not None]\n # Iterate over histograms\n histograms = {}\n for offset in header_line_idx:\n # Parse header\n mh = header_exp.match(lines[offset])\n if mh is None or len(mh.groups()) != 2:\n raise ValueError(\"Malformed header encountered: {0}\".format(lines[offset]))\n decay_code = mh.group(1)\n # Parse sub-header (min/max mass and number of points)\n ms = subheader_exp.match(lines[offset+1])\n if ms is None or len(ms.groups()) != 3:\n raise ValueError(\"Malformed sub-header encountered: {0}\".format(lines[offset+1]))\n npoints = int(ms.group(1))\n min_mass = float(ms.group(2))\n max_mass = float(ms.group(3))\n masses = np.linspace(min_mass, max_mass, npoints, endpoint=False)\n branching_ratios = np.zeros(npoints)\n # Now read the data lines (skipping the two header lines)\n for line in lines[offset+2:offset+npoints+1]:\n md = data_exp.match(line)\n if md is None or len(md.groups()) != 2:\n raise ValueError(\"Malformed data row encountered: {0}\".format(line))\n idx = int(md.group(1))\n br = float(md.group(2))\n branching_ratios[idx] = br\n histograms[decay_code] = (masses, branching_ratios)\n return histograms", "def load_read_count(pbi_file):\n\n # Decode PacBio .pbi file. 
This is not a full decode of the index, only the parts we need\n # until we get to the read count.\n # More on index format at https://pacbiofileformats.readthedocs.io/en/9.0/PacBioBamIndex.html .\n\n fmt = Struct(\n # Header\n \"magic\" / Const(b\"PBI\\x01\"),\n \"version_patch\" / Int8ul,\n \"version_minor\" / Int8ul,\n \"version_major\" / Int8ul,\n \"version_empty\" / Int8ul,\n \"pbi_flags\" / Int16ul,\n \"n_reads\" / Int32ul,\n )\n\n with gzip.open(pbi_file, \"rb\") as f:\n idx_contents = fmt.parse_stream(f)\n\n return idx_contents.n_reads", "def load_rawdata_mobilegaitlab(datafile):\n itype = np.int16\n rawdata = np.fromfile(datafile, dtype=[\n (\"counter\", np.int32),\n (\"ax\", itype),\n (\"ay\", itype),\n (\"az\", itype),\n (\"rx\", itype),\n (\"ry\", itype),\n (\"rz\", itype)])\n data_dict = {key: np.array(rawdata[key], dtype=float)\n for key in rawdata.dtype.fields}\n N = len(data_dict[\"counter\"])\n data_dict[\"dt\"] = 0.01*np.ones(N, dtype=float)\n return data_dict", "def load_data(file_path):\n with h5py.File(file_path) as f:\n # load meta info\n fs, channels, p_names, signals = _get_info(f)\n\n # load raw data\n data = [f['protocol{}/raw_data'.format(k + 1)][:] for k in range(len(p_names))]\n df = pd.DataFrame(np.concatenate(data), columns=channels)\n\n # load signals data\n signals_data = [f['protocol{}/signals_data'.format(k + 1)][:] for k in range(len(p_names))]\n df_signals = pd.DataFrame(np.concatenate(signals_data), columns=['signal_'+s for s in signals])\n df = pd.concat([df, df_signals], axis=1)\n\n # load timestamps\n if 'timestamp' in df:\n timestamp_data = [f['protocol{}/timestamp_data'.format(k + 1)][:] for k in range(len(p_names))]\n df['timestamps'] = np.concatenate(timestamp_data)\n\n # events data\n events_data = [f['protocol{}/mark_data'.format(k + 1)][:] for k in range(len(p_names))]\n df['events'] = np.concatenate(events_data)\n\n # set block names and numbers\n df['block_name'] = np.concatenate([[p]*len(d) for p, d in zip(p_names, data)])\n df['block_number'] = np.concatenate([[j + 1]*len(d) for j, d in enumerate(data)])\n return df, fs, channels, p_names", "def read_scoring_matrix(filename):\n scoring_dict = {}\n scoring_file = urllib2.urlopen(filename)\n ykeys = scoring_file.readline()\n ykeychars = ykeys.split()\n for line in scoring_file.readlines():\n vals = line.split()\n xkey = vals.pop(0)\n scoring_dict[xkey] = {}\n for ykey, val in zip(ykeychars, vals):\n scoring_dict[xkey][ykey] = int(val)\n return scoring_dict", "def countDataSize(self,filename):\n \n try:\n d = h5py.File(filename,'r')\n except:\n print(filename)\n return \n\n N = 0\n scan_edges = d['level2/Statistics/scan_edges'][:]\n for (start,end) in scan_edges:\n N += (end-start)//self.offsetLen * self.offsetLen\n d.close()\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)" ]
[ "0.6468086", "0.62510335", "0.56875813", "0.5545488", "0.5397619", "0.5346175", "0.5329193", "0.53139323", "0.5265356", "0.5219594", "0.5218226", "0.51863855", "0.5039143", "0.50300133", "0.5007558", "0.49996474", "0.49768156", "0.49536636", "0.49511558", "0.49158347", "0.49119967", "0.48623988", "0.48438975", "0.48350677", "0.48040968", "0.4800592", "0.47961453", "0.4794854", "0.478669", "0.47837368" ]
0.6791512
0
Write count matrix data to output HDF5 file using CellRanger format.
def write_matrix_to_h5(output_file: str, gene_names: np.ndarray, barcodes: np.ndarray, inferred_count_matrix: sp.csc.csc_matrix, cell_barcode_inds: Union[np.ndarray, None] = None, ambient_expression: Union[np.ndarray, None] = None, rho: Union[np.ndarray, None] = None, phi: Union[np.ndarray, None] = None, z: Union[np.ndarray, None] = None, d: Union[np.ndarray, None] = None, p: Union[np.ndarray, None] = None, loss: Union[Dict, None] = None) -> bool: assert isinstance(inferred_count_matrix, sp.csc_matrix), "The count matrix must be csc_matrix " \ "format in order to write to HDF5." assert gene_names.size == inferred_count_matrix.shape[1], \ "The number of gene names must match the number of columns in the count" \ "matrix." assert barcodes.size == inferred_count_matrix.shape[0], \ "The number of barcodes must match the number of rows in the count" \ "matrix." # This reverses the role of rows and columns, to match CellRanger format. inferred_count_matrix = inferred_count_matrix.transpose().tocsc() # Write to output file. try: with tables.open_file(output_file, "w", title="Background-subtracted UMI counts") as f: # Create the group where data will be stored. group = f.create_group("/", "background_removed", "Counts after background correction") # Create arrays within that group for barcodes and gene_names. f.create_array(group, "gene_names", gene_names) f.create_array(group, "genes", np.arange(gene_names.size)) # For compatibility, added post PR f.create_array(group, "barcodes", barcodes) # Create arrays to store the count data. f.create_array(group, "data", inferred_count_matrix.data) f.create_array(group, "indices", inferred_count_matrix.indices) f.create_array(group, "indptr", inferred_count_matrix.indptr) f.create_array(group, "shape", inferred_count_matrix.shape) # Store background gene expression, barcode_inds, z, d, and p. if cell_barcode_inds is not None: f.create_array(group, "barcode_indices_for_latents", cell_barcode_inds) if ambient_expression is not None: f.create_array(group, "ambient_expression", ambient_expression) if z is not None: f.create_array(group, "latent_gene_encoding", z) if d is not None: f.create_array(group, "latent_scale", d) if p is not None: f.create_array(group, "latent_cell_probability", p) if rho is not None: f.create_array(group, "contamination_fraction_params", rho) if phi is not None: f.create_array(group, "overdispersion_params", phi) if loss is not None: f.create_array(group, "training_elbo_per_epoch", np.array(loss['train']['elbo'])) logging.info(f"Succeeded in writing output to file {output_file}") return True except Exception: logging.warning(f"Encountered an error writing output to file " f"{output_file}. " "Output may be incomplete.") return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_count_matrix(pb_count, outfile, first=1):\n # write the header (PB names)\n print(\" \" + \"\".join([\"%6s\" % name for name in NAMES]), file=outfile)\n # write the data table\n for residue_idx, residue_pb in enumerate(pb_count):\n print(\"%-5d\" % (residue_idx + first) +\n \" \".join(\"%5d\" % i for i in residue_pb), file=outfile)", "def write_counts(self):\n\n self.db_file.write(\n struct.pack(\"<HH\", self.number_of_entries, 0x0040))\n self.db_file.write(\n struct.pack(\"<HH\", self.number_of_genres, 0x0010))\n self.db_file.write(\n struct.pack(\"<HH\", self.number_of_performers, 0x0010))\n self.db_file.write(\n struct.pack(\"<HH\", self.number_of_albums, 0x0010))\n self.db_file.write(\n struct.pack(\"<HH\", self.number_of_playlists, 0x0010))\n self.db_file.write(\n struct.pack(\"<HH\", 0x0001, 0x0014))\n\n self.db_file.write(\n b\"\\x01\\x00\\x02\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x02\\x00\\x00\\x00\\x00\\x00\")\n self.db_file.write(\n b\"\\x00\\x00\\x06\\x00\\x04\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\")", "def save_as_hdf5(self, filename):", "def write_to_file(self, time):\n if Parameters.instance().use_ages:\n nb_age_groups = len(Parameters.instance().age_proportions)\n else:\n nb_age_groups = 1\n if Parameters.instance().use_ages:\n if self.spatial_output: # Separate output line for each cell\n for cell in self.population.cells:\n for age_i in range(0, nb_age_groups):\n data = {s: 0 for s in list(InfectionStatus)}\n for inf_status in data:\n data_per_inf_status =\\\n cell.compartment_counter.retrieve()[inf_status]\n data[inf_status] += data_per_inf_status[age_i]\n # Age groups are numbered from 1 to the total number\n # of age groups (thus the +1):\n data[\"age_group\"] = age_i+1\n data[\"time\"] = time\n data[\"cell\"] = cell.id\n data[\"location_x\"] = cell.location[0]\n data[\"location_y\"] = cell.location[1]\n self.writer.write(data)\n else: # Summed output across all cells in population\n data = {s: 0 for s in list(InfectionStatus)}\n for cell in self.population.cells:\n for age_i in range(0, nb_age_groups):\n for inf_status in list(InfectionStatus):\n data_per_inf_status =\\\n cell.compartment_counter.retrieve()[inf_status]\n data[inf_status] += data_per_inf_status[age_i]\n data[\"age_group\"] = age_i+1\n data[\"time\"] = time\n self.writer.write(data)\n else: # If age not considered, age_group not written in csv\n if self.spatial_output: # Separate output line for each cell\n for cell in self.population.cells:\n data = {s: 0 for s in list(InfectionStatus)}\n for k in data:\n data[k] += sum(cell.compartment_counter.retrieve()[k])\n data[\"time\"] = time\n data[\"cell\"] = cell.id\n data[\"location_x\"] = cell.location[0]\n data[\"location_y\"] = cell.location[1]\n self.writer.write(data)\n else: # Summed output across all cells in population\n data = {s: 0 for s in list(InfectionStatus)}\n for cell in self.population.cells:\n for k in data:\n # Sum across age compartments\n data[k] += sum(cell.compartment_counter.retrieve()[k])\n data[\"time\"] = time\n self.writer.write(data)", "def writeH5Dataset( self, foldername, time, nameConvention = \"grid\" ):\n filename = \"{0}/{1}_{2:06}.h5\".format(foldername,nameConvention,time)\n file = h5py.File(filename,'w',driver='mpio',comm=self.global_comm)\n dset = file.create_dataset(\"dset\",self._layout.fullShape, dtype = self._f.dtype)\n slices = tuple([slice(s,e) for s,e in zip(self._layout.starts,self._layout.ends)])\n dset[slices]=self._f[:]\n attr_data = np.array(self._layout.dims_order)\n 
dset.attrs.create(\"Layout\", attr_data, (self._nDims,), h5py.h5t.STD_I32BE)\n file.close()", "def outputBigMatrix(cellNames, results, outFname, isGene=False):\n logging.info(\"Writing data to file %s\" % outFname)\n ofh = open(outFname, \"w\")\n # write header\n if isGene:\n ofh.write(\"#gene\\t%s\\n\" % \"\\t\".join(cellNames))\n else:\n ofh.write(\"#transcript\\t%s\\n\" % \"\\t\".join(cellNames))\n \n # create a sorted list of all transcript names\n logging.info(\"Getting transcript IDs\")\n allTrans = set()\n for res in results:\n allTrans.update(res)\n allTrans = list(allTrans)\n allTrans.sort()\n\n # write out matrix\n logging.info(\"Iterating over transcript IDs and writing to tab file\")\n for trans in allTrans:\n ofh.write(\"%s\\t\" % trans)\n row = []\n for countDict in results:\n row.append(str(countDict.get(trans, 0)))\n ofh.write(\"\\t\".join(row))\n ofh.write(\"\\n\")\n ofh.close()\n\n # also output as a binary file for now\n # it's a lot easier and faster to parse, at least for python scripts\n # can be read from python with a single line:\n # matrix = marshal.load(open(\"data.tab.marshal\"))\n # matrix is then a nested hash: cellName -> transcript -> count\n binPath = outFname+\".marshal\"\n logging.info(\"Writing %s\" % binPath)\n allData = {}\n for name, transDict in zip(cellNames, results):\n allData[name] = transDict\n marshal.dump(allData, open(binPath, \"wb\"))", "def write_file(self):\n if self.it_num % 5 == 0:\n #plt.imshow(self.grid)\n #plt.savefig(\"output%.4d.png\" % self.it_num, bbox_inches='tight')\n io.savemat(\"MLOutput%.4d\" % self.it_num, { \"Grid\":self.grid})", "def write_hdf5(filename, data):\n \n if '.h5' in filename:\n fid = h5py.File(filename, 'w')\n else:\n filename = filename+'.h5'\n fid = h5py.File(filename, 'w')\n\n print('Writing %s...'%filename)\n\n write_hdf5_group(fid, data)\n\n fid.close()\n print('Finished writting %s.'%filename)\n return", "def fog_file_writer(data, location, count):\n\n data.to_csv(location.format(str(count % 2000) + '.txt'))\n count += 1\n\n return count", "def generate_cell_tsv():\n\n h5_in_path = INPUT_FILE_PATH.replace(\".bed.gz\", \".h5\") \\\n .replace(OVERLAP_PATH, REFERENCE_PATH + PAS_DATASET + \"/centered/\")\n with h5.File(h5_in_path, 'r') as h5_in:\n cell_ids = list(h5_in['cells'])\n utr_lengths = list(h5_in['utrs'])\n cluster_lengths = list(h5_in['cluster_utrs'])\n trajectory_lengths = list(h5_in['traj_utrs'])\n subtrajectory_lengths = list(h5_in['subtraj_utrs'])\n age_lengths = list(h5_in['age_utrs'])\n\n with open(REFERENCE_PATH + \"names_by_id.pkl\", 'rb') as names_in:\n cell_names = pkl.load(names_in)[0]\n tsv_out_path = INPUT_FILE_PATH.replace(\".bed.gz\", \".tsv\") \\\n .replace(OVERLAP_PATH, REFERENCE_PATH + PAS_DATASET + \"/tsv/\")\n with open(tsv_out_path, 'wt') as cell_data_out:\n cell_count = 0\n cell_utrs = []\n cell_utrs_cluster = []\n cell_utrs_trajectory = []\n cell_utrs_subtrajectory = []\n cell_utrs_age = []\n for idx, cell_id in enumerate(cell_ids):\n cell_count += 1\n cell_utr = utr_lengths[idx]\n cell_utr_cluster = cluster_lengths[idx]\n cell_utr_trajectory = trajectory_lengths[idx]\n cell_utr_subtrajectory = subtrajectory_lengths[idx]\n cell_utr_age = age_lengths[idx]\n cell_utrs.append(cell_utr)\n cell_utrs_cluster.append(cell_utr_cluster)\n cell_utrs_trajectory.append(cell_utr_trajectory)\n cell_utrs_subtrajectory.append(cell_utr_subtrajectory)\n cell_utrs_age.append(cell_utr_age)\n # Executes on the last cell group of the entire list or when a new cell group is on the next line.\n if idx 
+ 1 == len(cell_ids) or cell_ids[idx + 1] != cell_id:\n cell_utr_mean = str(np.mean(cell_utrs))\n # Sets approved gene UTR means to 'NA' if cell has no reads from approved genes.\n # Otherwise this will set the approved gene UTR to the mean of only approved gene statistics.\n # Sets approved cell UTR means to 'NA' if the cells aren't in approved groups.\n cell_utr_cluster_mean = str(np.mean(cell_utrs_cluster))\n cell_utr_trajectory_mean = str(np.mean(cell_utrs_trajectory))\n cell_utr_subtrajectory_mean = str(np.mean(cell_utrs_subtrajectory))\n cell_utr_age_mean = str(np.mean(cell_utrs_age))\n cell_name = cell_names[cell_id]\n cell_data = CELL_DATA_DICT[cell_name]\n cell_age = cell_data[0]\n cell_subcluster = cell_data[2] + \".\" + cell_data[5]\n cell_data_used = [cell_data[2], cell_data[3], cell_data[4], cell_subcluster, cell_data[6], cell_data[7],\n cell_data[8], cell_data[9], cell_data[10], cell_data[11], cell_data[16],\n cell_data[13], cell_data[14], cell_data[15], cell_age, cell_utr_mean,\n cell_utr_cluster_mean, cell_utr_trajectory_mean, cell_utr_subtrajectory_mean,\n cell_utr_age_mean, cell_data[20], cell_count]\n cell_data_str = '\\t'.join(cell_data_used) + '\\n'\n cell_data_out.write(cell_data_str)\n # Resets cell data for next line.\n cell_utrs = []\n cell_utrs_cluster = []\n cell_utrs_trajectory = []\n cell_utrs_subtrajectory = []\n cell_utrs_age = []\n cell_count = 0\n\n print(\"Cell tsv generated!\")", "def saveh5(fname, mat, name='data'):\n fp = open_write(fname)\n save_vec(mat, fp, fp.root, name)\n fp.close()", "def onestatfile():\n with hp.File('StatsFile.h5', 'w') as onefile:\n alldata = np.empty((600, 4, 3, 500), dtype=np.float32)\n for j in range(600):\n for i in range(3):\n msd, vol, rms, asp = getstats(i, j+1)\n alldata[j, 0, i, :] = msd\n alldata[j, 1, i, :] = vol\n alldata[j, 2, i, :] = rms\n alldata[j, 3, i, :] = asp\n onefile.create_dataset('Stats', data=alldata, chunks=(1, 4, 3, 500),\n compression='gzip', compression_opts=9)", "def write_hdf5( self, iteration ) :\n # Before opening the file, select the particles that\n # need to be written for each species\n # (This allows to know the number of particles to be written,\n # which is needed when setting up the file)\n select_array_dict = {}\n selected_nlocals_dict = {}\n selected_nglobal_dict = {}\n # Loop over the different species, select the particles and fill\n # select_array_dict, selected_nlocals_dict, selected_nglobal_dict\n for species_name in sorted(self.species_dict.keys()):\n # Select the particles that will be written\n species = self.species_dict[species_name]\n select_array_dict[species_name] = self.apply_selection( species )\n # Get their total number\n n = select_array_dict[species_name].sum()\n if self.comm_world is not None :\n # In MPI mode: gather and broadcast an array containing\n # the number of particles on each process\n selected_nlocals_dict[species_name] = mpiallgather( n )\n selected_nglobal_dict[species_name] = \\\n sum(selected_nlocals_dict[species_name])\n else:\n # Single-proc output\n selected_nlocals_dict[species_name] = None\n selected_nglobal_dict[species_name] = n\n\n # Find the file name\n filename = \"data%08d.h5\" %iteration\n fullpath = os.path.join( self.write_dir, \"hdf5\", filename )\n\n # Create the file and setup its attributes\n # (can be done by one proc or in parallel)\n self.create_file_empty_particles( fullpath, self.top.it,\n self.top.time, self.top.dt, selected_nglobal_dict )\n\n # Open the file again (possibly in parallel)\n f = self.open_file( fullpath, 
parallel_open=self.lparallel_output )\n # (f is None if this processor does not participate in writing data)\n\n # Loop over the different species and write the requested quantities\n for species_name in sorted(self.species_dict.keys()) :\n\n # Get the HDF5 species group\n if f is not None:\n species_path = \"/data/%d/particles/%s\"%(iteration,species_name)\n species_grp = f[species_path]\n else:\n species_grp = None\n\n # Get the relevant species object and selection array\n species = self.species_dict[species_name]\n select_array = select_array_dict[species_name]\n n_rank = selected_nlocals_dict[species_name]\n\n # Write the datasets for each particle datatype\n self.write_particles( species_grp, species, n_rank, select_array )\n\n # Close the file\n if f is not None:\n f.close()", "def write_file(self):\r\n # -open file for writing\r\n f_fbob = open(self.fn_path, 'w')\r\n\r\n # -write header\r\n f_fbob.write('%s\\n' % (self.heading))\r\n\r\n # -write sections 1 & 2 : NOTE- what about NOPRINT?\r\n f_fbob.write('%10i%10i%10i%10i\\n' % (self.nqfb, self.nqcfb,\r\n self.nqtfb, self.iufbobsv))\r\n f_fbob.write('%10e\\n' % (self.tomultfb)) # check format\r\n\r\n # -write sections 3-5 looping through observations groups\r\n c = 0\r\n for i in range(self.nqfb):\r\n # while (i < self.nqfb):\r\n # write section 3\r\n f_fbob.write('{:10d}{:10d}\\n'.format(self.nqobfb[i],\r\n self.nqclfb[i]))\r\n\r\n # Loop through observation times for the groups\r\n for j in range(self.nqobfb[i]):\r\n # -write section 4\r\n f_fbob.write(\r\n '{}{:10d}{:10.4g}{}{:10.4g}\\n'.format(self.obsnam[c],\r\n self.irefsp[c],\r\n self.toffset[c], ' ',\r\n self.flwobs[c]))\r\n c += 1 # index variable\r\n\r\n # -write section 5 - NOTE- need to adjust factor for muliple obs same cell\r\n for j in range(abs(self.nqclfb[i])):\r\n if self.nqclfb[\r\n i] < 0: # set factor to 1.0 for all cells in group\r\n self.factor[i, :] = 1.0\r\n f_fbob.write('{:10d}{:10d}{:10d}{}{:10f}\\n'\r\n .format(self.layer[i, j], (self.row[i, j]),\r\n self.column[i, j],\r\n ' ', self.factor[\r\n i, j])) # note- is 10f good enough here?\r\n\r\n f_fbob.close()\r\n #\r\n # swm: BEGIN hack for writing standard file\r\n sfname = self.fn_path # swm:hack\r\n sfname += '_ins' # swm: hack\r\n # write header\r\n f_ins = open(sfname, 'w') # swm: hack for standard file\r\n f_ins.write('jif @\\n') # swm: hack for standard file\r\n f_ins.write('StandardFile 0 1 %s\\n' % (\r\n self.nqtfb)) # swm: hack for standard file\r\n for i in range(0, self.nqtfb):\r\n f_ins.write(\r\n '{}\\n'.format(self.obsnam[i])) # swm: hack for standard file\r\n\r\n f_ins.close()\r\n # swm: END hack for writing standard file\r\n\r\n return", "def writeMatrix(self):\n\t\tpass", "def __call__(self):\n\n if self.f5 is not None:\n file = self.name + \"%03d.f5\" % (self.count)\n filename = os.path.join(self.prefix, file)\n self.f5.writeToFile(filename)\n self.count += 1\n return", "def _write_dataset(name, dataset, num_shards, output_dir):\n borders = np.int32(np.linspace(0, len(dataset), num_shards + 1))\n indices = list(range(len(dataset)))\n\n for i in range(num_shards):\n filename = os.path.join(\n output_dir, '%s-%.5d-of-%.5d' % (name, i, num_shards))\n shard_indices = indices[borders[i]:borders[i + 1]]\n _write_shard(filename, dataset, shard_indices)\n logging.info('Wrote dataset indices [%d, %d) to output shard %s',\n borders[i], borders[i + 1], filename)", "def create_count_matrix(filename, output_dir):\n\n import os\n import json\n\n word_tag_output = \"tag_word_count.json\"\n 
bigram_matrix_name = \"bigram_count.json\"\n unigram_matrix_name = \"unigram_count.json\"\n trigram_matrix_name = \"trigram_count.json\"\n\n sub_dir = os.path.join(output_dir, \"count_matrix/\")\n if not os.path.exists(sub_dir):\n os.mkdir(sub_dir)\n\n word_tag_matrix = get_tag_word_matrix(filename)\n with open(sub_dir + word_tag_output, \"w\") as f:\n json.dump(word_tag_matrix, f)\n\n unigram_matrix = get_tag_n_gram(n=1, filename=filename)\n with open(sub_dir + unigram_matrix_name, \"w\") as f:\n json.dump(unigram_matrix, f)\n\n bigram_matrix = get_tag_n_gram(n=2, filename=filename)\n with open(sub_dir + bigram_matrix_name, \"w\") as f:\n json.dump(bigram_matrix, f)\n\n trigram_matrix = get_tag_n_gram(n=3, filename=filename)\n with open(sub_dir + trigram_matrix_name, \"w\") as f:\n json.dump(trigram_matrix, f)", "def write(data: orm.Data, filename: str) -> None:\n save(to_bands_inspect(data), hdf5_file=filename)", "def export_to_hdf5(self, path, mode='a', libver='earliest'):\n\n # If data come from ENDF, don't allow exporting to HDF5\n if hasattr(self, '_evaluation'):\n raise NotImplementedError('Cannot export incident neutron data that '\n 'originated from an ENDF file.')\n\n # Open file and write version\n f = h5py.File(str(path), mode, libver=libver)\n f.attrs['filetype'] = np.string_('data_proton')\n if 'version' not in f.attrs:\n f.attrs['version'] = np.array(HDF5_VERSION)\n\n group = f.create_group(self.name)\n group.attrs['Z'] = self.atomic_number\n group.attrs['A'] = self.mass_number\n group.attrs['metastable'] = self.metastable\n group.attrs['atomic_weight_ratio'] = self.atomic_weight_ratio\n\n # Determine union energy grid\n union_grid = np.array([])\n for rx in self:\n union_grid = np.union1d(union_grid, rx.xs.x)\n group.create_dataset('energy', data=union_grid)\n\n # Write cross sections\n rxs_group = group.create_group('reactions')\n for mt, rx in self.reactions.items():\n if not rx.redundant:\n rx_group = rxs_group.create_group('reaction_{:03}'.format(rx.mt))\n rx.to_hdf5(rx_group, union_grid)\n \n f.close()", "def countDataSize(self,filename):\n \n try:\n d = h5py.File(filename,'r')\n except:\n print(filename)\n return \n\n N = 0\n scan_edges = d['level2/Statistics/scan_edges'][:]\n for (start,end) in scan_edges:\n N += (end-start)//self.offsetLen * self.offsetLen\n d.close()\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)", "def write_to_carray(num_matrix, array_name, file_path):\n var_start = \"unsigned short int \" + array_name + \" [] = {\\n\\t\"\n var_end = \"};\\n\"\n var_values = [] \n for row in num_matrix:\n for cell in row:\n var_values.append(str(cell))\n \n f = open(file_path, \"w\")\n f.write(\"// Auto generated image header file\\n\")\n f.write(var_start)\n f.write(\",\\n\\t\".join(var_values) + \"\\n\")\n f.write(var_end)\n f.close()", "def _write_example_count(count: int, output_file: str) -> Text:\n count_fname = output_file + '.num_examples.txt'\n with tf.gfile.GFile(count_fname, 'w') as count_writer:\n count_writer.write(str(count))\n return count_fname", "def write(self,data): \n if not os.path.exists(self.output_dir):\n os.makedirs(self.output_dir)\n\n # We will store these in a separate file and link them to the level2s\n fname = data.filename.split('/')[-1]\n \n if os.path.exists(self.outfile):\n output = h5py.File(self.outfile,'a')\n else:\n output = h5py.File(self.outfile,'w')\n\n # Set permissions and group\n if self.set_permissions:\n try:\n 
os.chmod(self.outfile,0o664)\n shutil.chown(self.outfile, group=self.permissions_group)\n except PermissionError:\n self.logger(f'{fname}:{self.name}: Warning, couldnt set the file permissions.')\n\n # Store datasets in root\n data_out = {'tod':self.all_tod,\n 'weights':self.all_weights,\n 'mask':self.all_mask,\n 'cal_factors':self.all_cal_factors,\n 'frequency':self.all_frequency,\n 'auto_rms':self.all_auto}\n\n for dname, dset in data_out.items():\n if dname in output:\n del output[dname]\n output.create_dataset(dname, data=dset)\n\n output.attrs['version'] = __level3_version__\n output['cal_factors'].attrs['source'] = self.cal_source\n output['cal_factors'].attrs['calibrator_obsid'] = self.nearest_calibrator\n\n output.close()\n \n if self.level3 in data.keys():\n del data[self.level3]\n data[self.level3] = h5py.ExternalLink(self.outfile,'/')", "def write_e(self, outpath):\n\n if not self.tokens:\n raise Exception(\"MLE model not yet trained\")\n\n word_counts = collections.Counter([word_tag[WORD_INDEX] for word_tag in self.tokens])\n # Count and format word tag pairs with out unks\n e_tokens = [(token[WORD_INDEX], token[TAG_INDEX]) for token in self.tokens]\n e_counts = dict(collections.Counter(e_tokens))\n formatted_counts = [k[WORD_INDEX] + SPACE + k[TAG_INDEX] + TAB + str(e_counts[k]) for k in e_counts]\n output = NEW_LINE.join(formatted_counts)\n write(outpath, output)", "def _write_example_count(count: int) -> Text:\n count_fname = FLAGS.output_tfrecord + '.num_examples.txt'\n with tf.io.gfile.GFile(count_fname, 'w') as count_writer:\n count_writer.write(str(count))\n return count_fname", "def write_wabbit_hdf5( file, time, x0, dx, box, data, treecode, iteration = 0, dtype=np.float64 ):\n import h5py\n import numpy as np\n\n\n Level = np.size(treecode,1)\n if len(data.shape)==4:\n # 3d data\n Bs = np.zeros([3,1])\n N, Bs[0], Bs[1], Bs[2] = data.shape\n Bs = Bs[::-1]\n print( \"Writing to file=%s max=%e min=%e size=%i %i %i \" % (file, np.max(data), np.min(data), Bs[0], Bs[1], Bs[2]) )\n\n else:\n # 2d data\n Bs = np.zeros([2,1])\n N, Bs[0], Bs[1] = data.shape\n Bs = Bs[::-1]\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~\")\n print(\"Writing file %s\" % (file) )\n print(\"Time=%e it=%i N=%i Bs[0]=%i Bs[1]=%i Level=%i Domain=[%d, %d]\" % (time, iteration, N, Bs[0], Bs[1],Level, box[0], box[1]) )\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~\")\n\n\n fid = h5py.File( file, 'w')\n\n fid.create_dataset( 'coords_origin', data=x0, dtype=dtype )\n fid.create_dataset( 'coords_spacing', data=dx, dtype=dtype )\n fid.create_dataset( 'blocks', data=data, dtype=dtype )\n fid.create_dataset( 'block_treecode', data=treecode, dtype=dtype )\n\n fid.close()\n\n fid = h5py.File(file,'a')\n dset_id = fid.get( 'blocks' )\n dset_id.attrs.create( \"version\", 20200902) # this is used to distinguish wabbit file formats\n dset_id.attrs.create('time', time, dtype=dtype)\n dset_id.attrs.create('iteration', iteration)\n dset_id.attrs.create('domain-size', box, dtype=dtype )\n dset_id.attrs.create('total_number_blocks', N )\n fid.close()", "def updateCounts(self):\n found = False\n fileName = \"counts\"\n if not os.access(fileName, os.F_OK):\n try:\n TFH = open(fileName, \"w\")\n TFH.close()\n except IOError as inst: # @UnusedVariable\n self.logIt(__name__ + \".updateCounts(): Unable to open \" + fileName + \" for write.\" + \" => \" + str(\n inst.errno) + \":\" + str(inst.strerror) + \"\\n\")\n raise\n\n self.logIt(__name__ + \".updateCounts(): fileName=\" + fileName + \"\\n\")\n try:\n FH = open(fileName, \"rb+\")\n # FH = 
posixfile.open(fileName, \"rb+\") # posixfile has been deprecated.\n # FH.lock('w|')\n data = None\n while 1:\n data = str(FH.readline())\n if data is None or data == \"\": break\n data = re.sub(\"\\n\", \"\", data)\n self.debug(__name__ + \".updateCounts(): data is \" + str(data) + \"\\n\")\n ms = str(self.msgNum) + \"=\"\n self.debug(__name__ + \".updateCounts(): ms is\" + str(ms) + \"\\n\")\n if re.search(ms, data):\n found = True\n self.debug(__name__ + \".updateCounts(): DEBUG0.5\\n\")\n break\n self.debug(__name__ + \".updateCounts(): DEBUG1\\n\")\n if data and found:\n self.debug(__name__ + \".updateCounts(): DEBUG2\\n\")\n eloc = FH.tell()\n self.debug(__name__ + \".updateCounts(): eloc=\" + str(eloc) + \"\\n\")\n sloc = eloc - len(data) - 1\n self.debug(__name__ + \".updateCounts(): sloc=\" + str(sloc) + \"\\n\")\n FH.seek(sloc, os.SEEK_SET)\n cloc = FH.tell()\n self.debug(__name__ + \".updateCounts(): cloc=\" + str(cloc) + \"\\n\")\n myList = list()\n myList = data.split('=')\n icount = int(myList[1]) + 1\n FH.write(str(self.msgNum) + \"=\" + str(icount) + \"\\n\")\n else:\n self.debug(__name__ + \".updateCounts(): DEBUG3\\n\")\n FH.write(str(self.msgNum) + \"=1\" + \"\\n\")\n FH.lock('u')\n FH.close()\n except IOError as inst: # @UnusedVariable\n pass\n # self.logIt( __name__ + \".updateCounts(): Unable to open \" + fileName + \" for write.\" + \" => \" + str( inst.errno ) + \":\" + str( inst.strerror ) + \"\\n\" )\n # Endtry", "def write_counts(outfile, counts, search):\n\t\n\tcombinations = get_all_counts(counts)\n\t\n\tcounts_df = pd.DataFrame(combinations, columns = [set['name'] for set in search] + ['count'])\n\t\n\tcounts_df.to_csv(outfile, index=False, sep = '\\t')", "def write_count_table(filtered_counts, gene_names, ids_liver_header):\n with open(\"filtered_tcga_counts.tsv\", \"w\") as file:\n file.write(\"gene_id\\tgene_name\\t\" + \"\\t\".join(ids_liver_header) + \"\\n\")\n for gene_name, counts_line in zip(gene_names, filtered_counts):\n file.write(gene_name + \"\\t\" + \"\\t\" + \"\\t\".join(counts_line) + \"\\n\")" ]
[ "0.62956023", "0.5863224", "0.5714787", "0.56640995", "0.5645576", "0.5634674", "0.5626462", "0.55892056", "0.5532122", "0.5529419", "0.5496241", "0.5484333", "0.545105", "0.54503447", "0.54497975", "0.5441969", "0.5421193", "0.5383405", "0.53695124", "0.5364304", "0.5358697", "0.5329367", "0.53083676", "0.5298556", "0.5233804", "0.5223897", "0.52121586", "0.519824", "0.5188953", "0.5171703" ]
0.67506576
0
Compute an estimate of reasonable priors on cell size and ambient size. Given a dataset (scipy.sparse.csr matrix of counts where rows are barcodes and columns are genes), and an expected cell count, compute an estimate of reasonable priors on cell size and ambient count size. This is done by a series of heuristics.
def get_d_priors_from_dataset(dataset: Dataset) -> Tuple[float, float]: # Count the total unique UMIs per barcode (summing after transforming). transformed_counts = \ np.array(dataset.transformation.transform(dataset.data['matrix'] [:, dataset.analyzed_gene_inds]) .sum(axis=1)).squeeze() counts = np.array(dataset.data['matrix'] [:, dataset.analyzed_gene_inds].sum(axis=1)).squeeze() # If it's a model that does not model empty droplets, the dataset is cells. if dataset.model_name == 'simple': assert type(dataset.priors['n_cells']) is int, "No prior on number of cells." # Sort order the cells by counts. sort_order = np.argsort(counts)[::-1] # Estimate cell count by median, taking 'cells' to be the largest counts. cell_counts = int(np.median(transformed_counts[sort_order] [:dataset.priors['n_cells']]).item()) empty_counts = 0 # Models that include both cells and empty droplets. else: # Cutoff for original data. Empirical. cut = dataset.low_count_threshold # Estimate the number of UMI counts in empty droplets. # Mode of (rounded) log counts (for counts > cut) is a robust empty estimator. empty_log_counts = mode(np.round(np.log1p(transformed_counts[counts > cut]), decimals=1))[0] empty_counts = int(np.expm1(empty_log_counts).item()) # Estimate the number of UMI counts in cells. # Median of log counts above 5 * empty counts is a robust cell estimator. cell_log_counts = np.median(np.log1p(transformed_counts [transformed_counts > 5 * empty_counts])) cell_counts = int(np.expm1(cell_log_counts).item()) logging.info(f"Prior on counts in empty droplets is {empty_counts}") logging.info(f"Prior on counts for cells is {cell_counts}") return cell_counts, empty_counts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def estimate_cell_count_from_dataset(dataset: Dataset) -> int:\n\n # If it's a model that does not model empty droplets, the dataset is cells.\n # NOTE: this is overridden if --expected_cells is specified.\n if dataset.model_name == 'simple':\n return dataset.data['matrix'].shape[0]\n\n # Count number of UMIs in each barcode.\n counts = np.array(dataset.data['matrix'].sum(axis=1),\n dtype=int).squeeze()\n\n # Find the order that sorts barcodes by UMI count.\n count_sort_order = np.argsort(counts)[::-1] # Decreasing UMI counts\n\n # Find the UMI count cutoff as 0.9 * counts(99th percentile barcode)\n ninety_ninth_percentile_ind = int(counts.size * 0.01)\n umi_cutoff = 0.9 * counts[count_sort_order][ninety_ninth_percentile_ind]\n\n # Count the number of barcodes with UMI counts above the cutoff.\n cell_count_est = int(np.sum(counts > umi_cutoff).item())\n\n return cell_count_est", "def _estimate_priors(self):\n\n # Estimate the log UMI count turning point between cells and 'empties'.\n self.priors['log_counts_crossover'] = \\\n np.mean(np.log1p([self.priors['cell_counts'],\n self.priors['empty_counts']])).item()\n\n # Estimate prior for the scale param of LogNormal for d.\n if self.model_name != \"simple\":\n self.priors['d_std'] = (np.log1p(self.priors['cell_counts'])\n - self.priors['log_counts_crossover']) / 5\n else:\n self.priors['d_std'] = 0.2 # This is a reasonable prior in log space.\n\n # Priors for models that include empty droplets:\n if self.model_name != \"simple\":\n # Estimate fraction of trimmed dataset that contains cells.\n # cell_prob = self.priors['n_cells'] / self.analyzed_barcode_inds.size\n cell_prob = (1 - self.fraction_empties) \\\n * (self.priors['n_cells'] / self.analyzed_barcode_inds.size)\n self.priors['cell_prob'] = cell_prob\n\n assert cell_prob > 0, f\"Fraction of trimmed dataset \" \\\n f\"containing cells should be > 0, \" \\\n f\"but is {cell_prob}.\"\n\n assert cell_prob <= 1, f\"Fraction of trimmed dataset \" \\\n f\"containing cells should be at most 1, \" \\\n f\"but is {cell_prob}.\"\n\n # Turn cell probability into logit.\n self.priors['cell_logit'] = np.log(cell_prob / (1 - cell_prob)).item()\n\n # Estimate the ambient gene expression profile.\n self.priors['chi_ambient'], self.priors['chi_bar'] = \\\n estimate_chi_from_dataset(self)", "def testCondition(df, indexCol, dmatDf, gbCol, gbValues=None, countCol='Cells', min_count=3):\n\n if gbValues is None:\n gbValues = sorted(df[gbCol].unique())\n\n cnts = df.groupby([indexCol, gbCol])[countCol].agg(np.sum).unstack(gbCol, fill_value=0)[gbValues]\n uIndices = list(df[indexCol].dropna().unique())\n dmat = dmatDf.loc[:, uIndices].loc[uIndices, :]\n compressedDmat = distance.squareform(dmat.values)\n Z = sch.linkage(compressedDmat, method='complete')\n members = getClusterMembers(Z)\n resDf = testHClusters(cnts, members, gbValues, min_count=min_count)\n return Z, resDf, np.array(uIndices)", "def mut_space_size(graph: nx.MultiGraph, estimate=True) -> int:\n space = mut_space(graph)\n sizes = (len(gg) for g, gg in space)\n if estimate:\n return sum(map(log, sizes))\n return reduce(op.mul, sizes)", "def calc_priors(categories, data):\n counts = np.zeros(categories)\n for val in range(categories):\n counts[val] = np.count_nonzero(data.labels == val)\n return counts / len(data.labels)", "def csize(grades):\n\tp = 0\n\tfor k in grades:\n\t\tl = _comb(n,k)\n\t\tp += l\n\treturn p", "def csize(grades):\n\tp = 0\n\tfor k in grades:\n\t\tl = _comb(n,k)\n\t\tp += l\n\treturn p", "def getCellCount(self, idx = None, 
cell = 1, verbose = 0):\n\n if idx is None: idx = np.arange(self.atoms.shape[0])\n if isinstance(idx, (int, np.integer)): idx = [idx]\n\n areas = self.getAreas(idx = idx, cell = cell)\n if cell == 1:\n base_area = np.abs(np.linalg.det(self.base_1[:2, :2]))\n elif cell == 2:\n base_area = np.abs(np.linalg.det(self.base_2[:2, :2]))\n\n count = areas / base_area\n\n if verbose > 0:\n string = \"Cell count for cell %i, with %i index, max deviation: %.4E\"\\\n % (cell, len(count), np.max(count - np.round(count, 0)))\n ut.infoPrint(string)\n\n return count", "def optimal_chunksizes(nt, nlat, nlon):\n\n clon = np.sqrt(1000000.0 * nlon / (nlat * nt))\n clat = nlat * clon / nlon\n return (nt, int(np.ceil(clat)), int(np.ceil(clon)))", "def score(tmp_dir, locusList, ranges):\r\n\t\r\n\tloc = ''\r\n\tpos = 1\r\n\tcount_indel = 0\r\n\tholes = 0\r\n\tsnps = 0\r\n\tcovMax=combined_covMax=covSum=covSum2= 0 \r\n\tcovMin = combined_covMin =99999\r\n\tpercentage_coverages =[]\r\n\tsnpList = []\r\n\tindelList = []\r\n\tresults = {} \r\n\t\r\n\tpileup_file = os.path.join(tmp_dir, 'all.pileup')\r\n\tfor l in open(pileup_file):\r\n\t\tt = l.split()\r\n\t\tif loc == '':\r\n\t\t\tloc = t[0] \r\n\t\t\tpos = ranges[loc][0] + 1 \r\n\t\tif t[0] != loc:\r\n\t\t\tresults =GenerateResult(ranges,\r\n\t\t\t\t\t\t\t\t\tholes, locusList,\r\n\t\t\t\t\t\t\t\t\tloc,snps,count_indel,\r\n\t\t\t\t\t\t\t\t\tsnpList, indelList,\r\n\t\t\t\t\t\t\t\t\tpercentage_coverages,combined_covMin,\r\n\t\t\t\t\t\t\t\t\tcombined_covMax, covMin, covMax,covSum, results)\r\n\t\t\t# reset locus vars\r\n\t\t\tloc = t[0] \r\n\t\t\tpos = ranges[loc][0] + 1 \r\n\t\t\tcount_indel = 0\r\n\t\t\tholes =snps=covMax=combined_covMax=covSum=covSum2= 0 \r\n\t\t\tcovMin =combined_covMin= 99999\r\n\t snpList = []\r\n\t\t\tindelList = []\r\n\t\t\tpercentage_coverages =[]\r\n\t\there = int(t[1])\r\n\t\tif here - 1 < ranges[loc][0]: \r\n\t\t\tcontinue\r\n\t\telif here - 1 >= ranges[loc][1]: \r\n\t\t\tcontinue\r\n\t\twhile pos < here: \r\n\t\t\tholes += 1 \r\n\t\t\tpos += 1\r\n\r\n\t\tv, indel, array_of_all_indels,most_common_indel = pile(t[2], t[4])\r\n\t\tx = v.items()\r\n\t\tx.sort(lambda a,b: compGreater(t[2], a, b))\r\n\t\t\r\n\t\tif x[0][0] != t[2].lower():\r\n\t\t\tsnps += 1\r\n\t\t\tsnpList.append((pos,t[2],v));\r\n\t\tc = x[0][1] \r\n\t\tcov= int(most_common_indel)/float(t[3]) \r\n\t\tif cov > 0.5: \r\n count_indel += 1\r\n indel_type = Counter(array_of_all_indels) \r\n indel_type = indel_type.items()\r\n indelList.append((int(pos),t[2], indel_type))\r\n\t\tcovSum += c \r\n\t\tcovSum2 += c * c\r\n\t\tif c > covMax:\r\n\t\t\tcovMax = c\r\n\t\tif c < covMin:\r\n\t\t\tcovMin = c\r\n\t\tcombined_c = x[0][1] + x[1][1] + x[2][1] + x[3][1] \r\n\t\tif combined_c > combined_covMax:\r\n\t\t\tcombined_covMax = c \r\n\t\tif combined_c < combined_covMin:\r\n\t\t\tcombined_covMin = c \r\n\t\t\r\n\t\tn = int(t[3]) \r\n\t\tjs = []\r\n\t\tfor (_,j) in x[1:]: \r\n\t\t\tjs.append(j) \r\n\t\tpercentage_coverage = sum(js)/float(n)*100 \r\n\t\tpercentage_coverages.append(round(float(percentage_coverage),2))\r\n\t\tpos = here + 1\r\n\tresults =GenerateResult(ranges,\r\n\t\t\t\t\t\t\tholes,\r\n\t\t\t\t\t\t\tlocusList,loc,\r\n\t\t\t\t\t\t\tsnps,count_indel,\r\n\t\t\t\t\t\t\tsnpList,indelList,\r\n\t\t\t\t\t\t\tpercentage_coverages,combined_covMin,\r\n\t\t\t\t\t\t\tcombined_covMax, covMin, covMax,\r\n\t\t\t\t\t\t\tcovSum, results)\r\n\t\r\n\treturn results", "def estimate_size(accns, bed, order, conservative=True):\n accns = [order[x] for x in accns]\n ii, bb = zip(*accns)\n mini, maxi 
= min(ii), max(ii)\n if not conservative: # extend one gene\n mini -= 1\n maxi += 1\n minb = bed[mini]\n maxb = bed[maxi]\n assert minb.seqid == maxb.seqid\n distmode = \"ss\" if conservative else \"ee\"\n ra = (minb.seqid, minb.start, minb.end, \"+\")\n rb = (maxb.seqid, maxb.start, maxb.end, \"+\")\n\n dist, orientation = range_distance(ra, rb, distmode=distmode)\n assert dist != -1\n return dist", "def purity_score(clusters, classes):\n clusters = np.array(clusters)\n classes = np.array(classes)\n A = np.c_[(clusters,classes)]\n\n n_accurate = 0.\n\n for j in np.unique(A[:,0]):\n z = A[A[:,0] == j, 1]\n x = np.argmax(np.bincount(z))\n n_accurate += len(z[z == x])\n\n return n_accurate / A.shape[0]", "def estimate_size(self, metric, criteria='last'):\n return estimate_size(self.x, self.y, self.data, metric=metric, criteria=criteria)", "def __get_cell_proportions(self):\n\n prim_dim = self.prim_dim\n prim_limit = len(self._cells) if self.prim_limit == 0 else self.prim_limit\n proportions = [None, None]\n proportions[prim_dim] = prim_proportions = [-1.] * len(self._cells[:prim_limit])\n proportions[1-prim_dim] = sec_proportions = []\n cells = self._cells[:]\n\n while cells:\n\n sec_proportion = -1.\n\n for i, cell in enumerate(cells[:prim_limit]):\n min_size = cell.update_min_size()\n prim_proportions[i] = max(prim_proportions[i], cell.proportions[prim_dim])\n sec_proportion = max(sec_proportion, cell.proportions[1-prim_dim])\n\n sec_proportions.append(sec_proportion)\n del cells[:prim_limit]\n\n default_proportions = [p1 if p2 < 0. else p2 for p1, p2 in\n zip(self._global_default_proportions, self._default_proportions)]\n default_prim_p = default_proportions[prim_dim]\n default_sec_p = default_proportions[1-prim_dim]\n prim_proportions[:] = [default_prim_p if p < 0. else p for p in prim_proportions]\n sec_proportions[:] = [default_sec_p if p < 0. 
else p for p in sec_proportions]\n\n return proportions", "def _get_new_capacity(self):\n for prime in primes:\n if prime > 2 * self.size:\n return prime\n raise ValueError(\"Error: Table size overflow!\")", "def _calculate_cluster_measures(\n arr4d,\n threshold,\n bin_struct,\n two_sided_test=False,\n):\n n_regressors = arr4d.shape[3]\n\n max_sizes = np.zeros(n_regressors, int)\n max_masses = np.zeros(n_regressors, float)\n\n for i_regressor in range(n_regressors):\n arr3d = arr4d[..., i_regressor].copy()\n\n if two_sided_test:\n arr3d[np.abs(arr3d) <= threshold] = 0\n else:\n arr3d[arr3d <= threshold] = 0\n\n labeled_arr3d, _ = label(arr3d > 0, bin_struct)\n\n if two_sided_test:\n # Label positive and negative clusters separately\n n_positive_clusters = np.max(labeled_arr3d)\n temp_labeled_arr3d, _ = label(\n arr3d < 0,\n bin_struct,\n )\n temp_labeled_arr3d[temp_labeled_arr3d > 0] += n_positive_clusters\n labeled_arr3d = labeled_arr3d + temp_labeled_arr3d\n del temp_labeled_arr3d\n\n clust_vals, clust_sizes = np.unique(labeled_arr3d, return_counts=True)\n assert clust_vals[0] == 0\n\n clust_vals = clust_vals[1:] # First cluster is zeros in matrix\n clust_sizes = clust_sizes[1:]\n\n # Cluster mass-based inference\n max_mass = 0\n for unique_val in clust_vals:\n ss_vals = np.abs(arr3d[labeled_arr3d == unique_val]) - threshold\n max_mass = np.maximum(max_mass, np.sum(ss_vals))\n\n # Cluster size-based inference\n max_size = 0\n if clust_sizes.size:\n max_size = np.max(clust_sizes)\n\n max_sizes[i_regressor], max_masses[i_regressor] = max_size, max_mass\n\n return max_sizes, max_masses", "def num_cells(self):\n if hasattr(self, '__num_cells__'):\n return self.__num_cells__\n if self.x is not None:\n return self.x.size(self.__cat_dim__('x', self.x))\n if self.boundary_index is not None:\n return int(self.boundary_index[1,:].max()) + 1\n assert self.upper_index is None and self.lower_index is None\n return None", "def determine_size(self):\n size = np.inf\n while size >= self.n:\n size = np.random.pareto(0.2)\n size = int(math.ceil(size))\n return size", "def compute_detection_counts(kinds, valid_mask, aoi_mask, scene_counts):\n scene_counts = np.maximum(scene_counts, 1)\n if len(kinds):\n pairs = (kinds == 'pair_trawlers')\n singles = (kinds == 'single_trawler')\n scales = (kinds == 'pair_trawlers') * 2 + (kinds == 'single_trawler')\n aoi_pts = round((scales * (valid_mask & aoi_mask) / scene_counts).sum(), 1) \n aoi_pairs = round((pairs * (valid_mask & aoi_mask) / scene_counts).sum(), 1) \n else:\n aoi_pts = aoi_pairs = 0\n return aoi_pts, aoi_pairs", "def _layer_size_score(size, hole_count, hole_area):\r\n board_area = size[0] * size[1]\r\n if board_area == 0:\r\n return 0\r\n \r\n hole_percentage = hole_area / board_area\r\n hole_score = (hole_percentage - 0.25) ** 2\r\n size_score = (board_area - 8) **2\r\n return hole_score * size_score", "def _cell_state_size(self):\n state_sizes = self._cells[0].state_size\n if isinstance(state_sizes, tuple):\n return sum(state_sizes)\n return state_sizes", "def calc_sparsity (data): \n matrix_size = data.shape[0]*data.shape[1] # Number of possible interactions in the matrix\n num_purchases = len(data.nonzero()[0]) # Number of items interacted with\n sparsity = 100*(1 - (num_purchases/matrix_size))\n print('{:.2f} % of the user interaction matrix is sparse'.format(sparsity,2))", "def test_calc_distribution():\n\n # Default score is 0\n score = optimize.calc_distribution([])\n\n exp_score = np.zeros((24, 24))\n\n 
np.testing.assert_almost_equal(score.values, exp_score)\n\n # Score for enrichment in AA pairs\n score = optimize.calc_distribution([\n ('AAAAA', 'AAAAA'),\n ('AAAA', 'AAAA'),\n ('AAAAAA'*100, 'AAAAAA'*100),\n ])\n\n assert np.allclose(score.loc['A', 'A'], 10.25, atol=1e-2)\n\n # Score for enrichment in CS pairs\n score = optimize.calc_distribution([\n ('CSCSCS', 'SCSCSC'),\n ('SCCSSS', 'SSCCCS'),\n ])\n\n # Make sure it's symmetric\n assert np.allclose(score, score.T)\n\n # Check specific cells\n assert np.allclose(score.loc['S', 'S'], 2.32, atol=1e-2)\n assert np.allclose(score.loc['C', 'C'], 1.58, atol=1e-2)\n assert np.allclose(score.loc['S', 'C'], 3.32, atol=1e-2)\n assert np.allclose(score.loc['C', 'S'], 3.32, atol=1e-2)\n\n # Score for enrichment in gaps\n score = optimize.calc_distribution([\n ('C----S', 'SCSCSC'),\n ('SCCS-S', 'S----S'),\n ])\n\n # Make sure it's still symmetric\n assert np.allclose(score, score.T)\n\n # Check specific cells\n assert np.allclose(score.loc['*', '*'], 1.58, atol=1e-2)\n assert np.allclose(score.loc['*', 'C'], 2.32, atol=1e-2)\n assert np.allclose(score.loc['C', '*'], 2.32, atol=1e-2)\n assert np.allclose(score.loc['S', '*'], 2.0, atol=1e-2)\n\n # See if we can weight some alignments higher than others\n\n # Score for enrichment in CS pairs\n score = optimize.calc_distribution([\n ('CSCSCS', 'SCSCSC'),\n ('SCCSSS', 'SSCCCS'),\n ], weights=[1.0, 0.5])\n\n # Make sure it's symmetric\n assert np.allclose(score, score.T)\n\n # Check specific cells\n assert np.allclose(score.loc['S', 'S'], 1.58, atol=1e-2)\n assert np.allclose(score.loc['C', 'C'], 1.0, atol=1e-2)\n assert np.allclose(score.loc['S', 'C'], 3.09, atol=1e-2)\n assert np.allclose(score.loc['C', 'S'], 3.09, atol=1e-2)", "def optimal(nb_items):\n # Compute first possibility\n height1 = math.floor(math.sqrt(nb_items))\n width1 = math.ceil(nb_items / height1)\n\n # Compute second possibility\n width2 = math.ceil(math.sqrt(nb_items))\n height2 = math.ceil(nb_items / width2)\n\n # Minimize the product of height and width\n if height1 * width1 < height2 * width2:\n height, width = height1, width1\n else:\n height, width = height2, width2\n\n return (height, width)", "def calculate_chunk_size(thread_count, item_count):\n chunk_size = int(item_count / (thread_count * 10))\n if chunk_size < 1:\n chunk_size = 1\n if chunk_size > 20:\n chunk_size = 20\n return chunk_size", "def calculate_assignments_sparse(topics, data, voc_size, iterations = 1000, threshold = 1e-4): \n #calulate block size\n Ndocs_batch = (50000*10000) // voc_size #fits in 4GB of memory\n \n Nbatches = len(data) // Ndocs_batch\n if Nbatches*Ndocs_batch < len(data):\n Nbatches += 1\n \n start_time = time()\n for i in range(Nbatches):\n \n \n partial_assignments = calculate_assignments(topics, data[i*Ndocs_batch:(i+1)*Ndocs_batch], voc_size, iterations)\n partial_assignments[partial_assignments < threshold] = 0 \n #re-normalize\n partial_assignments /= partial_assignments.sum(axis=1)[:,np.newaxis]\n \n if i==0:\n sparse_assignments = csr_matrix(partial_assignments)\n else: \n sparse_assignments = _csr_vappend(sparse_assignments, csr_matrix(partial_assignments))\n \n\n print('Done batch {} out of {}. 
Elapsed {:.2f} min.'.format(i,Nbatches, (time()-start_time)/60 ))\n \n return sparse_assignments", "def min_num_iterations_():\n rows, cols = map_shape\n error = 1\n it = 0\n minErr = 1e-4\n while (error > minErr):\n bkp_utilities = utilities.copy()\n update_utils(utilities, map_shape, map_arr, rewards, final_arr, actions, gamma)\n diff = [(bkp_utilities[(r,c)] - utilities[(r,c)]) for r in range(rows) for c in range(cols)]\n error = np.sqrt(np.dot(diff, diff))\n it += 1\n return it", "def objective_distribution():\n # calculate the needed number of cells or take max value (above which\n # higher diversity should not have practic effects)\n volume = min(cf.naive_pool*len(cf.tinf), 10**5)\n # get bins in the required energy range, width depending on nkey\n if cf.nkey == 1:\n # for nkey = 1, a lot of small bins may not be occupied, thus choose\n # larger bins\n bin_size_goal = 0.1\n bin_number = max(np.round((cf.upperlim-cf.thr)/bin_size_goal), 1)\n bin_edges = np.linspace(cf.thr, cf.upperlim, bin_number+1)\n else:\n bin_size_goal = 0.025\n bin_number = max(np.round((cf.upperlim-cf.thr)/bin_size_goal), 1)\n bin_edges = np.linspace(cf.thr, cf.upperlim, bin_number+1)\n # for the midpoint of each bin, get Gaussian distribution value for\n # mean=0.5 and std=0.1\n bin_midpoints = bin_edges[:-1] + (bin_edges[1]-bin_edges[0])/2\n gauss_weights = np.exp(-np.power(bin_midpoints - 0.5, 2.) /\n (2 * np.power(0.1, 2.)))\n # scale so that the sum over the bins contains the required cell number\n norm1 = np.sum(gauss_weights)\n obj_dist = np.floor((volume / norm1) * gauss_weights)\n # give back the objective distribution and bin_edges\n return bin_edges, obj_dist, volume", "def optimalBinSize(x):\n interquartile = np.diff(np.prctile(x, [25, 75]))\n return 2. * interquartile * len(x)**(-1./3)", "def model_params_size(model, param_dims=[2, 4], param_types=['weight', 'bias']):\n _, _, sparse_params_cnt = model_params_stats(model, param_dims, param_types)\n return sparse_params_cnt" ]
[ "0.620238", "0.54556507", "0.5310594", "0.5288271", "0.5102519", "0.509151", "0.509151", "0.5053118", "0.5047448", "0.504083", "0.50119156", "0.50027806", "0.5002605", "0.4951536", "0.4927784", "0.49217126", "0.49094176", "0.48926193", "0.4889214", "0.48789653", "0.48736045", "0.48625737", "0.4844825", "0.48446247", "0.48411012", "0.48154017", "0.4811187", "0.4808906", "0.48074055", "0.4805347" ]
0.57310665
1
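The record above pairs the prior-estimation query with a full Dataset-based implementation; as a rough sketch of the same mode/median heuristic on a bare 1-D array of per-barcode UMI counts, stripped of the Dataset and transformation machinery, something like the following could be used. The function name, the default low-count threshold, and the use of plain numpy in place of scipy.stats.mode are assumptions of the example, not part of the record.

import numpy as np

def rough_count_priors(counts, low_count_threshold=15):
    # Illustrative sketch only: assumes `counts` holds per-barcode UMI totals
    # for both empty droplets and cells.
    counts = np.asarray(counts, dtype=float)
    # Empty-droplet prior: mode of log1p counts (rounded to one decimal)
    # among barcodes above the low-count cutoff.
    logs = np.round(np.log1p(counts[counts > low_count_threshold]), decimals=1)
    vals, freqs = np.unique(logs, return_counts=True)
    empty_counts = int(np.expm1(vals[np.argmax(freqs)]))
    # Cell prior: median of log1p counts for barcodes well above the empty level.
    cell_counts = int(np.expm1(np.median(np.log1p(counts[counts > 5 * empty_counts]))))
    return cell_counts, empty_counts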
Compute an estimate of the number of real cells in a dataset. Given a Dataset, compute an estimate of the number of real cells. This is done CellRanger-style, by taking the barcode with total UMI count in the 99th percentile of the dataset, and then finding the number of barcodes that have more than 0.9 times that number of UMIs.
def estimate_cell_count_from_dataset(dataset: Dataset) -> int: # If it's a model that does not model empty droplets, the dataset is cells. # NOTE: this is overridden if --expected_cells is specified. if dataset.model_name == 'simple': return dataset.data['matrix'].shape[0] # Count number of UMIs in each barcode. counts = np.array(dataset.data['matrix'].sum(axis=1), dtype=int).squeeze() # Find the order that sorts barcodes by UMI count. count_sort_order = np.argsort(counts)[::-1] # Decreasing UMI counts # Find the UMI count cutoff as 0.9 * counts(99th percentile barcode) ninety_ninth_percentile_ind = int(counts.size * 0.01) umi_cutoff = 0.9 * counts[count_sort_order][ninety_ninth_percentile_ind] # Count the number of barcodes with UMI counts above the cutoff. cell_count_est = int(np.sum(counts > umi_cutoff).item()) return cell_count_est
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_d_priors_from_dataset(dataset: Dataset) -> Tuple[float, float]:\n\n # Count the total unique UMIs per barcode (summing after transforming).\n transformed_counts = \\\n np.array(dataset.transformation.transform(dataset.data['matrix']\n [:, dataset.analyzed_gene_inds])\n .sum(axis=1)).squeeze()\n counts = np.array(dataset.data['matrix']\n [:, dataset.analyzed_gene_inds].sum(axis=1)).squeeze()\n\n # If it's a model that does not model empty droplets, the dataset is cells.\n if dataset.model_name == 'simple':\n\n assert type(dataset.priors['n_cells']) is int, \"No prior on number of cells.\"\n\n # Sort order the cells by counts.\n sort_order = np.argsort(counts)[::-1]\n\n # Estimate cell count by median, taking 'cells' to be the largest counts.\n cell_counts = int(np.median(transformed_counts[sort_order]\n [:dataset.priors['n_cells']]).item())\n\n empty_counts = 0\n\n # Models that include both cells and empty droplets.\n else:\n\n # Cutoff for original data. Empirical.\n cut = dataset.low_count_threshold\n\n # Estimate the number of UMI counts in empty droplets.\n\n # Mode of (rounded) log counts (for counts > cut) is a robust empty estimator.\n empty_log_counts = mode(np.round(np.log1p(transformed_counts[counts > cut]),\n decimals=1))[0]\n empty_counts = int(np.expm1(empty_log_counts).item())\n\n # Estimate the number of UMI counts in cells.\n\n # Median of log counts above 5 * empty counts is a robust cell estimator.\n cell_log_counts = np.median(np.log1p(transformed_counts\n [transformed_counts > 5 * empty_counts]))\n cell_counts = int(np.expm1(cell_log_counts).item())\n\n logging.info(f\"Prior on counts in empty droplets is {empty_counts}\")\n\n logging.info(f\"Prior on counts for cells is {cell_counts}\")\n\n return cell_counts, empty_counts", "def computeNumClass(self):\n # Get the number of data\n n = len(self.data)\n # For IQR\n # First, compute the position of the first and third quartile\n fQPos = ( (n - 1) / 4 ) + 1\n tQPos = ( (3 * (n - 1)) / 4 ) + 1\n # Get the quartiles\n firstQ = 0.0\n thirdQ = 0.0\n if fQPos == round(fQPos):\n firstQ = self.data[int(fQPos)]\n else:\n up = round(fQPos)\n firstQ = self.data[up - 1] + ((self.data[up] - self.data[up - 1]) / 4.0)\n if tQPos == round(tQPos):\n thirdQ = self.data[int(tQPos)]\n else:\n up = round(tQPos)\n thirdQ = self.data[up - 1] + (3 * (self.data[up] - self.data[up - 1]) / 4.0)\n # Compute the IQR\n IQR = thirdQ - firstQ\n # Compute the number of classes and its length\n self.numBins = int(2 * IQR * m.pow(n, -1/3))\n self.computeBinWidth()", "def cardinality(self):\n estimate = self._alpha * math.pow(self._m, 2) / sum(math.pow(2, -x) for x in self._registers)\n\n if estimate <= 2.5 * self._m:\n # get number of registers equal to zero\n empty_registers = self._registers.count(0)\n if empty_registers != 0:\n return self._linear_count(empty_registers)\n else:\n return estimate\n elif estimate <= ((1 << 32) / 30):\n return estimate\n else:\n return self._large_range_correction(estimate)", "def count_elements_in_dataset(dataset):\n return dataset.count()", "def getCellCount(self, idx = None, cell = 1, verbose = 0):\n\n if idx is None: idx = np.arange(self.atoms.shape[0])\n if isinstance(idx, (int, np.integer)): idx = [idx]\n\n areas = self.getAreas(idx = idx, cell = cell)\n if cell == 1:\n base_area = np.abs(np.linalg.det(self.base_1[:2, :2]))\n elif cell == 2:\n base_area = np.abs(np.linalg.det(self.base_2[:2, :2]))\n\n count = areas / base_area\n\n if verbose > 0:\n string = \"Cell count for cell %i, with %i index, max 
deviation: %.4E\"\\\n % (cell, len(count), np.max(count - np.round(count, 0)))\n ut.infoPrint(string)\n\n return count", "def IQR(data):\n return percentile(data, 75) - percentile(data, 25)", "def nb_data_bands(dataset):\n alphaband = dataset.GetRasterBand(1).GetMaskBand()\n if ((alphaband.GetMaskFlags() & gdal.GMF_ALPHA) or\n dataset.RasterCount == 4 or\n dataset.RasterCount == 2):\n return dataset.RasterCount - 1\n else:\n return dataset.RasterCount", "def getCellCountRatio(self, idx = None, verbose = 1):\n\n if idx is None: idx = np.arange(self.atoms.shape[0])\n if isinstance(idx, (int, np.integer)): idx = [idx]\n\n count_1 = self.getCellCount(idx = idx, cell = 1, verbose = verbose - 1)\n count_2 = self.getCellCount(idx = idx, cell = 2, verbose = verbose - 1)\n\n return count_2 / count_1", "def optimalBinSize(x):\n interquartile = np.diff(np.prctile(x, [25, 75]))\n return 2. * interquartile * len(x)**(-1./3)", "def num_cells(self):\n if hasattr(self, '__num_cells__'):\n return self.__num_cells__\n if self.x is not None:\n return self.x.size(self.__cat_dim__('x', self.x))\n if self.boundary_index is not None:\n return int(self.boundary_index[1,:].max()) + 1\n assert self.upper_index is None and self.lower_index is None\n return None", "def get_total_item_size(dataset):\n total_items = 0\n for element in dataset:\n total_items += 1\n return total_items", "def ncells(self):\n return self.izone.size", "def find_numerical_contours(counts):\n\tone_sigma_boundary = sigma_boundary(counts, 68)\n\tone_sigma = counts > one_sigma_boundary\n\ttwo_sigma_boundary = sigma_boundary(counts, 95)\n\ttwo_sigma = (counts > two_sigma_boundary) & (counts < one_sigma_boundary)\n\tthree_sigma_boundary = sigma_boundary(counts, 99)\n\tthree_sigma = (counts > three_sigma_boundary) & (counts < two_sigma_boundary)\n\n\t# Check method: Output actual percentages in each region\n\tprint('total no. 
samples:')\n\tprint(np.sum(counts))\n\tprint('included in 1st sigma region:')\n\tprint(np.sum(one_sigma * counts) / np.sum(counts))\n\tprint('included in 2 sigma region:')\n\tprint((np.sum(one_sigma * counts) + np.sum(two_sigma * counts)) / np.sum(counts))\n\tprint('included in 3 sigma region:')\n\tprint((np.sum(one_sigma * counts) + np.sum(two_sigma * counts) + np.sum(three_sigma * counts)) / np.sum(counts))\n\n\tfilled_numerical_contours = one_sigma * 1 + two_sigma * 2 + three_sigma * 3\n\n\treturn filled_numerical_contours", "def estimate_chi_from_dataset(dataset: Dataset) -> Tuple[torch.Tensor,\n torch.Tensor]:\n\n # Ensure that an estimate of the log count crossover point between cells\n # and empty droplets has already been calculated.\n try:\n log_crossover = dataset.priors['log_counts_crossover']\n except KeyError:\n raise AssertionError(\"Could not find dataset parameter \"\n \"log_counts_crossover.\")\n\n ep = np.finfo(np.float32).eps.item() # Small value\n\n # Trimmed and appropriately transformed count matrix.\n count_matrix = dataset.get_count_matrix()\n\n # Empty droplets have log counts < log_crossover.\n empty_barcodes = (np.log(np.array(count_matrix.sum(axis=1)).squeeze())\n < log_crossover)\n\n # Sum gene expression for the empty droplets.\n gene_expression = np.array(count_matrix[empty_barcodes, :].sum(axis=0)).squeeze()\n\n # As a vector on a simplex.\n gene_expression = gene_expression + ep\n chi_ambient_init = \\\n torch.Tensor(gene_expression / np.sum(gene_expression))\n\n # Full count matrix, appropriately transformed.\n full_count_matrix = dataset.get_count_matrix_all_barcodes()\n\n # Sum all gene expression.\n gene_expression_total = np.array(full_count_matrix.sum(axis=0)).squeeze()\n\n # As a vector on a simplex.\n gene_expression_total = gene_expression_total + ep\n chi_bar = \\\n torch.Tensor(gene_expression_total / np.sum(gene_expression_total))\n\n return chi_ambient_init, chi_bar", "def calc_priors(categories, data):\n counts = np.zeros(categories)\n for val in range(categories):\n counts[val] = np.count_nonzero(data.labels == val)\n return counts / len(data.labels)", "def calc_gini_impurity(self, data):\n impurity = 1\n label_counts = get_value_counts(data)\n for label in label_counts.keys():\n impurity -= (label_counts[label]/len(data))**2 # nrows\n return impurity", "def quantile(self, hypercube):\n raise NotImplementedError()", "def get_nb_vals(i, pnts, dem, top_left_cor, cellsize, rows, cols):\n nb_x = np.zeros((5,5)) # this 5 by 5 max would contain the x coordinate of 16 neighbor pixels of a sample point\n nb_y = np.zeros((5,5)) # this 5 by 5 matrix would contain the y coordinate of 16 neighbor pixels of a sample point\n nb_z = np.zeros((5,5))\n # get index and value of cell in DEM containing current point\n (cell_X, cell_Y, cell_Z) = misc.getCellValue(pnts[i], \n dem, \n top_left_cor, \n cellsize)\n #Deal with sample points near boundary of the DEM\n point_within_dem = (cell_X-2) >=0 and (cell_Y-2>=0) and (cell_X+3)<=cols and (cell_Y+3)<=rows\n if point_within_dem:\n nb_z[0:5,0:5] = misc.RasterSubset(dem,(cell_Y-2),(cell_Y+3),(cell_X-2),(cell_X+3))\n else:\n #Get the part of moving window within the DEM domain\n in_data= misc.RasterSubset(dem,max((cell_Y-2),0),min((cell_Y+3),rows),max((cell_X-2),0),min((cell_X+3),cols))\n #in_data=dem[\"array\"][max((cell_Y-2),0):min((cell_Y+3),rows),max((cell_X-2),0):min((cell_X+3),cols)]\n 
nb_z[max((2-cell_Y),0):min((5-(cell_Y+3-rows)),5),max((2-cell_X),0):min((5-(cell_X+3-cols)),5)]=in_data[0:in_data.shape[0],0:in_data.shape[1]]\n in_data_avg=np.mean(in_data[in_data>-3.4e+10])\n nb_z[nb_z==0]=in_data_avg\n nb_z[nb_z<-3.4e+10]=in_data_avg\n\n\n \n # If there is missing data in the neighborhood of the sample point \n # use neighborhood average to replace the missing value \n has_missing_data = (nb_z>8848).sum()>0 or (nb_z<-413).sum()>0\n if has_missing_data:\n avgValue=np.mean(nb_z[np.where(np.logical_and(nb_z<8848, nb_z>-413))])\n nb_z[nb_z>8848]=avgValue\n nb_z[nb_z<-413]=avgValue\n \n # Obtain the coordinate of cell centroid of a 5*5 neighborhood around the sample point\n for ii in [0,1,2,3,4]:\n cor_y=ii-2\n dy = (cell_Y+cor_y+0.5) * cellsize[1]\n nb_y[ii,:] = top_left_cor[1] + dy\n for jj in [0,1,2,3,4]:\n cor_x=jj-2\n dx = (cell_X+cor_x+0.5) * cellsize[0]\n nb_x [:,jj] = top_left_cor[0] + dx\n return nb_x, nb_y, nb_z", "def num_cells_for_rows(self, rows):\r\n return (rows * rows + rows) // 2", "def get_gini(rows):\n count_zero = 0 # number of rows labelled healthy\n count_one = 0 # number of rows labelled depressed\n\n for row in rows:\n if row[len(row) - 1] == 0:\n count_zero = count_zero + 1\n else:\n count_one = count_one + 1\n return 1 - (count_zero / float(len(rows))) ** 2 - (count_one / float(len(rows))) ** 2", "def num_bands(composition: Composition, potcar: Potcar) -> int:\n results = 0\n for element, potcar_single in zip(composition, potcar):\n num_atoms_per_element = composition[element]\n occupied_bands = potcar_single.nelectrons / 2\n num_bands_per_atom = occupied_bands + unoccupied_bands[str(element)]\n results += num_atoms_per_element * num_bands_per_atom\n\n return ceil(results)", "def countOccupied(data):\n\tcounter = 0\n\n\t# loop through rows and columns and\n\t# count the number of '#'s\n\tfor r in range(len(data)):\n\t\tfor c in range(len(data[r])):\n\t\t\tif data[r][c] == '#':\n\t\t\t\tcounter += 1\n\n\treturn counter", "def count_lorentz(fit_range, lorentz_array_2d):\n counter = 0\n for i in range(0, lorentz_array_2d.shape[0]):\n f0 = lorentz_array_2d[i][1]\n if f0 > fit_range[1] and f0 < fit_range[2]:\n counter += 1\n return counter", "def evaluate(self, dataset):\n success = 0\n for sample, labelVector, label in dataset.tests:\n if self.guessLabel(sample) == label:\n success += 1\n return success / len(dataset.tests)", "def getNbins(self,axis,includeTotalBin = True):\n\n\t\tif axis == \"f\":\n\t\t\tnCells = 1 if self.nCells == 0 else self.nCells\n\t\t\treturn nCells\n\n\t\tif axis == \"i\":\n\t\t\treturn self.meshInfo[1]\n\n\t\tif axis == \"j\":\n\t\t\treturn self.meshInfo[2]\n\n\t\tif axis == \"k\":\n\t\t\treturn self.meshInfo[3]\n\n\t\tif axis == \"d\":\n\t\t\tnDir = 1 if self.nDir == 0 else self.nDir\n\t\t\treturn nDir\n\n\t\tif axis == \"u\":\n\t\t\tnUsr = 1 if self.nUsr == 0 else self.nUsr\n\t\t\tnUsr = nUsr - 1 if self.usrTC == \"t\" and not includeTotalBin else nUsr\n\t\t\treturn nUsr\n\n\t\tif axis == \"s\":\n\t\t\tnSeg = 1 if self.nSeg == 0 else self.nSeg\n\t\t\tnSeg = nSeg - 1 if self.segTC == \"t\" and not includeTotalBin else nSeg\n\t\t\treturn nSeg\n\n\t\tif axis == \"m\":\n\t\t\tnMul = 1 if self.nMul == 0 else self.nMul\n\t\t\tnMul = nMul - 1 if self.mulTC == \"t\" and not includeTotalBin else nMul\n\t\t\treturn nMul\n\n\t\tif axis == \"c\":\n\t\t\tnCos = 1 if self.nCos == 0 else self.nCos\n\t\t\tnCos = nCos - 1 if self.cosTC == \"t\" and not includeTotalBin else nCos\n\t\t\treturn nCos\n\n\t\tif axis == \"e\":\n\t\t\tnErg = 1 if 
self.nErg == 0 else self.nErg\n\t\t\tnErg = nErg - 1 if self.ergTC == \"t\" and not includeTotalBin else nErg\n\t\t\treturn nErg\n\n\t\tif axis == \"t\":\n\t\t\tnTim = 1 if self.nTim == 0 else self.nTim\n\t\t\tnTim = nTim - 1 if self.timTC == \"t\" and not includeTotalBin else nTim\n\t\t\treturn nTim", "def number_of_outliers(sentiment, lower, upper):\r\n upper_quartile = np.percentile(sentiment, upper)\r\n lower_quartile = np.percentile(sentiment, lower)\r\n lower_outlier = np.count_nonzero(sentiment <= lower_quartile)\r\n higher_outlier = np.count_nonzero(sentiment >= upper_quartile)\r\n total_outlier = lower_outlier + higher_outlier\r\n return total_outlier", "def get_number_of_data_points(self):\n\n log.warning(\n \"get_number_of_data_points not implemented, values for statistical measurements such as AIC or BIC are \"\n \"unreliable\",\n )\n\n return 1.0", "def Ncells(self):\n return len(self.cells)", "def num_cells(self):\n cbi = self.cbi\n if cbi is None:\n return None\n return cbi[-1] # pylint: disable=E1136", "def num_cells(self):\n cbi = self.cbi\n if cbi is None:\n return None\n return cbi[-1] # pylint: disable=E1136" ]
[ "0.665363", "0.6281301", "0.59919727", "0.59555984", "0.5892062", "0.5744019", "0.5660073", "0.56421137", "0.5569726", "0.5563326", "0.54325527", "0.5362565", "0.5356441", "0.53364575", "0.53349966", "0.5327423", "0.5314066", "0.5296762", "0.5288975", "0.5220886", "0.519564", "0.5185774", "0.5177138", "0.51663524", "0.5156343", "0.51548105", "0.51527935", "0.51521724", "0.51513237", "0.51513237" ]
0.83259344
0
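In the same spirit, the 99th-percentile cutoff described by the record above reduces to a few numpy operations when the Dataset wrapper is set aside; the function name and the plain-array interface are assumptions of this sketch.

import numpy as np

def rough_cell_count(counts):
    # Illustrative sketch of the CellRanger-style cutoff in the document above.
    counts = np.asarray(counts, dtype=int)
    order = np.argsort(counts)[::-1]                       # barcodes by decreasing UMI count
    ninety_ninth = counts[order][int(counts.size * 0.01)]  # UMI count of the 99th-percentile barcode
    cutoff = 0.9 * ninety_ninth
    return int(np.sum(counts > cutoff))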
Return this user's nickname. The nickname will be a unique, human-readable identifier for this user with respect to this application. It will be an email address for some users, but not all.
def nickname(self): if (self.__email and self.__auth_domain and self.__email.endswith('@' + self.__auth_domain)): suffix_len = len(self.__auth_domain) + 1 return self.__email[:-suffix_len] else: return self.__email
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_nickname(self):\n return self._nick", "def nickname(self):\r\n if \"nickname\" in self.data:\r\n return self.data[\"nickname\"]\r\n return None", "def get_nickname_for_user(cls, user):\n return cls.get_account_for_user(user).nickname", "def mail_nickname(self):\n if \"mailNickname\" in self._prop_dict:\n return self._prop_dict[\"mailNickname\"]\n else:\n return None", "def mail_nickname(self):\n if \"mailNickname\" in self._prop_dict:\n return self._prop_dict[\"mailNickname\"]\n else:\n return None", "def create_nickname_for_user(cls, user):\n name = nickname = user.email().split('@', 1)[0]\n next_char = chr(ord(nickname[0].lower())+1)\n existing_nicks = [account.lower_nickname\n for account in cls.gql(('WHERE lower_nickname >= :1 AND '\n 'lower_nickname < :2'),\n nickname.lower(), next_char)]\n suffix = 0\n while nickname.lower() in existing_nicks:\n suffix += 1\n nickname = '%s%d' % (name, suffix)\n return nickname", "def get_nickname_for_email(cls, email, default=None):\n account = cls.get_account_for_email(email)\n if account is not None and account.nickname:\n return account.nickname\n if default is not None:\n return default\n return email.replace('@', '_')", "def get_email_for_nickname(cls, nickname):\n account = cls.get_account_for_nickname(nickname)\n if account is None:\n return None\n return account.email", "def get_name(self):\n return self.user.username if self.user.username else self.user.email", "def user_name(self) -> str:\n return pulumi.get(self, \"user_name\")", "def getUserName(self):\n user = User.by_id(self.user_id)\n return user.name", "def get_name(self):\n user = self.user\n name = \"%s %s\" % (user.first_name, user.last_name)\n name = name.strip()\n\n return self.display_name or name or user.email or user.username", "def get_username(self):\n full_name = '%s %s' % (self.user.first_name.strip(), self.user.last_name.strip()[0:1])\n if len(full_name.strip()) == 0:\n full_name = self.user.username\n return full_name.strip()", "def get_black_player_nickname(self, obj):\n return obj.black_player.nickname", "def group_nickname(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"group_nickname\")", "def get_short_name(self):\n # The user is identified by the email address\n return self.email", "def get_short_name(self):\n # The user is identified by their email address\n return self.first_name", "def user_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"user_name\")", "def get_name(self) :\n\n return self.factory.to_user_name(self.name)", "def get_user_display_name(self):\n return self.user.get_full_name() or self.user.get_username()", "def __str__(self):\n return self.nickname", "def get_full_name(self):\n # The user is identified by their email address\n return self.first_name+' '+self.last_name", "def user_name(self):\n return self._stub.List(self._message).user_name", "def get_user_name(self):\n full_name = f'{self.f_name} {self.l_name}'\n return full_name", "def user_name(self):\n\n return self._user_name", "def getUserName(self):\n userType = self.env['res.users']\n \n uiUser = userType.browse(self._uid)\n return uiUser.name", "def username(self) -> str:\n return pulumi.get(self, \"username\")", "def username(self) -> str:\n return pulumi.get(self, \"username\")", "def username(self) -> str:\n return pulumi.get(self, \"username\")", "def user_name(self):\n return self._user_name" ]
[ "0.8245342", "0.82021475", "0.80784094", "0.7570145", "0.7570145", "0.7427743", "0.7356333", "0.7088285", "0.70873076", "0.6995919", "0.6968879", "0.6951663", "0.6922138", "0.6818108", "0.6752576", "0.6742666", "0.6720184", "0.66955596", "0.6691967", "0.6687338", "0.6678083", "0.66628015", "0.6661956", "0.6620308", "0.6610519", "0.66046697", "0.65985256", "0.65985256", "0.65985256", "0.6585817" ]
0.8227201
1
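The nickname document above reads the email and auth domain from instance attributes; a standalone sketch of the same suffix-stripping rule, with both values passed in as plain strings (an assumption of the example), behaves as follows.

def nickname(email, auth_domain):
    # Strip a trailing '@auth_domain' so the nickname is the local part;
    # otherwise fall back to the full email address.
    if email and auth_domain and email.endswith('@' + auth_domain):
        return email[:-(len(auth_domain) + 1)]
    return email

# nickname('alice@example.com', 'example.com')  -> 'alice'
# nickname('bob@other.org', 'example.com')      -> 'bob@other.org'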
Return this user's auth domain.
def auth_domain(self): return self.__auth_domain
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def domain(self):\n # type: (...) -> AuthDomain\n return self._domain", "def auth_domain(request):\n return request.registry.settings.get('h.auth_domain', request.domain)", "def get_domain(self):\n return self.domain", "def get_domain(self):\n return self._domain", "def get_domain_user(self):\n return self.domain_user.get_text()", "def getDomain(self):\n return self.domain", "def domain(self) -> str:\n return self._domain", "def domain(self) -> str:\n return pulumi.get(self, \"domain\")", "def domain(self) -> str:\n return pulumi.get(self, \"domain\")", "def domain(self):\n return self['domain']", "def domain(self):\n return self._domain", "def domain(self):\n return self._domain", "def domain(self):\n return self._domain", "def domain(self):\n\n return self._domain", "def kerberos_domain(self):\n return hookenv.config('kerberos-domain')", "def domain_name(self) -> str:\n return pulumi.get(self, \"domain_name\")", "def domain_name(self) -> str:\n return pulumi.get(self, \"domain_name\")", "def domain(self):\n # type: () -> string_types\n return self._domain", "def domain(self):\n return self._get('domain', '/domain/', self.DOMAIN_DATA)", "def get_domain_name(self):\n return self.domain_name.get_text()", "def custom_domain(self):\n return self._custom_domain", "def domain(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"domain\")", "def domain(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"domain\")", "def domain(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"domain\")", "def domain(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"domain\")", "def get_domain_passwd(self):\n return self.domain_passwd.get_text()", "def domain_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"domain_name\")", "def domain_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"domain_name\")", "def domain_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"domain_name\")", "def domain(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"domain\")" ]
[ "0.8473398", "0.7908221", "0.7783108", "0.7752245", "0.77312803", "0.7607269", "0.7606187", "0.7574134", "0.7574134", "0.75533456", "0.7479255", "0.7479255", "0.7479255", "0.74182606", "0.7284481", "0.7259606", "0.7259606", "0.7225471", "0.7218564", "0.7094098", "0.7087787", "0.7074804", "0.69584227", "0.69584227", "0.69584227", "0.69000787", "0.68951434", "0.68951434", "0.68951434", "0.6872613" ]
0.8618813
0
Computes the logout URL for this request and the specified destination URL.
def create_logout_url(dest_url): req = user_service_pb.StringProto() resp = user_service_pb.StringProto() req.set_value(dest_url) try: apiproxy_stub_map.MakeSyncCall('user', 'CreateLogoutURL', req, resp) except apiproxy_errors.ApplicationError, e: if (e.application_error == user_service_pb.UserServiceError.REDIRECT_URL_TOO_LONG): raise RedirectTooLongError else: raise e return resp.value()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_logout_url(self, redirect_url=None):\n url = urllib_parse.urljoin(self.server_url, 'logout')\n if redirect_url:\n params = {self.logout_redirect_param_name: redirect_url}\n query = urllib_parse.urlencode(params)\n return ''.join([url, '?', query])\n return url", "def logouturl(request, response):\n from google.appengine.api import users as gusers\n return gusers.create_logout_url(request.uri)", "def logout_view():\n return url(r'^logout/$', logout, {'template_name': 'miniuser/logout.html'}, name='logout')", "def goto_url(self):\n msg = self._cw._('you have been logged out')\n return self._cw.base_url()", "def logout_view():\n return url(r'^logout/$', LogoutView.as_view(template_name='miniuser/logout.html'), name='logout')", "def logout_shim(request, **kwargs):\n auth_logout(request)\n redirect_to = request.REQUEST.get('next', '')\n if not redirect_to:\n if 'HTTP_REFERER' in request.META:\n pieces = urlparse.urlsplit(request.META['HTTP_REFERER'])\n redirect_to = '%s://%s' % (pieces.scheme, pieces.netloc)\n else:\n redirect_to = '/'\n return redirect(redirect_to)", "def get_expected_logout_url(self, slug):\n return current_user_services.create_logout_url(slug)", "def create_logout_url(parser, token):\n try:\n tag_name, dest_url = token.split_contents()\n except ValueError:\n raise TemplateSyntaxError(\"%r tag requires a single argument\" % token.contents.split()[0])\n return LogoutURLNode(dest_url)", "def _SafeCreateLogoutURL(mr):\n try:\n return users.create_logout_url(mr.current_page_url)\n except users.RedirectTooLongError:\n if mr.project_name:\n return users.create_logout_url('/p/%s' % mr.project_name)\n else:\n return users.create_logout_url('/')", "def logout(self, request):\n pass", "def logout(request):\n return internal_logout(request, next_page = reverse('wainz.views.composite'), redirect_field_name = 'next')", "def _redirect(self):\n \n # Redirect URL is held in 'r' URL arg of this request\n b64encReturnTo = str(request.params.get('r', ''))\n\n if b64encReturnTo:\n # Decode the return to address\n try:\n b64decReturnTo = base64.urlsafe_b64decode(b64encReturnTo)\n except Exception, e:\n log.error(\"logout - decoding return URL: %s\" % e) \n c.xml = \"Error carrying out browser redirect following logout\"\n response.status_code = 400\n return render('ndg.security.kid', 'ndg.security.error')\n \n # Check for 'getCredentials' - avoid in case username/password\n # contained in the URL!\n getCredentialsIdx = b64decReturnTo.rfind('/getCredentials')\n if getCredentialsIdx != -1:\n log.debug(\"Reverting request URL from getCredentials to \"\n \"login...\")\n b64decReturnTo = b64decReturnTo[:getCredentialsIdx] + '/login'\n \n # Add flag indicating to caller that logout succeeded. The caller\n # can use this to remove any security cookie present in their\n # domain - See:\n # ndg.security.client.ssoclient.ssoclient.lib.base.BaseController\n if '?' in b64decReturnTo:\n b64decReturnTo += '&logout=1'\n else:\n b64decReturnTo += '?logout=1'\n\n # and now go back to whence we had come\n log.debug(\"LogoutController._redirect: redirect to %s\" %\n b64decReturnTo)\n h.redirect_to(b64decReturnTo)\n else:\n log.debug(\"LogoutController._redirect: no redirect URL set.\")\n response.status_code = 400\n c.errorPageHeading = \"Log out\"\n if getattr(c, \"loggedIn\", False):\n c.xml = \"Logged out\"\n else:\n c.xml = (\"An error occurred logging out. 
Please report the \"\n \"problem to your site administrator\") \n \n return render('ndg.security.kid', 'ndg.security.error')", "def _CAS_logout(self):\n import urllib\n redirect(\"%s?service=%s\" % (self.cas_logout_url, self.cas_my_url))", "def get_redirect_url(self, *args, **kwargs):\n redirect = kwargs['route']\n self.permanent = redirect.permanent\n return redirect.target.url", "def delete_redirect_url(self):\n return url_for(self.delete_redirect_to_view)", "def logout(environ, start_response):\n uri = environ.get('HTTP_REFERER', '/')\n cookie = Cookie.SimpleCookie()\n cookie['tiddlyweb_user'] = ''\n cookie['tiddlyweb_user']['path'] = '/'\n cookie['tiddlyweb_user']['expires'] = '%s' % (time.ctime(time.time()-6000))\n start_response('303 See Other', [\n ('Set-Cookie', cookie.output(header='')),\n ('Location', uri)\n ])\n return [uri]", "def logout():\n session.pop('userinfo', None)\n # no more steps necessary, because we don't keep the token around\n if 'target' not in session.keys():\n return redirect(\"/\")\n return redirect(session['target'])", "def logout_redirect(request):\n logout(request)\n\n # Build the URL\n login_url = furl(login_redirect_url(request, next_url=request.build_absolute_uri()))\n\n # Check for branding\n if hasattr(settings, 'SCIAUTH_BRANDING'):\n logger.debug('SciAuth branding passed')\n\n # Encode it and pass it\n branding = base64.urlsafe_b64encode(json.dumps(settings.SCIAUTH_BRANDING).encode('utf-8')).decode('utf-8')\n login_url.query.params.add('branding', branding)\n\n # Set the URL and purge cookies\n response = redirect(login_url.url)\n response.delete_cookie('DBMI_JWT', domain=dbmi_settings.JWT_COOKIE_DOMAIN)\n logger.debug('Redirecting to: {}'.format(login_url.url))\n\n return response", "def logout(request):\n\n headers = forget(request)\n url = request.route_url('auth_logout', _app_url=get_app_url(request))\n return HTTPFound(location=url, headers=headers)", "def logout(self):\r\n # should redirect\r\n check_for_get_code(self, 302, reverse('logout'))", "def logout(request, redirect_url=None):\n auth_logout(request)\n\n url = redirect_url or getattr(settings, 'LOGOUT_REDIRECT_URL', '/')\n\n return HttpResponseRedirect(url)", "def logout(self):\n kwargs = {}\n r = self._token_id_request(urljoin(self._url, Client._logout_resource), **kwargs)", "def post_logout_redirect_uris(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"post_logout_redirect_uris\")", "def post_logout_redirect_uris(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"post_logout_redirect_uris\")", "def logout(self):\n return self.get('/logout', follow_redirects=True)", "def make_url(realm_url, endpoint):\n return \"{}/protocol/openid-connect/{}\".format(realm_url, endpoint)", "def logout_view(request):\n if request.user.is_authenticated:\n logout(request)\n callback_url = \"https://login.cern.ch/adfs/ls/?wa=wsignout1.0&ReturnUrl=\"\n callback_url += \"http%3A//\"\n callback_url += request.META[\"HTTP_HOST\"]\n callback_url += reverse(\"certhelper:logout_status\")\n return HttpResponseRedirect(callback_url)\n return HttpResponseRedirect(\"/\")", "def logout(self):\n url = self.base_url + \"/account/account/logout.html\"\n params = {\"dojo.preventCache\": str(int(time.time()))}\n url += \"?\" + urllib.parse.urlencode(params)\n self.fetch(url)", "def logout(self):\n\n return self.app.get(\"/logout/\", follow_redirects=True)", "def logout(self):" ]
[ "0.73377967", "0.6966632", "0.62754434", "0.62183374", "0.6205199", "0.61692834", "0.608848", "0.5966493", "0.5965179", "0.591183", "0.58448863", "0.58086485", "0.5677516", "0.56206053", "0.5591618", "0.55612814", "0.5513131", "0.5512236", "0.5501371", "0.54162616", "0.53906506", "0.5338931", "0.53339976", "0.53339976", "0.5333755", "0.5325946", "0.5304076", "0.52725756", "0.5246947", "0.5242765" ]
0.69746566
1
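The create_logout_url document above delegates URL construction to an App Engine RPC, so its exact output is not reproduced here; the sketch below only shows the general shape such a URL tends to take, with the '/logout' path and the 'continue' parameter name chosen for the example rather than taken from the record.

from urllib.parse import urlencode, urljoin

def build_logout_url(base_url, dest_url, redirect_param='continue'):
    # Generic illustration: point at a logout endpoint and carry the
    # destination along as an encoded query parameter.
    logout = urljoin(base_url, '/logout')
    return logout + '?' + urlencode({redirect_param: dest_url})

# build_logout_url('https://app.example.com/page', 'https://app.example.com/')
# -> 'https://app.example.com/logout?continue=https%3A%2F%2Fapp.example.com%2F'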
Get a handle to the SQL connection object.
def get_sql_connection(self): return self.sql
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_conn(self):\n return self.get_connection(self.mssql_conn_id)", "def get_connection(cls):\n return cls.database.connection", "def _get_connection(self) -> Connection:\n # TODO(101) is there a problem with having just one db connection?\n # Will this cause bugs with failed commits?\n curr_thread = threading.get_ident()\n if curr_thread not in self.conn or self.conn[curr_thread] is None:\n try:\n conn = sqlite3.connect(self.db_path)\n conn.row_factory = StringIDRow\n self.conn[curr_thread] = conn\n except sqlite3.Error as e:\n raise MephistoDBException(e)\n return self.conn[curr_thread]", "def getDbConnection(self, **kwargs):\r\n \r\n con = sql.connect(self._filename, **kwargs)\r\n con.row_factory = sql.Row\r\n return con", "def get_connection(self, session_cls=None):\n # If this connection has to be created within an existing session,\n # ``session_cls`` will be provided as an argument.\n # Otherwise, fetch a new ``session_cls`` from ``get_session()``\n if session_cls is None:\n session_cls = self.get_session()\n\n conn = session_cls()\n conn = self._execute_database_specific_connection_statements(conn)\n\n return conn", "def get_conn(self):\n conn = sqlite3.connect(self.uri)\n conn.row_factory = sqlite3.Row\n return conn", "def connection(self, name=None):\n if not name:\n name = threading.currentThread().getName()\n if name in self:\n return self[name]\n self[name] = self.database.connection()\n return self[name]", "def get_conn(cls):\n\n if not cls.conn or not cls.conn.open:\n cls.connect()\n\n try:\n cls.conn.ping() # ping to test if the current conn is working\n except MySQLdb.OperationalError:\n cls.connect()\n\n return cls.conn", "def __get_connection():\n # 根据配置文件创建连接池\n if not Mysql.__mysql_pool:\n Mysql.__mysql_pool = PooledDB(\n creator=MySQLdb,\n use_unicode=False,\n cursorclass=DictCursor,\n db=sqlconf.MysqlConfig['db'],\n host=sqlconf.MysqlConfig['host'],\n port=sqlconf.MysqlConfig['port'],\n user=sqlconf.MysqlConfig['user'],\n passwd=sqlconf.MysqlConfig['passwd'],\n charset=sqlconf.MysqlConfig['charset'],\n mincached=sqlconf.MysqlConfig['mincached'],\n maxcached=sqlconf.MysqlConfig['maxcached'],\n maxconnections=sqlconf.MysqlConfig['maxconnections'])\n # 返回连接池中连接对象\n return Mysql.__mysql_pool.connection()", "def get_connection(self):\n return self.connection", "def get_connection(self):\n return self.connection", "def __new__(cls, connection):\n return object.__new__(cls)", "def get_connection(self, simple_rows=False):\n return self.open(simple_rows)", "def _unthreadsafe_get_connection(self):\n return PooledDBConnection(self, self._queue.get())", "def connect(self):\n\n self.logger.debug(\"creating DB connection\")\n conn = sql.connect(**self.connection_arguments)\n self.logger.debug(\"DB connection ready: %r\", conn.get_host_info())\n return conn", "def _get_connection(self) -> sqlite3.Connection:\n curr_thread = threading.get_ident()\n if curr_thread not in self.conn or self.conn[curr_thread] is None:\n conn = sqlite3.connect(self.db_path, check_same_thread=False)\n conn.row_factory = sqlite3.Row\n self.conn[curr_thread] = conn\n return self.conn[curr_thread]", "def get_connection(db_url=None):\n return engine(db_url).connect()", "def create_connection():\r\n try:\r\n conn = sq.connect(DBClass.db_name)\r\n except sq.Error as e:\r\n raise e\r\n \r\n return conn", "def get_new_connection(self, conn_params):\r\n self.__connection_string = conn_params.get('connection_string', '')\r\n conn = Database.connect(**conn_params)\r\n return conn", "def get_connection():\n\t# 
flask.g documentation: http://flask.pocoo.org/docs/0.12/api/#flask.g\n\ttry:\n\t\tconn = flask.g._database_connection\n\texcept AttributeError:\n\t\tconn = flask.g._database_connection = sqlite3.connect(config.PATH_DATABASE,\n\t\t\t\tdetect_types=sqlite3.PARSE_DECLTYPES) # allows storing datetime, etc.\n\t\tconn.row_factory = sqlite3.Row\n\treturn conn", "def connection(self) -> Connection:\n if not self._connection:\n self._connection = self.engine.connect()\n\n return self._connection", "def get_connection():\n con = psycopg2.connect(**DB_CONFIG)\n return con", "def _get_connection(reconnect=False):\n global _connection\n identity = get_identity()\n # Connect to the database if not already connected\n if _connection.get(identity) is None or reconnect:\n try:\n _connection[identity] = Connection(**_connection_settings)\n except Exception, e:\n raise ConnectionError(\"Cannot connect to the database:\\n%s\" % e)\n return _connection[identity]", "def get_connection(self):\n return self._connection", "def get_connection(self):\n return self._connection", "def get_connection(self):\n import psycopg2 as dbapi\n self.get_input()\n conn = dbapi.connect(host=self.opts[\"host\"],\n port=int(self.opts[\"port\"]),\n user=self.opts[\"user\"],\n password=self.opts[\"password\"],\n database=self.opts[\"database\"])\n encoding = ENCODING.lower()\n if self.script.encoding:\n encoding = self.script.encoding.lower()\n encoding_lookup = {'iso-8859-1': 'Latin1', 'latin-1': 'Latin1', 'utf-8': 'UTF8'}\n db_encoding = encoding_lookup.get(encoding)\n conn.set_client_encoding(db_encoding)\n return conn", "def real_conn(self) -> Connection:\n return self._real_conn", "def connection(self):\n return self.get_connection()", "def connection(self) -> \"Connection[Any]\":\n return self._conn", "def get(self, conn_id: str) -> Connection:\n return Connection.from_dict(self.query(f'{CONNECTION_URL}/{conn_id}'))" ]
[ "0.73680174", "0.7256587", "0.69811875", "0.69683695", "0.69191766", "0.6882339", "0.6825282", "0.6809682", "0.6749888", "0.669564", "0.669564", "0.66404516", "0.66221774", "0.66088647", "0.660717", "0.65949684", "0.6583066", "0.6569411", "0.65676725", "0.656309", "0.65545565", "0.6542207", "0.65178335", "0.65041935", "0.65041935", "0.6494186", "0.6472929", "0.6472291", "0.64715344", "0.6454706" ]
0.73205405
1
Initialize rpkirtr database tables. Three tables are created: one that keeps track of the rpkirtr session, one that keeps track of the prefixes associated with each active rpkirtr session, and finally one for storing router key information.
def init_rpki_rtr_tables(self): cur = self.sql.cursor() cur.execute("PRAGMA foreign_keys = on") cur.execute(''' CREATE TABLE cache ( cache_id INTEGER PRIMARY KEY NOT NULL, host TEXT NOT NULL, port TEXT NOT NULL, version INTEGER, nonce INTEGER, serial INTEGER, updated INTEGER, refresh INTEGER, retry INTEGER, expire INTEGER, UNIQUE (host, port))''') cur.execute(''' CREATE TABLE prefix ( prefix_id INTEGER PRIMARY KEY AUTOINCREMENT, cache_id INTEGER NOT NULL REFERENCES cache(cache_id) ON DELETE CASCADE ON UPDATE CASCADE, asn INTEGER NOT NULL, prefix TEXT NOT NULL, prefixlen INTEGER NOT NULL, max_prefixlen INTEGER NOT NULL, prefix_min TEXT, prefix_max TEXT, UNIQUE (cache_id, asn, prefix, prefixlen, max_prefixlen))''') cur.execute(''' CREATE TABLE routerkey ( cache_id INTEGER NOT NULL REFERENCES cache(cache_id) ON DELETE CASCADE ON UPDATE CASCADE, asn INTEGER NOT NULL, ski TEXT NOT NULL, key TEXT NOT NULL, UNIQUE (cache_id, asn, ski), UNIQUE (cache_id, asn, key))''') self.sql.commit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_rib_tables(self):\n cur = self.sql.cursor()\n cur.execute(\"PRAGMA foreign_keys = on\")\n cur.execute('''\n CREATE TABLE rtr_cache (\n rtr_id INTEGER PRIMARY KEY NOT NULL,\n device TEXT NOT NULL,\n rtrupdt INTEGER,\n UNIQUE (device))''')\n cur.execute('''\n CREATE TABLE rtr_rib (\n rtr_id INTEGER NOT NULL\n REFERENCES rtr_cache(rtr_id)\n ON DELETE CASCADE\n ON UPDATE CASCADE,\n idx INTEGER NOT NULL,\n status TEXT,\n pfx TEXT NOT NULL,\n pfxlen INTEGER NOT NULL,\n pfxstr_min TEXT NOT NULL,\n pfxstr_max TEXT NOT NULL,\n nexthop TEXT NOT NULL,\n metric INTEGER,\n locpref INTEGER,\n weight INTEGER,\n pathbutone TEXT,\n orig_asn INTEGER NOT NULL,\n route_orig TEXT)''')\n self.sql.commit()", "def init_tables(self):\n\n settings.Base.metadata.tables[\n 'session_master'].drop(bind=settings.engine)\n settings.Base.metadata.tables['uurl'].drop(bind=settings.engine)\n\n settings.Base.metadata.tables[\n 'session_master'].create(bind=settings.engine)\n settings.Base.metadata.tables['uurl'].create(bind=settings.engine)\n\n logging.info(\"Sessionization Tables created\")", "def prep(self):\n sq1 = 'create table TCVR ( ID, T, C, V, R , primary key ( ID ) ) ;'\n sq2 = 'create table IDX ( ID , A , primary key(A) ) ; '\n self.sq.SQX(sq1)\n self.sq.SQX(sq2)\n sq3 = \"insert into IDX VALUES ( 1 , 'A' ) ; \"\n self.sq.SQX(sq3)", "def initialize():\n DATABASE.connect()\n DATABASE.create_tables([User, Entry], safe=True)\n DATABASE.close()", "def initialize():\n\n db.connect() # Se conecta\n db.create_tables([Entry], safe=True) # Crea las tablas\n # safe=true evita crear modelos ya creados", "def initialize():\n db.connect()\n db.create_tables([Entry], safe=True)", "def initialize():\n db.connect()\n db.create_tables([Entry], safe=True)", "def __init__(self):\n self.__db = sqlite3.connect(DB_PATH)\n self.__cur = self.__db.cursor()\n self.__create_tables()", "def initialize():\n \n db.connect()\n db.create_tables([Product], safe=True)", "def init_db():\n # We are setting the module variables here for the first time, so disable the warning\n global DB_USER_TABLE # pylint: disable=global-variable-undefined\n global DB_CUSTOMER_TABLE # pylint: disable=global-variable-undefined\n global DB_USER_CUSTOMER_RELS_TABLE # pylint: disable=global-variable-undefined\n global DB_TICKET_TABLE # pylint: disable=global-variable-undefined\n global DB_COMMENT_TABLE # pylint: disable=global-variable-undefined\n\n db = TinyDB(app.config['DB_NAME'])\n\n DB_USER_TABLE = db.table('users')\n DB_CUSTOMER_TABLE = db.table('customers')\n DB_USER_CUSTOMER_RELS_TABLE = db.table('user_customer_rels')\n DB_TICKET_TABLE = db.table('tickets')\n DB_COMMENT_TABLE = db.table('comments')", "def __init__(self, datastore_root: str):\n self.session_storage: Dict[str, ProlificClient] = {}\n self.agent_data: Dict[str, Dict[str, Any]] = {}\n self.table_access_condition = threading.Condition()\n self.conn: Dict[int, sqlite3.Connection] = {}\n self.db_path = os.path.join(datastore_root, f\"{PROVIDER_TYPE}.db\")\n self.init_tables()\n self.datastore_root = datastore_root\n self._last_study_mapping_update_times: Dict[str, float] = defaultdict(\n lambda: time.monotonic()\n )", "def initialize(self):\n\n db = dict()\n\n db['meta'] = Meta(None)\n db['race'] = Race(None, None, None, None, None)\n db['track'] = Track(None, None)\n db['classes'] = set([])\n db['teams'] = set([])\n db['drivers'] = set([])\n\n self.db = db", "def __init__(self):\n engine = db_connect()\n create_tables(engine)\n self.Session = sessionmaker(bind=engine)", "def init_db(self):\n 
print(\"Initializing database...\", end='')\n self.cursor.execute(\"DROP DATABASE %s\" % self.db.database)\n self.__init__(self.db_name)\n self.cursor.execute(\"USE %s\" % self.db.database)\n\n # Book\n self.cursor.execute(\n \"\"\"CREATE TABLE Book (\n ISBN VARCHAR(13),\n title VARCHAR(300) COLLATE utf8_general_ci,\n publisher VARCHAR(100) COLLATE utf8_general_ci,\n lang VARCHAR(40),\n publicationDate DATE,\n pageCount SMALLINT CHECK(pageCount >= 0),\n stock SMALLINT CHECK(stock >= 0),\n price DECIMAL(5,2),\n subject VARCHAR(100),\n avg_rating DECIMAL(4,2) CHECK(avg_rating <= 10.00),\n total_rating_score INT DEFAULT 0,\n num_ratings INT DEFAULT 0,\n PRIMARY KEY (ISBN))\"\"\")\n\n # Author\n self.cursor.execute(\n \"\"\"CREATE TABLE Author (\n ID INT AUTO_INCREMENT,\n name VARCHAR(200) COLLATE utf8_general_ci,\n lang VARCHAR(40),\n PRIMARY KEY (ID))\"\"\")\n\n # CustomerPersonal\n self.cursor.execute(\n \"\"\"CREATE TABLE CustomerPersonal (\n phone CHAR(10),\n address VARCHAR(300) NOT NULL,\n PRIMARY KEY (phone))\"\"\")\n\n # CustomerCredentials\n self.cursor.execute(\n \"\"\"CREATE TABLE CustomerCredentials (\n loginID VARCHAR(30),\n firstName VARCHAR(50) NOT NULL,\n lastName VARCHAR(50) NOT NULL,\n salt VARBINARY(32) NOT NULL,\n pass_key VARBINARY(32) NOT NULL,\n phone CHAR(10) NOT NULL,\n PRIMARY KEY (loginID),\n FOREIGN KEY (phone) REFERENCES CustomerPersonal(phone)\n ON UPDATE CASCADE ON DELETE RESTRICT)\"\"\")\n\n # ManagerPersonal\n self.cursor.execute(\n \"\"\"CREATE TABLE ManagerPersonal (\n phone CHAR(10),\n address VARCHAR(300) NOT NULL,\n PRIMARY KEY (phone))\"\"\")\n\n # ManagerCredentials\n self.cursor.execute(\n \"\"\"CREATE TABLE ManagerCredentials (\n loginID VARCHAR(30),\n managerID INT UNIQUE NOT NULL AUTO_INCREMENT,\n firstName VARCHAR(50),\n lastName VARCHAR(50),\n salt VARBINARY(32) NOT NULL,\n pass_key VARBINARY(32) NOT NULL,\n phone CHAR(10) NOT NULL,\n PRIMARY KEY (loginID),\n FOREIGN KEY (phone) REFERENCES ManagerPersonal(phone)\n ON UPDATE CASCADE ON DELETE RESTRICT)\"\"\")\n\n # Comment\n self.cursor.execute(\n \"\"\"CREATE TABLE Comment (\n commentID INT AUTO_INCREMENT,\n ISBN VARCHAR(13) NOT NULL,\n loginID VARCHAR(30) NOT NULL,\n score TINYINT NOT NULL,\n message TEXT,\n veryUseful INT DEFAULT 0,\n useful INT DEFAULT 0,\n useless INT DEFAULT 0,\n avg_usefulness DECIMAL (3,2),\n commentDate DATETIME,\n PRIMARY KEY (commentID),\n FOREIGN KEY (ISBN) REFERENCES Book(ISBN)\n ON UPDATE RESTRICT ON DELETE CASCADE,\n FOREIGN KEY (loginID) REFERENCES CustomerCredentials(loginID)\n ON UPDATE CASCADE ON DELETE CASCADE)\"\"\")\n\n # OrderLog\n self.cursor.execute(\n \"\"\"CREATE TABLE OrderLog (\n orderNumber INT AUTO_INCREMENT,\n loginID VARCHAR(30) NOT NULL,\n orderDate DATE,\n PRIMARY KEY (orderNumber),\n FOREIGN KEY (loginID) REFERENCES CustomerCredentials(loginID)\n ON UPDATE CASCADE ON DELETE CASCADE)\"\"\")\n\n # Return Request\n self.cursor.execute(\n \"\"\"CREATE TABLE ReturnRequest (\n requestID INT AUTO_INCREMENT,\n orderNumber INT NOT NULL,\n requestDate DATE,\n ISBN VARCHAR(13) NOT NULL,\n quantity SMALLINT,\n status VARCHAR(25) DEFAULT 'PENDING',\n PRIMARY KEY (requestID),\n FOREIGN KEY (orderNumber) REFERENCES OrderLog(orderNumber)\n ON UPDATE RESTRICT ON DELETE CASCADE)\"\"\")\n\n # # HasKeyword\n # self.cursor.execute(\n # \"\"\"CREATE TABLE HasKeyword (\n # ISBN VARCHAR(13),\n # word VARCHAR(50) COLLATE utf8_general_ci,\n # PRIMARY KEY (ISBN, word),\n # FOREIGN KEY (ISBN) REFERENCES Book(ISBN)\n # ON UPDATE RESTRICT ON DELETE CASCADE)\"\"\")\n\n # 
Wrote\n self.cursor.execute(\n \"\"\"CREATE TABLE Wrote (\n authorID INT,\n ISBN VARCHAR(13),\n PRIMARY KEY (authorID, ISBN),\n FOREIGN KEY (authorID) REFERENCES Author(ID)\n ON UPDATE RESTRICT ON DELETE RESTRICT,\n FOREIGN KEY (ISBN) REFERENCES Book(ISBN)\n ON UPDATE RESTRICT ON DELETE CASCADE)\"\"\")\n\n # ProductOf\n self.cursor.execute(\n \"\"\"CREATE TABLE ProductOf (\n ISBN VARCHAR(13),\n orderNumber INT,\n quantity SMALLINT CHECK(quantity > 0),\n PRIMARY KEY (ISBN, orderNumber),\n FOREIGN KEY (ISBN) REFERENCES Book(ISBN)\n ON UPDATE RESTRICT ON DELETE CASCADE,\n FOREIGN KEY (orderNUmber) REFERENCES OrderLog(orderNumber)\n ON UPDATE RESTRICT ON DELETE CASCADE)\"\"\")\n\n # Trusts\n self.cursor.execute(\n \"\"\"CREATE TABLE Trusts (\n loginID VARCHAR(30),\n otherLoginID VARCHAR(30) CHECK(loginID<>otherLoginID),\n trustStatus VARCHAR(9) CHECK(trustStatus = 'TRUSTED' OR trustStatus = 'UNTRUSTED'),\n PRIMARY KEY (loginID, otherLoginID),\n FOREIGN KEY (loginID) REFERENCES CustomerCredentials(loginID)\n ON UPDATE CASCADE ON DELETE CASCADE,\n FOREIGN KEY (otherLoginID) REFERENCES CustomerCredentials(loginID)\n ON UPDATE CASCADE ON DELETE CASCADE)\"\"\")\n\n # Rates\n self.cursor.execute(\n \"\"\"CREATE TABLE Rates (\n loginID VARCHAR(30),\n commentID INT,\n rating VARCHAR(10) NOT NULL,\n PRIMARY KEY (loginID, commentID),\n FOREIGN KEY (loginID) REFERENCES CustomerCredentials(loginID)\n ON UPDATE CASCADE ON DELETE CASCADE,\n FOREIGN KEY (commentID) REFERENCES Comment(commentID)\n ON UPDATE RESTRICT ON DELETE CASCADE)\"\"\"\n )\n\n print(\"done\")", "def initialize():\n DATABASE.connect()\n DATABASE.create_tables([User], safe=True)\n DATABASE.close()", "def initialize(self) -> None:\n # First, establish a connection to the specified database\n try:\n self._connect_to_db()\n except psycopg2.OperationalError: # specified database does not exist\n with psycopg2.connect(database=DATABASE_ENV[\"POSTGRES_DB\"],\n user=self.dbuser, password=self.dbpassword,\n host=self.dbhost, port=str(self.dbport)) as con:\n with con.cursor() as cur:\n con.autocommit = True # cannot create db inside a transaction\n cur.execute(f'CREATE DATABASE \"{self.dbname}\"')\n con.autocommit = False\n self._connect_to_db() # try again\n\n # Second, create the necessary database table, only if required\n with self._connection.cursor() as cur:\n cur.execute(f\"\"\"\n CREATE TABLE IF NOT EXISTS \"{self.MESSAGE_TABLE_NAME}\" (\n id SERIAL PRIMARY KEY,\n key CHAR(4) NOT NULL,\n value REAL NOT NULL,\n ts TIMESTAMP NOT NULL,\n tz TEXT NOT NULL\n );\n \"\"\")\n self._connection.commit()", "def initialize_database():\n # Create the schema\n Base.metadata.create_all(engine)\n\n # Create a connection/database session\n session = Session()\n\n # Now, create a few restaurants:\n cupcake = Restaurant(name=\"Cupcakes\")\n five_guys = Restaurant(name=\"Five Guys\")\n ihop = Restaurant(name=\"IHOP\")\n\n # And a few users:\n mike = User(name=\"Mike\")\n ryan = User(name=\"Ryan\")\n\n # And finally a few votes:\n mike.preferences.append(Preference(vote=\"+1\", restaurant=five_guys))\n ryan.preferences.append(Preference(vote=\"+0\", restaurant=five_guys))\n ryan.preferences.append(Preference(vote=\"-0\", restaurant=cupcake))\n\n session.add(mike)\n session.add(ryan)\n session.add(ihop)\n\n session.commit()\n\n session.close()", "def __init__(self):\n engine = db_connect()\n create_reals_table(engine)\n self.Session = sessionmaker(bind=engine)", "def initDB():\n global DATABASE\n\n uid0 = generate_resource_uid('Admin1', 0)\n\n 
DATABASE[\"users\"] = {\n \"Admin1\": {\n \"Type\": \"admin\",\n \"Password\": \"AdminPass\",\n \"Quota\": int(sys.maxsize),\n \"Resources\": {uid0},\n \"Created\": 1,\n },\n \"User1\": {\n \"Type\": \"user\",\n \"Password\": \"UserPass\",\n \"Quota\": int(sys.maxsize),\n \"Resources\": set([]),\n \"Created\": 0,\n }\n }\n\n DATABASE[\"resources\"] = {\n uid0: \"Admin1\",\n }", "def init_db() -> None:\n conn = sqlite3.connect('../Utils/map_storage.db')\n cursor = conn.cursor()\n\n with conn:\n station_cmd = \"\"\"CREATE TABLE IF NOT EXISTS\n nodes(city TEXT, name TEXT, is_station TEXT, x INT, y INT, zone TEXT)\"\"\"\n\n cursor.execute(station_cmd)\n\n connection_cmd = \"\"\"CREATE TABLE IF NOT EXISTS\n connections(city TEXT, name_1 TEXT, name_2 TEXT, color TEXT)\"\"\"\n\n cursor.execute(connection_cmd)", "def initialize():\n DATABASE.connect()\n DATABASE.drop_tables([Journal], safe=True)\n DATABASE.create_tables([Journal], safe=True)\n DATABASE.close()", "def init():\n database.create_tables([Tracker])\n database.commit()", "def init_tables(self) -> None:\n with self.table_access_condition:\n conn = self._get_connection()\n conn.execute(\"PRAGMA foreign_keys = 1\")\n c = conn.cursor()\n c.execute(tables.CREATE_STUDIES_TABLE)\n c.execute(tables.CREATE_SUBMISSIONS_TABLE)\n c.execute(tables.CREATE_REQUESTERS_TABLE)\n c.execute(tables.CREATE_UNITS_TABLE)\n c.execute(tables.CREATE_WORKERS_TABLE)\n c.execute(tables.CREATE_RUNS_TABLE)\n c.execute(tables.CREATE_RUN_MAP_TABLE)\n c.execute(tables.CREATE_PARTICIPANT_GROUPS_TABLE)\n c.execute(tables.CREATE_PARTICIPANT_GROUP_QUALIFICATIONS_MAPPING_TABLE)\n conn.commit()", "def _init_db(self):\n cursor = self._main_connection.cursor()\n cursor.execute(self.sql[\"create_table\"])\n self._main_connection.commit()", "def init_db(self):\n self.db_config = databaseutils.process_db_config(self.config['db'])\n\n from sqlalchemy import create_engine\n from sqlalchemy.orm import sessionmaker, scoped_session\n self.engine = create_engine(self.db_config.constr, pool_recycle=3600)\n self.session = scoped_session(sessionmaker(bind=self.engine))\n\n # Make sure tables are created\n DB_Base.metadata.create_all(self.engine)", "def initialize_database():\n # TODO: Refactor the funtime library\n this.db = Store(this.host).create_lib(this.store_name).get_store()", "def init_db():\n db.drop_all()\n db.create_all()\n\n print(\"Initialized Connect 4 Database.\")", "def init_db():\n db = get_db()\n Page.create_table(db)\n PageVersion.create_table(db)\n User.create_table(db)", "def initialise(self):\n \n # Create the RDM control directory if it doesn't already exist.\n rdm_control_directory = self.repo.working_dir + \"/.rdm\"\n if not os.path.exists(rdm_control_directory):\n os.makedirs(rdm_control_directory)\n \n # Set up the SQLite database.\n self.db_connect()\n \n # If this file already exists, then skip this step.\n if self.db_exists():\n response = raw_input(\"The publications database already exists. Do you want to overwrite it? (y/n)\\n\")\n if response == \"y\" or response == \"Y\":\n _LOG.info(\"Overwriting...\")\n with self.connection:\n c = self.connection.cursor()\n query = \"DROP TABLE %s\" % PUBLICATIONS_TABLE\n c.execute(query)\n elif response == \"n\" or response == \"N\":\n _LOG.info(\"Not overwriting.\")\n return\n else:\n _LOG.error(\"Unknown response '%s'. 
Not overwriting.\" % response)\n return\n\n # Set up publication table columns.\n with self.connection:\n c = self.connection.cursor()\n query = \"CREATE TABLE %s (id INTEGER PRIMARY KEY AUTOINCREMENT, path TEXT, date TEXT, time TEXT, sha TEXT, pid TEXT, doi TEXT)\" % PUBLICATIONS_TABLE\n c.execute(query)\n\n # Disconnect.\n self.db_disconnect()\n\n return", "def setup_tables(self):\n try:\n self.cursor.execute('CREATE SCHEMA sandbox')\n self.cursor.execute(\"DROP TABLE sandbox.dvds_rdbhdb_super;\")\n except (db.ProgrammingError, db.OperationalError), e:\n # sandbox may not exist\n pass #raise\n\n try:\n self.cursor.execute(\n \"\"\"CREATE TABLE sandbox.dvds_rdbhdb_super(\n id SERIAL PRIMARY KEY,\n name varchar(40) NOT NULL,\n rating float,\n UNIQUE(name)\n );\n \"\"\" )\n except db.ProgrammingError, e:\n if e[0] != '42P07':\n raise" ]
[ "0.7012303", "0.6316942", "0.62167615", "0.6146707", "0.6076793", "0.60717803", "0.60717803", "0.606234", "0.60622776", "0.6019015", "0.59543115", "0.592308", "0.5921048", "0.5912806", "0.588549", "0.5882846", "0.5846708", "0.58415794", "0.5828501", "0.58272105", "0.57964", "0.57863677", "0.5778713", "0.574801", "0.574661", "0.5743334", "0.5729962", "0.57206225", "0.5720353", "0.5689411" ]
0.81615967
0
Initialize RIB database tables. Two tables are created. One stores the rtr ID associated with a given device that is to be queried, while the second stores different route attributes gleaned from the 'sh ip bgp' command.
def init_rib_tables(self): cur = self.sql.cursor() cur.execute("PRAGMA foreign_keys = on") cur.execute(''' CREATE TABLE rtr_cache ( rtr_id INTEGER PRIMARY KEY NOT NULL, device TEXT NOT NULL, rtrupdt INTEGER, UNIQUE (device))''') cur.execute(''' CREATE TABLE rtr_rib ( rtr_id INTEGER NOT NULL REFERENCES rtr_cache(rtr_id) ON DELETE CASCADE ON UPDATE CASCADE, idx INTEGER NOT NULL, status TEXT, pfx TEXT NOT NULL, pfxlen INTEGER NOT NULL, pfxstr_min TEXT NOT NULL, pfxstr_max TEXT NOT NULL, nexthop TEXT NOT NULL, metric INTEGER, locpref INTEGER, weight INTEGER, pathbutone TEXT, orig_asn INTEGER NOT NULL, route_orig TEXT)''') self.sql.commit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_rpki_rtr_tables(self):\n cur = self.sql.cursor()\n cur.execute(\"PRAGMA foreign_keys = on\")\n cur.execute('''\n CREATE TABLE cache (\n cache_id INTEGER PRIMARY KEY NOT NULL,\n host TEXT NOT NULL,\n port TEXT NOT NULL,\n version INTEGER,\n nonce INTEGER,\n serial INTEGER,\n updated INTEGER,\n refresh INTEGER,\n retry INTEGER,\n expire INTEGER,\n UNIQUE (host, port))''')\n cur.execute('''\n CREATE TABLE prefix (\n prefix_id INTEGER PRIMARY KEY AUTOINCREMENT,\n cache_id INTEGER NOT NULL\n REFERENCES cache(cache_id)\n ON DELETE CASCADE\n ON UPDATE CASCADE,\n asn INTEGER NOT NULL,\n prefix TEXT NOT NULL,\n prefixlen INTEGER NOT NULL,\n max_prefixlen INTEGER NOT NULL,\n prefix_min TEXT,\n prefix_max TEXT,\n UNIQUE (cache_id, asn, prefix, prefixlen, max_prefixlen))''')\n cur.execute('''\n CREATE TABLE routerkey (\n cache_id INTEGER NOT NULL\n REFERENCES cache(cache_id)\n ON DELETE CASCADE\n ON UPDATE CASCADE,\n asn INTEGER NOT NULL,\n ski TEXT NOT NULL,\n key TEXT NOT NULL,\n UNIQUE (cache_id, asn, ski),\n UNIQUE (cache_id, asn, key))''')\n self.sql.commit()", "def initialize():\n \n db.connect()\n db.create_tables([Product], safe=True)", "def initialize(self):\n\n cursor = self.conn.cursor()\n\n # This table can be used as a parent for a collection of runs\n cursor.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS RunCollections (\n id INT AUTO_INCREMENT PRIMARY KEY,\n name VARCHAR(14) UNIQUE\n );\"\"\"\n )\n\n # This table holds in which run each appears.\n cursor.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS Runs (\n id INT AUTO_INCREMENT PRIMARY KEY,\n name VARCHAR(14) UNIQUE,\n collection_id INT,\n FOREIGN KEY (collection_id) REFERENCES RunCollections (id) ON DELETE CASCADE);\"\"\"\n )\n\n # This table holds resources, which can be in multiple runs and have multiple varieties\n cursor.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS Resources (\n id INT AUTO_INCREMENT PRIMARY KEY, \n extension VARCHAR(20), \n webpage VARCHAR(30),\n run_id INT NOT NULL,\n FOREIGN KEY (run_id) REFERENCES Runs (id) ON DELETE CASCADE);\"\"\"\n )\n\n cursor.execute(\n 'SELECT Table_name FROM information_schema.tables WHERE table_schema = \"vpntfg0\" AND Table_name LIKE \"%Varieties_%\" ORDER BY Table_name'\n )\n for row in cursor.fetchall():\n self.variety_tables.append(row[0])\n\n cursor.close()\n _logger.info(\"Variety tables are: %s\" % self.variety_tables)\n\n _logger.info(\"Database initialized\")", "def initDB():\n global DATABASE\n\n uid0 = generate_resource_uid('Admin1', 0)\n\n DATABASE[\"users\"] = {\n \"Admin1\": {\n \"Type\": \"admin\",\n \"Password\": \"AdminPass\",\n \"Quota\": int(sys.maxsize),\n \"Resources\": {uid0},\n \"Created\": 1,\n },\n \"User1\": {\n \"Type\": \"user\",\n \"Password\": \"UserPass\",\n \"Quota\": int(sys.maxsize),\n \"Resources\": set([]),\n \"Created\": 0,\n }\n }\n\n DATABASE[\"resources\"] = {\n uid0: \"Admin1\",\n }", "def pre_interface_route_table_create(self, resource_dict):\n pass", "def setup_table(self):\n\n self.setup.create_basic_table_in_dev()\n self.setup.insert_random_records_into_dev()", "def initDb(self) -> None:\n try: \n connection = None \n dump = open('db/db.sql')\n sql_str = dump.read() \n connection = self.connect()\n cursor = connection.cursor()\n cursor.executescript(sql_str)\n acs_ports = JsonSettings.parseJson('settings.json','AcsPorts')\n db_ports = JsonSettings.parseJson('settings.json','DbPorts')\n acs_port_names = JsonSettings.getKeys('acs_port_', acs_ports) \n db_port_names = JsonSettings.getKeys('db_port_', db_ports)\n for acs_port_name in 
acs_port_names:\n cursor.execute(f\"ALTER TABLE ports ADD COLUMN {acs_port_name} INTEGER\")\n for db_port_name in db_port_names:\n cursor.execute(f\"ALTER TABLE ports ADD COLUMN {db_port_name} INTEGER\") \n except Exception as e:\n logging.error(f'{self.cn} Error \\n{e}', exc_info=1)\n finally:\n connection.commit() \n tables = cursor.execute(\"SELECT name FROM sqlite_master WHERE type='table' AND name != 'sqlite_sequence'\")\n logging.info(f'{self.cn} Database created with tables:\\n{tables.fetchall()}')\n if connection:\n connection.close()", "def prep(self):\n sq1 = 'create table TCVR ( ID, T, C, V, R , primary key ( ID ) ) ;'\n sq2 = 'create table IDX ( ID , A , primary key(A) ) ; '\n self.sq.SQX(sq1)\n self.sq.SQX(sq2)\n sq3 = \"insert into IDX VALUES ( 1 , 'A' ) ; \"\n self.sq.SQX(sq3)", "def init():\n database = \"database.pkl\"\n\n onsite_bills = BillID(database)\n online_bills = BillID(database)\n\n return onsite_bills, online_bills", "def init_service(self):\n conn = self.get_conn()\n c = conn.cursor()\n c.execute(SQL[\"delete_services\"])\n c.execute(SQL[\"delete_stops\"])\n c.execute(SQL[\"delete_routes\"])\n bus_routes = self.load_json(\"bus_routes.json\")\n bus_services = self.load_json(\"bus_services.json\")\n bus_stops = self.load_json(\"bus_stops.json\")\n c.executescript(\"\"\"\n CREATE TABLE IF NOT EXISTS \"Stops\" (\n \"id\"\tINTEGER,\n \"BusStopCode\"\tTEXT,\n \"Description\"\tTEXT,\n \"Latitude\" REAL,\n \"Longitude\" REAL,\n PRIMARY KEY(\"id\")\n );\n CREATE TABLE IF NOT EXISTS \"Services\" (\n \"ServiceNo\"\tTEXT,\n \"Operator\"\tTEXT,\n \"Direction\"\tINTEGER,\n \"Category\"\tTEXT,\n PRIMARY KEY(\"ServiceNo\",\"Direction\")\n );\n CREATE TABLE IF NOT EXISTS \"Routes\" (\n \"ServiceNo\"\tTEXT,\n \"Direction\"\tINTEGER,\n \"BusStopCode\"\tTEXT,\n FOREIGN KEY(\"BusStopCode\") REFERENCES \"Stops\"(\"BusStopCode\"),\n FOREIGN KEY(\"ServiceNo\") REFERENCES \"Services\"(\"ServiceNo\")\n );\"\"\")\n print(\"Tables successfully created.\")\n for route in bus_routes:\n c.execute(SQL[\"insert_routes\"], (route[\"ServiceNo\"], route[\"Direction\"], route[\"BusStopCode\"]))\n for service in bus_services:\n c.execute(SQL[\"insert_services\"], (service[\"ServiceNo\"], service[\"Operator\"], service[\"Direction\"], \n service[\"Category\"]))\n for stop in bus_stops:\n c.execute(SQL[\"insert_stops\"], (stop[\"BusStopCode\"], stop[\"Description\"], stop[\"Latitude\"], stop[\"Longitude\"]))\n conn.commit()\n conn.close()", "def initialize_tables(database_connection_object, logger):\n\n try:\n cmd = \"\"\"\n create table if not exists `services_fingerprint_table` (\n target varchar(20),\n port int,\n name varchar(20),\n version varchar(500))\n \"\"\"\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n database_connection_object.cursor().execute(cmd)\n\n except ProgrammingError as programming_error:\n logger.error(programming_error)\n\n except pymysql.err.Warning as pymysql_warning:\n logger.error(pymysql_warning)", "def init_db() -> None:\n conn = sqlite3.connect('../Utils/map_storage.db')\n cursor = conn.cursor()\n\n with conn:\n station_cmd = \"\"\"CREATE TABLE IF NOT EXISTS\n nodes(city TEXT, name TEXT, is_station TEXT, x INT, y INT, zone TEXT)\"\"\"\n\n cursor.execute(station_cmd)\n\n connection_cmd = \"\"\"CREATE TABLE IF NOT EXISTS\n connections(city TEXT, name_1 TEXT, name_2 TEXT, color TEXT)\"\"\"\n\n cursor.execute(connection_cmd)", "def pre_route_table_create(self, resource_dict):\n pass", "def initialize():\n db.connect()\n db.create_tables([Entry], 
safe=True)", "def initialize():\n db.connect()\n db.create_tables([Entry], safe=True)", "def initialize():\n DATABASE.connect()\n DATABASE.create_tables([User, Entry], safe=True)\n DATABASE.close()", "def full_initialization_process():\n\n db1 = Database('TOBACCO_RAW;')\n con1, cur1 = db1.connect()\n cur1.execute('create index idl_doc_field_id_idx on idl_doc_field(id);')\n cur1.execute('create index idl_doc_id_idx on idl_doc(id);')\n add_timestamp_to_idl_doc()\n\n create_utf_text_files()\n\n initialize_tables()\n fill_tables()", "def _initialize(self, chain, length):\n # If the table already exists, exit now.\n if chain != 0:\n return\n\n # Determine size\n try:\n size = len(self._getfunc())\n except TypeError:\n size = 1\n\n query = \"create table %s (recid INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, trace int(5), %s FLOAT)\" % (self.name, ' FLOAT, '.join(['v%s' % (x+1) for x in range(size)]))\n self.db.cur.execute(query)", "def init_db():\n # We are setting the module variables here for the first time, so disable the warning\n global DB_USER_TABLE # pylint: disable=global-variable-undefined\n global DB_CUSTOMER_TABLE # pylint: disable=global-variable-undefined\n global DB_USER_CUSTOMER_RELS_TABLE # pylint: disable=global-variable-undefined\n global DB_TICKET_TABLE # pylint: disable=global-variable-undefined\n global DB_COMMENT_TABLE # pylint: disable=global-variable-undefined\n\n db = TinyDB(app.config['DB_NAME'])\n\n DB_USER_TABLE = db.table('users')\n DB_CUSTOMER_TABLE = db.table('customers')\n DB_USER_CUSTOMER_RELS_TABLE = db.table('user_customer_rels')\n DB_TICKET_TABLE = db.table('tickets')\n DB_COMMENT_TABLE = db.table('comments')", "def initialize_database():\n # Create the schema\n Base.metadata.create_all(engine)\n\n # Create a connection/database session\n session = Session()\n\n # Now, create a few restaurants:\n cupcake = Restaurant(name=\"Cupcakes\")\n five_guys = Restaurant(name=\"Five Guys\")\n ihop = Restaurant(name=\"IHOP\")\n\n # And a few users:\n mike = User(name=\"Mike\")\n ryan = User(name=\"Ryan\")\n\n # And finally a few votes:\n mike.preferences.append(Preference(vote=\"+1\", restaurant=five_guys))\n ryan.preferences.append(Preference(vote=\"+0\", restaurant=five_guys))\n ryan.preferences.append(Preference(vote=\"-0\", restaurant=cupcake))\n\n session.add(mike)\n session.add(ryan)\n session.add(ihop)\n\n session.commit()\n\n session.close()", "def populateTable(self):\n\n output_list = self.output_ports.split(', ')\n\n for i in output_list:\n values = i.split('-')\n nextHopPort = values[0]\n linkCost = values[1]\n destId = values[2]\n learnedFrom = 0 # As it was learned from ConfigFile\n row = routing_row.RoutingRow(nextHopPort, destId, linkCost, destId, learnedFrom)\n self.addToRoutingTable(row)", "def initialize(self) -> None:\n # First, establish a connection to the specified database\n try:\n self._connect_to_db()\n except psycopg2.OperationalError: # specified database does not exist\n with psycopg2.connect(database=DATABASE_ENV[\"POSTGRES_DB\"],\n user=self.dbuser, password=self.dbpassword,\n host=self.dbhost, port=str(self.dbport)) as con:\n with con.cursor() as cur:\n con.autocommit = True # cannot create db inside a transaction\n cur.execute(f'CREATE DATABASE \"{self.dbname}\"')\n con.autocommit = False\n self._connect_to_db() # try again\n\n # Second, create the necessary database table, only if required\n with self._connection.cursor() as cur:\n cur.execute(f\"\"\"\n CREATE TABLE IF NOT EXISTS \"{self.MESSAGE_TABLE_NAME}\" (\n id SERIAL PRIMARY KEY,\n 
key CHAR(4) NOT NULL,\n value REAL NOT NULL,\n ts TIMESTAMP NOT NULL,\n tz TEXT NOT NULL\n );\n \"\"\")\n self._connection.commit()", "async def associate(self, rtb_id, subnet_id):\n self._client.associate_route_table(\n RouteTableId=rtb_id,\n SubnetId=subnet_id,\n )", "def setup_tables(self):\n try:\n self.cursor.execute('CREATE SCHEMA sandbox')\n self.cursor.execute(\"DROP TABLE sandbox.dvds_rdbhdb_super;\")\n except (db.ProgrammingError, db.OperationalError), e:\n # sandbox may not exist\n pass #raise\n\n try:\n self.cursor.execute(\n \"\"\"CREATE TABLE sandbox.dvds_rdbhdb_super(\n id SERIAL PRIMARY KEY,\n name varchar(40) NOT NULL,\n rating float,\n UNIQUE(name)\n );\n \"\"\" )\n except db.ProgrammingError, e:\n if e[0] != '42P07':\n raise", "def _generate_table(self):\n for i in xrange(32):\n dest = [0]\n gw = [0]\n self._table.append(\n {'destination': dest, 'gateway': gw}\n )", "def init_db():\n # users table\n cur.execute(\n \"CREATE TABLE IF NOT EXISTS users (\"\n \"id INTEGER PRIMARY KEY AUTO_INCREMENT,\"\n \"name VARCHAR(255) NOT NULL,\"\n \"email VARCHAR(255) NOT NULL,\"\n \"password VARCHAR(30) NOT NULL,\"\n \"birthdate DATE);\"\n )\n\n # users' phone records table\n cur.execute(\"CREATE TABLE IF NOT EXISTS records (\"\n \"id INTEGER PRIMARY KEY AUTO_INCREMENT,\"\n \"ownerID INTEGER,\"\n \"name VARCHAR(255),\"\n \"phone VARCHAR(22),\"\n \"birthdate DATE);\")", "def __init__(self, conn_string):\n from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String\n self.engine = create_engine(conn_string, echo=False)\n\n self.metadata = MetaData()\n self.metadata.reflect(bind=self.engine)\n\n self.view = Table(\"vNetwork2\", self.metadata, Column(\"systemId\", Integer), Column(\"macAddress\", Integer), Column(\"ipAddress\", Integer, primary_key=True), Column(\"fqdn\", Integer), autoload_with=self.engine)\n self.view_aliases = Table(\"vAliases\", self.metadata, Column(\"hostnameId\", Integer, primary_key=True), Column(\"aliasId\", Integer, primary_key=True), Column(\"alias\", String), Column(\"aliasDomian\", String), Column(\"host\", String), Column(\"hostDomain\", String), autoload_with=self.engine)\n\n self.tables = self.metadata.tables\n self.hostnames = Table('hostnames', self.metadata, autoload=True)\n\n self.aliases = self.tables[\"aliases\"]\n self.systems = self.tables[\"systems\"]\n self.domains = self.tables[\"domains\"]\n self.hostAddresses = self.tables[\"hostAddresses\"]\n self.hostnames = self.tables[\"hostnames\"]\n self.hostnameAliases = self.tables[\"hostnamesAliases\"]\n self.networkInterfaces = self.tables[\"networkInterfaces\"]\n self.networkSubnets = self.tables[\"networkSubnets\"]\n self.vendors = self.tables[\"vendors\"]\n self.racks = self.tables[\"racks\"]\n self.categories = self.tables[\"categories\"]\n self.manufacturers = self.tables[\"manufacturers\"]\n self.ipSurvey = self.tables[\"ipSurvey\"]\n self.storageSystems = self.tables[\"storageSystems\"]\n\n from sqlalchemy.orm import sessionmaker\n Session = sessionmaker(bind=self.engine)\n\n self.session = Session()", "def init_tables(self):\n\n settings.Base.metadata.tables[\n 'session_master'].drop(bind=settings.engine)\n settings.Base.metadata.tables['uurl'].drop(bind=settings.engine)\n\n settings.Base.metadata.tables[\n 'session_master'].create(bind=settings.engine)\n settings.Base.metadata.tables['uurl'].create(bind=settings.engine)\n\n logging.info(\"Sessionization Tables created\")", "def _init_db(self):\n cursor = self._main_connection.cursor()\n 
cursor.execute(self.sql[\"create_table\"])\n self._main_connection.commit()", "def initTable(self):\n sql = \"\"\" ( nodeId integer PRIMARY KEY,\n nextId integer,\n childId integer,\n label text);\n \"\"\"\n self.db.createTable(self.tableName + sql)\n # Reserve the first record as the head pointer, if it's not there\n found = self.db.selectById(self.tableName, 1)\n if not found:\n record = dict(nextId=None, childId=None, label='head pointer')\n self.db.insert(self.tableName, record)" ]
[ "0.67964107", "0.5831904", "0.578942", "0.5739625", "0.57393616", "0.5728153", "0.56974334", "0.5678885", "0.5677351", "0.56728756", "0.5611789", "0.5607463", "0.56032944", "0.5599525", "0.5599525", "0.5587869", "0.5570944", "0.5511227", "0.55088484", "0.55059016", "0.5493284", "0.5475553", "0.5474234", "0.5456786", "0.54502445", "0.5435253", "0.5433989", "0.5431969", "0.5431902", "0.54277533" ]
0.8008855
0
Reset an existing rpkirtr session. Reset any existing rpkirtr session for the given host and port.
def reset_rpki_rtr_session(self, host, port): cur = self.sql.cursor() cur.execute("PRAGMA foreign_keys = on") cur.execute("DELETE FROM cache WHERE host = ? and port = ?", (host, port)) self.sql.commit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset_session(self):\n if not self.is_open():\n return\n if self._active_result is not None:\n self._active_result.fetch_all()\n try:\n self.keep_open = self.protocol.send_reset(self.keep_open)\n except (InterfaceError, OperationalError) as err:\n _LOGGER.warning(\n \"Warning: An error occurred while attempting to reset the \"\n \"session: %s\",\n err,\n )", "def reset_rtr_rib_session(self, device):\n cur = self.sql.cursor()\n cur.execute(\"PRAGMA foreign_keys = on\")\n cur.execute(\"DELETE FROM rtr_cache WHERE device = ?\", (device, ))\n self.sql.commit()", "def reset(self) -> None:\n\n self.host.reset()", "def reset(self):\n\t\tmesslen, received = self.socket.send('reset\\r', 5)\t\t\n\t\treturn None", "def resetSimulator():\n\tif settings._telnet == True:\n\t\toutput('Resetting simulator...')\n\t\tsettings.obj = []\n\t\tsendData('RESET', read=True, flush=True)\n\t\t\n\t\ttry:\n\t\t\tsettings._tn.close()\n\t\texcept:\n\t\t\tpass\n\t\t\n\t\tsettings._tn = None\n\t\tsettings._telnet = False\n\t\ttime.sleep(5)\n\t\t\n\t\toutput('Reconnecting...')\n\t\tinitTelnet(settings.ip, settings.port, retries=10)\n\telse:\n\t\traise 'No active telnet connection!'", "def _reset_session(self):\n retries = self.__no_of_retries\n\n while retries > 0:\n if not self._is_session_valid():\n self._close()\n self._set_session()\n else:\n break\n retries -= 1\n else:\n raise DatabaseError.ConnectionError(\"Connection to database not available!\")", "def reset(self):\n requests.put('{}/reset'.format(self._get_url()))", "def reset(self):\n self.sql_manager.port_update(self.id, external_ip=None, external_port=None)\n self.external_port = None\n self.external_ip = None", "def renew_connection(self):\n with Controller.from_port(port=9051) as controller:\n controller.authenticate(password = self.tor_password)\n controller.signal(Signal.NEWNYM)\n self.session = self.create_session()", "def reset(self):\r\n _debug('api.reset()')\r\n self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r", "def reset(self):\r\n _debug('api.reset()')\r\n self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r", "def reset(self):\r\n _debug('api.reset()')\r\n self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r", "def reset(self):\r\n _debug('api.reset()')\r\n self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r", "def reset(self):\n self.logger.debug(\"Resetting %s\", self.key)\n self.driver.reset(self.key)", "def reset(self):\n self.reconnect()", "def softreset(self):\n try:\n self.device.write(b'\\x03') # abort\n self.device.write(b'\\x04') # reset\n self.device.write(b'\\r')\n self.__read_until(b'raw REPL; CTRL-B to exit\\r\\n>')\n except Exception as e:\n raise ReplError(e)", "def reset(self):\r\n _debug('simq03b_api.reset')\r\n self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r", "def restartserver(self, port=None):\n if port is not None:\n if port < 0: #code to try a random port\n self.parameters['port'] = random.randint(2223,50000)\n else:\n self.parameters['port'] = port\n return self.startserver()", "def restart(self):\n self.session_id = uuid.uuid4()\n self.turn_count = 0", "def reset(self):\n return self._send_command('reset')", "def reconect(self):\n self.connection.reset_connection()\n return self.connection", "def call_for_auth_reset(self):\n pos.select_dispenser(1)\n crindsim.lift_handle()\n pos.click(\"reset\")\n pos.click(\"yes\")\n crindsim.lower_handle()\n #Checks crind 
diag to see if reset message is displayed\n if not system.wait_for(lambda: \"reset\" in pos.read_dispenser_diag()[\"Status\"].lower(), verify = False):\n tc_fail(\"CRIND did not reset\")\n #Wait for crind to return to idle\n if not system.wait_for(lambda: \"idle\" in pos.read_dispenser_diag()[\"Status\"].lower(), timeout = 120, verify = False):\n tc_fail(\"CRIND did not return to idle\")\n pos.click(\"back\")", "def reset():\n if app.config['SESSION_KEY'] in session:\n session.pop(app.config['SESSION_KEY'])\n return redirect(url_for('home'))", "def reset(self, **kwargs):\n return self.env.reset(**kwargs)", "def reset(self, env):\n self._env = env\n return", "def reset(self):\n\n\t\tself._send_message(\"RESET\", \"\\x00\")", "def reset(name, runas=None):\n return prlctl(\"reset\", salt.utils.data.decode(name), runas=runas)", "def reset(self):\n error_estop = \"\"\"\\\nE-Stop is ASSERTED. Disengage E-Stop and then reset the robot.\n\"\"\"\n error_nonfatal = \"\"\"Non-fatal Robot Error on reset.\nRobot reset cleared stopped state and robot can be enabled, but a non-fatal\nerror persists. Check diagnostics or rethink.log for more info.\n\"\"\"\n error_env = \"\"\"Failed to reset robot.\nPlease verify that the ROS_IP or ROS_HOSTNAME environment variables are set\nand resolvable. For more information please visit:\nhttp://sdk.rethinkrobotics.com/wiki/RSDK_Shell#Initialize\n\"\"\"\n is_reset = lambda: (self._state.enabled == False and\n self._state.stopped == False and\n self._state.error == False and\n self._state.estop_button == 0 and\n self._state.estop_source == 0)\n pub = rospy.Publisher('robot/set_super_reset', Empty, queue_size=10)\n\n if (self._state.stopped and\n self._state.estop_button == AssemblyState.ESTOP_BUTTON_PRESSED):\n rospy.logfatal(error_estop)\n raise IOError(errno.EREMOTEIO, \"Failed to Reset: E-Stop Engaged\")\n\n rospy.loginfo(\"Resetting robot...\")\n try:\n baxter_dataflow.wait_for(\n test=is_reset,\n timeout=3.0,\n timeout_msg=error_env,\n body=pub.publish\n )\n except OSError as e:\n if e.errno == errno.ETIMEDOUT:\n if self._state.error == True and self._state.stopped == False:\n rospy.logwarn(error_nonfatal)\n return False\n raise", "def _reset_connection(self):\n\n self.__userid = 0\n self.__token = 0\n self.__conn.close()\n\n self.__conn = httplib.HTTPConnection(\"www.slimtimer.com\")\n self._logon()", "def reinitsession(cls, arg, session):\n arg = None\n print(\"Dup Session start\")\n cls.log(1, \"Dup Session start\")\n ret, username = cls.getsessionuser(session)\n if ret is False:\n print(\"Unable to reinit the session\", session, arg)\n cls.log(3, \"Unable to reinit the session\",\n session, arg)\n return False\n ret, passwd = cls.getsessionpasswd(session)\n if ret is False:\n print(\"Unable to reinit the session\", session, arg)\n cls.log(3, \"Unable to reinit the session\",\n session, arg)\n return False\n\n IP = session[\"ip_addr\"]\n # vfid = session[\"vfid\"]\n https = session[\"ishttps\"]\n # debug = session[\"debug\"]\n # throttle_delay = session[\"throttle_delay\"]\n newsession = None\n retry = 0\n for i in range(10):\n retry = i\n newsession = auth.login(username, passwd, IP, https)\n if auth.is_failed_login(newsession):\n cls.sleep(20, session)\n continue\n else:\n break\n if not auth.is_failed_login(newsession):\n # print('old', cls.sessionkey(session), 'New',\n # cls.sessionkey(newsession))\n session['credential'] = newsession['credential']\n session[\"version\"] = newsession[\"version\"]\n print(\"Dup Session Completed after Iterations:\", retry)\n 
cls.log(1, \"Dup Session Completed after Iterations:\",\n retry)\n return True\n print(\"Dup Session Failed.\")\n cls.log(2, \"Dup Session Failed.\")\n sys.exit('Exiting as session dup didn\\'t work')\n return False" ]
[ "0.6069027", "0.5886393", "0.5725652", "0.5698956", "0.5672209", "0.5669425", "0.56320226", "0.5576333", "0.5535342", "0.5509324", "0.5509324", "0.5509324", "0.5509324", "0.5490396", "0.5471192", "0.54675925", "0.54552346", "0.5447819", "0.5424177", "0.5382374", "0.5368454", "0.5339069", "0.53269637", "0.531806", "0.5257668", "0.52337813", "0.5181871", "0.5115181", "0.50987816", "0.5094631" ]
0.72867167
0
Reset a RIB query session. Delete any state corresponding to a RIB query that was issued for a particular router device.
def reset_rtr_rib_session(self, device): cur = self.sql.cursor() cur.execute("PRAGMA foreign_keys = on") cur.execute("DELETE FROM rtr_cache WHERE device = ?", (device, )) self.sql.commit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset(self):\r\n _debug('simq03b_api.reset')\r\n self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r", "def reset(self):\r\n _debug('api.reset()')\r\n self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r", "def reset(self):\r\n _debug('api.reset()')\r\n self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r", "def reset(self):\r\n _debug('api.reset()')\r\n self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r", "def reset(self):\r\n _debug('api.reset()')\r\n self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r", "def reset_session(self):\n if not self.is_open():\n return\n if self._active_result is not None:\n self._active_result.fetch_all()\n try:\n self.keep_open = self.protocol.send_reset(self.keep_open)\n except (InterfaceError, OperationalError) as err:\n _LOGGER.warning(\n \"Warning: An error occurred while attempting to reset the \"\n \"session: %s\",\n err,\n )", "def interactive_reset(self):\n # Set the initial state\n\n self.dataset.reset()\n\n self.current_turn = 0\n self.current_function = None\n self.query = \"\"\n self.query_vector = np.ones(self.dataset.getVocabularySize())\n self.keywords={\"provided\":set(), \"rejected\":set()}\n self.functions_rejected=set()\n self.result_index = 0\n self.dont_know = False\n\n self.history={\n 'system_action': {\"action\": self.SYSTEM_OPENING_ACT},\n 'user_action': None\n }", "def reset_rpki_rtr_session(self, host, port):\n cur = self.sql.cursor()\n cur.execute(\"PRAGMA foreign_keys = on\")\n cur.execute(\"DELETE FROM cache WHERE host = ? and port = ?\", (host, port))\n self.sql.commit()", "def reset(self):\r\r\n self.read(\"*cls\")\r\r\n self.waitForCompletion()\r\r\n self.read(\"*RST\") # Reset and query\r\r\n self.dev.write(\"*cls\")\r\r\n while self.read(\"*OPC?\") != \"1\": time.sleep(1) # Wait until completion\r\r", "def reset(self):\n return self._send_command('reset')", "def reset(self):\n self.algo_state = {}\n self.actual_repetitions = 0\n self.next_session = -1\n self.last_session = -1\n self.past_quality = []", "def reset(self):\n requests.put('{}/reset'.format(self._get_url()))", "def reset(self):\n self.command_stack = []\n self.refresh_table_asap = False\n\n self.scripts = set()\n\n # TODO: Implement\n # make sure to reset the connection state in the event that we were\n # watching something\n # if self.watching and self.connection:\n # try:\n # # call this manually since our unwatch or\n # # immediate_execute_command methods can call reset()\n # self.connection.send_command('UNWATCH')\n # self.connection.read_response()\n # except ConnectionError:\n # # disconnect will also remove any previous WATCHes\n # self.connection.disconnect()\n\n # clean up the other instance attributes\n self.watching = False\n self.explicit_transaction = False\n\n # TODO: Implement\n # we can safely return the connection to the pool here since we're\n # sure we're no longer WATCHing anything\n # if self.connection:\n # self.connection_pool.release(self.connection)\n # self.connection = None", "def reset (self):\n\n self.currentState = self.initialState\n self.inputSymbol = None", "def reset(self):\n self.history_states.clear()", "def clear(self):\r\n self._state[\"data\"].clear()\r\n self._state[\"session\"].request_rerun()", "def clear(self):\r\n self._state[\"data\"].clear()\r\n self._state[\"session\"].request_rerun()", "def _reset(self):\n self._model._reset()\n super(RDPAnalyzer, 
self)._reset()", "def reset(self):\n self.logger.debug(\"Resetting %s\", self.key)\n self.driver.reset(self.key)", "def reset(self):\n\n\t\tself._send_message(\"RESET\", \"\\x00\")", "def reset(self, reset_from):\n self._grants.clear()\n self._groups.clear()\n self._reset_cached()\n self._id += 1\n for name, backend in self._backends.items():\n if name == reset_from:\n continue\n backend.reload()", "def reset(self):\r\n return self._api.reset()", "def clear(self):\n self._state[\"data\"].clear()\n self._state[\"session\"].request_rerun()", "def clear(self):\n self._state[\"data\"].clear()\n self._state[\"session\"].request_rerun()", "def clear(self):\n self._state[\"data\"].clear()\n self._state[\"session\"].request_rerun()", "def reset(self):\n \n pass", "def reset(self):\r\n err = self._cfuncs['ka_reset'](self._core._get_ka())\r\n self._core._handle_error(err)", "def reset(self):\n self.reconnect()", "def reset(self):\n self.state = [\n ['R', 'N', 'B', 'Q', 'K', 'B', 'N', 'R'],\n ['P'] * 8,\n [' '] * 8,\n [' '] * 8,\n [' '] * 8,\n [' '] * 8,\n ['p'] * 8,\n ['r', 'n', 'b', 'q', 'k', 'b', 'n', 'r']\n ]", "def reset():\n pass" ]
[ "0.6432845", "0.6182595", "0.6182595", "0.6182595", "0.6182595", "0.6125877", "0.6085551", "0.60491025", "0.5917269", "0.5793116", "0.5774083", "0.57254076", "0.5672482", "0.5669636", "0.5629088", "0.5625022", "0.5625022", "0.56182265", "0.5617173", "0.56071633", "0.56038904", "0.5602701", "0.5574076", "0.5574076", "0.5574076", "0.55624896", "0.55445683", "0.5540676", "0.55339795", "0.553079" ]
0.671066
0
Fetch RIB information in conjunction with RPKI information. Construct a database 'join' of the contents of currently available rpkirtr information with currently available RIB information. The join is constructed over matching prefix ranges; that is, for cases where the rpki rtr ROA covers the route prefix in the RIB.
def get_rpki_rib(self): cur = self.sql.cursor() cur.execute("SELECT DISTINCT host, port, device, idx, asn, prefix, prefixlen, " " max_prefixlen, status, pfx, pfxlen, pfxstr_min, pfxstr_max, " " nexthop, metric, locpref, weight, pathbutone, orig_asn, route_orig " "FROM prefix " " INNER JOIN rtr_rib ON prefix_min <= pfxstr_min AND pfxstr_max <= prefix_max" " INNER JOIN rtr_cache ON rtr_cache.rtr_id = rtr_rib.rtr_id" " INNER JOIN cache ON cache.cache_id = prefix.cache_id", ()) return cur.fetchall()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_rib_tables(self):\n cur = self.sql.cursor()\n cur.execute(\"PRAGMA foreign_keys = on\")\n cur.execute('''\n CREATE TABLE rtr_cache (\n rtr_id INTEGER PRIMARY KEY NOT NULL,\n device TEXT NOT NULL,\n rtrupdt INTEGER,\n UNIQUE (device))''')\n cur.execute('''\n CREATE TABLE rtr_rib (\n rtr_id INTEGER NOT NULL\n REFERENCES rtr_cache(rtr_id)\n ON DELETE CASCADE\n ON UPDATE CASCADE,\n idx INTEGER NOT NULL,\n status TEXT,\n pfx TEXT NOT NULL,\n pfxlen INTEGER NOT NULL,\n pfxstr_min TEXT NOT NULL,\n pfxstr_max TEXT NOT NULL,\n nexthop TEXT NOT NULL,\n metric INTEGER,\n locpref INTEGER,\n weight INTEGER,\n pathbutone TEXT,\n orig_asn INTEGER NOT NULL,\n route_orig TEXT)''')\n self.sql.commit()", "def get_link_inr(network_name: str, rx_pair_inr: Dict) -> Dict:\n results: DefaultDict = defaultdict(list)\n for (rx_node, rx_from_node), inr_power in rx_pair_inr.items():\n link_name = Topology.mac_to_link_name.get(network_name, {}).get(\n (rx_node, rx_from_node)\n )\n if link_name is None:\n continue\n\n inr_db = 10 * np.log10(inr_power)\n if inr_db < HardwareConfig.MINIMUM_SNR_DB:\n continue\n\n results[link_name].append(\n {\"rx_node\": rx_node, \"rx_from_node\": rx_from_node, \"inr_curr_power\": inr_db}\n )\n return results", "def lookup_rdap(self, inc_raw=False, retry_count=3, depth=0,\r\n excluded_entities=None, bootstrap=False,\r\n rate_limit_timeout=120, asn_alts=None):\r\n\r\n from .rdap import RDAP\r\n\r\n # Create the return dictionary.\r\n results = {}\r\n\r\n asn_data = None\r\n response = None\r\n if not bootstrap:\r\n\r\n # Retrieve the ASN information.\r\n log.debug('ASN lookup for {0}'.format(self.address_str))\r\n asn_data, response = self.net.lookup_asn(retry_count, asn_alts)\r\n\r\n # Add the ASN information to the return dictionary.\r\n results.update(asn_data)\r\n\r\n # Retrieve the RDAP data and parse.\r\n rdap = RDAP(self.net)\r\n log.debug('RDAP lookup for {0}'.format(self.address_str))\r\n rdap_data = rdap.lookup(inc_raw, retry_count, asn_data, depth,\r\n excluded_entities, response, bootstrap,\r\n rate_limit_timeout)\r\n\r\n # Add the RDAP information to the return dictionary.\r\n results.update(rdap_data)\r\n\r\n return results", "def init_rpki_rtr_tables(self):\n cur = self.sql.cursor()\n cur.execute(\"PRAGMA foreign_keys = on\")\n cur.execute('''\n CREATE TABLE cache (\n cache_id INTEGER PRIMARY KEY NOT NULL,\n host TEXT NOT NULL,\n port TEXT NOT NULL,\n version INTEGER,\n nonce INTEGER,\n serial INTEGER,\n updated INTEGER,\n refresh INTEGER,\n retry INTEGER,\n expire INTEGER,\n UNIQUE (host, port))''')\n cur.execute('''\n CREATE TABLE prefix (\n prefix_id INTEGER PRIMARY KEY AUTOINCREMENT,\n cache_id INTEGER NOT NULL\n REFERENCES cache(cache_id)\n ON DELETE CASCADE\n ON UPDATE CASCADE,\n asn INTEGER NOT NULL,\n prefix TEXT NOT NULL,\n prefixlen INTEGER NOT NULL,\n max_prefixlen INTEGER NOT NULL,\n prefix_min TEXT,\n prefix_max TEXT,\n UNIQUE (cache_id, asn, prefix, prefixlen, max_prefixlen))''')\n cur.execute('''\n CREATE TABLE routerkey (\n cache_id INTEGER NOT NULL\n REFERENCES cache(cache_id)\n ON DELETE CASCADE\n ON UPDATE CASCADE,\n asn INTEGER NOT NULL,\n ski TEXT NOT NULL,\n key TEXT NOT NULL,\n UNIQUE (cache_id, asn, ski),\n UNIQUE (cache_id, asn, key))''')\n self.sql.commit()", "def get_ROIs(self, base):\n locs3d = self.locs3d\n #print loc3d\n base_locs = locs3d[base]\n ROI_dic = dict((i, [Id]) for i,Id in enumerate(base))\n for i, loc in enumerate(locs3d):\n if i not in base:\n dist = np.sqrt(np.sum((base_locs - loc)**2, 1))\n min_i = np.argmin(dist)\n 
ROI_dic[min_i].append(i)\n out = ROI_dic.values()\n return out", "def test_ipam_rirs_list(self):\n pass", "def rel_overall(self, Rb, Rk, Rt, order, flen=2):\n Relary = np.zeros(21)\n Rel = defaultdict(int)\n if (len(Rb[0]) >= flen) & (len(Rk[0]) >= flen) & (len(Rt[0]) >= flen):\n if order == self.trg_jorder:\n for j in order:\n for i in xrange(flen):\n Rel[j] += self.gw[i]*min(Rb[j][-(i+1)], Rk[j][-(i+1)], Rt[j][-(i+1)])\n Relary[j] += self.gw[i]*min(Rb[j][-(i+1)], Rk[j][-(i+1)], Rt[j][-(i+1)])\n else:\n raise ImportError('joints order not match !!')\n else:\n return Rel, np.array([])\n return Rel, Relary", "def orcid_lookup(self):\n if not hasattr(self, \"_orcid_lookup\"):\n self._orcid_lookup = {}\n self.load_data()\n for entry in self.data.get(\"contributor\", []):\n if \"@id\" in entry:\n # Orcid represented as full URL but we just want id\n orcid = entry[\"@id\"].split(\"/\")[-1]\n self._orcid_lookup[orcid] = entry\n return self._orcid_lookup", "def buildRoutesDict(self):\n \n # create route number and name xref dictionary\n arcpy.env.workspace = PublicTransit.RTD_PATH\n routes = arcpy.SearchCursor(PublicTransit.BUS_ROUTES, \"\", \"\", \"RouteID; Name\", \"\")\n self.routeXref = dict()\n for route in routes:\n self.routeXref[route.RouteID] = route.Name\n self.routeXref[route.Name] = route.RouteID\n del routes\n \n #get mode lookup table\n mode_table = self.getModeLookupTable()\n \n # Query the RTD database for the route name, operator, mode, and headways.\n # We are querying for weekday routes (DAYTYPE_CLASS Weekday field = 'Y')\n conn = pyodbc.connect(PublicTransit.DB_CONN_STRING)\n cursor = conn.cursor()\n self.transitRoutes = dict()\n qry = \"\"\"\n WITH t AS\n (\n SELECT CPT_AGENCYID, AGENCYNAME, SCH_ROUTEID, SCH_PATTERNID, CPT_MODE, SCH_ROUTEDESIGNATOR,\n CASE\n WHEN HOUR_CLASS >= 3 and HOUR_CLASS < 6 THEN 'EA'\n WHEN HOUR_CLASS >= 6 and HOUR_CLASS < 10 THEN 'AM'\n WHEN HOUR_CLASS >= 10 and HOUR_CLASS < 15 THEN 'MD'\n WHEN HOUR_CLASS >= 15 and HOUR_CLASS < 19 THEN 'PM'\n WHEN (HOUR_CLASS BETWEEN 19 AND 24) OR HOUR_CLASS < 3 THEN 'EV'\n END AS tod,\n [HOURLY_FREQUENCY(Daily until HOUR_CLASS update)], HOUR_CLASS\n FROM dbo.[ROUTE HEADWAY AND FREQUENCY]\n WHERE DAYTYPE_CLASS IN\n (SELECT dc.CLASS FROM dbo.DAYTYPE_CLASS dc WHERE WEEKDAY = 'Y')\n )\n SELECT CPT_AGENCYID, AGENCYNAME, SCH_ROUTEID, SCH_PATTERNID, CPT_MODE, SCH_ROUTEDESIGNATOR, tod,\n 60.0 / ROUND(AVG(CAST([HOURLY_FREQUENCY(Daily until HOUR_CLASS update)] AS FLOAT)), 0) as headway\n FROM t\n GROUP BY CPT_AGENCYID, AGENCYNAME, SCH_ROUTEID, SCH_PATTERNID, CPT_MODE, SCH_ROUTEDESIGNATOR, tod\n ORDER BY SCH_ROUTEID, SCH_PATTERNID, tod\"\"\"\n \n used_route_names = []\n # Iterate through result set and apply attributes.\n for row in cursor.execute(qry):\n routePattern = str(row.SCH_ROUTEID) + \"_\" + str(row.SCH_PATTERNID)\n if routePattern not in self.transitRoutes:\n self.transitRoutes[routePattern] = TransitRoute(routePattern,\n routeId = row.SCH_ROUTEID,\n patternId = row.SCH_PATTERNID)\n self.transitRoutes[routePattern].new_name = self.__cleanRouteName(row.CPT_AGENCYID + \"_\" + row.SCH_ROUTEDESIGNATOR[:(11 - 1 - len(row.CPT_AGENCYID))],used_route_names) #12 is the maximum name length\n self.transitRoutes[routePattern].agency = row.AGENCYNAME\n mode = -1\n for mode_row in mode_table:\n if row.CPT_AGENCYID == mode_row[\"CPT_AGENCYID\"] and row.CPT_MODE == mode_row[\"CPT_MODE\"]:\n if mode_row[\"SCH_ROUTEDESIGNATOR\"] != \"NA\":\n if row.SCH_ROUTEDESIGNATOR == mode_row[\"SCH_ROUTEDESIGNATOR\"]:\n mode = 
mode_row[\"MODECODE\"]\n mode_group = Mode.getModeFromLookupTable(mode_row[\"MODEGROUP\"])\n break #this is as detailed as we can get\n else:\n mode = mode_row[\"MODECODE\"]\n mode_group = Mode.getModeFromLookupTable(mode_row[\"MODEGROUP\"])\n self.transitRoutes[routePattern].mode = mode\n self.transitRoutes[routePattern].mode_group = Mode.getModeName(mode_group)\n # set headways\n if row.tod == 'EA':\n self.transitRoutes[routePattern].eaHeadway = row.headway\n elif row.tod == 'AM':\n self.transitRoutes[routePattern].amHeadway = row.headway\n elif row.tod == 'MD':\n self.transitRoutes[routePattern].mdHeadway = row.headway\n elif row.tod == 'PM':\n self.transitRoutes[routePattern].pmHeadway = row.headway\n elif row.tod == 'EV':\n self.transitRoutes[routePattern].evHeadway = row.headway\n conn.close()", "def test_ipam_rirs_read(self):\n pass", "def selective_search_roidb(self):\n # cache_path: data/cache/Actions/\n # self.name: rcnn_<>_<>\n # cache_file: data/cache/Actions/rcnn_<datatype>_<imageset>_selective_search_roidb.pkl\n cache_file = None\n if cfg.LESSEN_DEBUG_TIME:\n lessen_debug_str = cfg.LESSEN_DEBUG_STR\n cache_file = os.path.join(self.cache_path, self.name + \"_\" + \n lessen_debug_str + '_selective_search_roidb.pkl')\n else:\n cache_file = os.path.join(self.cache_path, self.name + '_selective_search_roidb.pkl')\n\n if os.path.exists(cache_file):\n with open(cache_file, 'rb') as fid:\n roidb = cPickle.load(fid)\n print '{} ss roidb loaded from {}'.format(self.name, cache_file)\n return roidb\n\n if self._image_set != 'test':\n gt_roidb = self.gt_roidb()\n gt_roidb, ss_roidb = self._load_selective_search_roidb(gt_roidb)\n roidb = action_datasets.imdb.merge_roidbs(gt_roidb, ss_roidb)\n else:\n roidb = self._load_selective_search_roidb(None)\n\n\n with open(cache_file, 'wb') as fid:\n cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)\n print 'wrote ss roidb to {}'.format(cache_file)\n\n return roidb", "def _back_compat_hook_ri_data(self):\n\n # Nothing to do.\n if not (hasattr(cdp, 'frq_labels') and hasattr(cdp, 'noe_r1_table') and hasattr(cdp, 'remap_table')):\n return\n\n # Initialise the new structures.\n cdp.ri_ids = []\n cdp.ri_type = {}\n frq = {} # This will be placed into cdp later as cdp.spectrometer_frq still exists.\n\n # Generate the new structures.\n for i in range(cdp.num_ri):\n # The ID.\n ri_id = \"%s_%s\" % (cdp.ri_labels[i], cdp.frq_labels[cdp.remap_table[i]])\n\n # Not unique.\n if ri_id in cdp.ri_ids:\n # Loop until a unique ID is found.\n for j in range(100):\n # New id.\n new_id = \"%s_%s\" % (ri_id, j)\n\n # Unique.\n if not new_id in cdp.ri_ids:\n ri_id = new_id\n break\n\n # Add the ID.\n cdp.ri_ids.append(ri_id)\n\n # The relaxation data type.\n cdp.ri_type[ri_id] = cdp.ri_labels[i]\n\n # The frequency data.\n frq[ri_id] = cdp.frq[cdp.remap_table[i]]\n\n # Delete the old structures.\n del cdp.frq\n del cdp.frq_labels\n del cdp.noe_r1_table\n del cdp.num_frq\n del cdp.num_ri\n del cdp.remap_table\n del cdp.ri_labels\n\n # Set the frequencies.\n cdp.frq = frq", "def mainRoads(osm_path): \n return retrieve(osm_path,'lines',['highway','oneway','lanes','maxspeed'],**{'highway':[\"='primary' or \",\"='trunk' or \",\"='motorway' or \",\"='motorway_link' or \",\"='trunk_link' or \",\"='primary_link' or \", \"='secondary' or \",\"='tertiary' or \",\"='tertiary_link'\"]})", "def get_rib(self, node):\n if not self.net.is_running:\n error(\"The network is not running.\")\n r = self._get_node(node)\n if r not in self.net.routers:\n return None\n out = 
r.cmd(self.RIBCommand)\n if 'Connection refused' not in out:\n return self._parse_rib(out)\n else:\n return None", "def buildLinksDict(self):\n \n arcpy.env.workspace = PublicTransit.RTD_PATH\n # Check if feature layer already exists; if so, delete it.\n if arcpy.Exists(PublicTransit.ROUTE_EDGES_FEATURE_LYR):\n arcpy.Delete_management(PublicTransit.ROUTE_EDGES_FEATURE_LYR)\n # Create a feature layer based on bus route traversal edges, and join to\n # the Roadways feature class.\n arcpy.MakeFeatureLayer_management(PublicTransit.BUS_ROUTE_TRAVERSAL_EDGES,\n PublicTransit.ROUTE_EDGES_FEATURE_LYR)\n routeTraversalEdgesJoinField = \"SourceOID\"\n roadwaysJoinField = \"LinkId\"\n arcpy.AddJoin_management(PublicTransit.ROUTE_EDGES_FEATURE_LYR,\n routeTraversalEdgesJoinField,\n PublicTransit.ROADWAYS_FC,\n roadwaysJoinField,\n \"KEEP_COMMON\")\n self.linksDict = dict()\n \n linkIdField = \"Roadways.LinkId\"\n fromNodeField = \"Roadways.F_JNCTID\"\n toNodeField = \"Roadways.T_JNCTID\"\n onewayField = \"Roadways.ONEWAY\"\n \n links = arcpy.SearchCursor(PublicTransit.ROUTE_EDGES_FEATURE_LYR, \"\", \"\",\n linkIdField + \";\" + fromNodeField + \";\" +\n toNodeField + \";\" + onewayField, \"\") \n print \"Found %d links\" % \\\n int(arcpy.GetCount_management(PublicTransit.ROUTE_EDGES_FEATURE_LYR).getOutput(0))\n \n linkIter = 0\n # Add link to dictionary if both the from and to node are in the nodes dictionary.\n for l in links:\n linkId = l.getValue(linkIdField)\n fromNode = self.__getIdHash(l.getValue(fromNodeField))\n toNode = self.__getIdHash(l.getValue(toNodeField))\n oneWay = l.getValue(onewayField)\n if (linkId not in self.linksDict):\n if (fromNode in self.nodesDict and toNode in self.nodesDict):\n self.linksDict[linkId] = Link(linkId, self.nodesDict[fromNode],\n self.nodesDict[toNode], oneWay)\n linkIter += 1\n if (linkIter % 10000 == 0):\n print \"processed %d links\" % (linkIter)\n del l\n del links\n arcpy.Delete_management(PublicTransit.ROUTE_EDGES_FEATURE_LYR)", "def rib_update(self, updates):\n\n self.logger.debug(\"RIB Update\")\n\n if self.rib_timing:\n start_time = clock()\n\n for tmp_update in updates:\n if 'announce' in tmp_update:\n update = tmp_update[\"announce\"]\n elif 'withdraw' in tmp_update:\n update = tmp_update[\"withdraw\"]\n update[\"as_path\"] = None\n else:\n self.logger.debug(\"Got invalid update from route server\")\n return\n\n advertising_participant = self.config.portip_2_participant[update[\"next_hop\"]]\n participants = self.config.participants[advertising_participant].peers_in\n\n for participant in participants:\n self.update_forbidden_paths(update[\"prefix\"],\n update[\"as_path\"],\n None,\n participant,\n advertising_participant)\n\n if 'announce' in tmp_update:\n ingress_participants = self.policy_handler.get_ingress_participants(advertising_participant)\n active_policies = True if ingress_participants else False\n ingress_participants.update(self.rib.get_all_participants_using_best_path(update[\"prefix\"],\n advertising_participant))\n\n filter_participants = self.rib.get_all_receiver_participants(update[\"prefix\"], advertising_participant)\n\n ingress_participants = ingress_participants.intersection(filter_participants)\n filter_participants = self.rib.get_all_participants_advertising(update[\"prefix\"])\n ingress_participants = ingress_participants.difference(filter_participants)\n\n if ingress_participants:\n receiver_participant = self.get_first_sdx_participant_on_path(update[\"prefix\"], advertising_participant)\n\n if receiver_participant:\n 
co_update, old_co_entry, new_co_entry = self.cib.update_out(advertising_participant,\n update[\"prefix\"],\n receiver_participant,\n ingress_participants,\n self.config.id,\n active_policies)\n random_value = randint(0, self.config.loop_detector.max_random_value)\n timestamp = time()\n\n self.notify_nh_sdx(co_update, old_co_entry, new_co_entry, timestamp, random_value)\n else:\n co_update, old_co_entry, new_co_entry = self.cib.delete_out_entry(advertising_participant,\n update[\"prefix\"])\n random_value = randint(0, self.config.loop_detector.max_random_value)\n timestamp = time()\n\n self.notify_nh_sdx(co_update, old_co_entry, new_co_entry, timestamp, random_value)\n\n if self.rib_timing:\n end_time = clock()\n with open(self.rib_timing_file, \"a\") as outfile:\n outfile.write(str(end_time - start_time) + '\\n')\n\n self.logger.debug(\"Done processing RIB Update\")", "def key_klifs_residues(numbering):\n if numbering == None:\n print(\"The structure was not found in the klifs database.\")\n key_res = None\n return key_res\n\n key_res = dict() #initialize key_res (which read from the 0-based numbering list)\n for i in range(5):\n key_res[f'group{i}'] = list()\n ## feature group 0: A-loop backbone dihedrals\n key_res['group0'].append(numbering[83]) # start of A-loop\n\n ## feature group 1: P-loop backbone dihedrals\n key_res['group1'].append(numbering[3]) # res0 in P-loop\n key_res['group1'].append(numbering[4]) # res1 in P-loop\n key_res['group1'].append(numbering[5]) # res2 in P-loop\n key_res['group1'].append(numbering[6]) # res3 in P-loop\n key_res['group1'].append(numbering[7]) # res4 in P-loop\n key_res['group1'].append(numbering[8]) # res5 in P-loop\n\n ## feature group 2: aC-related features\n #angle between aC and aE helices\n key_res['group2'].append(numbering[19]) # res0 in aC\n key_res['group2'].append(numbering[29]) # res10 in aC\n key_res['group2'].append(numbering[62]) # end of aE\n\n # key salt bridge\n key_res['group2'].append(numbering[16]) # K in beta III\n key_res['group2'].append(numbering[23]) # E in aC\n\n ## feature group 3: DFG-related features\n key_res['group3'].append(numbering[79]) # X-DFG\n key_res['group3'].append(numbering[80]) # DFG-Asp\n key_res['group3'].append(numbering[81]) # DFG-Phe\n key_res['group3'].append(numbering[27]) # ExxxX\n\n ## feature group 4: the FRET distance\n # not in the list of 85 (equivalent to Aura\"S284\"), use the 100% conserved beta III K as a reference\n key_res['group4'].append(numbering[16] + 120)\n\n # not in the list of 85 (equivalent to Aura\"L225\"), use the 100% conserved beta III K as a reference\n key_res['group4'].append(numbering[16] + 61)\n\n return key_res", "def testCorrectJoin(self):\n b_tree = OOBTree()\n b_tree.update({1: \"Monkey D. Luffy\", 2: \"Roronoa Zoro\", 3: \"Nami\"})\n failed_counter = 0\n key = 1\n data = {\"from\":\"East Blue\"}\n (mod_data, mod_tree, failed_counter) = self.processing.join(b_tree, key, data, failed_counter)\n self.assertEqual(mod_data, {\"from\":\"East Blue\", \"right_data\":\"Monkey D. 
Luffy\"})\n self.assertEqual(len(mod_tree), 2)\n self.assertEqual(failed_counter, 0)", "def trio_hits(l_contig, mid_contig_end, blast_hits, olap_dict, cont_dict):\n mid_contig = mid_contig_end.split(\"prime_\")[1]\n length_mid = len(cont_dict[mid_contig].seq) \n right_links = []\n right_links = list(olap_dict[mid_contig_end].keys())\n \n #If contigs are chromosomal, ensure they are adjacent\n if chr_links:\n if l_contig in chr_links: \n new_right_links= []\n for r_l in right_links:\n if r_l not in chr_links or r_l in chr_links[l_contig]:\n new_right_links.append(r_l)\n right_links = new_right_links \n \n if len(right_links) == 1:\n outcome = right_links[0]\n elif len(right_links) == 0:\n outcome = \"link_listempty\"\n else:\n left_matches = []\n for hit in blast_hits:\n if (hit[0] == l_contig) and (int(hit[11]) >= l_min_score):\n left_matches.append(hit)\n link_count = {}\n for link in right_links:\n \n right_matches = []\n\n for hit in blast_hits:\n if (hit[0] == link) and (int(hit[11]) >= r_min_score): \n right_matches.append(hit)\n \n for lhit in left_matches:\n for rhit in right_matches:\n if lhit[1] == rhit[1]:\n lh_start = int(lhit[8])\n lh_end = int(lhit[9])\n rh_start = int(rhit[8])\n rh_end = int(rhit[9])\n if abs(lh_start - rh_start) < length_mid + 100:\n if (lh_end - lh_start)/(rh_end - rh_start) < 0:\n if abs(lh_end - rh_end) > abs(lh_start - rh_start):\n link_score = int(lhit[11]) * int(rhit[11])\n if not link in link_count: \n link_count[link] = link_score\n elif link_score > link_count[link]:\n link_count[link] = link_score\n \n number_of_matches = len(link_count)\n if number_of_matches == 1:\n outcome = list(link_count.keys())[0]\n if number_of_matches == 0:\n outcome = \"link_unmatched\"\n if number_of_matches > 1:\n outcome = max(link_count, key = link_count.get)\n\n return outcome", "def get_adj_rib_in(afi_safi, peer_ip):\n prefix = flask.request.args.get('prefix')\n community = flask.request.args.get('community')\n if prefix or community:\n if prefix:\n return flask.jsonify({\n 'attr': api_utils.get_adj_rib_in(peer_ip, afi_safi, prefix)\n })\n else:\n return flask.jsonify({\n 'prefixes': api_utils.get_adj_rib_in(peer_ip, afi_safi)}\n )", "def get_roidb(self):\n # get all the images_ids in this dataset\n img_ids = self._COCO.getImgIds()\n # sort the ids, make each time the same order\n img_ids.sort()\n # load the 'image' of the COCO dataset\n roidb = copy.deepcopy(self._COCO.loadImgs(img_ids))\n for entry in roidb:\n # predefine some attribute of each image\n self._prep_roidb_entry(entry)\n\n # for cahce_dir\n if not os.path.exists(self.cache_dir):\n os.mkdir(self.cache_dir)\n cache_file = os.path.join(self.cache_dir, self.annFile + '_gt_roidb.pkl')\n if os.path.exists(cache_file):\n self._read_roidb_from_cachefile(roidb, cache_file)\n print('{} gt roidb loaded from {}'.format(self.annFile, cache_file))\n else:\n for entry in roidb:\n self._add_roidb_from_annotations(entry)\n with open(cache_file, 'wb') as f:\n pickle.dump(roidb, f, pickle.HIGHEST_PROTOCOL)\n print('wrote gt roidb to {}'.format(cache_file))\n return roidb", "def link_riboswitch(self, riboswitch):\n if riboswitch.id not in [rs.id for rs in self.riboswitches]:\n self.riboswitches.append(riboswitch)", "def auto_join(blue_range, red_range, coadd_consecutive=True, do_joins=False):\r\n\r\n coadd_map = []\r\n current_obj_ra = ''\r\n current_blue = []\r\n current_red = []\r\n objects = []\r\n b = blue_range[0]\r\n r = red_range[0]\r\n while (b <= blue_range[1]) or (r <= red_range[1]):\r\n print b, r\r\n\r\n def 
load_hdrpars(i,side='blue'):\r\n filename = '%s%04d.fits' % (side, i)\r\n file_exists = os.path.exists(filename)\r\n if file_exists:\r\n hdr = pyfits.getheader(filename)\r\n # just use the nearest arcsec--small telescope drifts otherwise\r\n return True, hdr['OBJECT'],hdr['RA'][:8],hdr['DEC'][:9]\r\n else:\r\n return False, None, None, None\r\n\r\n bfileexists, bobj, bra, bdec = load_hdrpars(b,side='blue')\r\n rfileexists, robj, rra, rdec = load_hdrpars(r,side='red')\r\n\r\n if bfileexists and rfileexists and (bra == rra) and (bdec == rdec):\r\n # both sides observe same object\r\n if (rra == current_obj_ra) and coadd_consecutive:\r\n # which matches the previous object\r\n current_blue.append(b)\r\n current_red.append(r)\r\n current_obj = robj\r\n else:\r\n # both sides observe a new object\r\n if current_obj_ra != '': # starting the list\r\n coadd_map.append((current_blue, current_red))\r\n current_obj = robj\r\n objects.append(current_obj)\r\n current_blue = [b]\r\n current_red = [r]\r\n current_obj_ra = rra\r\n b+=1\r\n r+=1\r\n else:\r\n # both sides observe different objects (or one side is missing)\r\n if rfileexists and (rra == current_obj_ra) and coadd_consecutive:\r\n current_red.append(r)\r\n r+=1\r\n elif bfileexists and (bra == current_obj_ra) and coadd_consecutive:\r\n current_blue.append(b)\r\n b+=1\r\n else:\r\n # some other state. save last object\r\n coadd_map.append((current_blue, current_red))\r\n objects.append(current_obj)\r\n\r\n # peek ahead\r\n _, nbobj, nbra, nbdec = load_hdrpars(b+1,side='blue')\r\n _, nrobj, nrra, nrdec = load_hdrpars(r+1,side='red')\r\n\r\n # does current blue match either of next objects?\r\n if bfileexists:\r\n if (bra != nbra) and (bra != nrra):\r\n # no--write it out by itself\r\n coadd_map.append(([b],[]))\r\n current_blue = []\r\n objects.append(bobj)\r\n else:\r\n # save and continue\r\n current_blue = [b]\r\n current_obj = bobj\r\n b+=1\r\n\r\n # does current red match either of next objects?\r\n if rfileexists:\r\n if (rra != nbra) and (rra != nrra):\r\n # no--write it out by itself\r\n coadd_map.append(([],[r]))\r\n current_red = []\r\n objects.append(robj)\r\n else:\r\n # save and continue\r\n current_red = [r]\r\n current_obj = robj\r\n current_ra = rra\r\n r+=1\r\n\r\n # save final object\r\n coadd_map.append((current_blue, current_red))\r\n\r\n for x in zip(objects, coadd_map):\r\n print x\r\n if do_joins:\r\n for lists in coadd_map:\r\n combine_sides(lists[0], lists[1],splot='no')\r\n\r\n return coadd_map, objects", "def get_roidb_and_dataset(dataset_name, proposal_file, ind_range):\n dataset = JsonDataset(dataset_name)\n if cfg.TEST.PRECOMPUTED_PROPOSALS:\n assert proposal_file, 'No proposal file given'\n roidb = dataset.get_roidb(\n proposal_file=proposal_file,\n proposal_limit=cfg.TEST.PROPOSAL_LIMIT\n )\n else:\n roidb = dataset.get_roidb()\n\n if ind_range is not None:\n total_num_images = len(roidb)\n start, end = ind_range\n roidb = roidb[start:end]\n else:\n start = 0\n end = len(roidb)\n total_num_images = end\n\n return roidb, dataset, start, end, total_num_images", "def dojoin(ipath1,ipath2,opath):\n r1 = '%s.map' % ipath1\n r2 = '%s.map' % ipath2\n if not mapsMatch(r1,r2):\n print '### maps %s and %s do not match' % (r1,r2)\n sys.exit(1)\n outpath = '%s.map' % opath\n shutil.copyfile(r1,outpath)\n r1 = '%s.eigenstratgeno' % ipath1\n r2 = '%s.eigenstratgeno' % ipath2\n outpath = '%s.eigenstratgeno' % opath\n joinRows(r1,r2,outpath)\n outpath = '%s.ind' % opath\n r1 = '%s.ind' % ipath1\n r2 = '%s.ind' % ipath2\n 
joinInds(r1,r2,outpath)", "def _vROHR(self,vKNOT=None):\r\n\r\n logStr = \"{0:s}.{1:s}: \".format(self.__class__.__name__, sys._getframe().f_code.co_name)\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'Start.')) \r\n \r\n try: \r\n vROHR=None \r\n \r\n vROHR=pd.merge(self.dataFrames['ROHR'],self.dataFrames['ROHR_BZ'],left_on='pk',right_on='fk')\r\n\r\n vROHR=vROHR[[\r\n 'BESCHREIBUNG'\r\n ,'IDREFERENZ'\r\n #Asset\r\n ,'BAUJAHR','HAL'\r\n ,'IPLANUNG','KENNUNG'\r\n #Reibung\r\n ,'L','LZU','RAU','ZAUS','ZEIN','ZUML'\r\n ,'JLAMBS','LAMBDA0'\r\n #inst.\r\n ,'ASOLL','INDSCHALL'\r\n #FW\r\n ,'fk2LROHR','KVR'\r\n #Ref.\r\n ,'fkCONT'\r\n ,'fkDTRO_ROWD'\r\n ,'fkLTGR','fkSTRASSE'\r\n ,'fkKI','fkKK'\r\n #IDs \r\n ,'pk_x','tk'\r\n ,'GEOM','GRAF'\r\n #BZ\r\n ,'IRTRENN'\r\n ,'LECKSTART','LECKEND','LECKMENGE','LECKORT','LECKSTATUS'\r\n #Rest\r\n ,'QSVB'\r\n ,'ZVLIMPTNZ'\r\n ,'KANTENZV'\r\n ]]\r\n\r\n vROHR.rename(columns={'pk_x':'pk'},inplace=True)\r\n vROHR=pd.merge(vROHR,self.dataFrames['CONT'],left_on='fkCONT',right_on='pk')\r\n\r\n if 'IDREFERENZ_x' in vROHR.columns.tolist(): #90-12\r\n vROHR.rename(columns={'IDREFERENZ_x':'IDREFERENZ'},inplace=True)\r\n\r\n vROHR=vROHR[[\r\n 'BESCHREIBUNG'\r\n ,'IDREFERENZ'\r\n #Asset\r\n ,'BAUJAHR','HAL'\r\n ,'IPLANUNG','KENNUNG'\r\n #Reibung\r\n ,'L','LZU','RAU','ZAUS','ZEIN','ZUML'\r\n ,'JLAMBS','LAMBDA0'\r\n #inst.\r\n ,'ASOLL','INDSCHALL'\r\n #FW\r\n ,'fk2LROHR','KVR'\r\n #Ref.\r\n ,'fkDTRO_ROWD'\r\n ,'fkLTGR','fkSTRASSE'\r\n ,'fkKI','fkKK'\r\n #IDs \r\n ,'pk_x','tk_x'\r\n ,'GEOM_x','GRAF_x'\r\n #BZ\r\n ,'IRTRENN'\r\n ,'LECKSTART','LECKEND','LECKMENGE','LECKORT','LECKSTATUS'\r\n #Rest\r\n ,'QSVB'\r\n ,'ZVLIMPTNZ'\r\n ,'KANTENZV'\r\n #CONT\r\n ,'NAME' \r\n ,'ID'\r\n ,'LFDNR'\r\n ]]\r\n vROHR.rename(columns={'pk_x':'pk','tk_x':'tk','NAME':'CONT','ID':'CONT_ID','LFDNR':'CONT_LFDNR'},inplace=True) \r\n vROHR=pd.merge(vROHR,self.dataFrames['DTRO_ROWD'],left_on='fkDTRO_ROWD',right_on='pk') \r\n\r\n vROHR=vROHR[[\r\n 'BESCHREIBUNG'\r\n ,'IDREFERENZ'\r\n #Asset\r\n ,'BAUJAHR','HAL'\r\n ,'IPLANUNG','KENNUNG'\r\n #Reibung\r\n ,'L','LZU','RAU','ZAUS','ZEIN','ZUML'\r\n ,'JLAMBS','LAMBDA0'\r\n #inst.\r\n ,'ASOLL','INDSCHALL'\r\n #FW\r\n ,'fk2LROHR','KVR'\r\n #DTRO_ROWD\r\n ,'AUSFALLZEIT', 'DA', 'DI', 'DN', 'KT', 'PN', 'REHABILITATION','REPARATUR', 'S', 'WSTEIG', 'WTIEFE'\r\n #Ref.\r\n ,'fkLTGR','fkSTRASSE'\r\n ,'fkKI','fkKK'\r\n #IDs \r\n ,'pk_x','tk_x'\r\n ,'GEOM_x','GRAF_x'\r\n #BZ\r\n ,'IRTRENN'\r\n ,'LECKSTART','LECKEND','LECKMENGE','LECKORT','LECKSTATUS'\r\n #Rest\r\n ,'QSVB'\r\n ,'ZVLIMPTNZ'\r\n ,'KANTENZV'\r\n #CONT\r\n ,'CONT' \r\n ,'CONT_ID'\r\n ,'CONT_LFDNR'\r\n ]]\r\n vROHR.rename(columns={'pk_x':'pk','tk_x':'tk'},inplace=True)\r\n vROHR=pd.merge(vROHR,self.dataFrames['LTGR'],left_on='fkLTGR',right_on='pk')\r\n\r\n vROHR=vROHR[[\r\n 'BESCHREIBUNG_x'\r\n ,'IDREFERENZ'\r\n #Asset\r\n ,'BAUJAHR','HAL'\r\n ,'IPLANUNG','KENNUNG'\r\n #Reibung\r\n ,'L','LZU','RAU','ZAUS','ZEIN','ZUML'\r\n ,'JLAMBS','LAMBDA0'\r\n #inst.\r\n ,'ASOLL','INDSCHALL'\r\n #FW\r\n ,'fk2LROHR','KVR'\r\n #DTRO_ROWD\r\n ,'AUSFALLZEIT', 'DA', 'DI', 'DN', 'KT', 'PN', 'REHABILITATION','REPARATUR', 'S', 'WSTEIG', 'WTIEFE'\r\n #LTGR\r\n ,'NAME','BESCHREIBUNG_y','SICHTBARKEIT','VERLEGEART','fkDTRO','fkSRAT'\r\n #Ref.\r\n ,'fkSTRASSE'\r\n ,'fkKI','fkKK'\r\n #IDs \r\n ,'pk_x','tk_x'\r\n ,'GEOM_x','GRAF_x'\r\n #BZ\r\n ,'IRTRENN'\r\n ,'LECKSTART','LECKEND','LECKMENGE','LECKORT','LECKSTATUS'\r\n #Rest\r\n ,'QSVB'\r\n ,'ZVLIMPTNZ'\r\n ,'KANTENZV'\r\n #CONT\r\n ,'CONT' \r\n ,'CONT_ID'\r\n ,'CONT_LFDNR'\r\n 
]]\r\n vROHR.rename(columns={'pk_x':'pk','tk_x':'tk','NAME':'LTGR_NAME','BESCHREIBUNG_y':'LTGR_BESCHREIBUNG','BESCHREIBUNG_x':'BESCHREIBUNG'},inplace=True)\r\n\r\n vROHR=vROHR[[\r\n 'BESCHREIBUNG'\r\n ,'IDREFERENZ'\r\n #Asset\r\n ,'BAUJAHR','HAL'\r\n ,'IPLANUNG','KENNUNG'\r\n #Reibung\r\n ,'L','LZU','RAU','ZAUS','ZEIN','ZUML'\r\n ,'JLAMBS','LAMBDA0'\r\n #inst.\r\n ,'ASOLL','INDSCHALL'\r\n #FW\r\n ,'fk2LROHR','KVR'\r\n #DTRO_ROWD\r\n ,'AUSFALLZEIT', 'DA', 'DI', 'DN', 'KT', 'PN', 'REHABILITATION','REPARATUR', 'S', 'WSTEIG', 'WTIEFE'\r\n #LTGR\r\n ,'LTGR_NAME','LTGR_BESCHREIBUNG','SICHTBARKEIT','VERLEGEART','fkDTRO','fkSRAT'\r\n #Ref.\r\n ,'fkSTRASSE'\r\n ,'fkKI','fkKK'\r\n #IDs \r\n ,'pk','tk'\r\n ,'GEOM_x','GRAF_x'\r\n #BZ\r\n ,'IRTRENN'\r\n ,'LECKSTART','LECKEND','LECKMENGE','LECKORT','LECKSTATUS'\r\n #Rest\r\n ,'QSVB'\r\n ,'ZVLIMPTNZ'\r\n ,'KANTENZV'\r\n #CONT\r\n ,'CONT' \r\n ,'CONT_ID'\r\n ,'CONT_LFDNR'\r\n ]]\r\n \r\n vROHR=pd.merge(vROHR,self.dataFrames['DTRO'],left_on='fkDTRO',right_on='pk')\r\n\r\n if 'IDREFERENZ_x' in vROHR.columns.tolist(): #90-12\r\n vROHR.rename(columns={'IDREFERENZ_x':'IDREFERENZ'},inplace=True)\r\n\r\n vROHR=vROHR[[\r\n 'BESCHREIBUNG_x'\r\n ,'IDREFERENZ'\r\n #Asset\r\n ,'BAUJAHR','HAL'\r\n ,'IPLANUNG','KENNUNG'\r\n #Reibung\r\n ,'L','LZU','RAU','ZAUS','ZEIN','ZUML'\r\n ,'JLAMBS','LAMBDA0'\r\n #inst.\r\n ,'ASOLL','INDSCHALL'\r\n #FW\r\n ,'fk2LROHR','KVR'\r\n #DTRO_ROWD\r\n ,'AUSFALLZEIT', 'DA', 'DI', 'DN', 'KT', 'PN', 'REHABILITATION','REPARATUR', 'S', 'WSTEIG', 'WTIEFE'\r\n #LTGR\r\n ,'LTGR_NAME','LTGR_BESCHREIBUNG','SICHTBARKEIT','VERLEGEART'\r\n #DTRO\r\n ,'NAME'\r\n ,'BESCHREIBUNG_y'\r\n ,'E'\r\n #Ref.\r\n ,'fkSTRASSE','fkSRAT'\r\n ,'fkKI','fkKK'\r\n #IDs \r\n ,'pk_x','tk_x'\r\n ,'GEOM_x','GRAF_x'\r\n #BZ\r\n ,'IRTRENN'\r\n ,'LECKSTART','LECKEND','LECKMENGE','LECKORT','LECKSTATUS'\r\n #Rest\r\n ,'QSVB'\r\n ,'ZVLIMPTNZ'\r\n ,'KANTENZV'\r\n #CONT\r\n ,'CONT' \r\n ,'CONT_ID'\r\n ,'CONT_LFDNR'\r\n ]]\r\n vROHR.rename(columns={'pk_x':'pk','tk_x':'tk','NAME':'DTRO_NAME','BESCHREIBUNG_y':'DTRO_BESCHREIBUNG','BESCHREIBUNG_x':'BESCHREIBUNG'},inplace=True)\r\n \r\n #logger.debug(\"{:s} vor fkKI: {!s:s}\".format(logStr,(vROHR))) \r\n vROHR=pd.merge(vROHR,vKNOT,left_on='fkKI',right_on='pk') \r\n #logger.debug(\"{:s} nach fkKI: {!s:s}\".format(logStr,(vROHR))) \r\n vROHR.rename(columns={'BESCHREIBUNG_x':'BESCHREIBUNG','IDREFERENZ_x':'IDREFERENZ'\r\n ,'pk_x':'pk','tk_x':'tk'\r\n ,'CONT_ID_x':'CONT_ID','CONT_LFDNR_x':'CONT_LFDNR'\r\n },inplace=True) \r\n\r\n vROHR=vROHR[[\r\n 'BESCHREIBUNG'\r\n ,'IDREFERENZ'\r\n #Asset\r\n ,'BAUJAHR','HAL'\r\n ,'IPLANUNG','KENNUNG'\r\n #Reibung\r\n ,'L','LZU','RAU','ZAUS','ZEIN','ZUML'\r\n ,'JLAMBS','LAMBDA0'\r\n #inst.\r\n ,'ASOLL','INDSCHALL'\r\n #FW\r\n ,'fk2LROHR','KVR_x'\r\n #DTRO_ROWD\r\n ,'AUSFALLZEIT', 'DA', 'DI', 'DN', 'KT', 'PN', 'REHABILITATION','REPARATUR', 'S', 'WSTEIG', 'WTIEFE'\r\n #LTGR\r\n ,'LTGR_NAME','LTGR_BESCHREIBUNG','SICHTBARKEIT','VERLEGEART'\r\n #DTRO\r\n ,'DTRO_NAME'\r\n ,'DTRO_BESCHREIBUNG'\r\n ,'E'\r\n #Ref.\r\n ,'fkSTRASSE','fkSRAT'\r\n ,'fkKK'\r\n #IDs \r\n ,'pk','tk'\r\n ,'GEOM_x','GRAF_x'\r\n #BZ\r\n ,'IRTRENN'\r\n ,'LECKSTART','LECKEND','LECKMENGE','LECKORT','LECKSTATUS'\r\n #Rest\r\n ,'QSVB'\r\n ,'ZVLIMPTNZ'\r\n ,'KANTENZV'\r\n #CONT\r\n ,'CONT_x' \r\n ,'CONT_ID'\r\n ,'CONT_LFDNR'\r\n #Ki\r\n ,'NAME'\r\n ,'KVR_y','TM'\r\n ,'XKOR','YKOR','ZKOR'\r\n ,'pXCor','pYCor'\r\n ]]\r\n\r\n vROHR.rename(columns={'NAME':'NAME_i','KVR_x':'KVR','KVR_y':'KVR_i','TM':'TM_i','CONT_x':'CONT'},inplace=True) \r\n 
vROHR.rename(columns={'XKOR':'XKOR_i','YKOR':'YKOR_i','ZKOR':'ZKOR_i'\r\n ,'pXCor':'pXCor_i'\r\n ,'pYCor':'pYCor_i'\r\n },inplace=True) \r\n \r\n vROHR=pd.merge(vROHR,vKNOT,left_on='fkKK',right_on='pk') \r\n vROHR.rename(columns={'BESCHREIBUNG_x':'BESCHREIBUNG','IDREFERENZ_x':'IDREFERENZ'\r\n ,'pk_x':'pk','tk_x':'tk'\r\n ,'CONT_ID_x':'CONT_ID','CONT_LFDNR_x':'CONT_LFDNR'\r\n },inplace=True) \r\n\r\n vROHR.rename(columns={'NAME':'NAME_k','KVR_x':'KVR','KVR_y':'KVR_k','TM':'TM_k','CONT_x':'CONT'},inplace=True) \r\n vROHR.rename(columns={'XKOR':'XKOR_k','YKOR':'YKOR_k','ZKOR':'ZKOR_k'\r\n ,'pXCor':'pXCor_k'\r\n ,'pYCor':'pYCor_k'\r\n },inplace=True) \r\n\r\n vROHR['pXCors']=[[xi,xk] for xi,xk in zip(vROHR['pXCor_i'],vROHR['pXCor_k'])]\r\n vROHR['pYCors']=[[xi,xk] for xi,xk in zip(vROHR['pYCor_i'],vROHR['pYCor_k'])]\r\n\r\n vROHR.rename(columns={'GEOM_x':'GEOM'},inplace=True) \r\n\r\n vROHR=pd.merge(vROHR,vROHR,left_on='fk2LROHR',right_on='pk',how='left',suffixes=('','_2L')) \r\n\r\n vROHR=vROHR[[\r\n 'BESCHREIBUNG'\r\n ,'IDREFERENZ'\r\n #Asset\r\n ,'BAUJAHR','HAL'\r\n ,'IPLANUNG','KENNUNG'\r\n #Reibung\r\n ,'L','LZU','RAU','ZAUS','ZEIN','ZUML'\r\n ,'JLAMBS','LAMBDA0'\r\n #inst.\r\n ,'ASOLL','INDSCHALL'\r\n #FW\r\n ,'NAME_i_2L'\r\n ,'NAME_k_2L'\r\n ,'KVR' \r\n #DTRO_ROWD\r\n ,'AUSFALLZEIT', 'DA', 'DI', 'DN', 'KT', 'PN', 'REHABILITATION','REPARATUR', 'S', 'WSTEIG', 'WTIEFE'\r\n #LTGR\r\n ,'LTGR_NAME','LTGR_BESCHREIBUNG','SICHTBARKEIT','VERLEGEART'\r\n #DTRO\r\n ,'DTRO_NAME'\r\n ,'DTRO_BESCHREIBUNG'\r\n ,'E'\r\n #Ref.\r\n ,'fkSTRASSE','fkSRAT'\r\n #IDs \r\n ,'pk','tk' \r\n #BZ\r\n ,'IRTRENN'\r\n ,'LECKSTART','LECKEND','LECKMENGE','LECKORT','LECKSTATUS'\r\n #Rest\r\n ,'QSVB'\r\n ,'ZVLIMPTNZ'\r\n ,'KANTENZV'\r\n #CONT\r\n ,'CONT' \r\n ,'CONT_ID'\r\n ,'CONT_LFDNR'\r\n #Ki\r\n ,'NAME_i'\r\n ,'KVR_i','TM_i'\r\n ,'XKOR_i','YKOR_i','ZKOR_i' \r\n #Kk\r\n ,'NAME_k'\r\n ,'KVR_k','TM_k'\r\n ,'XKOR_k','YKOR_k','ZKOR_k'\r\n #plotCors\r\n ,'pXCor_i','pYCor_i'\r\n ,'pXCor_k','pYCor_k'\r\n # matplotlibs's .plot(pXCors,pYCors,...)\r\n ,'pXCors','pYCors' # nur die Endpunkte \r\n # ...........\r\n ,'GEOM'\r\n ]]\r\n \r\n # WAYP ###\r\n vROHR['WAYP']=[list() for dummy in vROHR['pk']] # leere Liste von Wegpunkten\r\n for index,row in vROHR.iterrows():\r\n if pd.isnull(row.GEOM): \r\n continue\r\n geomBytes=base64.b64decode(row.GEOM)\r\n # 1. Byte: Endianess: 0=little\r\n # 1. 
Byte auslassen\r\n \r\n # 2 ints lesen ...\r\n headerData = struct.unpack('2i',geomBytes[1:9]) \r\n graphType,NOfWaypoints=headerData # graphType: Werte von 1 bis 6 bedeuten: Point, LineString, Polygon, MultiPoint, ...\r\n \r\n # xy-Koordinatenpaare lesen \r\n # 2 double: xi, yi\r\n for idx in range(NOfWaypoints):\r\n offset=9+idx*16 \r\n end=offset+16 \r\n waypXY=struct.unpack('2d',geomBytes[offset:end]) \r\n row.WAYP.append(waypXY)\r\n \r\n vROHR['pWAYPXCors']=[list() for dummy in vROHR['pk']] # leere Liste von pWegpunkten X\r\n vROHR['pWAYPYCors']=[list() for dummy in vROHR['pk']] # leere Liste von pWegpunkten Y\r\n for index,row in vROHR.iterrows():\r\n for waypXY in row.WAYP:\r\n X,Y=waypXY\r\n if int(row.CONT_ID)==1001:\r\n row.pWAYPXCors.append(X-self.pXCorZero)\r\n row.pWAYPYCors.append(Y-self.pYCorZero)\r\n else:\r\n row.pWAYPXCors.append(X)\r\n row.pWAYPYCors.append(Y)\r\n\r\n vROHR=vROHR[[\r\n 'BESCHREIBUNG'\r\n ,'IDREFERENZ'\r\n #Asset\r\n ,'BAUJAHR','HAL'\r\n ,'IPLANUNG','KENNUNG'\r\n #Reibung\r\n ,'L','LZU','RAU','ZAUS','ZEIN','ZUML'\r\n ,'JLAMBS','LAMBDA0'\r\n #inst.\r\n ,'ASOLL','INDSCHALL'\r\n #FW\r\n ,'NAME_i_2L'\r\n ,'NAME_k_2L'\r\n ,'KVR'\r\n #DTRO_ROWD\r\n ,'AUSFALLZEIT', 'DA', 'DI', 'DN', 'KT', 'PN', 'REHABILITATION','REPARATUR', 'S', 'WSTEIG', 'WTIEFE'\r\n #LTGR\r\n ,'LTGR_NAME','LTGR_BESCHREIBUNG','SICHTBARKEIT','VERLEGEART'\r\n #DTRO\r\n ,'DTRO_NAME'\r\n ,'DTRO_BESCHREIBUNG'\r\n ,'E'\r\n #Ref.\r\n ,'fkSTRASSE','fkSRAT'\r\n #IDs \r\n ,'pk','tk' \r\n #BZ\r\n ,'IRTRENN'\r\n ,'LECKSTART','LECKEND','LECKMENGE','LECKORT','LECKSTATUS'\r\n #Rest\r\n ,'QSVB'\r\n ,'ZVLIMPTNZ'\r\n ,'KANTENZV'\r\n #CONT\r\n ,'CONT' \r\n ,'CONT_ID'\r\n ,'CONT_LFDNR'\r\n #Ki\r\n ,'NAME_i'\r\n ,'KVR_i','TM_i'\r\n ,'XKOR_i','YKOR_i','ZKOR_i' \r\n #Kk\r\n ,'NAME_k'\r\n ,'KVR_k','TM_k'\r\n ,'XKOR_k','YKOR_k','ZKOR_k'\r\n #plotCors\r\n ,'pXCor_i','pYCor_i'\r\n ,'pXCor_k','pYCor_k'\r\n # matplotlibs's .plot(pXCors,pYCors,...)\r\n ,'pXCors','pYCors' # nur die Endpunkte\r\n ,'pWAYPXCors','pWAYPYCors' # alle Wegpunkte\r\n #WAYP\r\n ,'WAYP' #List of Tuples: [(x1,y1),...,(xN,yN)] \r\n ]]\r\n\r\n except Exception as e:\r\n logStrFinal=\"{:s}Exception: Line: {:d}: {!s:s}: {:s}\".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))\r\n logger.error(logStrFinal) \r\n raise XmError(logStrFinal) \r\n finally:\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'_Done.')) \r\n return vROHR", "def overlap_join(ltable, rtable,\n l_key_attr, r_key_attr,\n l_join_attr, r_join_attr,\n tokenizer, threshold, comp_op='>=',\n allow_missing=False,\n l_out_attrs=None, r_out_attrs=None,\n l_out_prefix='l_', r_out_prefix='r_',\n out_sim_score=True, n_jobs=1, show_progress=True):\n\n from py_stringsimjoin import __use_cython__ \n if __use_cython__:\n from py_stringsimjoin.join.overlap_join_cy import overlap_join_cy\n return overlap_join_cy(ltable, rtable, \n l_key_attr, r_key_attr, \n l_join_attr, r_join_attr, \n tokenizer, threshold, comp_op, allow_missing, \n l_out_attrs, r_out_attrs, \n l_out_prefix, r_out_prefix, \n out_sim_score, n_jobs, show_progress)\n else:\n from py_stringsimjoin.join.overlap_join_py import overlap_join_py\n return overlap_join_py(ltable, rtable, \n l_key_attr, r_key_attr, \n l_join_attr, r_join_attr, \n tokenizer, threshold, comp_op, allow_missing, \n l_out_attrs, r_out_attrs, \n l_out_prefix, r_out_prefix, \n out_sim_score, n_jobs, show_progress)", "def merge_roidb(roidbs):\n roidb = roidbs[0]\n for r in roidbs[1:]:\n roidb.extend(r)\n return roidb", "def merge_roidb(roidbs):\n roidb = roidbs[0]\n 
for r in roidbs[1:]:\n roidb.extend(r)\n return roidb", "def rbpdb_data_load(rna_info, out=None):\n del out # this function doesn't emit progress status (yet)!\n rbpdb_protein_file_path = (\n \"./website/data/RBPDB_v1.3.1_proteins_human_2012-11-21.tdt\"\n )\n letter_strength = RBPDB_MOTIF_PWM_LETTER_STRENGTH\n n_repeat_req = RBPDB_MOTIF_N_REPEAT_REQ\n rna_seq = get_human_seq(rna_info)\n\n experiment_id_to_pwm_dict = (\n picklify(\n generate_rbpdb_experimental_to_pwm, letter_strength, n_repeat_req\n )\n )\n protein_id_to_experimental_ids_dict = (\n picklify(generate_rbpdb_protein_to_experiment_id)\n )\n experiment_id_to_columns_dict = (\n picklify(generate_rbpdb_experiment_to_columns)\n )\n with open(rbpdb_protein_file_path) as handle:\n _ = handle.readline().strip().split('\\t')\n # columns here is expected to have the following information in the\n # following order:\n # protein_id, annotation_id, creation_date, update_date, gene_name,\n # gene_description, species, taxID, domains, aliases, flag, flag_notes,\n # some_other_id\n protein_columns = handle.readline().replace(\"\\n\", \"\").split('\\t')\n while protein_columns != ['']:\n assert len(protein_columns) == 13\n # We only care about human RBPs for now.\n if protein_columns[10] == \"0\":\n protein_columns = (\n handle.readline().replace(\"\\n\", \"\").split('\\t')\n )\n continue\n rbp = protein_columns[4]\n protein_id = protein_columns[0]\n\n if protein_id not in protein_id_to_experimental_ids_dict:\n # No experiments associated. So no data to be had\n protein_columns = (\n handle.readline().replace(\"\\n\", \"\").split('\\t')\n )\n continue\n\n for experiment_id in (\n protein_id_to_experimental_ids_dict[protein_id]\n ):\n assert (\n experiment_id in experiment_id_to_pwm_dict\n or experiment_id == \"410\"\n )\n if experiment_id == \"410\":\n continue\n pwms = experiment_id_to_pwm_dict[experiment_id]\n for pwm in pwms:\n assert len(pwm[\"A\"]) > 0\n experimental_columns = (\n experiment_id_to_columns_dict[experiment_id]\n )\n assert len(experimental_columns) == 15\n total_columns = protein_columns + experimental_columns\n annotation = (\n ANNOTATION_COLUMN_DELIMITER.join(\n [\n total_columns[i]\n for i in rbpdb_columns_of_interest\n ]\n )\n )\n\n if pwm_degree_of_freedom(pwm) >= 2048:\n # experimentally shown that by this point naive brute\n # force is faster. Bound could be\n # reduced.\n sites = pwm_scan_naive_brute_force(rna_seq, pwm)\n else:\n sites = pwm_scan(rna_seq, pwm)\n\n if not sites:\n continue\n\n for start, end in sites:\n yield rbp, start, end, annotation\n\n protein_columns = handle.readline().replace(\"\\n\", \"\").split('\\t')" ]
[ "0.54669535", "0.50892144", "0.50018066", "0.48029062", "0.47392702", "0.47358343", "0.46948", "0.46539682", "0.46516523", "0.46367228", "0.46123815", "0.4599465", "0.45961982", "0.45904016", "0.45855385", "0.45748854", "0.4573801", "0.4570454", "0.45699817", "0.4546437", "0.45456466", "0.45295972", "0.45112592", "0.4500018", "0.44998905", "0.4499282", "0.44958532", "0.44954744", "0.44954744", "0.44692683" ]
0.73909044
0
Retry until f succeeds or an exception that isn't caused by EINTR occurs.
def until_not_interrupted(f, *args, **kw): while True: try: return f(*args, **kw) except (IOError, OSError) as e: if e.args[0] == errno.EINTR: continue raise
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _eintr_retry(func, *args):\n while True:\n try:\n return func(*args)\n except (OSError, select.error) as e:\n if e.args[0] != errno.EINTR:\n raise", "def _eintr_retry(func, *args):\n\twhile True:\n\t\ttry:\n\t\t\treturn func(*args)\n\t\texcept (OSError, select.error) as e:\n\t\t\tif e.args[0] != errno.EINTR:\n\t\t\t\traise", "def _retry(self, f):\n count = 0\n while True:\n try:\n return f()\n # http://initd.org/psycopg/docs/module.html#psycopg2.DatabaseError\n # handle operational error - memory allocation, unexpected disconnect\n except psycopg2.OperationalError, oe:\n count += 1\n if count < self._max_retries:\n LOGGER.warn(\"Transient Error Received %s \", oe)\n time.sleep(self._retry_period)\n else:\n LOGGER.error(\"Unrecoverable Error %s\", oe)\n raise oe\n # other database errors - integrity, internal, programming error etc\n except psycopg2.DatabaseError, de:\n LOGGER.error(\"Database Error %s\", de)\n raise de\n # interface errors\n except psycopg2.Error, e:\n raise e", "def interruptable(func, *args, **opts):\r\n while True:\r\n try:\r\n result = func(*args, **opts)\r\n except IOError, e:\r\n if e.errno == errno.EINTR:\r\n continue\r\n raise e\r\n except OSError, e:\r\n if e.errno == errno.EINTR:\r\n continue\r\n raise e\r\n else:\r\n break\r\n return result", "def _poll_until_no_exception(self, fn, expected_exception, max_retries=20, retry_delay=3):\n\n for i in range(max_retries):\n try:\n return fn()\n except expected_exception:\n if i == max_retries - 1:\n raise\n if self.is_live:\n time.sleep(retry_delay)", "def retry_on_refuse(f, *args, **kwargs):\n i = 0\n while True:\n try:\n i += 1\n f(*args, **kwargs)\n break\n except (OSError, socket.error) as e:\n if e.args[0] != socket.errno.ECONNREFUSED or i > 10000:\n raise\n else:\n time.sleep(0.001)", "def _poll_until_exception(self, fn, expected_exception, max_retries=20, retry_delay=3):\n\n for _ in range(max_retries):\n try:\n fn()\n if self.is_live:\n time.sleep(retry_delay)\n except expected_exception:\n return\n\n self.fail(\"expected exception {expected_exception} was not raised\")", "def try_complete(tries, f, after_fail=lambda try_no: None, cond=lambda res: res):\n for i in xrange(tries):\n res = f()\n if cond(res):\n return res\n after_fail(i)\n raise NotCompleted()", "def execute_with_retry(f, args=[], kwargs={}, retry_on=(Exception,),\n max_tries=3, sleep=5):\n attempt = 0\n result = None\n while attempt < max_tries:\n attempt += 1\n try:\n result = f(*args, **kwargs)\n break\n except retry_on, e:\n if attempt >= max_tries:\n raise e\n log(\"Function call failed ('%s': %i/%i).\\n\"\n \"Reason: %s.\\n\"\n \"Wait for %i sec before retry...\"\n % (f.__name__, attempt, max_tries, str(e), sleep))\n time.sleep(sleep)\n return result", "def create_attempter(f):\n def attempt(fn, *args, **kwargs):\n if f.done():\n return\n\n try:\n fn(*args, **kwargs)\n except Exception as e:\n f.set_exception(e)\n\n return attempt", "def _Retry(func, *args, **kwargs):\n retries = _RETRIES\n while True:\n try:\n return func(*args, **kwargs)\n except Exception as e: # pylint: disable=broad-except\n retries -= 1\n if retries > 0:\n log.info('Exception {e} thrown in {func}. 
Retrying.'.format(\n e=e, func=func.__name__))\n time.sleep(1)\n else:\n raise e", "def wait_for(func, expected_exceptions=(), retries=60):\n\n retries = int(retries)\n for retry in range(1, retries + 1):\n try:\n return_value = func()\n if return_value:\n break\n\n except expected_exceptions:\n if retry == retries:\n raise\n else:\n pass\n\n time.sleep(1)\n\n return return_value", "def retry(retries, task_f, check_f=bool, wait_f=None):\n for attempt in range(retries):\n ret = task_f()\n if check_f(ret):\n return ret\n if attempt < retries - 1 and wait_f is not None:\n wait_f(attempt)\n raise RetryException(\"Giving up after {} failed attempt(s)\".format(retries))", "def ensure_redis_call(f, *args, **kwargs):\n attempts = kwargs.pop('attempts', 5)\n\n for i in six.moves.range(attempts + 1):\n try:\n return f(*args, **kwargs)\n\n except (ConnectionError, TimeoutError) as e:\n if i == attempts:\n raise\n else:\n wait = 2 ** i\n msg = (\n 'Will reattempt to execute {} with args={} kwargs={} '\n 'after {} seconds due to exception {}: {}'\n ''.format(f, args, kwargs, wait, type(e).__name__, e)\n )\n print(msg)\n time.sleep(wait)", "def always_retry(e):\n return True", "def retry_on_exception(func, num_tries=40, period_in_seconds=DEFAULT_PERIOD,\n error=None):\n for x in range(num_tries):\n try:\n return func()\n except Exception as e:\n if error and e.error_code == error:\n logging.info(\"Skipping on exception %s\" % error)\n break\n if x == (num_tries - 1):\n raise RuntimeError(\"Failed on %d tries: %s\" % (num_tries, e))\n logging.info(\"Got exception %s on try number %s...\" % (e, x))\n\n time.sleep(period_in_seconds)", "def _retry_on_deadlock(f):\n @functools.wraps(f)\n def wrapped(*args, **kwargs):\n while True:\n try:\n return f(*args, **kwargs)\n except db_exc.DBDeadlock:\n LOG.warn(_(\"Deadlock detected when running \"\n \"'%(func_name)s': Retrying...\"),\n dict(func_name=f.__name__))\n # Retry!\n time.sleep(0.5)\n continue\n functools.update_wrapper(wrapped, f)\n return wrapped", "def retryConnection(func, *args, **kw):\n try_num = 0\n while True:\n try:\n web_file = func(*args, **kw)\n break\n except IOError:\n try_num += 1\n if try_num >= retry:\n raise IOError\n if p_out:\n print(\"retry connection...\")\n os.system('sleep ' + str(sleeptime))\n return web_file", "def _non_blocking(f):\n def g(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except socket.timeout:\n pass\n return g", "def _retry_on_deadlock(f):\n @functools.wraps(f)\n def wrapped(*args, **kwargs):\n while True:\n try:\n return f(*args, **kwargs)\n except db_exc.DBDeadlock:\n LOG.warning(_LW(\"Deadlock detected when running \"\n \"'%(func_name)s': Retrying...\"),\n dict(func_name=f.__name__))\n # Retry!\n time.sleep(0.5)\n continue\n functools.update_wrapper(wrapped, f)\n return wrapped", "def _retry_on_deadlock(f):\n @functools.wraps(f)\n def wrapped(*args, **kwargs):\n while True:\n try:\n return f(*args, **kwargs)\n except db_exc.DBDeadlock:\n LOG.warning(\"Deadlock detected when running \"\n \"'%(func_name)s': Retrying...\",\n dict(func_name=f.__name__))\n # Retry!\n time.sleep(0.5)\n continue\n functools.update_wrapper(wrapped, f)\n return wrapped", "def libvirt_retry(self, op):\n end_time = time.time() + 30.0\n ignore = [\n # libvirt connection closed for some reason, just retry\n \"Unable to read from monitor: Connection reset by peer\",\n # lxc container starting often fails as they're started\n # simultaneously with the same device names, use a unique\n # name to work around it.\n # 
http://www.redhat.com/archives/libvir-list/2013-August/msg01475.html\n \"RTNETLINK answers: File exists\",\n ]\n while True:\n try:\n return op()\n except libvirt.libvirtError as error:\n if not any(ignorable in str(error) for ignorable in ignore):\n # some other error, raise immediately\n raise\n\n time_left = max(end_time - time.time(), 0)\n if not time_left:\n # timeout\n raise\n\n self.log.warning(\"got possibly transient error '%s' from libvirt, retrying for %.1fs...\",\n error, time_left)\n time.sleep(1.0)", "def test_retry_raises_error_on_negative_retries(self):\n\n @retry(Exception, max_retries=-1)\n def f():\n raise Exception(\"Faulty function\")\n\n self.assertRaises(ValueError, f)", "def retry_on_exception(func, max_attempts=5, ignored_exceptions=(StaleElementReferenceException, InvalidElementStateException)):\r\n attempt = 0\r\n while attempt < max_attempts:\r\n try:\r\n return func()\r\n except ignored_exceptions:\r\n world.wait(1)\r\n attempt += 1\r\n\r\n assert_true(attempt < max_attempts, 'Ran out of attempts to execute {}'.format(func))", "def call_with_retries(function, retry_count, retry_delay):\n logger.info(\"Calling function: %s with retry count: %s, retry_delay: %s\",\n function, retry_count, retry_delay)\n for retry in range(1, int(retry_count) + 1):\n logger.info(\"Attempt number: %s\", retry)\n try:\n return function()\n # pylint: disable=broad-except\n except Exception as verify_exception:\n logger.info(\"Verify exception: %s\", verify_exception)\n time.sleep(float(retry_delay))\n if retry > int(retry_count):\n logger.info(\"Exceeded max retries! Reraising last exception\")\n raise\n assert False, \"Should never get here.\"", "def die_on_error(f):\n def wrapped(*args, **kwargs):\n result = f(*args, **kwargs)\n if result.returncode == 1:\n sys.exit(1)\n return wrapped", "def retry(nattempts, exception=None):\n \n def tryIt(func):\n def wrapper(*args, **kwargs):\n attempts = 0\n while attempts < nattempts - 1:\n try:\n return func(*args, **kwargs)\n except (exception if exception is not None else Exception):\n attempts += 1\n return func(*args, **kwargs)\n return wrapper\n return tryIt", "def smart_retry(f):\n # type: (Callable) -> CallableT\n\n @functools.wraps(f)\n def wrapper(api_instance, *args, **kwargs):\n # type: (UnifiAPI, *Any, **Any) -> Any\n try:\n return f(api_instance, *args, **kwargs)\n except Unauthorized as e:\n\n api_instance.log.debug(\n \"An exception occurred when executing %s: %s. Refreshing the connection to the Controller and retrying\",\n f.__name__,\n e,\n )\n api_instance.connect()\n return f(api_instance, *args, **kwargs)\n\n except Exception:\n raise\n\n return cast(CallableT, wrapper)", "def retryCall(fn, args=None, keywordArgs=None, failureTester=None, sleepManager=None):\n sleepManager = sleepManager or time.SleepManager()\n while True:\n try:\n result = yield fn(*args, **keywordArgs)\n defer.returnValue(result)\n except Exception: # pylint: disable=W0703\n failureTester(failure.Failure())\n yield sleepManager.sleep()", "def retry(callback, retries, sleep=0.5, catch=Exception, *args, **kwargs):\n r = 0\n while r < retries:\n r += 1\n try:\n return callback(*args, **kwargs)\n except catch as c:\n if r == retries:\n raise c\n else:\n time.sleep(r * sleep)" ]
[ "0.7447151", "0.7388192", "0.679798", "0.6735481", "0.6682818", "0.64191544", "0.6375819", "0.62591887", "0.6195957", "0.60207826", "0.6017203", "0.5943427", "0.59385973", "0.5765465", "0.57129365", "0.5606272", "0.55996346", "0.5597502", "0.55897963", "0.55773646", "0.55732906", "0.5540544", "0.5531441", "0.55151576", "0.55122894", "0.5477822", "0.54383934", "0.541592", "0.5401455", "0.53890496" ]
0.76732606
0
Evaluates the Stumpff function C(z) according to the Equation 3.53
def stump_C(z) : if z > 0 : return (1 - cos(sqrt(z)))/z elif z < 0 : return (cosh(sqrt(-z)) - 1)/(-z) else : return 0.5
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stump_S(z) :\n\n if z > 0:\n sz = sqrt(z) \n return (sz - sin(sz))/pow(sz,3)\n elif z < 0 :\n s_z = sqrt(-z) \n # According to the equation the denominatori is pow(sqrt(z),3)\n return (sinh(s_z) - s_z)/pow(s_z,3)\n else :\n return 0.1666666666666666", "def SFR(self, z):\n sfr = (0.017 + 0.13 * z)/(1 + np.power(z/3.3, 5.3))\n return sfr", "def f(z):\n a=1./(1.+z)\n #da=0.01\n da=0.01*a\n #da=1e-7\n gp,g,gm=[D(1./ia-1.) for ia in [a+da,a,a-da]]\n f=a*(gp-gm)/(2*g*da)\n #dz=0.01\n #gp,g,gm=[D(zi) for zi in [z+dz,z,z-dz]]\n #f=(z)*(gp-gm)/(2.*g*dz)\n return f", "def _call(self, z): \n self.a = F.softplus(self.a)\n self.w = F.softmax(self.w, dim=1)\n # Compute\n pre_sigm = self.a * z + self.b\n sigm = torch.sigmoid(pre_sigm)\n x_pre = self.w * sigm\n if (len(z.shape) > 2):\n x_pre = torch.sum(self.w * sigm, dim=1)\n x_pre_clipped = x_pre * (1 - self.eps) + self.eps * 0.5\n zp = torch.log(x_pre_clipped) - torch.log(1 - x_pre_clipped)\n return zp", "def cPofZ(self,arr,zx):\n sig1=(zx-arr['zmg'])/arr['sigma_pz']\n sig2=(zx-self.zcat)/self.sigmacat\n return (1.-self.Acat)*0.5*(1 + erf(sig1/np.sqrt(2)))+self.Acat*0.5*(1 + erf(sig2/np.sqrt(2)))", "def pz_fn(self, z):\n pass", "def f_c(self, z, m):\n\t return (1. - self.f_o(z))*m/(self.ALPHA_1*self.mmax + m)", "def f_c(self, z, m):\n\t return (1. - self.f_o(z))*m/(self.ALPHA_1*self.MMAX + m)", "def c_s(self, z):\n R = self.R_bg(z)\n return const.c/np.sqrt(3.*(1.+R))", "def st(self, sigma, z):\n \n aa = self.params[0]\n a = self.params[1]\n p = self.params[2]\n delta_c = self.params[3]\n \n return aa * sqrt(2.e0*a/pi) * (delta_c/sigma) * exp( (-a*delta_c*delta_c) / (2.e0*sigma*sigma) )*(1.e0 + pow( ( (sigma*sigma)/(a*delta_c*delta_c) ) , p ))", "def f(z,c):\n zz = z*z + c\n return zz", "def fsigma8(z):\n fs8=f(z)*sigma8(z)\n return fs8", "def cPofZ(self,arr,zx):\n f_obs,sigma = arr\n #results=np.zeros((self.ngals))#((arr.size))\n# cdfs = np.cumsum(np.array([[self.getpdf(f_obs[n],sigma[n],z,self.dz)\n# for z in np.arange(0.,zx+self.dz,self.dz)] for n in range(self.ngals)]),\n# axis=1)\n cdfs = np.cumsum(np.array([self.PofZ(arr,z,self.dz)\n for z in np.arange(0.,zx+self.dz,self.dz)]),axis=1)\n# mask = pdfs.sum(axis=1)!=0\n# xarrs = arr[mask]['z'][:,np.newaxis] + self.dz\n# masked_pdf = np.where(xarrs<zx, pdfs[mask], 0.)\n# results[mask] = np.sum(masked_pdf, axis=1)\n return cdfs#results", "def __call__(self, z):\n if self._norm is None:\n self._norm = simps(lambda t: self.pz_fn(t), 0.0, self.config[\"zmax\"], 256)\n return self.pz_fn(z) / self._norm", "def zsx_s(self):\n\n return self.glb[user_params_index[\"Zs\"]]/self.glb[user_params_index[\"Xs\"]]", "def evaluate(self, z):\n\t\treturn self.p(z) / self.q(z)", "def _derZ(self, x, y, z):\n if _isscalar(x):\n x_pos = max(min(self.xSearchFunc(self.x_list, x), self.x_n - 1), 1)\n y_pos = max(min(self.ySearchFunc(self.y_list, y), self.y_n - 1), 1)\n z_pos = max(min(self.zSearchFunc(self.z_list, z), self.z_n - 1), 1)\n else:\n x_pos = self.xSearchFunc(self.x_list, x)\n x_pos[x_pos < 1] = 1\n x_pos[x_pos > self.x_n - 1] = self.x_n - 1\n y_pos = self.ySearchFunc(self.y_list, y)\n y_pos[y_pos < 1] = 1\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n z_pos = self.zSearchFunc(self.z_list, z)\n z_pos[z_pos < 1] = 1\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n alpha = (x - self.x_list[x_pos - 1]) / (\n self.x_list[x_pos] - self.x_list[x_pos - 1]\n )\n beta = (y - self.y_list[y_pos - 1]) / (\n self.y_list[y_pos] - self.y_list[y_pos - 1]\n )\n dfdz = (\n (\n (1 - alpha) * (1 - beta) * self.f_values[x_pos - 1, 
y_pos - 1, z_pos]\n + (1 - alpha) * beta * self.f_values[x_pos - 1, y_pos, z_pos]\n + alpha * (1 - beta) * self.f_values[x_pos, y_pos - 1, z_pos]\n + alpha * beta * self.f_values[x_pos, y_pos, z_pos]\n )\n - (\n (1 - alpha)\n * (1 - beta)\n * self.f_values[x_pos - 1, y_pos - 1, z_pos - 1]\n + (1 - alpha) * beta * self.f_values[x_pos - 1, y_pos, z_pos - 1]\n + alpha * (1 - beta) * self.f_values[x_pos, y_pos - 1, z_pos - 1]\n + alpha * beta * self.f_values[x_pos, y_pos, z_pos - 1]\n )\n ) / (self.z_list[z_pos] - self.z_list[z_pos - 1])\n return dfdz", "def f_o(self, z):\n\t return exp(-(z/self.MU)**self.CIRC_3)", "def f_o(self, z):\n\t return exp(-(z/self.MU)**self.CIRC_3)", "def uSat(self, k, m, z):\n result = self.ProfNFW.nfw(k, m, z)\n result *= self.Nsat(m) / self.nBarGal(1./(1.+z))\n return result", "def uSat(self, k, m, z):\n result = self.ProfNFW.nfw(k, m, z)\n result *= self.Nsat(m) / self.nBarGal(1./(1.+z))\n return result", "def closure(Z):\r\n Z = np.array(Z)\r\n Z = Z/float(np.sum(Z))\r\n if any(Z < 0):\r\n return None\r\n else:\r\n return Z", "def PofZ(self,arr,z,dz):\n norm=(1-self.Acat)/np.sqrt(2*np.pi)/arr[\"sigma_pz\"]\n norm2=self.Acat/np.sqrt(2*np.pi)/self.sigmacat\n return np.exp(-(arr['zmg']-z)**2/(2*arr['sigma_pz']**2))*norm+np.exp(-(self.zcat-z)**2/(2*self.sigmacat**2))*norm2", "def U_z(z, test_energy_fun):\n z1 = z[:, 0]\n z2 = z[:, 1]\n\n if test_energy_fun == 1:\n return 0.5*((tf.sqrt(z1**2 + z2**2) - 2)/0.4)**2 - tf.log(tf.exp(-0.5*((z1 - 2)/0.6)**2) + tf.exp(-0.5*((z1 + 2)/0.6)**2))\n elif test_energy_fun == 2:\n w1 = tf.sin((2.*np.pi*z1)/4.)\n return 0.5*((z2 - w1) / 0.4)**2\n elif test_energy_fun == 3:\n w1 = tf.sin((2.*np.pi*z1)/4.)\n w2 = 3.*tf.exp(-0.5*((z1 - 1)/0.6)**2)\n return -tf.log(tf.exp(-0.5*((z2 - w1)/0.35)**2) + tf.exp(-0.5*((z2 - w1 + w2)/0.35)**2))\n elif test_energy_fun == 4:\n w1 = tf.sin((2.*np.pi*z1)/4.)\n w3 = 3.*tf.sigmoid((z1 - 1)/0.3)**4\n return -tf.log(tf.exp(-0.5*((z2 - w1)/0.4)**2) + tf.exp(-0.5*((z2 - w1 + w3)/0.35)**2))\n else:\n raise ValueError('invalid `test_energy_fun`')", "def u(self, k, m, z):\n result = self.ProfNFW.nfw(k, m, z) * self.Ngal(m) / self.nBarGal(1./(1.+z))\n return result", "def Fx_case_C(z, x, gamma, alp, lamb):\n \n beta2 = 1-1/gamma**2\n beta = sqrt(beta2)\n \n sin2a = sin(2*alp)\n cos2a = cos(2*alp) \n eta = eta_case_C(z, x, beta2, alp, lamb)\n kap = (2*(alp - z) + eta + lamb)/beta # kappa for case C\n #kap = sqrt( lamb**2 + eta**2 + x**2 + 4*(1+x)*sin(alp)**2 + 2*(lamb + eta*(1+x))*sin2a + 2*lamb*eta*cos2a)\n \n #N1 = (1 + beta2)*(1+x)\n #N2 = -(1 + beta2*(1+x)**2)*cos2a\n #N3 = (eta - beta*kap + beta2*lamb*(1+x))*sin2a\n #return (N1+N2+N3)/D**3\n\n N_Ex = 1+x - cos2a + (eta - beta*kap)*sin2a\n N_By = beta*( (1+x)*cos2a - 1 - lamb*sin2a )\n \n D = kap - beta*(eta + lamb*cos2a + (1+x)*sin2a)\n \n return (1+x)*(N_Ex - beta*N_By)/D**3", "def zscore(vals):", "def e_z(self, z):\n onez = 1+z \n onez2 = onez*onez \n onez3 = onez*onez2 \n arg = self.m0*onez3 + self.k0*onez2 + self.q0 \n return(np.sqrt(arg))", "def F(cst, x):\n [u0, v0, u1, v1, u2, v2, coeffs] = cst\n [u, v, g1, g2, g3] = x\n a = g1*u1 - u0\n b = g2*u2 - u0\n c = g3*u - u0\n l = g1*v1 - v0 \n m = g2*v2 - v0\n n = g3*v - v0\n r = g1 - 1\n s = g2 - 1\n t = g3 - 1\n return np.array([\n coeffs[0]*(a**2-l**2) + 2*coeffs[1]*(a*b-l*m) + coeffs[2]*(b**2-m**2) + 2*coeffs[3]*(a*c-l*n) + 2*coeffs[4]*(b*c-m*n) + c**2 - n**2,\n coeffs[0]*(l**2-r**2) + 2*coeffs[1]*(l*m-r*s) + coeffs[2]*(m**2-s**2) + 2*coeffs[3]*(l*n-r*t) + 2*coeffs[4]*(m*n-s*t) + n**2 - t**2,\n 
coeffs[0]*a*l + coeffs[1]*(l*b+m*a) + coeffs[2]*m*b + coeffs[3]*(l*c+n*a) + coeffs[4]*(m*c+b*n) + c*n,\n coeffs[0]*a*r + coeffs[1]*(r*b+s*a) + coeffs[2]*s*b + coeffs[3]*(r*c+t*a) + coeffs[4]*(s*c+b*t) + c*t,\n coeffs[0]*r*l + coeffs[1]*(l*s+m*r) + coeffs[2]*m*s + coeffs[3]*(l*t+n*r) + coeffs[4]*(m*t+s*n) + t*n \n ])", "def u(self, k, m, z):\n result = (1. - self.fb) * self.ProfNFW.nfw(k, m, z)\n result += self.fb * self.ProfGasBattaglia16.rho3dFourierNorm(k, m, z)\n result *= m / self.U.rho_m(z)\n return result" ]
[ "0.6817996", "0.6719775", "0.64250463", "0.63308775", "0.6235414", "0.61715096", "0.6168971", "0.61512005", "0.6137466", "0.61325085", "0.61201406", "0.60167384", "0.60125196", "0.59461224", "0.593396", "0.5912131", "0.59098804", "0.5876366", "0.5876366", "0.5868159", "0.5868159", "0.58644164", "0.5862719", "0.578475", "0.5732587", "0.57308644", "0.5701393", "0.5698587", "0.5686304", "0.5648329" ]
0.70650136
0
Evaluates the Stumpff function S(z) according to the Equation 3.52
def stump_S(z) : if z > 0: sz = sqrt(z) return (sz - sin(sz))/pow(sz,3) elif z < 0 : s_z = sqrt(-z) # According to the equation the denominatori is pow(sqrt(z),3) return (sinh(s_z) - s_z)/pow(s_z,3) else : return 0.1666666666666666
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def SFR(self, z):\n sfr = (0.017 + 0.13 * z)/(1 + np.power(z/3.3, 5.3))\n return sfr", "def st(self, sigma, z):\n \n aa = self.params[0]\n a = self.params[1]\n p = self.params[2]\n delta_c = self.params[3]\n \n return aa * sqrt(2.e0*a/pi) * (delta_c/sigma) * exp( (-a*delta_c*delta_c) / (2.e0*sigma*sigma) )*(1.e0 + pow( ( (sigma*sigma)/(a*delta_c*delta_c) ) , p ))", "def fsigma8(z):\n fs8=f(z)*sigma8(z)\n return fs8", "def zsx_s(self):\n\n return self.glb[user_params_index[\"Zs\"]]/self.glb[user_params_index[\"Xs\"]]", "def uSat(self, k, m, z):\n result = self.ProfNFW.nfw(k, m, z)\n result *= self.Nsat(m) / self.nBarGal(1./(1.+z))\n return result", "def uSat(self, k, m, z):\n result = self.ProfNFW.nfw(k, m, z)\n result *= self.Nsat(m) / self.nBarGal(1./(1.+z))\n return result", "def test_ssq_stft():\n th = 1e-1\n for N in (128, 129):\n x = np.random.randn(N)\n for n_fft in (120, 121):\n for window_scaling in (1., .5):\n if window_scaling == 1:\n window = None\n else:\n window = get_window(window, win_len=n_fft//1, n_fft=n_fft)\n window *= window_scaling\n\n Sx, *_ = ssq_stft(x, window=window, n_fft=n_fft)\n xr = issq_stft(Sx, window=window, n_fft=n_fft)\n\n txt = (\"\\nSSQ_STFT: (N, n_fft, window_scaling) = ({}, {}, {})\"\n ).format(N, n_fft, window_scaling)\n assert len(x) == len(xr), \"%s != %s %s\" % (N, len(xr), txt)\n mae = np.abs(x - xr).mean()\n assert mae < th, \"MAE = %.2e > %.2e %s\" % (mae, th, txt)", "def fs2ps2D(px, s):\n\t\tsfun = psarclength(px)\t\n\t\treturn sfun-s", "def steffensen ( fun , x , fx = None , args = () ) :\n \n if fx is None : fx = float ( fun ( x , *args ) ) ## reuse if already calculated\n if fx : \n gx = ( fun ( x + fx , *args ) - fx ) / fx\n if gx : return x - fx / gx", "def psi_s_case_E(z, x, gamma):\n \n if z == 0 and x == 0:\n return 0\n \n beta2 = 1-1/gamma**2\n beta = sqrt(beta2)\n \n L = (z + beta*sqrt(x**2*(1-beta2) + z**2))/(1-beta2)\n \n return 1/(sqrt(x**2 + L**2) - beta*L)", "def psi_s(z, x, gamma):\n \n if z == 0 and x == 0:\n return 0\n \n beta2 = 1-1/gamma**2\n beta = sqrt(beta2)\n \n alp = alpha(z, x, beta2) # Use approximate quatic formulas\n #alp = alpha_exact_case_B_brentq(z, x, beta) # Use numerical root finder \n \n kap = 2*(alp - z)/beta # Simpler form of kappa\n #kap = sqrt(x**2 + 4*(1+x) * sin(alp)**2)\n \n out = (cos(2*alp)- 1/(1+x)) / (kap - beta * (1+x) * sin(2*alp)) \n \n # Add SC term\n # out += -1 / ( (gamma**2-1)*(1+x)*(kap - beta*(1+x)*sin(2*alp)) ) \n \n return out", "def f(z):\n a=1./(1.+z)\n #da=0.01\n da=0.01*a\n #da=1e-7\n gp,g,gm=[D(1./ia-1.) 
for ia in [a+da,a,a-da]]\n f=a*(gp-gm)/(2*g*da)\n #dz=0.01\n #gp,g,gm=[D(zi) for zi in [z+dz,z,z-dz]]\n #f=(z)*(gp-gm)/(2.*g*dz)\n return f", "def Sx(z, params, x=0, layer=None):\r\n Ez_here = Ez(z, params, x=x, layer=layer)\r\n Hy_here = Hy(z, params, x=x, layer=layer)\r\n return -0.5 * Ez_here * Hy_here.conjugate()", "def old_psi_s(z, x, beta):\n\n beta2 = beta**2\n \n out = (cos(2 * alpha(z, x, beta2)) - 1 / (1+x)) / (\n kappa(z, x, beta2) - beta * (1+x) * sin(2*alpha(z, x, beta2)))\n\n return out", "def _ss(data):\n c = sum(data)/len(data)\n ss = sum((x-c)**2 for x in data)\n return ss", "def zeta_function(s,a = 1,derivative = 0):\n return mp.zeta(s,a,derivative)", "def stump_C(z) :\n\n if z > 0 :\n return (1 - cos(sqrt(z)))/z \n elif z < 0 :\n return (cosh(sqrt(-z)) - 1)/(-z)\n else :\n return 0.5", "def _call(self, z): \n self.a = F.softplus(self.a)\n self.w = F.softmax(self.w, dim=1)\n # Compute\n pre_sigm = self.a * z + self.b\n sigm = torch.sigmoid(pre_sigm)\n x_pre = self.w * sigm\n if (len(z.shape) > 2):\n x_pre = torch.sum(self.w * sigm, dim=1)\n x_pre_clipped = x_pre * (1 - self.eps) + self.eps * 0.5\n zp = torch.log(x_pre_clipped) - torch.log(1 - x_pre_clipped)\n return zp", "def stability_function_unexpanded(self):\n import sympy\n z = sympy.var('z')\n s = len(self)\n I = sympy.eye(s)\n\n v = 1 - self.alpha.sum(1)\n vstar = sympy.Matrix(v[:-1])\n v_mp1 = sympy.Rational(v[-1])\n alpha_star = sympy.Matrix(self.alpha[:-1,:])\n beta_star = sympy.Matrix(self.beta[:-1,:])\n alpha_mp1 = sympy.Matrix(self.alpha[-1,:])\n beta_mp1 = sympy.Matrix(self.beta[-1,:])\n p1 = (alpha_mp1 + z*beta_mp1).T*(I-alpha_star-z*beta_star).lower_triangular_solve(vstar)\n p1 = p1[0] + v_mp1\n return p1", "def get_s( self ):\n\n # initialize scaling factor as unknown variable, assuming it's real and\n # greater than zero\n _s = Symbol( 's', real = True, positive = True )\n\n # solve for scaling factor (first argument is expression set equal to zero)\n s = solve( self.a * _s ** self.n + self.b * _s - 1, _s )\n\n # save result as float\n self.s = float( s[ 0 ] )", "def SFR_Neijssel(self, z):\n SFR = 0.01 * ((1+z)**2.77) / (1 + ((1+z)/2.9)**4.7) * 1e9 #[1e9 for GPc-3]\n return SFR # [Msun yr-1 Gpc-3] in comoving volume ", "def _ss(data):\n c = mean(data)\n ss = sum((x-c)**2 for x in data)\n return ss", "def _ss(data):\n c = mean(data)\n ss = sum((x-c)**2 for x in data)\n return ss", "def _ss(data):\n c = mean(data)\n ss = sum((x-c)**2 for x in data)\n return ss", "def _ss(data):\n c = mean(data)\n ss = sum((x-c)**2 for x in data)\n return ss", "def _ss(data):\n c = mean(data)\n ss = sum((x-c)**2 for x in data)\n return ss", "def pz_fn(self, z):\n pass", "def _SS(data, m):\n if m is None:\n m = mean(data)\n return _generalised_sum(data, lambda x: (x-m)**2)", "def sigmaz():\n return _SIGMAZ.copy()", "def psi_x_SC(z, x, gamma):\n \n beta2 = 1-1/gamma**2\n beta = sqrt(beta2)\n \n alp = alpha(z, x, beta2) # Use approximate quatic formulas\n #alp = alpha_exact_case_B_brentq(z, x, beta) # Use numerical root finder\n \n kap = 2*(alp - z)/beta \n #kap = sqrt(x**2 + 4*(1+x) * sin(alp)**2) \n \n sin2a = sin(2*alp)\n cos2a = cos(2*alp) \n \n arg2 = -4 * (1+x) / x**2\n F = my_ellipkinc(alp, arg2) \n E = my_ellipeinc(alp, arg2)\n \n D = kap**2 - beta2 * (1+x)**2 * sin2a**2\n \n T1 = 1/abs(x)/(1 + x)*F\n T2 = x/(1+x)/(2+x)/abs(x)*E\n T3 = beta*(cos2a - 1 - x)/D\n \n T4 = -kap*x*(2+x)*(beta2*(1+x)**2 - 2)*sin2a/(x**2*(2+x)**2)/D\n T5 = -kap*beta2*(1+x)*x*(2+x)*sin2a*cos2a/(x**2*(2+x)**2)/D\n \n out = (T1 + T2 + T3 + T4 + 
T5)\n \n return out" ]
[ "0.6983814", "0.62845415", "0.6270047", "0.6215237", "0.60620826", "0.60620826", "0.5966971", "0.59611017", "0.58421904", "0.58372325", "0.5802116", "0.5789399", "0.57810855", "0.57647526", "0.5738204", "0.56974256", "0.5696346", "0.56717175", "0.56093794", "0.55684173", "0.55440867", "0.552219", "0.552219", "0.552219", "0.552219", "0.552219", "0.5465348", "0.5462522", "0.54488546", "0.54256046" ]
0.7471584
0
Send a notification to systemd. state is a string; see
def sd_notify(state, logger, unset_environment=False): addr = os.environ.get('NOTIFY_SOCKET') if addr is None: # not run in a service, just a noop return try: sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM | socket.SOCK_CLOEXEC) if addr[0] == '@': addr = '\0' + addr[1:] sock.connect(addr) sock.sendall(state.encode('utf-8')) except: logger.debug("Exception while invoking sd_notify()", exc_info=True) finally: if unset_environment: os.environ.pop('NOTIFY_SOCKET') sock.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def notify(cls, state):\r\n return PlatformMessage(method=\"__reply__\", kwargs={\"state\": state})", "def notify(self, path, state):\n pass", "def set_state(self, state: bool) -> None:\n payload = self._cfg.state_power_on if state else self._cfg.state_power_off\n command = f\"{COMMAND_POWER}{self._cfg.idx+1}\"\n self._mqtt_client.publish(\n self._cfg.command_topic + command,\n payload,\n )", "def send_state():\n while True:\n if I_AM_CRUSHED is False:\n sleep_time = random.randint(send_state_sec[0], send_state_sec[1])\n sock_check = socket.socket(type=socket.SOCK_DGRAM)\n sock_check.sendto(\"I'am healthy\", ('dispatcher', port_for_check))\n sock_check.close()\n time.sleep(sleep_time)", "def notify(guid, message):", "def status_post(self, status, state=None):\n\n # create new JSON payload to update device shadow\n new_payload = {\"state\": {\"reported\": {\"status\": str(status)}, \"desired\": None}}\n if state:\n new_payload.update({\"state\": {\"reported\": state}})\n\n # update shadow\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)\n\n # log to syslog\n LOGGER.info(status)\n LOGGER.debug(json.dumps(new_payload))", "def _send_notification() -> None:\n send_notification(\n self,\n \"slack:@aaron\",\n \"New {0} Version: {1}\".format(\n self.properties[CONF_APP_NAME], new_version\n ),\n title=\"New Software 💿\",\n )", "def send_notification (event):\n Publisher.sendMessage (event)", "def send_state(state):\n # filter state to just expected keys\n state = {k:v for k, v in state.items() if k in _state_keys}\n\n # send to settings.ROOM_NAME\n channel_layer = get_channel_layer()\n async_to_sync(channel_layer.group_send)(settings.ROOM_NAME, {\n 'type': 'share_state',\n 'state': state,\n })", "def sendNotification(title, message):\n if platform.platform().startswith('Linux'):\n notify2.init('')\n n = notify2.Notification(title, message)\n n.show()\n \n elif platform.platform().startswith('Windows') and platform.release() == '10':\n n = ToastNotifier()\n n.show_toast(title, message)", "def send_event(instance_config, status, output):\n # This function assumes the input is a string like \"mumble.main\"\n monitoring_overrides = instance_config.get_monitoring()\n if 'alert_after' not in monitoring_overrides:\n monitoring_overrides['alert_after'] = '2m'\n monitoring_overrides['check_every'] = '1m'\n monitoring_overrides['runbook'] = monitoring_tools.get_runbook(\n monitoring_overrides,\n instance_config.service, soa_dir=instance_config.soa_dir,\n )\n\n check_name = (\n 'check_marathon_services_replication.%s' %\n instance_config.job_id\n )\n monitoring_tools.send_event(\n service=instance_config.service,\n check_name=check_name,\n overrides=monitoring_overrides,\n status=status,\n output=output,\n soa_dir=instance_config.soa_dir,\n cluster=instance_config.cluster,\n )\n _log(\n service=instance_config.service,\n line='Replication: %s' % output,\n component='monitoring',\n level='debug',\n cluster=instance_config.cluster,\n instance=instance_config.instance,\n )", "def update_notify_state(self, data, stage=None):\n LOGGER.debug(\"Send update nofity state with %s\", self.client_status)\n if stage:\n data['stage'] = stage\n NOTIFY_STATE.update_state(data)", "def shiftr_event_listener(event):\n state = event.data.get(\"new_state\")\n topic = state.entity_id.replace(\".\", \"/\")\n\n try:\n _state = state_helper.state_as_number(state)\n except ValueError:\n _state = state.state\n\n try:\n mqttc.publish(topic, _state, qos=0, retain=False)\n\n if state.attributes:\n for attribute, 
data in state.attributes.items():\n mqttc.publish(\n f\"/{topic}/{attribute}\", str(data), qos=0, retain=False\n )\n except RuntimeError:\n pass", "def send_notification(self):\n s1 = System()\n b1 = Books(\"1984\", \"George Orwell\", \"Harvill Secker\", \"1949\", \"0123456789123\")\n m1 = Members(\"Richard\", \"Blackmore\", \"14-04-1945\", \"Weston\")\n s1.send_notification(\"Please return book\")\n self.assertEqual(m1.get_notifications(), None)\n s1.add_resource(b1)\n s1.lending_process(b1, m1)\n s1.send_notification(\"Please return book\")\n self.assertEqual(m1.get_notifications(), \"-Please return boo- \")", "def notify_subscribers(self, instance, domain, state=None):\n if not self.notifier:\n return\n\n if not state:\n state = instance.state\n\n tups = domain.get_subscribers()\n for subscriber_name, subscriber_op in tups:\n properties = {'hostname': instance.public_ip}\n content = {'node_id': instance.instance_id, 'state': state,\n 'domain_id': domain.domain_id,\n 'properties': properties}\n self.notifier.notify_by_name(subscriber_name, subscriber_op, content)", "def initiate_dev_state_notifications(self):\n\t\treturn Job(SDK.PrlVm_InitiateDevStateNotifications(self.handle)[0])", "def send_status(self):\n self.data = {\n 'value': '',\n 'state': self.state,\n }\n event_manager.device_changed(self)", "def exposed_set_state(self, state):\n return json.dumps(dict(message='Triggered state: {}'.format(state)),\n indent=2)", "def notifySysOperator(self):\n msg = self.generateNotifyMessage()\n print(msg)\n # with smtplib.SMTP('smtp.gmail.com', 587) as smtp:\n # smtp.ehlo()\n # smtp.starttls()\n # smtp.ehlo()\n\n # smtp.login(\"[email protected]\", \"qwerQWER123.\")\n\n # smtp.sendmail(\"[email protected]\", \"[email protected]\", msg)\n\n # smtp.close()\n return False", "def sendNotificationEmail(fundName, status, message):\n\tgetSubject = lambda fundName, status: \\\n\t\tfundName + ' auto update succesful' \\\n\t\tif status == Constants.STATUS_SUCCESS else \\\n\t\tfundName + ' auto update failed'\n\n\tlogger.debug('sendNotificationEmail(): {0}'.format(fundName))\n\tsendMail( message\n\t\t\t, getSubject(fundName, status)\n\t\t\t, getMailSender()\n\t\t\t, getNotificationMailRecipients()\n\t\t\t, getMailServer()\n\t\t\t, getMailTimeout())", "def send_notification(summary, message=\"\", icon=None, update=True):\n if icon == None:\n icon = \"sonata\"\n\n if pynotify_module:\n global notification\n if update:\n notification.update(summary=summary, message=message, icon=icon)\n else:\n notification = pynotify.Notification(summary=summary, message=message, icon=icon)\n notification.show()\n else:\n # Use notify-send instead\n os.popen('notify-send -i \"{0}\" \"{1}\" \"{2}\"'.format(icon.replace('\"', '\\\\\"'), summary.replace('\"', '\\\\\"'), message.replace('\"', '\\\\\"')))", "def send_state(self):\n self.state = self.enigma.get_state()\n messages = self.notify_slaves()\n for message in messages:\n self.network.messages_to_slaves.append(message)", "def sendInterfaceStatusUpdateMessage(iTag, status): #@NoSelf", "def setState(self, newstate) :\n if self.state != newstate :\n self.state=newstate\n for gate in self.notifyList:\n gate.notify()", "def update_status(self, kind, status, text=None, notify=True):\n status_str=\"status/\"+kind if kind else \"status\"\n self[status_str]=status\n if notify:\n self.send_signal(\"any\",status_str,status)\n if text:\n self.set_variable(status_str+\"_text\",text)\n self.send_signal(\"any\",status_str+\"_text\",text)", "async def notify(self, message: str) -> 
None:\n\n pass", "def send_system_realtime_message(self, status=TIMING_CLOCK):\n self._midi.send_message([status & 0xF7], delta=1)", "def update_status(self) -> None:\n try:\n (rc, mid) = self.mqttc.publish(\n self.config.status_topic, json.dumps(self.status), qos=0, retain=False\n )\n if rc == mqtt.MQTT_ERR_SUCCESS:\n logging.info(\n f\"The request for a status update has been successfully accepted: mid={mid}\"\n )\n else:\n logging.warning(\"The request for a status update has been rejected\")\n except ValueError as e:\n logging.warning(f\"Cannot send status update: {e}\")", "def _notify_slack(self, notify_type, name, environment, details=None):\n\n if self._slack_integration is None:\n return False\n\n user = self._get_current_user()\n\n # Text\n if self._deploy_stage == self.DEPLOY_STAGE_BUILDING:\n if notify_type == self.NOTIFY_TYPE_STARTED:\n text = ':wrench: Started building *%s* for *%s* by %s' % (environment, name, user.name)\n elif notify_type == self.NOTIFY_TYPE_SUCCEEDED:\n text = ':white_check_mark: Succeeded building *%s* for *%s* by %s' % (environment, name, user.name)\n elif notify_type == self.NOTIFY_TYPE_FAILED:\n text = ':x: Failed building *%s* for *%s* by %s' % (environment, name, user.name)\n else:\n raise RuntimeError('Unknown notification type was given while notifying user')\n elif self._deploy_stage == self.DEPLOY_STAGE_DEPLOYING:\n if notify_type == self.NOTIFY_TYPE_STARTED:\n text = ':steam_locomotive: Started deploying *%s* for *%s* by %s' % (environment, name, user.name)\n elif notify_type == self.NOTIFY_TYPE_SUCCEEDED:\n text = ':tada: Succeeded deploying *%s* for *%s* by %s' % (environment, name, user.name)\n elif notify_type == self.NOTIFY_TYPE_FAILED:\n text = ':x: Failed deploying *%s* for *%s* by %s' % (environment, name, user.name)\n else:\n raise RuntimeError('Unknown notification type was given while notifying user')\n else:\n raise RuntimeError('Unknown deploy stage was set while notifying user')\n\n # Color\n if notify_type == self.NOTIFY_TYPE_SUCCEEDED:\n color = 'good'\n elif notify_type == self.NOTIFY_TYPE_FAILED:\n color = 'danger'\n else:\n color = '#e3e4e6'\n\n # Send\n return self._slack_integration.send_message(text, sub_text=details, color=color)", "def write(self, notification):" ]
[ "0.71543527", "0.6743333", "0.62118864", "0.58737814", "0.58208144", "0.57788986", "0.57196826", "0.5703359", "0.5696581", "0.56706965", "0.5624191", "0.5618526", "0.5584987", "0.5573422", "0.5566332", "0.55626523", "0.5525932", "0.5525822", "0.55131394", "0.54965544", "0.5434289", "0.53876626", "0.537634", "0.536877", "0.5362929", "0.5354168", "0.53375286", "0.53368783", "0.532641", "0.5323656" ]
0.74761426
0
Return the layout for a popup window. It consists of a title bar showing the `title` text, and a body layout. The window is surrounded by borders.
def create_popup_window(title, body):
    assert isinstance(title, six.text_type)
    assert isinstance(body, Container)

    return HSplit([
        VSplit([
            Window(width=D.exact(1), height=D.exact(1),
                   content=FillControl(BORDER.TOP_LEFT, token=Token.Window.Border)),
            TokenListToolbar(
                get_tokens=lambda cli: [(Token.Window.Title, ' %s ' % title)],
                align_center=True,
                default_char=Char(BORDER.HORIZONTAL, Token.Window.Border)),
            Window(width=D.exact(1), height=D.exact(1),
                   content=FillControl(BORDER.TOP_RIGHT, token=Token.Window.Border)),
        ]),
        VSplit([
            Window(width=D.exact(1),
                   content=FillControl(BORDER.VERTICAL, token=Token.Window.Border)),
            body,
            Window(width=D.exact(1),
                   content=FillControl(BORDER.VERTICAL, token=Token.Window.Border)),
        ]),
        VSplit([
            Window(width=D.exact(1), height=D.exact(1),
                   content=FillControl(BORDER.BOTTOM_LEFT, token=Token.Window.Border)),
            Window(height=D.exact(1),
                   content=FillControl(BORDER.HORIZONTAL, token=Token.Window.Border)),
            Window(width=D.exact(1), height=D.exact(1),
                   content=FillControl(BORDER.BOTTOM_RIGHT, token=Token.Window.Border)),
        ]),
    ])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def window(*args, width: int = 200, height: int = 200, autosize: bool = False,\n no_resize: bool = False, no_title_bar: bool = False, no_move: bool = False, no_scrollbar: bool = False,\n no_collapse: bool = False, horizontal_scrollbar: bool = False, no_focus_on_appearing: bool = False,\n no_bring_to_front_on_focus: bool = False, menubar: bool = False, no_close: bool = False,\n no_background: bool = False, label: str = '', show: bool = True, collapsed: bool = False,\n modal: bool = False, popup: bool = False,\n on_close: Callable = None, min_size: List[int]=[32, 32], max_size: List[int] = [30000, 30000], id:str=''):\n try:\n\n widget = internal_dpg.add_window(*args, width=width, height=height, autosize=autosize,\n no_resize=no_resize, no_title_bar=no_title_bar, no_move=no_move,\n no_scrollbar=no_scrollbar, no_collapse=no_collapse,\n horizontal_scrollbar=horizontal_scrollbar,\n no_focus_on_appearing=no_focus_on_appearing,\n no_bring_to_front_on_focus=no_bring_to_front_on_focus,\n menubar=menubar, no_close=no_close,\n no_background=no_background, label=label, show=show, \n collapsed=collapsed, on_close=on_close,\n min_size=min_size, max_size=max_size, id=id, modal=modal,\n popup=popup)\n internal_dpg.push_container_stack(widget)\n yield widget\n\n finally:\n internal_dpg.pop_container_stack()", "def position_window(self):\n x, y = self.get_position()\n root_x = self.anchor_widget.winfo_rootx() + x\n root_y = self.anchor_widget.winfo_rooty() + y\n self.tipwindow.wm_geometry(\"+%d+%d\" % (root_x, root_y))", "def make_layout(self):\n\n for h in range(0, self.num_layout_heads):\n self.set_sliding_window_layout(h)\n self.set_global_layout(h)\n\n self.check_and_propagate_first_head_layout()\n return self.layout", "def placeWindow(self):\r\n\t\t# window size\r\n\t\tw = 600\r\n\t\th = 300\r\n\t\t# find the screen size\r\n\t\tsw = self.parent.winfo_screenwidth()\r\n\t\tsh = self.parent.winfo_screenheight()\r\n\t\t# now define the location on the current screen\r\n\t\tx = (sw/2-0.5*w)\r\n\t\ty = (sh/2-0.5*h)\r\n\t\tself.parent.geometry('%dx%d+%d+%d' % (w, h, x, y))", "def return_layout(self):\n return html.Div([\n dbc.Row([dbc.Col([\n html.H3('Kitsu Library Explorer', style={'padding': '10px 0 0 10px'}),\n dbc.Button('Primary', color='primary', className='mr-1', id=self.ids[self.id_wip_button]),\n super().return_layout(),\n ])], style={'margin': 0, 'padding': 0}),\n html.Hr(),\n dbc.Row([dbc.Col([\n self.mod_upload.return_layout(self.ids),\n self.mod_cache.return_layout(self.ids),\n ])], style={'maxWidth': '90%', 'paddingLeft': '5%'}),\n\n dbc.Row([dbc.Col([\n html.H2('Data Interaction'),\n html.P(' FIXME: Needs dropdown to select table_name. 
Filtered data should be applied to px chart'),\n self.mod_table.return_layout(self.ids, px.data.gapminder()),\n ])], style={'maxWidth': '90%', 'paddingLeft': '5%'}),\n\n dbc.Modal([\n dbc.ModalHeader('Module Header'),\n dbc.ModalBody('Module body text'),\n dbc.ModalFooter(dbc.Button('Close', id=self.ids[self.id_modal_close], className='ml-auto')),\n ], backdrop='static', centered=True, id=self.ids[self.id_modal]),\n ])", "def layoutDialog(*args, backgroundColor: List[float, float, float]=None, dismiss: AnyStr=\"\",\n parent: AnyStr=\"\", title: AnyStr=\"\", uiScript: Script=None, **kwargs)->AnyStr:\n pass", "def window(*args, backgroundColor: List[float, float, float]=None, closeCommand: Script=None,\n defineTemplate: AnyStr=\"\", docTag: Union[AnyStr, bool]=\"\", dockCorner:\n Union[List[AnyStr, AnyStr], List[List[AnyStr, AnyStr]]]=None, dockStation: bool=True,\n dockingLayout: Union[AnyStr, bool]=\"\", exists: bool=True, frontWindow: bool=True,\n height: Union[int, bool]=0, iconName: Union[AnyStr, bool]=\"\", iconify: bool=True,\n interactivePlacement: bool=True, leftEdge: Union[int, bool]=0, mainMenuBar:\n bool=True, mainWindow: bool=True, maximizeButton: bool=True, menuArray: bool=True,\n menuBar: bool=True, menuBarCornerWidget: Union[List[AnyStr, AnyStr], bool]=None,\n menuBarResize: bool=True, menuBarVisible: bool=True, menuIndex: List[AnyStr,\n int]=None, minimizeButton: bool=True, minimizeCommand: Script=None,\n nestedDockingEnabled: bool=True, numberOfMenus: bool=True, parent: AnyStr=\"\",\n resizeToFitChildren: bool=True, restoreCommand: Script=None, retain: bool=True,\n sizeable: bool=True, state: Union[AnyStr, bool]=\"\", title: Union[AnyStr, bool]=\"\",\n titleBar: bool=True, titleBarMenu: bool=True, toolbox: bool=True, topEdge: Union[int,\n bool]=0, topLeftCorner: Union[List[int, int], bool]=None, useTemplate: AnyStr=\"\",\n visible: bool=True, width: Union[int, bool]=0, widthHeight: Union[List[int, int],\n bool]=None, q=True, query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def _create_example_window():\n return Window({\"warning\": False, \"state\": \"close\"})", "def create_layout( self ):", "def createWidgets(self):\r\n top = self.winfo_toplevel()\r\n top.rowconfigure(0, weight=1)\r\n top.columnconfigure(0, weight=1)\r\n self.rowconfigure(0, weight=1)\r\n self.columnconfigure(0, weight=1) \r\n\r\n self.button_quit = tk.Button(self, text='Quit', command=self.quit)\r\n self.button_quit.grid(row=0, column=0, sticky=tk.N+tk.S+tk.E+tk.W)", "def create_main_window():\n main_win = MainWindow()\n main_windows.append(main_win)\n available_geometry = app.desktop().availableGeometry(main_win)\n main_win.resize(available_geometry.width() * 2 / 3,\n available_geometry.height() * 2 / 3)\n main_win.show()\n return main_win", "def app_layout(self):\n return self.pt_app.layout", "def GetWindow(self):\r\n\r\n return self.window", "def create_window(root, *args, **kwargs):\n global top, rt\n rt = root\n top = tk.Toplevel(root)\n ui_support.set_Tk_var()\n window = Window(top)\n ui_support.init(top, window, *args, **kwargs)\n return top, window", "def CreateLayout(self):\n # Defines the title of the Dialog\n self.SetTitle(\"A Custom Dialog with a Top Menu\")\n\n # Flushes all the already existing menu to create our one. 
The content will be on the left.\n self.MenuFlushAll()\n\n # Creates a Sub menu begin to insert new menu entry\n self.MenuSubBegin(\"Left Menu\")\n\n # Adds a string with a given ID, so it will trigger a call to Command once clicked\n self.MenuAddString(self.ID_LEFT_MENU_FIRST_ITEM, \"Close\")\n\n # Finalizes the Sub Menu\n self.MenuSubEnd()\n\n # Finalizes the menu\n self.MenuFinished()\n\n # Creates a Group in the Menu. The content will be on the right\n if self.GroupBeginInMenuLine():\n # Creates a BitmapButtonCustomGui with the find icon\n settings = c4d.BaseContainer()\n settings[c4d.BITMAPBUTTON_BUTTON] = True\n settings[c4d.BITMAPBUTTON_BORDER] = False\n settings[c4d.BITMAPBUTTON_TOGGLE] = True\n settings[c4d.BITMAPBUTTON_ICONID1] = c4d.RESOURCEIMAGE_SCENEBROWSER_FIND2\n settings[c4d.BITMAPBUTTON_ICONID2] = c4d.RESOURCEIMAGE_SCENEBROWSER_FIND1\n\n self.displayContentButtonDlg = self.AddCustomGui(self.ID_RIGHT_MENU_SHOW_CONTENT,\n c4d.CUSTOMGUI_BITMAPBUTTON, \"\",\n c4d.BFH_CENTER | c4d.BFV_CENTER, 0, 0, settings)\n\n self.GroupEnd()\n\n # Creates a group that will contain the content that will be hidden when the BitmapButton is pressed. It's\n # important to have a parent group to the group that needs to be hidden since you need to redraw this parent\n # group after the visibility definition.\n if self.GroupBegin(self.ID_MAIN_GROUP, c4d.BFH_LEFT | c4d.BFV_CENTER):\n\n # The group that will be hidden\n if self.GroupBegin(self.ID_HIDDEN_GROUP, c4d.BFH_LEFT | c4d.BFV_CENTER):\n # Adds the content you want to toggle\n self.AddStaticText(0, c4d.BFH_LEFT | c4d.BFV_CENTER, name=\"test\")\n\n self.GroupEnd()\n self.GroupEnd()\n\n # Adds two buttons, Ok and Cancel\n self.AddDlgGroup(c4d.DLG_OK | c4d.DLG_CANCEL)\n\n return True", "def create_layout(python_input, history_mapping):\n highlighters = [\n SearchHighlighter(preview_search=True),\n SelectionHighlighter()]\n\n help_window = create_popup_window(\n title='History Help',\n body=Window(\n content=BufferControl(\n buffer_name=HELP_BUFFER,\n default_char=Char(token=Token),\n lexer=PygmentsLexer(RstLexer),\n highlighters=highlighters),\n right_margins=[ScrollbarMargin()],\n scroll_offsets=ScrollOffsets(top=2, bottom=2)))\n\n return HSplit([\n # Top title bar.\n TokenListToolbar(\n get_tokens=_get_top_toolbar_tokens,\n align_center=True,\n default_char=Char(' ', Token.Toolbar.Status)),\n FloatContainer(\n content=VSplit([\n # Left side: history.\n Window(\n content=BufferControl(\n buffer_name=HISTORY_BUFFER,\n wrap_lines=False,\n lexer=PygmentsLexer(PythonLexer),\n highlighters=highlighters),\n left_margins=[HistoryMargin(history_mapping)],\n scroll_offsets=ScrollOffsets(top=2, bottom=2)),\n # Separator.\n Window(width=D.exact(1),\n content=FillControl(BORDER.LIGHT_VERTICAL, token=Token.Separator)),\n # Right side: result.\n Window(\n content=BufferControl(\n buffer_name=DEFAULT_BUFFER,\n wrap_lines=False,\n highlighters=highlighters,\n input_processors=[GrayExistingText(history_mapping)],\n lexer=PygmentsLexer(PythonLexer)),\n left_margins=[ResultMargin(history_mapping)],\n scroll_offsets=ScrollOffsets(top=2, bottom=2)),\n ]),\n floats=[\n # Help text as a float.\n Float(width=60, top=3, bottom=2,\n content=ConditionalContainer(\n # (We use InFocusStack, because it's possible to search\n # through the help text as well, and at that point the search\n # buffer has the focus.)\n content=help_window, filter=InFocusStack(HELP_BUFFER))),\n ]\n ),\n # Bottom toolbars.\n ArgToolbar(),\n SearchToolbar(),\n TokenListToolbar(\n 
get_tokens=partial(_get_bottom_toolbar_tokens, python_input=python_input),\n default_char=Char(' ', Token.Toolbar.Status)),\n ])", "def createWindow(self):\r\n\t\t# give the window a title\r\n\t\tself.parent.title( 'Acrobat Data Acquisition')\r\n\t\t# set the style\r\n\t\tself.style = ttk.Style()\r\n\t\tself.style.theme_use('default')\r\n\t\tself.pack(fill= tk.BOTH, expand=1)", "def createWindow(self):\n\n # create window, set basic attributes\n w = gtk.Window(gtk.WINDOW_TOPLEVEL)\n w.set_size_request(*self.__def_win_size__)\n w.set_decorated(False)\n #w.fullscreen()\n #w.unfullscreen()\n w.set_title(self.__name__)\n w.connect(\"destroy\", gtk.main_quit)\n\n # declare buttons and their associated handlers\n controls = (\n (\"open_button\", gtk.ToolButton(gtk.STOCK_OPEN), self.onPlay),\n (\"play_button\", gtk.ToolButton(gtk.STOCK_MEDIA_PLAY), self.onPlay),\n (\"stop_button\", gtk.ToolButton(gtk.STOCK_MEDIA_STOP), self.onStop),\n (\"quit_button\", gtk.ToolButton(gtk.STOCK_QUIT), gtk.main_quit)\n )\n\n # as well as the container in which to put them\n box = gtk.HButtonBox()\n\n # for every widget, connect to its clicked signal and add it\n # to the enclosing box\n for name, widget, handler in controls:\n widget.connect(\"clicked\", handler)\n box.pack_start(widget, True)\n setattr(self, name, widget)\n\n viewer = gtk.DrawingArea()\n viewer.modify_bg(gtk.STATE_NORMAL, viewer.style.black)\n\n # we will need this later\n self.xid = None\n\n # now finally do the top-level layout for the window\n layout = gtk.VBox(False)\n layout.pack_start(viewer)\n\n # subclasses can override childWidgets() to supply\n # custom controls\n layout.pack_start(self.customWidgets(), False, False)\n layout.pack_end(box, False, False)\n w.add(layout)\n w.show_all()\n\n # we want to return only the portion of the window which will\n # be used to display the video, not the whole top-level\n # window. 
a DrawingArea widget is, in fact, an X11 window.\n return viewer", "def layout(self):\n # Create the layout\n boxLayout = QtGui.QGridLayout()\n\n # Add widgets to layout\n boxLayout.addWidget(self.magnitudeLabel,0,0)\n boxLayout.addWidget(self.magnitudeOption,0,1)\n boxLayout.addWidget(self.directionLabel,1,0)\n boxLayout.addWidget(self.directionOption,1,1)\n boxLayout.addWidget(self.horizontalLabel,2,0)\n boxLayout.addWidget(self.horizontalOption,2,1)\n boxLayout.addWidget(self.verticalLabel,3,0)\n boxLayout.addWidget(self.verticalOption,3,1)\n boxLayout.addWidget(self.closeButton,4,1)\n\n # Set layout to window\n self.setLayout(boxLayout)", "def layout(self):\n return self._layout_manager", "def win_popup(self):\n content = BoxLayout(orientation='vertical')\n message_label = Label(text=self.win_message)\n button_layer = BoxLayout(orientation='horizontal')\n dismiss_button = Button(text='QUIT', size_hint=(1, 1))\n next_button = Button(id='next', text='NEXT ROUND', size_hint=(1, 1))\n button_layer.add_widget(dismiss_button)\n button_layer.add_widget(next_button)\n content.add_widget(message_label)\n content.add_widget(button_layer)\n popup = Popup(title=self.winner,\n content=content, size_hint=(0.3, 0.25))\n dismiss_button.bind(on_release=(lambda a: self.exit_game()),\n on_press=popup.dismiss)\n next_button.bind(on_release=(lambda a: self.next_round()),\n on_press=popup.dismiss)\n popup.open()", "def get_root_layout(self):\n layout = dbc.Container(\n fluid=True,\n children=[\n self.get_nav_bar_layout(),\n dbc.Row(\n [\n # Real time update database\n dcc.Interval(\n id='real_time_db_update',\n interval=300000, # in milliseconds\n n_intervals=0\n ),\n dbc.Col(children=self.get_sidebar_layout(), md=2),\n dbc.Col(children=self.get_potential_deal_table_layout(), md=10)\n\n ]\n ),\n dbc.Row(\n dbc.Label(children=\"Made with ❤️ in India\"),\n className=\"justify-content-center\"\n )\n ]\n )\n return layout", "def get_layout(self):\n return self._layout", "def create_window(\n caption: str,\n width: Union[int, Options.Hidden] = SizeX,\n height: Union[int, Options.Hidden] = SizeY,\n resizable: bool = True,\n) -> None:\n global WINDOW\n global IMPL\n if not WINDOW and not IMPL:\n gl.glClearColor(1, 1, 1, 1)\n imgui.create_context()\n WINDOW = pyglet.window.Window(\n width=width if isinstance(width, int) else width.CurrentValue,\n height=height if isinstance(height, int) else height.CurrentValue,\n resizable=resizable,\n caption=caption,\n )\n IMPL = create_renderer(WINDOW)\n\n def on_resize(w: int, h: int) -> None:\n SizeX.CurrentValue = w\n SizeY.CurrentValue = h\n SaveAllModSettings()\n\n WINDOW.push_handlers(on_resize)\n style_ui(ImguiStyle.CurrentValue)\n\n elif WINDOW:\n WINDOW.set_caption(caption=caption)\n style_ui(ImguiStyle.CurrentValue)", "def create_window_constants() -> None:\r\n\r\n self.WIDTH = 1000\r\n self.HEIGHT = 600\r\n\r\n self.WIDGET_PAD = 5 # Widget padding\r\n self.MAIN_BG = '#eeeeee' # Main background\r\n\r\n self.FONT_LARGE = ('Courier',24)\r\n self.FONT_NORMAL = ('Courier', 12)\r\n self.FONT_SMALL = ('Courier', 10)", "def createLayout(self):\n self.fig = Figure()\n self.canvas = FigureCanvas(self.fig)\n self.canvas.setParent(self)\n self.canvas.setFocus()\n self.mpl_toolbar = NavigationToolbar(self.canvas, self)\n self.axes = self.fig.add_subplot(111)\n # Log window\n self.log = QtWidgets.QTextEdit(self)\n self.log.setCurrentFont(self.parent.txteditfont)\n self.log.setFixedHeight(200)\n self.log.setReadOnly(True)\n # Parameters\n plab = QtWidgets.QLabel(\"Peak parameters 
(type, pos, amp, FWHM, asym, Lfrac)\")\n self.ptext = QtWidgets.QTextEdit(self)\n self.ptext.setCurrentFont(self.parent.txteditfont)\n # Buttons\n self.exeBtn = QtWidgets.QPushButton(\"Compute\")\n self.okBtn = QtWidgets.QPushButton(\"OK\")\n self.cancelBtn = QtWidgets.QPushButton(\"Cancel\")\n\n # set the layout\n vbox = QtWidgets.QVBoxLayout()\n vbox.addWidget(self.canvas)\n vbox.addWidget(self.mpl_toolbar)\n vbox.addWidget(self.log)\n vbox.addWidget(plab)\n vbox.addWidget(self.ptext)\n hbox = QtWidgets.QHBoxLayout()\n hbox.addWidget(self.exeBtn)\n hbox.addWidget(self.okBtn)\n hbox.addWidget(self.cancelBtn)\n vbox.addLayout(hbox)\n self.setLayout(vbox)", "def make_layout(self):\n\n for h in range(0, self.num_layout_heads):\n self.set_random_layout(h)\n self.set_sliding_window_layout(h)\n self.set_global_layout_itc(h)\n\n self.check_and_propagate_first_head_layout()\n return self.layout", "def default_window():\n X = [0, .125, 1.4375, 1.5625, 2.9375, 3.0625, 4.4375, 4.5625, 5.875, 6.0]\n Y = [0, .125, 2.875, 3.0]\n Z = [0, .125]\n V, F = True, False\n occupancy = [\n [[V],[V],[V]],\n [[V],[F],[V]],\n [[V],[V],[V]],\n [[V],[F],[V]],\n [[V],[V],[V]],\n [[V],[F],[V]],\n [[V],[V],[V]],\n [[V],[F],[V]],\n [[V],[V],[V]]\n ]\n return w7.window(X, Y, Z, occupancy)", "def get_main_window():\n\n pass", "def createLayout(self):\n hbox = QtWidgets.QHBoxLayout()\n hbox.addStretch(1)\n okBtn = QtWidgets.QPushButton(\"OK\")\n okBtn.clicked.connect(self.validate)\n cancelBtn = QtWidgets.QPushButton(\"Cancel\")\n cancelBtn.clicked.connect(self.reject)\n hbox.addWidget(okBtn)\n hbox.addWidget(cancelBtn)\n vbox = QtWidgets.QVBoxLayout()\n vbox.addWidget(self.lab)\n vbox.addWidget(self.text)\n vbox.addLayout(hbox)\n self.setLayout(vbox)" ]
[ "0.5706642", "0.5676141", "0.556403", "0.549403", "0.5490062", "0.5477046", "0.5474232", "0.5458338", "0.5445988", "0.54405284", "0.5437478", "0.53826696", "0.5343365", "0.53356165", "0.5334864", "0.5316925", "0.52934974", "0.52924705", "0.52872044", "0.5278744", "0.5267551", "0.52569956", "0.5255191", "0.52371776", "0.5232424", "0.5221035", "0.5218385", "0.5208422", "0.52014446", "0.5185625" ]
0.750733
0
Create an `Application` for the history screen. This has to be run as a sub application of `python_input`. When this application runs and returns, it returns the selected lines.
def create_history_application(python_input, original_document):
    history_mapping = HistoryMapping(python_input.history, original_document)

    def default_buffer_pos_changed():
        """ When the cursor changes in the default buffer. Synchronize with
        history buffer. """
        # Only when this buffer has the focus.
        if buffer_mapping.focus_stack[-1] == DEFAULT_BUFFER:
            try:
                line_no = default_buffer.document.cursor_position_row - \
                    history_mapping.result_line_offset

                if line_no < 0:  # When the cursor is above the inserted region.
                    raise IndexError

                history_lineno = sorted(history_mapping.selected_lines)[line_no]
            except IndexError:
                pass
            else:
                history_buffer.cursor_position = \
                    history_buffer.document.translate_row_col_to_index(history_lineno, 0)

    def history_buffer_pos_changed():
        """ When the cursor changes in the history buffer. Synchronize. """
        # Only when this buffer has the focus.
        if buffer_mapping.focus_stack[-1] == HISTORY_BUFFER:
            line_no = history_buffer.document.cursor_position_row

            if line_no in history_mapping.selected_lines:
                default_lineno = sorted(history_mapping.selected_lines).index(line_no) + \
                    history_mapping.result_line_offset

                default_buffer.cursor_position = \
                    default_buffer.document.translate_row_col_to_index(default_lineno, 0)

    history_buffer = Buffer(
        initial_document=Document(history_mapping.concatenated_history),
        on_cursor_position_changed=Callback(history_buffer_pos_changed),
        accept_action=AcceptAction(
            lambda cli, buffer: cli.set_return_value(default_buffer.document)),
        read_only=True)

    default_buffer = Buffer(
        initial_document=history_mapping.get_new_document(),
        on_cursor_position_changed=Callback(default_buffer_pos_changed),
        read_only=True)

    help_buffer = Buffer(
        initial_document=Document(HELP_TEXT, 0),
        accept_action=AcceptAction.IGNORE,
        read_only=True
    )

    buffer_mapping = BufferMapping({
        HISTORY_BUFFER: history_buffer,
        DEFAULT_BUFFER: default_buffer,
        HELP_BUFFER: help_buffer,
    }, initial=HISTORY_BUFFER)

    application = Application(
        layout=create_layout(python_input, history_mapping),
        use_alternate_screen=True,
        buffers=buffer_mapping,
        style=python_input._current_style,
        mouse_support=Condition(lambda cli: python_input.enable_mouse_support),
        key_bindings_registry=create_key_bindings(python_input, history_mapping)
    )
    return application
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def history(command):\n namespace = app.main(command)\n assert namespace.command == 'h' or namespace.command == \"history\"", "def history_main(args=None, stdin=None):\n hist = builtins.__xonsh_history__\n ns = _hist_parse_args(args)\n if ns:\n _HIST_MAIN_ACTIONS[ns.action](ns, hist)", "def history():", "def run():\n if len(sys.argv) != 2:\n raise SystemExit(\"usage: historyview.py filename\")\n \n filename = sys.argv[1]\n\n app = HistoryViewApp()\n app.run(filename)", "def __init__(self, history, vm, shell):\n super(ShellHistoryDialog, self).__init__(shell)\n self.setupUi(self)\n \n self.__vm = vm\n self.__shell = shell\n \n self.historyList.addItems(history)\n index = shell.getHistoryIndex()\n if index < 0 or index >= len(history):\n self.historyList.setCurrentRow(\n self.historyList.count() - 1, QItemSelectionModel.Select)\n else:\n self.historyList.setCurrentRow(index, QItemSelectionModel.Select)\n self.historyList.scrollToItem(self.historyList.currentItem())", "def hist():\n history_dict = {}\n # create history_list\n for i in range(readline.get_current_history_length()):\n history_dict[i+1] = (readline.get_history_item(i+1))\n return history_dict", "def init_readline():\n if g.command_line:\n return\n\n if has_readline:\n g.READLINE_FILE = os.path.join(get_config_dir(), \"input_history\")\n\n if os.path.exists(g.READLINE_FILE):\n readline.read_history_file(g.READLINE_FILE)\n dbg(c.g + \"Read history file\" + c.w)", "def history():\n \n user_id = session[\"user_id\"]\n history_list = hist(user_id, db)\n return render_template('history.html', history=history_list)", "def _CreatePromptApplication(self, config, multiline):\n\n return pt_application.Application(\n layout=layout.CreatePromptLayout(\n config=config,\n extra_input_processors=[Context()],\n get_bottom_status_tokens=self._GetBottomStatusTokens,\n get_bottom_toolbar_tokens=self._GetBottomToolbarTokens,\n get_continuation_tokens=None,\n get_debug_tokens=self._GetDebugTokens,\n get_prompt_tokens=None,\n is_password=False,\n lexer=None,\n multiline=filters.Condition(lambda cli: multiline()),\n show_help=filters.Condition(\n lambda _: self.key_bindings.help_key.toggle),\n wrap_lines=True,\n ),\n buffer=self.default_buffer,\n clipboard=None,\n erase_when_done=False,\n get_title=None,\n key_bindings_registry=self.key_bindings_registry,\n mouse_support=False,\n reverse_vi_search_direction=True,\n style=interactive_style.GetDocumentStyle(),\n )", "def history():\n files = os.listdir(app.config['SEGMENTS_FOLDER'])\n if len(files) <= 3:\n flash('There is no history yet', 'warning')\n return redirect(url_for('home'))\n\n range_list, segments_list, full_track_dict_list = generate_track_and_segments_data(app, files)\n\n return render_template(\"history.html\", segments_list=segments_list,\n full_track_dict_list=full_track_dict_list,\n range_list=range_list,\n title=\"history\")", "def stdin(config, input):\n if input == 'history':\n home = str(Path.home())\n with open(home + '/.bash_history', 'r') as file:\n output = file.read()\n input = None\n m = SearchMatches(input, output, config.regex, config.color, config.underline)\n m.print_match_lines()\n else:\n input = shlex.split(input)\n output = subprocess.check_output(input).decode('ascii')\n input = None\n m = SearchMatches(input, output, config.regex, config.color, config.underline)\n m.print_match_lines()", "def getline(number):\n number = int(number)\n return readline.get_history_item(number)", "def show_input_history(self):\n # copy with user multifilter\n pass", "def 
_grab_history(self):\n self.data['history_lines'] = []\n self.data['history_file'] = None\n self.data['history_encoding'] = None\n self.data['headings'] = []\n self.data['history_last_release'] = ''\n self.data['history_insert_line_here'] = 0\n default_location = None\n config = self.setup_cfg.config\n if config and config.has_option('zest.releaser', 'history_file'):\n default_location = config.get('zest.releaser', 'history_file')\n history_file = self.vcs.history_file(location=default_location)\n self.data['history_file'] = history_file\n if not history_file:\n logger.warn(\"No history file found\")\n return\n logger.debug(\"Checking %s\", history_file)\n history_lines, history_encoding = read_text_file(history_file)\n history_lines = history_lines.split('\\n')\n headings = utils.extract_headings_from_history(history_lines)\n if not headings:\n logger.warn(\"No detectable version heading in the history \"\n \"file %s\", history_file)\n return\n self.data['history_lines'] = history_lines\n self.data['history_encoding'] = history_encoding\n self.data['headings'] = headings\n\n # Grab last header.\n start = headings[0]['line']\n if len(headings) > 1:\n # Include the next header plus underline, as this is nice\n # to show in the history_last_release.\n end = headings[1]['line'] + 2\n else:\n end = len(history_lines)\n history_last_release = '\\n'.join(history_lines[start:end])\n self.data['history_last_release'] = history_last_release\n\n # Add line number where an extra changelog entry can be inserted. Can\n # be useful for entry points. 'start' is the header, +1 is the\n # underline, +2 is probably an empty line, so then we should take +3.\n # Or rather: the first non-empty line.\n insert = start + 2\n while insert < end:\n if history_lines[insert].strip():\n break\n insert += 1\n self.data['history_insert_line_here'] = insert", "def on_executeButton_clicked(self):\n lines = []\n for index in range(self.historyList.count()):\n # selectedItems() doesn't seem to preserve the order\n itm = self.historyList.item(index)\n if itm.isSelected():\n lines.append(itm.text())\n cmds = os.linesep.join(lines) + os.linesep\n self.__shell.executeLines(\n cmds,\n historyIndex=self.historyList.currentRow())\n \n # reload the list because shell modified it\n self.on_reloadButton_clicked()", "def history_keyboard(language_code):\n keyboard = [\n [\n make_button(buttons.HISTORY_INTERNATIONAL, language_code),\n ],\n [\n make_button(buttons.HISTORY_SCIENCE, language_code),\n ],\n [\n make_button(buttons.HISTORY_EU_PROBLEMS, language_code),\n ],\n [\n make_button(buttons.HISTORY_CULTURE, language_code),\n ],\n [\n make_button(buttons.HISTORY_REFORMS, language_code),\n ],\n [\n make_button(buttons.HISTORY_STATEHOOD, language_code),\n ],\n [\n make_button(buttons.HISTORY_ALL, language_code),\n ],\n [\n make_button(buttons.EXIT_PARAMETERS, language_code),\n make_button(buttons.COURSES_RETURN, language_code),\n ],\n ]\n return InlineKeyboardMarkup(inline_keyboard=keyboard)", "def _hist_create_parser():\n p = argparse.ArgumentParser(prog='history',\n description='Tools for dealing with history')\n subp = p.add_subparsers(title='action', dest='action')\n # session action\n show = subp.add_parser('show', prefix_chars='-+',\n help='displays session history, default action')\n show.add_argument('-r', dest='reverse', default=False,\n action='store_true', help='reverses the direction')\n show.add_argument('-n', dest='numerate', default=False, action='store_true',\n help='numerate each command')\n show.add_argument('-t', 
dest='timestamp', default=False,\n action='store_true', help='show command timestamps')\n show.add_argument('-T', dest='end_time', default=None,\n help='show only commands before timestamp')\n show.add_argument('+T', dest='start_time', default=None,\n help='show only commands after timestamp')\n show.add_argument('-f', dest='datetime_format', default=None,\n help='the datetime format to be used for filtering and printing')\n show.add_argument('session', nargs='?', choices=_HIST_SESSIONS.keys(), default='session',\n help='Choose a history session, defaults to current session')\n show.add_argument('slices', nargs='*', default=None,\n help='display history entries or range of entries')\n # 'id' subcommand\n subp.add_parser('id', help='displays the current session id')\n # 'file' subcommand\n subp.add_parser('file', help='displays the current history filename')\n # 'info' subcommand\n info = subp.add_parser('info', help=('displays information about the '\n 'current history'))\n info.add_argument('--json', dest='json', default=False,\n action='store_true', help='print in JSON format')\n # diff\n diff = subp.add_parser('diff', help='diffs two xonsh history files')\n _dh_create_parser(p=diff)\n # replay, dynamically\n from xonsh import replay\n rp = subp.add_parser('replay', help='replays a xonsh history file')\n replay._rp_create_parser(p=rp)\n _HIST_MAIN_ACTIONS['replay'] = replay._rp_main_action\n # gc\n gcp = subp.add_parser(\n 'gc', help='launches a new history garbage collector')\n gcp.add_argument('--size', nargs=2, dest='size', default=None,\n help=('next two arguments represent the history size and '\n 'units; e.g. \"--size 8128 commands\"'))\n bgcp = gcp.add_mutually_exclusive_group()\n bgcp.add_argument('--blocking', dest='blocking', default=True,\n action='store_true',\n help=('ensures that the gc blocks the main thread, '\n 'default True'))\n bgcp.add_argument('--non-blocking', dest='blocking', action='store_false',\n help='makes the gc non-blocking, and thus return sooner')\n return p", "def test_previousLineWithoutHistory(self):\n s = 'hello world'\n self.widget.buffer = s\n self.widget.setInputHistory(History([]))\n self.widget.keystrokeReceived('\\x10', None)\n self.assertEqual(self.widget.buffer, s)\n self.assertEqual(self.widget.cursor, 0)\n self.assertEqual(self.widget.getInputHistory(), [])", "def index(self):\n top, bottom = self.margins\n\n if self.cursor.y == bottom:\n self.history.top.append(self.buffer[top])\n\n super(CustomHistoryScreen, self).index()", "def CreateConsole(self):\n lc = launcher.TextFrame('title')\n return lc", "def history():\n rows = db.execute(\"SELECT * FROM histories WHERE id=:id\", id=session[\"user_id\"])\n\n return render_template(\"history.html\", rows=rows)", "def get_history():\n return response_texts_to_entries(make_post_request(HISTORY_API, data={\"k\": config[\"api_key\"]}))", "def history():\n history = db.execute(\"SELECT * from history WHERE id=:id\", id=session[\"user_id\"])\n\n return render_template(\"history.html\", history = history)", "def create_history_form(self):\n history_items = [json.loads(item) for item in self.history]\n\n history_form_items = []\n for item in history_items:\n history_form_items.append(GameHistoryForm(guess=item.get('guess'),\n result=item.get('result')))\n\n return GameHistoryForms(history=history_form_items)", "def history():\n return apology(\"TODO\")", "def history():\n return apology(\"TODO\")", "def history():\n return apology(\"TODO\")", "def add_history(self):\n # add separator, if there already are 
history entries\n if self.parentApp.History != '':\n self.parentApp.History += (\n '\\n\\n--- --- --- --- --- --- --- --- --- --- --- ---\\n\\n'\n )\n\n # add the transaction to it\n self.parentApp.History += self.parentApp.tmpTransC.to_str()", "def get_history(page):\n headings = page.filter_headings()\n idx = [i for i, head in enumerate(headings) \n if 'History' in head or 'history' in head]\n if not idx:\n return \"\"\n sections = page.get_sections(include_headings=True)\n history = str(sections[idx[0]+1].strip_code())\n return history", "def history(self, update, context):\n\n message = update.message.text.lower().split(\" \")\n user = self.User(update)\n output = \"\"\n if message[1] == \"show\":\n if not self.data_base.has_history(user):\n output = \"you don't have any history\"\n self.data_base.log(user, update.message.text, output)\n else:\n output = self.data_base.show_history(user)\n if len(output) > 4096:\n output = output[-4096::]\n self.data_base.log(user, update.message.text, \"Successfully showed history\")\n\n elif message[1] == \"clear\":\n if not self.data_base.has_history(user):\n output = \"your history is already clean\"\n else:\n self.data_base.clear_history(user)\n output = \"Clean\"\n self.data_base.log(user, update.message.text, output)\n else:\n output = \"Looks like you have a little mistake\\n\" \\\n \"the correct way of using the /history command is:\\n\" \\\n \"/history show\\n\" \\\n \"/history clear\"\n self.data_base.log(user, update.message.text, output)\n user.send_message(output)" ]
[ "0.6645085", "0.6400172", "0.63021773", "0.6147445", "0.6069044", "0.600494", "0.5942434", "0.57836646", "0.5758709", "0.5725173", "0.56594366", "0.5636762", "0.5608617", "0.5605935", "0.5590616", "0.557176", "0.55626994", "0.5554046", "0.55293834", "0.5527644", "0.54987985", "0.5497268", "0.54849327", "0.54530674", "0.54512197", "0.54512197", "0.54512197", "0.54305714", "0.5423435", "0.54082024" ]
0.7123608
0
When the cursor changes in the default buffer. Synchronize with history buffer.
def default_buffer_pos_changed():
    # Only when this buffer has the focus.
    if buffer_mapping.focus_stack[-1] == DEFAULT_BUFFER:
        try:
            line_no = default_buffer.document.cursor_position_row - \
                history_mapping.result_line_offset

            if line_no < 0:  # When the cursor is above the inserted region.
                raise IndexError

            history_lineno = sorted(history_mapping.selected_lines)[line_no]
        except IndexError:
            pass
        else:
            history_buffer.cursor_position = \
                history_buffer.document.translate_row_col_to_index(history_lineno, 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def history_buffer_pos_changed():\n # Only when this buffer has the focus.\n if buffer_mapping.focus_stack[-1] == HISTORY_BUFFER:\n line_no = history_buffer.document.cursor_position_row\n\n if line_no in history_mapping.selected_lines:\n default_lineno = sorted(history_mapping.selected_lines).index(line_no) + \\\n history_mapping.result_line_offset\n\n default_buffer.cursor_position = \\\n default_buffer.document.translate_row_col_to_index(default_lineno, 0)", "def test_editBufferSaved(self):\n s1 = \"hello\"\n s2 = \"world\"\n n = 3\n history = History([s1])\n self.widget.buffer = s2\n self.widget.cursor = n\n self.widget.setInputHistory(history)\n self.widget.keystrokeReceived('\\x10', None)\n self.assertEqual(self.widget.buffer, s1)\n self.widget.keystrokeReceived('\\x0e', None)\n self.assertEqual(self.widget.buffer, s2)\n self.assertEqual(self.widget.cursor, n)", "def test_previousLine(self):\n s = 'hello world'\n self.widget.buffer = s\n self.widget.setInputHistory(History(['first', 'second', 'last']))\n self.widget.keystrokeReceived('\\x10', None)\n self.assertEqual(self.widget.buffer, 'last')\n self.assertEqual(self.widget.cursor, 0)", "def test_previousLineEmptyBuffer(self):\n s = 'hello world'\n self.widget.setInputHistory(History([s]))\n self.widget.keystrokeReceived('\\x10', None)\n self.assertEqual(self.widget.getInputHistory(), [s])\n self.assertEqual(self.widget.buffer, s)\n self.assertEqual(self.widget.cursor, 0)", "def test_previousLineWithoutHistory(self):\n s = 'hello world'\n self.widget.buffer = s\n self.widget.setInputHistory(History([]))\n self.widget.keystrokeReceived('\\x10', None)\n self.assertEqual(self.widget.buffer, s)\n self.assertEqual(self.widget.cursor, 0)\n self.assertEqual(self.widget.getInputHistory(), [])", "def setDefaultCursorPosition(self):\n self.srcEditor.setFocus()\n self.srcEditor.setCursorPosition(0,0)", "def onCursorChanged(self, view):\n if not self.settingCursor:\n row = view.get_cursor()[0]\n i = self.model.get_iter(row)\n event = self.model.get(i, 9)[0]\n self.notifyHilightChanged(event)", "def test_enterAppendsHistory(self):\n s = 'hello world'\n self.widget.keystrokeReceived('\\r', None)\n self.assertEqual(self.widget.getInputHistory(), [])\n self.widget.buffer = s\n self.widget.keystrokeReceived('\\r', None)\n self.assertEqual(self.widget.getInputHistory(), [s])", "def _update_cursor(self) -> None:\n # get the brush size (get a local reference in case another process\n # changes it between the different accesses in this method)\n brush_size = self.brush_size\n # if there is not update, return\n if not self.is_cursor_change:\n return\n # otherwise dequeue the update\n self.is_cursor_change = False\n # make a static border ring for the cursor\n ring = make_ring(brush_size - 1, brush_size)\n cursor = make_cursor(ring, self._brush_border_color)\n # make a circle with the current color\n brush_circle = make_circle(brush_size) - ring\n cursor = cursor + make_cursor(brush_circle, self._color)\n # create the pyglet cursor object and set it\n mouse = pyglet_cursor(cursor)\n self._view.set_cursor(mouse)", "def _(event):\n event.cli.push_focus(SYSTEM_BUFFER)", "def test_previousLineTwice(self):\n s = 'hello world'\n self.widget.buffer = s\n self.widget.setInputHistory(History(['first', 'second', 'last']))\n self.widget.keystrokeReceived('\\x10', None)\n self.widget.keystrokeReceived('\\x10', None)\n self.assertEqual(self.widget.buffer, 'second')\n self.assertEqual(self.widget.cursor, 0)", "def create_history_application(python_input, 
original_document):\n history_mapping = HistoryMapping(python_input.history, original_document)\n\n def default_buffer_pos_changed():\n \"\"\" When the cursor changes in the default buffer. Synchronize with\n history buffer. \"\"\"\n # Only when this buffer has the focus.\n if buffer_mapping.focus_stack[-1] == DEFAULT_BUFFER:\n try:\n line_no = default_buffer.document.cursor_position_row - \\\n history_mapping.result_line_offset\n\n if line_no < 0: # When the cursor is above the inserted region.\n raise IndexError\n\n history_lineno = sorted(history_mapping.selected_lines)[line_no]\n except IndexError:\n pass\n else:\n history_buffer.cursor_position = \\\n history_buffer.document.translate_row_col_to_index(history_lineno, 0)\n\n def history_buffer_pos_changed():\n \"\"\" When the cursor changes in the history buffer. Synchronize. \"\"\"\n # Only when this buffer has the focus.\n if buffer_mapping.focus_stack[-1] == HISTORY_BUFFER:\n line_no = history_buffer.document.cursor_position_row\n\n if line_no in history_mapping.selected_lines:\n default_lineno = sorted(history_mapping.selected_lines).index(line_no) + \\\n history_mapping.result_line_offset\n\n default_buffer.cursor_position = \\\n default_buffer.document.translate_row_col_to_index(default_lineno, 0)\n\n history_buffer = Buffer(\n initial_document=Document(history_mapping.concatenated_history),\n on_cursor_position_changed=Callback(history_buffer_pos_changed),\n accept_action=AcceptAction(\n lambda cli, buffer: cli.set_return_value(default_buffer.document)),\n read_only=True)\n\n default_buffer = Buffer(\n initial_document=history_mapping.get_new_document(),\n on_cursor_position_changed=Callback(default_buffer_pos_changed),\n read_only=True)\n\n help_buffer = Buffer(\n initial_document=Document(HELP_TEXT, 0),\n accept_action=AcceptAction.IGNORE,\n read_only=True\n )\n\n buffer_mapping = BufferMapping({\n HISTORY_BUFFER: history_buffer,\n DEFAULT_BUFFER: default_buffer,\n HELP_BUFFER: help_buffer,\n }, initial=HISTORY_BUFFER)\n\n application = Application(\n layout=create_layout(python_input, history_mapping),\n use_alternate_screen=True,\n buffers=buffer_mapping,\n style=python_input._current_style,\n mouse_support=Condition(lambda cli: python_input.enable_mouse_support),\n key_bindings_registry=create_key_bindings(python_input, history_mapping)\n )\n return application", "def onCursorPositionChanged (self , ln, col):\n self.viewer().CursorPositionChanged.emit( ln, col )", "def slot_history_changed(self, history, _dummy):\r\n pass", "def slot_history_changed(self, _sender, _data):\r\n self.change_type = TYPE_HISTORY\r\n self.do_paint()\r\n self.change_type = None", "def __cursorChanged(self, fn, line, pos, editor):\n enc = editor.getEncoding()\n lang = editor.getLanguage()\n eol = editor.getEolIndicator()\n self.__setSbFile(fn, line, pos, enc, lang, eol)\n self.cursorChanged.emit(editor)", "def test_previousLineEmptyBufferWithoutHistory(self):\n self.widget.keystrokeReceived('\\x10', None)\n self.assertEqual(self.widget.getInputHistory(), [])\n self.assertEqual(self.widget.buffer, '')\n self.assertEqual(self.widget.cursor, 0)", "def change_cursor(self, cursor):\n self.setCursor(cursor)", "def update_launcher(self):\n if not self.misc.bufwinnr(self.name):\n self.open_launcher()\n\n self.mapper.clear()\n self.clear_highlighting()\n self.misc.go_to_win(self.misc.bufwinnr(self.name))\n self.misc.set_buffer(None)\n\n buffer_list = sorted(self.buffers_with_matches())\n if not self.view_buffer:\n self.view_buffer = self.curr_buf.number\n\n 
i = buffer_list.index(self.view_buffer)\n buf_prev = buffer_list[-1 if not i else i - 1]\n buf_next = buffer_list[0 if i == len(buffer_list) - 1 else i + 1]\n\n vim.command(\"setlocal stl=\\ \\ <-\\ {0}\\ \\ [{1}]\\ \\ {2}\\ ->\\ \\ \".format(\n os.path.split(self.misc.bufname(buf_prev))[1].replace(' ', '\\\\'),\n os.path.split(self.misc.bufname(self.view_buffer))[1].replace(' ', '\\\\'),\n os.path.split(self.misc.bufname(buf_next))[1].replace(' ', '\\\\')))\n\n # self.matches = {'bufname': [(linenr, col, line), ...], ...}\n if self.find_new_matches:\n if not self.cache:\n self.search(self.input_so_far)\n self.cache = list(self.matches)\n\n _matches = self.matches[self.view_buffer]\n if _matches:\n if self.view_buffer == self.curr_buf.number:\n pos = bisect.bisect_left(_matches, self.curr_buf_pos)\n _matches.insert(pos, self.curr_buf_pos)\n else:\n _matches = self.matches[self.view_buffer]\n\n if _matches:\n self.misc.set_buffer(\n [self.render_line(m, j) for j, m in enumerate(_matches)])\n\n # set the position to the current line\n if self.find_new_matches:\n if self.view_buffer == self.curr_buf.number:\n self.launcher_curr_pos = pos\n else:\n self.launcher_curr_pos = 0\n\n if self.launcher_curr_pos is not None:\n length = len(vim.current.buffer)\n if self.launcher_curr_pos >= length:\n self.launcher_curr_pos = length - 1\n vim.current.window.cursor = (self.launcher_curr_pos + 1, 1)\n\n self.render_curr_line()\n self.highlight()\n\n # adjust the window height according to the total\n # number of matches\n n = len(_matches)\n if n > self.max_height:\n vim.current.window.height = self.max_height\n else:\n vim.current.window.height = n\n\n vim.command(\"normal! zz\")\n\n else:\n vim.command('syntax clear')\n self.misc.set_buffer([' nothing found...'])\n vim.current.window.height = 1\n self.launcher_curr_pos = 0", "def watchCursor(self, cursor):\n cursor.observers.append(self._cursorCallback)", "def _set_cursor(self, cursor):\n self._cursor = cursor", "def append_cursor_enter_callback(self):", "def slot_history_changed(self, _sender, _data):\r\n last_candle = self.history.last_candle()\r\n if last_candle:\r\n self.client.history_last_candle = last_candle.tim", "def setEditCursor(self, event):\n self.editMode = True\n self.updateCursor(\"X_cursor\")\n self.changeColor(self.lastChanged, self.colors['pentomino'])\n x = (event.y - self.margin) // self.cellSize\n y = (event.x - self.margin) // self.cellSize\n if not (0 <= x < self.rows and 0 <= y < self.cols):\n return\n if not self.gridBusy[x][y]:\n return\n assert len(self.history) >= self.gridBusy[x][y]\n self.lastChanged = self.gridBusy[x][y]\n self.changeColor(self.lastChanged, self.colors['pent_edit'])", "def _on_head_changed(self, change):\n if change.new:\n self._update_head_history()", "def cursorUpdate(self):\n if self.__table[self.__cursor - 1] == 0:\n self.__cursor += 1", "def refresh_mark(self):\n current = self.player.current_position()\n if current != None:\n if self.prev_song != None and self.prev_song < len(self.buf):\n self.buf[self.prev_song] = ' ' + self.buf[self.prev_song][1:]\n self.buf[current] = '-' + self.buf[current][1:]\n self.prev_song = current\n # Move cursor to current position.\n vim.current.window.cursor = (current + 1, 1)", "def test_nextLine(self):\n s = 'hello world'\n self.widget.buffer = s\n history = History(['first', 'second', 'last'])\n history.previousLine()\n history.previousLine()\n self.widget.setInputHistory(history)\n self.widget.keystrokeReceived('\\x0e', None)\n self.assertEqual(self.widget.buffer, 
'last')\n self.assertEqual(self.widget.cursor, 0)", "def cursor_placement_thread(self):\r\n while self.editing:\r\n # pylint: disable=W0212\r\n with goxapi.Signal._lock:\r\n curses.curs_set(2)\r\n self.win.touchwin()\r\n self.win.refresh()\r\n time.sleep(0.1)\r\n curses.curs_set(0)", "def index(self):\n top, bottom = self.margins\n\n if self.cursor.y == bottom:\n self.history.top.append(self.buffer[top])\n\n super(CustomHistoryScreen, self).index()" ]
[ "0.7898416", "0.68018246", "0.62982154", "0.6297645", "0.6279367", "0.6243511", "0.6192357", "0.6175505", "0.61451435", "0.6135683", "0.6130657", "0.6118235", "0.61064404", "0.6103579", "0.606284", "0.60550404", "0.59968597", "0.59947914", "0.5979873", "0.5979297", "0.5941482", "0.5907574", "0.5883547", "0.58706117", "0.58201146", "0.5781811", "0.57732433", "0.5728141", "0.57265586", "0.57031274" ]
0.7823026
1
When the cursor changes in the history buffer. Synchronize.
def history_buffer_pos_changed():
    # Only when this buffer has the focus.
    if buffer_mapping.focus_stack[-1] == HISTORY_BUFFER:
        line_no = history_buffer.document.cursor_position_row

        if line_no in history_mapping.selected_lines:
            default_lineno = sorted(history_mapping.selected_lines).index(line_no) + \
                history_mapping.result_line_offset

            default_buffer.cursor_position = \
                default_buffer.document.translate_row_col_to_index(default_lineno, 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def slot_history_changed(self, history, _dummy):\r\n pass", "def slot_history_changed(self, _sender, _data):\r\n last_candle = self.history.last_candle()\r\n if last_candle:\r\n self.client.history_last_candle = last_candle.tim", "def slot_history_changed(self, _sender, _data):\r\n self.change_type = TYPE_HISTORY\r\n self.do_paint()\r\n self.change_type = None", "def watchCursor(self, cursor):\n cursor.observers.append(self._cursorCallback)", "def _on_head_changed(self, change):\n if change.new:\n self._update_head_history()", "def history():", "def test_editBufferSaved(self):\n s1 = \"hello\"\n s2 = \"world\"\n n = 3\n history = History([s1])\n self.widget.buffer = s2\n self.widget.cursor = n\n self.widget.setInputHistory(history)\n self.widget.keystrokeReceived('\\x10', None)\n self.assertEqual(self.widget.buffer, s1)\n self.widget.keystrokeReceived('\\x0e', None)\n self.assertEqual(self.widget.buffer, s2)\n self.assertEqual(self.widget.cursor, n)", "def default_buffer_pos_changed():\n # Only when this buffer has the focus.\n if buffer_mapping.focus_stack[-1] == DEFAULT_BUFFER:\n try:\n line_no = default_buffer.document.cursor_position_row - \\\n history_mapping.result_line_offset\n\n if line_no < 0: # When the cursor is above the inserted region.\n raise IndexError\n\n history_lineno = sorted(history_mapping.selected_lines)[line_no]\n except IndexError:\n pass\n else:\n history_buffer.cursor_position = \\\n history_buffer.document.translate_row_col_to_index(history_lineno, 0)", "def history(self, history):\n self._history = history", "def onCursorPositionChanged (self , ln, col):\n self.viewer().CursorPositionChanged.emit( ln, col )", "def test_previousLine(self):\n s = 'hello world'\n self.widget.buffer = s\n self.widget.setInputHistory(History(['first', 'second', 'last']))\n self.widget.keystrokeReceived('\\x10', None)\n self.assertEqual(self.widget.buffer, 'last')\n self.assertEqual(self.widget.cursor, 0)", "def index(self):\n top, bottom = self.margins\n\n if self.cursor.y == bottom:\n self.history.top.append(self.buffer[top])\n\n super(CustomHistoryScreen, self).index()", "def test_previousLineWithoutHistory(self):\n s = 'hello world'\n self.widget.buffer = s\n self.widget.setInputHistory(History([]))\n self.widget.keystrokeReceived('\\x10', None)\n self.assertEqual(self.widget.buffer, s)\n self.assertEqual(self.widget.cursor, 0)\n self.assertEqual(self.widget.getInputHistory(), [])", "def history(self, history):\n\n self._history = history", "def onRegisterHistory(self):\n pass", "def test_previousLineTwice(self):\n s = 'hello world'\n self.widget.buffer = s\n self.widget.setInputHistory(History(['first', 'second', 'last']))\n self.widget.keystrokeReceived('\\x10', None)\n self.widget.keystrokeReceived('\\x10', None)\n self.assertEqual(self.widget.buffer, 'second')\n self.assertEqual(self.widget.cursor, 0)", "def __cursorChanged(self, fn, line, pos, editor):\n enc = editor.getEncoding()\n lang = editor.getLanguage()\n eol = editor.getEolIndicator()\n self.__setSbFile(fn, line, pos, enc, lang, eol)\n self.cursorChanged.emit(editor)", "def cursorUpdate(self):\n if self.__table[self.__cursor - 1] == 0:\n self.__cursor += 1", "def Modifier_History(self):\n\t\tpass", "def onCursorChanged(self, view):\n if not self.settingCursor:\n row = view.get_cursor()[0]\n i = self.model.get_iter(row)\n event = self.model.get(i, 9)[0]\n self.notifyHilightChanged(event)", "def test_nextLine(self):\n s = 'hello world'\n self.widget.buffer = s\n history = History(['first', 'second', 'last'])\n 
history.previousLine()\n history.previousLine()\n self.widget.setInputHistory(history)\n self.widget.keystrokeReceived('\\x0e', None)\n self.assertEqual(self.widget.buffer, 'last')\n self.assertEqual(self.widget.cursor, 0)", "def test_previousLineEmptyBuffer(self):\n s = 'hello world'\n self.widget.setInputHistory(History([s]))\n self.widget.keystrokeReceived('\\x10', None)\n self.assertEqual(self.widget.getInputHistory(), [s])\n self.assertEqual(self.widget.buffer, s)\n self.assertEqual(self.widget.cursor, 0)", "def test_enterAppendsHistory(self):\n s = 'hello world'\n self.widget.keystrokeReceived('\\r', None)\n self.assertEqual(self.widget.getInputHistory(), [])\n self.widget.buffer = s\n self.widget.keystrokeReceived('\\r', None)\n self.assertEqual(self.widget.getInputHistory(), [s])", "def test_historyPositionResetByReturn(self):\n s1 = \"hello\"\n s2 = \"world\"\n s3 = \"goodbye\"\n history = History([s1, s2, s3])\n self.widget.setInputHistory(history)\n self.widget.keystrokeReceived('\\x10', None) # put s3 up\n self.widget.keystrokeReceived('\\x10', None) # put s2 up\n self.widget.keystrokeReceived('\\r', None) # submit s2\n\n # s2 should be the previous line now, since it was added to the input\n # history and the input history position was reset.\n self.assertEqual(history.previousLine(), s2)\n\n # Followed by s3, s2, and s1\n self.assertEqual(history.previousLine(), s3)\n self.assertEqual(history.previousLine(), s2)\n self.assertEqual(history.previousLine(), s1)", "def after_cursor_next(self, cursor):\n pass", "def history_go(self, relative):\n self.thistab.history_go(int(relative))", "def update_history(self, play, coplay):\r\n self.history.append(play, coplay)\r\n self.base.history.append(play,coplay)\r\n self.trust.history.append(play,coplay)\r\n self.conviction.history.append(play,coplay)", "def update_history(self, move):\r\n player_number = self.player_numbers[self.current_player]\r\n heaps = tuple(self.heaps)\r\n self.history.append([player_number, heaps, move])", "def record_history_on_commit(self,\n clocked: 'Clocked',\n changes: dict,\n session: orm.Session,\n timestamp: dt.datetime):\n new_tick = self._get_new_tick(clocked)\n\n new_clock = self.make_clock(timestamp, new_tick)\n attr = {'entity': clocked}\n\n for prop, cls in self.history_models.items():\n if prop in changes:\n value = changes[prop]\n\n self._cap_previous_history_row(clocked, new_clock, cls)\n\n # Add new history row\n hist = attr.copy()\n hist[prop.key] = value\n session.add(\n cls(\n vclock=new_clock.vclock,\n effective=new_clock.effective,\n **hist,\n ),\n )", "def test_nextLineTwice(self):\n s = 'hello world'\n self.widget.buffer = s\n history = History(['first', 'second', 'last'])\n history.previousLine()\n history.previousLine()\n history.previousLine()\n self.widget.setInputHistory(history)\n self.widget.keystrokeReceived('\\x0e', None)\n self.widget.keystrokeReceived('\\x0e', None)\n self.assertEqual(self.widget.buffer, 'last')\n self.assertEqual(self.widget.cursor, 0)" ]
[ "0.71576846", "0.6885786", "0.68556553", "0.66171944", "0.6570386", "0.6502582", "0.6494127", "0.6404436", "0.63687605", "0.63419145", "0.63144755", "0.62715435", "0.62354267", "0.6198887", "0.6184626", "0.61757636", "0.607954", "0.6064025", "0.6047204", "0.598927", "0.59868395", "0.5974034", "0.59608275", "0.5946222", "0.5944546", "0.58880883", "0.586409", "0.58596563", "0.5856646", "0.5848261" ]
0.73001057
0
Update the fuel level.
def update_fuel_level(self, new_level):
    if new_level <= self.fuel_capacity:
        self.fuel_level = new_level
    else:
        print("The tank can't hold that much!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_fuel_level(self, new_level):\r\n if new_level <= self.fuel_capacity:\r\n self.fuel_level = new_level\r\n else:\r\n print(\"The tank can't hold that much!\")", "def fill_tank(self):\r\n self.fuel_level = self.fuel_capacity", "def upgrage_level(self):\n print('level is upgraded on one point')\n self.level += 1", "def fill_tank(self):\r\n self.fuel_level = self.fuel_capacity\r\n print(\"Fuel tank is full\")", "def upgrade(self):\n if self.level < len(self.tower_images):\n self.level_up_animation = True\n self.level += 1\n self.base_damage += 3\n self.damage = self.base_damage\n\n #Since level does not upgrade in menu we have to manually do it here\n self.menu.tower_level += 1", "def fill_tank(self):\n self.fuel_level = self.fuel_capacity\n print(\"Fuel tank is full.\")", "def fill_tank(self):\n self.fuel_level = self.fuel_capacity\n print(\"Fuel tank is full.\")", "def update(self):\n #self._light.update()\n #self._state = 'on' #self._light.is_on()\n #self._brightness = 80 #self._light.brightness\n _LOGGER.info(\"update() is called\")", "def add_fuel(self, amount):\n if (self.fuel_level + amount\n <= self.fuel_capacity):\n self.fuel_level += amount\n print(\"Added fuel.\")\n else:\n print(\"The tank won't hold that much.\")", "def level_upgrade(self, lvl):\n\t\tpass", "def add_fuel(self, amount):\n if (self.fuel_level + amount <= self.fuel_capacity):\n self.fuel_level += amount\n print(\"Added fuel to \"+ self.make + \".\")\n else:\n print(\"The tank won't hold that much.\")", "def update(self):\n\n bumperCode = self.robot.getBumperStatus()\n if bumperCode == 2: # Left side of bumper was hit\n self.setVector(0.4, 220)\n elif bumperCode == 1: # should be right\n self.setVector(0.4, 160)\n elif bumperCode == 3: # should be both\n self.setVector(0.4, 180)\n else:\n self.setVector(0.0, 0.0)", "def _update_level_data(self):\n\t\t# taxes, inhabitants\n\t\tself.tax_base = self.session.db.get_settler_tax_income(self.level)\n\t\tself.inhabitants_max = self.session.db.get_settler_inhabitants_max(self.level)\n\t\tif self.inhabitants > self.inhabitants_max: # crop settlers at level down\n\t\t\tself.inhabitants = self.inhabitants_max\n\n\t\t# consumption:\n\t\t# Settler productions are specified to be disabled by default in the db, so we can enable\n\t\t# them here per level.\n\t\tcurrent_lines = self.get_production_lines()\n\t\tfor (prod_line,) in self.session.db.get_settler_production_lines(self.level):\n\t\t\tif not self.has_production_line(prod_line):\n\t\t\t\tself.add_production_by_id(prod_line)\n\t\t\t# cross out the new lines from the current lines, so only the old ones remain\n\t\t\tif prod_line in current_lines:\n\t\t\t\tcurrent_lines.remove(prod_line)\n\t\tfor line in current_lines[:]: # iterate over copy for safe removal\n\t\t\t# all lines, that were added here but are not used due to the current level\n\t\t\tself.remove_production_by_id(line)\n\t\t# update instance graphics\n\t\tself.update_action_set_level(self.level)", "def _calculate_fuel(self):\n self._fuel = self._calculate_fuel_r(self._mass)", "def update(self):\n self.getPower()\n if self._state != STATE_OFF:\n self.getVolume()\n self.getCurrentChannel()", "def __change_level(self, level):\n self.level = level", "def update(self):\n self._brightness = self._lj.get_load_level(self._index) / 99 * 255", "def update_fodder(self):\n self.remaining_food['Herbivore'] += self.parameters['alpha'] * (\n self.parameters['f_max'] - self.remaining_food['Herbivore'])", "def update_gauge(self):\n pass # Do nothing", "def fill_up(self):\n 
self.fuel = self.gas_tank_size", "def update_fodder(self):\n self.remaining_food['Herbivore'] = self.parameters['f_max']", "def update_hp_for_higher_level(chosen_class,level):\n #Checks to see if your character is level 4,8,12,etc.\n def upgradedAbilityAt4(level):\n if level % 4 == 0:\n upgraded_ability = raw_input(\"Level \"+str(level)+\"!\\n Which two abilities would you like to upgrade? (Adds +1 to ability)\\n Please input two from str/dex/con/int/wis/cha with a space in between.\\n (ex: cha dex) \").split(' ')\n print\n #To write:\n #if either ability pushes ability score over 20, redo input\n\n \n for i in upgraded_ability:\n self.stealthUpdate(i,1)\n #class specific HP calculations\n if chosen_class == 'barbarian': \n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,12) + self.con + self.classMods[6]\n elif chosen_class == 'cleric':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,8) + self.con + self.classMods[6]\n elif chosen_class == 'druid':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,8) + self.con + self.classMods[6]\n elif chosen_class == 'fighter':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,10) + self.con + self.classMods[6]\n elif chosen_class == 'monk':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,8) + self.con + self.classMods[6]\n elif chosen_class == 'paladin':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,10) + self.con + self.classMods[6]\n elif chosen_class == 'ranger':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,10) + self.con + self.classMods[6]\n elif chosen_class == 'rogue':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,6) + self.con + self.classMods[6]\n elif chosen_class == 'wizard':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,6) + self.con + self.classMods[6]", "def update(self) -> None:\n state = int(self._light.is_on())\n self._state = bool(state)\n self._brightness = to_hass_level(state)", "def update(self, t=1):\n\n\t\t# Update acceleration based on available fuel & throttle\n\t\tthrottle = self.throttle if self.fuel >= 1 else self.throttle * self.fuel\n\t\tself.acceleration = self.thrust * throttle / self.total_mass - GRAVITY\n\n\t\tself.velocity += self.acceleration / t\n\t\tself.altitude += self.velocity / t\n\t\t\n\t\t# Collide with the ground\n\t\tif self.altitude <= 0:\n\t\t\tself.reset_motion()\n\n\t\t# Update remaining fuel\n\t\tif self.fuel * t >= self.throttle:\n\t\t\tself.fuel -= self.throttle * self.fuel_consumption / t\n\t\t\tif self.fuel < 0:\n\t\t\t\tself.fuel = 0", "def setLevel(self, level):\n self._autoLevelFunction = None\n level = float(level)\n if level != self._level:\n self._level = level\n self._updateScenePrimitive()\n self._updated(Item3DChangedType.ISO_LEVEL)", "def update(self, elapsed):\n delta = 35 * elapsed\n rel = self.behavior_system.robot.perception_system.get_releaser('threatening-stimulus-releaser')\n fear = self.behavior_system.robot.emotion_system.emotion_fear\n\n # TODO: incorporate fear emotion\n if rel.is_active() and self.behavior_system.robot.emotion_system.active_emotion == fear:\n self.activation_level = self.activation_level + delta\n else:\n self.activation_level = max(0, self.activation_level - delta)", "def increment_level(self):\n self.level += 1\n 
styled_set_label_text(self.level_display, \"Level: \"+str(self.level))\n glib.timeout_add(2000//(self.level+3), self.make_timer(self.level))", "def update(self):\n self.value = self.sensor.update()", "def update(self, elapsed):\n delta = 8 * elapsed\n rest = self.behavior_system.robot.drive_system.rest_drive\n\n if self.behavior_system.robot.drive_system.active_drive == rest:\n self.activation_level = self.activation_level + delta\n else:\n self.activation_level = max(0, self.activation_level - delta)", "def update_strength(self, strength):\n\n self.strength = strength\n self.right_leg.strength = strength\n self.finite_leg.strength = strength\n self.left_leg.strength = strength" ]
[ "0.8415923", "0.6688058", "0.66838443", "0.6623987", "0.6615976", "0.6509965", "0.6509965", "0.6485182", "0.64336383", "0.63165337", "0.6311251", "0.6284206", "0.62757164", "0.62296623", "0.6193649", "0.6104646", "0.6078069", "0.60302275", "0.60189354", "0.59883904", "0.5967881", "0.59541893", "0.59384996", "0.59264296", "0.59100693", "0.58398646", "0.583508", "0.5831489", "0.58262914", "0.58191395" ]
0.84342533
0
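
Illustrative sketch (not part of the dataset record above): a minimal way to exercise the positive `update_fuel_level` document. The `Car` wrapper class, its constructor, and the capacity value are assumptions added only so the snippet runs on its own.

class Car:
    """Hypothetical host class for the method shown in the record above."""

    def __init__(self, fuel_capacity):
        self.fuel_capacity = fuel_capacity
        self.fuel_level = 0

    def update_fuel_level(self, new_level):
        if new_level <= self.fuel_capacity:
            self.fuel_level = new_level
        else:
            print("The tank can't hold that much!")


car = Car(fuel_capacity=50)
car.update_fuel_level(30)   # accepted: 30 <= 50, so fuel_level becomes 30
car.update_fuel_level(80)   # rejected: prints the warning, fuel_level stays 30
print(car.fuel_level)       # -> 30
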
Add fuel to the tank.
def add_fuel(self, amount):
    if (self.fuel_level + amount
            <= self.fuel_capacity):
        self.fuel_level += amount
        print("Added fuel.")
    else:
        print("The tank won't hold that much.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_fuel(self, amount):\n if (self.fuel_level + amount <= self.fuel_capacity):\n self.fuel_level += amount\n print(\"Added fuel to \"+ self.make + \".\")\n else:\n print(\"The tank won't hold that much.\")", "def fill_tank(self):\r\n self.fuel_level = self.fuel_capacity", "def fill_tank(self):\n self.fuel_level = self.fuel_capacity\n print(\"Fuel tank is full.\")", "def fill_tank(self):\n self.fuel_level = self.fuel_capacity\n print(\"Fuel tank is full.\")", "def fill_tank(self):\r\n self.fuel_level = self.fuel_capacity\r\n print(\"Fuel tank is full\")", "def fill_up(self):\n self.fuel = self.gas_tank_size", "def _calculate_fuel(self):\n self._fuel = self._calculate_fuel_r(self._mass)", "def fill_tank(self):\n print(\"This car has no fuel tank!\")", "def add_to_water_level(self, amount):\n LandCell.add_to_water_level(self, amount)\n if self.water_level > 0:\n self.reset_food_level()", "def cargo_fuel(self, cargo_fuel):\n\n self._cargo_fuel = cargo_fuel", "def update_fuel_level(self, new_level):\n if new_level <= self.fuel_capacity:\n self.fuel_level = new_level\n else:\n print(\"The tank can't hold that much!\")", "def update_fuel_level(self, new_level):\r\n if new_level <= self.fuel_capacity:\r\n self.fuel_level = new_level\r\n else:\r\n print(\"The tank can't hold that much!\")", "def _add_bal(self):\n\n c = self.components\n p = self.pipes\n\n # TODO No mass flow reversal yet\n if self.temperature_driven:\n\n lines = self.params['lines'].v()\n\n self.block.mix_temp = Var(self.TIME, lines)\n\n def _temp_bal_incoming(b, t, l):\n\n incoming_comps = collections.defaultdict(list)\n incoming_pipes = collections.defaultdict(list)\n\n for name, comp in c.items():\n if value(comp.get_mflo(t)) >= 0:\n incoming_comps['supply'].append(name)\n else:\n incoming_comps['return'].append(name)\n\n for name, pipe in p.items():\n if value(pipe.get_edge_mflo(self.name, t)) >= 0:\n incoming_pipes['supply'].append(name)\n else:\n incoming_pipes['return'].append(name)\n # Zero mass flow rate:\n if value(\n sum(c[comp].get_mflo(t) for comp in incoming_comps[l]) + \\\n sum(p[pipe].get_edge_mflo(self.name, t) for pipe in\n incoming_pipes[l])) == 0:\n # mixed temperature is average of all joined pipes, actual value should not matter,\n # because packages in pipes of this time step will have zero size and components do not take over\n # mixed temperature in case there is no mass flow\n\n return b.mix_temp[t, l] == (\n sum(c[comp].get_temperature(t, l) for comp in c) +\n sum(p[pipe].get_temperature(self.name, t, l) for\n pipe in p)) / (\n len(p) + len(c))\n\n\n else: # mass flow rate through the node\n return (sum(\n c[comp].get_mflo(t) for comp in incoming_comps[l]) +\n sum(p[pipe].get_edge_mflo(self.name, t) for pipe in\n incoming_pipes[l])) * b.mix_temp[t, l] == \\\n sum(c[comp].get_mflo(t) * c[comp].get_temperature(t,\n l)\n for comp in incoming_comps[l]) + \\\n sum(p[pipe].get_edge_mflo(self.name, t) * p[\n pipe].get_edge_temperature(self.name, t, l)\n for pipe in incoming_pipes[l])\n\n self.block.def_mixed_temp = Constraint(self.TIME,\n lines,\n rule=_temp_bal_incoming)\n\n def _temp_bal_outgoing(b, t, l, comp):\n\n outgoing_comps = collections.defaultdict(list)\n outgoing_pipes = collections.defaultdict(list)\n\n for name, comp_obj in c.items():\n if comp_obj.get_mflo(t) >= 0:\n outgoing_comps['return'].append(name)\n else:\n outgoing_comps['supply'].append(name)\n\n for name, pipe_obj in p.items():\n if pipe_obj.get_edge_mflo(self.name, t) >= 0:\n outgoing_pipes['return'].append(name)\n else:\n 
outgoing_pipes['supply'].append(name)\n\n if t == 0:\n return Constraint.Skip\n if comp in outgoing_pipes[l]:\n return p[comp].get_edge_temperature(self.name, t, l) == \\\n b.mix_temp[t, l]\n elif comp in outgoing_comps[l]:\n return c[comp].get_temperature(t, l) == b.mix_temp[t, l]\n else:\n return Constraint.Skip\n\n self.block.outgoing_temp_comps = Constraint(self.TIME,\n lines,\n c.keys(),\n rule=_temp_bal_outgoing)\n self.block.outgoing_temp_pipes = Constraint(self.TIME,\n lines,\n p.keys(),\n rule=_temp_bal_outgoing)\n\n elif self.repr_days is None:\n\n def _heat_bal(b, t):\n return 0 == sum(\n self.components[i].get_heat(t) for i in self.components) \\\n + sum(\n pipe.get_edge_heat(self.name, t) for pipe in p.values())\n\n self.block.ineq_heat_bal = Constraint(self.TIME,\n rule=_heat_bal)\n\n def _mass_bal(b, t):\n return 0 == sum(\n self.components[i].get_mflo(t) for i in self.components) \\\n + sum(\n pipe.get_edge_mflo(self.name, t) for pipe in p.values())\n\n self.block.ineq_mass_bal = Constraint(self.TIME,\n rule=_mass_bal)\n\n else:\n def _heat_bal(b, t, c):\n return 0 == sum(\n self.components[i].get_heat(t, c) for i in\n self.components) \\\n + sum(\n pipe.get_edge_heat(self.name, t, c) for pipe in p.values())\n\n self.block.ineq_heat_bal = Constraint(self.TIME, self.REPR_DAYS,\n rule=_heat_bal)\n\n def _mass_bal(b, t, c):\n return 0 == sum(\n self.components[i].get_mflo(t, c) for i in\n self.components) \\\n + sum(\n pipe.get_edge_mflo(self.name, t, c) for pipe in p.values())\n\n self.block.ineq_mass_bal = Constraint(self.TIME, self.REPR_DAYS,\n rule=_mass_bal)", "def fuel_meter(self, fuel):\n fuel = min(100, fuel) # For the sake of meter, cap it\n # Add a block for each 10 % of life remaining\n block_count = int(math.ceil(fuel/10.0))\n\n images = self.world.assets.images # save typing\n frame, frame_size = images['meter_frame']\n surf = frame.copy() # Don't blit onto original frame\n block, block_size = images['health_block']\n # Adjust color for fuel meter\n block = block.copy()\n block.fill((150, 200, 50), special_flags=BLEND_RGB_MULT)\n for i in range(block_count):\n y = frame_size[1] - 4 - block_size[1] - (i * (block_size[1] - 2))\n surf.blit(block, (4, y))\n pos = (720, 100)\n return surf, pos", "def add_fuel(self, filename):\n with open(filename) as f:\n content = f.readlines()\n content = [x.strip() for x in content]\n\n y = 0\n for line in content:\n x_max = min(self.width - 1, len(line))\n for x in range(x_max):\n self.fuel[x][y] = int(ord(line[x]))\n\n y += 1\n if y >= self.height - 1:\n break", "def add_to_water_level(self, amount):\n self.water_level += amount\n if self.water_level < 0:\n self.water_level = 0.0", "def add_food_to_bag(self):\n self.food_eaten.set(sum([species.food.get() for species in self.ecosystem]))", "def add_to_frontier(self, path):\n if self.method == \"astar\":\n value = path.cost + self.problem.heuristic(path.end())\n if self.method == \"best\":\n value = self.problem.heuristic(path.end())\n if self.method == \"least-cost\":\n value = path.cost\n self.frontier.add(path, value)", "def add_tank(self, name, elevation=0.0, init_level=3.048,\n min_level=0.0, max_level=6.096, diameter=15.24,\n min_vol=0.0, vol_curve=None, overflow=False, coordinates=None):\n self._node_reg.add_tank(name, elevation, init_level, min_level, \n max_level, diameter, min_vol, vol_curve, \n overflow, coordinates)", "def add_fleet(self, index, *args, **kw):\n\n fleetid = self.fleets.append(ListNode(\"{0!s}\".format(kw.get(\"name\", \"Fleet {0:d}\".format(index))), [\n 
ListNode(\"Nodes\"),\n ListNode(\"Behaviours\", data=kw.get(\n \"behaviours\", self.defaults[2].get_data()))\n ])\n )\n for i in range(kw.get(\"nodes\", 1)):\n self.add_node(fleetid)", "def add_to_frontier(self,path):\n value = path.cost+self.problem.heuristic(path.end())\n self.frontier.add(path, value)", "def fill_gas_tank(self):\n print(\"Filling the tank for\", self.get_descriptive_name())", "def add(self, component):\n\n self.append(component)\n self.sum.Add(component.th1f)", "def add_food(self, _food):\n self.food.append(_food)", "def fill_gas_tank(self):\n print(\"\\nThis car need a gas tank!\")", "def addToPot(self, amount, index):\n\t\tself.pots[index] = self.pots[index] + amount", "def _calculate_fuel_simple(self):\n self._fuel_simple = (self.mass // 3) - 2", "def add_gear_piece(self):\n self.__num_gear_collected += 1", "def add_health(self, ammount: int):\n # A cool trick to do this fast\n self.current_health = min(\n self.current_health+ammount,\n self.total_health\n )", "def addItem(self, item):\n self.items.append(item)\n self.totalWeight += item" ]
[ "0.78126687", "0.70024645", "0.6612156", "0.6612156", "0.65950745", "0.6408894", "0.62886226", "0.6250818", "0.6184884", "0.6095201", "0.6018097", "0.60175323", "0.59844637", "0.5947288", "0.5943111", "0.5851397", "0.5788474", "0.5713844", "0.56987673", "0.56959385", "0.5613319", "0.5520372", "0.5514927", "0.54907614", "0.5473135", "0.5384396", "0.5370947", "0.5370579", "0.5363341", "0.53438836" ]
0.7887067
0
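
Illustrative sketch (not part of the dataset): a minimal, hypothetical wrapper for the `add_fuel` document above. The `Car` class and the capacity numbers are invented so the snippet runs on its own.

class Car:
    """Hypothetical host class for the method shown in the record above."""

    def __init__(self, fuel_capacity, fuel_level=0):
        self.fuel_capacity = fuel_capacity
        self.fuel_level = fuel_level

    def add_fuel(self, amount):
        if (self.fuel_level + amount
                <= self.fuel_capacity):
            self.fuel_level += amount
            print("Added fuel.")
        else:
            print("The tank won't hold that much.")


car = Car(fuel_capacity=50, fuel_level=40)
car.add_fuel(5)        # fits: prints "Added fuel.", fuel_level becomes 45
car.add_fuel(20)       # would overflow: prints the refusal message
print(car.fuel_level)  # -> 45
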
Fully charge the vehicle.
def charge(self):
    self.battery.charge_level = 100
    print("The vehicle is fully charged.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def recharge(self):\n self.battery=self.full\n return self", "def charge(self):\r\n\r\n self.charge_level = 100\r\n print(\"The battery is fully charged.\")", "def charge(self):\r\n\r\n self.charge_level = 100\r\n print(\"The battery is fully charged.\")", "def update(self):\r\n super().update()\r\n if self.stopped and self.charge < CHARGE_MAX:\r\n self.charge += 1", "def chargeBatteries(self):\n self.currentBattery = self.maxBattery", "def drive(self, kilometres_driven):\n self.fuel -= (self.litres_per_kilometre * kilometres_driven)", "async def async_return_to_base(self, **kwargs: Any) -> None:\n await self._vacuum_bot.execute_command(Charge())", "def fill_up(self):\n self.fuel = self.gas_tank_size", "def charge(self, charge):\n self._charge = charge", "def update_capacity(self, curr_velocity, time, angle):\n gained_energy = self.recharge_rate * time # KWh \n \n energy = self.motor_power(curr_velocity, angle) * time\n\n self.current_capacity += gained_energy - energy", "def charge(self, charge):\n\n self._charge = charge", "def charge(self):\n\t\tfor l, loan in enumerate(self.loans):\n\t\t\tpayment_value = loan.borrower._pay(loan.value)\n\t\t\tloan.value -= payment_value\n\t\t\tif loan.value <= 0.0:\n\t\t\t\tloan.borrower.debt_link = None\n\t\t\t\tdel self.loans[l]\n\t\t\tself.stock += payment_value", "def ComputerFinalStateOfCharge(self):\r\n pass", "def partial_charge(self, params):\n return self.post(f\"{self.gateway_path}/partial_debit\", params)", "def wait(self, cycles):\n\t\tself.planet.tiles[self.y][self.x].set_occupant() # set occupant to the initial tile\n\t\tif self.planet.tiles[self.y][self.x].is_shaded: # in 'plain' the rover will recharge itself\n\t\t\ti = 0\n\t\t\twhile i < int(cycles):\n\t\t\t\tif self.battery < 100:\n\t\t\t\t\tself.battery += 1\n\t\t\t\ti += 1", "def charge(self):\n return self._charge", "def recharge(self, amount):\n self.action.recharge(self.cardUid, amount)\n self.start()", "def purge(self):\n self.remaining = 0", "def fill_tank(self):\r\n self.fuel_level = self.fuel_capacity\r\n print(\"Fuel tank is full\")", "def fill_tank(self):\n self.fuel_level = self.fuel_capacity\n print(\"Fuel tank is full.\")", "def fill_tank(self):\n self.fuel_level = self.fuel_capacity\n print(\"Fuel tank is full.\")", "def accelerate_or_deccelerate(self, car):\n if random.random() < .1:\n car.deccelerate()\n else:\n car.accelerate()", "def charge(self, power, time=1):\n \n requested_charge = round((float(power)*float(time)) * self.efficiency, 2)\n requested_state = round(self.state + requested_charge, 2)\n \n ## Checking Constraints\n assert power <= self.power, f'Power requested, {power}, is greater than possible power: {self.power}'\n assert power > 0, f'Power requested for charge, {power}, is negative'\n assert requested_state <= self.capacity, f'Charge requested, {requested_charge}, would leave the battery state, {self.state}, above capacity ({self.capacity}): {requested_state}'\n \n ## Charging\n self.state = requested_state", "def miner_begin_charging(self, player): \n self.ai_state = AI_CHARGING\n self.ai_counter = 30\n if self.direction_id == 'left': self.xvel = -1*self.max_speed\n elif self.direction_id == 'right': self.xvel = self.max_speed", "def brake(self):\n\n self.log_speed()\n while self.speed_mph > 0:\n time.sleep(1)\n if (self.speed_mph - self.deceleration_rate) < 0:\n self.speed_mph = 0\n self.log_speed()\n break\n else:\n self.speed_mph -= self.deceleration_rate\n self.log_speed()", "def _charge_user(self):\n 
self.set_banner_with_timeout(\"Charging...\", 0, Colours.INFO, None)\n \n self.owner.charge_all(self._charge_user_callback)\n self._request_redraw()\n return self.states.CHARGING", "def spendFuelToSurvive(self):\n fuelNeeded = self.getLightUpkeep()\n woodNeeded = math.ceil(\n fuelNeeded / self.configs[\"parameters\"][\"RESOURCE_TO_FUEL_RATE\"][\"WOOD\"]\n )\n woodUsed = min(self.cargo[\"wood\"], woodNeeded)\n fuelNeeded -= woodUsed * self.configs[\"parameters\"][\"RESOURCE_TO_FUEL_RATE\"][\"WOOD\"]\n self.cargo[\"wood\"] -= woodUsed\n if fuelNeeded <= 0:\n return True\n\n coalNeeded = math.ceil(\n fuelNeeded / self.configs[\"parameters\"][\"RESOURCE_TO_FUEL_RATE\"][\"COAL\"]\n )\n coalUsed = min(self.cargo[\"coal\"], coalNeeded)\n fuelNeeded -= coalUsed * self.configs[\"parameters\"][\"RESOURCE_TO_FUEL_RATE\"][\"COAL\"]\n self.cargo[\"coal\"] -= coalUsed\n\n if fuelNeeded <= 0:\n return True\n\n uraniumNeeded = math.ceil(\n fuelNeeded / self.configs[\"parameters\"][\"RESOURCE_TO_FUEL_RATE\"][\"URANIUM\"]\n )\n uraniumUsed = min(self.cargo[\"uranium\"], uraniumNeeded)\n fuelNeeded -= uraniumUsed * self.configs[\"parameters\"][\"RESOURCE_TO_FUEL_RATE\"][\"URANIUM\"]\n self.cargo[\"uranium\"] -= uraniumUsed\n\n if fuelNeeded <= 0:\n return True\n\n return fuelNeeded <= 0", "def charge(self,price):\n\n if price + self._balance> self._limit:\n return False\n else:\n self._balance+=price\n return True", "def Charge(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "async def run_capcacity_control(self):\n # remove all customers which will order nothing, and any sit_together customers\n\n # remove members from end of end of line( waiting the least amount of time ) \n\n try:\n capacity = self.restaurant.line.line_number\n number_to_remove = int(capacity * .25)\n self.log.warning(f\"{self} capacity control started to remove {number_to_remove} customers\")\n queue = self.waiting_list['queue'].copy()\n for _ in range(number_to_remove):\n if len(self.waiting_list['none']) > 0:\n for customer in self.waiting_list['none']:\n await self.remove_customer_from_line(customer)\n number_to_remove-=1\n queue = self.waiting_list['queue'].copy()\n if number_to_remove == 0:\n break\n try:\n customer = queue.pop()\n await self.remove_customer_from_line(customer)\n except IndexError:\n break\n\n self.log.warning(f\"{self} capacity control completed\")\n except Exception as e:\n self.log.warning(f\"{self} error during capacity control\")" ]
[ "0.71078575", "0.6817597", "0.6817597", "0.62536246", "0.6069697", "0.6024664", "0.5913083", "0.59018713", "0.5884432", "0.5862029", "0.58491033", "0.58241874", "0.5808281", "0.57654655", "0.57301015", "0.56985885", "0.5672971", "0.56674844", "0.56599915", "0.5631653", "0.5631653", "0.54995185", "0.54777", "0.54633176", "0.5463271", "0.5420981", "0.5417564", "0.54138094", "0.5406196", "0.54024404" ]
0.7852408
0
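
Illustrative sketch (not part of the dataset): the `charge` document above assumes a `battery` attribute with a `charge_level`. The `Battery` and `ElectricCar` wrappers below are invented so the call can be demonstrated end to end.

class Battery:
    """Hypothetical battery object exposing the charge_level attribute used above."""

    def __init__(self, charge_level=0):
        self.charge_level = charge_level


class ElectricCar:
    def __init__(self):
        self.battery = Battery()

    def charge(self):
        self.battery.charge_level = 100
        print("The vehicle is fully charged.")


car = ElectricCar()
car.charge()                     # prints the confirmation message
print(car.battery.charge_level)  # -> 100
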
find nearest word from target that satisfy condition
def word_nearest(word_list, target, condition = None, consider_phase = True):
    if not condition:
        condition = lambda t: True
    min_distance = 100
    min_word = None

    def word_distance(word1, word2):
        position1 = word1.position
        position2 = word2.position
        distance = [a-b for a, b in zip(position1, position2)]
        return np.sum(np.abs(distance))

    if isinstance(word_list, Word):
        word_list = [word_list]
    elif isinstance(word_list, list):
        #word_list = word_list
        pass
    else:
        print (word_list)
        raise TypeError()

    for word in word_list:
        phase = word.phase
        for word_compare in target:
            if not condition(word_compare):
                continue
            elif consider_phase and phase - word_compare.phase:
                continue
            distance = word_distance(word, word_compare)
            #print (word_compare, distance)
            if min_distance > distance:
                min_distance = distance
                min_word = word_compare
            elif min_distance == distance:
                pass # should be revised
    return min_word
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def closest_word_to(word, some_words):\n closest = ''\n distance = len(word)\n for target in some_words:\n this_distance = len(set(target) - set(word))\n if this_distance < distance:\n distance = this_distance\n closest = target\n return closest", "def get_closest(target_word: str, word_to_idx: Dict, embeddings: torch.Tensor, n: int = 5) -> List[Tuple[str, torch.Tensor]]:\n\n # Calculate distances to all other words\n\n word_embedding = embeddings[word_to_idx[target_word.lower()]]\n distances = []\n for word, index in word_to_idx.items():\n if word == \"<MASK>\" or word == target_word:\n continue\n distances.append((word, torch.dist(word_embedding, embeddings[index])))\n\n results = sorted(distances, key=lambda x: x[1])[1:n + 2]\n return results", "def get_closest_levenshtein(word, possible_words, threshold):\n result = None\n min_distance = 10\n for possible_word in possible_words:\n word_distance = distance(word, possible_word)\n if word_distance < min_distance:\n result = possible_word\n min_distance = word_distance\n result = result if min_distance < threshold else None\n return result, min_distance", "def closest(text, database):\n from fuzzywuzzy import process\n\n # Check if an exact match exists\n if database.find(text):\n return text\n\n # Get the closest matching statement from the database\n return process.extract(text, database.keys(), limit=1)[0][0]", "def replace_nearest(word): \n nearest = spellcheck.correction(word)\n #When there is no valid word, the nearest word\n #is the same as the original\n if word == nearest:\n #This implies we need to try splitting it\n return split_word(word)\n return nearest", "def translate_nearest_neighbour(self, source_vector):\n similarity_vector = np.matmul(FastVector.normalised(self.embed), source_vector)\n target_id = np.argmax(similarity_vector)\n return self.id2word[target_id]", "def closest_words(self, word, n):\n\n vector = self.get_vector(word)\n\n if vector is None:\n return None\n\n distances = [\n (w, torch.dist(vector, self.get_vector(w)).item())\n for w in self.glove.itos\n ]\n\n return [w for w, v in sorted(distances, key=lambda w: w[1])[:n]]", "def min_dist_solution(self, words, keyword_dict = None):\r\n\r\n\t\t# default settings\r\n\t\tif keyword_dict == None:\r\n\t\t\tkeyword_dict = self.keyword_dict\r\n\r\n\t\tindexed_text = list(enumerate(words))\r\n\t\t# all found keyword positions\r\n\t\tkeyword_pos = []\r\n\t\tkw_counts = [(len(kw.split()),kw) for kw in keyword_dict]\r\n\t\tkw_length_set = set((l[0] for l in kw_counts))\r\n\t\t\r\n\t\t# seperate keywords by their length\r\n\t\tfor length in kw_length_set:\r\n\t\t\tkw_lgram = ngrams(indexed_text, length)\r\n\t\t\t# start, end, ngram token\r\n\t\t\tkw_lgram_text = [(g[0][0],g[-1][0],' '.join([token[1] for token in g])) \r\n\t\t\t\t\t\t\t for g in kw_lgram]\r\n\t\t\tfixed_length_kw = [kw[1] for kw in kw_counts if kw[0] == length]\r\n\t\t\t\r\n\t\t\tfixed_keyword_pos = [(kw_s,kw_e,token) for kw_s,kw_e,token in kw_lgram_text\r\n\t\t\t \t\t\t\t\t if token in fixed_length_kw]\r\n\t\t\tkeyword_pos += fixed_keyword_pos\r\n\t\t# all found distances\r\n\t\tdistances = []\r\n\t\tfor kw_s,kw_e,kw in keyword_pos:\r\n\t\t\tdistance = keyword_dict[kw]['distance']\r\n\t\t\t# TODO handle case when value we search for is consisted of multiple words\r\n\t\t\tregex_pattern = keyword_dict[kw]['regex']\r\n\t\t\tsearch_direction = keyword_dict[kw]['search_direction']\r\n\t\t\t# start of the block\r\n\t\t\tstart = kw_s - distance if kw_s-distance > 0 else 0\r\n\t\t\t# end of the 
block\r\n\t\t\tend = kw_e + distance\r\n\t\t\tif search_direction == 'right':\r\n\t\t\t\tsearchable_block = indexed_text[kw_e:end]\r\n\t\t\telif search_direction == 'left':\r\n\t\t\t\tsearchable_block = indexed_text[start:kw_s]\r\n\t\t\telif search_direction == 'both':\r\n\t\t\t\tsearchable_block = indexed_text[start:end]\r\n\t\t\telse:\r\n\t\t\t\t# mb hanlde search_direction value\r\n\t\t\t\tsearchable_block = []\r\n\t\t\t\r\n\t\t\tvalue_pos = [index for index,value in searchable_block\r\n\t\t\t\t\t\t if re.search(regex_pattern,value)]\r\n\t\t\tdistance = [(self.dist(vp,kw_s,kw_e),vp,kw) for vp in value_pos]\r\n\r\n\t\t\tdistances += distance\r\n\t\tif len(distances) == 0:\r\n\t\t\treturn ('not found', None,'no kw')\r\n\t\telse:\r\n\t\t\tmin_distance,found_target_pos,kw = min(distances)\r\n\t\t\treturn words[found_target_pos],found_target_pos,kw", "def closest_phrase(phrases, possibility):\n\n return sorted(phrases, key=lambda w: abs(possibility-w.possibility))[0]", "def suggest(word, cutoff=0.77):\n if word in LOOKUP_TABLE:\n return LOOKUP_TABLE[word]\n\n guess = difflib.get_close_matches(word, MOST_COMMON_DOMAINS, n=1, cutoff=cutoff)\n if guess and len(guess) > 0:\n return guess[0]\n return word", "def find_word(target):\n results = []\n string = \"\"\n\n for a in range(0, len(grid)):\n for b in range(0, len(grid[a])):\n # Create strings on rows in the grid.\n string += grid[a][b]\n # Is the target is his string?\n if target in string:\n # Find the target by index in the string.\n index = string.index(target)\n # The target string was found at the row and index.\n results += [(a, index)]\n string = \"\"\n\n for b in range(0, len(grid[0])):\n for a in range(0, len(grid)):\n # Create strings based on the columns of the grid.\n string += grid[a][b]\n # Is the target in this string?\n if target in string:\n # Find the target by index in the string.\n index = string.index(target)\n # The target string was found at the index and column.\n results += [(index, b)]\n string = \"\"\n\n return results", "def nearest_words(embedding, voc_size, word, wint, intw, n_words=10):\n similar_words = {}\n word_embed = embedding(torch.LongTensor([wint[word]]))\n for i in range(voc_size):\n emb = embedding(torch.LongTensor([i]))\n cos_sim = F.cosine_similarity(emb, word_embed)\n if len(similar_words) < n_words:\n similar_words[float(cos_sim)] = intw[i]\n else:\n if cos_sim > min(similar_words):\n min_key = min(similar_words)\n del similar_words[min_key]\n similar_words[float(cos_sim)] = intw[i]\n else:\n pass\n # Ordering dict based on the value of the cosine similarity\n return sorted(similar_words.items())[::-1]", "def nearest_neighbors(self, word, dictionary):\n vectors = self.word_embeds.weight.data.cpu().numpy()\n index = dictionary.token2id[word]\n query = vectors[index]\n\n ranks = vectors.dot(query).squeeze()\n denom = query.T.dot(query).squeeze()\n denom = denom * np.sum(vectors ** 2, 1)\n denom = np.sqrt(denom)\n ranks = ranks / denom\n mostSimilar = []\n [mostSimilar.append(idx) for idx in ranks.argsort()[::-1]]\n nearest_neighbors = mostSimilar[:10]\n nearest_neighbors = [dictionary[comp] for comp in nearest_neighbors]\n\n return nearest_neighbors", "def search_sentence(target, sentences, tags, distance_evaluator=DistanceEvaluators.JACCARD):\n tag_id = 'VOID'\n best_sentence = ''\n best_distance = float('Infinity')\n x = list(zip(sentences, tags))\n for sentence, tag in zip(sentences, tags):\n #print(sentence)\n #print(\"\\n\\n\\n\" + tag)\n distance = distance_evaluator(sentence, target)\n if 
distance < best_distance:\n tag_id = tag\n best_sentence = sentence\n best_distance = distance\n return tag_id, best_sentence, best_distance", "def suggested_search(search_text):\n threshold = 0.6\n global model\n\n search_text = remove_stop_words(search_text)\n tmp_search = search_text.split()\n\n new_search = []\n for word in tmp_search:\n similar_words = get_similar_words(model, word)\n new_search = select_top_words(similar_words, new_search, threshold)\n\n new_search = list(set(new_search))\n new_search = ' '.join(new_search)\n\n return new_search + ' ' + search_text", "def get_fuzzy_match(object, answer, threshold=80):\n answer_phrase = generate_ngrams(answer)\n if answer_phrase:\n best_match = [fuzz.ratio(object, phr) for phr in answer_phrase]\n if np.max(best_match)>threshold:\n return np.max(best_match), answer_phrase[np.argmax(best_match)]\n else:\n return 0,''\n else:\n return 0, ''", "def get_best_candidate(word, ngram_dict, threshold=0.8):\n candidates = []\n w_l = len(word)\n freq = ngram_dict[word] if word in ngram_dict else 0.0\n if w_l >= 5:\n for uniq_word in ngram_dict:\n edit_dist = 0\n if word != uniq_word:\n edit_dist = Levenshtein.distance(word, uniq_word)\n levenshtein_ratio = 1.0 - edit_dist / w_l\n if levenshtein_ratio >= threshold:\n candidates.append([uniq_word, ngram_dict[uniq_word], edit_dist])\n else:\n candidates.append([word, ngram_dict[word], edit_dist])\n\n if len(candidates) == 0:\n return word, freq, 0\n\n candidates = sorted(candidates, key=lambda item: item[1], reverse=True)\n return candidates[0]\n else:\n return word, freq, 0", "def find_closest(self, query, n=10):\n\n idx = self.word_idx.get(query, None)\n # Handle case where query is not in vocab\n if idx is None:\n logging.info(f'{query} not found in vocab.')\n return\n else:\n vec = self.embedding_matrix[idx]\n # Handle case where word doesn't have an embedding\n if np.all(vec == 0):\n logging.info(f'{query} has no pre-trained embedding.')\n return\n else:\n # Calculate distance between vector and all others\n dists = np.dot(self.embedding_matrix, vec)\n\n # Sort indexes in reverse order\n idxs = np.argsort(dists)[::-1][:n]\n sorted_dists = dists[idxs]\n closest = [self.idx_word[i] for i in idxs]\n\n logging.info(f'Query: {query}\\n')\n max_len = max([len(i) for i in closest])\n # Print out the word and cosine distances\n for word, dist in zip(closest, sorted_dists):\n logging.info(f'Word: {word:15} Cosine Similarity: {round(dist, 4)}')", "def correct_word(word, cutoff):\n if WORDS is not None:\n result = difflib.get_close_matches(word, WORDS, n=1, cutoff=cutoff)\n if len(result) > 0:\n return result[0]\n\n return word", "def find_word2(target):\n results = []\n\n # Traverse the grid by row\n for column in range(0, len(grid)):\n # Create a string with the characters in the row.\n row = \"\".join(grid[column])\n # Is the target in the row?\n if target in row:\n # Find the target by index in the row.\n index = row.index(target)\n results += [(column, index)]\n # Transform the grid 90 degrees with the zip(*) method.\n row = 0\n for row2 in zip(*grid):\n # Create a string with the characters in the column.\n col2 = \"\".join(row2)\n # Is the target in the column?\n if target in col2:\n # Find the target by index in the column\n index = col2.index(target)\n results += [(index, row)]\n row += 1\n\n return results", "def closest_match(desired_language: {str, Language}, supported_languages: list,\n max_distance: int=25) -> (str, int):\n # Quickly return if the desired language is directly supported\n if 
desired_language in supported_languages:\n return desired_language, 0\n\n # Reduce the desired language to a standard form that could also match\n desired_language = standardize_tag(desired_language)\n if desired_language in supported_languages:\n return desired_language, 0\n\n match_distances = [\n (supported, tag_distance(desired_language, supported))\n for supported in supported_languages\n ]\n match_distances = [\n (supported, distance) for (supported, distance) in match_distances\n if distance <= max_distance\n ] + [('und', 1000)]\n\n match_distances.sort(key=itemgetter(1))\n return match_distances[0]", "def get_closest(occurences, content, k=25):\n result = []\n o = occurences[0] #get first\n for idx in o:\n res = 0\n for i in range(1, len(occurences)): #other than first\n oo = occurences[i]\n where = bisect_left(oo, idx)\n #try both, after and before the binary searched index (if exists)\n try:\n res += min(abs(oo[min(len(oo)-1, where+1)]-idx), abs(oo[min(len(oo)-1, where)]-idx))\n except:\n print(\"Something went wrong here\")\n result.append((res, idx))\n \n #if res < best:\n # best = res\n # best_where = (idx)\n\n result = sorted(result)\n final = []\n \n for score, i in tqdm(result):\n f = False\n for ii in final:\n if abs(i-ii) <= 200:\n f = True\n break\n if not f:\n final.append(i)\n\n paragraphs = []\n\n for idx in final:\n paragraphs.append(get_para(content, idx))\n\n print(\"DONE HERE\")\n return paragraphs[:k]", "def compare_tokenized(repairmsg, targetmsg):\n nearest = -1\n\n # Cartesian product of tokens in either msg\n token_pairs = product(repairmsg.tokenized(), targetmsg.tokenized())\n\n # Reduce to unique pairs of inequivalent tokens\n # This used to change order of r and t but now we keep them...\n tokens = list(set((r, t)# if r < t else (t, r)\n for r, t in token_pairs\n if r != t))\n\n # Get minimum positive dist (distances <0 are edit distances above the\n # short-circuit threshold of our Levenshtein distance function).\n distances = [compare_token(r, t) for r, t in tokens]\n\n outputs = [(repair, target, dist)\n for ((repair, target), dist) in zip(tokens, distances)\n if dist > 0]\n if not outputs:\n return -1\n\n # min() will return the tuple with lowest dist\n repair, target, dist = min(outputs, key=lambda tup: tup[2]) # [2] is dist\n return repair, target, dist", "def search(word):\n try:\n words = list_every_word(file_name)\n if len(words) > 20000:\n print(\"This might take a while.\")\n except IOError:\n print(\"This file doesn't exist... Are you sure you defined a valid filename? Use 'file <your filename>'\")\n except:\n print(\"An undefined error occured\")\n if dictionnary == False: \n print(\"You forgot to switch to dictionnary mode. 
Just use 'dictionnary'\")\n return\n else:\n try:\n ld = smallest_ld(word,words) \n print(\"The closest word found in the file is: {0}\".format(ld[0][1]))\n return ld[0][1]\n except:\n print(\"An unexpected error occured, be sure to have valid input in your file\")\n return", "def find_nearest_repetition(paragraph):\n word_to_latest_index, nearest_repeated_distance = {}, float('inf')\n for i, word in enumerate(paragraph):\n if word in word_to_latest_index:\n latest_equal_word = word_to_latest_index[word]\n nearest_repeated_distance = min(nearest_repeated_distance, i-latest_equal_word)\n word_to_latest_index[word] = i\n return nearest_repeated_distance if nearest_repeated_distance != float('inf') else -1", "def goal(target, prediction):\n return closest_point_on_segment(prediction, target)", "def findNextWordForSpellcheck(text, startPos, wikiPage):\r\n return (None, None, None)", "def search(self, word):", "def select_lower_edit_distance(ref_word, word_list):\n word_dict = {word: edit_distance(ref_word, word) for word in word_list}\n min_dist = min(word_dict.values())\n\n return [word for word, dist in word_dict.items() if dist == min_dist]", "def fetch_most_similar_answer(self, text, unclean=True, threshold=.25):\n if is_greeting(text):\n return random.choice(GREETING_RESPONSES)\n if unclean:\n inp_embedded = aggregate_embeddings(self.clean_then_embed(text)).reshape(-1, 1)\n comparison_dict = {k: spatial.distance.cosine(v.reshape(-1, 1), inp_embedded) for k, v in\n self.key.items()}\n # dont get lost, just calculating the cosine similarity between this and every answer in the corpus #\n\n if max(comparison_dict.items(), key=operator.itemgetter(1))[1] < threshold:\n return \"I apologize, I don't understand\"\n return max(comparison_dict.items(), key=operator.itemgetter(1))[0]" ]
[ "0.73988307", "0.7295452", "0.727856", "0.6958168", "0.69574374", "0.66173923", "0.6592807", "0.65599316", "0.650897", "0.6483398", "0.64312065", "0.6338591", "0.6320641", "0.62936354", "0.62920094", "0.623869", "0.6105924", "0.610165", "0.6100946", "0.60979575", "0.60804534", "0.60767555", "0.6043818", "0.60334355", "0.6015742", "0.59862775", "0.5946549", "0.59462327", "0.59434634", "0.58984286" ]
0.8085685
0
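
Illustrative sketch (not part of the dataset): the `word_nearest` document above needs `numpy` and a `Word` type with `position` and `phase` attributes, neither of which appears in the record. The stub class, the sample words, and their coordinates below are invented for illustration; the function is a lightly condensed copy of the record's code (debug prints omitted) so the snippet runs on its own.

import numpy as np


class Word:
    """Hypothetical stand-in for the project's Word type."""

    def __init__(self, text, position, phase=0):
        self.text = text
        self.position = position
        self.phase = phase

    def __repr__(self):
        return "Word(%r)" % self.text


def word_nearest(word_list, target, condition=None, consider_phase=True):
    if not condition:
        condition = lambda t: True
    min_distance = 100
    min_word = None

    def word_distance(word1, word2):
        # Manhattan distance between the two position tuples
        distance = [a - b for a, b in zip(word1.position, word2.position)]
        return np.sum(np.abs(distance))

    if isinstance(word_list, Word):
        word_list = [word_list]
    elif isinstance(word_list, list):
        pass
    else:
        raise TypeError()

    for word in word_list:
        phase = word.phase
        for word_compare in target:
            if not condition(word_compare):
                continue
            elif consider_phase and phase - word_compare.phase:
                continue  # skip candidates from a different phase
            distance = word_distance(word, word_compare)
            if min_distance > distance:
                min_distance = distance
                min_word = word_compare
    return min_word


query = Word("query", position=(0, 0), phase=1)
candidates = [
    Word("far", position=(5, 5), phase=1),
    Word("near", position=(1, 0), phase=1),
    Word("other_phase", position=(0, 0), phase=2),  # skipped: phase differs
]
print(word_nearest(query, candidates))  # -> Word('near')
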
Update keyword_mini in keyword_list
def update_keyword_pack(keyword_list, keyword_mini):
    #print (keyword_mini)
    keyword_pack = Get(keyword_mini, -1)

    def update_weight(prop_tuple, weight, pack=keyword_pack):
        if prop_tuple in pack:
            pack[prop_tuple] += weight
        else:
            prop, sub_prop, prop_type = prop_tuple
            weight_before = pack.get((prop, None, prop_type), 0)
            pack[prop_tuple] = weight + weight_before

    for prop, sub_prop, prop_type, weight in keyword_list:
        prop_tuple = (prop, sub_prop, prop_type)
        if not sub_prop: # sub_prop == None
            for pack in keyword_mini:
                if not pack and pack is not keyword_pack: # pack is empty and it is not last_term
                    continue
                for other_prop in pack:
                    if other_prop[0] == prop and other_prop[1]: # Update before keyword (sub_prop != None)
                        update_weight(other_prop, weight, pack)
        update_weight(prop_tuple, weight, keyword_pack)

    #print ('ahah', keyword_list, keyword_mini)
    return keyword_mini
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Adjust_Keyword_List( self ):\r\n listing = list( self.system.Get_Term_List( ) ) #get the term list of the current profile\r\n\r\n d=ExpressionAdjust.ExpressionAdjuster( self.root, listing, 'Keywords' )\r\n if(d.return_state==0):\r\n return #Cancel hit\r\n self.system.Set_Term_List( d.profile_list )\r\n self.system.Apply_Profile_Term_List_2()", "def _update_key_set(self):\n self._key_set = set([item.keyword for item in self._metadata])", "def SetKeyWords(self, kw_lst):\n # Parse Keyword Settings List simply ignoring bad values and badly\n # formed lists\n self._code['keywords'] = list()\n kwlist = \"\"\n for keyw in kw_lst:\n if len(keyw) != 2:\n continue\n else:\n if not isinstance(keyw[0], int) or \\\n not isinstance(keyw[1], basestring):\n continue\n else:\n kwlist += keyw[1]\n super(EditraBaseStc, self).SetKeyWords(keyw[0], keyw[1])\n\n # Can't have ? in scintilla autocomp list unless specifying an image\n # TODO: this should be handled by the autocomp service\n if '?' in kwlist:\n kwlist.replace('?', '')\n\n kwlist = kwlist.split() # Split into a list of words\n kwlist = list(set(kwlist)) # Remove duplicates from the list\n kwlist.sort() # Sort into alphabetical order\n\n self._code['keywords'] = kwlist", "def apply_keyword_to_fields_list(self, metadata_list):\n self._basket.apply_keyword_to_fields_list(metadata_list)", "def set_keywords(self):\n\n if len(self.get_keywords()) == 0 and len(self.get_files()) > 0:\n self.keywords = self.files[0].get_parent()[\"title\"].split(\" \")\n for keyword in self.keywords:\n if str(keyword) in str(self.text):\n self.keywords = []", "def update_dictionary_entries(word_list, the_dict):\n\tfor word in word_list:\n\t\tthe_dict[word] = True\n\treturn the_dict", "def add_keyword(self, new_word):\n\n self.keywords.append(new_word)\n words = Keywords()\n words.add(new_word)\n mongo.db.users.update({\"name\": self.username},\n {\"$set\": {\"keywords\": self.keywords}})", "def update_knowledge(self):\n pass", "def recommend_by_keywords(self, key_words_list=None):\n pass", "def add_user_keywords(words, uid):\n profile = db.Profile.get(uid)\n for word in words:\n profile.keywords[word] = 100.0\n\n db.session.commit()", "def doKeywords(self, kVector, kw):\n kw = self.opts.kwModified(kVector, kw)\n for thisDict in (self.kw, self.plotKeywords):\n for name in thisDict:\n if name not in kw:\n kw[name] = thisDict[name]\n return kw", "def setKeywords(self,value):\n self.PDFreactorConfiguration.in1[\"keywords\"] = value", "def add_keyword(self,\r\n index,\r\n keywords):\r\n\r\n if isinstance(keywords, str):\r\n keywords = {keywords}\r\n\r\n self.edit(index,\r\n self.get_keys_from_note(index).union(keywords),\r\n self.get_text_from_note(index))", "def add_kw(obj,kw):\n\tif kw:\n\t\tkw = kw.replace(';',',')\n\t\tkw = kw.split(',')\n\t\tfor k in kw:\n\t\t\tk=k.lstrip()\n\t\t\tk=k.rstrip()\n\t\t\tK = KW.objects.filter(nm_kw__iexact=k)\n\t\t\tif K:\n\t\t\t\tobj.KW.add(K[0])\n\t\t\telse:\n\t\t\t\tkk = KW(nm_kw = k)\n\t\t\t\tkk.save()\n\t\t\t\tobj.KW.add(kk)\n\t\tobj.save()", "def update_list(*args):\n\n search_term = search_var.get()\n all_anime = load(open(Save_file_dir.joinpath(\"anime_save.p\"), \"rb\"))\n\n all_anime_list = []\n for key, value in all_anime.items():\n all_anime_list.append(key)\n\n libox_all_anime.delete(0, END)\n\n for item in all_anime_list:\n if search_term.lower() in item.lower():\n libox_all_anime.insert(END, item)", "def addkeyword(self, line):\n self.__keywords.append(line)", "def update_follow_set(model: Dict[str, Set[str]], word: str, 
follow_word: str) -> None:\n if word not in model:\n model[word] = {follow_word}\n\n else:\n model[word].add(follow_word)", "def add_keyword(x):\n ParsingTmp.keywords.append(x)", "def filter_keywords(self, keywords):\n\t\tself.keywords += self._coerce_list(keywords)", "def SetupKeywords(self):\n kwlist = u\" \".join(self._keywords)\n self.SetKeyWords(0, kwlist)", "def addkeywords(self, keywords):\n if isinstance(keywords, str):\n keywords = [keywords]\n self._kw.extend(keywords)", "def set_document_keyword(self, document_keyword):\n self.set_value_into_input_field(self.keywords_text_field_locator, document_keyword)", "def keywords(self, keywords):\n self._keywords = keywords", "def update_keywords(self, keyword, where=None):\n rowcount = 0\n if keyword is not None:\n self.update_generic_data(keyword, TABLE_NAME_KW, where)\n # done\n return rowcount", "def synonyms_keyword(self, p_keyword):\n pass", "def _add_better_search_words(self):\n for kw in self.better_search_kw:\n self.search_query += kw", "def update(self,haiku, typenum):\n self.occurrences += 1\n for i in range(2):\n for x in (haiku.triple[i]).wordarray:\n if (self.wordtype == dictionary.wordtype(x) and \n dictionary.word_filter(x) != self.word):\n self.update_adj_dict(x, i==typenum)", "def keywords(self, keywords):\n\n self._keywords = keywords", "def _update_feature_vec(fvec, word, tag_ngram):", "def add(self, keyword, definitions):\r\n for x_temp in definitions:\r\n self.query(term1='kd',term2=keyword,term3=x_temp.strip(),action='set')" ]
[ "0.7004033", "0.6163652", "0.604905", "0.5958035", "0.58509445", "0.58496016", "0.5842247", "0.57948285", "0.5778456", "0.5731634", "0.5702554", "0.5648011", "0.56097215", "0.56031096", "0.55414486", "0.5538705", "0.5530985", "0.55123645", "0.54711574", "0.54400563", "0.54375124", "0.543244", "0.5418615", "0.5414808", "0.5368877", "0.53633773", "0.5345927", "0.53444064", "0.5335337", "0.53330934" ]
0.7432671
0
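
Illustrative sketch (not part of the dataset): the record's `update_keyword_pack` relies on a `Get` helper that is not shown; judging from its use (`Get(keyword_mini, -1)` is treated as the last pack), it is assumed below to be plain indexing. The packs, tuples, and weights in the example are likewise invented, and the function is restated so the snippet runs on its own.

def Get(seq, index):
    """Assumed behaviour of the project's Get helper: plain indexing."""
    return seq[index]


def update_keyword_pack(keyword_list, keyword_mini):
    keyword_pack = Get(keyword_mini, -1)  # last pack collects the new keywords

    def update_weight(prop_tuple, weight, pack=keyword_pack):
        if prop_tuple in pack:
            pack[prop_tuple] += weight
        else:
            prop, sub_prop, prop_type = prop_tuple
            weight_before = pack.get((prop, None, prop_type), 0)
            pack[prop_tuple] = weight + weight_before

    for prop, sub_prop, prop_type, weight in keyword_list:
        prop_tuple = (prop, sub_prop, prop_type)
        if not sub_prop:  # propagate to matching sub-prop entries in earlier packs
            for pack in keyword_mini:
                if not pack and pack is not keyword_pack:
                    continue
                for other_prop in pack:
                    if other_prop[0] == prop and other_prop[1]:
                        update_weight(other_prop, weight, pack)
        update_weight(prop_tuple, weight, keyword_pack)
    return keyword_mini


# Each "pack" maps (prop, sub_prop, prop_type) tuples to weights.
keyword_mini = [{("color", "red", "adj"): 1.0}, {}]
keyword_list = [("color", None, "adj", 0.5)]
update_keyword_pack(keyword_list, keyword_mini)
print(keyword_mini)
# -> [{('color', 'red', 'adj'): 1.5}, {('color', None, 'adj'): 0.5}]
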
List of property in specific unit input
def property_list_of_specific_unit(data_list, unit, counter= None, show = False):
    if not isinstance(counter, Counter):
        counter = Counter()
    total_list = check_from_specific_unit(data_list, unit, show)
    #print ('t', total_list)
    #prop_list = [data['Property'] for data in total_list]
    prop_list = map(lambda data: data['Property'], total_list)
    #print ('p', prop_list)
    counter.update(prop_list)
    return counter
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_property_list(self,filtr):\n\n\n return self.dp.get_property_list(filtr)", "def test_list_properties(self):\n pass", "def get_properties():", "def getPropertiesAll():", "def getListOfUnits(self, *args):\n return _libsbml.UnitDefinition_getListOfUnits(self, *args)", "def get_units(self, obj: Dimension) -> [Unit]:\n try:\n return obj.units()\n except KeyError as e:\n logging.error(str(e))\n return []", "def _get_representation_component_units(args, kwargs):\n if \"unit\" not in kwargs:\n units = [None, None, None]\n\n else:\n units = kwargs.pop(\"unit\")\n\n if isinstance(units, str):\n units = [x.strip() for x in units.split(\",\")]\n # Allow for input like unit='deg' or unit='m'\n if len(units) == 1:\n units = [units[0], units[0], units[0]]\n elif isinstance(units, (Unit, IrreducibleUnit)):\n units = [units, units, units]\n\n try:\n units = [(Unit(x) if x else None) for x in units]\n units.extend(None for x in range(3 - len(units)))\n if len(units) > 3:\n raise ValueError()\n except Exception as err:\n raise ValueError(\n \"Unit keyword must have one to three unit values as \"\n \"tuple or comma-separated string.\"\n ) from err\n\n return units", "def _determine_properties(self, paramdict):\n for var in paramdict:\n if is_dimensionless(paramdict[var]):\n self._all_params_unit[var] = \"none\"\n yield lems.Property(var, \"none\")\n else:\n dim = _determine_dimension(paramdict[var])\n self._all_params_unit[var] = dim\n yield lems.Property(var, dim)", "def getProperties():", "def get_property(self, property, data):\n\n values = data.xpath(\"%s//*[@%s='%s']\" % (self.scope, self.attribute, property))\n if len(values) == 0:\n values = data.xpath(\"//*[@%s='%s']\" % (self.attribute, property))\n return values", "def get_attributes(units, properties=[\"p_set\", \"q_set\"]):\n df = pd.DataFrame()\n for unit in units.items():\n for prop in properties:\n df.at[unit[0], prop] = getattr(unit[1], prop)\n return df", "def matchesProperties(self, *args):\n return _libsbml.SBMLUnitsConverter_matchesProperties(self, *args)", "def get(self, *args):\n return _libsbml.ListOfUnits_get(self, *args)", "def getProperties(targets):", "def property_unit(self, unit: str) -> None:\n if sys.version_info[0] == 2:\n unit = unit\n\n if len(unit) > 100:\n unit = unit[:100]\n self.prop_unit = unit", "def propertyManagers(self) -> Iterator[unicode]:\n ...", "def test_properties_stats_get(self):\n pass", "def __str__(self):\n phases = '|'.join([phase.name for phase in PropertyPhase if self & phase])\n return phases", "def units(self, *args):\n u = self.parent.unit\n return tuple('%s%s' % (a, u) for a in args)", "def get_units(self) -> List[str]:\n result = []\n for elements in self._get_results_list():\n result.append(elements[3])\n return result", "def _prop(self):\n return [\"%s = %s\" % (str(k), repr(v)) for k, v in self.prop.items()]", "def test_properties(self):\n self.assertEqual(LENGTH_KILOMETERS, METRIC_SYSTEM.length_unit)\n self.assertEqual(TEMP_CELSIUS, METRIC_SYSTEM.temperature_unit)\n self.assertEqual(MASS_GRAMS, METRIC_SYSTEM.mass_unit)\n self.assertEqual(VOLUME_LITERS, METRIC_SYSTEM.volume_unit)", "def test_properties_get(self):\n pass", "def properties(self):", "def properties(self):", "def properties(self):", "def properties(self):\n raise NotImplementedError", "def list_data_units(self):\n return self.data_units.items()", "def list_property(\n self, key: str) -> Collection[Tuple[str, PropertyAttribute]]:\n return self._env.list_property(key)", "def properties(self):\n return {'u': {'name': 
'streamwise velocity',\n 'range': self.u_range},\n 'uf': {'name': 'streamwise velocity',\n 'range': self.u_range},\n 'w': {'name': 'vertical velocity',\n 'range': self.w_range},\n 'wf': {'name': 'vertical velocity',\n 'range': self.w_range},\n 'uf_': {'name': 'mean streamwise velocity',\n 'range': self.u_range},\n 'wf_': {'name': 'mean vertical velocity',\n 'range': self.w_range},\n 'uf_abs': {'name': 'absolute velocity',\n 'range': self.u_abs_range},\n 'vorticity': {'name': 'vorticity',\n 'range': self.w_range},\n 'vertical_shear': {'name': 'vertical_shear',\n 'range': self.w_range}}" ]
[ "0.63807905", "0.62724996", "0.6171355", "0.6048868", "0.60106623", "0.5979617", "0.58818513", "0.58047694", "0.5754058", "0.5747146", "0.57451415", "0.5701677", "0.56990063", "0.5683549", "0.56530946", "0.5640377", "0.5630344", "0.5627606", "0.560844", "0.559831", "0.55886334", "0.55663353", "0.5551319", "0.55487895", "0.55487895", "0.55487895", "0.55329555", "0.5523314", "0.5523003", "0.5499378" ]
0.6809606
0
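
Illustrative sketch (not part of the dataset): `property_list_of_specific_unit` above depends on `check_from_specific_unit` and `collections.Counter`. The filter stub and the `'Unit'`/`'Property'` record layout below are assumptions made only so the example runs end to end; the record's commented-out debug lines are omitted.

from collections import Counter


def check_from_specific_unit(data_list, unit, show=False):
    """Assumed behaviour: keep only the entries recorded in the given unit."""
    return [data for data in data_list if data.get('Unit') == unit]


def property_list_of_specific_unit(data_list, unit, counter=None, show=False):
    if not isinstance(counter, Counter):
        counter = Counter()
    total_list = check_from_specific_unit(data_list, unit, show)
    prop_list = map(lambda data: data['Property'], total_list)
    counter.update(prop_list)
    return counter


data = [
    {'Unit': 'kg', 'Property': 'mass'},
    {'Unit': 'kg', 'Property': 'mass'},
    {'Unit': 'm', 'Property': 'length'},
]
print(property_list_of_specific_unit(data, 'kg'))  # -> Counter({'mass': 2})
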
returns a function from input to sensitivity analysis heatmap takes in additional keys for "input" and "idx"
def customizable_sensitivity_analysis_fn(input_name,
                                         logit_name,
                                         network,
                                         handlers,
                                         inputs,
                                         outputs=None,
                                         **kwargs):
    if outputs is None:
        outputs = {}
    assert "outputs" not in outputs
    handlers = [
        SensitivityAnalysisOutput(idx_input_key="idx",
                                  output_key="outputs",
                                  input_name=input_name,
                                  logit_name=logit_name),
        canopy.handlers.override_hyperparameters(deterministic=True)
    ] + handlers
    assert "input" not in inputs
    assert "idx" not in inputs
    # make a copy of inputs so that we can mutate
    inputs = dict(inputs)
    inputs["input"] = input_name
    fn = canopy.handled_fn(network,
                           handlers=handlers,
                           inputs=inputs,
                           outputs=outputs,
                           **kwargs)
    return fn
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sensitivity_analysis_fn(input_name,\n logit_name,\n network,\n handlers,\n inputs=None,\n **kwargs):\n handlers = [\n SensitivityAnalysisOutput(idx_input_key=\"idx\",\n output_key=\"outputs\",\n input_name=input_name,\n logit_name=logit_name),\n canopy.handlers.override_hyperparameters(deterministic=True)\n ] + handlers\n\n fn = canopy.handled_fn(network,\n handlers=handlers,\n inputs={\"input\": input_name},\n outputs={},\n **kwargs)\n\n def inner(in_val, idx_val):\n return fn({\"input\": in_val, \"idx\": idx_val})[\"outputs\"]\n\n return inner", "def __predict_input_fn(self):\n ## Recast spectra into dictionary for estimator\n features = {'flux': self.spectra_test}\n return features", "def preprocessing_fn(inputs):\n outputs = {}\n\n # This function is the entry point for your feature engineering with\n # TensorFlow Transform, using the TFX Transform component. In this example\n # the feature engineering is very simple, only applying z-score scaling.\n for key in Features.FEATURE_KEYS:\n outputs[transformed_name(key)] = tft.scale_to_z_score(inputs[key])\n\n # inputs[key]\n\n # tft.scale_to_z_score(inputs[key])\n\n # Do not apply label transformation as it will result in wrong evaluation.\n outputs[transformed_name(\n Features.LABEL_KEY)] = inputs[Features.LABEL_KEY]\n\n return outputs", "def feature_dist_func_dict():\n return {\"tanimoto_dissimilarity\": tanimoto_dissimilarity}", "def __test_input_fn(self):\n ## Test labels\n labels = self.labels_test\n ## Recast spectra into dictionary for estimator\n features = {'flux': self.spectra_test}\n ## Convert labels to integers\n ilabels = [self.label_index_lookup[l] for l in labels]\n return features, ilabels", "def cloudy_table_map(x_index='lognHs',y_index='lognSFRs',**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n cloudy_library = clo.library()\n lookup_table = cloudy_library._restore_lookup_table()\n print(lookup_table.nH_mw.min())\n print(lookup_table.nH_mw.max())\n\n fig,ax = plt.subplots(figsize=(8,5))\n\n key_const1, key_const2, key_const3, key_const4 = list(p.keep_const.keys())[0],list(p.keep_const.keys())[1],list(p.keep_const.keys())[2],list(p.keep_const.keys())[3]\n value_const1, value_const2, value_const3, value_const4 = list(p.keep_const.values())[0],list(p.keep_const.values())[1],list(p.keep_const.values())[2],list(p.keep_const.values())[3]\n\n # for key, value in p.cloudy_param.items():\n # key = key\n # value = value\n\n # cloudy_parameters = np.array(['logNHs','lognHs','lognSFRs','logZs','logFUVs'])\n\n # x_index = cloudy_parameters[(cloudy_parameters != key) & (cloudy_parameters != 'Machs')][0]\n # y_index = cloudy_parameters[(cloudy_parameters != key) & (cloudy_parameters != 'Machs')][1]\n\n print('%s table values:' % key_const1)\n print(np.unique(lookup_table[key_const1]))\n print('kept fixed at %f' % value_const1)\n\n print('%s table values:' % key_const2)\n print(np.unique(lookup_table[key_const2]))\n print('kept fixed at %f' % value_const2)\n\n print('%s table values:' % key_const3)\n lookup_table[key_const3] = np.round(lookup_table[key_const3]*10.)/10.\n print(np.unique(lookup_table[key_const3]))\n print('kept fixed at %f' % value_const3)\n\n print('%s table values:' % key_const4)\n print(np.unique(lookup_table[key_const4]))\n print('kept fixed at %f' % value_const4)\n\n lookup_table_cut = lookup_table[(lookup_table[key_const1] == value_const1) & \\\n (lookup_table[key_const2] == value_const2) & \\\n (lookup_table[key_const3] == value_const3) & \\\n 
(lookup_table[key_const4] == value_const4)]\n x, y = lookup_table_cut[x_index].values, lookup_table_cut[y_index].values\n\n X, Y = np.meshgrid(np.unique(x), np.unique(y))\n print(lookup_table_cut.nH_mw.min())\n print(lookup_table_cut.nH_mw.max())\n\n\n if p.line == '[CII]158_CO(1-0)':\n line_lum = 10.**lookup_table_cut['[CII]158'].values / 10.**lookup_table_cut['CO(1-0)'].values\n line_lum = np.log10(line_lum)\n if p.line == 'alpha_CO':\n line_lum = 1e4 / aux.Lsun_to_K_km_s_pc2(10.**lookup_table_cut['CO(1-0)'].values,'CO(1-0)') \n try:\n line_lum = lookup_table_cut[p.line].values\n except:\n pass\n\n lum = line_lum.reshape([len(np.unique(x)), len(np.unique(y))]).T\n\n vmin = np.min(lum)\n vmax = np.max(lum)\n print(vmin,vmax)\n if p.zlim:\n vmin = p.zlim[0]\n vmax = p.zlim[1]\n lum[lum < vmin] = vmin\n lum[lum > vmax] = vmax\n if p.log: \n print('AAAA')\n lum = np.log10(lum)\n vmin,vmax = np.log10(vmin),np.log10(vmax)\n\n print('Highest and lowest value to be mapped:', np.min(lum), np.max(lum))\n print(vmin,vmax)\n\n cf = ax.contourf(X,Y, lum, cmap=\"jet\", vmin=vmin, vmax=vmax, levels=30, lw=0, rstride=1, cstride=1,alpha=0.8)\n if getlabel(p.line) == '':\n if p.log: plt.colorbar(cf,label='log '+p.line)\n if not p.log: plt.colorbar(cf,label=p.line)\n else: \n plt.colorbar(cf,label=getlabel(p.line))\n \n # Show where grid points are, but only where lum > 0\n failed_models = lookup_table_cut['fail'].values\n ax.plot(x[failed_models == 0],y[failed_models == 0],'x',ms=5,mew=2,color='w')\n\n translate_labels = {'lognHs':'lnH','logNHs':'lNH','logFUVs':'lG0','logZs':'lZ','lognSFRs':'lSFR_density'}\n ax.set_xlabel(getlabel(translate_labels[x_index]))\n ax.set_ylabel('\\n\\n' + getlabel(translate_labels[y_index]))\n if p.ylim: ax.set_ylim(p.ylim)\n if p.xlim: ax.set_xlim(p.xlim)\n plt.tight_layout()\n\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'look-up/'): os.mkdir(p.d_plot + 'look-up/') \n plt.savefig(p.d_plot + 'look-up/cloudy_table%s_%s.png' % (p.grid_ext,p.line), format='png', dpi=300)", "def get_score(self, input, target_idx, noise_idx):\n raise NotImplementedError()", "def input_fn():\n # It's important to build all the tensors together in one DataFrame.\n # If we did df.select() for both key sets and then build those, the two\n # resulting DataFrames would be shuffled independently.\n tensors = limited_dataframe.build(**kwargs)\n\n base_input_features = {key: tensors[key] for key in base_input_keys}\n labels = {key: tensors[key] for key in label_keys}\n\n # TODO(soergel): Remove this special case when b/30367437 is fixed.\n if len(labels) == 1:\n labels = list(labels.values())[0]\n\n return base_input_features, labels", "def input_fn(params=None):\n del params\n if is_tpu:\n features = get_input_fn_dataset(pattern, flags, batch_size)()[0]\n else:\n features = get_input_fn_queue(pattern, flags, batch_size)()[0]\n\n if flags.color_data_augment:\n\n def augment_img(image):\n image = tf.image.random_hue(image, .5)\n return image\n\n features[IMAGE_FEATURE_NAME] = tf.map_fn(\n augment_img, features[IMAGE_FEATURE_NAME], parallel_iterations=32)\n\n return features, None", "def chi2sf(x, k):", "def input_function(given_x):\n\tkeys = signal_Assignment.keys()\n\tkeys.sort\n\n\tfor i in range(len(signal_Assignment)):\n\t\tif given_x > keys[-1]:\n\t\t\treturn signal_Assignment[keys[-1]]\n\t\telif given_x > keys[i] and given_x < keys[i+1]:\n\t\t\tif given_x < (keys[i]+keys[i+1])/2:\n\t\t\t\treturn signal_Assignment[keys[i]]\n\t\t\telse:\n\t\t\t\treturn signal_Assignment[keys[i+1]]\n\t\telif 
given_x == keys[i]:\n\t\t\treturn signal_Assignment[keys[i]]", "def _linearly_inseparable_binary_input_fn():\n return {\n 'multi_dim_feature':\n constant_op.constant([[1.0, 1.0], [1.0, -1.0], [-1.0, -1.0],\n [-1.0, 1.0]]),\n }, constant_op.constant([[1], [0], [1], [0]])", "def __train_input_fn(self):\n ## To ensure unbiased training, grab random labels to define batch\n labels = np.random.choice(np.unique(self.labels_train), self.batch_size)\n ## Then grab a random spectrum from each label\n spectra = np.zeros((self.batch_size, len(self.spectra_train[0])))\n for i,l in enumerate(labels):\n good = self.labels_train == l\n idx = np.random.choice(np.sum(good))\n spectra[i] = self.spectra_train[good][idx]\n ## Recast into dictionary for estimator\n features = {'flux': spectra}\n ## Convert labels to integers\n ilabels = [self.label_index_lookup[l] for l in labels]\n return features, ilabels", "def call(self, inputs):\n\n strengths, = self.extract_inputs(inputs)\n thresholded = nd.threshold(strengths, th=self.threshold) \n probabilities = nd.boltzmann(thresholded, self.temperature)\n d = nd.draw(probabilities, n=1)\n d = nd.with_default(d, default=0)\n \n return d", "def construct_feature_mapping_approx(feature_mapping, weights):\n # here is a function that is created on the fly from the input feature\n # mapping and weights\n def prediction_function(xs):\n designmtx = np.matrix(feature_mapping(xs))\n return linear_model_predict(designmtx, weights)\n # we return the function reference (handle) itself. This can be used like\n # any other function\n return prediction_function", "def _mappings(self, inputs):\n return self.mapbias + tensor.dot(\n self._factorsX(inputs) * self._factorsY(inputs), self.whf_in.T)", "def input_fn(sources, train, params):\n \n raise NotImplementedError", "def call(self, inputs):\n\n strengths, = self.extract_inputs(inputs)\n cmds_by_dims = group_by_dims(self.interface.cmds)\n params_by_dims = group_by_dims(self.interface.params)\n items_by_dims = chain(cmds_by_dims.items(), params_by_dims.items())\n\n d = nd.MutableNumDict(default=0)\n for dim, fs in items_by_dims:\n if len(fs) == 1: # output strength of singleton param dim\n assert dim in params_by_dims\n f, = fs\n d[f] = strengths[f]\n else: # select value for cmd dim or multivalue param dim\n assert 1 < len(fs)\n ipt = nd.NumDict({f: strengths[f] for f in fs})\n prs = nd.boltzmann(ipt, self.temperature)\n selection = nd.draw(prs, n=1)\n d.update(selection)\n\n return d", "def __getitem__(self, idx):\n image_path = self.image_paths[idx]\n \n lookup = image_path.split(\"/\")[-1].split(\".\")[0]\n \n image = Image.open(image_path)\n #y = self.name_to_label[idx]\n y = self.fish_dict[lookup]\n X = self.transform(image)\n return X,y", "def __getitem__(self, index):\n A_path = self.A_paths[index]\n A_img = np.array(Image.open(A_path).convert('RGB'))\n A_img = self.stack(A_img)\n A_img = resize(A_img,(256, 256))\n A_img = np.einsum('abc->cab', A_img)\n # A_img = hsi_loader(A_path)\n # print(np.max(A_img))\n A = hsi_normalize(A_img, max_=1)\n \n #A = self.transform(A_img)\n return {'A': A, 'A_paths': A_path}", "def _make_train_input_fn(is_classification):\n\n def _input_fn():\n features = dict(FEATURES_DICT)\n if is_classification:\n labels = CLASSIFICATION_LABELS\n else:\n labels = REGRESSION_LABELS\n return features, labels\n\n return _input_fn", "def InElasticFunction_getValueAt(*args):\n return _osgAnimation.InElasticFunction_getValueAt(*args)", "def __getitem__(self, idx):\n row = self.dataset.iloc[idx]\n\n img = 
cv2.imread(row['path'])\n img_keypoints = row['keypoints']\n\n pafmap_mask, n_pafs = self.apply_pafmap_mask(img_keypoints, img.shape,\n self.coco_pafmap_joints if 'coco' in row['path'] else self.mpii_pafmap_joints)\n\n keypoint_mask, n_kps = self.apply_keypoint_mask(img_keypoints, img.shape,\n self.coco_keypoints if 'coco' in row['path'] else self.mpii_keypoints)\n\n img = cv2.resize(img, (self.input_shape[1], self.input_shape[0]))\n\n if self.augment:\n seq_det = self.seq.to_deterministic()\n\n pafmap_mask = HeatmapsOnImage(pafmap_mask, shape=img.shape, min_value=-1.0, max_value=1.0)\n\n keypoint_mask = HeatmapsOnImage(keypoint_mask, shape=img.shape, min_value=0.0, max_value=1.0)\n\n img = seq_det.augment_image(img)\n pafmap_mask = seq_det.augment_heatmaps(pafmap_mask).get_arr()\n keypoint_mask = seq_det.augment_heatmaps(keypoint_mask).get_arr()\n\n pafmap_mask = cv2.resize(pafmap_mask, (self.input_shape[1] // self.downscale,\n self.input_shape[0] // self.downscale))\n keypoint_mask = cv2.resize(keypoint_mask, (self.input_shape[1] // self.downscale,\n self.input_shape[0] // self.downscale))\n\n img = np.transpose(img, (2, 0, 1)).copy() / 255.\n pafmap_mask = np.transpose(pafmap_mask, (2, 0, 1)).copy()\n keypoint_mask = np.transpose(keypoint_mask, (2, 0, 1)).copy()\n\n return img, pafmap_mask, keypoint_mask, n_pafs, n_kps", "def chi2sf_inplace(x, k):", "def make_input_fn(step_output):\n return tf.nn.embedding_lookup(embeddings, step_output.predictions)", "def process_primary_inputs(dict_):\n try:\n dict_[\"ESTIMATION\"][\"bins\"]\n except KeyError:\n bins = 25\n else:\n bins = dict_[\"ESTIMATION\"][\"bins\"]\n\n try:\n dict_[\"ESTIMATION\"][\"logit\"]\n except KeyError:\n logit = True\n else:\n logit = dict_[\"ESTIMATION\"][\"logit\"]\n\n try:\n dict_[\"ESTIMATION\"][\"bandwidth\"]\n except KeyError:\n bandwidth = 0.32\n else:\n bandwidth = dict_[\"ESTIMATION\"][\"bandwidth\"]\n\n try:\n dict_[\"ESTIMATION\"][\"gridsize\"]\n except KeyError:\n gridsize = 500\n else:\n gridsize = dict_[\"ESTIMATION\"][\"gridsize\"]\n\n try:\n dict_[\"ESTIMATION\"][\"ps_range\"]\n except KeyError:\n prop_score_range = [0.005, 0.995]\n else:\n prop_score_range = dict_[\"ESTIMATION\"][\"ps_range\"]\n\n start_grid = prop_score_range[0]\n endgrid = prop_score_range[1]\n\n return bins, logit, bandwidth, gridsize, start_grid, endgrid", "def InOutElasticFunction_getValueAt(*args):\n return _osgAnimation.InOutElasticFunction_getValueAt(*args)", "def _evaluator(idx_dct):\n\n def _value(key):\n ngb_idxs = tuple(\n sorted(map(idx_dct.__getitem__, ngb_keys_dct[key])))\n return (ngb_idxs, key)\n\n return _value", "def json_serving_input_fn():\n inputs = {}\n for feat in INPUT_COLUMNS:\n inputs[feat.name] = tf.placeholder(shape=[None], dtype=feat.dtype)\n\n features = {\n key: tf.expand_dims(tensor, -1)\n for key, tensor in inputs.iteritems()\n }\n return tf.contrib.learn.InputFnOps(features, None, inputs)", "def computeSensitivityMap(X, y, C_val, gamma_val, no_channels, no_timepoints):\n \n ### Compute SVM classifier ###\n y = np.squeeze(y)\n classifier = SVC(C=C_val, gamma=gamma_val)\n clf = classifier.fit(X, y)\n \n \n ### Extract classifier model coefficients and add zero indices ### \n coefficients = clf.dual_coef_\n support_array = clf.support_\n \n coefficients = np.squeeze(coefficients)\n \n trials = len(X[:,0])\n features = len(X[0])\n alpha = np.zeros(trials)\n alpha[support_array] = coefficients\n alpha = np.squeeze(alpha)\n \n no_zero_indices = trials - len(support_array)\n \n ### Compute training kernal 
matrix, K ###\n M = distance.pdist(X,'euclidean')\n \n M_exp = np.exp(gamma_val*(-(np.square(M))))\n K = distance.squareform(M_exp) \n \n ### Compute sensitivity map ###\n \n X = np.transpose(X) # Obtain training examples in columns for further computation\n\n mapping = np.matmul(X,np.matmul(np.diag(alpha),K))-(np.matmul(X,(np.diag(np.matmul(alpha,K)))))\n s = np.sum(np.square(mapping),axis=1)/np.size(alpha) \n\n s_matrix = np.reshape(s,[no_channels,no_timepoints])\n # np.save('sensitivity_map.npy',s_matrix)\n \n ### Generation of sensitivity map plot ###\n \n # Examples of x- and y-axis labels\n \n# channel_vector = ['P7','P4','Cz','Pz','P3','P8','O1','O2','T8','F8','C4','F4',\n# 'Fp2','Fz','C3','F3','Fp1','T7','F7','Oz','PO3','AF3','FC5',\n# 'FC1','CP5','CP1','CP2','CP6','AF4','FC2','FC6','PO4']\n#\n# time_vector = ['-100','0','100','200','300','400','500']\n\n plt.matshow(s_matrix)\n plt.xlabel('Time points')\n #plt.xticks(np.arange(0,no_timepoints,10),time_vector)\n #plt.yticks(np.arange(no_channels),channel_vector)\n plt.ylabel('EEG channels')\n plt.colorbar()\n plt.title('Sensitivity map SVM RBF kernel')\n # plt.show()\n \n return s_matrix, plt\n \n print('Sensitivity map computed. Number of support vectors for the classifier: {0}.'.format(len(support_array)))" ]
[ "0.56896526", "0.5655239", "0.5604212", "0.5551312", "0.5425711", "0.53594214", "0.5334569", "0.5329085", "0.52712095", "0.52428246", "0.5231885", "0.5228857", "0.52231294", "0.52001", "0.51708233", "0.51637757", "0.5150478", "0.51333094", "0.51168174", "0.5115549", "0.51040894", "0.5103434", "0.5093742", "0.5090191", "0.508231", "0.5064582", "0.5057431", "0.5053445", "0.5049118", "0.5034738" ]
0.5841512
0
Create a line graph of the rate over time for flow 1 and 2.
def plot(self):
    clf()
    # Plot rate for flow 1
    x = []
    y = []
    i = 0
    maxY = None
    while i < self.max_time:
        bytes = 0
        # loop through array of data and find relevant data
        for (t,sequence,size) in self.data1:
            if (t >= i - 1) and (t <= i):
                bytes += size
        # compute interval
        left = i - 1
        if i - 1 < 0:
            left = 0
        right = i
        # add data point
        if (right - left) != 0:
            rate = (bytes*8.0/1000000)/(right-left)
            x.append(i)
            y.append(rate)
            if not maxY or rate > maxY:
                maxY = int(rate) + 1
        i += 0.1
    plot(x,y)
    # Plot rate for flow 2
    x = []
    y = []
    i = 0
    while i < self.max_time:
        bytes = 0
        # loop through array of data and find relevant data
        for (t,sequence,size) in self.data2:
            if (t >= i - 1) and (t <= i):
                bytes += size
        # compute interval
        left = i - 1
        if i - 1 < 0:
            left = 0
        right = i
        # add data point
        if (right - left) != 0:
            rate = (bytes*8.0/1000000)/(right-left)
            x.append(i)
            y.append(rate)
            if not maxY or rate > maxY:
                maxY = int(rate) + 1
        i += 0.1
    plot(x,y)
    # Plot rate for flow 3
    x = []
    y = []
    i = 0
    while i < self.max_time:
        bytes = 0
        # loop through array of data and find relevant data
        for (t,sequence,size) in self.data3:
            if (t >= i - 1) and (t <= i):
                bytes += size
        # compute interval
        left = i - 1
        if i - 1 < 0:
            left = 0
        right = i
        # add data point
        if (right - left) != 0:
            rate = (bytes*8.0/1000000)/(right-left)
            x.append(i)
            y.append(rate)
            if not maxY or rate > maxY:
                maxY = int(rate) + 1
        i += 0.1
    plot(x,y)
    # Plot rate for flow 4
    x = []
    y = []
    i = 0
    while i < self.max_time:
        bytes = 0
        # loop through array of data and find relevant data
        for (t,sequence,size) in self.data4:
            if (t >= i - 1) and (t <= i):
                bytes += size
        # compute interval
        left = i - 1
        if i - 1 < 0:
            left = 0
        right = i
        # add data point
        if (right - left) != 0:
            rate = (bytes*8.0/1000000)/(right-left)
            x.append(i)
            y.append(rate)
            if not maxY or rate > maxY:
                maxY = int(rate) + 1
        i += 0.1
    plot(x,y)
    # Plot rate for flow 1
    x = []
    y = []
    i = 0
    while i < self.max_time:
        bytes = 0
        # loop through array of data and find relevant data
        for (t,sequence,size) in self.data5:
            if (t >= i - 1) and (t <= i):
                bytes += size
        # compute interval
        left = i - 1
        if i - 1 < 0:
            left = 0
        right = i
        # add data point
        if (right - left) != 0:
            rate = (bytes*8.0/1000000)/(right-left)
            x.append(i)
            y.append(rate)
            if not maxY or rate > maxY:
                maxY = int(rate) + 1
        i += 0.1
    plot(x,y)
    xlabel('Time (seconds)')
    ylabel('Rate (Mbps)')
    ylim([0,maxY])
    savefig(self.output_file + '.png')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_rates(self, title, ymax=None, linewidth=1):\n line_generator = self.line_gen()\n for flowID, rate_points in self.flow_rates.items():\n times = [point[0] for point in rate_points]\n rates = [point[1] for point in rate_points]\n if flowID is not None:\n linestyle = line_generator.next()\n plt.plot(times, rates, label='Flow {}'.format(flowID[0]), linewidth=linewidth, linestyle=linestyle)\n plt.xlabel('time (ns)')\n plt.ylabel('rate (Gbps)')\n plt.title(title)\n #plt.legend(loc='lower right')\n plt.legend(loc='upper left')\n if ymax is not None:\n plt.ylim(0, ymax)", "def plot_rates(self, title, ymax=None, linewidth=1):\n line_generator = self.line_gen()\n max_rate = 0\n for flowID, rate_points in self.flow_rates.items():\n times = [(point[0] - self.start_time)*1e-6 for point in rate_points]\n rates = [point[1] for point in rate_points]\n if len(rates) > 0:\n max_rate = max(rates) if max(rates) > max_rate else max_rate\n if flowID is not None:\n linestyle = line_generator.next()\n plt.plot(times, rates, label='Flow {}'.format(flowID), linewidth=linewidth, linestyle=linestyle)\n plt.xlabel('time (ms)')\n plt.ylabel('rate (Gbps)')\n plt.title(title)\n #plt.legend(loc='lower right')\n plt.legend(loc='upper left')\n if ymax is not None:\n plt.ylim(0, max_rate)", "def test():\n data1 = resources_vs_time(0.0, 50)\n data2 = resources_vs_time(1.0, 10)\n data3 = resources_vs_time(2.0, 10)\n data4 = resources_vs_time(0.5, 10)\n print data1\n simpleplot.plot_lines(\"Growth\", 600, 600, \"time\", \"total resources\", [data1])", "def line_graph():\n fig = plt.figure()\n ax = plt.axes()\n x = [1, 2, 3]\n y = [5, 6, 7]\n plt.plot(x, y)\n plt.show()", "def _line_example_1(price_by_date):\n # Plot the data\n ch = chartify.Chart(blank_labels=True, x_axis_type=\"datetime\")\n ch.set_title(\"Line charts\")\n ch.set_subtitle(\"Plot two numeric values connected by an ordered line.\")\n ch.plot.line(\n # Data must be sorted by x column\n data_frame=price_by_date.sort_values(\"date\"),\n x_column=\"date\",\n y_column=\"total_price\",\n )\n ch.show(_OUTPUT_FORMAT)", "def line_graph():\r\n #create the data in an array\r\n xval = np.arange(0,6,(np.pi*(1./10)))\r\n yval = np.cos(xval)\r\n data = np.array([xval,yval])\r\n data = data.transpose()\r\n y = np.arange(-1,1.5,0.5)\r\n #convert the data to a pd DataFrame\r\n df = pd.DataFrame(data,columns=[\"x\",\"y\"])\r\n #tell the DataFrame to plot the data\r\n ax = df.plot(x=\"x\",y=\"y\",label=\"0\",ylim=(-1,1),yticks=y,title=\"Cosine Approximated at Intervals of 1/(10pi)\")\r\n ax.set(xlabel=\"\",ylabel=\"\")\r\n\t#get the figure from the axes and save it\r\n fig = ax.get_figure()\r\n fig.savefig(\"my_line_graph.png\")", "def drawLines_income(t0, t1, t2, t3):\r\n t0.pd()\r\n t1.pd()\r\n t2.pd()\r\n t3.pd()\r\n t0.pencolor(\"blue\")\r\n t0.pensize(3)\r\n t1.pensize(3)\r\n t2.pensize(3)\r\n t3.pensize(3)\r\n t1.pencolor(\"red\")\r\n t2.pencolor(\"green\")\r\n t3.pencolor(\"gold\")\r\n t0.rt(90)\r\n t1.rt(90)\r\n t2.rt(90)\r\n t3.rt(90)\r\n t0.fd(70)\r\n t1.fd(70)\r\n t2.fd(70)\r\n t3.fd(70)", "def plot_graph(costs):\n plt.figure()\n for i in range(len(np.array(costs).T)):\n plt.plot(np.array(costs)[:, i])\n plt.title(\"Costs\")\n plt.show()", "def graph(stock):\n output=stock_price(stock)\n return plt.plot(output)", "def draw_spike_times(spike_times):\n for line in spike_times:\n plt.axvline(x=line, color='y')", "def _line_example_2_chart(price_by_date_and_country):\n ch = chartify.Chart(blank_labels=True, x_axis_type=\"datetime\")\n ch.set_title(\"Line charts - 
Grouped by color\")\n ch.plot.line(\n # Data must be sorted by x column\n data_frame=price_by_date_and_country.sort_values(\"date\"),\n x_column=\"date\",\n y_column=\"total_price\",\n color_column=\"fruit\",\n )\n ch.show(_OUTPUT_FORMAT)", "def getColorLine(self, dot1, dot2): # generate all dots of line\n x1, y1, z1, feed1 = dot1\n x2, y2, z2, feed2 = dot2\n min_color = self.colors_list[feed1]\n max_color = self.colors_list[feed2]\n\n # NB! feed1,feed2 >= 0; 0 = dot[3]-fmin\n # self.colors_list = grad(MIN_COLOR, MAX_COLOR, self.fmax)\n if self.current_feedrate == 0:\n start_color = min_color\n if feed2 > feed1:\n finish_color = max_color\n else:\n finish_color = min_color\n else:\n if feed2 > feed1:\n start_color = min_color\n finish_color = max_color\n elif feed1 > feed2:\n start_color = max_color\n finish_color = min_color\n else: # feed1 == feed2\n if feed2 == self.current_feedrate:\n start_color = finish_color = self.current_color\n\n self.current_color = finish_color\n self.current_feedrate = feed2\n color_list = grad(start_color, finish_color, n=self.speed+1)\n\n i = 0\n coords = []\n stepx = (x2-x1) / self.speed\n stepy = (y2-y1) / self.speed\n stepz = (z2-z1) / self.speed\n for i in range(self.speed):\n dot = [0, 0, 0, 0, 0, 0, 0] # x, y, z, r, g, b, p\n dot[0] = x1 + i*stepx\n dot[1] = y1 + i*stepy\n dot[2] = z1 + i*stepz\n dot[3:7] = color_list[i]\n coords.append(dot)\n\n return coords", "def example3():\n arrive_time=example2() # Get packets arrive time using example1\n time_series.plot_time_series(arrive_time) # Plot time series using packets arrive time", "def graph1():\r\n sheet = workbook.sheet_by_index(0)\r\n data = [[sheet.cell_value(r, c) for c in range(sheet.ncols)] for r in range(sheet.nrows)]\r\n\r\n for i in range(2, sheet.nrows):\r\n list_data[0].append((data[i][0], round((data[i][3]/data[i][1])*100, 2)))\r\n list_data[1].append((data[i][0], round((data[i][4]/data[i][2])*100, 2)))\r\n list_data[2].append((data[i][0], round(((data[i][3] + data[i][4])/(data[i][1] + data[i][2]))*100, 2)))\r\n\r\n line_graph = pygal.XY()\r\n line_graph.title = 'อัตราการดื่มเครื่องดื่มแอลกอฮอล์รวม และแยกตามเพศ ระหว่างปี 2544 ถึง 2557'\r\n line_graph.x_labels = (2544, 2546, 2548, 2550, 2552, 2554, 2556, 2558)\r\n for i in range(3):\r\n line_graph.add(data_name[i], list_data[i])\r\n line_graph.render_to_file('1Alcohol consumption rate by genders between 2001 and 2014.svg')", "def line(x1, y1, x2, y2):\r\n\r\n x1 = normalize(x1)\r\n y1 = normalize(y1)\r\n x2 = normalize(x2)\r\n y2 = normalize(y2)\r\n\r\n xdiff = max(x1, x2) - min(x1, x2)\r\n ydiff = max(y1, y2) - min(y1, y2)\r\n xdir = 1 if x1 <= x2 else -1\r\n ydir = 1 if y1 <= y2 else -1\r\n\r\n r = max(xdiff, ydiff)\r\n\r\n for i in range(r+1):\r\n x = x1\r\n y = y1\r\n\r\n if ydiff:\r\n y += (float(i) * ydiff) / r * ydir\r\n if xdiff:\r\n x += (float(i) * xdiff) / r * xdir\r\n\r\n yield (x, y)", "def plot_line_graph(target_offenses, counts, year_list, filename):\n\t#this is to demonstrate line graphs but the data is categorical so you should actually be using bar graphs\n\tfig, ax = plt.subplots()\n\tcolors = [\"blue\",\"red\",\"orange\",\"green\",\"yellow\",\"purple\"]\n\tfor index,offense in enumerate(target_offenses):\n\t\tplt.plot(year_list, counts[index], color=colors[index], marker= 'o', label=offense)\n\tax.get_xaxis().get_major_formatter().set_useOffset(False)\t\n\tplt.xlabel('Year')\n\tplt.ylabel('Number of offenses')\n\tplt.legend()\n\tplt.savefig(filename,format=\"png\")\n\tplt.show()", "def task_1():\n\n # To store the list 
of speeds to plot\n list_of_speeds = []\n list_of_times = []\n\n # To go from 1 through 80\n for i in range(LOW_SPEED, HIGH_SPEED + 1):\n list_of_speeds.append(i)\n time = (DISTANCE/i) * 60 * 60\n list_of_times.append(time)\n\n plt.plot(list_of_speeds, list_of_times)\n plt.xlabel(\"Speed (in mph)\")\n plt.ylabel(\"Time (in s)\")\n plt.show()", "def plot_tuning_curves(self, baseline_rate=10.):\n x = np.arange(0, 1 + 0.01, 0.01)\n l0 = self.data['L0']\n l1 = self.data['L1']\n y_on = np.exp(np.log(l0) + x * np.log(l1 / l0))\n y_off = np.exp(np.log(l0) + (1 - x) * np.log(l1 / l0))\n plt.plot(x, y_on, label='ON')\n plt.plot(x, y_off, label='OFF')\n plt.plot(x, baseline_rate + 0 * x, '--')\n # plt.xlabel('Stimulus intensity')\n # plt.ylabel('Firing Rate (Hz)')\n # plt.title('Firing rate as a function \\n of Stimulus Intensity')\n # plt.legend()", "def __plot_rank_line(self):\n numbers = [i for i in range(self.__rank_length)]\n\n fig = plt.figure()\n ax = fig.add_subplot(211)\n ax.plot(numbers, self.__motion, label=\"Motion\", color='r')\n ax.plot(numbers, self.__blur, label=\"Blur\", color='g')\n ax.plot(numbers, self.__audio, label=\"Audio\", color='c')\n ax.plot(numbers, self.__text, label=\"Text\", color='m')\n ax.set_title(\"rankings for all features\")\n ax.set_ylim(-1)\n plt.legend(loc=2).set_draggable(True)\n\n ax = fig.add_subplot(212)\n for start, end, in self.__timestamps:\n ax.plot([start, start], [0, 10], color='red', linestyle='dashed', linewidth=1.5)\n ax.plot([end, end], [0, 10], color='green', linestyle='dashed', linewidth=1.5)\n\n custom_lines = [Line2D([0], [0], color='red', linestyle='dashed', linewidth=1.5),\n Line2D([0], [0], color='green', linestyle='dashed', linewidth=1.5)]\n\n ax.plot([i for i in range(self.__rank_length)], self.__ranks)\n ax.set_ylim(0)\n ax.set_title(\"sum of all rankings\")\n ax.legend(custom_lines, ['start time', 'end time'], loc=0).set_draggable(True)\n\n plt.tight_layout()\n plt.show()", "def linefreq(transition) :\n return s.lineFreq(transition)", "def draw_line(cvs, p1, p2, antialias):\n xs, ys = np.array([p1[0], p2[0]]), np.array([p1[1], p2[1]])\n points = pd.DataFrame({'x': xs, 'y': ys, 'val': 5.0})\n return cvs.line(points, 'x', 'y', agg=ds.reductions.max(\"val\"),\n antialias=antialias)", "def plot_comparison(results):\n dfs = []\n for res in results:\n equity = (1 + res['equity']).cumprod()\n equity.name = 'equity'\n equity = equity.reset_index()\n equity['name'] = res['name']\n dfs.append(equity)\n data = pd.concat(dfs, axis=0)\n\n fig = px.line(data, x='time_idx', y='equity', color='name')\n fig.show()", "def task_2():\n\n # To store the list of speeds to plot\n list_of_speeds = []\n list_of_times = []\n list_of_time_difference = [0]\n\n # To go from 1 through 80\n for i in range(LOW_SPEED, HIGH_SPEED + 1, 5):\n list_of_speeds.append(i)\n list_of_times.append(((DISTANCE / i) * 60))\n\n for i in range(1, len(list_of_times)):\n list_of_time_difference.append(list_of_times[i-1] - list_of_times[i])\n\n plt.plot(list_of_speeds, list_of_time_difference)\n plt.xlabel(\"Speed (in mph)\")\n plt.ylabel(\"Time saved (in minutes)\")\n plt.show()", "def add_line(self, point1: Point, point2: Point, counts_as_step=True, interesting=False) -> Line:\n line = Line(point1=point1, point2=point2)\n self.add_step_premade(line, counts_as_step=counts_as_step, interesting=interesting)\n return line", "def rateTS (self,ts):\n output = []\n for line in ts.data:\n output.append([line[0],self.rate(line[1]),line[2]])\n return timeseries(output)", "def plot_series(self, 
t1=0, t2=100, t1p=None, t2p=None):\n \n plot_discretized(self.ts, self.ts_dis, t1=t1, t2=t2, t1p=t1p, t2p=t2p)", "def draw_lines(asr,ax):\n r = asr.value\n y = 475.\n x = (r**2-y**2)**(.5)\n xs = np.linspace(-x,x,10)\n yt = np.zeros(xs.size)+y\n yb = np.zeros(xs.size)-y\n ax.plot(xs,yt,'-.',color='red',alpha=1.,linewidth=2,zorder=5000)\n ax.plot(xs,yb,'-.',color='red',alpha=1.,linewidth=2,zorder=5000)\n return ax", "def plot_lines(self):\n self.plot(3)", "def plt_connecting_lines():\n\n for i in range(0, Molecule.connection_count):\n tmp1 = Molecule.right_endpt[Molecule.left_connection[i] - 1]\n tmp2 = Molecule.left_endpt[Molecule.right_connection[i] - 1]\n tmp3 = Molecule.energy[Molecule.left_connection[i] - 1]\n tmp4 = Molecule.energy[Molecule.right_connection[i] - 1]\n\n plt.plot([tmp1, tmp2], [tmp3, tmp4], color=PlotParameter.connection_line_color,\n lw=PlotParameter.connection_line_width, linestyle='--')\n\n return None", "def DRPathValidation(self, flow, p):\n\t\tpath_rate_lst = []\n\n\t\tarr_time = flow[3]\n\t\tend_time = flow[3] + flow[4]\n\t\tedge_lst = [(p[i], p[i+1]) for i in xrange(len(p)-1)]\n\t\t# Calculate the bottlenecked rate at each event point\n\t\tfor evt in self.event_lst:\n\t\t\tmin_rate = float('inf')\n\t\t\tfor e in edge_lst:\n\t\t\t\tif evt in self.rate_lst[e].keys() and self.rate_lst[e][evt] < min_rate:\n\t\t\t\t\tmin_rate = self.rate_lst[e][evt]\n\t\t\tif min_rate < float('inf'):\n\t\t\t\tpath_rate_lst.append((evt, min_rate))\n\n\t\t# Calculate the cumulative size over the bottleneck rates between arr_time and end_time\n\t\tprev_time = arr_time\n\t\tprev_rate = 0\n\t\tcum_size = 0\n\t\trate_alloc = [(-1, 0)]\t# Start from time -1 with rate 0, so as a benchmark from the beginning\n\t\tfor time, rate in path_rate_lst:\n\t\t\t# Each time period is from prev_time -> min{time, end_time}\n\t\t\tif time > prev_time:\n\t\t\t\tif time < end_time:\n\t\t\t\t\tcum_size += prev_rate * (time - prev_time)\n\t\t\t\t\tcur_ed_time = time\n\t\t\t\telse:\n\t\t\t\t\tcum_size += prev_rate * (end_time - prev_time)\n\t\t\t\t\tcur_ed_time = end_time\n\t\t\t\trate_alloc.append((prev_time, prev_rate))\n\n\t\t\t\tif cum_size >= flow[2]:\n\t\t\t\t\tbreak\n\t\t\t\telif time >= end_time:\n\t\t\t\t\tbreak\n\n\t\t\tprev_time = time\n\t\t\tprev_rate = rate\n\n\t\t# If it can carry the flow size before deadline\n\t\tif cum_size >= flow[2]:\n\t\t\tcum_size -= prev_rate * (cur_ed_time - prev_time)\n\t\t\tnew_end_time = float(flow[2] - cum_size) / prev_rate + prev_time\n\t\t\t# Add the finish rate onto the allocation list\n\t\t\trate_alloc.append((new_end_time, 0))\n\n\t\t\treturn True, edge_lst, self.DRAllocTrim(rate_alloc), new_end_time\n\n\t\t# If not, return the minimum edge regarding the cumulative size\n\t\tedge, size = self.DRFindMinimalEdge(flow, edge_lst)\n\t\treturn False, edge, size" ]
[ "0.6258836", "0.62256557", "0.6003119", "0.5807549", "0.5559554", "0.5534315", "0.55321395", "0.5495035", "0.5476266", "0.5467404", "0.5456977", "0.5430374", "0.5430327", "0.5418864", "0.53900796", "0.53784734", "0.53725505", "0.53439164", "0.53158927", "0.531341", "0.5303742", "0.5266912", "0.5257372", "0.5244298", "0.5236354", "0.52228105", "0.51979905", "0.5194435", "0.51876014", "0.51658046" ]
0.7296539
0
Cleanup Xcode cache and derived data
def cache_clean(): run(cmd="rm -rf ~/Library/Developer/Xcode/DerivedData/*")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean():\n clean_flatbuffer_binaries()\n clean_webp_textures()", "def _clean_up(self):", "def cleanup():", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def clean():\n C.libs.clear()\n shutil.rmtree(C.cache_dir, ignore_errors=True)", "def cleanUp(self):\r\n pass", "def cleanup(self):\r\n pass", "def cleanup(self):\r\n pass", "def cleanup(self):", "def cleanup(self):", "def cleanup(self):", "def cleanup (self):\n pass", "def cleanUp(self):\r\n # All intermediates should be removed by app controller\r\n pass", "def cleanup(self):\n\n pass", "def cleanup(self):\r\n pass", "def clean_up(self):\n\t\tpass", "def clean_up(self):\n pass", "def clean_up(self):\n pass", "def _clean(base_dir):\n # remove the snakemake cache\n shutil.rmtree(os.path.join(base_dir, \".snakemake\"), ignore_errors=True)\n\n # remove seq2science caches\n shutil.rmtree(os.path.expanduser(os.path.join(xdg.XDG_CACHE_HOME, \"seq2science\")), ignore_errors=True)\n\n # remove historic seq2science cache location\n shutil.rmtree(os.path.expanduser(f\"~/.config/seq2science/\"), ignore_errors=True)\n\n print(\"All cleaned up!\")", "def cleanUp(self):\n import evoware.fileutil as F\n F.tryRemove(self.f_project, verbose=(self.VERBOSITY>1), tree=1)" ]
[ "0.7105218", "0.69078654", "0.68643934", "0.6836506", "0.6836506", "0.6836506", "0.6836506", "0.6836506", "0.6836506", "0.6836506", "0.6836506", "0.6836506", "0.6836506", "0.6836506", "0.68172604", "0.68070483", "0.6770995", "0.6770995", "0.6767389", "0.6767389", "0.6767389", "0.6743061", "0.6738757", "0.67261285", "0.67160827", "0.6710307", "0.6675457", "0.6675457", "0.6647335", "0.6616188" ]
0.8461176
0
removes the columns with missing values below a certain threshold
def remove_columns_missing_values(df, min_threshold):
    for col in df.columns:
        rate = sum(df[col].notnull())/float(len(df)) * 100
        if rate <= min_threshold:
            df = df.drop(col,1)
    return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def drop_high_nan(df, threshold=0.5):\n n_nans = df.isnull().sum()\n freq_nans = n_nans/float(len(df)) #in percentage\n to_drop = (freq_nans > threshold).values\n columns_drop = df.columns.values[to_drop].tolist()\n return df.drop(columns_drop, axis=1)", "def filterMissings(self, threshold, data):\n\n #replace NAs by 0 for counting\n data.fillna(0).astype(bool).sum(axis=1)\n\n filtered_columns = data.columns\n\n\n #find out threshold, i.e. minimum number of non-zero in real numbers\n rowNumber = data.shape[0]\n min_nonZeros = int(rowNumber - ((rowNumber * int(threshold))/100))\n\n zero_counts = data.astype(bool).sum(axis=0)\n\n for columnID, nonZeros in zero_counts.items():\n if nonZeros <= min_nonZeros:\n filtered_columns = filtered_columns.drop(columnID)\n\n\n return data[filtered_columns]", "def remove_null_cols(df, thresh=0.08):\n \n # look at this\n # df.dropna(thresh=int(df.shape[0] * .9), axis=1)\n pct_null = df.isnull().sum() / len(df)\n missing_features = pct_null[pct_null > thresh].index\n return df.drop(missing_features, axis=1)", "def remove_empty_columns(x, threshold=0.4):\n # For each column compute the ratio of nan values over the number of rows\n prop_empty_column = (np.isnan(x)).sum(axis=0) / len(x)\n column_mask = prop_empty_column < threshold\n return x[:, column_mask], column_mask", "def remove_missing(self, threshold=1):\n qualified = np.sum(self.table.isnull().values, 1) >= threshold\n print(f\"@Timetable.remove_missing: {np.sum(qualified)} with more than {threshold} missing data will be removed.\")\n self.table = self.table[np.logical_not(qualified)]", "def high_null_count(df, thresh):\n cols_remove = []\n for col in df.columns:\n if df[col].isna().sum() / df.shape[0] >= thresh:\n cols_remove.append(col)\n\n return df.drop(columns=cols_remove, axis=1)", "def threshold_col_del(self, threshold):\n self.data = self.data.dropna(thresh=threshold*len(self.data), axis=1) \n self.X = self.data.drop(self.target, axis =1)\n self.y = self.data[self.target]", "def remove_cols_high_missing_rates(data, min_missing_rate=0.4):\n cols_keep = list(data.isna().mean()[data.isna().mean() < min_missing_rate].index)\n return data[cols_keep], cols_keep", "def remove_columns(df, threshold, log=False):\n if log: sectionTimer = Timer(log=f\"removing columns with more than {threshold * 100}% of nans\")\n \n # removes columns with many nans\n non_nan_values = int(df.shape[0] * (1 - threshold))\n df_clean = df.dropna(thresh=non_nan_values, axis=1)\n dropped_cols = list(set(df.columns) - set(df_clean.columns))\n\n if log: sectionTimer.end_timer(log=f\"removed {len(set(df.columns)) - df_clean.shape[1]} columns\")\n return df_clean, dropped_cols", "def _remove_non_informative_rows(self, df, threshold):\n df_tmp = pd.DataFrame()\n n_features = len(df.columns)\n # calculating ratio of rows that have more than \"ratio\" missing values\n df_tmp['ratio'] = df.apply(lambda row: row.isnull().sum()/n_features, axis='columns')\n\n # kick too noisy rows\n return df[df_tmp['ratio'] <= threshold]", "def filter_by_count(df, min_columns, min_count):\n num_columns = len(df.columns)\n df = df.ix[df[df > min_count].isnull().sum(axis=1) < (num_columns - min_columns)]\n return df", "def drop_nan_streaks_above_threshold(df, df_nan_table, thresholds):\n\n # Check for NaN streaks > threshold and drop them from the df\n length = len(df_nan_table['Amount of NaNs'])\n print('df_nan_table length: %s' % length)\n\n indices_to_drop = []\n for i, amount in enumerate(df_nan_table['Amount of NaNs']):\n selected_column = 
df_nan_table['Column name'][i]\n try:\n if amount > thresholds[selected_column]:\n start_index = (df_nan_table['Start index'][i])\n stop_index = (df_nan_table['Stop index'][i])\n indices = df[start_index:stop_index].index\n print('Enumeration %s of %s | From \\t %s \\t to \\t %s | column %s | NaN streak length: %s'\n % (i, length, start_index, stop_index, selected_column, (len(indices))))\n try:\n indices_to_drop += indices\n except:\n print('Could not add indices to indices_to_drop list')\n else:\n #print('amount < threshold')\n pass\n except:\n #print('No threshold detected for %s' % selected_column)\n pass\n\n print('Dropping NaN streaks > threshold')\n l1 = len(df)\n df = df.drop(indices_to_drop)\n l2 = len(df)\n print('Removed %s rows' % (l1-l2))\n return df", "def dropna(df, axis=0, th=0.4):\n df = df.copy()\n axis = 0 if axis==1 else 1\n col_idx = df.isna().sum(axis=axis)/df.shape[axis] <= th\n df = df.iloc[:, col_idx.values]\n return df", "def drop_quasi_zero(df, thresh=0.05):\n drop_list = []\n for el in df.columns.values:\n non_zero = df[el][df[el] != 0].shape[0] / df.shape[0]\n if non_zero < thresh:\n drop_list.append(el)\n print('Dropping column: {} | Non-zero values ratio: {}%'.format(\n el, round(100 * non_zero, 3)))\n return df.drop(drop_list, axis=1)", "def remove_rows(df, threshold, log=False):\n if log: section_timer = Timer(log=f\"removing rows with more than {threshold * 100}% of NaNs\")\n\n non_nan_values = int(df.shape[1] * (1 - threshold))\n df_clean = df.dropna(thresh=non_nan_values, axis=0)\n\n if log: section_timer.end_timer(log=f\"removed {df.shape[0] - df_clean.shape[0]} rows\")\n return df_clean", "def handel_nans(self):\n col_nan_pct = self.df.isin([' ',np.nan]).mean() #Calculates percent of Nans\n col_names = col_nan_pct[col_nan_pct >= .1].index # Gets name of columns with over 50% Nans\n col_count = [self.df[col].count() for col in col_names for x in self.df if x == col] #Gets length of valid values for column\n dropped_col = [col for col in zip(col_count, col_names) if col[0] <= 1400] #Gets columns names with under 50 values\n [self.df.drop(columns=[col[1]], inplace=True) for col in dropped_col]\n self.dropped_cols_phase_one = dropped_col\n [self.column_dtypes.pop(item[1]) for item in dropped_col]\n self.df[self.target].dropna(inplace=True)", "def remove(dataframe, limit=250):\n logfile = open('logfile_removecolumns.txt', 'w') # Create a logfile\n logfile.write('=====> Time: %s <=====\\n' % time.asctime(time.localtime()))\n logfile.write('=====> Log from file %s.py <===== \\n\\n' % __name__)\n\n columns_overview = dataframe.columns.summary() # Create an overview of the dataframe\n cols_list = dataframe.columns.tolist()\n cols_to_be_deleted = list()\n logfile.write('Overview of the dataframe: \\n%s' % columns_overview)\n\n for stock in range(len(cols_list)): # Walk through all stocks\n if dataframe[cols_list[stock]].isnull().sum() > limit: # Check No. of null values in a column\n cols_to_be_deleted.append(cols_list[stock])\n \n logfile.write('\\nNo. of Columns with more that %d missing values: %s\\n'\n % (limit, len(cols_to_be_deleted)))\n logfile.write('Deleted columns:\\n')\n for col in cols_to_be_deleted:\n logfile.write('%s \\n' % str(col))\n logfile.close()\n \n # Return updated dataframe or list of columns. 
See test code below\n dataframe_updated = dataframe[dataframe.columns.drop(cols_to_be_deleted)]\n return dataframe_updated", "def clean_columns(df: pd.DataFrame, filled_rate: float = 0.6) -> pd.DataFrame:\n\n print(f\"Initial shape of the dataframe: {str(df.shape) : >17}\")\n # keep columns that are filled more than the filled rate, default = 60%\n df = df.loc[:, (df.isnull().mean() < (1 - filled_rate))]\n print(f\"Shape after removing null columns: {str(df.shape) : >14}\")\n\n return df", "def remove_columns(df):\n avg = np.mean(df[df['sentiment'] != 'None']['sentiment'].astype('float'))\n df['sentiment'] = df['sentiment'].replace('None', avg).astype('float')\n\n to_remove = []\n print('column(s) removed: ')\n for column in df.columns:\n print(column)\n if(np.unique(df[column][df[column].notnull()]).shape[0] < 2):\n print(column)\n to_remove.append(column)\n \n return df.drop(columns = to_remove)", "def filter_rows_by_non_empty(df, threshold=1):\n # Boolean DataFrame where `True` means the cell value is non-zero.\n non_zeros = df.applymap(lambda cell: cell != 0)\n\n # Boolean Series where `True` means the row has enough non-zeros.\n enough_non_zeros = non_zeros.apply(\n # Check that the row contains `True`, meaning it has a non-zero.\n # check that the row has enough non-zeros, i.e. more than the threshold.\n lambda row: True in row.value_counts() and row.value_counts()[True] > threshold,\n axis=1\n )\n result = df[enough_non_zeros]\n if df.shape != result.shape:\n print('THRESHOLDING: filter_rows_by_non_empty')\n return result", "def handle_invalid(x, column_names=None):\n\n invalid_value = -999.0\n invalid_threshold = 0.7\n\n # Remove columns with a pct of invalid values above 70%\n pct_undef = (x <= invalid_value).mean(axis=0)\n below_thresh = pct_undef < invalid_threshold\n\n print(f\"{(~below_thresh).sum()} columns are above the invalid threshold. 
Removing\", end=\"\\n\\t\")\n if column_names is not None:\n print(*column_names[~below_thresh], sep=\"\\n\\t\")\n column_names = column_names[below_thresh]\n\n x = x[:, below_thresh]\n\n # Replace -999 with mean value of remaining values for each column still in dataset\n for i in range(x.shape[1]):\n col = x[:, i]\n mean = col[col > invalid_value].mean()\n col[col <= invalid_value] = mean\n\n return x, column_names", "def purgeNanEveryWhere(df):\n #Row-wise dropping\n toDrop = np.array([])\n for i in range(df.shape[0]):\n if( np.sum ( pd.isnull(df.iloc[i]) ) == df.shape[1]-1 ):\n toDrop= np.append(toDrop,i)\n df.drop(df.index[toDrop.astype(int)],inplace=True) \n #Column-wise dropping\n for col in df.columns:\n arr = pd.notnull(df[col])\n nnan = np.sum(arr) \n if (nnan == df.shape[1]):\n df.drop(col,inplace=True,axis=1)\n return df", "def purgeHighSparsedFeatures(df,threshold,barplot=False,title=''):\n \n thr = math.floor(df.shape[1] * threshold)\n rowsToDrop = np.array([])\n logger.debug(Sc+'Patient Threshold is %d' % thr) \n logger.debug(Sc+'Matrix dimensions : Rows %d , Columns %d'% (df.shape[0],df.shape[1]))\n #axis_x = np.arange(0,df.shape[0]) \n axis_y = np.array([]) \n numRows = df.shape[0] \n for i in range(1,numRows):\n arr = pd.isnull(df.iloc[i])\n nnan = np.sum(arr) \n axis_y = np.append(axis_y,nnan)\n if (nnan > thr):\n rowsToDrop = np.append(rowsToDrop,i)\n logger.debug ('%d features to drop ' % len(rowsToDrop))\n np.savetxt('debug/sparseFeaturesaxis_y.txt',axis_y)\n #if(barplot):\n # ax.title.set_text(title)\n # ax.bar(axis_x,axis_y) \n #logger.debug('After purge there are %d columns '% df.shape[1])\n return rowsToDrop", "def removeNonQuant(df, cols):\r\n df = df[~(df[cols].isnull().all(1))]\r\n return df", "def missing_stats(X, missing_threshold, axis=1):\n a = 1-axis\n missing_series = X.isnull().sum(axis = a) / X.shape[a]\n # Calculate the fraction of missing in each column \n missing_series = X.isnull().sum() / X.shape[0]\n if axis == 1:\n missing_stats_cols = pd.DataFrame(missing_series).rename(columns = {'index': 'feature', 0: 'missing_fraction'})\n # Sort with highest number of missing values on top\n missing_stats_cols = missing_stats_cols.sort_values('missing_fraction', ascending = False)\n missing_threshold_cols_grid = pd.DataFrame(missing_series[missing_series >= missing_threshold]).reset_index().rename(columns = {'index': 'cols', 0: 'missing_fraction'})\n return missing_threshold_cols_grid\n elif axis == 0:\n missing_stats_rows = pd.DataFrame(missing_series).rename(columns = {'index': 'feature', 0: 'missing_fraction'})\n # Sort with highest number of missing values on top\n missing_stats_rows = missing_stats_rows.sort_values('missing_fraction', ascending = False)\n missing_threshold_rows_grid = pd.DataFrame(missing_series[missing_series > missing_threshold]).reset_index().rename(columns = {'index': 'rows', 0: 'missing_fraction'})\n return missing_threshold_rows_grid", "def get_columns_with_nulls(X, columns_to_scan = \"all\", rows_to_scan=100000):\n rows_to_scan = get_rows_to_scan(rows_to_scan, X.shape[0])\n \n columns_to_scan = get_list_of_columns_to_check(columns_to_scan, X.columns)\n mask = np.array(X[columns_to_scan][:rows_to_scan].count() < rows_to_scan)\n return list(np.array(columns_to_scan)[mask])", "def drop_outliers(data, cols, t=1.5):\n iqr_d = iqr(data, cols, t)\n for col in cols:\n return data[~((data[col]< iqr_d[\"low_b\"][col]) | (data[col]> iqr_d[\"upp_b\"][col]))]", "def filter_rows_by_non_empty_until(df, max_=MAX_NUM_ROWS):\n print('Starting 
shape: %s' % str(df.shape))\n threshold = 1\n while df.shape[0] > max_:\n df = filter_rows_by_non_empty(df, threshold=threshold)\n print('THRESHOLDING: to shape: %s' % str(df.shape))\n threshold += 1\n print('Ending shape: %s' % str(df.shape))\n return df", "def _drop_inferior_features_transaction(\n df: pd.DataFrame,\n nan_threshold: float,\n target: str = \"isFraud\"\n) -> pd.DataFrame:\n print(\"Executing inferior feature removal...\")\n df = df.copy()\n num_columns = df.shape[1]\n if nan_threshold > 1.0 or nan_threshold < 0.0:\n raise ValueError(\"nan_threshold should be in range [0, 1].\")\n\n for col in df.columns:\n if col == target: # Preserve the target column.\n continue\n nan_percentage = np.mean(df[col].isna())\n if nan_percentage >= nan_threshold:\n df.drop(columns=[col], inplace=True)\n print(\"{}/{} features left with nan threshold {}\".format(\n df.shape[1], num_columns, nan_threshold\n ))\n return df", "def get_columns_not_all_nulls(X, columns_to_check='all', rows_to_scan='all'):\n columns_to_check = get_list_of_columns_to_check(columns_to_check, X.columns)\n remove_columns = get_columns_with_all_nulls(X, columns_to_check, rows_to_scan)\n return list(set(columns_to_check)-set(remove_columns))" ]
[ "0.8112062", "0.76812786", "0.76032734", "0.757241", "0.75673884", "0.73716843", "0.7303441", "0.7258961", "0.7185174", "0.6898564", "0.67759484", "0.67757314", "0.6744774", "0.67218643", "0.6713158", "0.66977847", "0.6672517", "0.66387624", "0.65781564", "0.6426446", "0.64067847", "0.63772273", "0.634327", "0.62863714", "0.6270596", "0.62441564", "0.623037", "0.62164253", "0.6173185", "0.61576724" ]
0.8181951
0
Wrap function pointers in C/C++ to Python functions.
def cython_c2py_conv_function_pointer(t_, ts):
    t = t_[1]
    argnames = []
    argdecls = []
    argbodys = []
    argrtns = []
    for n, argt in t[1][2]:
        argnames.append(n)
        decl, body, rtn = ts.cython_py2c(n, argt, proxy_name="c_" + n)
        argdecls += decl.split('\n') if isinstance(decl,basestring) else [decl]
        argbodys += body.split('\n') if isinstance(body,basestring) else [body]
        argrtns += rtn.split('\n') if isinstance(rtn,basestring) else [rtn]
    rtnname = 'rtn'
    rtnprox = 'c_' + rtnname
    rtncall = 'c_call_' + rtnname
    while rtnname in argnames or rtnprox in argnames:
        rtnname += '_'
        rtnprox += '_'
    argdecls = indent(argdecls)
    argbodys = indent(argbodys)
    rtndecl, rtnbody, rtnrtn, _ = ts.cython_c2py(rtncall, t[2][2], cached=False,
                                                 proxy_name=rtnprox, existing_name=rtncall)
    if rtndecl is None and rtnbody is None:
        rtnprox = rtnname
    rtndecls = [rtndecl]
    returns_void = (t[2][2] == 'void')
    if not returns_void:
        rtndecls.append("cdef {0} {1}".format(ts.cython_ctype(t[2][2]), rtncall))
    rtndecl = indent(rtndecls)
    rtnbody = indent(rtnbody)
    s = ('def {{proxy_name}}({arglist}):\n'
         '{argdecls}\n'
         '{rtndecl}\n'
         ' if {{var}} == NULL:\n'
         ' raise RuntimeError("{{var}} is NULL and may not be '
         'safely called!")\n'
         '{argbodys}\n')
    s += ' {{var}}({carglist})\n' if returns_void else \
         ' {rtncall} = {{var}}({carglist})\n'
    s += '{rtnbody}\n'
    s = s.format(arglist=", ".join(argnames), argdecls=argdecls,
                 cvartypeptr=ts.cython_ctype(t_).format(type_name='cvartype'),
                 argbodys=argbodys, rtndecl=rtndecl, rtnprox=rtnprox,
                 rtncall=rtncall, carglist=", ".join(argrtns), rtnbody=rtnbody)
    caches = 'if {cache_name} is None:\n' + indent(s)
    if not returns_void:
        caches += "\n return {rtnrtn}".format(rtnrtn=rtnrtn)
    caches += '\n {cache_name} = {proxy_name}\n'
    return s, s, caches
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_c_function_stubs(self):\n fn =\\\n\"\"\"{rettype} {fnname}({args}){{\n {rettype} ret;\n\n ret = {cast_and_deref}___madz_LANG_python_OUTPUT.{nodename}({argnames});\n\n return ret;\n}}\n\n\"\"\"\n fn_no_return =\\\n\"\"\"{rettype} {fnname}({args}){{\n ___madz_LANG_python_OUTPUT.{nodename}({argnames});\n return;\n}}\n\n\"\"\"\n res = \"\"\n c_gen = c_wrapgen.CGenerator([],\"\", self.description)\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n fragments = {\n \"maybe_parentheses\": \")\" if isinstance(node.type.return_type.get_type(),pdl.TypeStruct) else \"\",\n \"cast_and_deref\": self.make_c_cast_deref_string(c_gen, node.type.return_type),\n \"rettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"fnname\": \"___madz_LANG_python_FN_\" + node.name,\n \"nodename\": node.name,\n \"args\": \",\".join(map(\n lambda a: c_gen.gen_type_string(a.name, a.type),\n node.type.args)),\n \"argnames\":\",\".join(map(\n lambda a: a.name,\n node.type.args))\n }\n res += (fn if not isinstance(node.type.return_type, pdl.TypeTypeNone) else fn_no_return).format(**fragments)\n return res", "def WrapFunction(lib, funcname, restype, argtypes):\n func = lib.__getattr__(funcname)\n func.restype = restype\n func.argtypes = argtypes\n return func", "def write_fortran_wrappers(out, decl, return_val):\n delegate_name = decl.name + f_wrap_suffix\n out.write(decl.fortranPrototype(delegate_name, [\"static\"]))\n out.write(\" { \\n\")\n\n call = FortranDelegation(decl, return_val)\n\n if decl.name == \"MPI_Init\":\n # Use out.write() here so it comes at very beginning of wrapper function\n out.write(\" int argc = 0;\\n\");\n out.write(\" char ** argv = NULL;\\n\");\n call.addActual(\"&argc\");\n call.addActual(\"&argv\");\n call.write(out)\n out.write(\" *ierr = %s;\\n\" % return_val)\n out.write(\"}\\n\\n\")\n\n # Write out various bindings that delegate to the main fortran wrapper\n write_fortran_binding(out, decl, delegate_name, \"MPI_INIT\", [\"fortran_init = 1;\"])\n write_fortran_binding(out, decl, delegate_name, \"mpi_init\", [\"fortran_init = 2;\"])\n write_fortran_binding(out, decl, delegate_name, \"mpi_init_\", [\"fortran_init = 3;\"])\n write_fortran_binding(out, decl, delegate_name, \"mpi_init__\", [\"fortran_init = 4;\"])\n return\n\n # This look processes the rest of the call for all other routines.\n for arg in decl.args:\n if arg.name == \"...\": # skip ellipsis\n continue\n\n if not (arg.pointers or arg.array):\n if not arg.isHandle():\n # These are pass-by-value arguments, so just deref and pass thru\n dereferenced = \"*%s\" % arg.name\n call.addActual(dereferenced)\n else:\n # Non-ptr, non-arr handles need to be converted with MPI_Blah_f2c\n # No special case for MPI_Status here because MPI_Statuses are never passed by value.\n call.addActualMPI2(\"%s_f2c(*%s)\" % (conversion_prefix(arg.type), arg.name))\n call.addActualMPICH(\"(%s)(*%s)\" % (arg.type, arg.name))\n\n else:\n if not arg.isHandle():\n # Non-MPI handle pointer types can be passed w/o dereferencing, but need to\n # cast to correct pointer type first (from MPI_Fint*).\n call.addActual(\"(%s)%s\" % (arg.castType(), arg.name))\n else:\n # For MPI-1, assume ints, cross fingers, and pass things straight through.\n call.addActualMPICH(\"(%s*)%s\" % (arg.type, arg.name))\n conv = conversion_prefix(arg.type)\n temp = \"temp_%s\" % arg.name\n\n # For MPI-2, other pointer and array types need temporaries and special conversions.\n if not arg.isHandleArray():\n 
call.addTemp(arg.type, temp)\n call.addActualMPI2(\"&%s\" % temp)\n\n if arg.isStatus():\n call.addCopy(\"%s_f2c(%s, &%s);\" % (conv, arg.name, temp))\n call.addWriteback(\"%s_c2f(&%s, %s);\" % (conv, temp, arg.name))\n else:\n call.addCopy(\"%s = %s_f2c(*%s);\" % (temp, conv, arg.name))\n call.addWriteback(\"*%s = %s_c2f(%s);\" % (arg.name, conv, temp))\n else:\n # Make temporary variables for the array and the loop var\n temp_arr_type = \"%s*\" % arg.type\n call.addTemp(temp_arr_type, temp)\n call.addTemp(\"int\", \"i\")\n\n # generate a copy and a writeback statement for this type of handle\n if arg.isStatus():\n copy = \" %s_f2c(&%s[i], &%s[i])\" % (conv, arg.name, temp)\n writeback = \" %s_c2f(&%s[i], &%s[i])\" % (conv, temp, arg.name)\n else:\n copy = \" temp_%s[i] = %s_f2c(%s[i])\" % (arg.name, conv, arg.name)\n writeback = \" %s[i] = %s_c2f(temp_%s[i])\" % (arg.name, conv, arg.name)\n\n # Generate the call surrounded by temp array allocation, copies, writebacks, and temp free\n count = \"*%s\" % arg.countParam().name\n call.addCopy(\"%s = (%s)malloc(sizeof(%s) * %s);\" %\n (temp, temp_arr_type, arg.type, count))\n call.addCopy(\"for (i=0; i < %s; i++)\" % count)\n call.addCopy(\"%s;\" % copy)\n call.addActualMPI2(temp)\n call.addWriteback(\"for (i=0; i < %s; i++)\" % count)\n call.addWriteback(\"%s;\" % writeback)\n call.addWriteback(\"free(%s);\" % temp)\n\n call.write(out)\n if decl.returnsErrorCode():\n out.write(\" *ierr = %s;\\n\" % return_val)\n else:\n out.write(\" return %s;\\n\" % return_val)\n out.write(\"}\\n\\n\")\n\n # Write out various bindings that delegate to the main fortran wrapper\n write_fortran_binding(out, decl, delegate_name, decl.name.upper())\n write_fortran_binding(out, decl, delegate_name, decl.name.lower())\n write_fortran_binding(out, decl, delegate_name, decl.name.lower() + \"_\")\n write_fortran_binding(out, decl, delegate_name, decl.name.lower() + \"__\")", "def dummy_wrapper(func):\n return func", "def dummy_python_wrapper(*args, **_kwargs):\n def _inner_wrapper(func):\n return func\n\n if len(args) == 1 and callable(args[0]):\n return _inner_wrapper(args[0])\n\n return _inner_wrapper", "def cpp_function(self):", "def _called_funcs_to_python(loop, context, indent):\n \n # Get the definitions for all local functions called directly in the loop.\n local_funcs = _get_all_called_funcs(loop, context)\n local_func_hashes = set()\n for curr_func in local_funcs:\n curr_func_hash = hashlib.md5(safe_str_convert(curr_func).encode()).hexdigest()\n local_func_hashes.add(curr_func_hash)\n \n # Now get the definitions of all the local functions called by the local\n # functions.\n seen_funcs = set()\n funcs_to_handle = list(local_funcs)\n while (len(funcs_to_handle) > 0):\n\n # Get the current function definition to check for calls.\n curr_func = funcs_to_handle.pop()\n curr_func_hash = hashlib.md5(safe_str_convert(curr_func).encode()).hexdigest()\n \n # Already looked at this one?\n if (curr_func_hash in seen_funcs):\n continue\n seen_funcs.add(curr_func_hash)\n\n # Get the functions called in the current function.\n curr_local_funcs = _get_all_called_funcs(curr_func, context)\n\n # Save the new functions for processing.\n for new_func in curr_local_funcs:\n new_func_hash = hashlib.md5(safe_str_convert(new_func).encode()).hexdigest()\n if (new_func_hash not in local_func_hashes):\n local_func_hashes.add(new_func_hash)\n local_funcs.append(new_func)\n funcs_to_handle.append(new_func)\n \n # Convert each local function to Python.\n r = \"\"\n for local_func in 
local_funcs:\n r += to_python(local_func, context, indent=indent) + \"\\n\"\n\n # Done.\n indent_str = \" \" * indent\n r = indent_str + \"# VBA Local Function Definitions\\n\" + r\n return r", "def adaptPythonToCpp(self, *args):\n return _SALOMERuntime.RuntimeSALOME_adaptPythonToCpp(self, *args)", "def make_function_callbacks(self):\n res = \"\"\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n frags={\n \"name\": node.name,\n \"nameupper\": self.python_madz_deftypes + \"___\" + node.name,\n \"sanitize\": \"_sanitize_python_callback\" if isinstance(node.type.return_type.get_type(), pdl.TypePointer) else \"_python_callback\"\n }\n res += \\\n\"\"\"\n temp = cast({sanitize}(user_code_module.{name}, {nameupper}), {nameupper})\n keepers['{nameupper}'] = temp\n _plugin.contents.{name} = temp\n\"\"\".format(**frags)\n return res", "def __call__(fun_name):", "def _wrapper(func, args):\n return func(*args)", "def wrapper(*args):", "def test_callback_from_c(self):\n source = io.StringIO(\"\"\"\n int add(int x, int y);\n int x(int a) {\n return add(a + 1, 13);\n }\n \"\"\")\n arch = get_current_arch()\n obj = cc(source, arch, debug=True)\n def my_add(x: int, y: int) -> int:\n return x + y + 2\n imports = {\n 'add': my_add\n }\n m = load_obj(obj, imports=imports)\n y = m.x(101)\n self.assertEqual(117, y)", "def make_c_header(self):\n res = \\\n\"\"\"PyThreadState* ___madz_LANG_python_thread_state; //Holds Thread State for this interpreter\nPyObject *___madz_LANG_python_wrapper_module; //Hold Pointer to the _madz.py file representing this plugin\ntypedef struct{{\n{function_pointers}\n}}___madz_LANG_python_TYPE_;\n___madz_LANG_python_TYPE_ ___madz_LANG_python_OUTPUT;\nvoid ___madz_init_imports();\n{fn_dec}\n\n\"\"\"\n c_gen = c_wrapgen.CGenerator([],\"\", self.description)\n #TODO function_pointers, all same except\n fragments ={\"fn_dec\" : \"\", \"function_pointers\" : \"\"}\n fn = \"\"\"{rettype}{fnname}({args});\\n\"\"\"\n pointer = \"\"\" {prettype} (*{nodename})({args});\\n\"\"\"\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n frg = {\n \"prettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"rettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"fnname\": \"___madz_LANG_python_FN_\" + node.name,\n \"nodename\": node.name,\n \"args\": \",\".join(map(\n lambda a: c_gen.gen_type_string(a.name, a.type),\n node.type.args)),\n\n }\n fragments[\"fn_dec\"] += fn.format(**frg)\n fragments[\"function_pointers\"] += pointer.format(**frg)\n if fragments[\"function_pointers\"] == \"\":\n fragments[\"function_pointers\"] = \"uint8_t _madz_empty;\"\n return res.format(**fragments)", "def write_gotcha_c_wrapper(out, decl, return_val, write_body):\n # Write the pointer to the original function\n out.write(\"gotcha_wrappee_handle_t _wrap_py_%s_handle = 0x0;\\n\" % decl.name)\n\n # Now write the wrapper function, which will call the original function through the pointer\n out.write(decl.gotcha_prototype(default_modifiers))\n out.write(\" { \\n\")\n out.write(\" %s %s = 0;\\n\" % (decl.retType(), return_val))\n\n write_body(out)\n\n out.write(\" return %s;\\n\" % return_val)\n out.write(\"}\\n\\n\")\n\n # Write the GOTCHA binding struct\n out.write(\"struct gotcha_binding_t wrap_%s_binding = { \\\"%s\\\", (void*) wrap_%s, &_wrap_py_%s_handle };\\n\\n\" % (decl.name, decl.name, decl.name, decl.name))", "def wrap_function(funcname, restype, argtypes):\n func = 
_lib.__getattr__(funcname)\n func.restype = restype\n func.argtypes = argtypes\n return func", "def test_py_closure(self):", "def wrap(function, wrapper):\n\t# Copy the function signature.\n\twrapper.__module__ = function.__module__\n\twrapper.__name__ = function.__name__\n\twrapper.__doc__ = function.__doc__", "def registerForeign(func, swipl, name=None, arity=None, flags=0):\n global cwraps\n\n if arity is None:\n arity = func.arity\n\n if name is None:\n name = func.__name__\n\n nondeterministic = bool(flags & PL_FA_NONDETERMINISTIC)\n\n cwrap = _callbackWrapper(arity, nondeterministic)\n fwrap = _foreignWrapper(func, swipl, nondeterministic)\n fwrap2 = cwrap(fwrap)\n cwraps.append(fwrap2)\n return swipl.PL_register_foreign(name, arity, fwrap2, flags)\n # return PL_register_foreign(name, arity,\n # _callbackWrapper(arity)(_foreignWrapper(func)), flags)", "def make_wrapper(fname, atypes, rtype, cres):\n fndesc = cres.fndesc\n module = cres.library.create_ir_module(fndesc.unique_name)\n context = cres.target_context\n ll_argtypes = [context.get_value_type(ty) for ty in atypes]\n ll_return_type = context.get_value_type(rtype)\n\n # TODO: design a API for custom wrapping\n if type(rtype).__name__ == 'ArrayPointer':\n wrapty = ir.FunctionType(ir.VoidType(),\n [ll_return_type] + ll_argtypes)\n wrapfn = module.add_function(wrapty, fname)\n builder = ir.IRBuilder(wrapfn.append_basic_block('entry'))\n fnty = context.call_conv.get_function_type(rtype, atypes)\n fn = builder.module.add_function(fnty, cres.fndesc.llvm_func_name)\n status, out = context.call_conv.call_function(\n builder, fn, rtype, atypes, wrapfn.args[1:])\n with cgutils.if_unlikely(builder, status.is_error):\n cgutils.printf(builder,\n f\"rbc: {fname} failed with status code %i\\n\",\n status.code)\n builder.ret_void()\n builder.store(builder.load(out), wrapfn.args[0])\n builder.ret_void()\n else:\n wrapty = ir.FunctionType(ll_return_type, ll_argtypes)\n wrapfn = module.add_function(wrapty, fname)\n builder = ir.IRBuilder(wrapfn.append_basic_block('entry'))\n fnty = context.call_conv.get_function_type(rtype, atypes)\n fn = builder.module.add_function(fnty, cres.fndesc.llvm_func_name)\n status, out = context.call_conv.call_function(\n builder, fn, rtype, atypes, wrapfn.args)\n with cgutils.if_unlikely(builder, status.is_error):\n cgutils.printf(builder,\n f\"rbc: {fname} failed with status code %i\\n\",\n status.code)\n builder.ret(out)\n\n cres.library.add_ir_module(module)", "def _make_methods(functions, modname):\n methods_table = []\n codes = []\n for funcname, flags, code in functions:\n cfuncname = \"%s_%s\" % (modname, funcname)\n if 'METH_KEYWORDS' in flags:\n signature = '(PyObject *self, PyObject *args, PyObject *kwargs)'\n else:\n signature = '(PyObject *self, PyObject *args)'\n methods_table.append(\n \"{\\\"%s\\\", (PyCFunction)%s, %s},\" % (funcname, cfuncname, flags))\n func_code = \"\"\"\n static PyObject* {cfuncname}{signature}\n {{\n {code}\n }}\n \"\"\".format(cfuncname=cfuncname, signature=signature, code=code)\n codes.append(func_code)\n\n body = \"\\n\".join(codes) + \"\"\"\n static PyMethodDef methods[] = {\n %(methods)s\n { NULL }\n };\n static struct PyModuleDef moduledef = {\n PyModuleDef_HEAD_INIT,\n \"%(modname)s\", /* m_name */\n NULL, /* m_doc */\n -1, /* m_size */\n methods, /* m_methods */\n };\n \"\"\" % dict(methods='\\n'.join(methods_table), modname=modname)\n return body", "def register(prim):\n def deco(fn):\n vm_register(prim)(lambda vm, *args: fn(*args))\n return py_register(prim)(fn)\n return 
deco", "def _wrap_FunctionDef(self, expr):\n if expr.is_private:\n return EmptyNode()\n\n name = self.scope.get_new_name(f'bind_c_{expr.name.lower()}')\n self._wrapper_names_dict[expr.name] = name\n\n # Create the scope\n func_scope = self.scope.new_child_scope(name)\n self.scope = func_scope\n\n self._additional_exprs = []\n\n if any(isinstance(a.var, FunctionAddress) for a in expr.arguments):\n warnings.warn(\"Functions with functions as arguments cannot be wrapped by pyccel\")\n return EmptyNode()\n\n # Wrap the arguments and collect the expressions passed as the call argument.\n func_arguments = [self._wrap(a) for a in expr.arguments]\n call_arguments = [self._get_call_argument(fa) for fa in func_arguments]\n func_to_call = {fa : ca for ca, fa in zip(call_arguments, func_arguments)}\n\n func_results = [self._wrap_FunctionDefResult(r) for r in expr.results]\n\n func_call_results = [r.var.clone(self.scope.get_expected_name(r.var.name)) for r in expr.results]\n\n body = self._get_function_def_body(expr, func_arguments, func_to_call, func_call_results)\n\n body.extend(self._additional_exprs)\n self._additional_exprs.clear()\n\n self.exit_scope()\n\n func = BindCFunctionDef(name, func_arguments, func_results, body, scope=func_scope, original_function = expr,\n doc_string = expr.doc_string)\n\n self.scope.functions[name] = func\n\n return func", "def wrapper(*args, **kwargs):\r\n return lambda: func(*args, **kwargs)", "def params(funcarglist):\n def wrapper(function):\n function.funcarglist = funcarglist\n return function\n return wrapper", "def public(*args):\n def public_wrapper():\n pass\n return public_wrapper", "def wrapper(func, *args, **kwargs):\n def wrapped():\n return func(*args, **kwargs)\n return wrapped", "def minimal_wrap(new_fun):\n fun_name = new_fun.__name__\n assert fun_name in dir(torch)\n torch_fun = getattr(torch, fun_name)\n new_fun.__doc__ = torch_fun.__doc__\n STABLE_FUNCTIONS[torch_fun] = new_fun\n return new_fun", "def wrapperfunc(func):\n def wrap_wrapper(self, *args, **kwargs):\n def inner_wrapper(arg):\n apifunc = get_apifunc(arg)\n func(self, apifunc, *args, **kwargs)\n return apifunc\n return inner_wrapper\n return wrap_wrapper", "def methdispatch(func): \n dispatcher = singledispatch(func)\n def wrapper(*args, **kw):\n return dispatcher.dispatch(args[1].__class__)(*args, **kw)\n wrapper.register = dispatcher.register\n update_wrapper(wrapper, func)\n return wrapper" ]
[ "0.6707142", "0.62710714", "0.61361706", "0.6121755", "0.6089381", "0.5960903", "0.5935476", "0.592127", "0.5906617", "0.5896414", "0.5892851", "0.5890732", "0.5884935", "0.58657503", "0.5816589", "0.58025694", "0.57851225", "0.57759845", "0.57509685", "0.57376343", "0.5663666", "0.56482357", "0.5616386", "0.5589189", "0.558182", "0.5545899", "0.55420125", "0.5538886", "0.55209404", "0.54873276" ]
0.7016661
0
Given batch of logits, return onehot sample using epsilon greedy strategy (based on given epsilon)
def onehot_from_logits(logits, eps=0.0): # get best (according to current policy) actions in one-hot form argmax_acs = (logits == logits.max(1, keepdim=True)[0]).float() if eps == 0.0: return argmax_acs # get random actions in one-hot form rand_acs = torch.tensor(torch.eye(logits.shape[1])[[np.random.choice( range(logits.shape[1]), size=logits.shape[0])]], requires_grad=False) # chooses between best and random actions using epsilon greedy return torch.stack([argmax_acs[i] if r > eps else rand_acs[i] for i, r in enumerate(torch.rand(logits.shape[0]))])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def onehot(rating):\n vec = [0 for i in range(5)]\n vec[rating - 1] = 1\n return np.array(vec)", "def gumbel_softmax_sample(logits, temperature):\n y = logits + sample_gumbel(tf.shape(logits))\n return tf.nn.softmax( y / temperature)", "def gumbel_softmax_sample(logits, temperature):\n y = logits + sample_gumbel(tf.shape(logits))\n return tf.nn.softmax( y / temperature)", "def one_hot(self, x, vocab, n_grams) -> np.ndarray:\n _x = np.zeros((x.__len__(), self.max_length, vocab.__len__()))\n for i, item in enumerate(tqdm(x, desc='One Hot Text Encoder')):\n items = TextProcessor.n_gram_split(item, n_grams)\n for j, token in enumerate(items):\n if j >= self.max_length:\n break\n if token in vocab:\n idx = vocab.index(token)\n _x[i][j][idx] = 1\n\n return _x", "def onehot(index):\n classNum=2#1\n onehot = np.zeros(classNum)#这代表种类类型\n onehot[index] = 1.0\n return onehot", "def onehot(isTrue):\n if isTrue:\n return [1, 0]\n else:\n return [0, 1]", "def gumbel_softmax_sample(logits, temperature):\n y = logits + sample_gumbel(tf.shape(logits))\n return tf.nn.softmax( y / temperature)", "def onehot(inputs, num_classes):\n num_sample = inputs.size(0)\n inputs_onehot = torch.zeros(num_sample, num_classes)\n inputs_onehot.scatter_(1, inputs.unsqueeze(1), 1.0)\n return inputs_onehot", "def one_hot(a, actions, dtype=t.float32):\n assert 1 == a.shape[-1]\n return (a == t.arange(actions, device=a.device)).to(dtype=dtype)", "def attack_batch(self, img, label_1hot):\n def is_confidently_fooled(x, true_label):\n if not isinstance(x, (float, int, np.int64)) and not (isinstance(x, torch.Tensor) and x.numel() == 1):\n z = torch.clone(x)\n if self.TARGETED:\n z[true_label] -= self.CONFIDENCE\n else:\n z[true_label] += self.CONFIDENCE\n z = torch.argmax(z)\n else:\n z = x\n\n if self.TARGETED:\n return z == true_label\n else:\n return z != true_label\n\n # convert img to float32 to avoid numba error\n img = img.type(torch.float32)\n\n if torch.argmax(model(img+0.5)) != torch.argmax(label_1hot):\n print(\"Image is already misclassified.\")\n return img, 0.0\n\n # remove the extra batch dimension\n if len(img.shape) == 4:\n img = img[0]\n if len(label_1hot.shape) == 2:\n label_1hot = label_1hot[0]\n # convert to tanh-space\n if self.use_tanh:\n img = torch.arctanh(img*1.999999)\n\n # set the lower and upper bounds accordingly\n c_lower_bound = 0.0\n c = self.initial_c\n c_upper_bound = 1e10\n\n # set the upper and lower bounds for the modifier\n if not self.use_tanh:\n self.modifier_up = 0.5 - img.reshape(-1)\n self.modifier_down = -0.5 - img.reshape(-1)\n\n # clear the modifier\n # if not self.load_checkpoint:\n # if self.use_resize:\n # self.resize_img(self.resize_init_size,\n # self.resize_init_size, True)\n # else:\n # self.real_modifier = torch.zeros(\n # (1,) + (self.num_channels, self.small_x, self.small_y), dtype=torch.float32, device=self.device)\n # if self.solver_name == \"fake_zero\":\n # self.real_modifier.requires_grad = True\n\n # the best l2, score, and image attack\n outer_best_c = c\n outer_best_l2 = 1e10\n outer_best_score = -1\n if self.use_tanh:\n outer_best_adv = torch.tanh(img)/2\n else:\n outer_best_adv = img\n\n for outer_step in range(self.BINARY_SEARCH_STEPS):\n print(outer_best_l2)\n\n best_l2 = 1e10\n best_score = -1\n\n # The last iteration (if we run many steps) repeat the search once.\n if self.repeat == True and outer_step == self.BINARY_SEARCH_STEPS-1:\n c = c_upper_bound\n\n # set the variables so that we don't have to send them over again\n # self.setup = []\n self.true_img = 
img.detach().clone()\n self.true_label_1hot = label_1hot.detach().clone()\n self.c = c\n # self.setup = [self.true_img, self.true_label_1hot, self.c]\n\n # use the current best model\n # np.copyto(self.real_modifier, outer_best_adv - img)\n # use the model left by last constant change\n\n prev_loss = 1e6\n train_timer = 0.0\n last_loss1 = 1.0\n if not self.load_checkpoint:\n if self.use_resize:\n self.resize_img(self.resize_init_size,\n self.resize_init_size, True)\n else:\n self.real_modifier = torch.zeros(\n (1,) + (self.num_channels, self.small_x, self.small_y), dtype=torch.float32, device=self.device)\n if self.solver_name == \"fake_zero\":\n self.real_modifier.requires_grad = True\n\n # reset ADAM status\n self.mt.fill_(0.0)\n self.vt.fill_(0.0)\n self.adam_epoch.fill_(1)\n self.stage = 0\n multiplier = 1\n eval_costs = 0\n if self.solver_name != \"fake_zero\":\n multiplier = 24\n for iteration in range(self.start_iter, self.MAX_ITERATIONS):\n if self.use_resize:\n if iteration == 2000:\n # if iteration == 2000 // 24:\n self.resize_img(64, 64)\n if iteration == 10000:\n # if iteration == 2000 // 24 + (10000 - 2000) // 96:\n self.resize_img(128, 128)\n # if iteration == 200*30:\n # if iteration == 250 * multiplier:\n # self.resize_img(256,256)\n # print out the losses every 10%\n if iteration % (self.print_every) == 0:\n # print(iteration,self.sess.run((self.total_loss,self.real,self.other,self.loss1,self.loss2), feed_dict={self.modifier: self.real_modifier}))\n\n self.compute_loss(self.real_modifier)\n\n total_loss, real, other, loss1, loss2 = self.total_loss, self.real, self.other, self.loss1, self.loss2\n print(\"[STATS][L2] iter = {}, cost = {}, time = {:.3f}, size = {}, loss = {:.5g}, real = {:.5g}, other = {:.5g}, loss1 = {:.5g}, loss2 = {:.5g}\".format(\n iteration, eval_costs, train_timer, self.real_modifier.shape, total_loss[0], real[0], other[0], loss1[0], loss2[0]))\n sys.stdout.flush()\n # np.save('black_iter_{}'.format(iteration), self.real_modifier)\n\n attack_begin_time = time.time()\n # perform the attack\n if self.solver_name == \"fake_zero\":\n total_loss, l2, loss1, loss2, score, nimg = self.fake_blackbox_optimizer()\n else:\n total_loss, l2, loss1, loss2, score, nimg = self.blackbox_optimizer(\n iteration)\n\n if self.solver_name == \"fake_zero\":\n eval_costs += self.real_modifier.numel()\n else:\n eval_costs += self.batch_size\n\n # reset ADAM states when a valid example has been found\n if loss1 == 0.0 and last_loss1 != 0.0 and self.stage == 0:\n # we have reached the fine tunning point\n # reset ADAM to avoid overshoot\n if self.reset_adam_after_found:\n self.mt.fill_(0.0)\n self.vt.fill_(0.0)\n self.adam_epoch.fill_(1)\n self.stage = 1\n last_loss1 = loss1\n\n # check if we should abort search if we're getting nowhere.\n # if self.ABORT_EARLY and iteration%(self.MAX_ITERATIONS//10) == 0:\n if self.ABORT_EARLY and iteration % self.early_stop_iters == 0:\n if total_loss > prev_loss*.9999:\n print(\"Early stopping because there is no improvement\")\n break\n prev_loss = total_loss\n\n # adjust the best result found so far\n # the best attack should have the target class with the largest value,\n # and has smallest l2 distance\n\n if l2 < best_l2 and is_confidently_fooled(score, torch.argmax(label_1hot)):\n best_l2 = l2\n best_score = torch.argmax(score)\n if l2 < outer_best_l2 and is_confidently_fooled(score, torch.argmax(label_1hot)):\n # print a message if it is the first attack found\n if outer_best_l2 == 1e10:\n print(\"[STATS][L3](First valid attack found!) 
iter = {}, cost = {}, time = {:.3f}, size = {}, loss = {:.5g}, loss1 = {:.5g}, loss2 = {:.5g}, l2 = {:.5g}\".format(\n iteration, eval_costs, train_timer, self.real_modifier.shape, total_loss, loss1, loss2, l2))\n sys.stdout.flush()\n outer_best_l2 = l2\n outer_best_score = torch.argmax(score)\n outer_best_adv = nimg\n outer_best_c = c\n\n train_timer += time.time() - attack_begin_time\n\n # adjust the constant as needed\n\n if is_confidently_fooled(best_score, torch.argmax(label_1hot)) and best_score != -1:\n # success, divide const by two\n print('old c: ', c)\n c_upper_bound = min(c_upper_bound, c)\n if c_upper_bound < 1e9:\n c = (c_lower_bound + c_upper_bound)/2\n print('new c: ', c)\n else:\n # failure, either multiply by 10 if no solution found yet\n # or do binary search with the known upper bound\n print('old c: ', c)\n c_lower_bound = max(c_lower_bound, c)\n if c_upper_bound < 1e9:\n c = (c_lower_bound + c_upper_bound)/2\n else:\n c *= 10\n print('new c: ', c)\n\n if self.use_tanh:\n img = torch.tanh(img)/2\n\n # return the best solution found\n return outer_best_adv, outer_best_c", "def onehot(t, num_classes, device='cpu'):\n return torch.zeros(t.size()[0], num_classes).to(device).scatter_(1, t.view(-1, 1), 1)", "def one_hot(labels):\n one_hot_labels = np.zeros(labels.shape + (n_actions,))\n for c in range(n_actions):\n one_hot_labels[labels == c, c] = 1.0\n return one_hot_labels", "def _one_hot(self, proba, y):\n hm_1hot = []\n for label in np.unique(y):\n mask_ = y == label\n proba_tmp = proba.copy()\n proba_tmp[~mask_] = 0\n hm_y, auto_gamma = self._adjust_gamma(proba_tmp)\n\n if self.verbose:\n print('Adjusted gamma: ', str(auto_gamma))\n hm_1hot.append(hm_y)\n return hm_1hot", "def one_hot(x):\n cart_pos,cart_vel,pole_ang,pole_vel = x\n\n # Cart position\n discrete_cart_pos = int((cart_pos - CART_POSITION_MIN)/(CART_POSITION_MAX-CART_POSITION_MIN)*4)\n\n # Pole angle\n discrete_pol_ang = int((cart_pos - POLE_ANGLE_MIN)/(POLE_ANGLE_MAX-POLE_ANGLE_MIN)*4)\n\n # Cart velocity\n cart_vel_discretisations = [-1,0,1]\n discrete_cart_vel= 3\n for i,v in enumerate(cart_vel_discretisations):\n if cart_vel < v:\n discrete_cart_vel = i\n break\n\n # Pole tip velocity\n pole_vel_discretisations = [-1,0,1]\n discrete_pole_vel= 3\n for i,v in enumerate(pole_vel_discretisations):\n if pole_vel < v:\n discrete_pole_vel = i\n break\n\n # Convert to one-hot encoding\n x = discrete_cart_pos + discrete_cart_vel*4 + discrete_pol_ang*8 + discrete_cart_vel*12\n output = [0] * ONE_HOT_NUM_FEATURES\n output[x] = 1\n return np.array([output]).transpose()", "def onehot(t, num_classes):\n assert isinstance(t, torch.LongTensor)\n return torch.zeros(t.size()[0], num_classes).scatter_(1, t.view(-1, 1), 1)", "def one_hot(self, x, N):\n\n ### YOUR CODE HERE ###\n M = x.shape[0]\n one_hot = np.zeros((N, M))\n for i in range(M):\n one_hot[x[i], i] = 1\n \n# for xi,intWord in enumerate(x):\n# one_hot[xi,intWord] = 1\n \n\n assert one_hot.shape == (N, x.shape[0])\n return one_hot", "def make_one_hot(X, onehot_size):\n if onehot_size < 450:\n dig_one = np.zeros((onehot_size, onehot_size))\n np.fill_diagonal(dig_one, 1)\n rX = dig_one[np.asarray(X)]\n else:\n # for large onehot size, this is faster\n rX = np.zeros((len(X), onehot_size))\n for i in range(len(X)):\n rX[i, X[i]] = 1\n return rX", "def one_hot(x, num_classes, dtype=jnp.float32):\n return jax.nn.one_hot(x, num_classes).astype(dtype)", "def one_hot(y_):\n y_ = y_.reshape(len(y_))\n n_values = int(np.max(y_)) + 1\n n_values = 6\n return 
np.eye(n_values)[np.array(y_, dtype=np.int32)] # Returns FLOATS", "def onehot_argmax(logits):\n return T.extra_ops.to_one_hot(T.argmax(logits,-1),logits.shape[-1])", "def smooth_predict_hard(model, x, noise, sample_size=64, noise_batch_size=512):\n counts = None\n num_samples_left = sample_size\n\n while num_samples_left > 0:\n\n shape = torch.Size([x.shape[0], min(num_samples_left, noise_batch_size)]) + x.shape[1:]\n samples = x.unsqueeze(1).expand(shape)\n samples = samples.reshape(torch.Size([-1]) + samples.shape[2:])\n samples = noise.sample(samples.view(len(samples), -1)).view(samples.shape)\n logits = model.forward(samples).view(shape[:2] + torch.Size([-1]))\n top_cats = torch.argmax(logits, dim=2)\n if counts is None:\n counts = torch.zeros(x.shape[0], logits.shape[-1], dtype=torch.float, device=x.device)\n counts += F.one_hot(top_cats, logits.shape[-1]).float().sum(dim=1)\n num_samples_left -= noise_batch_size\n\n return Categorical(probs=counts)", "def _get_one_hot(targets, num_classes):\n ret = np.zeros((num_classes, targets.shape[0]))\n ret[targets, np.arange(targets.size)] = 1\n return ret", "def softmax_cross_entropy(logit, onehot, axis=-1):\n return SoftmaxCrossEntropy(axis).forward(logit, onehot)", "def smooth_loss(logits, labels, num_classes, eps):\n smoothed_one_hot = one_hot(labels.reshape(-1), num_classes)\n smoothed_one_hot = smoothed_one_hot * (1 - eps) + (1 - smoothed_one_hot) * eps / (num_classes - 1)\n log_prb = F.log_softmax(logits.reshape(-1, num_classes), dim=1)\n loss = -(smoothed_one_hot * log_prb).sum(dim=1)\n # print(\"loss:\", loss)\n loss = loss.mean()\n return loss", "def sample_attack(self, eps, num_samples = 100):\n #Repeat x num_sample times\n n, d = self.x.shape\n x_ext = tf.keras.backend.repeat(self.x, num_samples)\n big_shape = tf.shape(x_ext)\n x_ext = tf.reshape(x_ext, [-1, d])\n n, num_classes = self.y.shape\n y_ext = tf.keras.backend.repeat(self.y, num_samples)\n y_ext = tf.reshape(y_ext, [-1, num_classes])\n\n #Perturb x_ext\n x_pert = x_ext + tf.random.uniform(tf.shape(x_ext), minval = -eps, maxval = eps)\n\n #Get loss for x_pert\n activations, predictions = model(x_pert, self.hidden_sizes, self.num_classes, self.sigma)\n loss_vector_ext = tf.nn.softmax_cross_entropy_with_logits(logits=predictions, labels=y_ext)\n\n #Reshape into desired shapes\n loss_vector_ext = tf.reshape(loss_vector_ext, [-1, num_samples])\n x_three_dim = tf.reshape(x_ext, big_shape)\n\n #Perform argmax to get indices\n best_indices = tf.argmax(loss_vector_ext, axis = 1, output_type = tf.dtypes.int32)\n n = tf.shape(self.x)[0]\n row_idx = tf.range(n)\n extract_idx = tf.stack([row_idx, best_indices], axis = 1)\n\n #Return X_adv, loss_adv, acc_adv\n x_adv = tf.gather_nd(x_three_dim, extract_idx)\n\n #Sample a bunch of points around X\n return x_adv", "def gumbel_softmax(logits, temperature=1.0, hard=False):\n y = gumbel_softmax_sample(logits, temperature)\n if hard:\n y_hard = onehot_from_logits(y)\n y = (y_hard - y).detach() + y\n return y", "def one_hot(x, dtype=np.float32):\n return np.array(x[:, None] == np.arange(x.max()+1), dtype)", "def top1gating(\n logits: torch.Tensor,\n input_mask: Optional[torch.Tensor] = None,\n use_fp32=False,\n capacity_factor=1.0,\n eval_mode=False,\n moe_eval_capacity_token_fraction=EVAL_CAPACITY_TOKEN_FRACTION,\n use_xmoe=False,\n gate_obj=None,\n) -> Tuple[Tensor, Tensor, Tensor, Dict]:\n metadata = {}\n if use_fp32:\n orig_dtype = logits.dtype\n logits = logits.float()\n\n gates = F.softmax(logits, dim=1)\n metadata[\"entropy_gating\"] = 
entropy(probs=gates).mean().detach()\n\n # gates has shape of SE\n num_tokens = gates.shape[0]\n num_experts = gates.shape[1]\n if moe_eval_capacity_token_fraction > 0.0 and eval_mode:\n capacity = math.ceil(moe_eval_capacity_token_fraction * num_tokens)\n else:\n # capacity = capacity_factor * S/E\n capacity = int(capacity_factor * math.ceil(num_tokens / num_experts))\n\n # Create a mask for 1st's expert per token\n indices1_s = torch.argmax(gates, dim=1)\n mask1 = one_hot(indices1_s, num_classes=num_experts, unsqueeze_indices=True)\n if input_mask is not None and input_mask.any():\n nonpadding = ~input_mask\n mask1 = mask1 * nonpadding.unsqueeze(-1).to(mask1.dtype)\n\n # for logging (percent of tokens routed to each expert)\n expert1_hist = (\n 100\n * torch.histc(\n (indices1_s.squeeze() + 1), bins=num_experts, min=1, max=num_experts\n )\n / num_tokens\n )\n metadata[\"unused_expert1_count\"] = (expert1_hist == 0).sum()\n expert1_hist = (\n torch.sort(expert1_hist, dim=0, descending=True).values\n + torch.finfo(torch.float32).tiny\n )\n\n sample_count = max(math.ceil(num_experts * SAMPLE_FRACTION), 1)\n metadata[\"expert1_balance_top\"] = expert1_hist[:sample_count].sum()\n metadata[\"expert1_balance_bottom\"] = expert1_hist[-sample_count:].sum()\n\n gates1_s = (gates * mask1).sum(dim=1)\n\n # Compute locations in capacity buffer\n locations1 = fused_cumsum_sub_one(mask1)\n\n # Compute l_aux\n me = torch.mean(gates, dim=0)\n ce = torch.mean(mask1.to(gates.dtype), dim=0)\n\n l_aux = torch.mean(me * ce)\n l_aux = l_aux * num_experts * num_experts\n\n if has_tutel:\n locations1_s = torch.sum(locations1 * mask1, dim=1)\n return (\n l_aux,\n metadata,\n capacity,\n num_experts,\n [\n indices1_s,\n ],\n [\n locations1_s,\n ],\n [\n gates1_s,\n ],\n )\n\n # Remove locations outside capacity from mask\n mask1 = mask1 * torch.lt(locations1, capacity)\n # Store the capacity location for each token\n locations1_s = torch.sum(locations1 * mask1, dim=1)\n\n # Calculate combine_weights and dispatch_mask\n gates1 = gates1_s.unsqueeze(-1) * mask1.to(gates1_s.dtype) # einsum(\"s,se->se\")\n # locations1_sc = num_tokens * capacity\n locations1_sc = one_hot(locations1_s, num_classes=capacity, unsqueeze_indices=True)\n combine1_sec = torch.bmm(\n # einsum(\"se,sc->sec\")\n gates1.unsqueeze(-1),\n locations1_sc.to(gates1.dtype).unsqueeze(1),\n )\n dispatch_mask = combine1_sec.bool()\n if use_fp32:\n return l_aux, combine1_sec.to(orig_dtype), dispatch_mask, metadata\n else:\n return l_aux, combine1_sec, dispatch_mask, metadata", "def _gumbel_softmax_sample(self, logits, temperature):\r\n y = logits + self._sample_gumbel(tf.shape(logits))\r\n return tf.nn.softmax(y / temperature, axis=-1)", "def one_hot(self, y):\n\n one_hot = np.zeros((self.n_samples, self.n_classes))\n\n # using np.array to select elements of another np.array\n\n # first diemention index\n # np.arange(self.n_samples) : (1, n_samples)\n # row vectors (0,1,2.....n_samples-1) \n\n # second dimention index\n # \n\n\n one_hot[np.arange(self.n_samples), y.T] = 1\n return one_hot" ]
[ "0.6337137", "0.6137332", "0.6137332", "0.60971755", "0.6095149", "0.60834587", "0.6070503", "0.6048451", "0.6005317", "0.59944475", "0.59887624", "0.5985697", "0.5955396", "0.595437", "0.59288865", "0.5925413", "0.59172916", "0.591372", "0.59115016", "0.591121", "0.59053683", "0.58989", "0.58983546", "0.58878934", "0.58861166", "0.5883406", "0.58710045", "0.58693886", "0.5864558", "0.5857319" ]
0.6582483
0
Perform DDPG soft update (move target params toward source based on weight factor tau)
def soft_update(target, source, tau): for target_param, param in zip(target.parameters(), source.parameters()): target_param.data.copy_(target_param.data * (1.0 - tau) + param.data * tau)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def soft_update(source_net, target_net, tau):\n for target_param, param in zip(target_net.parameters(), source_net.parameters()):\n target_param.data.copy_(\n target_param.data * (1.0 - tau) + param.data * tau\n )", "def get_soft_target_model_updates(target, source, tau):\n target_weights = target.get_weights()\n tau_values = np.ones(np.shape(target_weights)) * tau\n new_weights = (1 - tau_values) * target.get_weights() + tau_values * source.get_weights()\n target.set_weights(new_weights)\n return target", "def _soft_update(self, active, target):\n\n for t_param, param in zip(target.parameters(), active.parameters()):\n t_param.data.copy_(self.tau*param.data + (1-self.tau)*t_param.data)", "def _soft_update_target_network(self):\n\n # Update the target network\n for target_param, param in zip(self.actor_target_network.parameters(), self.actor_network.parameters()):\n target_param.data.copy_((1-self.args.tau) * target_param.data + self.args.tau * param.data)\n\n # Update the critic network\n for target_param, param in zip(self.critic_target_network.parameters(), self.critic_network.parameters()):\n target_param.data.copy_((1-self.args.tau) * target_param.data + self.args.tau * param.data)", "def soft_update_target_network(self):\n \n pars_behavior = self.model.get_weights() # these have form [W1, b1, W2, b2, ..], Wi = weights of layer i\n pars_target = self.target_model.get_weights() # bi = biases in layer i\n \n for par_behavior,par_target in zip(pars_behavior,pars_target):\n par_target = par_target*(1-self.tau) + par_behavior*self.tau\n pars_target[ctr] = par_target\n\n self.target_model.set_weights(pars_target)", "def soft_update_target_network(self):\n \n pars_behavior = self.model.get_weights() # these have form [W1, b1, W2, b2, ..], Wi = weights of layer i\n pars_target = self.target_model.get_weights() # bi = biases in layer i\n \n ctr = 0\n for par_behavior,par_target in zip(pars_behavior,pars_target):\n par_target = par_target*(1-self.tau) + par_behavior*self.tau\n pars_target[ctr] = par_target\n ctr += 1\n\n self.target_model.set_weights(pars_target)", "def SoftUpdate(self, local, target, tau):\n for target_param, local_param in zip(target.parameters(), local.parameters()):\n target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)", "def update_target(self):\n with torch.no_grad():\n for target_q_param, q_param in zip(self.target_q_funcs.parameters(), self.q_funcs.parameters()):\n target_q_param.data.copy_(self.tau * q_param.data + (1.0 - self.tau) * target_q_param.data)\n for target_pi_param, pi_param in zip(self.target_policy.parameters(), self.policy.parameters()):\n target_pi_param.data.copy_(self.tau * pi_param.data + (1.0 - self.tau) * target_pi_param.data)", "def soft_update(self, local_model, target_model, tau):\n for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):\n target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)", "def soft_update_critic(self):\n local_weights = np.array(self.critic_local.model.get_weights())\n target_weights = np.array(self.critic_target.model.get_weights())\n\n assert len(local_weights) == len(\n target_weights), ('Local and target model parameters must have '\n 'the same size')\n\n new_weights = self.tau * local_weights + (1 - self.tau) * target_weights\n self.critic_target.model.set_weights(new_weights)", "def soft_update(self, other, tau):\n new_weights = {}\n\n own_weights = self.get_weight_copies()\n other_weights = other.get_weight_copies()\n\n for k in 
own_weights:\n #print(own_weights[k].shape, other_weights[k].shape)\n new_weights[k] = (1 - tau) * own_weights[k] + tau * other_weights[k]\n self.set_weights(new_weights)", "def soft_update_actor(self):\n local_weights = np.array(self.actor_local.model.get_weights())\n target_weights = np.array(self.actor_target.model.get_weights())\n\n assert len(local_weights) == len(\n target_weights), ('Local and target model parameters must have '\n 'the same size')\n\n new_weights = self.tau * local_weights + (1 - self.tau) * target_weights\n self.actor_target.model.set_weights(new_weights)", "def update_generate_params(self,inps,trgs,preds):\n batch_size = np.shape(trgs)[0]\n\n self.delta_weight_h_to_v = self.learning_rate / batch_size * np.transpose(trgs) @ (inps - preds)\n self.delta_bias_v = self.learning_rate * np.mean(inps - preds)\n \n self.weight_h_to_v += self.delta_weight_h_to_v\n self.bias_v += self.delta_bias_v \n \n return", "def soft_update(self, local_model, target_model, tau):\n local_weights = np.array(local_model.get_weights())\n target_weights = np.array(target_model.get_weights())\n\n assert len(local_weights) == len(\n target_weights), ('Local and target model parameters must have '\n 'the same size')\n\n new_weights = tau * local_weights + (1 - tau) * target_weights\n target_model.set_weights(new_weights)", "def update(self, state, action, nextState, reward):\n \"\"\"Description:\n Use second equation in slide 71 of MDP\n Adjest weight of active features depend on tranistion \n \"\"\"\n \"\"\" YOUR CODE HERE \"\"\"\n feat = self.featExtractor.getFeatures(state, action)\n\n # if weight is empty, then weight will need to initial to 1 for all features\n # According to which Extractor user choose, weight counter will have equal number of keys.\n if len(self.weight) == 0:\n feat = self.featExtractor.getFeatures(state, action)\n self.weight.incrementAll(feat.keys(), 1)\n \n maxQns = self.getValue(nextState)\n if maxQns == None:\n maxQns = 0\n Qsa = self.getQValue(state, action)\n difference = ( reward + self.discountRate * maxQns ) - Qsa\n \n for key in self.weight.keys():\n self.weight[key] += (self.alpha * difference * feat[key])\n \n \n \"\"\" END CODE \"\"\"", "def update_target_network(self, tau):\n for t, e in zip(\n self.target_network.trainable_variables, self.online_network.trainable_variables\n ):\n t.assign(t * (1-tau) + e * tau)", "def update_target_network(self, tau):\n for p_target, p_local in zip(self.q_network_target.parameters(), self.q_network_local.parameters()):\n p_target.data.copy_(tau * p_local.data + (1.0-tau) * p_target.data)", "def update_policy(self, minibatch_size):\n \n steps = self.rewards.shape[0]\n batch_size = self.rewards.shape[0] * self.rewards.shape[1]\n #steps = 500\n #batch_size = 500\n #print(steps)\n #print(batch_size)\n \n # Compute advantages\n '''\n with torch.no_grad():\n if self.gae:\n advantages = torch.zeros_like(self.rewards).to(self.training_device)\n lastgaelam = 0\n for t in reversed(range(steps)):\n if t == steps - 1:\n nextnonterminal = 1.0 - self.dones[t]\n nextvalues = self.state_values[t]\n else:\n nextnonterminal = 1.0 - self.dones[t + 1]\n nextvalues = self.state_values[t + 1]\n delta = self.rewards[t] + self.gamma * nextvalues * nextnonterminal - self.state_values[t]\n advantages[t] = lastgaelam = delta + self.gamma * self.gae_lambda * nextnonterminal * lastgaelam\n returns = advantages + self.state_values\n else:\n returns = torch.zeros_like(self.rewards).to(self.training_device)\n for t in reversed(range(steps)):\n if t == 
steps - 1:\n nextnonterminal = 1.0 - self.dones[t]\n next_return = self.state_values[t]\n else:\n nextnonterminal = 1.0 - self.dones[t+1]\n next_return = returns[t+1]\n returns[t] = self.rewards[t] + self.gamma * nextnonterminal * next_return\n advantages = returns - self.state_values\n ''' \n returns = torch.zeros_like(self.rewards).to(self.training_device)\n for t in reversed(range(steps)):\n if t == steps - 1:\n nextnonterminal = 1.0 - self.dones[t]\n next_return = self.state_values[t]\n else:\n nextnonterminal = 1.0 - self.dones[t+1]\n next_return = returns[t+1]\n returns[t] = self.rewards[t] + self.gamma * nextnonterminal * next_return\n advantages = returns - self.state_values\n \n\n # flatten the batch\n #b_obs = self.states.reshape((-1,) + self.state_space)\n #print(self.states.shape)\n b_obs = self.states.reshape((-1,4)).detach()\n b_logprobs = self.action_probs.reshape(-1,1).detach()\n b_actions = self.actions.reshape((-1,)).detach()\n b_advantages = advantages.reshape(-1,1)\n b_returns = returns.reshape(-1,1)\n b_values = self.state_values.reshape(-1,1)\n \n # Optimize policy and value network for K epochs, run optimization in minibatches\n \n inds = np.arange(batch_size)\n for i_epoch_pi in range(self.epochs):\n np.random.shuffle(inds)\n for start in range(0, batch_size, minibatch_size):\n end = start + minibatch_size\n minibatch_ind = inds[start:end]\n mb_advantages = b_advantages[minibatch_ind]\n if self.norm_adv:\n mb_advantages = (mb_advantages - mb_advantages.mean()) / (mb_advantages.std() + 1e-8)\n \n #_, newlogproba, entropy = self.get_action(b_obs[minibatch_ind], b_actions[minibatch_ind])\n newlogproba, entropy = self.evaluate(b_obs[minibatch_ind], b_actions[minibatch_ind])\n #ratio = (newlogproba - b_logprobs[minibatch_ind]).exp()\n ratio = torch.exp((newlogproba - b_logprobs[minibatch_ind].detach()))\n \n # Stats\n approx_kl = (b_logprobs[minibatch_ind] - newlogproba).mean()\n\n # Policy loss\n pg_loss1 = -mb_advantages * ratio\n pg_loss2 = -mb_advantages * torch.clamp(ratio, 1 - self.clip_epsilon, 1 + self.clip_epsilon)\n pg_loss = torch.max(pg_loss1, pg_loss2).mean()\n entropy_loss = entropy.mean()\n\n # Value loss\n _, new_values = self.policy.forward(b_obs[minibatch_ind])\n if self.clip_vloss:\n \n v_loss_unclipped = self.MseLoss(new_values,b_returns[minibatch_ind])\n #v_loss_unclipped = ((new_values - b_returns[minibatch_ind]) ** 2)\n v_clipped = b_values[minibatch_ind] + torch.clamp(new_values - b_values[minibatch_ind],\n -self.clip_epsilon, self.clip_epsilon)\n #v_loss_clipped = (v_clipped - b_returns[minibatch_ind]) ** 2\n v_loss_clipped = self.MseLoss(v_clipped,b_returns[minibatch_ind])\n v_loss_max = torch.max(v_loss_unclipped, v_loss_clipped)\n #v_loss = 0.5 * v_loss_max.mean()\n v_loss = 0.5 * v_loss_max\n else:\n #v_loss = 0.5 * ((new_values - b_returns[minibatch_ind]) ** 2).mean()\n v_loss = self.MseLoss(new_values,b_returns[minibatch_ind])\n\n loss = pg_loss + v_loss * self.vf_coeff - self.ent_coeff * entropy_loss\n\n self.optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)\n self.optimizer.step()\n # Copy new weights into old policy:\n self.old_policy.load_state_dict(self.policy.state_dict())", "def hard_update_target_network(self,step):\n \n if step % self.C == 0:\n pars = self.model.get_weights()\n self.target_model.set_weights(pars)", "def hard_update_target_network(self,step):\n \n if step % self.C == 0:\n pars = self.model.get_weights()\n self.target_model.set_weights(pars)", "def 
update_network(self, tr_d, lr, relz=\"\", lmbda=0.0, mu=0.0):\n trlen = float(len(tr_d))\n delta_b = [np.zeros(b.shape) for b in self.biases]\n delta_w = [np.zeros(w.shape) for w in self.weights]\n for x,y in tr_d:\n delta_b_single, delta_w_single = self.backppg_ce(x,y)\n delta_b = [db+dbs for db,dbs in zip(delta_b, delta_b_single)]\n delta_w = [dw+dws for dw,dws in zip(delta_w, delta_w_single)]\n #update the parameters in network\n if(relz==\"\"):\n mu=0.0\n elif(relz[0:2] == \"MO\"):\n relz = relz[2:]\n self.velw = [mu*vw-(lr/trlen)*dw for vw,dw in zip(self.velw, delta_w)]\n self.velb = [mu*vb-(lr/trlen)*db for vb,db in zip(self.velb, delta_b)]\n self.biases = [b + vb for b,vb in zip(self.biases, self.velb)]\n if(relz == \"L2\"):\n self.weights = [w + vw - (lr/trlen/100)*lmbda*w for w,vw in zip(self.weights, self.velw)]\n elif(relz == \"L1\"):\n self.weights = [w + vw - (lr/trlen/100)*lmbda*np.sign(w) for w,vw in zip(self.weights, self.velw)]\n else:\n self.weights = [w + vw for w,vw in zip(self.weights, self.velw)]", "def add_grad_updates(self):\n \n gradients = T.grad(self.cost, self.theta)\n \n for target_param, grad in zip(self.theta, gradients):\n \n if target_param.name ==\"W\" and self.num_hidden ==0\\\n and self.zero_diag:\n \n grad = grad - T.diag(T.diag(grad)) # no x i - xi connections\n # for all i = 1, ..., D\n ##############################################################\n if target_param.name ==\"b\" and self.learn_biases == False:\n print(\"Will not learn bias terms\")\n pass\n \n elif target_param.name ==\"bhid\" and self.learn_biases == False:\n print(\"Will not learn bias terms\")\n pass\n \n else:\n \n if self.use_momentum:\n \n # alternative definition (mostly seen):\n #g_tilda = self.momentum*self.grad_vec[target_param.name] - \\\n #T.cast(self.learning_rate, dtype = theano.config.floatX)*grad\n #self.updates[target_param] = target_param + g_tilda\n \n g_tilda = self.momentum*self.grad_vec[target_param.name] - \\\n (1-self.momentum)*grad\n \n self.updates[target_param] = target_param +\\\n T.cast(self.learning_rate, dtype = theano.config.floatX)*g_tilda\n \n # store g_tilda for next iteration:\n self.updates[self.grad_vec[target_param.name]] = g_tilda\n \n else:\n \n self.updates[target_param] = target_param -\\\n T.cast(self.learning_rate, dtype = theano.config.floatX)*grad\n \n if (\"PCD\" in self.algorithm) and self.num_hidden > 0:\n \n self.updates[self.persistent_gibbs] = self.hid_samples", "def update_target(self, target, pred, update_rate):\n for target_param, pred_param in zip(target.parameters(), pred.parameters()):\n target_param.data.copy_((1.0 - update_rate)\n * target_param.data + update_rate * pred_param.data)", "def update_parameters_with_gd(parameters, grads, learning_rate):\n\n L = len(parameters) // 2 # number of layers in the neural networks\n\n # Update rule for each parameter\n for l in range(L):\n ### START CODE HERE ### (approx. 
2 lines)\n parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)]-learning_rate* grads[\"dW\" + str(l+1)]\n parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)]-learning_rate* grads[\"db\" + str(l+1)]\n ### END CODE HERE ###\n \n return parameters", "def soft_update(self, local_model, target_model):\n local_weights = np.array(local_model.get_weights())\n target_weights = np.array(target_model.get_weights())\n\n assert len(local_weights) == len(target_weights), \"Local and target model parameters must have the same size.\"\n\n new_weights = self.tau * local_weights + (1 - self.tau) * target_weights\n target_model.set_weights(new_weights)", "def update_policy(self):\n self.optimizer.step()\n self.optimizer.zero_grad()", "def get_hard_target_model_updates(target, source):\n target.set_weights(source.get_weights())\n\n return target", "def densitychange(self,dt=0.1):\n #Using conservation of mass and diffusion\n dp_dt = -div(self.u*self.d)\n dp_dt += ndimage.laplace(self.d)\n #This term seems to make the density clump together, producing \n #waves which can make the simulation blow up.\n #dp_dt -= np.add.reduce(self.u*np.array(np.gradient(self.d)))\n #Edge density shouldn't change.\n dp_dt[[0,-1]] = dp_dt[:,[0,-1]] = 0\n self.d += dp_dt*dt\n #Change pressure accordingly to ideal gas law\n #AAAAAAAAAAAAAAAA this fixed most of the poblems from before!!!\n self.P = self.d*8.214*273\n #Conserve mass by spreading out fluctuations \n self.d[1:-1,1:-1] += (self.mass-np.sum(self.d))/self.vol", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n features = self.featExtractor.getFeatures(state,action)\n\n learning_rate = self.alpha #gives us the learning rate\n\n temporary_QValue = self.getQValue(state,action) #to get the Q value of the state,action pair\n\n nextState_QValue = self.getValue(nextState) #to get the Q value of the landing state when taken action a and state s\n\n discount_factor = self.discount #to get the gamma/ discount factor\n\n weight = self.weights\n\n Q_Value = 0\n\n difference = (reward + discount_factor * nextState_QValue ) - (temporary_QValue) #refer to README_Reinforcement.txt for the formula\n\n for each_feature in features:\n\n #refer to README_Reinforcement.txt for the formula at line 20\n weight[each_feature] = weight[each_feature] + learning_rate * difference * features[each_feature]\n\n #util.raiseNotDefined()", "def _update_parameter(self, dWxh, dbh, dWhy, dby):\n # Add code to update all the weights and biases here" ]
[ "0.7275282", "0.69240314", "0.6855993", "0.67452204", "0.67254907", "0.67084914", "0.66630906", "0.6627795", "0.6586101", "0.6532605", "0.65074867", "0.64917654", "0.63443524", "0.63400596", "0.6314455", "0.6290518", "0.62819207", "0.6264888", "0.6256692", "0.6256692", "0.62240535", "0.6141723", "0.6120249", "0.6083234", "0.60305226", "0.6026632", "0.60011756", "0.59996504", "0.5991698", "0.5990019" ]
0.7679478
0
Check if n_samples samples can be sampled from the buffer.
def can_sample(self, n_samples): return len(self) >= n_samples
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def can_sample(self, n_samples):\n return self.replay_buffer.can_sample(n_samples)", "def can_sample(self, batch_size):\n return batch_size + 1 <= self.num_in_buffer", "def has_nsamples(results, n):\n n_rates = results.growth_rates.sample_id.nunique()\n n_exchanges = results.exchanges.sample_id.nunique()\n return n_rates == n and n_exchanges == n", "def has_more_samples(self):\n return True", "def has_more_samples(self):\n return True", "def has_more_samples(self):\n return True", "def should_sample(self):\n sampling_probability = 1.0 / self._trans_sample_rate\n log.debug(\"should_sample (sampling prob: %f)\", sampling_probability)\n\n return random() <= sampling_probability", "def _test_sampsize(t):\n return t.shape[1] != len(t.ids(axis='sample'))", "def is_sampled(z):\n return True", "def sample(self, n_samples, sample_seed=None):\n return NotImplemented", "def test_sample_from_extra_bounds_good(self):\n dim = Real(\"yolo\", \"norm\", 0, 2, low=-5, high=+5, shape=(4, 4))\n for _ in range(8):\n samples = dim.sample(8)\n for sample in samples:\n assert sample in dim", "def is_sample(self):\n return self.flags & NODE_IS_SAMPLE", "def _checkSample(self):\n Sampler._checkSample(self)\n # make sure the prefix is registered for tracking\n # but if there's no identifying information, skip this check\n if self._registeredIdentifiers:\n prefix = self.inputInfo['prefix']\n if not prefix in self._prefixToIdentifiers:\n self.raiseAnError(RuntimeError, f'Prefix \"{prefix}\" has not been tracked in adaptive sampling!')", "def _check_run_sample_masks(n_runs, sample_masks):\n if not isinstance(sample_masks, (list, tuple, np.ndarray)):\n raise TypeError(\n f\"sample_mask has an unhandled type: {sample_masks.__class__}\"\n )\n\n if isinstance(sample_masks, np.ndarray):\n sample_masks = (sample_masks,)\n\n checked_sample_masks = [_convert_bool2index(sm) for sm in sample_masks]\n\n if len(checked_sample_masks) != n_runs:\n raise ValueError(\n f\"Number of sample_mask ({len(checked_sample_masks)}) not \"\n f\"matching number of runs ({n_runs}).\"\n )\n return checked_sample_masks", "def should_sample(self, span_context):\n if span_context.trace_options.get_enabled():\n return True\n\n lower_long = get_lower_long_from_trace_id(span_context.trace_id)\n bound = self.rate * 0xffffffffffffffff\n return lower_long <= bound", "def test_sample_outputs_range(self):\n context = {'class_label': tf.zeros((_BATCH_SIZE,), dtype=tf.int32)}\n sample_dict = self.model.sample(\n _BATCH_SIZE, max_sample_length=_MAX_SAMPLE_LENGTH_VERTS,\n context=context)\n with self.session() as sess:\n sess.run(tf.global_variables_initializer())\n sample_dict_np = sess.run(sample_dict)\n in_range = np.logical_and(\n 0 <= sample_dict_np['vertices'],\n sample_dict_np['vertices'] <= 2**_QUANTIZATION_BITS).all()\n self.assertTrue(in_range)", "def _number_of_samples(self):\n return len(self._raw_data.samples)", "def sample(self, n):\n raise NotImplementedError", "def sample(self, span):\n # type: (Span) -> bool\n if self.sample_rate == 1:\n return True\n elif self.sample_rate == 0:\n return False\n\n return ((span._trace_id_64bits * KNUTH_FACTOR) % _MAX_UINT_64BITS) <= self._sampling_id_threshold", "def accept_sample(self, proposal: np.array) -> bool:\n ratio = self.objective.p(proposal) / self.objective.p(self.theta)\n if np.random.uniform() < ratio:\n return True\n return False", "def verify_psample_stats(dut, params):\n output = psample_stats(dut, params.keys())\n if not output:\n st.log(\"Observed empty output\")\n return False\n entries = 
filter_and_select(output, None, params)\n if not entries:\n st.log(\"PSAMPLE STATS VERIFICATION FAILED\")\n return False\n return True", "def should_sample(self, span_context):\n raise NotImplementedError", "def sample(self, n=1):\n raise NotImplementedError", "def _is_padding_necessary(self, signal: np.array) -> bool:\n if len(signal) < self.number_expected_samples:\n return True\n else:\n return False", "def is_sampler(self, sampler):\n self.assertIsInstance(sampler, types.GeneratorType)\n for _ in range(10):\n sample = next(sampler)\n self.assertIsInstance(sample, np.ndarray)\n self.assertEqual(\n sample.shape,\n (\n self._dim,\n 1,\n ),\n )", "def _sample_n_unique(n, lo, hi, exclude=None):\n batch = np.empty(n, dtype=np.uint32)\n k = 0\n while k < n:\n samples = np.random.randint(low=lo, high=hi, size=n - k)\n samples = np.unique(samples) # Get only the unique entries\n # Get only the entries which are not in exclude\n if exclude is not None:\n valid = np.all(samples[:, None] != exclude, axis=-1)\n # print(\"***\", (samples[:, None] != exclude).shape, valid) # (32, 5)\n samples = samples[valid] # (None,) contains True or False\n # print(\"samples:\", samples)\n # Update batch\n end = min(k + samples.shape[0], n)\n batch[k:end] = samples\n k = end\n return batch", "def _copy_buffer_samples(self, buffer_info, nSamples, arr, copy_all_samples=False, last_read=False):\n\t\tcAvailable = c_int()\n\t\tcLost = c_int()\n\t\tcCorrupted = c_int()\n\t\tcSamples = buffer_info[0]\n\n\t\t# get DigitalIn status because we want to read from buffer\n\t\tself._get_DigitalIn_status(read_data=True)\n\n\t\t# record info about the data collection process (filling of the buffer)\n\t\tdwf.FDwfDigitalInStatusRecord(self.interface_handler, byref(cAvailable), byref(cLost), byref(cCorrupted))\n\n\t\tif copy_all_samples:\n\t\t\tdwf.FDwfDigitalInStatusData(self.interface_handler, byref(arr), c_int(2*4096))\n\t\t\treturn [0, 0, 0]\n\n\t\tcSamples += cLost.value\n\t\tif cSamples + cAvailable.value > nSamples:\n\t\t\tcAvailable = c_int(nSamples - cSamples)\n\n\t\t# copy samples to arr on computer\n\t\tdwf.FDwfDigitalInStatusData(self.interface_handler, byref(arr, 2*cSamples), c_int(2*cAvailable.value))\n\n\t\tcSamples += cAvailable.value\n\n\t\tbuffer_info = [cSamples, buffer_info[1] + cLost.value, buffer_info[2] + cCorrupted.value]\n\t\treturn buffer_info", "def generate_samples(self, n_samples):", "def generate_samples(self, n_samples):", "def test_sample(self):\n dist = self.many_samples([0, 0, 0, 1])\n self.assertEquals(3, dist.argMax())\n\n dist = self.many_samples([1, 0, 0, 0, 0])\n self.assertEquals(0, dist.argMax())\n\n dist = self.many_samples([0.5, 0, 0, 0.25, 0.25])\n self.assertAlmostEquals(dist[0], 0.5, delta=0.01)\n self.assertAlmostEquals(dist[3], 0.25, delta=0.01)\n self.assertAlmostEquals(dist[4], 0.25, delta=0.01)\n self.assertEquals(dist[1], 0)\n self.assertEquals(dist[2], 0)\n\n with self.assertRaises(AssertionError):\n diffp.sample([0.5, 0.5, 0.01])" ]
[ "0.8454722", "0.75795233", "0.640667", "0.6323213", "0.6323213", "0.6323213", "0.6122864", "0.6073661", "0.6004948", "0.5970521", "0.5948469", "0.5939403", "0.58542854", "0.5847299", "0.5821416", "0.57972753", "0.578739", "0.5759048", "0.5714147", "0.5691538", "0.5649599", "0.56185585", "0.5617566", "0.55663115", "0.5554738", "0.5549458", "0.553235", "0.55171484", "0.55171484", "0.55149996" ]
0.8536972
0
Check whether the replay buffer is full or not.
def is_full(self): return len(self) == self.buffer_size
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bufferIsFull(self):\n return len(self.buffer) == self.bufferSize", "def isFull(self):\n return self.__size == len(self.__buffer)", "def full(self):\n return len(self.future_buffer) == self.CAPACITY", "def isFull(self):\r\n if (len(self.queue) == self.maxlen):\r\n return True\r\n else:\r\n return False", "def _buffer_capacity_reached(self) -> bool:\n return self.size() >= self.buffer_capacity", "def are_buffers_empty(self): \n i = 0\n for i in range(self.no_robots):\n if self.is_buffer_empty_for_robot(i) is True:\n i += 1\n else:\n return False\n if i >= self.no_robots:\n return True\n else:\n pass", "def isBufferEmpty(self):\n return self.ecg_buffer.empty()", "def is_buffer_empty(self): \n if self.buffer.shape == (0, 5):\n return True\n else:\n return False", "def isFull(self):\n return len(self.queue) == self.size", "def is_empty(self):\r\n return self.buff==[]", "def isFull(self):\n if len(self._data) == self._length:\n return True\n else:\n return False", "def isFull(self):\n if len(self.batch) == self.__batch_size:\n return True\n return False", "def has_msg(self):\n return self.bufsize >= 4 and self.bufsize - 4 >= struct.unpack('!I', str(self.buf.peek(0, 4)))[0]", "def isFull(self):\n return self.rear == self.size", "def check_if_full(self):\n pass", "def is_full(self):\n return len(self._data) == 1", "def isFull(self) -> bool:\n return self.size == self.maxlen", "def is_full(self):\n return len(self.cache_data) >= self.MAX_ITEMS", "def is_full(self):\n return self.remaining_space_in_hold() == 0", "def isFull(self) -> bool:\n return self.count == self.capacity", "def isFull(self) -> bool:\n return self.count == self.capacity", "def isFull(self):\n return (self.end + 1) % self.max_length == self.start", "def more(self):\n # return True if there are still frames in the queue. If stream is not stopped, try to wait a moment\n tries = 0\n while self.Q.qsize() == 0 and not self.stopped and tries < 5:\n time.sleep(0.1)\n tries += 1\n\n return self.Q.qsize() > 0", "def isFull(self):\n return self.count == self.capacity", "def isFull(self):\n return self.count == self.capacity", "def full(self):\n return self._current_size == self._size", "def is_full(self):\n\n return self.count == len(self.array)", "def is_buffer_empty_for_robot(self, robot_id): \n if isinstance(self.all_buffers[robot_id], float):\n return True\n elif isinstance(self.all_buffers[robot_id], np.ndarray):\n if self.all_buffers[robot_id].size == 0:\n return True\n else:\n return False\n else:\n return False", "def isFull(self):\n\t\treturn self.size == self.capacity", "def is_full(self) -> bool:\r\n return self.size == self.capacity" ]
[ "0.77881503", "0.74171346", "0.7156626", "0.7121728", "0.7075783", "0.7043759", "0.69657725", "0.69448876", "0.69159514", "0.69134", "0.6893223", "0.680426", "0.6794952", "0.6730122", "0.6697695", "0.66528744", "0.6636179", "0.66332513", "0.6545874", "0.6479786", "0.6479786", "0.64549893", "0.64424163", "0.64170545", "0.64170545", "0.6392958", "0.6386083", "0.63844377", "0.637224", "0.636922" ]
0.7575566
1
Method to get the main event in a list of event position in a sentence. When there is a tie, the first one is chosen. Actually 'cause event only account for one token, we could use leaf_treeposition to get the distance from root to that leaf.
def get_highest_event (self, list_of_event_pos): highest = None highest_distance = 100 part_of_speech_list = self.tree.pos() for i in xrange(len(list_of_event_pos)): event_pos = list_of_event_pos[i] try: distance = len(self.tree.leaf_treeposition(event_pos)) if distance < highest_distance: highest_distance = distance highest = i elif distance == highest_distance: try: highest_POS = part_of_speech_list[list_of_event_pos[highest]][1] current_POS = part_of_speech_list[list_of_event_pos[i]][1] """ If the current event is actually a verb, it should replace the highest event with the same high """ if highest_POS[0] != 'V' and current_POS[0] == 'V': highest_distance = distance highest = i except Exception: logger.warn("Problem in comparing part of speech of two \ highest event candidate") except IndexError as ie: logger.warn("Index error") logger.info('Event pos %d' %event_pos) logger.info('Tree length %d' %len(self.tree.leaves())) logger.info(str(self.tree)) return highest
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getEventLocation():\n global currentToken\n global currentChunk\n global currentSentence\n if currentSentence is not None:\n if currentToken is not None and currentToken.isAdjToken(): #if not currentChunk\n position = currentToken.position\n logger.debug(\"Event position obtained from AdkToken: \"+str(position))\n else:\n position = currentChunk.position\n logger.debug(\"Event position obtained from Chunk: \"+str(position))\n return position\n else:\n debug.error(\"No position for current Event\")", "def _find_position(self, e):\n walk = self._data.first()\n while walk is not None and walk.element()._value != e:\n walk = self._data.after(walk)\n \n return walk", "def locate_nearest_event(self):\n nearest_event_date = ''\n min = 1000000\n today = self.get_today()\n event_array = self.events.keys()\n for event_date in event_array:\n event_date = self.date_to_operate_format(event_date)\n if int(event_date) - int(today) > 0:\n if int(event_date) - int(today) < min:\n min = int(event_date) - int(today)\n nearest_event_date = event_date\n\n nearest_event = '0'\n if len(event_array) > 0:\n nearest_event = self.change_format_to_database_index(nearest_event_date)\n\n return nearest_event", "def find_event_and_match(text: str) -> typing.Tuple[typing.Optional['string'], typing.Optional[int]]:\n for event in EVENT_IDS:\n if event in text.lower():\n numbers = re.findall(r'\\d+', text)\n match_number = 0\n \n if len(numbers) == 1:\n match_number = numbers[0]\n return (EVENT_IDS[event], int(match_number))\n if len(numbers) == 2:\n match_number = numbers[0] * 10 + numbers[1]\n return (EVENT_IDS[event], int(match_number))\n else:\n return (EVENT_IDS[event], None)\n return (None, None)", "def treepos(self, tree):\n if tree is None:\n raise ValueError(\"Parse tree not available\")\n stack = [tree]\n treepos = []\n\n wordnum = 0\n while True:\n # tree node:\n if isinstance(stack[-1], Tree):\n # Select the next child.\n if len(treepos) < len(stack):\n treepos.append(0)\n else:\n treepos[-1] += 1\n # Update the stack.\n if treepos[-1] < len(stack[-1]):\n stack.append(stack[-1][treepos[-1]])\n else:\n # End of node's child list: pop up a level.\n stack.pop()\n treepos.pop()\n # word node:\n else:\n if wordnum == self.wordnum:\n return tuple(treepos[: len(treepos) - self.height - 1])\n else:\n wordnum += 1\n stack.pop()", "def best_event_match(events, text, threshold_percentage):\n # Filter out short, stop words\n text = filter(lambda x: x not in ENGLISH_STOPWORDS, text.lower().split())\n\n score = 0\n best_event = None\n for event in events:\n # print event\n new_score, entity_score = event_compare(event, text)\n if entity_score == 0:\n continue\n if new_score > score:\n # print new_score, score\n score = new_score\n best_event = event\n\n if not best_event:\n return None\n\n threshold = len(best_event.text.split())*threshold_percentage\n if score < threshold: \n # print \"Event score %s not above threshold %s\" % (score, threshold)\n return None\n\n # print \"For match %s, %s\" % (text, best_event.text)\n return best_event", "def max_event_offset(event_list):\n\n if isinstance(event_list, dcase_util.containers.MetaDataContainer):\n return event_list.max_offset\n\n else:\n max_offset = 0\n for event in event_list:\n if 'event_offset' in event:\n if event['event_offset'] > max_offset:\n max_offset = event['event_offset']\n\n elif 'offset' in event:\n if event['offset'] > max_offset:\n max_offset = event['offset']\n\n return max_offset", "def getIndexPoint(event=None, plane=None, epsilon=2):\n\n if event is 
None:\n return None\n if plane is None:\n return None\n if len(plane) == 0:\n return None\n\n xt = np.asarray([i[1] for i in plane])\n yt = np.asarray([i[0] for i in plane])\n d = np.sqrt((xt - event.xdata)**2 / 16 + (yt - event.ydata)**2)\n index = d.argsort()[:1][0]\n # position to far away\n if d[index] >= epsilon:\n return None\n index = int(index)\n return index", "def get_index_under_point(self, event):\r\n xy = np.asarray(list(zip(self.xs, self.ys)))\r\n xyt = self.line.get_transform().transform(xy)\r\n xt, yt = xyt[:, 0], xyt[:, 1]\r\n d = np.sqrt((xt - event.x) ** 2 + (yt - event.y) ** 2)\r\n pt_idx = np.argmin(d)\r\n if d[pt_idx] >= self.max_pixels_from_vertex:\r\n pt_idx = None\r\n return pt_idx", "def next_event(self):\n return self.events[self._current_event_ndx]", "def findpointcommand(bvid, event_list):\n for r in event_list:\n if r[0]==bvid:\n return r[1]\n return None", "def _get_past_tense_event(self):\n return f'{self.event_type}d' if self.event_type[-1] == 'e' else f'{self.event_type}ed'", "def get_event(self):\r\n return self.events[0]", "def searchForEvent(file):\n\n MatchRunEvent = re.compile(\"Run: [0-9]+ Event: [0-9]+$\")\n\n # I'm just grabbing the last twenty lines for the hell of it\n lines = tailNLinesFromFile(file, 20)\n\n lastMatch = None\n for line in lines:\n if MatchRunEvent.search(line.strip()):\n matches = MatchRunEvent.findall(line.strip())\n lastMatch = matches[-1]\n\n if lastMatch != None:\n # //\n # // Extract and update last run/event number\n # //\n try:\n runInfo, lastEvent = lastMatch.split(\"Event:\", 1)\n lastRun = int(runInfo.split(\"Run:\", 1)[1])\n lastEvent = int(lastEvent)\n return (lastRun, lastEvent)\n except Exception:\n return (None, None)\n\n return (None, None)", "def event_starting_point_extractor(row) -> int:\n to_return = None\n # First, define the variables that we will need for the rest of this\n # function.\n positions_list = literal_eval(row[\"positions\"])\n assert isinstance(positions_list, list)\n assert 1 <= len(positions_list) <= 2\n\n # Next, extract the starting and ending positions.\n raw_starting_x = positions_list[0].get(\"x\")\n raw_starting_y = positions_list[0].get(\"y\")\n\n starting_x = (raw_starting_x/100)*104\n starting_y = (raw_starting_y/100)*68\n\n # Finally, validate and return the result.\n to_return = [starting_x, starting_y]\n\n return to_return", "def sentences_between(e1, e2, news):\n if e1.ID == e2.ID:\n return -1\n\n return abs(news.get_sentence_by_entity(e1).index -\n news.get_sentence_by_entity(e2).index)", "def findontarget(starttime, event_list):\n for r in event_list:\n if r[0]==18 and r[1]>starttime: return r[1]\n return None", "def get_prev_word(self, index, orignal=False):\n try:\n\n word = self.df.iloc[index][1]\n if word[-1] == \".\":\n if orignal:\n return word[:-1]\n return self.label_encoder.transform([word[:-1]])[0]\n else:\n # NOT A PERIOD\n # I think it would be better to return a <NAP> token\n # This might also help in cleaning the data\n # If orignal is true return word as is...\n if orignal:\n return word\n return self.label_encoder.transform([\"<NAP>\"])[0]\n except ValueError:\n # Returning -1 for unseen words\n return -1\n except IndexError:\n if orignal:\n return \"<START>\"\n return self.label_encoder.transform([\"<START>\"])[0]", "def findguidingstart(starttime, event_list):\n for r in event_list:\n if r[0]==5 and r[1]>starttime: return r[1]\n return None", "def next_event(self) -> Tuple[Event, int]:\n def find_min(values: Sequence[Optional[float]]) -> \\\n 
Tuple[Optional[int], float]:\n \"\"\"Helper to get minimum timestamp and its index.\"\"\"\n min_value: float = np.inf\n min_index: Optional[int] = None\n for index_, value_ in enumerate(values):\n if value_ is not None and value_ < min_value:\n min_value = value_\n min_index = index_\n return min_index, min_value\n\n min_arrival_node, min_arrival_time = find_min(self._next_arrivals)\n min_service_node, min_service_time = find_min(self._service_ends)\n\n if min_arrival_node is None and min_service_node is None:\n return Event.STOP, 0\n\n if min_arrival_time >= min_service_time:\n self._time = min_service_time\n self._service_ends[min_service_node] = None\n return Event.SERVICE_END, min_service_node\n\n # Otherwise, arrival happened:\n self._time = min_arrival_time\n self._next_arrivals[min_arrival_node] = None\n return Event.ARRIVAL, min_arrival_node", "def get_ellipsis_location(tree, target_tag):\n\n index = \"\".join(re.findall(r\"\\d+\", target_tag))\n tag = re.sub(index, \"\", target_tag)\n counter = 0\n for node in tree.subtrees():\n if node.label().split(\"end\")[0] == tag:\n if counter == int(index):\n return node.treeposition()\n else:\n counter += 1", "def get_offset(self, index):\n if self.is_leaf():\n raise TerminalNodeException\n try:\n return self.offsets[index]\n except KeyError:\n return None", "def event(self):\n return self.events[0]", "def _get_closest_waypoint(self, pose):\n pos = pose.position\n x = pos.x\n y = pos.y\n closest_idx = self.waypoints_tree.query([x,y],1)[1]\n\n return closest_idx", "def get_input_pos(self, output_index):\r\n ipos = self.n_seqs\r\n opos = output_index\r\n for otaps, itaps in zip(self.mitmot_out_taps(), self.mitmot_taps()):\r\n if len(otaps) > opos:\r\n return ipos\r\n else:\r\n opos = opos - len(otaps)\r\n ipos += len(itaps)\r\n for dx, taps in enumerate(self.mitsot_taps()):\r\n if opos == 0:\r\n return ipos\r\n else:\r\n opos = opos - 1\r\n ipos += len(taps)\r\n if opos < self.info['n_sit_sot']:\r\n return ipos + opos\r\n else:\r\n return -1", "def event_ending_point_extractor(row) -> int:\n to_return = None\n # First, define the variables that we will need for the rest of this\n # function.\n positions_list = literal_eval(row[\"positions\"])\n assert isinstance(positions_list, list)\n assert 1 <= len(positions_list) <= 2\n\n # Next, extract the starting and ending positions.\n starting_x = positions_list[0].get(\"x\")\n starting_y = positions_list[0].get(\"y\")\n\n try:\n ending_x = positions_list[1].get(\"x\")\n raw_ending_y = positions_list[1].get(\"y\")\n except IndexError:\n # If the event is one where there is no ending point to list (i.e.,\n # a foul).\n ending_x, raw_ending_y = starting_x, starting_y\n\n ending_y = (raw_ending_y/100)*69\n\n # Finally, validate and return the result.\n to_return = [ending_x, ending_y]\n\n return to_return", "def treeposition2offsetPosition(subTrPosList, tr):\n\t\toffsetList = []\n\t\tcnt = 0\n\t\tfor pos in subTrPosList:\n\t\t\tpar = tr[pos]\n\t\t\twhile par != tr:\n\t\t\t\tfor i in xrange(par.parent_index()):\n\t\t\t\t\tif isinstance(par.parent()[i], nltk.ParentedTree):\n\t\t\t\t\t\tcnt += len(par.parent()[i].leaves())\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint >> debug_log, tr\n\t\t\t\tpar = par.parent()\n\n\t\t\tlabel = ''\n\t\t\tstart = False\n\t\t\tfor char in tr[pos].node:\n\t\t\t\tif not start:\n\t\t\t\t\tif char not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ': \n\t\t\t\t\t\tcontinue\n\t\t\t\t\telse:\n\t\t\t\t\t\tstart = True\n\t\t\t\t\t\tlabel += char\n\t\t\t\telse:\n\t\t\t\t\tif char not in 
'ABCDEFGHIJKLMNOPQRSTUVWXYZ': \n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tlabel += char\n\t\t\toffsetList.append((cnt, cnt+len(tr[pos].leaves()), label)) \n\t\t\tcnt = 0\n\t\treturn offsetList", "def _get_absolute(self, event):\n return Quartz.CGEventGetLocation(event)", "def _one_emotion(emotions_lst, emotion):\n for cur_el in emotions_lst:\n if cur_el[0] == emotion:\n return cur_el\n return None", "def _locate_events(self, start_time, end_time):\n\n # Define pre-pad as a function of the onset windows\n if self.pre_pad is None:\n self.pre_pad = max(self.p_onset_win[1],\n self.s_onset_win[1]) \\\n + 3 * max(self.p_onset_win[0],\n self.s_onset_win[0])\n\n # Adjust pre- and post-pad to take into account cosine taper\n t_length = self.pre_pad + 4*self.marginal_window + self.post_pad\n self.pre_pad += np.ceil(t_length * 0.06)\n self.post_pad += np.ceil(t_length * 0.06)\n\n trig_events = self.output.read_triggered_events(start_time, end_time)\n n_evts = len(trig_events)\n\n for i, trig_event in trig_events.iterrows():\n event_uid = trig_event[\"EventID\"]\n msg = \"=\" * 120 + \"\\n\"\n msg += \"\\tEVENT - {} of {} - {}\\n\"\n msg += \"=\" * 120 + \"\\n\\n\"\n msg += \"\\tDetermining event location...\\n\"\n msg = msg.format(i + 1, n_evts, event_uid)\n self.output.log(msg, self.log)\n\n w_beg = trig_event[\"CoaTime\"] - 2*self.marginal_window \\\n - self.pre_pad\n w_end = trig_event[\"CoaTime\"] + 2*self.marginal_window \\\n + self.post_pad\n\n timer = util.Stopwatch()\n self.output.log(\"\\tReading waveform data...\", self.log)\n try:\n self._read_event_waveform_data(trig_event, w_beg, w_end)\n except util.ArchiveEmptyException:\n msg = \"\\tNo files found in archive for this time period\"\n self.output.log(msg, self.log)\n continue\n except util.DataGapException:\n msg = \"\\tAll available data for this time period contains gaps\"\n msg += \"\\n\\tOR data not available at start/end of time period\\n\"\n self.output.log(msg, self.log)\n continue\n self.output.log(timer(), self.log)\n\n timer = util.Stopwatch()\n self.output.log(\"\\tComputing 4D coalescence grid...\", self.log)\n\n daten, max_coa, max_coa_norm, loc, map_4d = self._compute(\n w_beg, w_end,\n self.data.signal,\n self.data.availability)\n coord = self.lut.xyz2coord(np.array(loc).astype(int))\n event_coa_data = pd.DataFrame(np.array((daten, max_coa,\n coord[:, 0],\n coord[:, 1],\n coord[:, 2])).transpose(),\n columns=[\"DT\", \"COA\", \"X\", \"Y\", \"Z\"])\n event_coa_data[\"DT\"] = event_coa_data[\"DT\"].apply(UTCDateTime)\n event_coa_data_dtmax = \\\n event_coa_data[\"DT\"].iloc[event_coa_data[\"COA\"].astype(\"float\").idxmax()]\n w_beg_mw = event_coa_data_dtmax - self.marginal_window\n w_end_mw = event_coa_data_dtmax + self.marginal_window\n\n if (event_coa_data_dtmax >= trig_event[\"CoaTime\"]\n - self.marginal_window) \\\n and (event_coa_data_dtmax <= trig_event[\"CoaTime\"]\n + self.marginal_window):\n w_beg_mw = event_coa_data_dtmax - self.marginal_window\n w_end_mw = event_coa_data_dtmax + self.marginal_window\n else:\n msg = \"\\n\\tEvent {} is outside marginal window.\\n\"\n msg += \"\\tDefine more realistic error - the marginal window\"\n msg += \" should be an estimate of the origin time uncertainty,\"\n msg += \"\\n\\tdetermined by the expected spatial uncertainty and\"\n msg += \"the seismic velocity in the region of the earthquake\\n\"\n msg += \"\\n\" + \"=\" * 120 + \"\\n\"\n msg = msg.format(event_uid)\n self.output.log(msg, self.log)\n continue\n\n event_mw_data = event_coa_data\n event_mw_data = 
event_mw_data[(event_mw_data[\"DT\"] >= w_beg_mw) &\n (event_mw_data[\"DT\"] <= w_end_mw)]\n map_4d = map_4d[:, :, :,\n event_mw_data.index[0]:event_mw_data.index[-1]]\n event_mw_data = event_mw_data.reset_index(drop=True)\n event_max_coa = event_mw_data.iloc[event_mw_data[\"COA\"].astype(\"float\").idxmax()]\n\n # Update event UID; make out_str\n event_uid = str(event_max_coa.values[0])\n for char_ in [\"-\", \":\", \".\", \" \", \"Z\", \"T\"]:\n event_uid = event_uid.replace(char_, \"\")\n out_str = \"{}_{}\".format(self.output.name, event_uid)\n self.output.log(timer(), self.log)\n\n # Make phase picks\n timer = util.Stopwatch()\n self.output.log(\"\\tMaking phase picks...\", self.log)\n phase_picks = self._phase_picker(event_max_coa)\n self.output.write_picks(phase_picks[\"Pick\"], event_uid)\n self.output.log(timer(), self.log)\n\n # Determining earthquake location error\n timer = util.Stopwatch()\n self.output.log(\"\\tDetermining earthquake location and uncertainty...\", self.log)\n loc_spline, loc_gau, loc_gau_err, loc_cov, \\\n loc_cov_err = self._calculate_location(map_4d)\n self.output.log(timer(), self.log)\n\n # Make event dictionary with all final event location data\n event = pd.DataFrame([[event_max_coa.values[0],\n event_max_coa.values[1],\n loc_spline[0], loc_spline[1], loc_spline[2],\n loc_gau[0], loc_gau[1], loc_gau[2],\n loc_gau_err[0], loc_gau_err[1],\n loc_gau_err[2],\n loc_cov[0], loc_cov[1], loc_cov[2],\n loc_cov_err[0], loc_cov_err[1],\n loc_cov_err[2]]],\n columns=self.EVENT_FILE_COLS)\n\n self.output.write_event(event, event_uid)\n\n self._optional_locate_outputs(event_mw_data, event, out_str,\n phase_picks, event_uid, map_4d)\n\n self.output.log(\"=\" * 120 + \"\\n\", self.log)\n\n del map_4d, event_coa_data, event_mw_data, event_max_coa, \\\n phase_picks\n self.coa_map = None" ]
[ "0.64501244", "0.6028287", "0.58297163", "0.5478848", "0.54352885", "0.5413929", "0.540867", "0.5355675", "0.5355121", "0.5337441", "0.53225654", "0.5274684", "0.52629817", "0.5251305", "0.5098605", "0.5098509", "0.5095546", "0.5054653", "0.5035906", "0.50138444", "0.49922046", "0.495451", "0.49537787", "0.49460784", "0.489979", "0.48976985", "0.48820192", "0.487663", "0.48657745", "0.48611113" ]
0.6938581
0
Get the path in the syntactic tree between two extents. The particular purpose of the method in the task is to find the minimum tree that connects two events: removing the POS and LEMMA of a single-token entity, removing the internal structure of a multiple-token entity (consider the multiple-token entity as one node in the tree), and removing branches and leaves in between the two entities
def get_pruned_tree_path (self, index_1_beg, index_1_end, index_2_beg, index_2_end, in_between_children = False ): tempo_2_beg = index_2_beg tempo_2_end = index_2_end if index_1_beg >= index_2_end: index_2_beg = index_1_beg index_2_end = index_1_end index_1_beg = tempo_2_beg index_1_end = tempo_2_end if index_1_end - index_1_beg > 1: lca_1_index = self.tree.treeposition_spanning_leaves( index_1_beg, index_1_end ) else: lca_1_index = self.tree.treeposition_spanning_leaves( index_1_beg, index_1_end )[:-1] if index_2_end - index_2_beg > 1: lca_2_index = self.tree.treeposition_spanning_leaves( index_2_beg, index_2_end ) else: lca_2_index = self.tree.treeposition_spanning_leaves( index_2_beg, index_2_end )[:-1] if index_2_end - index_1_beg > 1: lca_index = self.tree.treeposition_spanning_leaves( index_1_beg, index_2_end ) else: lca_index = self.tree.treeposition_spanning_leaves( index_1_beg, index_2_end )[:-1] lca = self.tree[lca_index] new_tree = ParentedTree('(' + lca.node + ')') #Point to the root # Branch of the first entity current_pointer = new_tree tempo_lca = lca # try: for i in xrange(len(lca_index), len(lca_1_index)): tempo_lca = tempo_lca[lca_1_index[i]] if not (type(tempo_lca) == str or type(tempo_lca) == unicode): current_pointer.insert(0, ParentedTree('('+tempo_lca.node +')')) current_pointer = current_pointer[0] current_pointer = new_tree #Insert the first level of children of lca if len(lca_index) < len(lca_1_index) and len(lca_index) < len(lca_2_index): if in_between_children: for i in xrange(lca_1_index[len(lca_index)] + 1, lca_2_index[len(lca_index)]): current_pointer.insert(i, ParentedTree('('+lca[i].node +')')) #Point to the root # Branch of the second entity current_pointer = new_tree tempo_lca = lca first_time = True for i in xrange(len(lca_index), len(lca_2_index)): tempo_lca = tempo_lca[lca_2_index[i]] if not (type(tempo_lca) == str or type(tempo_lca) == unicode): if first_time: if not in_between_children: children_index_of_2nd_branch = 1 else: """ Don't really need to check lca_2_index[len(lca_index)] 'cause if it come to this point, the length constraint is already satisfied However, it's necessary to check lca_1_index[len(lca_index)] """ if len(lca_index) < len(lca_1_index): children_index_of_2nd_branch = lca_2_index[len(lca_index)]\ - lca_1_index[len(lca_index)] else: """ No left child, no in_between_children """ children_index_of_2nd_branch = 0 current_pointer.insert(children_index_of_2nd_branch, ParentedTree('('+tempo_lca.node +')')) current_pointer = current_pointer[children_index_of_2nd_branch] first_time = False else: current_pointer.insert(0, ParentedTree('('+tempo_lca.node +')')) current_pointer = current_pointer[0] return new_tree
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shortest_path(start, end):\n\troot1=node(start)\n\troot2=node(end)\n\tarr1=[[] for wqw in range(8)]\n\tarr2=[[] for qwq in range(8)]\n\tser1=[[] for lp in range(100000)]\n\tser2=[[] for lp in range(100000)]\n\tarr1[0].append(root1)\n\tarr2[0].append(root2)\n\tser1[(hash(start))%100000].append(start)\n\tser2[(hash(end))%100000].append(end)\n\tflag=0\n\tchk=None\n\tfor h in range(0,7):\n\t\tchk=check_match(arr1[h],arr2[h])\n\t\tif not chk is None:\n\t\t\tflag=1\n\t\t\tbreak\n\t\telse:\n\t\t\tfor w in range(len(arr1[h])):\n\t\t\t\tfin=insert_e(arr1[h][w].data,h+1,arr1,ser1)\n\t\t\t\tarr1=fin[0]\n\t\t\t\tser1=fin[1]\n\t\tchk=check_match(arr2[h],arr1[h+1])\n\t\tif not chk is None:\n\t\t\tflag=2\n\t\t\tbreak\n\t\telse:\n\t\t\tfor q in range(len(arr2[h])):\n\t\t\t\tfim=insert_e(arr2[h][w].data,h+1,arr2,ser2)\n\t\t\t\tarr2=fim[0]\n\t\t\t\tser2=fim[1]\n\tif flag==1:\n\t\twhile not chk[0].parent is None:\n\t\t\tres1.append(chk[0].parent.data)\n\t\t\tchk[0]=chk[0].parent\n\t\tres+=res1.reverse()\n\t\tres.append(chk[1])\n\t\twhile not chk[1].parent is None:\n\t\t\tres2.append(chk[1].parent.data)\n\t\t\tchk[1]=chk[1].parent\n\t\treturn res\n\telif flag==2:\n\t\twhile not chk[1].parent is None:\n\t\t\tres1.append(chk[1].parent.data)\n\t\t\tchk[1]=chk[1].parent\n\t\tif not res1 is None:\n\t\t\tres+=res1.reverse()\n\t\tres.append(chk[0])\n\t\twhile not chk[0].parent is None:\n\t\t\tres2.append(chk[0].parent.data)\n\t\t\tchk[0]=chk[0].parent\n\t\treturn res\n\telse:\n\t\tprint \"The given configuration is not solvable/wrong.\"", "def shortest_path(start, end):\n if start == end:\n return []\n \n start_frontier = Queue.Queue()\n start_parent = {}\n start_level = {}\n start_parent_move = {}\n start_frontier.put(start)\n start_level[start] = 0\n start_parent[start] = None\n start_parent_move[start] = None\n \n end_frontier = Queue.Queue()\n end_parent = {}\n end_level = {}\n end_parent_move = {}\n end_frontier.put(end)\n end_level[end] = 0\n end_parent[end] = None\n end_parent_move[end] = None\n \n intersectFound = False\n intersect = None\n level = 0\n while (True):\n level += 1\n# print (\"level = \" + str(level))\n if not start_frontier.empty():\n vertex = start_frontier.get()\n for move in rubik.quarter_twists:\n position = rubik.perm_apply(move,vertex)\n if position not in start_parent:\n# print (\"start permutation unvisited\")\n start_parent[position] = vertex\n start_level[position] = level\n start_parent_move[position] = move\n start_frontier.put(position)\n if position in end_parent:\n# print (\"position exists in end_parent\")\n intersect = position\n intersectFound = True\n break\n if intersectFound:\n break\n if not end_frontier.empty():\n vertex = end_frontier.get()\n for move in rubik.quarter_twists:\n position = rubik.perm_apply(move,vertex)\n if position not in end_parent:\n# print (\"end permutation unvisited\")\n end_parent[position] = vertex\n end_level[position] = level\n end_parent_move[position] = move\n end_frontier.put(position)\n if position in start_parent:\n# print (\"position exists in start_parent\")\n intersect = position\n intersectFound = True\n break\n if intersectFound:\n break\n if end_frontier.empty() and start_frontier.empty():\n break\n \n if intersect is None:\n return None\n \n path = []\n pos = intersect\n while (start_parent[pos] is not None):\n path.insert(0,start_parent_move[pos])\n pos = start_parent[pos]\n \n pos = intersect\n while (end_parent[pos] is not None):\n move = rubik.perm_inverse(end_parent_move[pos])\n path.append(move)\n pos = 
end_parent[pos]\n \n# path = [None] * start_level[intersect]\n# pos = intersect\n# move = start_parent_move[pos]\n# path[start_level[intersect]-1] = move\n# for i in range(start_level[intersect]-2,-1,-1):\n# if (start_parent[pos] is not None):\n# pos = start_parent[pos]\n# move = start_parent_move[pos]\n# path[i] = move\n# \n# pos = intersect\n# while (end_parent[pos] is not None):\n# move = rubik.perm_inverse(end_parent_move[pos])\n# path.append(move)\n# pos = end_parent[pos]\n \n return path", "def get_path(mod_token, head_token):\n # Compute the path from the root to each token.\n mod_ancestors = list(reversed(list(mod_token.ancestors)))\n head_ancestors = list(reversed(list(head_token.ancestors)))\n\n # If the paths don't start at the same place (odd!) then there is no path at\n # all.\n if (not mod_ancestors or not head_ancestors\n or mod_ancestors[0] != head_ancestors[0]):\n return None\n\n # Eject elements from the common path until we reach the first differing\n # ancestor.\n ix = 1\n while (ix < len(mod_ancestors) and ix < len(head_ancestors)\n and mod_ancestors[ix] == head_ancestors[ix]):\n ix += 1\n\n # Construct the path. TODO: add \"satellites\", possibly honor sentence\n # ordering between modifier and head rather than just always traversing from\n # the modifier to the head?\n path = ['/'.join(('<X>', mod_token.pos_, mod_token.dep_, '>'))]\n\n path += ['/'.join((tok.lemma_, tok.pos_, tok.dep_, '>'))\n for tok in reversed(mod_ancestors[ix:])]\n\n root_token = mod_ancestors[ix - 1]\n path += ['/'.join((root_token.lemma_, root_token.pos_, root_token.dep_, '^'))]\n\n path += ['/'.join((tok.lemma_, tok.pos_, tok.dep_, '<'))\n for tok in head_ancestors[ix:]]\n\n path += ['/'.join(('<Y>', head_token.pos_, head_token.dep_, '<'))]\n\n return '::'.join(path)", "def get_path(self, v0, v2):\n start = self.get_node(v0)\n target = self.get_node(v2)\n return self.astar(start,target)", "def path_to(self, other: \"BaseSegment\") -> List[PathStep]:\n # Return empty if they are the same segment.\n if self is other:\n return [] # pragma: no cover\n\n # Do we have any child segments at all?\n if not self.segments:\n return []\n\n # Identifying the highest parent we can using any preset parent values.\n midpoint = other\n lower_path = []\n while True:\n _higher = midpoint.get_parent()\n # If we've run out of parents, stop for now.\n if not _higher:\n break\n lower_path.append(\n PathStep(\n _higher,\n _higher.segments.index(midpoint),\n len(_higher.segments),\n _higher._code_indices,\n )\n )\n midpoint = _higher\n # If we're found the target segment we can also stop.\n if midpoint == self:\n break\n\n # Reverse the path so far\n lower_path.reverse()\n\n # Have we already found the parent?\n if midpoint == self:\n return lower_path\n # Have we gone all the way up to the file segment?\n elif midpoint.class_is_type(\"file\"):\n return [] # pragma: no cover\n # Are we in the right ballpark?\n # NOTE: Comparisons have a higher precedence than `not`.\n elif not self.get_start_loc() <= midpoint.get_start_loc() <= self.get_end_loc():\n return []\n\n # From here, we've worked \"up\" as far as we can, we now work \"down\".\n # When working down, we only need to go as far as the `midpoint`.\n\n # Check through each of the child segments\n for idx, seg in enumerate(self.segments):\n # Set the parent if it's not already set.\n seg.set_parent(self)\n # Build the step.\n step = PathStep(self, idx, len(self.segments), self._code_indices)\n # Have we found the target?\n # NOTE: Check for _equality_ not _identity_ 
here as that's most reliable.\n if seg == midpoint:\n return [step] + lower_path\n # Is there a path to the target?\n res = seg.path_to(midpoint)\n if res:\n return [step] + res + lower_path\n\n # Not found.\n return [] # pragma: no cover", "def _find_path(self, node1, node2, path=[]):\r\n\r\n path = path + [node1]\r\n if node1 == node2:\r\n return path\r\n if node1 not in self._graph:\r\n return None\r\n for node in self._graph[node1]:\r\n if node not in path:\r\n new_path = self._find_path(node, node2, path)\r\n if new_path:\r\n return new_path\r\n return None", "def min_path(self, start, end, maxD=1e309):\n tdist, preceding_node = self.dijkstra(start, maxD)\n dist = tdist[end]\n backpath = [end]\n try:\n while end != start:\n end = preceding_node[end]\n backpath.append(end)\n path = list(reversed(backpath))\n except KeyError:\n path = None\n\n return dist, path", "def convert_paths(self):\n # convert to node sequences, dropping s'\n self.nodeseq_paths = []\n for path in self.paths:\n node_seq = [] # don't include s'\n for arc in path:\n node_seq.append(self.arc_info[arc]['destin'])\n self.nodeseq_paths.append(node_seq)\n # convert to og graph\n self.converted_paths = []\n for path in self.nodeseq_paths:\n this_path = []\n add_next_node = True\n for i in range(len(path) - 1):\n print(\"This path is\", this_path)\n node1 = path[i]\n node2 = path[i + 1]\n print(\"node1={}, node2={}\".format(node1, node2))\n if (node1, node2) in self.mapping:\n sc = self.mapping[(node1, node2)]\n print(\"uses sc edge for {}\".format(sc))\n print(\"should add {}, but also need to check for overlaps\".\n format(sc[1:-1]))\n if sc[1] in this_path:\n # we have an overlap\n start = len(this_path) - this_path.index(sc[1])\n this_path.extend(sc[start:-1])\n else:\n this_path.extend(sc[1:-1])\n add_next_node = False # next node is second of sc edge\n elif add_next_node:\n this_path.append(node1)\n else:\n add_next_node = True\n this_path.append(path[-1])\n self.converted_paths.append(this_path)", "def find_path2(mol,atom0_index,atom1_index):\r\n atom0_index = atom0_index+1\r\n atom1_index = atom1_index+1\r\n atom_iter=ob.OBAtomAtomIter(mol.GetAtom(atom0_index))\r\n alist=[]\r\n \r\n index=0\r\n for a in atom_iter:\r\n alist.append(a.GetIdx())\r\n index=index+1\r\n #print('The list of bound atoms is:', alist)\r\n index=0\r\n depth=0\r\n finished=False\r\n for atom_index in alist:\r\n path=atom_index\r\n atom_iter=ob.OBAtomAtomIter(mol.GetAtom(atom_index))\r\n for a in atom_iter:\r\n #print(a.GetIdx())\r\n if a.GetIdx() ==atom1_index:\r\n finished=True\r\n break\r\n \r\n if finished:\r\n break\r\n if not finished:\r\n #print('Unable to find a path between atoms',atom0_index-1,' and ',atom1_index-1,'with a depth of 2')\r\n return -1\r\n path=path-1\r\n return path", "def get_shortest_path(self, n1, n2):\n nodes_visited = []\n current_node = self.nodes[n1]\n current_branch_nodes = [current_node]\n while True:\n current_node = current_branch_nodes.pop(0)\n for i in np.arange(0, self.id):\n if(self.adjacency_matrix[current_node,i] == 0):\n continue\n if(self.adjacency_matrix[current_node,i] == 1):\n next_node_dict = next((node for node in nodes_visited if node['id'] == i), False)\n if(next_node_dict == False):\n nodes_visited.append({'id': i, 'prev_node_id': current_node})\n current_branch_nodes.append(i)\n if self.nodes[n2] in current_branch_nodes:\n break\n if current_branch_nodes == []:\n return False\n path = []\n current_node = self.nodes[n2]\n path.append(n2)\n while(True):\n node_dict = next((node for node in 
nodes_visited if node['id'] == current_node), False)\n current_node = node_dict['prev_node_id']\n path.append(self.ids[current_node])\n if(current_node == self.nodes[n1]):\n break\n path.reverse()\n return path", "def get_subtree_nodes(T1, T2, new):\n # Get subtree of T2 induced by the new node\n T2_ind = trilearn.graph.junction_tree.subtree_induced_by_subset(T2, {new})\n T2_subtree_nodes = None\n # Find the subtree(2) in T1\n if T2_ind.has_node(frozenset([new])):\n # Isolated node. Unique empty subtree\n T2_subtree_nodes = [{frozenset([new]): None}]\n\n elif T2_ind.order() == 1:\n # Look which is its neighbor\n #c = T2_ind.nodes()[0] # nx < 2.x\n c = list(T2_ind.nodes())[0] # nx > 2.x\n\n if T1.has_node(c - {new}):\n # if it was connected to everything in a clique\n T2_subtree_nodes = [{c: c - {new}}]\n else:\n # c always has at lest one neighbor and the separator c\\new\n # for all of them.\n # We have to decide which of them was the emerging clique.\n # 3 cases:\n # 1) 1 neighbor: Trivial.\n # 2) 2 neighbors: Then it could be any of these.\n # 3) >2 neighbors: The emerging clique is the one that has the\n # others as a subset of its neighbors in T1\n #neigs = T2.neighbors(c) # nx < 2.x\n neigs = list(T2.neighbors(c)) # nx > 2.x\n possible_origins = [c1 for c1 in neigs if c1 & c == c - {new}]\n g = len(possible_origins)\n if g == 1:\n # This neighbor has to be the one it came from\n T2_subtree_nodes = [{c: possible_origins[0]}]\n elif g == 2 and len(neigs) == 2:\n # If there are 2 possible neighbors with the same separator\n T2_subtree_nodes = [{c: possible_origins[0]},\n {c: possible_origins[1]}]\n else:\n for neig in possible_origins:\n if set(neigs) - {neig} <= set(T1.neighbors(neig)):\n T2_subtree_nodes = [{c: neig}]\n break\n else:\n tmp = {}\n # In this case the subtree nodes are uniquely defined by T1 and T2\n # Loop through all edges in T2 in order to extract the correct\n # subtree of T1. 
Note that, by construction we know that it has the same structure as\n # the induced subtree of T2.\n for e in T2_ind.edges():\n # Non-swallowed cliques, get all potential \"emerging\"\n # (cliques from where the new cliques could have have emerged) cliques.\n Ncp1 = [c for c in T2.neighbors(e[0]) if\n c & e[0] == e[0] - {new}]\n Ncp2 = [c for c in T2.neighbors(e[1]) if\n c & e[1] == e[1] - {new}]\n\n # If the clique was swallowed in the new clique,\n # there will be no neighbors, so the clique itself\n # (except from the new node) is the unique emerging clique.\n if Ncp1 == []:\n Ncp1 = [e[0] - {new}]\n if Ncp2 == []:\n Ncp2 = [e[1] - {new}]\n\n # Replicate the structure in T2\n for neig1 in Ncp1:\n for neig2 in Ncp2:\n if T1.has_edge(neig1, neig2): # Know that this edge is unique\n tmp[e[0]] = neig1\n tmp[e[1]] = neig2\n T2_subtree_nodes = [tmp]\n return T2_subtree_nodes", "def parse_tree_for_sub_component(self, index_1_beg, index_1_end,\n index_2_beg, index_2_end, flag_string):\n \n tempo_2_beg = index_2_beg\n tempo_2_end = index_2_end\n if index_1_beg >= index_2_end:\n index_2_beg = index_1_beg\n index_2_end = index_1_end\n index_1_beg = tempo_2_beg\n index_1_end = tempo_2_end\n\n ## Path-enclosed Tree (PT)\n if flag_string == SUBTREE_PT:\n if index_2_end - index_1_beg > 1:\n subtree_pos = self.tree.treeposition_spanning_leaves( index_1_beg, index_2_end )\n lca = self.tree[subtree_pos]\n no_of_leaves = self.number_of_leaves_before(subtree_pos)\n return Tree.prune_tree(lca, index_1_beg - no_of_leaves, index_2_end - 1 - no_of_leaves)\n else:\n return self.tree[self.tree.treeposition_spanning_leaves( index_1_beg, index_2_end )[:-1]]\n \n ## Minimum Complete Tree (MCT)\n if flag_string == SUBTREE_MCT:\n if index_2_end - index_1_beg > 1:\n lca = self.tree[self.tree.treeposition_spanning_leaves( index_1_beg, index_2_end )]\n else:\n lca = self.tree[self.tree.treeposition_spanning_leaves( index_1_beg, index_2_end )[:-1]]\n return lca\n\n ## Context-Sensitive Path Tree (CSPT)\n if flag_string == SUBTREE_CSPT:\n if index_1_beg > 0:\n index_1_beg -= 1\n if index_2_end < len(self.tree.leaves()) - 1:\n index_2_end += 1\n subtree_pos = self.tree.treeposition_spanning_leaves( index_1_beg, index_2_end )\n lca = self.tree[subtree_pos]\n no_of_leaves = self.number_of_leaves_before(subtree_pos)\n return Tree.prune_tree(lca, index_1_beg - no_of_leaves, index_2_end - 1 - no_of_leaves)\n \n ## Highly-pruned Path Tree (HPPT)\n if flag_string == SUBTREE_HPPT:\n # Get a single path tree that connect \n # the two entities \n return self.get_pruned_tree_path(index_1_beg, index_1_end,\n index_2_beg, index_2_end, True)", "def segsFromTangents(self,svgCommandsList, refNode):\n sourcepoints, svgCommandsList = toArray(svgCommandsList)\n\n d = D(sourcepoints[0],sourcepoints[-1])\n x,y,wTot,hTot = computeBox(sourcepoints)\n aR = min(wTot/hTot, hTot/wTot)\n maxDim = max(wTot, hTot)\n isClosing = aR*0.2 > d/maxDim\n debug('isClosing ', isClosing, maxDim, d)\n if d==0:\n # then we remove the last point to avoid null distance\n # in other calculations\n sourcepoints = sourcepoints[:-1]\n svgCommandsList = svgCommandsList[:-1]\n\n if len(sourcepoints) < 4:\n return PathGroup.toSegments(sourcepoints, svgCommandsList, refNode, isClosing=isClosing)\n \n tangents = buildTangents(sourcepoints, isClosing=isClosing)\n\n # global quantities :\n\n # Check if circle -----------------------\n if isClosing:\n if len(sourcepoints)<9:\n return PathGroup.toSegments(sourcepoints, svgCommandsList, refNode, isClosing=True)\n isCircle, res = 
self.checkForCircle( sourcepoints, tangents) \n debug(\"Is Circle = \", isCircle )\n if isCircle:\n x,y,rmin, rmax,angle = res\n debug(\"Circle -> \", rmin, rmax,angle )\n if rmin/rmax>0.7:\n circ = Circle((x,y),0.5*(rmin+rmax), refNode )\n else:\n circ = Circle((x,y),rmin, refNode, rmax=rmax, angle=angle)\n circ.points = sourcepoints\n return circ\n # -----------------------\n \n\n\n # cluster points by angle of their tangents -------------\n tgSegs = [ Segment.fromCenterAndDir( p, t ) for (p,t) in zip(sourcepoints,tangents) ]\n clustersInd = clusterAngles( [s.angle for s in tgSegs] )\n clustersInd.sort( )\n debug(\"build envelop cluster: \", clustersInd)\n\n # build Segments from clusters \n newSegs = []\n for imin, imax in clustersInd:\n if imin+1< imax: # consider clusters with more than 3 points\n seg = fitSingleSegment(sourcepoints[imin:imax+1])\n elif imin+1==imax: # 2 point path : we build a segment\n seg = Segment.from2Points(sourcepoints[imin], sourcepoints[imax] , sourcepoints[imin:imax+1])\n else:\n seg = Path( sourcepoints[imin:imax+1] )\n seg.sourcepoints = sourcepoints\n newSegs.append( seg )\n resetPrevNextSegment( newSegs )\n debug(newSegs)\n # -----------------------\n\n\n # -----------------------\n # Merge consecutive Path objects \n updatedSegs=[]\n def toMerge(p):\n l=[p]\n setattr(p, 'merged', True)\n if p.next and not p.next.isSegment():\n l += toMerge(p.next)\n return l\n \n for i,seg in enumerate(newSegs[:-1]):\n if seg.isSegment():\n updatedSegs.append( seg) \n continue\n if hasattr(seg,'merged'): continue\n mergeList = toMerge(seg)\n debug('merging ', mergeList)\n p = Path(numpy.concatenate([ p.points for p in mergeList]) )\n debug('merged == ', p.points)\n updatedSegs.append(p)\n\n if not hasattr(newSegs[-1],'merged'): updatedSegs.append( newSegs[-1]) \n debug(\"merged path\", updatedSegs)\n newSegs = resetPrevNextSegment( updatedSegs )\n\n\n # Extend segments -----------------------------------\n if self.options.segExtensionEnable:\n newSegs = SegmentExtender.extendSegments( newSegs, self.options.segExtensionDtoSeg, self.options.segExtensionQual )\n debug(\"extended segs\", newSegs)\n newSegs = resetPrevNextSegment( newSegs )\n debug(\"extended segs\", newSegs)\n\n # ----------------------------------------\n \n\n # ---------------------------------------\n # merge consecutive segments with close angle\n updatedSegs=[]\n\n if self.options.segAngleMergeEnable:\n newSegs = mergeConsecutiveCloseAngles( newSegs , mangle=0.2 )\n newSegs=resetPrevNextSegment(newSegs)\n debug(' __ 2nd angle merge')\n newSegs = mergeConsecutiveCloseAngles( newSegs, mangle=0.35 ) # 2nd pass\n newSegs=resetPrevNextSegment(newSegs)\n debug('after merge ', len(newSegs), newSegs)\n # Check if first and last also have close angles.\n if isClosing and len(newSegs)>2 :\n first ,last = newSegs[0], newSegs[-1]\n if first.isSegment() and last.isSegment():\n if closeAngleAbs( first.angle, last.angle) < 0.1:\n # force merge\n points= numpy.concatenate( [ last.points, first.points] )\n newseg = fitSingleSegment(points)\n newseg.next = first.next\n last.prev.next = None\n newSegs[0]=newseg\n newSegs.pop()\n\n # -----------------------------------------------------\n # remove negligible Path/Segments between 2 large Segments\n if self.options.segRemoveSmallEdge:\n self.removeSmallEdge(newSegs , wTot, hTot)\n newSegs=resetPrevNextSegment(newSegs)\n\n debug('after remove small ', len(newSegs),newSegs)\n # -----------------------------------------------------\n\n # 
-----------------------------------------------------\n # Extend segments to their intersections\n for p in newSegs:\n if p.isSegment() and p.next:\n p.setIntersectWithNext()\n # -----------------------------------------------------\n \n return PathGroup(newSegs, svgCommandsList, refNode, isClosing)", "def part1(input_lines):\n # This is a DAG problem. We need to form a dependency graph.\n tower = get_tower(input_lines)\n return find_root(tower)", "def find_short_path(aux_structures, loc1, loc2):\n node1 = get_closest_node(aux_structures, loc1)\n node2 = get_closest_node(aux_structures, loc2)\n p = find_min_cost_path(\n aux_structures,\n node1,\n lambda x: x == node2,\n lambda parent_id: aux_structures[parent_id]['adjacent'],\n get_dist_cost,\n lambda x: gcd_heuristic(aux_structures, x, node2))\n return get_coord_list(aux_structures, p) if p is not None else None", "def build_path(start, end):\n a = hierarchy.index(start)\n b = hierarchy.index(end)\n if a == b:\n return []\n elif a < b:\n return hierarchy[a + 1 : b + 1]\n return list(reversed(hierarchy[b:a]))", "def path(most_important_up, most_important_down, total_distance, to_source2, to_source1):\n\n if total_distance == min(total_distance, to_source2[0], to_source1[0]):\n return source_to_source(most_important_up, most_important_down), total_distance\n elif to_source2[0] == min(total_distance, to_source2[0], to_source1[0]):\n return most_important_to_source(to_source2[1]), to_source2[0]\n else:\n return most_important_to_source(to_source1[1], up=False), to_source1[0]", "def get_steps_of_closest_intersections(commands1, commands2):\n\n path1 = get_one_path(commands1)\n path2 = get_one_path(commands2)\n intersections = set(path1).intersection(set(path2))\n # index is 0 based, therefore +2\n return min(map(lambda x: path1.index(x) + path2.index(x), intersections)) + 2", "def find_short_path_nodes(aux_structures, node1, node2):\n p = find_min_cost_path(\n aux_structures,\n node1,\n lambda x: x == node2,\n lambda parent_id: aux_structures[parent_id]['adjacent'],\n get_dist_cost,\n lambda x: gcd_heuristic(aux_structures, x, node2))\n return list(p) if p is not None else None", "def a_star_alg(self, p1: int, p2: int, max_level: int = 1000):\r\n \r\n # Create start and end node\r\n start_node = Node(None, p1, self.node_dict[p1])\r\n start_node.g = start_node.h = start_node.f = 0\r\n end_node = Node(None, p2, self.node_dict[p2])\r\n end_node.g = end_node.h = end_node.f = 0\r\n\r\n # Initialize both open and closed list\r\n open_list = []\r\n closed_list = []\r\n\r\n # Add the start node\r\n open_list.append(start_node)\r\n\r\n # Loop until you find the end\r\n level = 0\r\n while len(open_list) > 0 and level < max_level:\r\n level += 1\r\n\r\n # Get the current node (the node in open_list with the lowest cost)\r\n current_node = open_list[0]\r\n current_index = 0\r\n for index, item in enumerate(open_list):\r\n if item.f < current_node.f:\r\n current_node = item\r\n current_index = index\r\n\r\n # Pop current off open list, add to closed list\r\n open_list.pop(current_index)\r\n closed_list.append(current_node)\r\n\r\n # Found the goal\r\n if current_node == end_node:\r\n path = []\r\n distance = current_node.g\r\n current = current_node\r\n while current is not None:\r\n path.append(current.number)\r\n current = current.parent\r\n\r\n return path[::-1], distance # Return reversed path\r\n\r\n # Generate children\r\n children = []\r\n for new_number in self.road_tree[current_node.number]: # Adjacent nodes\r\n new_node = Node(current_node, 
new_number, self.node_dict[new_number])\r\n children.append(new_node)\r\n\r\n # Loop through children\r\n for child in children:\r\n append_to_open_list = False\r\n\r\n # Create the f, g, and h values\r\n child.g = current_node.g + self.road_dict[(current_node.number, child.number)]\r\n child.h = sqrt((child.x - end_node.x) ** 2 + (child.y - end_node.y) ** 2) / 200\r\n child.f = child.g + child.h\r\n\r\n # Child is already in the closed list\r\n closed_list, append_to_open_list = self.check_in_list(child, closed_list, append_to_open_list)\r\n\r\n # Child is already in the open list\r\n open_list, append_to_open_list = self.check_in_list(child, open_list, append_to_open_list)\r\n\r\n # Add the child to the open list\r\n if append_to_open_list:\r\n open_list.append(child)\r\n\r\n return [], 1e10", "def shortest_path(self, other):\n shortest_paths = []\n lcs = self.lowest_common_subsumer(other)\n for subsumer in lcs:\n paths_to_lcs1 = self.shortest_path_to_hypernym(subsumer)\n paths_to_lcs2 = other.shortest_path_to_hypernym(subsumer)\n for path_to_lcs1 in paths_to_lcs1:\n for path_to_lcs2 in paths_to_lcs2:\n current_path = path_to_lcs1\n path_to_lcs2 = path_to_lcs2[::-1]\n for el in path_to_lcs2[1:]:\n current_path.append(el)\n shortest_paths.append(current_path)\n return shortest_paths", "def get_path(self,first_node,last_node):\n edge_pattern=re.compile('edge_(?P<begin_node>\\w+)_(?P<end_node>\\w+)_(?P<iterator>\\w+)')\n exit_paths=self.get_exiting_edges(first_node)\n next_nodes=self.get_exiting_nodes(first_node)\n #be careful here using the wrong assignment statement breaks this function\n possible_paths=[]\n for exit_path in exit_paths:\n possible_paths.append([exit_path])\n #print(\"{0} is {1}\".format('possible_paths',possible_paths))\n for i in range(len(self.node_names)):\n for index,path in enumerate(possible_paths):\n last_edge=path[-1]\n match=re.match(edge_pattern,last_edge)\n begin_node=match.groupdict()['begin_node']\n end_node=match.groupdict()['end_node']\n #print next_node\n if end_node==last_node:\n #print(\"The path found is {0}\".format(path))\n return path\n next_possible_paths=[]\n next_edges=self.get_exiting_edges(end_node)\n next_nodes=self.get_exiting_nodes(end_node)\n #print(\"{0} is {1}\".format('next_edges',next_edges))\n for index,next_edge in enumerate(next_edges):\n #be careful here using the wrong assignment statement breaks this function\n #next_path=path is a deal breaker!!\n next_path=[]\n for edge in path:\n next_path.append(edge)\n #print(\"{0} is {1}\".format('next_path',next_path))\n #print(\"{0} is {1}\".format('next_edge',next_edge))\n #next_node=next_nodes[index]\n #print next_node\n next_match=re.match(edge_pattern,next_edge)\n next_node=next_match.groupdict()[\"end_node\"]\n begin_node_next_edge=next_match.groupdict()[\"begin_node\"]\n #print(\"{0} is {1}\".format('next_node',next_node))\n #print(\"{0} is {1}\".format('begin_node_next_edge',begin_node_next_edge))\n\n if next_node==last_node and begin_node_next_edge==end_node:\n next_path.append(next_edge)\n #print(\"The path found is {0}\".format(next_path))\n return next_path\n elif begin_node_next_edge==end_node:\n next_path.append(next_edge)\n next_possible_paths.append(next_path)\n #print(\"{0} is {1}\".format('next_possible_paths',next_possible_paths))\n else:\n pass\n #print(\"{0} is {1}\".format('next_possible_paths',next_possible_paths))\n possible_paths=next_possible_paths\n #print(\"{0} is {1}\".format('possible_paths',possible_paths))", "def _path(from_object, to_object):\n\n if 
from_object._root != to_object._root:\n raise ValueError(\"No connecting path found between \" +\n str(from_object) + \" and \" + str(to_object))\n\n other_path = []\n obj = to_object\n while obj._parent is not None:\n other_path.append(obj)\n obj = obj._parent\n other_path.append(obj)\n object_set = set(other_path)\n from_path = []\n obj = from_object\n while obj not in object_set:\n from_path.append(obj)\n obj = obj._parent\n index = len(from_path)\n i = other_path.index(obj)\n while i >= 0:\n from_path.append(other_path[i])\n i -= 1\n return index, from_path", "def traverse_graph_start_end_extra_node(graph):\n\n # get tree with starting node tags\n\n def traverse(graph, node):\n\n children = [int(c) for c in graph[node][\"children\"]]\n tagged_children = []\n for child in children:\n ellipsed_parents = [int(p) for p in graph[child][\"ellipsed_parents\"]]\n # if the child is explicit\n if node not in ellipsed_parents:\n if graph[child][\"terminal\"] == \"yes\":\n tagged_children.append(ParentedTree(graph[child][\"tag\"], [graph[child][\"text\"]]))\n else:\n tagged_children.append(traverse(graph, child))\n # if the child is ellipsed\n else:\n ellipsis_tag = get_ellipsis_tag_from_graph(graph, child)\n tagged_children.append(ParentedTree(ellipsis_tag, []))\n \n tree = ParentedTree(graph[node][\"tag\"], tagged_children)\n\n return tree\n\n tree = traverse(graph, 0)\n\n # get ending node tags\n positions = [pos for pos in tree.treepositions() if pos not in tree.treepositions(\"leaves\")]\n end_tags = []\n ellipsis_id = 0 # assign an id to each ellipsis start and end nodes\n for pos_i, pos in enumerate(positions):\n if tree[pos].label().startswith(\"start\"):\n ellipsis_tag = tree[pos].label().split(\"start\")[-1]\n tree[pos].set_label(\"start\" + str(ellipsis_id))\n end_location = get_ellipsis_location(tree, ellipsis_tag)\n end_tag = \"end\" + str(ellipsis_id)\n end_tags.append((end_location, end_tag))\n ellipsis_id += 1\n\n # insert ending node tags\n for index, st in enumerate(tree.subtrees()):\n for end_location, end_tag in end_tags:\n if st.treeposition() == end_location:\n st.insert(index, ParentedTree(end_tag, []))\n\n positions = [pos for pos in tree.treepositions() if pos not in tree.treepositions(\"leaves\")]\n rev_positions = [pos for pos in reversed(positions)]\n for pos_i, pos in enumerate(rev_positions):\n # append start tag to the previous node\n if tree[pos].label().startswith(\"start\"):\n prev_pos_i = pos_i + 1\n prev_pos = rev_positions[prev_pos_i]\n tree[prev_pos].set_label(tree[prev_pos].label() + tree[pos].label())\n del tree[pos]\n # append end tag to the parent of the current node\n elif tree[pos].label().startswith(\"end\"):\n parent_pos = tree[pos].parent().treeposition()\n tree[parent_pos].set_label(tree[parent_pos].label() + tree[pos].label())\n del tree[pos] \n\n # wrap each constituent that has end or start tags with extra nodes\n\n def add_extra_nodes(tree):\n children = []\n for subtree in tree:\n if type(subtree) == str:\n children.append(subtree)\n else:\n splits = re.split(\"(start|end)\", subtree.label())\n const_tag = splits[0]\n ellipsis_tag = \"\".join(splits[1:]) \n if len(ellipsis_tag) > 0:\n children.append(Tree(subtree.label(), [Tree(const_tag, [sst for sst in subtree])]))\n else:\n children.append(add_extra_nodes(subtree))\n\n return Tree(tree.label(), children)\n\n tree = add_extra_nodes(tree)\n\n return tree", "def addPath(self, from_node, to_node):\n x1,y1 = from_node.x,from_node.y\n x2,y2 = to_node.x,to_node.y\n \n pointsx = []\n pointsy = []\n 
\n\n m_new = 2 * (y2 - y1)\n slope_error_new = m_new - (x2 - x1)\n \n y=y1\n for x in range(x1,x2+1):\n \n pointsx.append(x)\n pointsy.append(y)\n # Add slope to increment angle formed\n slope_error_new =slope_error_new + m_new\n \n # Slope error reached limit, time to\n # increment y and update slope error.\n if (slope_error_new >= 0):\n y=y+1\n slope_error_new =slope_error_new - 2 * (x2 - x1)\n\n new_node = self.Node(to_node.x,to_node.y)\n new_node.path_x = pointsx\n new_node.path_y = pointsy\n new_node.path_x.append(to_node.x)\n new_node.path_y.append(to_node.y)\n\n print(\"len path x\",len(new_node.path_x))\n print(\"len path y\",len(new_node.path_y) )\n\n new_node.parent = from_node\n\n return new_node", "def get_move_path_for(self, character, start, end):\n\t\tassert isinstance(start, tuple)\n\t\tassert isinstance(end, tuple)\n\t\tassert self.is_haltable_for(character, end)\n\t\treturn shortest_path(self, start, end, lambda self_, pos: self_.get_neighbours(pos, lambda pos_: self_.is_passable_for(character, pos_)))", "def path(g): #g: graph\n marked = set()\n nodes = set(g.nodes) \n output = list()\n def recursive(g):\n for i in nodes.copy():\n d = dependents(g,i)\n if (not d) or all(dd in marked for dd in d):\n output.append((i,g.nodes[i]['word']))\n marked.add(i)\n nodes.remove(i)\n if nodes==set([0]):\n break\n recursive(g)\n break\n recursive(g)\n return output", "def get_subgraph_between_nodes(self, start, end):\n nodes = set()\n nodes.add(start)\n\n to_visit = set()\n to_visit.add(start)\n\n while len(to_visit) > 0:\n current_visit = copy.copy(to_visit)\n for tv in current_visit:\n to_visit.remove(tv)\n if tv is not end:\n for s in self.successors(tv):\n to_visit.add(s)\n nodes.add(s)\n\n nodes.add(end)\n\n return self.subgraph(nodes)", "def get_tree_distance (self, index_1_beg, index_1_end,\n index_2_beg, index_2_end ):\n tempo_2_beg = index_2_beg\n tempo_2_end = index_2_end\n if index_1_beg >= index_2_end:\n index_2_beg = index_1_beg\n index_2_end = index_1_end\n index_1_beg = tempo_2_beg\n index_1_end = tempo_2_end\n \n if index_1_end - index_1_beg > 1:\n lca_1 = self.tree[self.tree.\\\n treeposition_spanning_leaves( index_1_beg, index_1_end )]\n else:\n lca_1 = self.tree[self.tree.\\\n treeposition_spanning_leaves( index_1_beg, index_1_end )[:-1]]\n\n if index_2_end - index_2_beg > 1:\n lca_2 = self.tree[self.tree.\\\n treeposition_spanning_leaves( index_2_beg, index_2_end )]\n else:\n lca_2 = self.tree[self.tree.\\\n treeposition_spanning_leaves( index_2_beg, index_2_end )[:-1]]\n\n if index_2_end - index_1_beg > 1:\n lca = self.tree[self.tree.\\\n treeposition_spanning_leaves( index_1_beg, index_2_end )]\n else:\n lca = self.tree[self.tree.\\\n treeposition_spanning_leaves( index_1_beg, index_2_end )[:-1]]\n\n distance = max(len(lca_1.treeposition()) - len(lca.treeposition()),\n len(lca_2.treeposition()) - len(lca.treeposition())\n )\n \n return distance", "def shortest_unshared_between(cls, seq_1, seq_2):\n tree = cls(seq_2 + '$', 'B').append_named_sequence(seq_1 + '$', 'A')\n condition = lambda n, c, p: 'B' not in c.seq_ids\n out = lambda n, c, p: p + n.label + c.label[0]\n unshared = tree.root.bfs_paths(condition, out)\n shortest = next(unshared)\n common = [shortest]\n for possible in unshared:\n if len(possible) < len(shortest) and not possible[-1] == '$':\n shortest = possible\n common = [possible]\n elif len(possible) == len(shortest) and not possible[-1] == '$':\n common.append(possible)\n return min(common)" ]
[ "0.5908375", "0.58959204", "0.5705175", "0.5621129", "0.5587853", "0.5524062", "0.55239725", "0.54976195", "0.5487586", "0.54690427", "0.54500455", "0.54114205", "0.5408605", "0.53920585", "0.5367605", "0.53577787", "0.5334908", "0.53292596", "0.5320974", "0.5301725", "0.5288237", "0.5263911", "0.52601266", "0.5255411", "0.52459294", "0.52423835", "0.5219186", "0.5181496", "0.51786405", "0.51675844" ]
0.6095896
0
Prune the tree that includes the begin_index and the end_index so that it doesn't include leaves outside the range limited by begin_index and end_index
def prune_tree( cls, tree, begin_index, end_index ): begin_path = tree.leaf_treeposition(begin_index) end_path = tree.leaf_treeposition(end_index) current_node = tree[begin_path[:-1]] end_node = tree[end_path[:-1]] new_tree = ParentedTree('(' + tree.node + ')') ## Initialize new tree l = [] current_new = new_tree current_old = tree for i in xrange(len(begin_path)-1): if type(current_old[begin_path[i]]) != str: current_new.insert(0, ParentedTree('('+current_old[begin_path[i]].node +')')) current_new = current_new[0] current_old = current_old[begin_path[i]] while current_old != end_node: if not (type(current_old[0]) == str or type(current_old[0]) == unicode): current_old = current_old[0] current_new.insert( 0, ParentedTree('('+current_old.node +')')) current_new = current_new[0] else: current_new.insert(0, current_old[0]) while len(current_old.parent()) == current_old.parent_index() + 1: current_old = current_old.parent() current_new = current_new.parent() current_old = current_old.parent()[current_old.parent_index() + 1] current_new.parent().insert( current_new.parent_index() + 1, ParentedTree('('+current_old.node +')')) current_new = current_new.parent()[current_new.parent_index() + 1] current_new.insert(0, current_old[0]) # print current_new return new_tree
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subtask_prune(tree):\n\n for st in tree.subtrees():\n if \"start\" in st.label():\n new_label = st.label().split(\"start\")[0] + \"start\"\n st.set_label(new_label)\n if \"end\" in st.label():\n new_label = st.label().split(\"end\")[0] + \"end\"\n st.set_label(new_label)\n\n return tree", "def prune(self, n_leaves):\n true_node_count = self.node_count - sum(self.children_left == _tree.TREE_UNDEFINED)\n leaves = np.where(self.children_left == _tree.TREE_LEAF)[0]\n to_remove_count = true_node_count - 2*n_leaves + 1\n\n nodes_to_remove = pruning_order(self, max_to_prune = to_remove_count/2)\n\n # self._copy is gone, but this does the same thing\n out_tree = _tree.Tree(*self.__reduce__()[1])\n out_tree.__setstate__(self.__getstate__().copy())\n\n for node in nodes_to_remove:\n #TODO: Add a Tree method to remove a branch of a tree\n out_tree.children_left[out_tree.children_left[node]] = _tree.TREE_UNDEFINED\n out_tree.children_right[out_tree.children_left[node]] = _tree.TREE_UNDEFINED\n out_tree.children_left[out_tree.children_right[node]] = _tree.TREE_UNDEFINED\n out_tree.children_right[out_tree.children_right[node]] = _tree.TREE_UNDEFINED\n out_tree.children_left[node] = _tree.TREE_LEAF\n out_tree.children_right[node] = _tree.TREE_LEAF\n\n # FIXME: currently should not change node_count, after deletion\n # this is not number of nodes in the tree\n #out_tree.node_count -= 2*len(nodes_to_remove)\n\n return out_tree", "def remove_biggest_loop_in_range(self, start, end):\r\n for i in range(start, end+2, 2):\r\n n = self.get_node(i)\r\n n.visited = None\r\n for i in range(0, int((end-start)//2), 2):\r\n left = start + i\r\n right = end - i\r\n s = self.get_node(left)\r\n if s.visited is not None:\r\n return self.remove_loop(left, s.visited)\r\n # Loop Detected.\r\n if left == right:\r\n break\r\n s.visited = left\r\n e = self.get_node(right)\r\n if e.visited is not None:\r\n return self.remove_loop(right, e.visited)\r\n # Loop Detected.\r\n e.visited = right\r\n return 0", "def prune(self, n_leaves):\n self.tree_ = prune(self.tree_, n_leaves)\n return self", "def reduce(self,\r\n noterange=None):\r\n\r\n if noterange is None:\r\n noterange = self.apply_limit([str(Index(a_temp))\r\n for a_temp in self.indexes()\r\n if Index(a_temp).is_top()])\r\n\r\n\r\n for tup in reduce_tupples([Index(x_temp) for x_temp in self.find_within(indexfrom=Index(0),orequal=True,withinrange=noterange)]):\r\n self.move(tup[0], tup[1], withchildren=True)", "def get_slice(the_tree,begin_element,end_element):\n all_elements=[e for e in the_tree.iter()]\n begin_index=all_elements.index(begin_element)\n end_index=all_elements.index(end_element)+1\n uniq_elements=[]\n for element in all_elements[begin_index:end_index]:\n tempset=set(uniq_elements)\n ancestors=set([e for e in element.iterancestors()])\n if len (tempset & ancestors) == 0:\n uniq_elements.append(element)\n return uniq_elements", "def _trim_tree(state):\n for n in list(state.tree.leaf_node_gen):\n if n.type_str == TYPE_NODE_TAG:\n n.parent.child_list.remove(n)\n return _trim_tree(state)", "def cull_tree(nodes_to_keep):\n [n.attrib.update({'current': 'true'}) for n in nodes_to_keep]\n all_parents = set()\n [all_parents.update(list(x.iterancestors()) + [x]) for x in nodes_to_keep]\n\n def test_inclusion(node, current):\n inclusion = node == current or node.tag in ['label', 'heading', 'cover', 'text']\n if not inclusion and node.tag == 'crosshead':\n # is the crosshead the first previous one?\n try:\n inclusion = node == current.itersiblings(tag='crosshead', 
preceding=True).next()\n except StopIteration:\n pass\n return inclusion or node in all_parents\n\n def fix_parents(node):\n while node.getparent() is not None:\n parent = node.getparent()\n to_remove = filter(lambda x: not test_inclusion(x, node), parent.getchildren())\n [parent.remove(x) for x in to_remove]\n node = parent\n [fix_parents(n) for n in nodes_to_keep]\n return nodes_to_keep[0].getroottree()", "def prune( self ):\n if self.children is None:\n return\n \n # recursively prune from bottom up\n for space in self.children:\n space.prune()\n\n # if all child nodes are empty remove them all\n for space in self.children:\n if not space.is_empty():\n return\n\n self.children = None", "def prune_tree ( self ):\n tree = copy.deepcopy ( self.tree )\n change_made = True\n # As long as changes are made, recursively prune from the root node.\n while change_made:\n change_made = self.prune_node ( tree, tree.root )\n return tree\n # End prune_tree()", "def trim_range(self, low_bound, hi_bound, full_bound=True):\n low_bound_int = int(low_bound[:self.place+1])\n hi_bound_int = int(hi_bound[:self.place+1])\n\n # Remove keys outside of range\n # modifying dict during loop caused lots of problems - del after loop\n keys_to_del = []\n for key in self.Poss_Tree:\n if key < int(low_bound[:self.place]):\n keys_to_del.append(key)\n continue\n elif key > int(hi_bound[:self.place]):\n keys_to_del.append(key)\n continue\n for key in keys_to_del:\n del self.Poss_Tree[key]\n\n # Remove values outside of range\n vals_to_del = defaultdict(list)\n for key in self.Poss_Tree:\n for choice in self.Poss_Tree[key]:\n password = int(construct_pass(key, choice))\n if password > hi_bound_int or password < low_bound_int:\n vals_to_del[key].append(choice)\n for key in vals_to_del:\n for val in vals_to_del[key]:\n self.Poss_Tree[key].remove(val)", "def get_pruned_tree_path (self, index_1_beg, index_1_end,\n index_2_beg, index_2_end, in_between_children = False ):\n tempo_2_beg = index_2_beg\n tempo_2_end = index_2_end\n if index_1_beg >= index_2_end:\n index_2_beg = index_1_beg\n index_2_end = index_1_end\n index_1_beg = tempo_2_beg\n index_1_end = tempo_2_end\n \n if index_1_end - index_1_beg > 1:\n lca_1_index = self.tree.treeposition_spanning_leaves( index_1_beg, index_1_end )\n else:\n lca_1_index = self.tree.treeposition_spanning_leaves( index_1_beg, index_1_end )[:-1]\n\n if index_2_end - index_2_beg > 1:\n lca_2_index = self.tree.treeposition_spanning_leaves( index_2_beg, index_2_end )\n else:\n lca_2_index = self.tree.treeposition_spanning_leaves( index_2_beg, index_2_end )[:-1]\n \n if index_2_end - index_1_beg > 1:\n lca_index = self.tree.treeposition_spanning_leaves( index_1_beg, index_2_end )\n else:\n lca_index = self.tree.treeposition_spanning_leaves( index_1_beg, index_2_end )[:-1]\n \n lca = self.tree[lca_index]\n new_tree = ParentedTree('(' + lca.node + ')')\n\n #Point to the root\n # Branch of the first entity\n current_pointer = new_tree\n tempo_lca = lca\n# try:\n for i in xrange(len(lca_index), len(lca_1_index)):\n tempo_lca = tempo_lca[lca_1_index[i]]\n if not (type(tempo_lca) == str or type(tempo_lca) == unicode):\n current_pointer.insert(0, ParentedTree('('+tempo_lca.node +')'))\n current_pointer = current_pointer[0]\n\n current_pointer = new_tree\n #Insert the first level of children of lca\n if len(lca_index) < len(lca_1_index) and len(lca_index) < len(lca_2_index):\n if in_between_children:\n for i in xrange(lca_1_index[len(lca_index)] + 1, lca_2_index[len(lca_index)]):\n 
current_pointer.insert(i, ParentedTree('('+lca[i].node +')'))\n\n #Point to the root\n # Branch of the second entity\n current_pointer = new_tree\n tempo_lca = lca\n first_time = True\n for i in xrange(len(lca_index), len(lca_2_index)):\n tempo_lca = tempo_lca[lca_2_index[i]]\n if not (type(tempo_lca) == str or type(tempo_lca) == unicode):\n if first_time:\n if not in_between_children:\n children_index_of_2nd_branch = 1\n else:\n \"\"\"\n Don't really need to check lca_2_index[len(lca_index)]\n 'cause if it come to this point, the length constraint\n is already satisfied\n However, it's necessary to check lca_1_index[len(lca_index)]\n \"\"\"\n if len(lca_index) < len(lca_1_index):\n children_index_of_2nd_branch = lca_2_index[len(lca_index)]\\\n - lca_1_index[len(lca_index)]\n else:\n \"\"\"\n No left child, no in_between_children\n \"\"\"\n children_index_of_2nd_branch = 0\n current_pointer.insert(children_index_of_2nd_branch,\n ParentedTree('('+tempo_lca.node +')'))\n current_pointer = current_pointer[children_index_of_2nd_branch]\n first_time = False\n else:\n current_pointer.insert(0, ParentedTree('('+tempo_lca.node +')'))\n current_pointer = current_pointer[0]\n return new_tree", "def prune_tree(tree, cutoff, posteriors):\n new_tree = []\n for e in tree:\n try:\n if posteriors[e] > cutoff:\n new_tree.append(e)\n except KeyError:\n if posteriors[e[::-1]] > cutoff:\n new_tree.append(e)\n return new_tree", "def EliminateSingleNodeSegments(self):\n sections_to_remove = [] # List of indices into self._section_list\n index = 0\n for each_section in self._section_list:\n if len(each_section.Nodes()) == 1:\n self.ReparentSectionChildren(each_section)\n sections_to_remove.append(index)\n index += 1\n sections_to_remove.reverse()\n for each_index in sections_to_remove:\n self._section_list.pop(each_index)\n self.RegenerateCaches()", "def prune_tree(self):\n tree = copy.deepcopy(self.tree)\n change_made = True\n # As long as changes are made, recursively prune from the root node.\n while change_made:\n change_made = self.prune_node(tree, tree.root)\n return tree", "def discard(self, rng: Rangelike) -> None:\n # be lazy and do O(n^2) erasure\n if isinstance(rng, RangeSet):\n temp = self.copy()\n for r in rng:\n temp.discard(r)\n self._ranges = temp._ranges\n return\n # elif _is_iterable_non_string(rng):\n # raise ValueError(\"argument is iterable and not range-like. Use .difference_update() instead\")\n # make sure rng is a Range\n rng = Range(rng)\n # remove rng from our ranges until we no longer need to\n current_node = self._ranges.first\n while current_node:\n new_range = current_node.value.difference(rng)\n if not new_range or new_range.isempty():\n # first node is entirely consumed by the range to remove. 
So remove it.\n self._ranges.pop_node(current_node)\n elif isinstance(new_range, RangeSet):\n # replace current value with lower, and add higher just afterwards.\n # It can't possibly overlap with the next range, because they are disjoint.\n current_node.value = new_range._ranges.first.value\n self._ranges.insert_after(current_node, new_range._ranges.last.value)\n # in this case, we also know that we just hit the top of the discarding range.\n # therefore, we can short-circuit.\n break\n else:\n # replace just this element, which was cut off\n if new_range > current_node.value:\n # we're only computing the difference of one contiguous range.\n # if all we've done is cut off the bottom part of this range, then\n # we must have reached the top of the discarding range.\n # therefore, we can short-circuit.\n current_node.value = new_range\n break\n else:\n # otherwise, we just change this element (maybe replace it with itself) and keep going.\n current_node.value = new_range\n current_node = current_node.next", "def split(self, thresh=0):\n\n new_tree_bounds = []\n new_tree_ids = []\n\n self.contains_null = False\n\n for qi, quad in enumerate(self.tree):\n\n left, bottom, right, top = quad.bounds\n xcenter = left + (right - left) / 2.0\n ycenter = top - (top - bottom) / 2.0\n\n quad_id = self.tree_ids[qi]\n\n for id_, bbox in zip(\n [1, 3, 0, 2],\n [\n (left, ycenter, xcenter, top),\n (xcenter, ycenter, right, top),\n (left, bottom, xcenter, ycenter),\n (xcenter, bottom, right, ycenter),\n ],\n ):\n\n id_list = list(self.sindex.intersection(bbox))\n\n if id_list:\n\n if len(id_list) > thresh:\n\n new_tree_bounds.append(bbox)\n new_tree_ids.append(quad_id + str(id_))\n\n else:\n self.contains_null = True\n\n else:\n self.contains_null = True\n\n self.tree_bounds = new_tree_bounds\n self.tree_ids = new_tree_ids\n\n return self", "def pruning_order(self, max_to_prune=None):\n\n def _get_terminal_nodes(children):\n \"\"\"Lists the nodes that only have leaves as children\"\"\"\n leaves = np.where(children[:,0]==_tree.TREE_LEAF)[0]\n child_is_leaf = np.in1d(children, leaves).reshape(children.shape)\n return np.where(np.all(child_is_leaf, axis=1))[0]\n\n def _next_to_prune(tree, children=None):\n \"\"\"Weakest link pruning for the subtree defined by children\"\"\"\n\n if children is None:\n children = tree.children\n\n t_nodes = _get_terminal_nodes(children)\n g_i = tree.init_error[t_nodes] - tree.best_error[t_nodes]\n\n return t_nodes[np.argmin(g_i)]\n\n if max_to_prune is None:\n max_to_prune = self.node_count - sum(self.children_left == _tree.TREE_UNDEFINED)\n\n children = np.array([self.children_left.copy(), self.children_right.copy()]).T\n nodes = list()\n\n while True:\n node = _next_to_prune(self, children)\n nodes.append(node)\n\n if (len(nodes) == max_to_prune) or (node == 0):\n return np.array(nodes)\n\n #Remove the subtree from the children array\n children[children[node], :] = _tree.TREE_UNDEFINED\n children[node, :] = _tree.TREE_LEAF", "def wrong_buildTree(self, start, end, cur):\n if start == end:\n self.nodes[cur] = self.nums[start]\n else: \n mid = start+(end-start)/2\n left, right = 2*cur+1, 2*cur+2\n self.nodes[cur] = self.buildTree(start, mid, left) + self.buildTree(mid+1, end, right)\n return self.nodes[cur]", "def drop_between(self, start, end):\n # catch all invalid args and throw an Index error if true\n if start < 0 or end > self.size or start > end:\n raise IndexError()\n # initialize node counter to 0\n counter = 0\n # current node for looping is front node\n current = 
self.front\n # while current node isn't None\n while current is not None:\n # if it's position is within start and end args\n if start <= counter < end:\n # skip the current node in the deque, effectively\n # deleting it\n current.prior.next = current.next\n # if current node's next is empty\n if current.next is None:\n # you have one node left, and you have to get\n # rid of it, so clear the deque and break\n self.size = 0\n self.front = None\n self.back = None\n break\n # set next node's prior to current's prior, effectively\n # skipping the current node in deque\n current.next.prior = current.prior\n # decrement size of deque\n self.size -= 1\n # add one to the counter\n counter += 1\n # move on to the next node who flows. He nose dove and sold\n # nada :)\n current = current.prior", "def getCoveringRanges( self, left_ranges, right_ranges, parent_ranges ):\n \n child_ranges = map( lambda x: (x[0], x[1], 0), left_ranges)\n child_ranges += map( lambda x: (x[0], x[1], 1), right_ranges)\n \n child_ranges.sort()\n parent_ranges.sort()\n \n new_left_ranges = []\n new_right_ranges = []\n \n parent_index = 0\n last_to = 0\n \n parent_left, parent_right = parent_ranges[parent_index]\n\n self.debug( \"child_ranges=%s\" % str(child_ranges) )\n self.debug( \"parent_ranges=%s\" % str(parent_ranges))\n \n last_left, last_right, last_is_right = child_ranges[0]\n \n for this_left, this_right, this_is_right in child_ranges[1:]:\n \n ## look at previous segment last_left to last_right:\n ## find matching parent_index:\n old_parent_index = parent_index\n while (min(parent_right, last_right) - max(parent_left, last_left)) < 0:\n parent_index += 1\n if parent_index == len(parent_ranges): break\n parent_left, parent_right = parent_ranges[parent_index]\n \n ## skip fragments that do not overlap\n if parent_index == len(parent_ranges):\n parent_index = old_parent_index\n last_left, last_right, last_is_right = this_left, this_right, this_is_right\n continue\n \n ## firstly: make segment covering\n new_left = min(parent_left, last_left)\n new_right = min(max(parent_right, last_right), this_left)\n \n if last_is_right:\n new_right_ranges.append((new_left, new_right))\n else:\n new_left_ranges.append((new_left, new_right))\n \n ## reduce parent on left side\n parent_left=max(new_right, parent_left)\n \n last_left, last_right, last_is_right = this_left, this_right, this_is_right\n \n ## process last segment\n while (min(parent_right, last_right) - max(parent_left, last_left)) < 0:\n parent_index += 1\n if parent_index >= len(parent_ranges): break \n parent_left, parent_right = parent_ranges[parent_index]\n \n new_left = min(parent_left, last_left)\n new_right = max(parent_right, last_right)\n \n if last_is_right:\n new_right_ranges.append((new_left, new_right))\n else:\n new_left_ranges.append((new_left, new_right))\n \n self.debug( \"old left ranges=%s\" % str(left_ranges))\n self.debug( \"new left ranges=%s\" % str(new_left_ranges))\n self.debug( \"old right ranges=%s\" % str(right_ranges))\n self.debug( \"new right ranges=%s\" % str(new_right_ranges))\n \n return new_left_ranges, new_right_ranges", "def _prune( tree, impurity_crit, dataSet, treeSeq ):\n\n\t\tsaved = {}\n\n\t\ttotal_leaf_impurity, num_leaves = DecisionTree._fetch(tree, impurity_crit, dataSet, saved)\n\n\t\tnodes, sets, G = saved['node'], saved['set'], saved['G']\n\n\t\t# choose TreeNode such that g is minimum to prune\n\t\tmin_g_ind = np.argmin(G)\n\t\tnode2Prune = nodes[min_g_ind]\n\t\tnode2Prune.value = DecisionTree._make_leaf(sets[min_g_ind], 
impurity_crit)\n\t\tnode2Prune.cut_off = None\n\n\t\t# get a new tree pruned\n\t\ttreeSeq['alpha'].append(G[min_g_ind])\n\t\ttreeSeq['tree'].append(tree)\n\t\ttreeSeq['num_leaves'].append(num_leaves-node2Prune.leaves()+1)\n\n\t\tif not (tree.left.cut_off is None and tree.right.cut_off is None):\n\n\t\t\tDecisionTree._prune(deepcopy(tree), impurity_crit, dataSet, treeSeq )\n\t\telse:\n\t\t\treturn", "def _next_to_prune(tree, children=None):\n\n if children is None:\n children = tree.children\n\n t_nodes = _get_terminal_nodes(children)\n g_i = tree.init_error[t_nodes] - tree.best_error[t_nodes]\n\n return t_nodes[np.argmin(g_i)]", "def searchDeadEnd(self):\n boundaries = []\n if not self.red:\n i = self.midWidth - 1\n else:\n i = self.midWidth + 1\n boudaries = [(i, j) for j in range(self.height)]\n validPositions = []\n for i in boudaries:\n if not (i[0], i[1]) in self.walls:\n validPositions.append(i)\n\n dangerPos = []\n\n toExpand = self.scanmap.twoEntryPoints()\n for (x,y) in toExpand:\n adjacent = self.scanmap.adjacentValidPoints(x, y)\n if not (x,y) in dangerPos:\n for (u, w) in adjacent:\n visited = []\n visited.append((x, y))\n safe = False\n danger = False\n DFS = util.Stack()\n DFS.push((u,w))\n while not safe and not danger:\n (i,j) = DFS.pop()\n visited.append((i,j))\n adjacents = self.scanmap.adjacentValidPoints(i,j)\n for position in adjacents:\n if not position in visited:\n DFS.push(position)\n if DFS.isEmpty():\n danger = True\n dangerPos = list(set(dangerPos) | set(visited))\n\n if (i,j) in validPositions:\n safe = True\n oneEntry = self.scanmap.oneEntryPoints()\n dangerPos = list(set(oneEntry).union(set(dangerPos)))\n dangerPos.sort()\n return dangerPos", "def prune(self, accuracy, validation_data):\n for depth in range(self.maximum_depth, 0, -1):\n accuracy = self.__prune_tree(accuracy, self.root, validation_data, depth)\n\n return accuracy", "def _prune(self):\n while len(self.data) > self.limit:\n self.data.popleft()", "def pruning(self):\n data = self.data.copy()\n for d in self.data:\n # cascade purning method. 
Inspired from \"Efficient Computation of Group Skyline Queries on MapReduce (FCU)\"\n if d in data:\n pastart = [self.drange[1] if i+self.radius>self.drange[1] else i+self.radius for i in d.getLocationMax()]\n pamax = [self.drange[1] for j in range(self.dim)]\n # prune data points that are obviously dominated by current data point\n pruned = (self.index.intersection(tuple(pastart+pamax),objects=True))\n for p in pruned:\n if p.object in data:\n data.remove(p.object)\n self.pruned = data", "def remove_short_roots(self):\n\n # Proportion of the branch point's radius that the total length has to be to avoid removal.\n # Lower multipliers remove less incorrect roots, but also don't incorrectly remove real roots\n radius_multiplier = 0\n\n edge_roots = list()\n\n for root in self.root_dict.values():\n if not root.branches_at_endpoint:\n edge_roots.append(root)\n\n while edge_roots:\n\n next_root_list = list()\n\n for root in edge_roots:\n\n if root and len(root.pixel_list) < radius_multiplier * root.pixel_list[0].radius and root.parent_root:\n\n self.remove_pixels(root.pixel_list)\n\n parent = root.remove_edge_root()\n if parent and not parent.branches_at_endpoint:\n next_root_list.append(parent)\n\n self.root_dict.pop(root.key, None)\n\n edge_roots = next_root_list", "def query_dd_range_tree(self, queries, axis=0):\n assert len(queries) == self.max_depth\n i, j = queries[axis]\n\n if i > j:\n i, j = j, i\n\n output = []\n v_split = self.find_split_node(i, j)\n if self.isleaf(v_split):\n # check if the point in v_split\n if self.qualifies(v_split, queries):\n output.append(v_split)\n else:\n v = v_split.left\n while not self.isleaf(v):\n if v.point >= i:\n # report right subtree\n subset = self.__filter(v.right, axis, queries)\n output += subset\n v = v.left\n else:\n v = v.right\n # v is now a leaf\n if self.qualifies(v, queries):\n output.append(v)\n # now we follow right side\n v = v_split.right\n while v is not None and not self.isleaf(v):\n if v.point < j:\n # report left subtree\n subset = self.__filter(v.left, axis, queries)\n output += subset\n # it is possible to traverse to an external node\n v = v.right\n else:\n v = v.left\n # check whether this point should be included too\n if v is not None and self.qualifies(v, queries):\n output.append(v)\n return output", "def leaves(node, res):\n leaf = True\n if node.lesser:\n leaf = False\n leaves(node.lesser, res)\n if node.greater:\n leaf = False\n leaves(node.greater, res)\n if leaf:\n res.append(node.indices)" ]
[ "0.6238034", "0.6027081", "0.5997571", "0.5995651", "0.59796435", "0.5975343", "0.5934116", "0.56413025", "0.5607906", "0.5580411", "0.55378866", "0.5519736", "0.5518996", "0.5501341", "0.5469353", "0.54643464", "0.54435647", "0.5434015", "0.5430153", "0.5422087", "0.5415858", "0.5410851", "0.5383976", "0.5369435", "0.53458357", "0.53114223", "0.52963144", "0.5295993", "0.52803105", "0.526526" ]
0.76061183
0
We generate the state composed of:
distGhost = distance to the nearest ghost // 0 -> near (less than 3 units), 1 -> medium (between 3 and 7), 2 -> far (more than 7 units)
isParedEast = (0 -> no wall in the East direction, 1 -> there is a wall)
isParedWest
isParedNorth
isParedShouth
directionGhost = direction in which the nearest ghost is located
def generateState(self, gameState):
    state = [None, None, None, None, None, None]

    # Compute the distance to the nearest ghost
    distGhosts = gameState.data.ghostDistances
    nearest = 100000
    for i in distGhosts:
        if i < nearest and i is not None:
            nearest = i
    if nearest <= 3:
        state[0] = 0
    elif nearest > 3 and nearest <= 7:
        state[0] = 1
    else:
        state[0] = 2

    legalActions = gameState.getLegalActions()
    # Compute the isPared (wall) flags
    # isParedEast
    if "East" in legalActions:
        state[1] = 0
    else:
        state[1] = 1
    # isParedWest
    if "West" in legalActions:
        state[2] = 0
    else:
        state[2] = 1
    # isParedNorth
    if "North" in legalActions:
        state[3] = 0
    else:
        state[3] = 1
    # isParedShouth
    if "South" in legalActions:
        state[4] = 0
    else:
        state[4] = 1

    # Block that checks whether Pac-Man has already passed through the gap that was found
    controlador = 0
    x1, y1 = gameState.getPacmanPosition()
    if len(self.huecos) == 2:
        if x1 == self.huecos[0]:
            controlador = controlador + 1
        if y1 == self.huecos[1]:
            controlador = controlador + 1
        if controlador == 2:
            self.huecos = []

    posGhosts = gameState.getGhostPositions()
    nearestGhostIndex = distGhosts.index(nearest)
    nearestGhostPos = posGhosts[nearestGhostIndex]
    pacmanPosition = gameState.getPacmanPosition()
    print(self.huecos)
    if len(self.huecos) == 2:
        goalPosition = self.huecos
    else:
        goalPosition = nearestGhostPos

    legales = []
    # The goodActions method returns the best actions Pac-Man can take to reach its goal
    for a in self.goodActions(pacmanPosition, goalPosition):
        if a in legalActions:
            legales.append(a)  # Good actions that are also legal
    if len(legales) < 1:
        # If good actions exist but none are legal, it is because a wall is blocking them
        if len(self.goodActions(pacmanPosition, goalPosition)):
            self.huecos = []
            rd = random.randint(0, len(self.goodActions(pacmanPosition, goalPosition)) - 1)
            # Look for a gap in one of the directions given by the good actions
            self.buscaHueco(gameState, self.goodActions(pacmanPosition, goalPosition)[rd], pacmanPosition[0], pacmanPosition[1])

    # Compute the direction in which the nearest ghost or gap is located.
    # We do this by taking the vector from the ghost to Pac-Man and checking which component is larger.
    # If it is X, the direction will be right or left depending on its sign; the same applies to Y.
    relX = gameState.getPacmanPosition()[0] - goalPosition[0]
    relY = gameState.getPacmanPosition()[1] - goalPosition[1]
    if abs(relX) >= abs(relY):
        if relX > 0:
            state[5] = "West"
        elif relX < 0:
            state[5] = "East"
    elif abs(relY) > abs(relX):
        if relY > 0:
            state[5] = "South"
        elif relY < 0:
            state[5] = "North"
    return state
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def formation_dir(self, id):\n player = self.players[id]\n min_dist = 2\n\n if abs(player.pos.x - FORM[self.formation][self.dir][id]['coord'].x) <= min_dist and abs(player.pos.y - FORM[self.formation][self.dir][id]['coord'].y) <= min_dist:\n player.walk_count = 0\n return 'NOTHING'\n elif abs(player.pos.x - FORM[self.formation][self.dir][id]['coord'].x) <= min_dist:\n if (player.pos.y - FORM[self.formation][self.dir][id]['coord'].y) > min_dist:\n return 'MOVE_U'\n else:\n return 'MOVE_D'\n elif abs(player.pos.y - FORM[self.formation][self.dir][id]['coord'].y) <= min_dist:\n if (player.pos.x - FORM[self.formation][self.dir][id]['coord'].x) > min_dist:\n return 'MOVE_L'\n else:\n return 'MOVE_R'\n elif (player.pos.x - FORM[self.formation][self.dir][id]['coord'].x) > min_dist:\n if (player.pos.y - FORM[self.formation][self.dir][id]['coord'].y) > min_dist:\n return random.choices(['MOVE_L', 'MOVE_U'])[0]\n else:\n return random.choices(['MOVE_L', 'MOVE_D'])[0]\n elif (player.pos.x - FORM[self.formation][self.dir][id]['coord'].x) < - min_dist:\n if (player.pos.y - FORM[self.formation][self.dir][id]['coord'].y) > min_dist:\n return random.choices(['MOVE_R', 'MOVE_U'])[0]\n else:\n return random.choices(['MOVE_R', 'MOVE_D'])[0]\n else:\n return 'NOTHING'", "def deplacer(self,direction):\n if direction == \"haut\":\n if self.y>=200: #Eviter que le joueur sorte de la grille\n if grille[self.casey-1][self.casex-1][2][0]==1 and grille[self.casey-2][self.casex-1][2][1]==1: # on teste si le déplacement est autorisé : sur la case du joueur et la case au dessus pour le déplacement vers le haut\n grille[self.casey-1][self.casex-1][3]=0 #on enleve perso=1 de la case sur laquelle etait le perso\n self.casey-=1\n self.y=self.casey*100\n grille[self.casey-1][self.casex-1][3]=1 # perso=1 pour la case sur laquelle le perso passe\n self.direction=self.haut\n\n if direction == \"bas\":\n if self.y<=600: #Eviter que le joueur sorte de la grille\n if grille[self.casey-1][self.casex-1][2][1]==1 and grille[self.casey][self.casex-1][2][0]==1: # on teste si le déplacement est autorisé : sur la case du joueur et la case en dessous\n grille[self.casey-1][self.casex-1][3]=0\n self.casey+=1\n self.y=self.casey*100\n grille[self.casey-1][self.casex-1][3]=1\n self.direction=self.bas\n\n if direction == \"gauche\":\n if self.x>=200: #Eviter que le joueur sorte de la grille\n if grille[self.casey-1][self.casex-1][2][2]==1 and grille[self.casey-1][self.casex-2][2][3]==1: # on teste si le déplacement est autorisé : sur la case du joueur et la case a gauche pour le déplacement vers la gauche\n grille[self.casey-1][self.casex-1][3]=0\n self.casex-=1\n self.x=self.casex*100\n grille[self.casey-1][self.casex-1][3]=1\n self.direction=self.gauche\n\n if direction == \"droite\":\n if self.x<=600: #Eviter que le joueur sorte de la grille\n if grille[self.casey-1][self.casex-1][2][3]==1 and grille[self.casey-1][self.casex][2][2]==1: # on teste si le déplacement est autorisé : sur la case du joueur et la case a droite pour le déplacement vers la droite\n grille[self.casey-1][self.casex-1][3]=0\n self.casex+=1\n self.x=self.casex*100\n grille[self.casey-1][self.casex-1][3]=1\n self.direction=self.droite", "def find(Map, PosI, PosF):\n \n # Pour les tests, cf. 
Pathfinding et Pathfinding2 \n \n InitialPosI = PosI\n InitialPosF = PosF\n Chemin = []\n \n Hvalue = np.zeros((np.shape(Map))) #Distance\n Gvalue = np.zeros((np.shape(Map))) #Movement Cost\n Fvalue = np.zeros((np.shape(Map))) #G+H \n Gvalue[:] = np.nan #initialiser Gvalue à une matrice NaN\n \n OpenList = [(InitialPosI,'N')]\n CloseList = []\n \n # Initialisation de Hvalue\n for i in range(np.shape(Hvalue)[0]):\n for j in range(np.shape(Hvalue)[1]):\n if Map[i,j] !=1:\n Hvalue[i,j] = abs(i-PosF[0]) + abs(j-PosF[1])\n else:\n Hvalue[i,j] = np.nan\n\n### Round 1 (+initialisations)\n \n CloseList.append(tuple(PosI))\n \n if PosI[0]-1>=0 and Map[PosI[0]-1,PosI[1]] != 1 and ((PosI[0]-1,PosI[1]) not in OpenList) and ((PosI[0]-1,PosI[1]) not in CloseList): #Check vertical haut\n OpenList.append(((PosI[0]-1,PosI[1]),'D')) #D : fleche vers le bas..\n if PosI[0]+1<=np.shape(Map)[0]-1 and Map[PosI[0]+1,PosI[1]] != 1 and ((PosI[0]+1,PosI[1]) not in OpenList) and ((PosI[0]+1,PosI[1]) not in CloseList): #Check vertical bas\n OpenList.append(((PosI[0]+1,PosI[1]),'U')) \n if PosI[1]-1>=0 and Map[PosI[0],PosI[1]-1] != 1 and ((PosI[0],PosI[1]-1) not in OpenList) and ((PosI[0],PosI[1]-1) not in CloseList): #Check horiz gauche\n OpenList.append(((PosI[0],PosI[1]-1),'R'))\n if PosI[1]+1<=np.shape(Map)[1]-1 and Map[PosI[0],PosI[1]+1] != 1 and ((PosI[0],PosI[1]+1) not in OpenList) and ((PosI[0],PosI[1]+1) not in CloseList): #Check horiz droit\n OpenList.append(((PosI[0],PosI[1]+1),'L'))\n \n \n for OV in OpenList: #OV pour OpenValue \n Gvalue[OV[0][0],OV[0][1]] = 10\n \n Fvalue = np.copy(Gvalue + Hvalue)\n for CV in CloseList: #CV pour ClosedValue\n Fvalue[CV[0],CV[1]] = np.nan\n \n\n#### Round NEXT \n ###Vers le min de Fvalue:\n while PosF not in CloseList and PosI != PosF:\n \n if np.all(np.isnan(Fvalue)): #Check si F est égale à une matrice Full NaN\n# print('Pas de chemin')\n return(False) # soit return False, soit return la position init, donc bon..\n \n Index = np.argwhere(Fvalue == np.nanmin(Fvalue))\n PosI = Index.tolist()[0]\n \n CloseList.append(tuple(PosI))\n if PosI[0]-1>=0 and Map[PosI[0]-1,PosI[1]] != 1 and ((PosI[0]-1,PosI[1]) not in OpenList) and ((PosI[0]-1,PosI[1]) not in CloseList): #Check vertical haut\n OpenList.append(((PosI[0]-1,PosI[1]),'D')) #DOWN (fleche vers le bas..)\n if PosI[0]+1<=np.shape(Map)[0]-1 and Map[PosI[0]+1,PosI[1]] != 1 and ((PosI[0]+1,PosI[1]) not in OpenList) and ((PosI[0]+1,PosI[1]) not in CloseList): #Check vertical bas\n OpenList.append(((PosI[0]+1,PosI[1]),'U')) #Up\n if PosI[1]-1>=0 and Map[PosI[0],PosI[1]-1] != 1 and ((PosI[0],PosI[1]-1) not in OpenList) and ((PosI[0],PosI[1]-1) not in CloseList): #Check horiz gauche\n OpenList.append(((PosI[0],PosI[1]-1),'R')) #Right\n if PosI[1]+1<=np.shape(Map)[1]-1 and Map[PosI[0],PosI[1]+1] != 1 and ((PosI[0],PosI[1]+1) not in OpenList) and ((PosI[0],PosI[1]+1) not in CloseList): #Check horiz droit\n OpenList.append(((PosI[0],PosI[1]+1),'L')) #Left\n \n for OV in OpenList:\n Gvalue[OV[0][0],OV[0][1]] = 10\n \n Fvalue = np.copy(Gvalue + Hvalue)\n for CV in CloseList:\n Fvalue[CV[0],CV[1]] = np.nan\n \n\n \n############## TRACING BACK \n PosF = InitialPosF\n\n while InitialPosI not in Chemin:\n \n for Trace in OpenList:\n if Trace[0] == PosF:\n Chemin.append(PosF)\n if Trace[1] == 'U':\n PosF = (PosF[0]-1,PosF[1]) #Go up\n elif Trace[1] == 'D':\n PosF = (PosF[0]+1,PosF[1]) #Go down\n elif Trace[1] == 'L':\n PosF = (PosF[0],PosF[1]-1) #Go left\n elif Trace[1] == 'R':\n PosF = (PosF[0],PosF[1]+1) #Go right\n# else:\n# 
print(Chemin)\n Chemin.reverse()\n return(Chemin)", "def possible_way(self, pos:tuple, direction:str, ispac = True):\r\n x_offset, y_offset = 0,0\r\n # Check the four different directions \r\n if direction == 'u':\r\n y_offset = -1\r\n elif direction == 'd':\r\n y_offset = 1\r\n elif direction == 'r':\r\n x_offset = 1\r\n elif direction == 'l':\r\n x_offset = -1\r\n x = pos[0] // self.grid_size\r\n y = pos[1] // self.grid_size\r\n # If the x position is out of the gamefield that means the figure is at the end of the tunnel\r\n if x + x_offset >= len(self.look_up_table[1]) or x + x_offset < 0:\r\n return 'os' #< os: other side -> The figure has to spwan at the other side of the gamefield\r\n # Get the value from the look up table\r\n value = self.look_up_table[y + y_offset][x + x_offset] \r\n # Check if the value is a dot or an Energizer \r\n if value != None and (value[0] =='p' or value[0] == 'e') and ispac:\r\n # Check if the end of the value field is a 'n' (not). The 'n' shouldn't remove from the gamefield.\r\n if value[-1] == 'n':\r\n self.look_up_table[y + y_offset][x + x_offset] = 'n'\r\n else:\r\n # Remove the dot or the energizer from the gamefield if Pac-Man eats them.\r\n self.look_up_table[y + y_offset][x + x_offset] = None\r\n return value", "def fill_walk(self):\n\n #Seguir tomando caminos hasta que se alcance la cantidad establecida.\n while len(self.x_values) < self.num_points:\n\n #Decidir cual direccion tomar y cuan lejos ir hacia esa direccion.\n x_direction = choice([1, -1])\n x_distance = choice([0, 1, 2, 3, 4])\n x_step = x_direction * x_distance\n\n y_direction = choice([1,-1])\n y_distance = choice([0, 1, 2, 3, 4])\n y_step = y_direction * y_distance\n\n #Ignorar movimientos nulos.\n if x_step == 0 and y_step == 0:\n continue\n\n #Calcular la nueva posicion.\n x = self.x_values[-1] + x_step\n y = self.y_values[-1] + y_step\n\n self.x_values.append(x)\n self.y_values.append(y)", "def ubicar_fragata():\n tamano = Fragata.tamano #se importa el tamano del barco desde su clase\n cantidad = Fragata.cantidad #se importa la cantidad de barcos desde su clase\n orientacion = orientaciones[(randint(0, 1))] #elige aleatoriamente el index de la tupla orientaciones = (\"Vertical\", \"Horizontal\")\n seguir_coordenadas = True\n while seguir_coordenadas:\n mal_ubicado = \"no\"\n if orientacion == \"Vertical\":\n #se eligen random las filas y las columnas\n coor_fila = randint(1, numero_filas)\n coor_columna = randint (1, numero_columnas)\n while (coor_fila + tamano) > 10: #como su orientacion es vertical la fila incial del barco mas su tamano (2) no puede ser mayor que 10 porque se saldria del mapa\n coor_fila = randint(1,numero_filas)\n ubicacion = (coor_fila, coor_columna)\n lista_temporal.append(ubicacion) #lista donde se ubicaran temporalmente las ubicaciones de los barcos\n while len(lista_temporal) < tamano: #sacar las posiciones restantes \n coor_fila += 1\n ubicacion = (coor_fila, coor_columna)\n lista_temporal.append(ubicacion)\n for x in lista_ubicacion_barco:\n for y in lista_temporal:\n if x == y:\n mal_ubicado = \"si\" #si alguna coordenada de este barco coincide con la de otro la variable mal ubicado sera \"si\" \n elif (y[0] == x[0] or (y[0]+1) == x[0] or (y[0]-1) == x[0]) and ((y[1]) == x[1] or (y[1]+1) == x[1] or (y[1]- 1) == x[1]): #validacion para que no se ubique el barco al lado o diagonalmente contiguo a otro\n mal_ubicado = \"si\" #si esta validacion sucede la variable mal ubicado sera \"si\" \n if orientacion == \"Horizontal\":\n #se eligen random las filas y 
las columnas\n coor_fila = randint(1, numero_filas)\n coor_columna = randint(1, numero_columnas)\n while (coor_columna + tamano) > 10: #como su orientacion es horizontal la columna incial del barco mas su tamano (2) no puede ser mayor que 10 porque se saldria del mapa\n coor_columna = randint(1, numero_columnas)\n ubicacion = (coor_fila, coor_columna)\n lista_temporal.append(ubicacion) #lista donde se ubicaran temporalmente las ubicaciones de los barcos\n while len(lista_temporal) < tamano: #sacar las posiciones restantes \n coor_columna += 1\n ubicacion = (coor_fila, coor_columna)\n lista_temporal.append(ubicacion)\n for x in lista_ubicacion_barco:\n for y in lista_temporal:\n if x == y:\n mal_ubicado = \"si\" #si alguna coordenada de este barco coincide con la de otro la variable mal ubicado sera \"si\" \n elif (y[0] == x[0] or (y[0]+1) == x[0] or (y[0]-1) == x[0]) and ((y[1]) == x[1] or (y[1]+1) == x[1] or (y[1]- 1) == x[1]): #validacion para que no se ubique el barco al lado o diagonalmente contiguo a otro\n mal_ubicado = \"si\" #si esta validacion sucede la variable mal ubicado sera \"si\" \n if mal_ubicado == \"si\": #si la variable mal ubicado es \"si\" se repetira el proceso otra vez\n seguir_coordenadas = True\n lista_temporal.clear()\n elif mal_ubicado == \"no\": #si la variable es \"no\" se continuara el proceso\n for x in lista_temporal:\n lista_ubicacion_barco.append(x) #se agregan las posiciones a la lista general\n coordenadas_fragata.append(x) \n lista_temporal.clear() #se limpia la lista temporal para usarla en el otro barco\n seguir_coordenadas = False", "def Ez_area(position, angle, detect):\n# a = range(round(-2*Ez_height),round(2*Ez_height))\n# b = range(round(-2*Ez_height),round(2*Ez_height))\n# a_valid = []\n# b_valid= []\n \n # These are the grid points in a coordinate system based on the Ez's angle\n if detect:\n a_valid = [-11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -7, -7, -7, -7, -7, -7, -7, -7, -7, -7, -7, -7, -7, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6]\n b_valid = [-6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -4, -3, -2, -1, 0, 1, 2, 3, 4, -3, -2, -1, 0, 1, 2, 3]\n else:\n a_valid = [-9, -9, -9, -9, -9, -9, -9, -9, -9, -8, -8, -8, -8, -8, -8, -8, -8, -8, -7, -7, -7, -7, -7, -7, -7, -7, -7, -6, -6, -6, 
-6, -6, -6, -6, -6, -6, -5, -5, -5, -5, -5, -5, -5, -5, -5, -4, -4, -4, -4, -4, -4, -4, -4, -4, -3, -3, -3, -3, -3, -3, -3, -3, -3, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4]#[-19, -19, -19, -19, -19, -19, -19, -19, -19, -19, -19, -19, -19, -19, -19, -19, -19, -19, -19, -18, -18, -18, -18, -18, -18, -18, -18, -18, -18, -18, -18, -18, -18, -18, -18, -18, -18, -18, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -14, -14, -14, -14, -14, -14, -14, -14, -14, -14, -14, -14, -14, -14, -14, -14, -14, -14, -14, -13, -13, -13, -13, -13, -13, -13, -13, -13, -13, -13, -13, -13, -13, -13, -13, -13, -13, -13, -12, -12, -12, -12, -12, -12, -12, -12, -12, -12, -12, -12, -12, -12, -12, -12, -12, -12, -12, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -7, -7, -7, -7, -7, -7, -7, -7, -7, -7, -7, -7, -7, -7, -7, -7, -7, -7, -7, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9]\n b_valid = [-4, -3, -2, -1, 0, 1, 2, 3, 4, -4, -3, -2, -1, 0, 1, 2, 3, 4, -4, -3, -2, -1, 0, 1, 2, 3, 4, -4, -3, -2, -1, 0, 1, 2, 3, 4, -4, -3, -2, -1, 0, 1, 2, 3, 4, -4, -3, -2, -1, 0, 1, 2, 3, 4, -4, -3, -2, -1, 0, 1, 2, 3, 4, -4, -3, -2, -1, 0, 1, 2, 3, 4, -4, -3, -2, -1, 0, 1, 2, 3, 4, -4, -3, -2, -1, 0, 1, 2, 3, 4, -4, -3, -2, -1, 0, 1, 2, 3, 4, -3, -2, -1, 0, 1, 2, 3, -2, -1, 0, 1, 2]#[-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, 
-7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -4, -3, -2, -1, 0, 1, 2, 3, 4]\n positions = []\n# for i in a:\n# for j in b:\n# if (i > ((Ez_width/2)-Ez_height-detect_length) and abs(j) < (Ez_width/2+detect_length) and i < 0) or (i > 0 and np.sqrt(i**2 + j**2) < Ez_width/2+detect_length):\n# a_valid.append(i)\n# b_valid.append(j)\n# print('AAAA', a_valid)\n# print(' ')\n# print('BBBB', b_valid)\n# print(' ')\n \n # This is a coordinate transfromation to x,y\n for i in range(len(a_valid)):\n positions.append((int(round(a_valid[i]*np.cos(angle) + b_valid[i]*np.sin(angle) + position[0])), int(round(a_valid[i]*np.sin(angle) - b_valid[i]*np.cos(angle) + position[1]))))\n return positions", "def create_grid(data, drone_altitude, safety_distance):\n\n # minimum and maximum north coordinates\n north_min = np.floor(np.min(data[:, 0] - data[:, 3]))\n north_max = np.ceil(np.max(data[:, 0] + data[:, 3]))\n #print(north_min, north_max)\n\n # minimum and maximum east coordinates\n east_min = np.floor(np.min(data[:, 1] - data[:, 4]))\n east_max = np.ceil(np.max(data[:, 1] + data[:, 4]))\n #print(east_min, east_max)\n # given the minimum and maximum coordinates we can\n # calculate the size of the grid.\n north_size = int(np.ceil((north_max - north_min)))\n east_size = int(np.ceil((east_max - east_min)))\n #print(north_size, east_size)\n # Initialize an empty grid\n grid = np.zeros((north_size, east_size))\n # Center offset for grid\n north_min_center = np.min(data[:, 0])\n east_min_center = np.min(data[:, 1])\n \n # Populate the grid with obstacles\n for i in range(data.shape[0]):\n north, east, alt, d_north, d_east, d_alt = data[i, :]\n\n if alt + d_alt + safety_distance > drone_altitude:\n obstacle = [\n int(north - d_north - safety_distance - north_min_center),\n int(north + d_north + safety_distance - north_min_center),\n int(east - d_east - safety_distance - east_min_center),\n int(east + d_east + safety_distance - east_min_center),\n ]\n grid[obstacle[0]:obstacle[1], obstacle[2]:obstacle[3]] = 1\n\n return grid", "def ghost_cc_left(self,current, ghost, g_pos):\n x, y, facing ,(st,b),start,p_prob = g_pos[ghost]\n node = self.nodes_array[x][y].getNeighborByDirection(facing)\n if st > 0 and b and node is not None:\n st = max(st-1,0)\n return [(1,(node.i,node.j,facing,(st,not b),start,p_prob))]\n\n node = self.nodes_array[x][y]\n st = st - 1 if st - 1 > 0 else 0\n if not st:\n if node.left and (facing != Directions.EAST or len(node.neighbors) == 1):\n return [(1.0, (node.left.i, node.left.j, Directions.WEST,(0,False),start,p_prob))]\n if node.down and (facing != Directions.NORTH or len(node.neighbors) == 1):\n return [(1.0, (node.down.i, 
node.down.j, Directions.SOUTH,(0,False),start,p_prob))]\n if node.right and (facing != Directions.WEST or len(node.neighbors) == 1):\n return [(1.0, (node.right.i, node.right.j, Directions.EAST,(0,False),start,p_prob))]\n if node.up and (facing != Directions.SOUTH or len(node.neighbors) == 1):\n return [(1.0, (node.up.i, node.up.j, Directions.NORTH,(0,False),start,p_prob))]\n else:\n if node.left and (facing != Directions.EAST or len(node.neighbors) == 1):\n return [(1.0, (node.left.i, node.left.j, Directions.WEST,(st,True),start,p_prob))]\n if node.down and (facing != Directions.NORTH or len(node.neighbors) == 1):\n return [(1.0, (node.down.i, node.down.j, Directions.SOUTH,(st,True),start,p_prob))]\n if node.right and (facing != Directions.WEST or len(node.neighbors) == 1):\n return [(1.0, (node.right.i, node.right.j, Directions.EAST,(st,True),start,p_prob))]\n if node.up and (facing != Directions.SOUTH or len(node.neighbors) == 1):\n return [(1.0, (node.up.i, node.up.j, Directions.NORTH,(st,True),start,p_prob))]", "def getMove(self, grid):\n# global prune\n# prune = 0\n def Terminal(stateTup):\n \"\"\"\n Checks if the node is a terminal node\n Returns eval(state) if it is terminal\n \"\"\"\n state = stateTup[0]\n maxDepth = self.depthLimit\n if stateTup[1] == maxDepth:\n val = self.h.get(str(state.map))\n if val == None:\n Val = Eval(state)\n self.h[str(state.map)] = Val\n return Val\n else:\n return val\n elif len(stateTup[0].getAvailableMoves()) == 0:\n val = self.h.get(str(state.map))\n if val == None:\n Val = Eval(state)\n self.h[str(state.map)] = Val\n return Val\n else:\n return val\n\n def Eval(state):\n \"\"\"\n This is the eval function which combines many heuristics and assigns\n weights to each of them\n Returns a single value\n \"\"\"\n\n# H1 = htest2(state)\n# return H1\n H2 = h1(state)*monotonic(state)\n return H2\n\n\n def h1(state):\n Max = state.getMaxTile()\n left = len(state.getAvailableCells())/16\n if state.getCellValue([0,0]) == Max:\n v = 1\n else:\n v= 0.3\n Max = Max/1024\n return Max*left*v\n\n def mono(state):\n mon = 0\n# for i in range(4):\n# row = 0\n# for j in range(3):\n# if state.map[i][j] > state.map[i][j+1]:\n# row+=1\n# if row == 4:\n# mon += 1\n# for i in range(4):\n# column = 0\n# for j in range(3):\n# if state.map[j][i] > state.map[j+1][i]:\n# column +=1\n# if column == 4:\n# mon +=1\n#\n#\n# return mon/8\n for i in range(4):\n if all(earlier >= later for earlier, later in zip(grid.map[i], grid.map[i][1:])):\n mon+=1\n\n return mon/8\n\n def monotonic(state):\n cellvals = {}\n Path1 = [(3,0),(3,1),(3,2),(3,3),(2,3),(2,2),(2,1),(2,0),\n (1,0),(1,1),(1,2),(1,3),(0,3),(0,2),(0,1),(0,0)]\n for i in Path1:\n cellvals[i] = state.getCellValue(i)\n mon = 0\n for i in range(4):\n if cellvals.get((i,0)) >= cellvals.get((i,1)):\n if cellvals.get((i,1)) >= cellvals.get((i,2)):\n if cellvals.get((i,2)) >= cellvals.get((i,3)):\n mon +=1\n for j in range(4):\n if cellvals.get((0,j)) >= cellvals.get((1,j)):\n if cellvals.get((1,j)) >= cellvals.get((2,j)):\n if cellvals.get((2,j)) >= cellvals.get((3,j)):\n mon+=1\n return mon/8\n\n\n\n def htest2(state):\n score1 = 0\n score2 = 0\n r = 0.5\n\n Path1 = [(3,0),(3,1),(3,2),(3,3),(2,3),(2,2),(2,1),(2,0),\n (1,0),(1,1),(1,2),(1,3),(0,3),(0,2),(0,1),(0,0)]\n Path2 = [(3,0),(2,0),(1,0),(0,0),(0,1),(1,1),(2,1),(3,1),\n (3,2),(2,2),(1,2),(0,2),(0,3),(1,3),(2,3),(3,3)]\n valDict = {}\n for n in range(16):\n valDict[Path1[n]] = state.getCellValue(Path1[n])\n for n in range(16):\n if n%3 == 0:\n self.emergency()\n cell1 = 
valDict.get(Path1[n])\n cell2 = valDict.get(Path2[n])\n score1 += (cell1) * (r**n)\n score2 += (cell2) * (r**n)\n return max(score1,score2)\n\n\n def Maximize(stateTup,A,B):\n \"\"\"\n Returns a tuple of state,eval(state)\n Takes in a stateTup(tuple of grid + depth of the grid), alpha,\n and beta\n \"\"\"\n self.emergency()\n t = Terminal(stateTup)\n if t != None:\n return (None, t)\n\n maxChild , maxUtility = None,-999999999\n state = stateTup[0]\n Map = self.dict.get(str(state.map))\n if Map == None:\n children = []\n for M in range(4):\n g = state.clone()\n if g.move(M):\n children.append(g)\n self.dict[str(state.map)] = children\n else:\n children = Map\n for child in children:\n childTup = (child,stateTup[1]+1)\n utility = Minimize(childTup,A,B)[1]\n if utility > maxUtility:\n maxChild , maxUtility = child , utility\n if maxUtility >= B:\n# global prune\n# prune +=1\n break\n if maxUtility > A:\n A = maxUtility\n\n return (maxChild,maxUtility)\n\n\n def Minimize(stateTup,A,B):\n \"\"\"\n Returns a tuple of state,eval(state)\n Takes in a stateTup(tuple of grid + depth of the grid), alpha,\n and beta\n \"\"\"\n self.emergency()\n t = Terminal(stateTup)\n if t != None:\n return (None, t)\n\n minChild , minUtility = None,999999999\n state = stateTup[0]\n Map= self.dict.get(str(state.map))\n if Map == None:\n cells= state.getAvailableCells()\n children = []\n tiles = [2,4]\n for i in cells:\n for j in tiles:\n g = state.clone()\n g.insertTile(i,j)\n children.append(g)\n self.dict[str(state.map)] = children\n else:\n children = Map\n for child in children:\n childTup = (child,stateTup[1]+1)\n utility = Maximize(childTup,A,B)[1]\n if utility < minUtility:\n minChild , minUtility = child , utility\n if minUtility <= A:\n# global prune\n# prune +=1\n break\n if minUtility < B:\n B = minUtility\n\n return (minChild,minUtility)\n\n\n\n def decision(grid):\n \"\"\"\n Decision function which returns the move which led to the state\n \"\"\"\n child = Maximize((grid,0),-999999999,999999999)[0]\n Child = child.map\n g = grid.clone()\n for M in range(4):\n if g.move(M):\n if g.map == Child:\n # global prune\n # global pruneLog\n # pruneLog.append(prune)\n # print(prune)\n # print(sum(pruneLog)/len(pruneLog))\n return M\n g = grid.clone()\n\n self.dict = {}\n self.h = {}\n self.prevTime = time.clock()\n self.depthLimit = 1\n self.mL = []\n self.over = False\n while self.over == False:\n self.depthLimit +=1\n try :\n self.mL.append(decision(grid))\n\n except KeyError:\n# print(self.depthLimit)\n return self.mL[-1]\n except IndexError:\n return random.randint(0,3)\n self.Alarm(time.clock())\n return self.mL[-1]", "def check_reached(self):\n m_x, m_y = self.destination.get_pos()\n m_radius = self.destination.radius\n distance_centre = math.sqrt((m_x - self.x)**2 + (m_y - self.y)**2)\n sum_radii = m_radius + self.radius\n if distance_centre < sum_radii:\n self.color = pygame.colordict.THECOLORS['green']\n self.has_reached = True", "def _move_ghost(self, ghost):\n pos = ghost['pos']\n new_pos = np.zeros(shape=(2,), dtype=np.float32)\n pillman = self.world_state['pillman']\n available = []\n for i in range(2, self.nactions + 1):\n update_2d_pos(self.map, pos, i, new_pos)\n if pos[0] != new_pos[0] or pos[1] != new_pos[1]:\n available.append(i)\n n_available = len(available)\n if n_available == 1:\n ghost['dir'] = available[0]\n elif n_available == 2:\n if ghost['dir'] not in available:\n if self.reverse_dir[ghost['dir'] - 2] == available[0]:\n ghost['dir'] = available[1]\n else:\n ghost['dir'] = 
available[0]\n else:\n rev_dir = self.reverse_dir[ghost['dir'] - 2]\n for i in range(n_available):\n if available[i] == rev_dir:\n available.pop(i)\n n_available -= 1\n break\n prods = np.zeros(n_available, dtype=np.float32)\n x = np.array(\n [pillman['pos'][0] - pos[0], pillman['pos'][1] - pos[1]], dtype=np.float32)\n norm = np.linalg.norm(x)\n if norm > 0:\n x *= 1. / norm\n for i in range(n_available):\n prods[i] = np.dot(x, self.dir_vec[available[i] - 2])\n if self.world_state['power'] == 0:\n if self.stochasticity > np.random.uniform():\n j = np.random.randint(n_available)\n else:\n # move towards pillman:\n j = np.argmax(prods)\n else:\n # run away from pillman:\n j = np.argmin(prods)\n ghost['dir'] = available[j]\n update_2d_pos(self.map, pos, ghost['dir'], pos)", "def casdetude_dinardo():\n file_path = PROJECT_PATH + \"/geographycal_data/Monterusciello/MontEdo_buildings\"\n router = Router(building_file=file_path)\n\n router.design_aqueduct(0)\n\n router.solve(router.acqueduct)\n minimal = router.design_minimal_aqueduct(router.acqueduct, \"Q*H\")\n kpi_calculator(minimal)\n\n print(\"N H Z P\")\n for i, (node, datadict) in enumerate(router.acqueduct.nodes.items()):\n print(i, round(datadict[\"H\"]), round(datadict[\"ELEVATION\"]), round(datadict[\"H\"] - datadict[\"ELEVATION\"]))\n\n\n router.write2shp(minimal, PROJECT_PATH + \"/Monterusciello_solution/Monterusciello_acqueduct\")\n router.write2epanet(minimal, PROJECT_PATH + \"/Monterusciello_solution/Monterusciello_acqueduct\")", "def create_grid(data, drone_altitude, safety_distance):\n\n # minimum and maximum north coordinates\n north_min = np.floor(np.amin(data[:, 0] - data[:, 3]))\n north_max = np.ceil(np.amax(data[:, 0] + data[:, 3]))\n print(0, north_max - north_min)\n\n # minimum and maximum east coordinates\n east_min = np.floor(np.amin(data[:, 1] - data[:, 4]))\n east_max = np.ceil(np.amax(data[:, 1] + data[:, 4]))\n print(0, east_max - east_min)\n\n # given the minimum and maximum coordinates we can\n # calculate the size of the grid.\n north_size = int(np.ceil(north_max - north_min))\n east_size = int(np.ceil(east_max - east_min))\n\n # Initialize an empty grid\n grid = np.zeros((north_size, east_size))\n\n # Populate the grid with obstacles\n print(data.shape[0])\n for i in range(data.shape[0]):\n north, east, alt, d_north, d_east, d_alt = data[i, :]\n # Determine which cells contain obstacles\n nc = int(north - north_min)\n ec = int(east - east_min)\n dn = int(d_north)\n de = int(d_east)\n sd = int(safety_distance)\n x0 = int(ec - (de + sd))\n y0 = int(nc - (dn + sd))\n xm = int(ec + (de + sd))\n ym = int(nc + (dn + sd))\n nm = north_max - north_min\n em = east_max - east_min\n for e in range(x0, xm):\n for n in range(y0, ym):\n # skip out of range conditions\n if e < 0:\n continue\n if e >= em:\n continue\n if n < 0:\n continue\n if n >= nm:\n continue\n if (alt + d_alt + safety_distance) <= drone_altitude:\n continue\n # plot it\n grid[n][e] = 1\n\n return grid", "def MoveGhost(self,P):\n\t\tself.fg=0\n\t\twhile(self.fg!=1):\n\t\t\tmove=random.randint(1,4)\n\t\t\tif(move==1):\n\t\t\t\tif(self.CheckMove('w')!=0 and self.CheckWall('w')!=0):\n\t\t\t\t\tif(board[self.x-1][self.y]=='C'):\n\t\t\t\t\t\tself.fg_next=1\n\t\t\t\t\telse:\n\t\t\t\t\t \tself.fg_next=0\n\t\t\t\t\tif(self.fg_prev==1):\n\t\t\t\t\t\tboard[self.x][self.y]='C'\n\t\t\t\t\telse:\n\t\t\t\t\t 
\tboard[self.x][self.y]='.'\n\t\t\t\t\tself.fg_prev=self.fg_next\n\t\t\t\t\tself.MoveUp()\n\t\t\t\t\tboard[self.x][self.y]='G'\n\t\t\t\t\tself.fg=1\n\t\t\telif(move==2):\n\t\t\t\tif(self.CheckMove('s')!=0 and self.CheckWall('s')!=0):\n\t\t\t\t\tif(board[self.x+1][self.y]=='C'):\n\t\t\t\t\t\tself.fg_next=1\n\t\t\t\t\telse:\n\t\t\t\t\t \tself.fg_next=0\n\t\t \t\t\tif(self.fg_prev==1):\n\t\t\t\t\t\tboard[self.x][self.y]='C'\n\t\t\t\t\telse:\n\t\t\t\t \t\tboard[self.x][self.y]='.'\t\n\t\t\t\t\tself.fg_prev=self.fg_next\n\t\t\t\t\tself.MoveDown()\n\t\t\t\t\tboard[self.x][self.y]='G'\n\t\t\t\t\tself.fg=1\n\t\t\telif(move==3):\n\t\t\t\tif(self.CheckMove('d')!=0 and self.CheckWall('d')!=0):\n\t\t \t\t\tif(board[self.x][self.y+1]=='C'):\n\t\t\t\t\t\tself.fg_next=1\n\t\t\t\t\telse:\n\t\t\t\t\t \tself.fg_next=0\n\t\t\t\t\tif(self.fg_prev==1):\n\t\t\t\t\t\tboard[self.x][self.y]='C'\n\t\t\t\t\telse:\n\t\t\t\t\t \tboard[self.x][self.y]='.'\n\t\t\t\t\tself.fg_prev=self.fg_next\n\t\t \t\t\tself.MoveRight()\n\t\t\t\t\tboard[self.x][self.y]='G'\n\t\t\t\t\tself.fg=1\n\t\t\telif(move==4):\n\t\t\t\tif(self.CheckMove('a')!=0 and self.CheckWall('a')!=0):\n\t\t\t\t\tif(board[self.x][self.y-1]=='C'):\n\t\t\t\t\t\tself.fg_next=1\n\t\t\t\t\telse:\n\t\t\t\t \t\tself.fg_next=0\n\t\t\t\t\tif(self.fg_prev==1):\n\t\t\t\t\t\tboard[self.x][self.y]='C'\n\t\t\t\t\telse:\n\t\t\t\t\t \tboard[self.x][self.y]='.'\n\t\t\t\t\tself.fg_prev=self.fg_next\n\t\t\t \t\tself.MoveLeft()\n\t\t\t\t\tboard[self.x][self.y]='G'\n\t\t\t\t\tself.fg=1\n\t\t\tif((P.CheckGhost(self)==1)):\n\t\t\t\tboard[self.x][self.y]=\":(\"\n\t\t\t\tPrintBoard()\n\t\t\t\tprint '\\n'\n\t\t\t\tprint \"The game is over\"\n\t\t\t\tprint \"score = {}\".format(score)\t\n\t\t\t\tbreak", "def pre_or_post_turn(self, game_field, all_ghost_out:bool):\r\n\r\n reference_pos = self.pos[0] + self.grid_size // 2, self.pos[1] + self.grid_size // 2 #< Positon is set to center of Pac-Man so there is no difference in which direction he moves\r\n field = game_field.possible_way(reference_pos, self.last_dir)\r\n self.cnt_points(field, all_ghost_out)\r\n self.dist = reference_pos[0] % self.grid_size, reference_pos[1] % self.grid_size\r\n\r\n # Check if Pac-Man is moving to the right \r\n if self.direction == 'r':\r\n\r\n # dist to the center of the crossing less then grid_size//2 -> it's a preturn\r\n if self.dist[0] < self.grid_size // 2:\r\n\r\n # Check if Pac-Man wants to move up after the crossing\r\n if self.last_dir == 'u': \r\n \r\n # Check if the next field is a field Pac-Man can move to\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] += (self.grid_size - (self.pos[0] % self.grid_size))\r\n self.pos[1] -= self.speed\r\n self.direction = self.last_dir[:]\r\n\r\n # Check if Pac-Man wants to move down after the crossing\r\n if self.last_dir == 'd':\r\n\r\n # Check if the next field is a field Pac-Man can move to\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] += (self.grid_size - (self.pos[0] % self.grid_size))\r\n self.pos[1] += self.speed\r\n self.direction = self.last_dir[:]\r\n\r\n # dist to the center of the crossing greater then grid_size//2 -> it's a postturn\r\n elif self.dist[0] > self.grid_size // 2:\r\n\r\n # Check if Pac-Man wants to move up after the crossing\r\n if self.last_dir == 'u': \r\n \r\n # Check if the next field is a field Pac-Man can move to\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= (self.pos[0] % self.grid_size)\r\n self.pos[1] -= self.speed\r\n self.direction 
= self.last_dir[:]\r\n\r\n # Check if Pac-Man wants to move down after the crossing\r\n if self.last_dir == 'd':\r\n\r\n # Check if the next field is a field Pac-Man can move to\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= (self.pos[0] % self.grid_size)\r\n self.pos[1] += self.speed\r\n self.direction = self.last_dir[:]\r\n \r\n # The rest of the function does the same as above, just for the other three directions \r\n\r\n elif self.direction == 'l':\r\n #Preturn left\r\n if self.dist[0] > self.grid_size // 2:\r\n if self.last_dir == 'u':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= (self.pos[0] % self.grid_size)\r\n self.pos[1] -= self.speed\r\n self.direction = self.last_dir[:]\r\n if self.last_dir == 'd':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= (self.pos[0] % self.grid_size)\r\n self.pos[1] += self.speed\r\n self.direction = self.last_dir[:]\r\n #Postturn left\r\n elif self.dist[0] < self.grid_size // 2:\r\n if self.last_dir == 'u':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] += (self.grid_size - (self.pos[0] % self.grid_size))\r\n self.pos[1] -= self.speed\r\n self.direction = self.last_dir[:]\r\n if self.last_dir == 'd':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] += (self.grid_size - (self.pos[0] % self.grid_size))\r\n self.pos[1] += self.speed\r\n self.direction = self.last_dir[:]\r\n \r\n elif self.direction == 'u':\r\n #Preturn up\r\n if self.dist[1] > self.grid_size // 2:\r\n if self.last_dir == 'l':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= self.speed\r\n self.pos[1] -= (self.pos[1] % self.grid_size)\r\n self.direction = self.last_dir[:]\r\n if self.last_dir == 'r':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] += self.speed\r\n self.pos[1] -= (self.pos[1] % self.grid_size)\r\n self.direction = self.last_dir[:]\r\n #Postturn up\r\n elif self.dist[1] < self.grid_size // 2:\r\n if self.last_dir == 'l':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= self.speed\r\n self.pos[1] += self.grid_size - (self.pos[1] % self.grid_size)\r\n self.direction = self.last_dir[:]\r\n if self.last_dir == 'r':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] += self.speed\r\n self.pos[1] += (self.grid_size - (self.pos[1] % self.grid_size))\r\n self.direction = self.last_dir[:]\r\n \r\n elif self.direction == 'd':\r\n #Preturn down\r\n if self.dist[1] < self.grid_size // 2:\r\n if self.last_dir == 'l':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= self.speed\r\n self.pos[1] += (self.grid_size - (self.pos[1] % self.grid_size))\r\n self.direction = self.last_dir[:]\r\n if self.last_dir == 'r':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] += self.speed\r\n self.pos[1] += (self.grid_size - (self.pos[1] % self.grid_size))\r\n self.direction = self.last_dir[:]\r\n #Postturn down\r\n elif self.dist[1] > self.grid_size // 2:\r\n if self.last_dir == 'l':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= self.speed\r\n self.pos[1] -= (self.pos[1] % self.grid_size)\r\n self.direction = self.last_dir[:]\r\n if self.last_dir == 'r':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] += self.speed\r\n self.pos[1] -= (self.pos[1] % self.grid_size)\r\n self.direction = self.last_dir[:]\r\n pass", 
"def climb_tree():\n global UP_TREE\n westdesc = \"\"\n eastdesc = \"\"\n northdesc = \"\"\n southdesc = \"\"\n UP_TREE = True\n westinvalid = False\n eastinvalid = False\n northinvalid = False\n southinvalid = False\n\n\n printmessage(\"You climb the large tree to get a look at your surroundings.\", 5, MAGENTA, 2)\n\n if ZERO_BASE_PLYR_POS in range(0, 10):\n northinvalid = True\n if ZERO_BASE_PLYR_POS in range(90, 100):\n southinvalid = True\n if ZERO_BASE_PLYR_POS in range(0, 91, 10):\n eastinvalid = True\n if ZERO_BASE_PLYR_POS in range(9, 100, 10):\n westinvalid = True\n \n if not westinvalid: \n westpos = GROUND_FEATURES_LIST[ZERO_BASE_PLYR_POS - 1]\n if HAS_COMPASS: \n DISCOVERED[ZERO_BASE_PLYR_POS + 1] = \"Y\"\n if westpos == 10: # Water\n westdesc = TREE_VIEWS[2]\n else:\n westdesc = TREE_VIEWS[1]\n\n westpos = ENEMY_LIST[ZERO_BASE_PLYR_POS - 1]\n if westpos == 1:\n westdesc = TREE_VIEWS[3]\n elif westpos == 2:\n westdesc = TREE_VIEWS[4]\n else:\n westdesc = TREE_VIEWS[5]\n\n if not eastinvalid:\n eastpos = GROUND_FEATURES_LIST[ZERO_BASE_PLYR_POS + 1]\n if HAS_COMPASS:\n DISCOVERED[ZERO_BASE_PLYR_POS - 1] = \"Y\"\n if eastpos == 10: # Water\n eastdesc = TREE_VIEWS[2]\n else:\n eastdesc = TREE_VIEWS[1]\n\n eastpos = ENEMY_LIST[ZERO_BASE_PLYR_POS + 1]\n if eastpos == 1:\n eastdesc = TREE_VIEWS[3]\n elif eastpos == 2:\n eastdesc = TREE_VIEWS[4]\n else:\n eastdesc = TREE_VIEWS[6]\n\n\n if not northinvalid:\n northpos = GROUND_FEATURES_LIST[ZERO_BASE_PLYR_POS - 10]\n if HAS_COMPASS:\n DISCOVERED[ZERO_BASE_PLYR_POS - 10] = \"Y\"\n if northpos == 10: # Water\n northdesc = TREE_VIEWS[2]\n else:\n northdesc = TREE_VIEWS[1]\n\n northpos = ENEMY_LIST[ZERO_BASE_PLYR_POS - 10]\n if northpos == 1: # bear\n northdesc = TREE_VIEWS[3]\n elif northpos == 2: # grizzly\n northdesc = TREE_VIEWS[4]\n else:\n northdesc = TREE_VIEWS[7]\n\n\n if not southinvalid:\n southpos = GROUND_FEATURES_LIST[ZERO_BASE_PLYR_POS + 10]\n if HAS_COMPASS:\n DISCOVERED[ZERO_BASE_PLYR_POS + 10] = \"Y\"\n if southpos == 10: # Water\n southdesc = TREE_VIEWS[2]\n else:\n southdesc = TREE_VIEWS[1]\n\n southpos = ENEMY_LIST[ZERO_BASE_PLYR_POS + 10]\n if southpos == 1: # bear\n southdesc = TREE_VIEWS[3]\n elif southpos == 2: # grizzly\n southdesc = TREE_VIEWS[4]\n else:\n southdesc = TREE_VIEWS[8]\n\n clear_messages(0)\n printmessage(\"West: \" + westdesc, 2, GREEN, 0)\n printmessage(\"East: \" + eastdesc, 3, YELLOW, 0)\n printmessage(\"North: \" + northdesc, 4, CYAN, 0)\n printmessage(\"South: \" + southdesc, 5, MAGENTA, 0)\n #show_movement(True, 10)\n update_player_on_map()\n pause_for_keypress()\n clear_messages(0)", "def shortest(house1, house2):\n \n # x coordinate 1 of house 1\n x1h1 = house1.x1\n # x 1 of house 2\n x1h2 = house2.x1\n # x 2 of house 1\n x2h1 = house1.x2\n # x 2 of house 2\n x2h2 = house2.x2\n\n # y 1 of house 1\n y1h1 = house1.y1\n # y 1 of house 2\n y1h2 = house2.y1\n # y 2 of house 1\n y2h1 = house1.y2\n # y 2 of house 2\n y2h2 = house2.y2\n \n # The first two if statements check if the houses are directly horizontal or\n # vertical to each other. (Or inside each other.) 
The other two if statements\n # can determine between which corners the shortest distance is located \n # otherwise.\n\n # Checks if the houses are above each other\n if x1h1 <= x1h2 <= x2h1 or x1h1 <= x2h2 <= x2h1 or x1h2 <= x1h1 <= x2h2 or x1h2 <= x2h1 <= x2h2:\n\n # Checks if the houses are next to each other\n if y1h1 <= y1h2 <= y2h1 or y1h1 <= y2h2 <= y2h1 or y1h2 <= y1h1 <= y2h2 or y1h2 <= y2h1 <=y2h2:\n \n # If they are both above and nest to each other, they must be inside\n # each other, the total distance is returned as -1\n return -1\n\n # Checks if house 2 is below house 1\n elif y1h1 > y2h2:\n \n # If so, the distance between the lower wall of house 1 and the upper\n # wall of house 2 is returned\n return y1h1 - y2h2\n\n # Checks if house 2 is above house 1\n elif y2h1 < y1h2:\n \n # If so, the distance between the upper wall of house 1 and the lower\n # wall of house 2 is returned\n return y1h2 - y2h1\n\n # Checks if the houses are next to each other\n elif y1h1 <= y1h2 <= y2h1 or y1h1 <= y2h2 <= y2h1 or y1h2 <= y1h1 <= y2h2 or y1h2 <= y2h1 <=y2h2:\n\n # Checks if house 2 is to the left of house 1\n if x1h1 > x2h2:\n \n # If so, the distance between the left wall of house 1 and the right\n # wall of house 2 is returned\n return x1h1 - x2h2\n\n # Checks if house 2 is to the right of house 1\n elif x2h1 < x1h2:\n \n # If so, the distance between the right wall of house 1 and the left\n # wall of house 2 is returned\n return x1h2 - x2h1\n\n # The rest of the statements checks between which corners the shortest \n # distance is located. This distance is calculated with Pythagoras, using\n # the coordinates of the corners in question\n\n # Checks if house 2 is to the left of house 1\n elif x1h1 > x2h2:\n\n # Checks if house 2 is below house 1\n if y1h1 > y2h2:\n \n # House 2 is located to the bottom left of house 1\n return ((y1h1 - y2h2) ** 2 + (x1h1 - x2h2) ** 2) ** 0.5\n\n # Checks if house 2 is above house 1\n elif y2h1 < y1h2:\n \n # House 2 is located to the top left of house 1\n return ((y1h2 - y2h1) ** 2 + (x1h1 - x2h2) ** 2) ** 0.5\n\n # Checks if house 2 is to the right of house 1\n elif x2h1 < x1h2:\n\n # Checks if house 2 is below house 1\n if y1h1 > y2h2:\n \n # House 2 is located to the bottom right of house 1\n return ((y1h1 - y2h2) ** 2 + (x1h2 - x2h1) ** 2) ** 0.5\n\n # Checks if house 2 is above house 1\n elif y2h1 < y1h2:\n \n # House is located to the top right of house 1\n return ((y1h2 - y2h1) ** 2 + (x1h2 - x2h1) ** 2) ** 0.5\n\n # This should never occur, but it's here as a safety net\n else:\n print(\"error\")\n return -2", "def getGhostGoal(self, gameState):\n enemies = [gameState.getAgentState(i) for i in self.getOpponents(gameState)]\n ghost = [a for a in enemies if not a.isPacman and a.getPosition() != None]\n myPos = self.getCurrentObservation().getAgentState(self.index).getPosition()\n if len(ghost) > 0:\n dis = 9999\n nearestPacman = ghost[0]\n for p in ghost:\n temp = self.getMazeDistance(myPos, p.getPosition())\n if temp < dis:\n dis = temp\n nearestPacman = p\n return nearestPacman.getPosition(), dis\n else:\n return None, None", "def calculate_crosswalk_check_tiles(self, preferred_direction):\n\n if preferred_direction == (1, 0):\n return (\n (self.position[0] + CAR_LENGTH, self.position[1] - 1),\n (self.position[0] + CAR_LENGTH, self.position[1] + 2),\n (self.position[0] + CAR_LENGTH + 1, self.position[1] - 1),\n (self.position[0] + CAR_LENGTH + 1, self.position[1] + 2),\n )\n elif preferred_direction == (-1, 0):\n return (\n (self.position[0] 
- 1, self.position[1] - 1),\n (self.position[0] - 1, self.position[1] + 2),\n (self.position[0] - 2, self.position[1] - 1),\n (self.position[0] - 2, self.position[1] + 2),\n )\n elif preferred_direction == (0, 1):\n return (\n (self.position[0] - 1, self.position[1] + CAR_LENGTH),\n (self.position[0] + 2, self.position[1] + CAR_LENGTH),\n (self.position[0] - 1, self.position[1] + CAR_LENGTH + 1),\n (self.position[0] + 2, self.position[1] + CAR_LENGTH + 1),\n )\n elif preferred_direction == (0, -1):\n return (\n (self.position[0] - 1, self.position[1] - 1),\n (self.position[0] + 2, self.position[1] - 1),\n (self.position[0] - 1, self.position[1] - 2),\n (self.position[0] + 2, self.position[1] - 2),\n )", "def getSuccessors(self, state):\n\n successors = []\n top, right = self.walls.height - 2, self.walls.width - 2\n for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:\n # Add a successor state to the successor list if the action is legal\n # Here's a code snippet for figuring out whether a new position hits a wall:\n x, y = state[0]\n dx, dy = Actions.directionToVector(action)\n nextx, nexty = int(x + dx), int(y + dy)\n hitsWall = self.walls[nextx][nexty]\n \"*** YOUR CODE HERE ***\"\n \"\"\"\n La función sucesores funciona de la siguiente manera:\n * Si la acción no hace que choque con una pared, entonces...\n - Defino nextState como las coordenadas de lo que me da la acción\n - Creo una copia de la grid de true/false que tiene el estado, para así no modificar la original\n - A esta copia le actualizo la información, si el sucesor es una de las esquinas. Tengo que realizar\n esto manualmente dada la definición de mi grid de booleanos.\n - Creo una nueva variable que es una tupla en la que inserto las nuevas coordenadas y la grid actualizada\n - La añado a la lista de sucesores\n \"\"\"\n if not hitsWall:\n nextState = (nextx, nexty) # Defino la tupla que será la posición del sucesor\n nextFood = state[1].copy() # Hago una copia para así poder modificarla tranquilamente\n if nextState == (1, 1): # Manualmente miro si es alguna de las esquinas\n nextFood[1][0] = False # Si lo es, actualizo de true a false el elemento correspondiente\n if nextState == (1, top):\n nextFood[0][0] = False\n if nextState == (right, 1):\n nextFood[1][1] = False\n if nextState == (right, top):\n nextFood[0][1] = False\n nextStateFood = (nextState, nextFood) # Lo añado como tupla\n cost = 1 # Por orden del enunciado, el coste es siempre 1\n successors.append((nextStateFood, action, cost)) # Lo añado a la lista de sucesores\n self._expanded += 1\n return successors", "def __init__(self, startingGameState):\n self.walls = startingGameState.getWalls()\n self.startingPosition = startingGameState.getPacmanPosition()\n top, right = self.walls.height - 2, self.walls.width - 2\n self.corners = ((1, 1), (1, top), (right, 1), (right, top))\n for corner in self.corners:\n if not startingGameState.hasFood(*corner):\n print('Warning: no food in corner ' + str(corner))\n self._expanded = 0 # DO NOT CHANGE; Number of search nodes expanded\n # Please add any code here which you would like to use\n # in initializing the problem\n \"*** YOUR CODE HERE ***\"\n \"\"\"\n Mi espacio de estados consistirá en que cada estado será una tupla del tipo (pos, grid), donde:\n * pos es la posición en coordenadas (x,y) (como antes)\n * grid contendrá una grid 2x2 con la información relevante de la comida en las esquinas. 
Esto es:\n - En cada item de la grid habrá un true o un false, en función de si en esa esquina hay o no comida.\n - Por ejemplo, si la grid es:\n | True False |\n | True True |\n entonces significa que ya habremos comido la comida de la esquina (right,top)\n \"\"\"\n self.startingFood = startingGameState.getFood()\n self.cornersFood = game.Grid(2, 2) # Defino la matriz tipo grid de dimensión 2x2\n self.cornersFood[0][0] = self.startingFood[1][top] # Asigno manualmente cada valor a la grid\n self.cornersFood[0][1] = self.startingFood[right][top] # El problema es que yo enumero diferente la matriz\n self.cornersFood[1][0] = self.startingFood[1][1] # Es decir, a[0][0] es la esquina superior izquierda\n self.cornersFood[1][1] = self.startingFood[right][1]\n self.startFoodPosition = (self.startingPosition, self.cornersFood)", "def heuristic(self):\r\n # 1.\r\n blacks, whites = 0, 0\r\n weights = [0 for _ in range(6)]\r\n directions = [[-1, -1], [-1, 1], [1, 1], [1, -1]]\r\n user_dir = directions[:2] if self.current_player == 'n' else directions[2:]\r\n for i in range(8):\r\n for j in range(8):\r\n blacks += 1 if self.matrix[i][j] in ['N', 'n'] else 0\r\n whites += 1 if self.matrix[i][j] in ['A', 'a'] else 0\r\n if self.matrix[i][j] == self.current_player or self.matrix[i][j] == self.current_player.upper():\r\n\r\n # numarul de piese rege\r\n if self.matrix[i][j] == self.current_player.upper():\r\n weights[1] += 7.75\r\n\r\n # numarul de piese normale\r\n else:\r\n weights[0] += 5\r\n\r\n # numarul de piese de pe baseline in functie de tipul de piesa\r\n # conform strategiilor de joc este o strategie buna sa ai cat mai multe\r\n # piesa pe baseline pentru a preveni creare de piese de tip rege ale adversarului\r\n if self.current_player in ['n', 'N']:\r\n if i == 7:\r\n weights[2] += 4\r\n elif self.current_player in ['a', 'A']:\r\n if i == 0:\r\n weights[2] += 4\r\n\r\n # numarul de piese din mijlocul tablei\r\n # la fel este o strategie buna pentru atac\r\n if 3 <= i <= 4 and 3 <= j <= 4:\r\n weights[3] += 2\r\n\r\n # numar piese vulnerabile\r\n # adica piese ce pot fi capturate de oponent la urmatoare tura\r\n for d in user_dir:\r\n\r\n vx = d[0] + i\r\n vy = d[1] + j\r\n back_x = i - d[0]\r\n back_y = j - d[1]\r\n next_x, next_y = vx + d[0], vy + d[1]\r\n if self.bounded(vx, vy) and self.matrix[vx][vy] in [self.opponent(), self.opponent().upper()]:\r\n if self.bounded(back_x, back_y) and self.matrix[back_x][back_y] == '.':\r\n weights[4] -= 3\r\n if self.bounded(vx, vy) and self.matrix[vx][vy] in [self.opponent(), self.opponent().upper()]:\r\n if self.bounded(next_x, next_y) and self.matrix[next_x][next_y] == '.':\r\n # daca elimin o piesa rege este o mutare mai buna\r\n if self.matrix[vx][vy] == self.opponent().upper():\r\n weights[5] += 10\r\n else:\r\n weights[5] += 7\r\n\r\n diff = (blacks - whites) if self.current_player == 'n' else (whites - blacks)\r\n # cand sunt mai putin piese, AI adopta o tactica mai ofensiva\r\n if blacks + whites <= 10:\r\n return sum(weights) + diff\r\n return sum(weights)", "def cuadrilateros(option,b,h):\n x1 =[0,0]\n x2 =[0,h-1]\n x3 =[b-1,0]\n x4 =[b-1,h-1]\n \n if option == 1:\n #dda\n lado1_x,lado1_y = dda_algrithm(x1[0],x1[1],x2[0],x2[1])\n lado2_x,lado2_y = dda_algrithm(x1[0],x1[1],x3[0],x3[1])\n lado3_x,lado3_y = dda_algrithm(x2[0],x2[1],x4[0],x4[1])\n lado4_x,lado4_y = dda_algrithm(x3[0],x3[1],x4[0],x4[1])\n\n return [lado1_x,lado1_y,lado2_x,lado2_y,lado3_x,lado3_y,lado4_x,lado4_y]\n else:\n #bresenham\n lado1_x,lado1_y = 
bresenham_algrithm(x1[0],x1[1],x2[0],x2[1])\n lado2_x,lado2_y = bresenham_algrithm(x1[0],x1[1],x3[0],x3[1])\n lado3_x,lado3_y = bresenham_algrithm(x2[0],x2[1],x3[0],x3[1])\n lado4_x,lado4_y = bresenham_algrithm(x3[0],x3[1],x4[0],x4[1])\n return [lado1_x,lado1_y,lado2_x,lado2_y,lado3_x,lado3_y,lado4_x,lado4_y]", "def getPossibleMoves(self): # called to get possible positions this piece can go\r\n \r\n moves = {}\r\n\r\n ids = []\r\n\r\n for piece in self.board.pieces.values():\r\n if piece.name == \"empty\":\r\n piece.glow = False\r\n piece.ready = False\r\n\r\n self.piece = self\r\n\r\n def check(direction=\"left\", heading=\"north\", x=None, y=None):\r\n piece = self.piece\r\n if direction == \"left\": x -= 50\r\n else: x += 50\r\n\r\n if heading == \"north\": y -= 50\r\n else: y += 50\r\n\r\n if (x, y) in self.board.pieces: # position is empty\r\n empty = self.board.getPiece((x, y))\r\n empty.glow = True\r\n old, new, obj = (direction, heading), (x, y), piece\r\n identity = self.getRandomID(ids) # get an ID for the move\r\n moves[identity] = old, new, obj\r\n\r\n if piece.isKing: # piece is a king, so go on\r\n check(direction, heading, x, y)\r\n else: # its not empty, so check if its comrade\r\n x1, y1 = x+25, y+25\r\n piece2 = self.board.getPiece((x1, y1))\r\n try:\r\n if piece.isComrade(piece2):# piece is comrade so return\r\n return\r\n else: # piece is not comrade, so check empty\r\n if direction == \"left\": x2 = x1-25-50\r\n else: x2 = x1-25+50\r\n\r\n if heading == \"north\": y2 = y1-25-50\r\n else: y2 = y1-25+50\r\n\r\n if (x2, y2) in self.board.pieces: # its empty, so notify player\r\n empty = self.board.getPiece((x2, y2))\r\n empty.glow = True\r\n empty.ready = True\r\n\r\n old, new, obj = (direction, heading), (x2, y2), piece2\r\n identity = self.getRandomID(ids)\r\n moves[identity] = old, new, obj\r\n\r\n check(direction, heading, piece2.x-25, piece2.y-25)\r\n check(direction, heading, x2, y2)\r\n \r\n # check empty or comrade again\r\n if direction == \"left\": x3 = x2-50\r\n else: x3 = x2+50\r\n\r\n if heading == \"north\": y3 = y2-50\r\n else: y3 = y2+50\r\n\r\n if (x3, y3) in self.board.pieces: # positon(address) is empty\r\n return\r\n else: # there is a piece, so check if comrade, stop, if not comrade continue\r\n x3+=25\r\n y3+= 25\r\n\r\n piece3 = self.board.getPiece((x3, y3))\r\n if piece3.isComrade(piece2): # comrades, so stop\r\n return\r\n else: # not comrades, so continue\r\n self.piece = piece3\r\n check(direction, heading, x, y)\r\n\r\n #self.piece = piece2\r\n \r\n #check(direction, heading, x2, y2) # keep searching\r\n else: # its not empty, so return\r\n return\r\n except:\r\n pass\r\n\r\n if self.piece.name == \"white\": direction = \"north\"\r\n else: direction = \"south\"\r\n \r\n check(\"left\", direction, self.piece.x-25, self.piece.y-25)\r\n check(\"right\", direction, self.piece.x-25, self.piece.y-25)\r\n \r\n if self.piece.isKing:\r\n if self.piece.name == \"white\": heading = \"south\"\r\n else: heading = \"north\"\r\n \r\n check(\"left\", heading, self.piece.x-25, self.piece.y-25)\r\n check(\"right\", heading, self.piece.x-25, self.piece.y-25)\r\n\r\n if self.piece.name == \"white\":\r\n eatMoves = self.board.game.thinkEatMoves(moves, \"person\")\r\n if eatMoves is not None:\r\n return eatMoves\r\n\r\n return moves", "def gridgen4(num_points, diameter, min_dist, n_miss_max=10000):\n\n # Grid size and scaling onto the grid\n grid_size = min(100, int(floor(float(diameter) / min_dist)))\n grid_cell = float(diameter) / grid_size # Grid sector cell 
size\n scale = 1.0 / grid_cell # Scaling onto the sector grid.\n print('- Grid size: %i' % grid_size)\n print('- Grid cell: %f' % grid_cell)\n\n r = diameter / 2.0 # Radius\n r_sq = r**2 # Radius, squared\n min_dist_sq = min_dist**2 # minimum distance, squared\n\n # Pre-allocate coordinate arrays\n x = numpy.zeros(num_points)\n y = numpy.zeros(num_points)\n\n # Grid meta-data\n next = numpy.zeros(num_points, dtype='i8') # Next coordinate index.\n h1 = -numpy.ones((grid_size, grid_size), dtype='i8') # First index in the grid\n h2 = -numpy.ones((grid_size, grid_size), dtype='i8') # Last index in the grid\n grid_count = numpy.zeros((grid_size, grid_size), dtype='i8') # Points in grid cell.\n\n n = num_points\n n_req = num_points\n num_miss = 0\n for j in range(n_req):\n\n # First time no need to check the minimum distance req, just needs\n # to be inside the diameter.\n if j == 0:\n done = False\n while not done:\n x[j], y[j] = get_trail_position(r)\n done = (x[j]**2 + y[j]**2) <= r_sq\n jx, jy = grid_position(x[j], y[j], scale, r)\n grid_count[jx, jy] += 1\n h1[jx, jy] = 0\n h2[jx, jy] = 0\n\n # All other points have to be inside the diameter and match the\n # minimum separation requirements.\n else:\n done = False\n while not done:\n xt, yt = get_trail_position(r)\n\n # Check if the point is inside the diameter\n if (xt**2 + yt**2) > r_sq:\n num_miss += 1\n else:\n # Scale onto grid.\n jx, jy = grid_position(xt, yt, scale, r)\n # Find minimum distance to other points\n y0 = max(0, jy - 1)\n y1 = min(grid_size - 1, jy + 1)\n x0 = max(0, jx - 1)\n x1 = min(grid_size - 1, jx + 1)\n dmin_sq = diameter\n for ky in range(y0, y1 + 1):\n for kx in range(x0, x1 + 1):\n if grid_count[kx, ky] > 0:\n kh1 = h1[kx, ky]\n for kh in range(grid_count[kx, ky]):\n dx = xt - x[kh1]\n dy = yt - y[kh1]\n dist_sq = dx**2 + dy**2\n dmin_sq = min(dist_sq, dmin_sq)\n kh1 = next[kh1]\n\n # Check if the minimum distance requirement is met.\n if dmin_sq >= min_dist_sq:\n x[j] = xt\n y[j] = yt\n if h1[jx, jy] == -1:\n h1[jx, jy] = j\n else:\n next[h2[jx, jy]] = j\n h2[jx, jy] = j\n grid_count[jx, jy] += 1\n num_miss = 0\n done = True\n else:\n num_miss += 1\n\n if num_miss >= n_miss_max:\n n = j - 1\n done = True\n\n if num_miss >= n_miss_max:\n break\n\n if n < n_req:\n x = x[0:n]\n y = y[0:n]\n\n return x, y", "def cornersHeuristic(state, problem):\n corners = problem.corners # These are the corner coordinates\n walls = problem.walls # These are the walls of the maze, as a Grid (game.py)\n \"*** YOUR CODE HERE ***\"\n \"\"\"\n En este ejercicio me he dado cuenta de un problema de mi definición del espacio de estados:\n - El espacio de estados consiste en tuplas ((x,y), grid), donde (x,y) es la posición en coordenadas\n y grid es la tabla de true/false.\n - El problema es que yo he pensado la tabla grid en forma de matriz matemática, de manera que los índices\n no van de acuerdo con la posición de las esquinas, sinó con los índices de una matriz.\n Para solucionar este problema sin tener que modificar todo lo anterior (dado que no me queda tiempo) lo que he\n tenido que hacer es crear una lista y añadir de forma ordenada los valores true/false, para que se corresponda\n cada uno con su esquina.\n \n Mi heurística consiste en lo siguiente:\n * Calculo la distancia desde la posición en la que me sitúo hasta todos los corners no visitados (los que aún\n tienen comida) y me quedo con la mínima de estas distancias, y con el corner que me de esa mínima.\n * Calculo la distancia desde ese corner (el mínimo de antes) hasta 
todos los otros posibles corners no visitados\n y de nuevo me quedo con la mínima distancia y con el corner que me da esa mínima.\n * Repito este proceso hasta que no queden corners.\n Entonces lo que hago es definir una nueva lista de corners, newListOfCorners que irá extrayendo los corners a medida\n que su distanca sea calculada. Por ejemplo, si tengo los cuatro corners con comida y estoy en una posición \n aleatoria, la lista newListOfCorners estará llena. Se calculará la distancia a cada corner y el corner que de la \n mínima será extraído de newListOfCorners. Entonces se calculará la distancia desde este corner hasta los restantes\n tres corners de newListOfCorners y el corner de esos tres que me de la mínima será extraído de la lista. Etc...\n \"\"\"\n\n # Ordenamos la lista de True's y False's para que vaya acorde con el orden de la lista corners:\n visitedCorners = []\n visitedCorners.append(state[1][1][0])\n visitedCorners.append(state[1][0][0])\n visitedCorners.append(state[1][1][1])\n visitedCorners.append(state[1][0][1])\n corners = list(corners) # De aquí saco una lista que contenga los corners ordenados.\n # Ahora los corners y la lista de visitedCorners contendrán la información de forma ordenada y coherente\n minimum = 9999999999999999 # Defino un mínimo muy grande para asegurarme que nunca sea superado\n total = 0 # Inicializo el total a cero\n newListOfCorners = [] # Creo una nueva lista para añadir los corners no estudiados\n for corner in corners: # Primero vamos a llenar la lista de corners con los que me interesen: los que tienen comida\n if visitedCorners[corners.index(corner)]: # Miramos que el corner tenga comida, sino pasamos\n newListOfCorners.append(corner) # Si tiene comida, lo añadimos\n minimCorner = corners[0] # Inicializo el minimCorner a un corner aleatorio para que no me de problemas más tarde\n actualState = state[0] # Lo mismo\n\n while not len(newListOfCorners) == 0: # Mientras la lista no esté vacía...\n for corner in newListOfCorners: # Cogemos un corner de la lista\n distanceToCorner = manhattanHeuristicToCorners(actualState, corner) # Calculamos dist. 
a corner\n if distanceToCorner < minimum: # Calculamos el mínimo\n minimum = distanceToCorner\n minimCorner = corner\n total += minimum # Y lo añadimos al total\n actualState = minimCorner # Reactualizamos cada variable para volver a empezar el bucle\n minimum = 9999999999999999999999999999999\n newListOfCorners.remove(minimCorner)\n return total", "def test_uses_relative_moves(self):\n plato = SVGPlato()\n\n plato.draw_polygon([(-10, -7), (-10, 7), (10, 7), (10, -7)])\n result = plato.g[-1].attrib['d']\n\n self.assertEquals(result, 'M-10,-7v14h20v-14z')", "def mover_bm_izquierda(self):\n self.nueva_posicion_posible_parte_superior = self.mapa.consultar_casilla_por_movimiento([self.casilla[0] - 1,\n self.casilla[1]],\n [self.vertice_1[0] - self.velocidad,self.vertice_1[1]], \n [self.vertice_1[0] - 5 - 5, self.vertice_1[1]])\n self.nueva_posicion_posible_parte_inferior = self.mapa.consultar_casilla_por_movimiento([self.casilla[0] - 1,\n self.casilla[1] + 1],\n [self.vertice_3[0] - self.velocidad,self.vertice_3[1]],\n [self.vertice_3[0] - 5,self.vertice_3[1]]) \n if self.nueva_posicion_posible_parte_superior[0] != 1 and self.nueva_posicion_posible_parte_inferior[0] != 1:\n self.x -= self.velocidad * (self.x >= 15)\n self.posicion = [self.x,self.posicion[1]]\n self.casilla = [self.casilla[0] - self.nueva_posicion_posible_parte_superior[1] *(self.nueva_posicion_posible_parte_inferior[0] != 1) * (self.nueva_posicion_posible_parte_superior[0] != 1), self.casilla[1]]\n self.redefinir_vertices()", "def aggrandir_serpent():\n\n (i,j)=coordonnees_serpent[0]\n \n if direction==0 :\n pass\n\n elif direction == \"haut\" :\n serpent.insert(0,\n TERRAIN.create_rectangle(i* coté, j * coté, i * coté+ coté, j * coté + coté, fill = \"green\", outline = 'green'))\n coordonnees_serpent.insert(0,(i,j-1))\n\n elif direction == \"bas\" :\n serpent.insert(0,\n TERRAIN.create_rectangle(i* coté, j * coté, i * coté+ coté, j * coté + coté, fill = \"green\", outline = 'green'))\n coordonnees_serpent.insert(0,(i,j+1))\n\n elif direction == \"droite\" :\n serpent.insert(0,\n TERRAIN.create_rectangle(i* coté, j * coté, i * coté+ coté, j * coté + coté, fill = \"green\", outline = 'green'))\n coordonnees_serpent.insert(0,(i+1,j))\n\n elif direction == \"gauche\" :\n serpent.insert(0,\n TERRAIN.create_rectangle(i* coté, j * coté, i * coté+ coté, j * coté + coté, fill = \"green\", outline = 'green'))\n coordonnees_serpent.insert(0,(i-1,j))" ]
[ "0.6291606", "0.6226369", "0.6037247", "0.58772886", "0.5799525", "0.57812417", "0.5745552", "0.5744289", "0.5727976", "0.5722428", "0.56805277", "0.56567246", "0.5634245", "0.55896896", "0.5572104", "0.55714655", "0.55670816", "0.55526555", "0.5545422", "0.5538702", "0.5537728", "0.55333704", "0.551894", "0.5513712", "0.5513661", "0.5510833", "0.54802775", "0.5479754", "0.54698884", "0.5468503" ]
0.625722
1
Syncs all incident workflows daily.
def daily_sync_workflow(db_session: SessionLocal, project: Project): workflow_plugin = plugin_service.get_active_instance( db_session=db_session, project_id=project.id, plugin_type="workflow" ) if not workflow_plugin: log.warning(f"No workflow plugin is enabled. ProjectId: {project.id}") return incidents = incident_service.get_all(db_session=db_session, project_id=project.id).all() sync_workflows(db_session, project, workflow_plugin, incidents, notify=False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sync_entries():\n import time\n\n while True:\n try:\n update_pending_scripts(settings['api_handler'])\n except:\n logging.exception(\"Error occured during synchronisation\")\n time.sleep(60)", "def sync(self):\n self._start_slow_sync()\n self._ask_for_all_records()\n self._process_events()\n self._process_reminders()\n self._process_recurrences()\n #self._write_events()", "def sync_all(c, environment, ingest_db=True):\n sync_database(c, environment, ingest=ingest_db)\n sync_media(c, environment)", "def sync():\n\n DFS.update(get_data_without_transactions())\n DFS[c.dfs.TRANS] = get_df_transactions()\n\n YML.update(get_config())", "def _update_all_tasks(self) -> None:\n for task in self.tasks:\n task.update()", "def sync_performance_reports():\n from mspray.apps.main.utils import find_missing_performance_report_records\n from mspray.apps.main.utils import performance_report\n from mspray.apps.main.utils import queryset_iterator\n\n missing_sprayformids = find_missing_performance_report_records()\n\n queryset = SprayDay.objects.filter(\n data__sprayformid__in=missing_sprayformids\n ).distinct(\"spray_operator\")\n\n for record in queryset_iterator(queryset):\n performance_report(record.spray_operator)", "def _run_scheduled_daily_tasks():\n worker.add_task(daily.run)", "def sync_jira():\n from security_monkey import jirasync\n if jirasync:\n app.logger.info('Syncing issues with Jira')\n jirasync.sync_issues()\n else:\n app.logger.info('Jira sync not configured. Is SECURITY_MONKEY_JIRA_SYNC set?')", "def run(self, request, queryset):\n\n for settings in queryset:\n settings.run()\n\n self.message_user(\n request,\n _('Data synchronization started in background.'))", "def _loop_daily(config, configfile, topdatadir, startdate, model_forcing):\n\n delta = datetime.timedelta(days=1)\n scriptdir = config['SETUP']['LISFDIR'] + '/lis/utils/usaf/s2s/s2s_modules/s2spost/'\n\n # The very first day may be missing. 
Gracefully handle this\n firstdate = startdate\n if _is_lis_output_missing(firstdate, model_forcing):\n firstdate += delta\n\n if startdate.month == 12:\n enddate = datetime.datetime(year=(startdate.year + 1),\n month=1,\n day=1)\n else:\n enddate = datetime.datetime(year=startdate.year,\n month=(startdate.month + 1),\n day=1)\n\n curdate = firstdate\n while curdate <= enddate:\n cmd = f\"python {scriptdir}/daily_s2spost_nc.py {configfile}\"\n for model in [\"SURFACEMODEL\", \"ROUTING\"]:\n cmd += f\" lis_fcst/{model_forcing}/{model}/\"\n cmd += f\"{curdate.year:04d}{curdate.month:02d}\"\n cmd += \"/LIS_HIST_\"\n cmd += f\"{curdate.year:04d}{curdate.month:02d}{curdate.day:02d}\"\n cmd += \"0000.d01.nc\"\n\n cmd += f\" {topdatadir}/cf_{model_forcing}_\"\n cmd += f\"{startdate.year:04d}{startdate.month:02d}\"\n\n cmd += f\" {curdate.year:04d}{curdate.month:02d}{curdate.day:02d}00\"\n\n cmd += f\" {model_forcing}\"\n\n print(cmd)\n returncode = subprocess.call(cmd, shell=True)\n if returncode != 0:\n print(\"[ERR] Problem running CF conversion!\")\n sys.exit(1)\n\n curdate += delta", "def run(self):\n self.create_all_sync_instances()", "def sync_update(self):\n for rec in self:\n if rec.ks_last_exported_date and rec.ks_sync_date:\n ks_reduced_ks_sync_time = rec.ks_last_exported_date - datetime.timedelta(seconds=30)\n ks_increased_ks_sync_time = rec.ks_last_exported_date + datetime.timedelta(seconds=30)\n if rec.ks_sync_date > ks_reduced_ks_sync_time and rec.ks_sync_date < ks_increased_ks_sync_time:\n rec.ks_sync_status = True\n else:\n rec.ks_sync_status = False\n else:\n rec.ks_sync_status = False", "def sync():\n sync_ssda()", "def syncRequests(self):\n log.msg(\"Synchronizing bot requests\")\n\n if self.client:\n self.client.send_sync(self.requests)", "def sync(self, no_of_days=1):\n zd = Zendesk()\n tg = Toggl()\n try:\n self.print(\"Syncing...\")\n self.print_divider(30)\n tickets = zd.get_tickets(no_of_days)\n for ticket in tickets:\n project_title = self.format_title(ticket.id, ticket.subject)\n if ticket.organization:\n client_id = tg.get_client_id(name=ticket.organization.name)\n if not client_id:\n new_client = tg.create_client(ticket.organization.name)\n client_id = new_client['id']\n else:\n client_id = False\n self.print(\"Ticket '%s' has no associated organization!\" % (project_title))\n all_projects = tg.get_projects()\n if not self.already_created(ticket.id, all_projects):\n self.print(\"Creating project '%s'...\" % (project_title))\n result = tg.create_project(project_title, client_id, is_private=False)\n self.print(\"Toggl response:\")\n self.log(result, silent=False)\n else:\n self.print(\"There is already a Toggl project for Zendesk ticket #%s!\" % ticket.id)\n pass\n # TODO: edit Toggl project\n # tg.edit_project(project_id, name=ticket.subject)\n self.print_divider(30)\n self.print(\"Done!\")\n except:\n self.log(traceback.format_exc(), silent=False)", "def sync_territories(self):\n for territory_state in self.territory.all():\n territory_state.sync()", "def test_sync_all_models(monkeypatch):\n sync_model_mock = Mock()\n monkeypatch.setattr('datahub.search.tasks.sync_model', sync_model_mock)\n\n sync_all_models.apply()\n tasks_created = {call[1]['args'][0] for call in sync_model_mock.apply_async.call_args_list}\n assert tasks_created == {app.name for app in get_search_apps()}", "def send_notifications():\n due_notifications = Notification.query.filter(Notification.delivery_date <= datetime.now(timezone.utc))\n for notification in due_notifications:\n 
send_notification.delay(notification.id)", "def sync_all_teams_coverage():\n teams = Team.objects.all()\n\n for team in teams:\n identifier = team.identifier\n\n sync_team_coverage.apply_async(args=(identifier, ))\n sync_team_cluster_stats.apply_async(args=(identifier, ))\n sync_team_advisory_stats.apply_async(args=(identifier, ))", "def batch_transfer(self):\n ticket_range = self.zendesk.ticket_range()\n for i in range(1, ticket_range):\n tickets = self.zendesk.get_list_of_tickets(i)\n for ticket in tickets[\"tickets\"]:\n ticket_id = ticket[\"id\"]\n self.transfer_ticket(ticket_id)", "def sendAllNotifications():\n delta = prefs.getDaysToNotifyMinistriesQuestionsPendingResponse()\n date = datetime.date.today()\n sendNotificationToMinistry(date)\n sendNotificationToClerksOffice(date)\n sendNotificationToMP(date)", "def doSync (self) :\r\n \r\n self.factory.getSyncFor(self)", "def sync_datasets_acls(self):\n future_response = self.client._perform_json(\n \"POST\", \"/admin/connections/%s/sync\" % self.name,\n body = {'root':True})\n return DSSFuture(self.client, future_response.get('jobId', None), future_response)", "def update_synchronization():\n logger.debug(\"Update synchronizations started\")\n for sa in SocialAttributes.objects.filter(start_page_token__isnull=False):\n if should_sync(sa.user, 'google-oauth2', 'tasks.gdrive'):\n if sa.user.social_auth.filter(provider='google-oauth2').first():\n access_token, refresh_token = get_google_tokens(sa.user)\n subtask(sync_gdrive_changes).delay(sa.user, access_token, refresh_token, sa.start_page_token)\n else:\n logger.info(\"Gdrive oauth token for user '%s' already in use, skipping sync ...\", sa.user.username)", "def sync(self, streams):\n # TODO: pass the streams\n for tap_stream_id in streams:\n LOGGER.info(\"Syncing stream:\" + tap_stream_id)\n\n if tap_stream_id == \"deliveries\":\n self.sync_deliveries(tap_stream_id)\n else:\n self.sync_full_table_streams(tap_stream_id)\n\n singer.write_state(self.state)", "def sync(config, state, catalog):\n # Loop over selected streams in catalog\n for stream in catalog.get_selected_streams(state):\n LOGGER.info(\"Syncing stream:\" + stream.tap_stream_id)\n\n full_path = \"schemas/{}.json\".format(stream.tap_stream_id.lower())\n schema = utils.load_json(get_abs_path(full_path))\n singer.write_schema(\n stream_name=stream.tap_stream_id,\n schema=schema,\n key_properties=stream.key_properties,\n )\n\n get_token(config)\n interval = set_interval(lambda: get_token(config), 3500)\n get_report(stream.tap_stream_id, config, schema)\n interval.cancel()\n singer.write_state({\"last_updated_at\": str(datetime.now().isoformat()), \"stream\": stream.tap_stream_id})\n return", "def run(self, request, queryset):\n\n for sequence in queryset:\n for settings in sequence.settings.all():\n settings.buffer_file = sequence.buffer_file\n settings.save()\n settings.run()\n\n self.message_user(\n request,\n _('Data synchronization started in background.'))", "def sync_on_start():\n if (environ.get('ENV', None) != \"development\"):\n print(\"[#] Not dev environment. Syncing!\", flush = True)\n sync_stats()\n sync_users()\n else:\n print(\"[#] Dev environment. 
Skipping sync...\", flush = True)", "def sync_dashboards(app=None):\n\tif not cint(frappe.db.get_single_value('System Settings', 'setup_complete')):\n\t\treturn\n\tif app:\n\t\tapps = [app]\n\telse:\n\t\tapps = frappe.get_installed_apps()\n\n\tfor app_name in apps:\n\t\tprint(\"Updating Dashboard for {app}\".format(app=app_name))\n\t\tfor module_name in frappe.local.app_modules.get(app_name) or []:\n\t\t\tfrappe.flags.in_import = True\n\t\t\tmake_records_in_module(app_name, module_name)\n\t\t\tfrappe.flags.in_import = False", "def updateAllRealtime():\n for stockName in db.STOCK_MAP.keys():\n getStock(stockName, \"all\")\n\n db.UPDATING_REALTIME = False" ]
[ "0.57603675", "0.57562184", "0.57505476", "0.55784416", "0.5569469", "0.55263174", "0.54931366", "0.5471725", "0.5429856", "0.54000676", "0.5378067", "0.53648674", "0.5315507", "0.52774256", "0.5255891", "0.52555865", "0.5242909", "0.5198132", "0.5193238", "0.51775247", "0.5162619", "0.51459545", "0.5115691", "0.51062095", "0.5100662", "0.5095773", "0.5086817", "0.50845855", "0.50762475", "0.50659215" ]
0.7605369
0
Method to launch the camera to capture a new user's image
def launch_webcam(self): global face_encoding # Call the image_import.add_user method which launches the camera and # returns the face encodings if a new picture is taken face_encoding = image_import.add_user() # Check if a new image was returned from the add_user method if len(face_encoding) == 128: # Confirm if a new image has been captured self.label_face_captured.setText(QtCore.QCoreApplication.translate("MainWindow", "Image Captured ")) self.check_box.show() self.check_box.setEnabled(True) self.check_box.setChecked(True) else: # Notify if a new image is not captured self.label_face_captured.setText(QtCore.QCoreApplication.translate("MainWindow", "No Image Captured")) self.check_box.hide()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def take_picture(self):\n self.drone.take_picture()", "def take_picture(self):\n self.drone.take_picture()", "def capture(self):\n current_time=time.strftime('%Y%m%d-%H%M%S')\n self.filepath=f\"files/{current_time}.png\"\n self.ids.camera.export_to_png(self.filepath)\n self.manager.current='image_screen'\n self.manager.current_screen.ids.img.source=self.filepath", "def start_camera(self):\n # create the video capture thread\n self.thread = VideoThread()\n # connect its signal to the update_image slot\n self.thread.change_pixmap_signal.connect(self.update_image)\n # start the thread\n self.thread.start()", "def startCamera(self):\n if self.video == \"camera\":\n self.cap = cv2.VideoCapture(gstreamer_pipeline(\n capture_width=416, capture_height=416, flip_method=0), cv2.CAP_GSTREAMER)\n else:\n video_path = Path(self.video)\n if not video_path.exists():\n raise Exception(\"Video file not found\")\n self.cap = cv2.VideoCapture(str(video_path))", "def open_camera(self):\n camera_source = self.winOpenCam.camera_source_used()\n if camera_source:\n param_name = select_file(\n \"Select Parameter\", \"../\", \"Parameter Files (*.json)\")\n if param_name:\n self.moildev = Moildev(param_name)\n self.running_video(camera_source)\n self.cam = True", "def start():\n global running\n running = True\n messagebox.showinfo(\"Camera mode\",\"Start image grab\")\n camera.start_preview(fullscreen=False, window = (100,20,612,404))", "def capture_image(self, data={}):\n if self.camera:\n image_name = f'{os.path.join(self.path, self.filename)}.jpg'\n self.camera.capture(image_name)\n self.last_image = os.path.abspath(image_name)\n self.increment_count()\n self.fire({'event': 'ImageCaptured', 'image': image_name})", "def take_photo(self):\n\n status = self.camera.status()\n if status['mode'] != 'still':\n # place camera in snapshot mode\n self.camera.command('mode', 'still')\n\n photo_successful = self.camera.command('record', 'on')\n\n if photo_successful:\n\n # sleep for two seconds so the camera can process\n # and serve the new photo via http\n\n retrieved = False\n while not retrieved:\n print(\"Waiting for image to be served.\")\n time.sleep(2)\n retrieved = self.get_photos_from_device()\n\n print(\"Image got served.\")\n return True\n\n else:\n return False", "def camera():\n while True:\n subprocess.check_output(['fswebcam', 'image.jpg'])\n sleep(60)", "def camera_start(self):\n mycam = ONVIFCamera(self.__cam_ip, 80, self.__cam_user, self.__cam_password)\n logging.info('Create media service object')\n media = mycam.create_media_service()\n logging.info('Get target profile')\n media_profile = media.GetProfiles()[0]\n logging.info('Camera working!')\n\n self.mycam = mycam\n self.camera_media_profile = media_profile\n self.camera_media = media\n self.mycam = mycam\n\n return self.mycam", "def initialCamera(self, cmd):\n\n pass", "def _start_live_capture(self):\n if self._scanner is not None:\n self._stop_live_capture()\n\n self._scanner = CameraScanner(self._new_scan_queue)\n self._scanner.stream_camera(config=self._config)", "def StartWebcam(self):\n if not os.path.exists('static'):\n os.mkdir('static')\n camera = olpc.Camera('static/webcam.png')\n camera.StartWebcam()", "def preview(self,*args,**kwargs):\n self.cam.start_preview(*args,**kwargs)", "def camera_image(self):\n if not self.ezvizService.switchState:\n return \"\"\n\n now = time.time()\n if now < self._last_snapshot_time + self._interval_snapshots:\n return self._last_image\n\n result = self.ezvizService.post('/lapp/device/capture', 
data={'deviceSerial':self.deviceSerial,'channelNo':1})\n if (result['code']!='200'):\n _LOGGER.error(\"EZVIZ capture image fail:%s\", result)\n return self._last_image\n\n image_path = result['data']['picUrl']\n try:\n response = requests.get(image_path)\n except requests.exceptions.RequestException as error:\n _LOGGER.error(\"EZVIZ getting camera image: %s\", error)\n return self._last_image\n\n self._last_snapshot_time = now\n self._last_image = response.content\n return self._last_image", "def run(self):\n\n info(\"creating camera\")\n self.camera_controller = CameraController()\n self.camera_controller.camera.resolution = self.photo_resolution\n\n self.screen_resolution = ui.get_screen_resolution()\n self.normalized_screen_resolution = ui.normalize_dimension(self.screen_resolution)\n info(\"screen_resolution: %s\", self.screen_resolution)\n info(\"normalized_screen_resolution: %s\", self.normalized_screen_resolution)\n\n info(\"creating buffer image and canvas\")\n self.buffer_image = Image.new('RGB', self.normalized_screen_resolution)\n self.canvas = ImageDraw.Draw(self.buffer_image)\n debug(\"buffer_image resolution: %s\", self.buffer_image.size)\n\n info(\"creating preview renderer\")\n self.preview_renderer = self.camera_controller.start_preview(\n fullscreen=False,\n window=ui.normalize_dimension((\n 0, 0,\n self.normalized_screen_resolution[0] * 0.75,\n self.normalized_screen_resolution[1]\n )))\n debug(\"preview location: %s\", self.preview_renderer.window)\n\n info(\"creating window renderer\")\n self.window_renderer = self.camera_controller.add_overlay(\n self.buffer_image.tobytes(),\n size=self.buffer_image.size,\n fullscreen=False,\n layer=1,\n window=(\n 0, 0,\n self.normalized_screen_resolution[0],\n self.normalized_screen_resolution[1]\n ))\n debug(\"window location: %s\", self.window_renderer.window)\n\n info(\"setting up UI\")\n self._setup_ui()\n\n info(\"setting up input\")\n self.yes_button = GPIOButton(self.yes_pin)\n self.no_button = GPIOButton(self.no_pin)\n\n info(\"starting app\")\n self._enter_state(STATE_DEFAULT)\n self.render_timer.start()\n ui_context = ui.UIContext(self.canvas, self.window, update_function=self._logic)\n ui_context.main_loop()\n\n info(\"exiting\")", "def capture_image():\n global img_tk\n r, img_cam = cam.read()\n img_pil = Image.fromarray(cv2.cvtColor(img_cam, cv2.COLOR_BGR2RGB))\n img_tk = ImageTk.PhotoImage(img_pil)\n tk_cam.create_image(0, 0, image=img_tk, anchor='nw')\n return img_pil", "def capture_image(self):\n ext = self.image_save_type.lower()\n\n if self.calibrating:\n print('calibrating')\n\n if ext == 'fits':\n self.save_fits()\n self._image_counter += 1\n else:\n img = self.original_image\n path = os.path.join(self.home, 'data')\n name = \"camtrak_frame_{}.png\".format(self._image_counter) \n fn = os.path.join(path, name)\n cv2.imwrite(fn, img)\n\n QtWidgets.QApplication.beep()\n self.statusBar().showMessage(f'Saved image to {fn}')\n self._image_counter += 1", "def preview_camera(self):\n self.statusbar.clearMessage()\n self.statusbar.showMessage('Previewing the camera. 
Press the Enter key to exit.')\n self.buttonPreview.setText('Press Enter\\nto finish.')\n self.comboCamera.setEnabled(False)\n self.buttonSelectColor.setEnabled(False)\n self.buttonLogSet.setEnabled(False)\n cap = webcam.initiate_camera(self.comboCamera.currentIndex())\n while True:\n _, frame = cap.read()\n frame = np.rot90(frame, self.comboRotation.currentIndex())\n cv2.imshow('Camera Preview', frame)\n key = cv2.waitKey(1) & 0xFF\n if key == ord('\\r'):\n break\n cap.release()\n cv2.destroyAllWindows()\n self.buttonPreview.setText('Preview')\n self.comboCamera.setEnabled(True)\n self.buttonSelectColor.setEnabled(True)\n self.buttonLogSet.setEnabled(True)\n self.statusbar.clearMessage()", "def start_camera(config):\n print(\"Starting {} on {}\".format(config.name, config.path))\n cs = CameraServer.getInstance()\n camera = cs.startAutomaticCapture(name=config.name, path=config.path)\n\n camera.setConfigJson(json.dumps(config.config))\n\n return cs, camera", "def run(self):\n\n with Camera.instance() as camera:\n try:\n # create log file and write headers\n with open(self.log_path, \"w+\") as log:\n writer = csv.writer(log)\n writer.writerow([\"image\", \"angle\", \"previous_angle\"])\n except OSError:\n raise OSError(\"The log file could not be created.\")\n\n previous_angle = 0.0\n while self.active:\n if camera.image is None: continue # skip loop if no image provided\n\n # save image\n img_filename = datetime.today().strftime(\"%H-%M-%S-%f\") + \".\" + self.img_extension\n np.save(self.img_dir + img_filename, camera.image)\n\n try:\n # write data to csv file\n with open(self.log_path, \"a\") as log:\n writer = csv.writer(log)\n angle = str(round(self.driver.angle, 3))\n previous_angle = str(previous_angle)\n writer.writerow([img_filename, angle, previous_angle])\n except OSError:\n raise OSError(\"The log file could not be opened.\")\n\n previous_angle = angle # update previous angle for next loop\n time.sleep(self.CAPTURE_INTERVAL)", "def capture(self):\n current_time = time.strftime('%Y%m%d-%H%M%S')\n filepath = f'files/{current_time}.png'\n self.ids.camera.export_to_png(filepath)\n self.manager.current = 'image_screen' # switch to the next screen\n self.manager.current_screen.ids.img.source = filepath # inherit img to the next screen\n return filepath", "def capture():\n stream = BytesIO()\n cam.capture(stream, 'jpeg')\n data = np.fromstring(stream.getvalue(), dtype=np.uint8)\n # \"Decode\" the image preserving color\n img = cv2.imdecode(data, 1)\n # switch BGR order to RGB order\n img = img[:, :, ::-1]\n\n # resize image to match training size\n img = cv2.resize(img, (args.resize, args.resize), interpolation=cv2.INTER_AREA)\n print(\"done resizing\")\n\n# cv2.imshow('image',img)\n# cv2.waitKey(0)\n# cv2.destroyAllWindows()\n return img.flatten()", "def camera(ctx, cam_id, verbose):\n client = ctx.obj.client\n cap = cv2.VideoCapture(cam_id)\n frame_num = 1\n classes = {}\n try:\n while True:\n ret, frame = cap.read()\n if not ret:\n print(\"Stream unavailable. 
Exiting.\")\n break\n if verbose:\n print(frame)\n cv2.imshow('Camera Feed', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n frame_num += 1\n except:\n pass\n\n cap.release()\n cv2.destroyAllWindows()", "def screenShot(self, cam=None, path=os.path.expanduser('~'), basenm='view'):\n if cam is None:\n # This allows use to dynamicly select cameras\n cam = GetActiveCamera()\n os.chdir(path)\n self.view(cam=cam)\n WriteImage(\"%s.png\" % (basenm))", "def grab_image(self):\n _, camera_image = self.camera.read()\n with self.lock:\n self.image = camera_image", "def onclick_open_camera_button(self):\n self.openCam.show()", "def rpi_capture_image(project, number):\n import picamera\n #now = datetime.datetime.now()\n #now_str = now.strftime('%Y%m%d:%H:%M:%S')\n #print now_str\n name = project.name\n #print name\n if not os.path.exists(IMAGE_PATH):\n call([\"mkdir\", IMAGE_PATH])\n image_number = str(number).zfill(7)\n path = \"%s%s/\" % (IMAGE_PATH, name)\n call([\"mkdir\", path])\n filename = \"%s_%s.jpg\" % (name, image_number)\n #call([\"fswebcam\", \"-r\", \"1280x720\", \"--no-banner\", \"%s%s\" % (path, filename)])\n with picamera.PiCamera() as camera:\n camera.resolution = (1280, 720)\n camera.start_preview()\n camera.exposure_compensation = 2\n camera.exposure_mode = 'spotlight'\n camera.meter_mode = 'matrix'\n camera.image_effect = 'gpen'\n # Give the camera some time to adjust to conditions\n time.sleep(2)\n camera.capture(\"%s%s\" % (path, filename))\n camera.stop_preview()\n image = File(open('%s%s' % (path, filename), 'r'))\n ti = TimelapseImage(name=name, image=image, timelapse=project)\n ti.save()\n print \"photo taken\"", "def take_picture():\n\n #This reads the data from the webcam\n ret, frame = vid.read() \n \n #This writes the image to the unknown directory\n cv2.imwrite('/Users/srikarkarra/Downloads/Important Stuff/Coding/facial_rec/unknown/unknown.jpg', frame)" ]
[ "0.70523137", "0.70523137", "0.70149106", "0.69485945", "0.6907842", "0.6902539", "0.6862673", "0.68337494", "0.6800053", "0.6798291", "0.6715196", "0.6605783", "0.6555237", "0.6545424", "0.65297544", "0.65204567", "0.6518754", "0.6515041", "0.6500687", "0.6491813", "0.64815116", "0.63889736", "0.63731474", "0.6372307", "0.6367578", "0.63378525", "0.6336501", "0.63278896", "0.6326718", "0.6305899" ]
0.73050624
0
Expand config file path and switch default path if OS is Windows.
def expand_config_path(path): if path == DEFAULT_LINUX_PATH and os.name == "nt": path = DEFAULT_WINDOWS_PATH return os.path.expanduser(path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setdefault_path(envvar, default):\n if envvar in os.environ:\n return _strip_slash(os.environ[envvar])\n value = os.environ[envvar] = _strip_slash(default() if callable(default) else default)\n return value", "def _get_config_path():\n return os.path.join(os.path.expanduser('~'))", "def set_config(self, file, prop):\n if file.lower() not in ['config']:\n file = os.path.abspath(file)\n elif file.lower() == 'config':\n file = os.path.join(self.modpath, 'config', self.configfiles[prop])\n return file", "def _real_paths(config):\n for key in ('--config', '--ffmpeg-bin', '--log', '--music-source', '--working-dir'):\n if not config[key]:\n continue\n config[key] = os.path.realpath(os.path.expanduser(config[key]))", "def update_config_windows(config): # pragma: windows\n out = []\n if not config.has_section('windows'):\n config.add_section('windows')\n # Find paths\n clibs = [('libzmq_include', 'zmq.h', 'The full path to the zmq.h header file.'),\n ('libzmq_static', 'zmq.lib', 'The full path to the zmq.lib static library.'),\n ('czmq_include', 'czmq.h', 'The full path to the czmq.h header file.'),\n ('czmq_static', 'czmq.lib', 'The full path to the czmq.lib static library.')]\n for opt, fname, desc in clibs:\n if not config.has_option('windows', opt):\n fpath = locate_file(fname)\n if fpath:\n print('located %s: %s' % (fname, fpath))\n config.set('windows', opt, fpath)\n else:\n out.append(('windows', opt, desc))\n return out", "def expand_path(self, original_path):\n path = self.fix_dir_separator(original_path)\n path = os.path.expanduser(path)\n return os.path.join(self.config['work_dir'], path)", "def default_path():\n return os.path.join(os.environ.get('OVERRIDE_ETC', '/etc'), 'auth')", "def _adjust_path_values(self, variable_name: str, value: str) -> str:\n if not self._file_path:\n return value\n for token in FileConfig.PATH_TOKEN:\n if token in variable_name:\n config_file_dir = os.path.dirname(self._file_path)\n resolved_path = os.path.join(config_file_dir, value)\n value = os.path.realpath(resolved_path)\n break\n return value", "def get_default_config_path():\n if os.name == 'posix':\n config_path = os.path.join(os.path.expanduser(\"~\"), '.fpdb')\n elif os.name == 'nt':\n config_path = os.path.join(os.environ[\"APPDATA\"], 'fpdb')\n else: config_path = False\n return config_path", "def set_config_path(self, new_config_path):\n oldpath = self.get_config_path()\n cdir, cfile = os.path.split(new_config_path)\n \n if not cdir.startswith('/'):\n cdit='/'+cdir\n if not cfile:\n cfile = 'site.yaml'\n\n self.dropbox_base_dir = cdir\n self.dropbox_site_yaml = cfile\n newpath = self.get_config_path()\n if newpath !=oldpath:\n return oldpath", "def getConfigPath():\n\n global args, ConfigPathDefault\n\n if args.config_location:\n return args.config_location;\n return ConfigPathDefault;", "def _expand(self):\n return Path(os.path.expandvars(self)).expanduser().resolve()", "def conditional_abspath (filename):\n if sys.platform.find('cygwin') != -1:\n return filename\n else:\n return os.path.abspath(filename)", "def get_path(self, key):\n value = self.getn(key)\n if value is None:\n logger.warning(\"Specified config '%s' is None or not exist\" % key)\n return None\n if not isinstance(value, str):\n msg = \"Specified config '%s' is non-string: %s\" % (key, value)\n logger.error(msg)\n raise ValueError(msg)\n #\n path = os.path.expanduser(value)\n if not os.path.isabs(path):\n # Got relative path, try to convert to the absolute path\n if hasattr(self, \"userconfig\"):\n # User configuration 
loaded\n path = os.path.join(os.path.dirname(self.userconfig), path)\n else:\n logger.warning(\"Cannot convert to absolute path: %s\" % path)\n return os.path.normpath(path)", "def expand_path(path):\n path = os.path.expanduser(path)\n path = os.path.expandvars(path)\n path = os.path.abspath(path)\n return path", "def config_path():\n dir_ = os.path.dirname(__file__)\n demo_dir = os.path.join(dir_, '../..')\n return os.path.join(demo_dir, 'mike_dev.ini')", "def _expand_path(self, path):\n return os.path.abspath(os.path.expanduser(path))", "def expand_path(path):\n return os.path.expandvars(os.path.expanduser(path))", "def expand(self, path, dic):\n if dic: path = path % dic\n return os.path.normpath(os.path.expandvars(os.path.expanduser(path)))", "def _app_config_file() -> str:\n if 'AISCALATOR_HOME' in os.environ:\n home = os.environ['AISCALATOR_HOME']\n file = os.path.join(home, \"config\", \"aiscalator.conf\")\n if os.path.exists(file):\n return file\n return os.path.join(os.path.expanduser(\"~\"), '.aiscalator',\n 'config', 'aiscalator.conf')", "def _github_config(self, config_file_name):\n home = os.path.abspath(os.environ.get('HOME', ''))\n config_file_path = os.path.join(home, config_file_name)\n return config_file_path", "def getConfigPath():\n if sys.platform == 'linux':\n configpath = os.path.normpath(os.path.expanduser('~/.config/phobos'))\n elif sys.platform == 'darwin':\n configpath = os.path.normpath(os.path.expanduser('~/Library/Application Support/phobos'))\n elif sys.platform == 'win32':\n configpath = os.path.normpath(os.path.expanduser('~/AppData/Roaming/phobos'))\n else:\n configpath = 'ERROR: {0} not supported,'.format(sys.platform)\n return configpath", "def _get_config_filepath(self):\n\t\tif self.configfilepath is None:\n\t\t\treturn os.path.join(self.workdir, \"config.txt\")\n\t\telse:\n\t\t\treturn self.configfilepath", "def expand_path(path):\n\n return os.path.abspath(os.path.expanduser(os.path.expandvars(path)))", "def expand_path(path):\n\n return os.path.abspath(os.path.expanduser(os.path.expandvars(path)))", "def default_configfile(self):\r\n config = None\r\n for path in self.searchpaths:\r\n if os.path.exists(path):\r\n config = path\r\n break\r\n if config is None and self.require_configfile:\r\n self.usage('No config file found at default paths (%s); '\r\n 'use the -c option to specify a config file '\r\n 'at a different path' % ', '.join(self.searchpaths))\r\n return config", "def full_path(self, config_path=CONFIG_PATH):\n return os.path.join(config_path, self.filename)", "def platform_config_dir():\n if POSIX: # nocover\n dpath_ = os.environ.get('XDG_CONFIG_HOME', '~/.config')\n elif DARWIN: # nocover\n dpath_ = '~/Library/Application Support'\n elif WIN32: # nocover\n dpath_ = os.environ.get('APPDATA', '~/AppData/Roaming')\n else: # nocover\n raise NotImplementedError('Unknown Platform %r' % (sys.platform,))\n dpath = normpath(expanduser(dpath_))\n return dpath", "def default_path(path):\r\n while path[len(path) - 1] == '/' or path[len(path) - 1] == '\\\\':\r\n path = path[0:-1]\r\n\r\n return path", "def _prepend_pkg_config_path(path: str) -> str:\n pkgconf = os.environ.get('PKG_CONFIG_PATH')\n if pkgconf:\n return f'{path}{os.path.pathsep}{pkgconf}'\n return path" ]
[ "0.62735844", "0.62659293", "0.6136765", "0.61052674", "0.6024256", "0.60104406", "0.5977486", "0.59363705", "0.59358454", "0.5918923", "0.5909648", "0.58821416", "0.5881801", "0.58483577", "0.5808059", "0.58060867", "0.57793844", "0.5775549", "0.5765239", "0.571692", "0.5692899", "0.56716764", "0.5666837", "0.56644726", "0.56644726", "0.56462306", "0.5640255", "0.56310016", "0.56046987", "0.5603158" ]
0.82842994
0
Create an instance of the Proxmox API.
def connection_proxmox(config): return ProxmoxAPI( config["host"], user=config["user"], password=config["password"], verify_ssl=config["verify_ssl"], )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, **kwargs):\n\n builder_kwargs = {}\n\n if \"token\" in kwargs and str(kwargs[\"token\"]) != \"None\":\n\n # If there is a token use it along with the specified proxy details if specified\n config = ApiConfiguration(\n api_url=kwargs.get(\"api_url\", None),\n certificate_filename=kwargs.get(\"certificate_filename\", None),\n proxy_config=ProxyConfig(\n address=kwargs.get(\"proxy_url\", None),\n username=kwargs.get(\"proxy_username\", None),\n password=kwargs.get(\"proxy_password\", None),\n ) if kwargs.get(\"proxy_url\", None) is not None else None,\n app_name=kwargs.get(\"app_name\", None)\n )\n\n builder_kwargs[\"api_configuration\"] = config\n builder_kwargs[\"token\"] = kwargs[\"token\"]\n\n # Otherwise use a secrets file if it exists\n builder_kwargs[\"api_secrets_filename\"] = kwargs.get(\"api_secrets_filename\", None)\n\n # add the correlation id if specified\n builder_kwargs[\"correlation_id\"] = kwargs.get(\"correlation_id\", None)\n\n # add the id provider response handler if specified\n builder_kwargs[\"id_provider_response_handler\"] = kwargs.get(\"id_provider_response_handler\", None)\n\n builder_kwargs[\"tcp_keep_alive\"] = kwargs.get(\"tcp_keep_alive\", False)\n\n # Call the client builder, this will result in using either a token, secrets file or environment variables\n self.api_client = ApiClientBuilder.build(**builder_kwargs)", "def create_api(self):\n return DJinnAPI(djenkins=self.dj, pipeline_results=self.db)", "def __init__(self, zosapi, *args, **kwargs):\n\n self.zosapi = zosapi", "def __init__(self, zosapi, *args, **kwargs):\n\n self.zosapi = zosapi", "def __init__(self, *args, **kwargs):\n # view non-shortened diffs\n self.maxDiff = None\n\n os.environ['APPLICATION_ID'] = APP_ID\n apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()\n # memcache stub\n cache_stub = memcache_stub.MemcacheServiceStub()\n apiproxy_stub_map.apiproxy.RegisterStub('memcache', cache_stub)\n # user stub\n user_stub = UserServiceStub()\n apiproxy_stub_map.apiproxy.RegisterStub('user', user_stub)", "def __init__(self):\n self.api = Api(consumer_key=credentials[\"consumer_key\"],\n consumer_secret=credentials[\"consumer_secret\"],\n access_token_key=credentials[\"access_token_key\"],\n access_token_secret=credentials[\"access_token_secret\"])", "def repository_create_proxy():\n pass", "def create_interface(domain=None):\n secret_token = getpass(prompt=\"DigitalOcean API Token: \")\n interface = MachineSetup(secret_token, domain)\n return interface", "def CreateProxy(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"CreateProxy\", params, headers=headers)\n response = json.loads(body)\n model = models.CreateProxyResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def __init__(self, api=None, properties=None):\n if not api is None:\n self.api = api", "def create(cls, api_key: Optional[str] = None, **params) -> Any:\n requestor = Requestor(local_api_key=api_key)\n url = cls.class_url()\n wrapped_params = {cls.snakecase_name(): params}\n response, api_key = requestor.request(method=RequestMethod.POST, url=url, params=wrapped_params)\n return convert_to_easypost_object(response=response, api_key=api_key)", "def make_client(instance):\r\n neutron_client = utils.get_client_class(\r\n API_NAME,\r\n instance._api_version[API_NAME],\r\n 
API_VERSIONS,\r\n )\r\n instance.initialize()\r\n url = instance._url\r\n url = url.rstrip(\"/\")\r\n if '2.0' == instance._api_version[API_NAME]:\r\n client = neutron_client(username=instance._username,\r\n tenant_name=instance._tenant_name,\r\n password=instance._password,\r\n region_name=instance._region_name,\r\n auth_url=instance._auth_url,\r\n endpoint_url=url,\r\n token=instance._token,\r\n auth_strategy=instance._auth_strategy,\r\n insecure=instance._insecure,\r\n ca_cert=instance._ca_cert)\r\n return client\r\n else:\r\n raise exceptions.UnsupportedVersion(_(\"API version %s is not \"\r\n \"supported\") %\r\n instance._api_version[API_NAME])", "def __init__(self, verbosity=1):\n self.verbosity = verbosity\n self.project_slug = settings.TRANSIFEX[\"PROJECT_SLUG\"]\n self.organization_slug = settings.TRANSIFEX[\"ORGANIZATION_SLUG\"]\n\n api_token = settings.TRANSIFEX[\"API_TOKEN\"]\n auth = TransifexAuthRequests(token=api_token)\n self.api_v20 = requests.Session()\n self.api_v20.auth = auth\n\n self.api_v25 = requests.Session()\n self.api_v25.auth = auth", "def setUp(self):\n self.clnt = CvpClient()\n nodes = [\"1.1.1.1\"]\n self.clnt.nodes = nodes\n self.clnt.node_cnt = len(nodes)\n self.clnt.node_pool = cycle(nodes)\n self.api = CvpApi(self.clnt)", "def __init__(__self__, *,\n addon_configs: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n api_metadata_properties: Optional[pulumi.Input['GatewayApiMetadataPropertiesArgs']] = None,\n apm_types: Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'ApmType']]]]] = None,\n client_auth: Optional[pulumi.Input['GatewayPropertiesClientAuthArgs']] = None,\n cors_properties: Optional[pulumi.Input['GatewayCorsPropertiesArgs']] = None,\n environment_variables: Optional[pulumi.Input['GatewayPropertiesEnvironmentVariablesArgs']] = None,\n https_only: Optional[pulumi.Input[bool]] = None,\n public: Optional[pulumi.Input[bool]] = None,\n resource_requests: Optional[pulumi.Input['GatewayResourceRequestsArgs']] = None,\n sso_properties: Optional[pulumi.Input['SsoPropertiesArgs']] = None):\n if addon_configs is not None:\n pulumi.set(__self__, \"addon_configs\", addon_configs)\n if api_metadata_properties is not None:\n pulumi.set(__self__, \"api_metadata_properties\", api_metadata_properties)\n if apm_types is not None:\n pulumi.set(__self__, \"apm_types\", apm_types)\n if client_auth is not None:\n pulumi.set(__self__, \"client_auth\", client_auth)\n if cors_properties is not None:\n pulumi.set(__self__, \"cors_properties\", cors_properties)\n if environment_variables is not None:\n pulumi.set(__self__, \"environment_variables\", environment_variables)\n if https_only is None:\n https_only = False\n if https_only is not None:\n pulumi.set(__self__, \"https_only\", https_only)\n if public is None:\n public = False\n if public is not None:\n pulumi.set(__self__, \"public\", public)\n if resource_requests is not None:\n pulumi.set(__self__, \"resource_requests\", resource_requests)\n if sso_properties is not None:\n pulumi.set(__self__, \"sso_properties\", sso_properties)", "def api_factory(config):\n return SdkApi(config.get('apiKey'),\n sdk_api_base_url=config['sdkApiBaseUrl'],\n events_api_base_url=config['eventsApiBaseUrl'],\n split_sdk_machine_name=config['splitSdkMachineName'],\n split_sdk_machine_ip=config['splitSdkMachineIp'],\n connect_timeout=config['connectionTimeout'],\n read_timeout=config['readTimeout'])", "def __init__(self, api_key=None, api_secret=None):\n\n self.session = requests.session()\n headers = {'Accept': 
'application/json', 'User-Agent': 'binance/python'}\n\n if api_key is not None and api_secret is not None:\n self.set_api_key(api_key, api_secret)\n headers['X-MBX-APIKEY'] = self.API_KEY\n\n self.session.headers.update(headers)\n\n # init DNS and SSL cert\n self.ping()", "def __init__(self,ip,user,pwd):\r\n self.user = user\r\n self.pwd = pwd\r\n self.ip = ip\r\n self.api_endpoint = 'https://%s/api/json/v2/types/' % self.ip\r\n\r\n self.clusters = self._get_objects(\"clusters\")\r\n self.xms = self._get_objects(\"xms\")", "def __init__(self, url, auth_token, xapi_version=\"1.0.3\"):\n self.url = url\n self.auth_token = auth_token\n self.xapi_version = xapi_version", "def __create_api(self):\n consumer_key = os.environ.get(\"CONSUMER_KEY\")\n consumer_secret = os.environ.get(\"CONSUMER_SECRET\")\n access_token = os.getenv(\"ACCESS_TOKEN\")\n access_token_secret = os.environ.get(\"ACCESS_TOKEN_SECRET\")\n\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tweepy.API(\n auth,\n compression=True,\n wait_on_rate_limit=True,\n wait_on_rate_limit_notify=True,\n )\n try:\n api.verify_credentials()\n except Exception as e:\n logging.error(\"Error creating API\", exc_info=True)\n raise e\n return api", "def __init__(self, app_key=None, app_sid=None, base_url=None,\n api_version=None, debug=False, proxy=None):\n configuration = Configuration(app_key=app_key,\n app_sid=app_sid,\n base_url=base_url,\n api_version=api_version,\n debug=debug,\n\t\t\t\t\t\t\t\t\t proxy=proxy)\n self.api_client = ApiClient(configuration)", "def __init__(self):\n if Config.USEMEMCACHED is True:\n self.mc = MCache(server = Config.MEMCACHED_SERVER,\n username = Config.MEMCACHED_USERNAME,\n password = Config.MEMCACHED_PASSWORD)\n else:\n self.mc = None\n self.api = DozensApi()", "def setup_class(cls):\n initialize(api_key=API_KEY, app_key=APP_KEY, api_host=API_HOST)", "def __init__(self, api_use=False):\n self.api_use = api_use", "def __init__(self, consumer_key, consumer_secret, access_token, access_token_secret):\n self.api = self.getAPI(consumer_key, consumer_secret, access_token, access_token_secret)", "def __init__(self, request: object) -> None:\n super().__init__({}, request, URL, Api)", "def create_api_gateway(args):\n return create_pool(_create_api_gateway_from_meta, args, 1)", "def createWrapper():\n\n # read properties file and get MANO name and IP\n config = RawConfigParser()\n config.read(\"../../coreMano/coreMano.properties\")\n name = config.get(\"CoreMano\", \"coreMano.name\")\n host_ip = config.get(\"CoreMano\", \"coreMano.ip\")\n\n # instanciate and return the MANO\n if name == \"osm\":\n mano = OsmWrapper(name, host_ip)\n if name == \"cloudify\":\n mano = CloudifyWrapper(name, host_ip)\n return mano", "def __init__(self):\n self.config = get_config()\n self.log = get_logger(self)\n\n self.factory = SugarServerFactory(\"wss://*:5505\")\n self.factory.protocol = SugarServerProtocol\n\n self.console_factory = SugarConsoleServerFactory(\"wss://localhost:5507\")\n self.console_factory.protocol = SugarConsoleServerProtocol\n\n self.api = APIService(self.config)", "def __init__(self):\n super().__init__()\n self._api_url = API_BASE_URL\n self._api_params = API_BASE_PARAMS.copy()" ]
[ "0.62811315", "0.6246885", "0.6237589", "0.6237589", "0.6151815", "0.61117625", "0.60022", "0.595346", "0.5949549", "0.5943282", "0.590346", "0.5862528", "0.58615875", "0.58429396", "0.5837407", "0.5835226", "0.5828535", "0.5819076", "0.5808679", "0.5795841", "0.5793958", "0.57522744", "0.57291293", "0.57239825", "0.5715102", "0.5708786", "0.5704111", "0.5700367", "0.56825024", "0.56791013" ]
0.694525
0
List resources by pools
def list_resources(px, pools): result = [] for pool in pools: for i in px.pools.get(pool)["members"]: result.append( { "pool": pool, "vmid": i["vmid"], "name": i["name"], "status": i["status"], "type": i["type"], } ) return result, { "pool": "pool(s)", "vmid": "vmid", "name": "name", "status": "status", "type": "type", }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cmd_list_resources(config=DEFAULT_LINUX_PATH):\n config = load_config_file(expand_config_path(config))\n px = connection_proxmox(config[\"proxmox\"])\n try:\n if config[\"pools\"]:\n l, h = list_resources(px, config[\"pools\"])\n return tabulate(l, h)\n else:\n print(\"Dick 'pools' is empty\")\n except KeyError:\n print(\"Missing 'pools' dict in config file\")\n sys.exit(1)", "def test_list_pools_sort(self):\r\n resources = \"pools\"\r\n cmd = pool.ListPool(test_cli20.MyApp(sys.stdout), None)\r\n self._test_list_resources(resources, cmd,\r\n sort_key=[\"name\", \"id\"],\r\n sort_dir=[\"asc\", \"desc\"])", "def list_resource_pool(client, private_cloud, location):\n return client.list(location, private_cloud)", "def pool_list(mnode):\n cmd = \"gluster pool list\"\n return g.run(mnode, cmd)", "def pool_list(call=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"This function must be called with -f, --function argument.\"\n )\n ret = {}\n session = _get_session()\n pools = session.xenapi.pool.get_all()\n for pool in pools:\n pool_record = session.xenapi.pool.get_record(pool)\n ret[pool_record[\"name_label\"]] = pool_record\n return ret", "def getPools(self):\n data = self.connect('get','pools',None)\n return data", "def show_resource_pool(client, private_cloud, resource_pool, location):\n return client.get(location, private_cloud, resource_pool)", "def get_pools():\n poolinfostr = fork_and_get_output(\"zpool list -H -o all\".split())\n header = get_zpool_header()\n poolinfo = poolinfostr.splitlines()\n poolobjs = []\n for poolstr in poolinfo:\n poolobjs.append(DataZFS(poolstr, header, 'pool'))\n return poolobjs", "def list_device_pools(arn=None, type=None, nextToken=None):\n pass", "def pool_list(request, format=None):\n if request.method == 'GET':\n pools = storage.models.Pool.objects.all()\n serializer = serializers.PoolSerializer(pools)\n return Response(serializer.data)\n\n elif request.method == 'POST':\n serializer = serializers.PoolSerializer(data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def _list_pool_vm(args):\n _logger.debug('_list_pool_vm')\n #\n #\n _data_struct = {'name': {'head': 'Name', 'func': 'name', 'type': 'str'},\n 'uuid': {'head': 'UUID', 'func': 'UUIDString', 'type': 'str'},\n 'autostart': {'head': 'Autostart', 'func': 'autostart', 'type': 'yesno', 'convert': get_yesno},\n 'active': {'head': 'Active', 'func': 'isActive', 'type': 'yesno', 'convert': get_yesno},\n 'persistent': {'head': 'Persistent', 'func': 'isPersistent', 'type': 'yesno', 'convert': get_yesno},\n 'volumes': {'head': 'Volumes', 'func': 'numOfVolumes', 'type': 'int'},\n 'state': {'head': 'State', 'func': 'info', 'type': 'list', 'index': 0, 'convert': get_pool_state},\n 'capacity': {'head': 'Capacity', 'func': 'info', 'type': 'list', 'index': 1, 'convert': format_size},\n 'allocation': {'head': 'Allocation', 'func': 'info', 'type': 'list', 'index': 2, 'convert': format_size},\n 'available': {'head': 'Available', 'func': 'info', 'type': 'list', 'index': 3, 'convert': format_size},\n 'type': {'head': 'Type', 'func': None, 'type': 'str'}\n }\n #\n # get the pools\n _sps_fs, _sps_netfs = _get_pools()\n _sps = _sps_fs + _sps_netfs\n if len(_sps) == 0:\n _logger.info('No pools found.')\n return\n #\n # initialise the column widths\n _data_struct = initalise_column_lengths(_data_struct)\n #\n # column cantains only 'fs' or 
'net fs'\n _data_struct['type']['len'] = 6\n #\n # format data and determine optimal length of fields.\n pool_data = list()\n for _sp in _sps:\n _sp_data = dict()\n for key, value in _data_struct.items():\n value_data = get_value_data(_sp, _data_struct[key])\n _sp_data[key] = value_data[0]\n val_length = value_data[1]\n _data_struct[key]['collen'] = max(val_length, _data_struct[key]['collen'])\n _sp_data['type'] = 'fs' if _sp in _sps_fs else 'net fs'\n pool_data.append(_sp_data)\n #\n # compose data\n _title = 'VM pool Information:'\n _columns = list()\n for key, value in _data_struct.items():\n _columns.append([value['head'], value['collen']+2, key])\n #\n printerKlass = get_row_printer_impl(args.output_mode)\n printer = printerKlass(title=_title, columns=_columns)\n printer.printHeader()\n #\n # print\n for _sp in pool_data:\n printer.rowBreak()\n printer.printRow(_sp)\n printer.printFooter()\n printer.finish()\n return", "def get_pools():\n pools = ch_core.hookenv.action_get('pools')\n if pools:\n return [p.strip() for p in pools.split(',')]\n return None", "def _get_pools():\n conn = libvirt.open(None)\n try:\n _spsfs = list()\n _spsnetfs = list()\n if conn:\n # file system pool\n _spsfs = conn.listAllStoragePools(flags=128)\n # nfs pool\n _spsnetfs = conn.listAllStoragePools(flags=256)\n else:\n _logger.error('Failed to contact hypervisor')\n raise ValueError('Failed to contact hypervisor.')\n except libvirt.libvirtError as e:\n _logger.error('Failed to collect vm pool data: %s', str(e))\n raise ValueError('Failed to collect vm pool data.') from e\n finally:\n conn.close()\n return _spsfs, _spsnetfs", "def pools(self, summary=True, tags_intersect=None, tags=None):\n return list(self.all_pools(summary=summary, tags=tags, tags_intersect=tags_intersect))", "def handle_cluster_pools(self, request):\n \"\"\"\n @api {get} /cluster/pools Get cluster pools\n @apiName GetClusterPools\n @apiGroup Cluster\n @apiVersion 1.0.0\n\n @apiDescription List pools and nodes registered into each.\n\n @apiSuccess {String[]} pool List of nodes registered into the pool.\n\n @apiSuccessExample {json} Example response:\n {\n \"pool1\": [\"node1\", \"node2\"],\n \"pool2: [\"node1\", \"node3\"]\n }\n \"\"\"\n\n headers = {\n 'Content-Type': 'application/javascript',\n 'Access-Control-Allow-Origin': '*'\n }\n\n return HTTPReply(body = json.dumps(self.cluster.pools), headers = headers)", "def show_pool(self, pool, **_params):\r\n return self.get(self.pool_path % (pool), params=_params)", "def collect_resources_list(namespace, output_dir, k8s_cli, mode):\n selector = \"\"\n if mode == MODE_RESTRICTED:\n selector = '--selector=\"{}\"'.format(OPERATOR_LABEL)\n collect_helper(output_dir,\n cmd=\"{} get all -o wide -n {} {}\".format(k8s_cli, namespace, selector),\n file_name=\"resources_list\",\n resource_name=\"resources list\",\n namespace=namespace)", "def get_pool_status():\n pools_status = split_status_pools(fork_and_get_output(\"zpool status\".split()))\n pools = []\n for p in pools_status:\n pools.append(status.PoolStatus(p))\n return pools", "def get_pool(self, name, dc, cluster):\n cluster_obj = self.get_cluster(cluster, dc)\n for rp in cluster_obj.resourcePool.resourcePool:\n if rp.name == name:\n return rp", "def test_list_pools(mocker, api: API, account: Account, raw_pools):\n api.candlepin.get_pools.return_value = raw_pools\n\n apply_mapping_spy = mocker.patch(\n \"ethel.account.apply_mapping\", side_effect=apply_mapping\n )\n pools = account.list_pools()\n assert len(pools) == len(raw_pools)\n\n 
expected_call_count = len(account.POOL_ATTRIBUTES_MAPPING) * len(raw_pools)\n\n assert apply_mapping_spy.call_count == expected_call_count", "def get(self, request, pool_id):\n conn = get_sdk_connection(request)\n pool = conn.load_balancer.find_pool(pool_id)\n pool = _get_sdk_object_dict(pool)\n\n if request.GET.get('includeChildResources'):\n resources = {}\n resources['pool'] = pool\n\n if pool.get('members'):\n member_list = _sdk_object_to_list(\n conn.load_balancer.members(pool_id))\n resources['members'] = member_list\n\n if pool.get('health_monitor_id'):\n monitor_id = pool['health_monitor_id']\n monitor = conn.load_balancer.find_health_monitor(\n monitor_id)\n monitor = _get_sdk_object_dict(monitor)\n resources['monitor'] = monitor\n\n return resources\n else:\n return pool", "def list_pools(self, retrieve_all=True, **_params):\r\n # Pass filters in \"params\" argument to do_request\r\n return self.list('pools', self.pools_path, retrieve_all,\r\n **_params)", "def _add_resource_descriptions_to_pools(self, meta_list):\r\n if not meta_list:\r\n return\r\n\r\n for meta in meta_list:\r\n getattr(resources, meta.resource_type).add(meta)", "def get_resources(resource_client) -> list:\n resource_list = []\n paginator = resource_client.get_paginator(BOTO3_LIST_FUNCTION)\n pages = paginator.paginate()\n for page in pages:\n # Your going to have to look through the response and append the correct value to the list\n resource = page[\"something\"]\n resource_list = resource_list + resource\n return resource_list", "def _get_objects(cls, lb, names, minimal=False):\n\n if not names:\n return []\n\n pools = cls.factory.create(names, lb)\n\n if not minimal:\n active_member_count = cls._lbcall(lb, 'get_active_member_count',\n names)\n description = cls._lbcall(lb, 'get_description', names)\n lbmethod = cls._lbcall(lb, 'get_lb_method', names)\n members = cls._lbcall(lb, 'get_member', names)\n minimum_active_member = cls._lbcall(lb, 'get_minimum_active_member',\n names)\n minimum_up_member = cls._lbcall(lb, 'get_minimum_up_member',\n names)\n slow_ramp_time = cls._lbcall(lb, 'get_slow_ramp_time', names)\n statistics = cls._lbcall(lb, 'get_statistics', names)\n\n for idx,pool in enumerate(pools):\n pool._active_member_count = active_member_count[idx]\n pool._description = description[idx]\n pool._lbmethod = lbmethod[idx]\n pool._minimum_active_member = minimum_active_member[idx]\n pool._minimum_up_member = minimum_up_member[idx]\n pool._slow_ramp_time = slow_ramp_time[idx]\n pool._statistics = statistics['statistics'][idx]\n\n pool._members = f5.PoolMember._get_objects(lb, [pool],\n [members[idx]], minimal=True)\n\n return pools", "def test_get_resource_group_list(self):\n pass", "def get_pool_ids(host=None):\n cmd = utils.XMS_CLI_HEADER + \"-f json pool list\"\n print cmd\n pool_ids = []\n ret = utils.execute_cmd_in_host(cmd, host)\n if ret[2] != 0 or isinstance(ret[0], dict):\n print \"[Error] Failed to get pool info. Error message: [{err}]\".format(err=ret[1])\n return -1, pool_ids\n try:\n pool_info = json.loads(ret[0])\n pools = pool_info[\"pools\"]\n for p in pools:\n pool_ids.append(p[\"id\"])\n except Exception as e:\n print \"[Error] error message is: \" + e.message\n return -1, pool_ids\n return 0, pool_ids", "def get_pool_list(mnode):\n ret, out, _ = g.run(mnode, \"gluster pool list --xml\", log_level='DEBUG')\n if ret != 0:\n g.log.error(\"Failed to execute 'pool list' on node %s. 
\"\n \"Hence failed to parse the pool list.\", mnode)\n return None\n\n try:\n root = etree.XML(out)\n except etree.ParseError:\n g.log.error(\"Failed to parse the gluster pool list xml output.\")\n return None\n\n pool_list_list = []\n for peer in root.findall(\"peerStatus/peer\"):\n peer_dict = {}\n for element in peer.getchildren():\n if element.tag == \"hostname\" and element.text == 'localhost':\n element.text = mnode\n if element.tag == \"hostnames\":\n hostnames_list = []\n for hostname in element.getchildren():\n hostnames_list.append(hostname.text)\n element.text = hostnames_list\n peer_dict[element.tag] = element.text\n\n pool_list_list.append(peer_dict)\n return pool_list_list", "def get_pools():\n command = 'zpool list -H'\n try:\n p = subprocess.Popen(command.split(' '), stdout=subprocess.PIPE)\n except OSError:\n raise Exception('No ZFS tools found!')\n zpout, zperr = p.communicate()\n if p.returncode:\n raise Exception(\"Error executing '%s': %d\" % (command, p.returncode))\n return [line.split('\\t', 1)[0] for line in zpout.split('\\n') if line]", "def node_pools(self) -> Sequence['outputs.NodePoolResponse']:\n return pulumi.get(self, \"node_pools\")" ]
[ "0.7661837", "0.7562814", "0.73195827", "0.71622837", "0.71473014", "0.6853682", "0.6808259", "0.6700441", "0.6636041", "0.6562624", "0.6485822", "0.63801914", "0.6367146", "0.6345225", "0.6284152", "0.62583864", "0.6168935", "0.6146001", "0.6121617", "0.6098948", "0.60779124", "0.6075847", "0.6006043", "0.5994728", "0.5971395", "0.5952988", "0.59407276", "0.5911494", "0.590045", "0.5876994" ]
0.8344572
0
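A minimal usage sketch for the list_resources document above, assuming the proxmoxer and tabulate packages (the row itself only shows an already-connected px object; the companion cmd_list_resources negative suggests both libraries). The host name, credentials and pool names below are placeholders, not values from the dataset:

```python
# Hypothetical wiring for list_resources(px, pools); all connection details are made up.
from proxmoxer import ProxmoxAPI   # assumed client library; the row only shows `px`
from tabulate import tabulate

px = ProxmoxAPI("pve.example.com", user="root@pam",
                password="secret", verify_ssl=False)

rows, headers = list_resources(px, ["gpu-pool", "lab"])
print(tabulate(rows, headers))     # mirrors how cmd_list_resources renders the result
```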
Switch a virtual machine to use a single PCI resource such as a GPU
def cmd_switch_vm(name, config=DEFAULT_LINUX_PATH): config = load_config_file(expand_config_path(config)) px = connection_proxmox(config["proxmox"]) resources, _ = list_resources(px, config["pools"]) name_int = -1 try: name_int = int(name) except Exception as e: print(e) item = list(filter(lambda i: i["vmid"] == name_int or i["name"] == name, resources)) if item: proxmox_pci_switcher(px, item[0]) else: print(f"resource: '{name}' not found.") sys.exit(1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_gpu_device_if_present():\n d = dpctl.SyclDevice(\"gpu,cpu\")\n print(\"Selected \" + (\"GPU\" if d.is_gpu else \"CPU\") + \" device\")", "def create_gpu_device():\n d1 = dpctl.SyclDevice(\"gpu\")\n d2 = dpctl.select_gpu_device()\n assert d1 == d2\n print_device(d1)\n return d1", "def set_device(sys_device_id):\n device_id = -1\n cuda = (sys_device_id != -1)\n if cuda:\n # CUDA_VISIBLE_DEVICE is a list, and device_id is the index of its members.\n import os\n os.environ['CUDA_VISIBLE_DEVICES'] = str(sys_device_id)\n device_id = 0\n TVT = TransferVarTensor(device_id)\n TMO = TransferModulesOptims(device_id)\n return TVT, TMO", "def vga_session(self):\n gpu_driver = None\n if self.user['gpu_driver'] is True:\n\n # NVIDIA controller - append packages\n if 'nvidia' in self.user['vga_controller'].lower():\n\n if self.user['gpu_proprietary'] is True:\n hardvideo = self.packages['hardvideo'][3]\n\n if self.user['kernel'] == 'linux':\n gpu_driver = self.packages['gpu_driver'][3]\n\n elif self.user['kernel'] == 'linux-lts':\n gpu_driver = self.packages['gpu_driver'][4]\n\n else:\n gpu_driver = self.packages['gpu_driver'][5]\n\n else:\n gpu_driver = self.packages['gpu_driver'][2]\n hardvideo = self.packages['hardvideo'][2]\n\n # AMD Controller - append packages\n elif ('ATI' in self.user['vga_controller']) or \\\n ('AMD' in self.user['vga_controller']):\n\n gpu_driver = self.packages['gpu_driver'][1]\n hardvideo = self.packages['hardvideo'][1]\n\n # Intel controller - append packages\n elif 'intel' in self.user['vga_controller'].lower():\n gpu_driver = self.packages['gpu_driver'][0]\n hardvideo = self.packages['hardvideo'][0]\n\n # Unreconized controller - append packages\n else:\n gpu_driver = self.packages['gpu_driver'][6]\n hardvideo = self.packages['hardvideo'][4]\n\n # Set model with corresponding driver\n self.user['gpu'] = {'model': self.user['vga_controller'],\n 'driver': gpu_driver,\n 'hardvideo': self.user['hardvideo']}\n\n # Set hardware video acceleration\n if self.user['hardvideo'] is True:\n self.user['gpu']['hardvideo'] = hardvideo", "def test_update_pci_switch(self):\n pass", "def test_patch_pci_switch(self):\n pass", "def __init__(self, machine):\n super().__init__(machine)\n self.features['has_i2c'] = True", "def __init__(self, machine):\n super().__init__(machine)\n self.features['has_i2c'] = True", "def set_gpu(gpus):\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = gpus", "def load_device():", "def set_device(in_arg): \n \n return torch.device(\"cuda\" if torch.cuda.is_available() and in_arg.gpu == 1 else \"cpu\")", "def setup_device(n_gpus: int) -> object:\n if n_gpus >= 1 and torch.cuda.is_available():\n LOG.info('\\n CUDA is available! 
using GPU...')\n return torch.device('cuda')\n else:\n LOG.info('\\n Using CPU...')\n return torch.device('cpu')", "def move2gpu(self):\n if self.generator.backend.upper() == \"LOOPY\":\n try:\n import pyopencl as cl\n import pyopencl.array # pylint: disable=unused-variable\n from .context import queue\n except ImportError:\n raise ImportError(\"Please install loo.py\")\n\n self.rhs = cl.array.to_device(queue, self.rhs)\n if hasattr(self, \"s\"):\n self.s = cl.array.to_device(\n queue, self.s\n ) # pylint: disable=attribute-defined-outside-init\n self.istore = cl.array.to_device(queue, self.istore)\n for i in range(len(self.iload)):\n self.iload[i] = cl.array.to_device(queue, self.iload[i])", "def device_placement(self):\n if is_tf_available():\n import tensorflow as tf\n with tf.device('/CPU:0' if self.device == -1 else '/device:GPU:{}'.format(self.device)):\n yield\n else:\n import torch\n if self.device >= 0:\n torch.cuda.set_device(self.device)\n\n yield", "def try_gpu(i=0): #@save\n if len(tf.config.experimental.list_physical_devices('GPU')) >= i + 1:\n return tf.device(f'/GPU:{i}')\n return tf.device('/CPU:0')", "def setGPU(state):\n\n\timport tensorflow as tf\n\tfrom keras import backend as K\n\n\tcheckGPU()\n\n\tnum_cores = 1\n\tnum_CPU = 1\n\tnum_GPU = 0\n\tif state:\n\t\tnum_GPU = 1\n\n\tconfig = tf.ConfigProto(intra_op_parallelism_threads=num_cores,\\\n\t inter_op_parallelism_threads=num_cores, allow_soft_placement=True,\\\n\t device_count = {'CPU' : num_CPU, 'GPU' : num_GPU})\n\tsession = tf.Session(config=config)\n\tK.set_session(session)", "def set_device(gpu_arg):\n\n dev = 'cpu'\n if gpu_arg and torch.cuda.is_available():\n dev = 'cuda'\n elif gpu_arg:\n print('Not gpu found. Using cpu instead.') \n\n return torch.device(dev)", "def set_current_device_id():\n import paddle\n curr_dev = paddle.device.get_device()\n select_gpu = os.getenv(\"FLAGS_selected_gpus\", \"0\")\n paddle.set_flags({\n 'FLAGS_selected_gpus': os.getenv(\"FLAGS_selected_gpus\", \"0\")\n })\n if \"gpu\" in curr_dev and select_gpu != curr_dev.split(\":\")[-1]:\n paddle.set_device(\"gpu:\" + select_gpu)\n\n curr_dev_id = paddle.framework.core.get_cuda_current_device_id()\n if \"gpu\" in curr_dev and select_gpu != str(curr_dev_id):\n paddle.zeros([])", "def SetGPU(id):\n global option\n option['device_id'] = id", "def set_device(device, backend='autograd'):\n if backend == 'autograd':\n return None\n elif backend == 'pytorch':\n try:\n tc.cuda.set_device(device)\n except:\n pass", "def _setup_io_devices(self) -> None:\n # Add PCI\n self.platform.pci_host.pio = self.iobus.mem_side_ports\n\n # Add Ethernet card\n self.ethernet = IGbE_e1000(\n pci_bus=0, pci_dev=0, pci_func=0, InterruptLine=1, InterruptPin=1\n )\n\n self.ethernet.host = self.platform.pci_host\n self.ethernet.pio = self.iobus.mem_side_ports\n self.ethernet.dma = self.iobus.cpu_side_ports\n\n if self.get_cache_hierarchy().is_ruby():\n for device in self._off_chip_devices + self._on_chip_devices:\n device.pio = self.iobus.mem_side_ports\n\n else:\n for device in self._off_chip_devices:\n device.pio = self.iobus.mem_side_ports\n for device in self._on_chip_devices:\n device.pio = self.get_cache_hierarchy().get_mem_side_port()\n\n self.bridge = Bridge(delay=\"10ns\")\n self.bridge.mem_side_port = self.iobus.cpu_side_ports\n self.bridge.cpu_side_port = (\n self.get_cache_hierarchy().get_mem_side_port()\n )\n self.bridge.ranges = [\n AddrRange(dev.pio_addr, size=dev.pio_size)\n for dev in self._off_chip_devices\n ]\n\n # PCI\n 
self.bridge.ranges.append(AddrRange(0x2F000000, size=\"16MB\"))\n self.bridge.ranges.append(AddrRange(0x30000000, size=\"256MB\"))\n self.bridge.ranges.append(AddrRange(0x40000000, size=\"512MB\"))", "def test_feat_parity_msr_arch_cap(\n vm_builder, inst_set_cpu_template, microvm, guest_kernel, disk\n):\n vm = create_vm(vm_builder, inst_set_cpu_template, microvm, guest_kernel, disk)\n vm.start()\n\n ssh_conn = net_tools.SSHConnection(vm.ssh_config)\n arch_capabilities_addr = \"0x10a\"\n rdmsr_cmd = f\"rdmsr {arch_capabilities_addr}\"\n _, stdout, stderr = ssh_conn.execute_command(rdmsr_cmd)\n\n if inst_set_cpu_template == \"T2CL\":\n assert stderr.read() == \"\"\n actual = int(stdout.read().strip(), 16)\n # fmt: off\n expected = (\n (1 << 0) | # RDCL_NO\n (1 << 1) | # IBRS_ALL\n (1 << 3) | # SKIP_L1DFL_VMENTRY\n (1 << 5) | # MDS_NO\n (1 << 6) | # IF_PSCHANGE_MC_NO\n (1 << 7) # TSX_CTRL\n )\n # fmt: on\n assert actual == expected, f\"{actual=:#x} != {expected=:#x}\"\n elif inst_set_cpu_template == \"T2A\":\n # IA32_ARCH_CAPABILITIES shall not be available\n assert stderr.read() != \"\"", "def test_get_node_hardware_fast(self):\n pass", "def test_patch_pci_device(self):\n pass", "def test_update_pci_device(self):\n pass", "def try_gpu(i=0):\n if torch.cuda.device_count() >= i + 1:\n return torch.device(f'cuda:{i}')\n return torch.device('cpu')", "def try_gpu(i=0):\n if torch.cuda.device_count() >= i + 1:\n return torch.device(f'cuda:{i}')\n return torch.device('cpu')", "def try_gpu(i=0):\n if torch.cuda.device_count() >= i + 1:\n return torch.device(f'cuda:{i}')\n return torch.device('cpu')", "def GetGPU():\n return option['device_id']", "def add_pci(self, pci, host_obj, vm_update, vm_status, mmio_size):\n\n self.logger.info(\"Adding PCI device {0} for {1}\".format(pci, self.vm_obj.name))\n extra_config_key1 = \"pciPassthru.64bitMMIOSizeGB\"\n extra_config_key2 = \"pciPassthru.use64bitMMIO\"\n if mmio_size is None:\n mmio_size = 256\n tasks = []\n pci_obj = GetHost(host_obj).pci_obj(pci)\n # Convert decimal to hex for the device ID of PCI device\n device_id = hex(pci_obj.deviceId % 2 ** 16).lstrip(\"0x\")\n if not vm_status.uefi():\n self.logger.warning(\n \"VM {0} is not installed with UEFI. \"\n \"If PCI device has large BARs, \"\n \"UEFI installation is required.\".format(self.vm_obj.name)\n )\n else:\n self.logger.info(\n \"Good. VM {0} has UEFI \" \"installation.\".format(self.vm_obj.name)\n )\n sys_id = vm_status.pci_id_sys_id_passthru()\n backing = vim.VirtualPCIPassthroughDeviceBackingInfo(\n deviceId=device_id,\n id=pci_obj.id,\n systemId=sys_id[pci_obj.id],\n vendorId=pci_obj.vendorId,\n deviceName=pci_obj.deviceName,\n )\n backing_obj = vim.VirtualPCIPassthrough(backing=backing)\n dev_config_spec = vim.VirtualDeviceConfigSpec(device=backing_obj)\n dev_config_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add\n config_spec = vim.vm.ConfigSpec()\n config_spec.deviceChange = [dev_config_spec]\n tasks.append(self.vm_obj.ReconfigVM_Task(spec=config_spec))\n tasks.append(vm_update.add_extra(extra_config_key1, str(mmio_size)))\n tasks.append(vm_update.add_extra(extra_config_key2, \"TRUE\"))\n return tasks" ]
[ "0.61294276", "0.60951805", "0.60499793", "0.60417134", "0.60086673", "0.59688807", "0.59526575", "0.59526575", "0.5943511", "0.5913145", "0.5844729", "0.58256614", "0.5817914", "0.5816057", "0.5782098", "0.57035667", "0.5691402", "0.5680439", "0.5678658", "0.56265086", "0.5603328", "0.557916", "0.557492", "0.5574814", "0.55497104", "0.5547715", "0.5547715", "0.5547715", "0.5541033", "0.55409616" ]
0.6416047
0
Return the sale price if it is not None; otherwise return the regular price
def get_price(self): return self.sale_price if self.sale_price else self.price
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_prices(variation_object,Map):\n\n sale_price = variation_object[Map['itemtype']['fields']['sale_price']]\n price = sale_price\n\n if variation_object[Map['itemtype']['fields']['price']]:\n price = variation_object[Map['itemtype']['fields']['price']]\n\n return price,sale_price", "def solve_prices(self):\n return None", "def _get_normal_sold_prices(self, jdict):\n price_dict = jdict['props']['homeDetails']['price']\n try:\n sales_price_text = price_dict['formattedPrice'].replace(',','')\\\n .replace('$', '')\n sales_price = self._extract_num(sales_price_text)\n sales_date = price_dict['formattedSoldDate']\n sales_date_formatted = datetime.datetime\\\n .strptime(sales_date, '%b %d, %Y')\\\n .strftime('%Y-%m-%d')\n try:\n asking_price_text = price_dict['listingPrice']['formattedPrice'].replace(',','')\\\n .replace('$', '')\n if 'k' in asking_price_text.lower():\n asking_price = self._extract_num(asking_price_text)*1e3\n elif 'm' in asking_price_text.lower():\n asking_price = self._extract_num(asking_price_text)*1e6\n else:\n asking_price = self._extract_num(asking_price_text)\n return sales_date_formatted, sales_price, asking_price\n except:\n return sales_date_formatted, sales_price, np.nan\n except:\n return None, None, None", "def get_sale_price(self):\n Currency = Pool().get('currency.currency')\n Company = Pool().get('company.company')\n\n if self.carrier_cost_method != 'gls':\n return super(Carrier, self).get_sale_price() # pragma: no cover\n\n currency, = Currency.search([('code', '=', 'EUR')])\n company = Transaction().context.get('company')\n\n if company:\n currency = Company(company).currency\n\n return Decimal('0'), currency.id", "def get_prices(self, grab, subject):\n prices = []\n try:\n extras = grab.doc.rex_text(\n '<h3 class\\=\"h6 copy-sp-m\">.*?%s.*?</h3>(.+?)</ul>' % subject,\n flags=re.S\n )\n except DataNotFound:\n logging.debug(\n \"Price %s is not found on %s\"\n % (subject, grab.doc.url)\n )\n return None\n\n sel = XpathSelector(fromstring(extras))\n prices = []\n for li in sel.select('//li[@class=\"list__item u-cf\"]'):\n obligatory = OrderedDict()\n obligatory['name'] = li.select('node()').text()\n money = li.select('node()/strong').text()\n obligatory['value'] = money[1:].replace(',', '')\n\n # Find perweek or perday\n if li.select(\n 'span[@class=\"boatview__extras-amount\"' +\n ' and contains(text(),\"per week\")]'\n ).exists():\n obligatory['perweek'] = True\n elif li.select(\n 'span[@class=\"boatview__extras-amount\"' +\n ' and contains(text(),\"per day\")]'\n ).exists():\n obligatory['perday'] = True\n obligatory['currency'] = money[0]\n prices.append(obligatory)\n\n if len(prices) < 1:\n logging.debug(\n \"Price %s contains less than one element on: %s\"\n % (subject, grab.doc.url)\n )\n return None\n\n return prices", "def priceGetAll(soup):\n try:\n price = priceGetMost(soup)\n except:\n price = priceGetSome(soup)\n\n return price", "def get_price(self, field_name='PRICES'):\n price_data = self.get_price_data()\n return price_data.get('price') or self.find_price(self.get_default(field_name))", "def priceGetSome(soup):\n price = soup.find('span', id='priceblock_ourprice', class_='a-size-medium a-color-price')\n price = price.text\n price = price.strip()\n price = price.lstrip('$')\n price = float(price)\n\n return price", "def extract_listing_price_from_result(soup, prices):\r\n for description in soup.find_all(name='div', class_='descr'):\r\n price = description.find(name='div', class_='price')\r\n if price == None:\r\n prices.append('No 
Price')\r\n else:\r\n prices.append(price.get_text())\r\n # print(prices)\r\n return prices", "def _get_price(self, soup):\n\n try:\n # try to locate the price tag \n price_tag = soup.find('div', class_='listing-detail-header-row md:ml-auto -ml-1')\\\n .find('h4', class_='h3')\n # remove punctuation marks \n price_text = price_tag.get_text()\\\n .replace(',','')\\\n .strip()\n\n price = self._extract_num(price_text)\n return price\n except:\n return np.nan", "def sale_price(self):\n if self.sold_on is not None:\n return 0.0 # Already sold\n return 5000.0 * self.wheels", "def sale_price(self):\n if self.sold_on is not None:\n return 0.0 # Already sold\n return 5000.0 * self.wheels", "def sale_price(self):\n if self.sold_on is not None:\n return 0.0 # Already sold\n return 5000.0 * self.wheels", "def get_prices(self):\n pass", "def get_prices(self):\n price = self.get_price()\n if price:\n return [price]\n return []", "def test_get_standard_price_1(self):\n # By default get_standard_price returns then normal price of the product\n standard_price = self.p1.get_standard_price(self.request)\n self.assertEqual(standard_price, 1.0)\n\n # Switch to for sale\n self.p1.for_sale = True\n self.p1.save()\n\n # If the product is for sale ``get_price`` returns the for sale price\n price = self.p1.get_price(self.request)\n self.assertEqual(price, 0.5)\n\n # But ``get_standard_price`` returns still the normal price\n standard_price = self.p1.get_standard_price(self.request)\n self.assertEqual(standard_price, 1.0)", "def get_price(item):\n return float(item[1])", "def _get_price(self, soup):\n\n try:\n # price tag\n price = soup.find('div', class_='c-price').get_text().replace(',','') # clean up the text\n return self._extract_num(price) # extract number from the text\n except:\n return None", "def get_price(self):\r\n return self.price", "def get_order_price(self):\r\n if self.price is not None:\r\n return self.price #typical limit price order\r\n else:\r\n #Check the orderbook\r\n logger.info(\"floating price\")\r\n self.get_orderbook()\r\n logger.info(self.orderbook_snapshot)\r\n\r\n pass", "def get_product_price(self, url):\n self.driver.get(url)\n\n try:\n price = self.driver.find_element_by_id(\"priceblock_ourprice\").text\n except:\n pass\n\n try:\n price = self.driver.find_element_by_id(\"priceblock_dealprice\").text\n except:\n pass\n\n if price is None:\n price = \"Not available\"\n\n else:\n non_decimal = re.compile(r'[^\\d.]+')\n price = non_decimal.sub('', price)\n\n return price", "def get_prices(name,products,sales):\r\n return tuple((products[0],products[1]*((1-tuple(filter(lambda x: x[0]==name, sales))[0][1]))) for products in products)", "def getprice():\n\n print(\"Get price\")\n latest_price = get_latest_price(item_code)\n return latest_price", "def get_item_price(self, soup: BeautifulSoup) -> None:\n try:\n price = soup.find(\"span\", class_=\"_olc9rf0\").get_text()\n price = re.findall(\"\\d+(?:\\.\\d+)?\", price)[0]\n except (AttributeError, IndexError):\n price = None\n self.__collected_dic[\"price\"].append(price)", "def get_price():\n \n #Teacher's code. 
Could not get it working.\n #price = db(db.product.name == productName).select(db.product.price)[0].price\n \n \n return (200)", "def _get_price(self, jdict):\n\n try:\n price_dict = jdict['props']['homeDetails']['price']\n return float(price_dict['price'])\n except:\n return None", "def precio(self):\n price = self.html.xpath(self.xpath_sale_price)\n precio = map(self.limpieza_precio, price)\n return precio", "def get_discount(self, price):\r\n pass", "def get_product_price(container) -> str:\r\n try:\r\n price_container = container.findAll(\"li\", {\"class\": \"price-current\"})\r\n price: str = price_container[0].strong.text + price_container[0].sup.text\r\n return price\r\n except AttributeError:\r\n return None", "def get_product_price(self):\n\n price = \"0.0000\"\n\n try:\n price = self.trees.get_element_by_id(\"priceblock_ourprice\").text\n except:\n try:\n price = self.trees.get_element_by_id(\n \"price_inside_buybox\").text\n except:\n try:\n price = self.trees.get_element_by_id(\n \"priceblock_dealprice\").text\n except:\n try:\n price = self.trees.xpath(\n \"//span[@class='a-color-price']/text()\")[0]\n except:\n try:\n price = self.trees.xpath(\n \"//span[@class='a-size-base a-color-price']/text()\")[0]\n except:\n pass\n\n non_decimal = re.compile(r'[^\\d.]+')\n price = non_decimal.sub('', price)\n\n return round(float(price[0:5]), 2)" ]
[ "0.668608", "0.66682285", "0.66242206", "0.6555448", "0.6544855", "0.6537114", "0.65338093", "0.6442291", "0.6424068", "0.6401285", "0.6377285", "0.6377285", "0.6377285", "0.63466", "0.6335578", "0.63162786", "0.6285069", "0.62215525", "0.6178284", "0.6166635", "0.61564565", "0.6152488", "0.6145447", "0.6106609", "0.6053134", "0.6044706", "0.6024521", "0.6000453", "0.59910786", "0.5990133" ]
0.72438234
0
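The get_price document above is a one-line fallback: prefer sale_price when it is set, otherwise fall back to the regular price. A self-contained illustration of the same pattern, using a made-up Product class (the row does not show the surrounding class, so everything except get_price is an assumption):

```python
# Hypothetical container class; only get_price reflects the pattern from the row above.
class Product:
    def __init__(self, price, sale_price=None):
        self.price = price
        self.sale_price = sale_price

    def get_price(self):
        # the sale price wins whenever it is truthy, otherwise the list price is used
        return self.sale_price if self.sale_price else self.price


print(Product(100).get_price())      # 100  (no sale price set)
print(Product(100, 80).get_price())  # 80   (sale price takes precedence)
```

Note that a falsy sale price of 0 would also fall through to the regular price; that is usually the intended behaviour of this pattern, but it is worth keeping in mind.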
LibThread(q): get a representation of cmus' cache. On success, puts a dict representing cmus' cache in the Queue object q. This function is intended to be called as a separate thread! Thus, it does little error handling and adjusts the process's nice value.
def LibThread(q): os.nice(1) time.sleep(0) library = cmus.library() cache = cmus.Cache() liblist = {} # BUG: newly added tracks don't appear in the listing as they aren't recorded # neither in the cache nor in the library.pl # TODO: report progress values, maybe even return partial results? for track in cache: # allow other thread to jump in time.sleep(0) try: q.get_nowait() q.task_done() return False except Queue.Empty: pass try: del library[library.index(track['file'])] except ValueError: # file is in cache but not in library continue # use dummy values if no value given artist = track['albumartist'] if track.has_key('albumartist') and track['albumartist'] != '' \ else track['artist'] if track.has_key('artist') and track['artist'] != '' \ else '[unknown]' album = track['album'] if track.has_key('album') and track['album'] != '' \ else '[unknown]' title = track['title'] if track.has_key('title') and track['title'] != '' \ else os.path.basename(track['file']).rsplit('.', 1)[0] if not liblist.has_key(artist): liblist[artist] = {} if not liblist[artist].has_key(album): liblist[artist][album] = {} liblist[artist][album][title] = track # TODO: get files not in cache manually # (hint: library contains their paths now) def sorter(x): ref = liblist[artist][album][x] if ref.has_key('tracknumber') and ref['tracknumber'] != 0: return ref['tracknumber'] elif ref.has_key('title') and ref['title'] != '': return ref['title'] else: return 0 for artist in liblist.keys(): for album in liblist[artist].keys(): # sort by tracknumber if existant, title else liblist[artist][album]['__keys__'] = \ sorted(liblist[artist][album].keys(), key=sorter) liblist[artist]['__keys__'] = sorted(liblist[artist].keys(), key=str.lower) liblist['__keys__'] = sorted(liblist.keys(), key=str.lower) q.put(liblist)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_cache(*args, **kws):\n resp = self.response\n out = resp.out\n namespace = ''\n if self.cache_nsfuncs.get(func, None):\n namespace = self.cache_nsfuncs[func](self.request)\n p = urlsplit(self.request.url)[2]\n c = memcache.get(p, namespace)\n if c:\n # in case cache is found, use it \n # instead of rendering by calling function.\n out.write(c['body'])\n for k, i in c['hdr'].items():\n resp.headers[k] = i\n return\n\n r = func(*args, **kws)\n expire = self.cache_expires.get(func, 0)\n if expire == 0:\n return\n out.seek(0)\n try:\n p = urlsplit(self.request.url)[2]\n memcache.set(p, {'hdr':resp.headers,'body':out.read()},\n expire, namespace=namespace)\n logging.debug('%s is cahed' % p)\n except:\n memcache.flush_all()\n logging.debug('memcache is flashed.')", "def _retrieveCachedData(self):", "def crawl_queue(q, result_set):\n _log = logging.getLogger(crawl_queue.__name__)\n while not q.empty():\n worker = q.get() #get an itme from the queue\n\n try:\n req = requests.get(worker[1], verify = False, timeout = (30,30), headers = create_fakeheader(ua,browser))\n cont = req.content\n result_set[worker[0]] = cont\n except:\n _log.warning(f' couldnt find a request for index {worker[0]}')\n result_set[worker[0]] = ''\n if q.qsize() % 100 == 0:\n _log.info(f'things left to process {q.qsize()}')\n q.task_done()\n return True", "def get(self):\n CACHE_KEY = 'topics'\n if not memcache.get(CACHE_KEY):\n logging.info('Populating cache.')\n topics = Topic.all().order('name')\n topic_list = []\n for topic in topics:\n topic_list.append(topic.ToDict())\n memcache.add(CACHE_KEY, simplejson.dumps(topic_list), 600)\n logging.info('Using cache.')\n logging.info(memcache.get(CACHE_KEY))\n self.response.headers['Content-Type'] = 'application/json'\n self.response.out.write(memcache.get(CACHE_KEY))", "def _cache_data(self):\n while self._run:\n try:\n values = self._data_streamer.get_data_current_state()\n for parameter, mapping_method in self._mapping.items():\n value = values[parameter]\n mapped_notes = self._data_streamer.get_mapper_for_param(parameter, mapping_method[0]).map(value)\n self._value_queues[parameter].put((value,mapped_notes))\n except Exception, e:\n print e.message", "def redis_cache(key, query, ttl=30):\n print 'checking redis cache'\n if r.get(key):\n print 'returning data found in cache'\n return r.get(key)\n else:\n print 'retrieving and caching new query results'\n results = cleanup_queries(query())\n js = json.dumps(results)\n r.set(key, js)\n r.expire(key, ttl)\n return js", "def getcache(sl, tl, st):\n if len(sl) > 0 and len(tl) > 0 and len(st) > 0:\n m = md5.new()\n m.update(sl)\n m.update(tl)\n m.update(st)\n md5hash = str(m.hexdigest())\n tt = memcache.get('wwl|' + md5hash)\n if tt is not None:\n return tt\n else:\n return ''\n else:\n return st", "def _get_cache(self, course_version_guid):\r\n if not hasattr(self.thread_cache, 'course_cache'):\r\n self.thread_cache.course_cache = {}\r\n system = self.thread_cache.course_cache\r\n return system.get(course_version_guid)", "async def _get(self, key, encoding=\"utf-8\"):\n return SimpleMemoryBackend._cache.get(key)", "def _cache_get(self, metric_name):\n pass", "def test_cache_retrieved(self):\n read = self.client.get(\"/read/froLit/jns915/jns1856/ciham-fro1/1\")\n data = read.data.decode()\n self.assertIn(\n '<span class=\"expan\">et </span>', data,\n \"Text content should be transformed\"\n )\n self.assertIn(\n 'Facsimilaire', data,\n \"Other content should be added\"\n )\n\n cached = 
self.cache.get(\"urn:cts:froLit:jns915.jns1856.ciham-fro1:1\").decode()\n self.assertIn('<aside class=\"text-left\">', cached, \"Assert cache is made\")\n\n with mock.patch(\"nemo_xslttwo_plugin.shell\") as shell:\n read = self.client.get(\"/read/froLit/jns915/jns1856/ciham-fro1/1\")\n cached_response = read.data.decode()\n self.assertEqual(\n cached_response, data,\n \"Text content should the same in cache\"\n )\n self.assertEqual(\n shell.call_count, 0,\n \"Shell should not be called because we use cache\"\n )", "async def get() -> list:\n if _cache is None:\n await _update()\n return _cache", "def compiserve_queue_get(uid):\n\n try:\n\n print(\"Received UID is: \" + uid)\n\n job_key = \"compiserv::jobs::{0}\".format(uid)\n\n result = {\n \"state\": \"\"\n }\n\n if not _redis.exists(job_key):\n return jsonify(state='not_found')\n\n # Split the UID into its components.\n id, tokenid = uid.split(\"+\", 1)\n\n # Retrieve the state of the remote JOB\n resp = requests.get(GET_URL.format(id, tokenid))\n jsresp = resp.json()\n\n # BinaryFile, CompletedDate, LogFile, State\n state = jsresp['State'].lower()\n\n if state == 'finished':\n\n # The job is not active anymore, but it may have succeeded or failed.\n\n print \"[DEBUG]: CompiServ finished with {0}. From thread: {1}\".format(uid, threading.current_thread())\n\n binary_file = jsresp['BinaryFile'] # type: list[int]\n completed_date = jsresp['CompletedDate']\n log_file = jsresp['LogFile']\n compile_result = 'success' if binary_file is not None else 'error'\n\n # Store the binary file as a byte array.\n # TODO: Check whether flask supports bytearray\n\n # This converts from an array of integers representing the bytes, to a bytes str (or in\n # Python 3, to a 'bytes'.\n binary_file = array.array('B', binary_file).tostring()\n binary_file = array.array('B', str(binary_file)).tostring()\n log_file = array.array('B', str(log_file)).tostring()\n\n # Store the files in the redis-powered job\n _redis.hset(job_key, \"binary_file\", binary_file)\n _redis.hset(job_key, \"completed_date\", completed_date)\n _redis.hset(job_key, \"log_file\", log_file)\n _redis.hset(job_key, \"result\", compile_result)\n\n if compile_result == 'error':\n result['state'] = 'failed'\n else:\n result['state'] = 'done'\n\n print(\"[DEBUG] Compiserv result saved.\")\n\n elif state.startswith('unfinished'):\n splits = state.split(\":\")\n number = int(splits[1].strip())\n result['state'] = 'queued'\n result['position'] = number\n\n else:\n raise Exception(\"Unrecognized job state: \" + state)\n\n contents = json.dumps(result, indent=4)\n response = make_response(contents)\n response.content_type = 'application/json'\n return response\n\n except Exception as ex:\n tb = traceback.format_exc()\n return jsonify(state='error', traceback=tb)", "def _memcache_get(*args, **kwargs):\n return ndb.get_context().memcache_get(*args, **kwargs)", "def cache(self,redis_wrapper,key='default'):\n \n \n if key == 'default':\n key = self.showId()\n \n logger.info('Serializing GriddedTaxonomy. \\n Depending on the amount of data it can take some time')\n \n #Cleaning GeoQueryValuesSets fields\n map(lambda grid : grid.removeQuerySets(),self)\n \n import pickle\n logger.info('Serializing with pickle') \n self_pickle = pickle.dumps(self)\n logger.info(\"Storing in Cache\")\n try:\n \n redis_wrapper.set(key,self_pickle)\n return True\n except:\n logger.error(\"Problem in serializing. 
The intented caching object could be very big!\")\n return self_pickle", "def cache_produce () :\n\n \"\"\"\n list for movie\n each entry is the sum and count of all ratings received by each movie, which will be used later to calculate the user offset\n \"\"\"\n mcache = movie_read(open('/u/downing/cs/netflix/movie_titles.txt', 'r', encoding = \"ISO-8859-1\"))\n\n \"\"\"\n dictionaries for user caches\n each entry contain the sum and count of all ratings given by each user, which will be used later to calculate the user offset\n mean is the average of all ratings from all movies\n \"\"\"\n ucache, mean = user_read(mcache, \"/u/downing/cs/netflix/training_set\")\n\n cal_offset(mcache, mean)\n cal_offset(ucache, mean)\n\n mcache.append(mean)\n\n output_cache(ucache, open('/u/wc6892/Documents/cs373-netflix/wc6892-ucacheoff.txt', 'w'))\n output_cache(mcache, open('/u/wc6892/Documents/cs373-netflix/wc6892-mcacheoff.txt', 'w'))", "def _put(self, item, queue):", "def getData(self, local_cache):", "def _get_cache(self, course_version_guid):\n if self.request_cache is None:\n return None\n\n return self.request_cache.data.setdefault('course_cache', {}).get(course_version_guid)", "def get_page_queue(url, queue):\n queue.put(urlopen(url).read())\n return None", "def run(self):\n\n import time\n LOGGER.info(\"Caching thread started !\")\n\n while True:\n\n # Get all data\n # Make data visible from parent thread\n self.data = self._forge_data(self._sqla_session)\n\n # Wait 30 seconds before new processing\n time.sleep(cm.DELAY)", "def _add_cache(self, course_version_guid, system):\r\n if not hasattr(self.thread_cache, 'course_cache'):\r\n self.thread_cache.course_cache = {}\r\n self.thread_cache.course_cache[course_version_guid] = system\r\n return system", "def cache(self,redis_wrapper,key='default'):\n \n \n if key == 'default':\n key = self.showId()\n \n logger.info('Serializing NestedGriddedTaxonomy. \\n Depending on the amount of data it can take some time')\n \n #Cleaning GeoQueryValuesSets fields\n map(lambda grid : grid.removeQuerySets(),self.levels.values())\n \n import pickle\n logger.info('Serializing with pickle') \n self_pickle = pickle.dumps(self)\n logger.info(\"Storing in Cache\")\n try:\n \n redis_wrapper.set(key,self_pickle)\n return True\n except:\n logger.error(\"Problem in serializing. The intented caching object could be very big!\")\n return self_pickle", "def get_cache(self, key):\n return self.r.get(key)", "def test_cache_multithread_synchronization(self):\n RULES_MAP = {\"h1\": \"{ color: blue; }\", \"h2\": \"{ color: green; }\"}\n\n class RuleMapper(threading.Thread):\n def __init__(self):\n super(RuleMapper, self).__init__()\n self.exception = None\n\n def run(self):\n try:\n for rule in RULES_MAP:\n get_styles(rule)\n except KeyError as e:\n self.exception = e\n\n class DelayedDeletionLRUCache(cachetools.LRUCache):\n \"\"\"\n Overrides base LRU implementation to introduce a small delay when\n removing elements from cache.\n\n The delay makes sure that multiple threads try to pop same item from\n cache, resulting in KeyError being raised. 
Reference to exception is\n kept to make assertions afterwards.\n \"\"\"\n\n def popitem(self):\n try:\n key = next(iter(self._LRUCache__order))\n except StopIteration:\n raise KeyError(\"%s is empty\" % self.__class__.__name__)\n else:\n time.sleep(0.01)\n return (key, self.pop(key))\n\n cache_module = imp.load_source(\n \"cache.py\", os.path.join(\"premailer\", \"cache.py\")\n )\n\n # Set module cache to point to overridden implementation.\n cache_module.cache = DelayedDeletionLRUCache(maxsize=1)\n\n @cache_module.function_cache()\n def get_styles(rule):\n return RULES_MAP[rule]\n\n threads = [RuleMapper() for _ in range(2)]\n for thread in threads:\n thread.start()\n\n for thread in threads:\n thread.join()\n\n exceptions = [thread.exception for thread in threads if thread.exception]\n self.assertTrue(\n not exceptions, \"Unexpected exception when accessing Premailer cache.\"\n )", "def cache():\n if request.method == 'GET':\n cache_info = in_water.cache_info()\n return json.dumps({\n 'hits': cache_info.hits,\n 'misses': cache_info.misses,\n 'maxsize': cache_info.maxsize,\n 'currsize': cache_info.currsize,\n })", "def ztest_get_item(self):\n \n queue = NMSQueue()\n \n result_set = queue.get_items_with_priority(1,1,0,1)\n \n for item in result_set:\n print(\"\\nItem = %s\\n\" % (item) )\n newitem = queue.get_item(item.uuid)\n print(\"\\nRetrieve the same from queue Item = %s\\n\" % (newitem) )", "def cached_job(preload_queue_html_data):\n cached_job = CachedJob(queue=preload_queue_html_data)\n cached_job.wait_for_new_job(sleep=2)\n yield cached_job", "def request(self, *args, **kwargs):\n self.work_request_queue.put((args, kwargs))\n return self.result_queue.get()", "def request(self, *args, **kwargs):\n self.work_request_queue.put((args, kwargs))\n return self.result_queue.get()" ]
[ "0.56700563", "0.56635237", "0.553123", "0.5365313", "0.5352841", "0.5348453", "0.52659357", "0.52659", "0.5217064", "0.52100486", "0.5172683", "0.5155207", "0.51392865", "0.5134736", "0.51345325", "0.51182157", "0.5099683", "0.5090276", "0.505746", "0.50211537", "0.5011173", "0.5008483", "0.49962637", "0.49961707", "0.49823254", "0.49595866", "0.4934674", "0.4934651", "0.49321386", "0.49321386" ]
0.60629076
0
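The LibThread document above is written for Python 2 (it uses the Queue module and dict.has_key) and communicates only through the queue it is given: the worker puts the finished artist/album/title dict into the queue when it is done, and treats anything the caller puts into the queue as a cancellation request. A minimal calling sketch under those assumptions; it presumes LibThread and its cmus helper module are importable and that cmus has a populated cache:

```python
# Hypothetical Python 2 caller for LibThread; names other than LibThread are made up.
import Queue
import threading

q = Queue.Queue()
worker = threading.Thread(target=LibThread, args=(q,))
worker.start()

liblist = q.get()          # blocks until the worker puts the library dict
for artist in liblist['__keys__']:
    print(artist)          # artists come back pre-sorted by the worker
```

To abort instead, put any sentinel value into the queue before the worker finishes; its q.get_nowait() check inside the loop will consume it and return early.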
load_font(fontname, fontsize) > the appropriate pygame.Font() Searches for the font given by fontname and fontsize at the following
def load_font(fontname, fontsize): # system fonts if pygame.font.get_fonts().count(fontname) == 1: return pygame.font.SysFont(fontname, fontsize) # standard MS fonts if os.path.exists('/usr/share/fonts/truetype/msttcorefonts/'+fontname+'.ttf'): return pygame.font.Font('/usr/share/fonts/truetype/msttcorefonts/'+fontname+'.ttf', fontsize) # search /usr/share/fonts/ for root, dirs, files in os.walk('/usr/share/fonts'): if fontname+'.ttf' in files: return pygame.font.Font(os.path.join(root, fontname+'.ttf'), fontsize) # search in working dir if os.exists('./'+fontname+'.ttf'): return pygame.font.Font(fontname+'.ttf', fontsize) # last resort: return default font return pygame.font.Font(None, fontsize)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loadCustomFont(path,name,size):\n\n fullname = os.path.join(path,name)\n f = pygame.font.Font(fullname,size)\n return f", "def load_font(fontSize):\n f1='/usr/share/fonts/corefonts/arialbd.ttf' \n f2='/usr/share/fonts/truetype/msttcorefonts/Arial_Bold.ttf'\n if os.path.isfile(f1): font=ImageFont.truetype(f1,fontSize)\n if os.path.isfile(f2): font=ImageFont.truetype(f2,fontSize)\n return font", "def loadSystemFont(name, size):\n\n try:\n f = pygame.font.SysFont(name,size)\n except error, message:\n print \"Cannot load font: \", name\n raise SystemExit, message\n return f", "def loadDefaultFont(size):\n\n try:\n f = pygame.font.Font(None,size)\n except error, message:\n print \"Cannot load the default font\"\n raise SystemExit, message\n return f", "def font(size=20, name=None):\n name = name or \"regular\"\n path = ROOT_DIR / \"wclib\" / \"assets\" / (name + \".ttf\")\n return pygame.font.Font(path, size)", "def _create_font(cls, font, size):\n if font[-4:] in (\".ttf\", \".otf\"):\n return pygame.font.Font(font, size)\n else:\n return pygame.font.SysFont(font, size)", "def get_font_at_size(fonts_path, font_name, initial_font_size, text_to_print, target_width):\n font_size = initial_font_size\n while True:\n font = ImageFont.truetype(path.join(fonts_path, font_name), font_size)\n text_width = font.getsize(text_to_print)[0]\n if text_width <= target_width:\n break\n if font_size < 9:\n break\n font_size = font_size - 1\n return font", "def CreateFont(name, size):\r\n\ttry:\r\n\t\tf = pygame.font.Font(name, size)\r\n\t\treturn f\r\n\texcept IOError:\r\n\t\treturn pygame.font.SysFont(name, size)", "def get_fonts():\r\n return pygame.font.get_fonts()", "def load_fonts(self):\n for key, font in enumerate(self.fonts):\n self.fonts[key]['font'] = load_font(font['name'], font['size'])\n checkpoint('fonts')", "def getFont(fontsize):\n\t\t\n\tfontPath = os.path.join(GATEWAYPATH, \"pilfonts\", \"FreeSans.ttf\")\n\ttry:\n\t\tfont = ImageFont.truetype ( fontPath, fontsize )\t\n\texcept:\n\t\tfont = ImageFont.load('%s/pilfonts/B%0.2d.pil' % (GATEWAYPATH, 24) )\n\treturn font", "def get_font_dict(f):\n return tk_font.Font(font=f).actual()", "def create(font_name, point):\n return pygame.font.SysFont(font_name, int(point))", "def _get_font_button(field_size):\r\n font_size = int(field_size * 2) # calculates font's size\r\n return pygame.font.SysFont(None, font_size) # returns font\r", "def get_named_font(*a, **kw):\n return get_named_font(*a, **kw)", "def load_font(self, path: str, font_family: str, mode: str='n') -> None:\n font = PDFTrueTypeFont('F'+str(self.index), path)\n if not font_family in self.fonts:\n self.fonts[font_family] = {'n': font}\n self.fonts[font_family][mode] = font\n self.index += 1", "def get_font_dict(*a, **kw):\n return get_font_dict(*a, **kw)", "def create_font(font_name, fit = True):\n font = {}\n try:\n numbers = Image.open(fonts_path + font_name + \".jpg\")\n if fit:\n numbers = images.fit_to_display(numbers, True)\n width, height = numbers.size\n font[\"d\"] = Image.open(fonts_path + \"degree.jpg\")\n font[\"d\"] = images.fit_to_display(font[\"d\"])\n font[\"p\"] = Image.open(fonts_path + \"percent.jpg\")\n font[\"p\"] = images.fit_to_display(font[\"p\"])\n font[\"m\"] = Image.open(fonts_path + \"am.jpg\")\n font[\"m\"] = images.fit_to_display(font[\"m\"], True)\n font[\"a\"] = Image.open(fonts_path + \"pm.jpg\")\n font[\"a\"] = images.fit_to_display(font[\"a\"], True)\n d_w, d_h = font[\"d\"].size\n font[\"d\"] = font[\"d\"].crop((10,0,d_w-10,d_w))\n box_width = 
float(width)/10 \n #Crop out each character in the provided image and save that to a dictionary\n for i in range(0, 10):\n box = [int(round(i*(box_width))), 0, int(round((i + 1)*(box_width))), height]\n #Checks if a subrectangle passes the width of the image, and shortens it if necessary\n if box[3] > width:\n box[3] = width\n \n box = tuple(box)\n font[str(i)] = numbers.crop(box) \n return font\n except IOError:\n print(\"Specified font file: %s.jpg cannot be found at: %s\" % (font_name,fonts_path))", "def GetFont(*args, **kwargs):\n return _gdi_.StockGDI_GetFont(*args, **kwargs)", "def set_font(self, font):\n\tself.m_font = font", "def load_font(self, file):\n self.font = []\n with open(file, 'rb') as f:\n while True:\n buf = f.read(FONT_HEIGHT)\n if not buf:\n break\n self.font.append(buf)", "def get_font(self, option):\n return get_font(option=option)", "def selectfont(self, char):\n\n charcode = ord(char)\n for font in fontchecksequence:\n for fontrange in fontmapping[font]:\n if charcode in xrange(fontrange[0], fontrange[1]):\n return font\n return \"Helvetica\" # fallback, if no thirdparty font is installed", "def create_text(text, font_size, bold, text_color):\n myfont = pygame.font.SysFont(\"Courier\", font_size, bold)\n surface = myfont.render(text,True,text_color)\n return surface", "def load_font(font_path):\n\n # ttc is collection of ttf\n if font_path.endswith('ttc'):\n ttc = TTCollection(font_path)\n # assume all ttfs in ttc file have same supported chars\n return ttc.fonts[0]\n\n if font_path.endswith('ttf') or font_path.endswith('TTF') or font_path.endswith('otf'):\n ttf = TTFont(font_path, 0, allowVID=0,\n ignoreDecompileErrors=True,\n fontNumber=-1)\n\n return ttf", "def selectFont():\n font,ok = QtGui.QFontDialog.getFont()\n if ok:\n return font\n else:\n return None", "def harfbuzz_open_font(fontpath):\n\t# Need to create GLib.Bytes explicitly until this bug is fixed:\n\t# https://bugzilla.gnome.org/show_bug.cgi?id=729541\n\tfontdata = open (fontpath, 'rb').read ()\n\tblob = hb.glib_blob_create (GLib.Bytes.new (fontdata))\n\tface = hb.face_create (blob, 0)\n\tdel blob\n\tfont = hb.font_create (face)\n\tscale = 1000 # hb.face_get_upem (face)\n\tdel face\n\thb.font_set_scale (font, scale, scale)\n\thb.ot_font_set_funcs (font)\n\treturn font", "def load(filename, size=12):\r\n # face = Face('./VeraMono.ttf')\r\n face = freetype.Face(filename)\r\n face.set_char_size(size*size)\r\n return face", "def truetype(font=None, size=10, index=0, encoding=\"\",\r\n layout_engine=None):\r\n if not freetype_installed:\r\n raise NotImplementedError(\"freetype-py is not installed or the libfreetype.dll/dylib/so is missing, if freetype-py is not installed, install it with pip install freetype-py\")\r\n fontpath = font\r\n font = FreeTypeFont(font, size)\r\n if font.font is not None:\r\n return font.font\r\n else:\r\n ttf_filename = os.path.basename(fontpath)\r\n dirs = []\r\n if sys.platform == \"win32\":\r\n # check the windows font repository\r\n # NOTE: must use uppercase WINDIR, to work around bugs in\r\n # 1.5.2's os.environ.get()\r\n windir = os.environ.get(\"WINDIR\")\r\n if windir:\r\n dirs.append(os.path.join(windir, \"Fonts\"))\r\n elif sys.platform in ('linux', 'linux2'):\r\n lindirs = os.environ.get(\"XDG_DATA_DIRS\", \"\")\r\n if not lindirs:\r\n # According to the freedesktop spec, XDG_DATA_DIRS should\r\n # default to /usr/share\r\n lindirs = '/usr/share'\r\n dirs += [os.path.join(lindir, \"fonts\")\r\n for lindir in lindirs.split(\":\")]\r\n elif sys.platform == 
'darwin':\r\n dirs += ['/Library/Fonts', '/System/Library/Fonts',\r\n os.path.expanduser('~/Library/Fonts')]\r\n ext = os.path.splitext(ttf_filename)[1]\r\n first_font_with_a_different_extension = None\r\n for directory in dirs:\r\n for walkroot, walkdir, walkfilenames in os.walk(directory):\r\n for walkfilename in walkfilenames:\r\n if ext and walkfilename == ttf_filename:\r\n fontpath = os.path.join(walkroot, walkfilename)\r\n font = FreeTypeFont(fontpath, size)\r\n return font.font\r\n elif (not ext and\r\n os.path.splitext(walkfilename)[0] == ttf_filename):\r\n fontpath = os.path.join(walkroot, walkfilename)\r\n if os.path.splitext(fontpath)[1] == '.ttf':\r\n font = FreeTypeFont(fontpath, size)\r\n return font.font\r\n raise IOError(\"cannot find font file\")", "def set_font(self, font):\n\ttry:\n\t self.m_gdfont = self._fonts[font.lower()]\n\t self.m_font = font\n\texcept KeyError:\n\t raise ValueError, 'Illegal font name.'" ]
[ "0.803262", "0.7981457", "0.7703131", "0.75344706", "0.74571955", "0.72757286", "0.7114398", "0.701324", "0.6925238", "0.6885135", "0.68326175", "0.6789328", "0.675228", "0.66453326", "0.6518165", "0.6435163", "0.63940454", "0.63801473", "0.63370556", "0.63328856", "0.6306267", "0.6302275", "0.6299692", "0.6289646", "0.6251832", "0.6249131", "0.6247247", "0.6241387", "0.62204075", "0.6201266" ]
0.8439754
0
checkpoint(name, [first]) print elapsed time since last checkpoint Prints the elapsed time since the last call to checkpoint(), but only if the global variable DEBUG is nonzero.
def checkpoint(name, first = False): global DEBUG if DEBUG: if name != 'first': print 'checkpoint %15s: %f' % ((time.time() - SCRIPT_START) if not first else name, (time.time() - checkpoint.start)) checkpoint.start = time.time()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def timeCheckpoint(start_time, name):\n\n time = clock() - start_time\n print(str.capitalize(name) + ': \\t%.3f' % time)\n return clock()", "def checkpoint():", "def checkpoint(self, msg=None, start=False):\n if start:\n start_time = time.perf_counter()\n self.start_time = start_time\n else:\n start_time = self.start_time\n duration = round((time.perf_counter() - start_time), 2)\n if duration > 2:\n if msg:\n print(\"{:0.2f}s {}\".format(duration, msg))", "def checkpoint(self, timestamp=0.0, **keywords):\n self.services.debug('checkpoint() method called')\n pass", "def load_ckpt(self, name=None):\r\n name = name if name == 'latest' else \"ckpt_epoch{}\".format(name)\r\n load_path = os.path.join(self.model_dir, \"{}.pth\".format(name))\r\n if not os.path.exists(load_path):\r\n raise ValueError(\"Checkpoint {} not exists.\".format(load_path))\r\n\r\n checkpoint = torch.load(load_path)\r\n print(\"Checkpoint loaded from {}\".format(load_path))\r\n if isinstance(self.net, nn.DataParallel):\r\n self.net.module.load_state_dict(checkpoint['model_state_dict'])\r\n else:\r\n self.net.load_state_dict(checkpoint['model_state_dict'])\r\n self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\r\n self.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])\r\n self.clock.restore_checkpoint(checkpoint['clock'])", "def checkpoint(self):\r\n return self._checkpoint", "def save_ckpt(self, name=None):\r\n if name is None:\r\n save_path = os.path.join(self.model_dir, \"ckpt_epoch{}.pth\".format(self.clock.epoch))\r\n print(\"Checkpoint saved at {}\".format(save_path))\r\n else:\r\n save_path = os.path.join(self.model_dir, \"{}.pth\".format(name))\r\n if isinstance(self.net, nn.DataParallel):\r\n torch.save({\r\n 'clock': self.clock.make_checkpoint(),\r\n 'model_state_dict': self.net.module.cpu().state_dict(),\r\n 'optimizer_state_dict': self.optimizer.state_dict(),\r\n 'scheduler_state_dict': self.scheduler.state_dict(),\r\n }, save_path)\r\n else:\r\n torch.save({\r\n 'clock': self.clock.make_checkpoint(),\r\n 'model_state_dict': self.net.cpu().state_dict(),\r\n 'optimizer_state_dict': self.optimizer.state_dict(),\r\n 'scheduler_state_dict': self.scheduler.state_dict(),\r\n }, save_path)\r\n self.net.cuda()", "def save_checkpoint(state, is_best, filename='checkpoint/chpt.tar'):\n if is_best:\n print (\"=> Saving a new best\")\n torch.save(state, filename) # save checkpoint\n else:\n print (\"=> Validation Accuracy did not improve\")", "def save_checkpoint(state, filename):\n print (\"=> Saving a new best\")\n torch.save(state, filename) # save checkpoint", "def save_checkpoint(checkpoint_dir, epoch, iteration, save_dict):\n os.makedirs(checkpoint_dir, exist_ok=True)\n path = opj(checkpoint_dir, str(epoch) + '.' + str(iteration) + '.ckpt')\n assert epoch == save_dict['epoch'], \"`epoch` != save_dict's `start_epoch`\"\n assert iteration == save_dict['iteration'], \"`iteration` != save_dict's `start_iteration`\"\n if os.path.isfile(path):\n print(\"Overwrite checkpoint in epoch %d, iteration %d :exclamation:\" % (epoch, iteration))\n try:\n torch.save(save_dict, path)\n except Exception:\n raise Exception(\"Fail to save checkpoint\")\n \n print(\"Checkpoint %s saved :heavy_check_mark:\" % (str(epoch) + '.' 
+ str(iteration) + '.ckpt'))", "def test_checkpoints(self):\r\n\r\n self.tmpdir = mkdtemp(dir=\"./\",\r\n suffix=\"_test_checkpoints/\")\r\n\r\n bestscores = dict({1: 0.9,\r\n 2: 1.1,\r\n 3: 2.3,\r\n 4: 99.93232344})\r\n\r\n out_fp = write_checkpoint(\r\n \"Key\", 99, self.mapping, [1, 2, 3, 4], bestscores,\r\n [2, 1, 3, 4],\r\n self.tmpdir)\r\n\r\n observed = read_checkpoint(out_fp)\r\n\r\n self.assertEqual(observed[0], \"Key\")\r\n self.assertEqual(observed[1], 99)\r\n self.assertEqual(observed[2], self.mapping)\r\n self.assertEqual(observed[3], [1, 2, 3, 4])\r\n self.assertEqual(observed[4], bestscores)\r\n self.assertEqual(observed[5], [2, 1, 3, 4])", "def checkpoint(self):\n self.logger.info('Checkpointing Sampler')\n with open(self.resume_file, \"wb\") as f:\n pickle.dump(self, f)", "def do_checkpoint(prefix, period=1):\n period = int(max(1, period))\n def _callback(iter_no, sym, arg, aux):\n \"\"\"The checkpoint function.\"\"\"\n if (iter_no + 1) % period == 0:\n save_checkpoint(prefix, iter_no + 1, sym, arg, aux)\n return _callback", "def checkpoint(self, epoch: int):\n if self.exp.scheduler_stepper is not None:\n torch.save(\n {\n \"model_state_dict\": self.exp.model.state_dict(),\n \"optimizer_state_dict\": self.exp.optimizer.state_dict(),\n \"scheduler_state_dict\": self.exp.scheduler_stepper.scheduler.state_dict(),\n \"Epoch\": epoch,\n },\n self.exp.path,\n )\n else:\n torch.save(\n {\n \"model_state_dict\": self.exp.model.state_dict(),\n \"optimizer_state_dict\": self.exp.optimizer.state_dict(),\n \"Epoch\": epoch,\n },\n self.exp.path,\n )", "def reload_checkpoint_if_exists(sess, saver, train_writer, validation_writer, test_writer):\n global_step = -1\n if FLAGS.continue_run:\n ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)\n if ckpt and ckpt.model_checkpoint_path:\n # Restores from checkpoint\n saver.restore(sess, ckpt.model_checkpoint_path)\n # extract global_step from it.\n global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])\n print(\"checkpoint found at step %d\", global_step)\n # ensure that the writers ignore saved summaries that occurred after the last checkpoint but before a crash\n train_writer.add_session_log(tf.SessionLog(status=tf.SessionLog.START), global_step)\n validation_writer.add_session_log(tf.SessionLog(status=tf.SessionLog.START), global_step)\n test_writer.add_session_log(tf.SessionLog(status=tf.SessionLog.START), global_step)\n else:\n print('No checkpoint file found')\n return global_step", "def checkpoint(self):\n checkpoint_status = self.load.checkpoint()\n if checkpoint_status >= TD_ERROR:\n status_message = MESSAGES.get(int(checkpoint_status), None)\n error_table = [(\"Error Code\", \"Error Description\")]\n error_table.append((checkpoint_status, status_message))\n log.info(\"\\r{}\".format(format_table(error_table)))\n return checkpoint_status", "def parse_checkpoint(self):\n pass", "def checkpoint(function, *args, **kwargs):\n common_parms = ['debug', 'message', 'log_file', 'console']\n debug = kwargs.get('debug', False)\n message = kwargs.get('message', None)\n log_file = kwargs.get('log_file', None)\n console = kwargs.get('console', False)\n\n f_kwargs = {key: value for key, value in kwargs.items()\n if not key in common_parms}\n\n result = function(*args, **f_kwargs)\n if debug:\n console_log(\"Checkpoint: checking %s with args:\\n%s\\n\\nResult:\\n%s\\n\" %\n (function.__name__, \"\\n\".join([repr(arg) for arg in args]),\n repr(result)))\n # resume is the first element in the result tuple\n if not 
result[0] and message is not None:\n log_message(message, log_file=log_file, console=console)\n return result", "async def checkpoint(cls) -> None:", "def checkpoint(self):\n save()", "def checkpoint(iteration, G, D, opts):\n ckpt_path = os.path.join(opts.checkpoint_dir, 'ckpt_{:06d}.pth.tar'.format(iteration))\n torch.save({'G': G.state_dict(),\n 'D': D.state_dict(),\n 'iter': iteration}, \n ckpt_path)", "def load_checkpoint(checkpoint_dir, epoch, iteration):\n path = opj(checkpoint_dir, str(epoch) + '.' + str(iteration) + '.ckpt')\n if not os.path.isfile(path):\n raise Exception(\"Checkpoint in epoch %d doesn't exist :sob:\" % epoch)\n\n checkpoint = torch.load(path)\n start_epoch = checkpoint['epoch']\n state_dict = checkpoint['state_dict']\n start_iteration = checkpoint['iteration']\n\n assert iteration == start_iteration\n return start_epoch, start_iteration, state_dict", "def save_checkpoint(model, is_best, filename='./model/checkpoint.pth.tar'):\n if is_best:\n torch.save(model.state_dict(), filename) # save checkpoint\n else:\n print (\"=> Validation Accuracy did not improve\")", "def duration(self):\n return int(\n (self.finish_checkpoint - self.start_checkpoint) * 1000000\n )", "def saveCheckpoint(acc, epoch, model, train_hist):\r\n print('Saving..')\r\n state = {\r\n 'model': model,\r\n 'acc': acc,\r\n 'epoch': epoch,\r\n 'rng_state': torch.get_rng_state(),\r\n 'train_hist': train_hist\r\n }\r\n if not os.path.isdir('checkpoint'): # save to checkpoint directory\r\n os.mkdir('checkpoint')\r\n torch.save(state, './checkpoint/ckpt' + '_' + str(epoch+1))", "def save_checkpoint(state, is_best, checkpoint_dir, logger=None):\n\n def log_info(message):\n if logger is not None:\n logger.info(message)\n\n if not os.path.exists(checkpoint_dir):\n log_info(\n \"Checkpoint directory does not exists. Creatding {}\".format(checkpoint_dir))\n os.mkdir(checkpoint_dir)\n\n last_file_path = os.path.join(checkpoint_dir, 'last_checkpoint.pytorch')\n log_info(\"Saving last checkpoint\")\n torch.save(state, last_file_path)\n if is_best:\n best_file_path = os.path.join(checkpoint_dir, 'best_checkpoint.pytorch')\n log_info(\"Saving best checkpoint\")\n shutil.copyfile(last_file_path, best_file_path)", "def callstack_now():\n return checkpoints[-1]", "def save_checkpoint(args,state, is_best, filename=\"checkpoint.pth.tar\"):\n directory = \"runs/%s-net/\" % (args.name)\n\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n\n epoch = state['epoch']\n\n filename = directory + filename\n torch.save(state, filename)\n\n if is_best:\n shutil.copyfile(filename, \"runs/%s-net/\" % (args.name) + \"model_best.pth.tar\")\n\n if epoch==0 or epoch==2:\n shutil.copyfile(filename, \"runs/%s-net/\" % (args.name) + \"model_epoch_%d.pth.tar\" % epoch )", "def load_checkpoint(self):\n if self.params.resume_from is not None and os.path.exists(self.params.resume_from):\n try:\n LOG('Loading Checkpoint at %s' % self.params.resume_from)\n ckpt = torch.load(self.params.resume_from)\n self.epoch = ckpt['epoch']\n try:\n self.train_loss = ckpt['train_loss']\n self.val_loss = ckpt['val_loss']\n except:\n self.train_loss = []\n self.val_loss = []\n self.network.load_state_dict(ckpt['state_dict'])\n self.opt.load_state_dict(ckpt['optimizer'])\n LOG('Checkpoint Loaded!')\n LOG('Current Epoch: %d' % self.epoch)\n self.ckpt_flag = True\n except:\n WARNING('Cannot load checkpoint from %s. Start loading pre-trained model......' % self.params.resume_from)\n else:\n WARNING('Checkpoint do not exists. 
Start loading pre-trained model......')", "def load(loadname, checkpoint=None):\n ckpt_dir = \"./models/tf_ckpt_\" + loadname + \"/\"\n if checkpoint is not None:\n status = checkpoint.restore(tf.train.latest_checkpoint(ckpt_dir))\n status.assert_consumed()\n print(\"Loaded checkpoint\")\n else:\n print(\"Not Loading any checkpoint\")\n print(\"Starting training from initial configuration\")" ]
[ "0.7120378", "0.6994583", "0.6789564", "0.6231906", "0.57427275", "0.57129097", "0.5706359", "0.56981295", "0.5689351", "0.5677075", "0.5628234", "0.5567891", "0.5557613", "0.550662", "0.54966205", "0.54919827", "0.54845047", "0.54744905", "0.54601556", "0.5449334", "0.54462993", "0.5437544", "0.5433307", "0.5429041", "0.5415703", "0.5407638", "0.5404293", "0.54034865", "0.538243", "0.5379934" ]
0.90047246
0
load_svg(filename, size) > pygame.Surface() Loads the SVG graphic pointed at by filename rendered at the given size into a pygame surface
def load_svg(filename, size): try: import rsvg, cairo, array, cStringIO os.stat(filename) except (ImportError, OSError): return pygame.Surface((0,0)) width, height = size csurface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height) context = cairo.Context(csurface) svg = rsvg.Handle(file=filename) ssize = svg.get_dimension_data() context.set_matrix(cairo.Matrix(width/ssize[2], 0, 0, height/ssize[3], 0, 0)) svg.render_cairo(context) f = cStringIO.StringIO() csurface.write_to_png(f) f.seek(0) return pygame.image.load(f, 'temp.png').convert_alpha()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def openSVG(path):\n from xml.dom import minidom\n doc = minidom.parse(open(path))\n svg = doc.getElementsByTagName(\"svg\")[0]\n sizeMatch = re.match(r\"(\\d+) (\\d+) (\\d+) (\\d+)\", svg.getAttribute(\"viewBox\"))\n w, h = int(sizeMatch.group(3)), int(sizeMatch.group(4))\n return svg, w, h", "def load_svg(file_path):\n assert os.path.exists(file_path)\n doc = parse(file_path)\n\n svg = doc.getElementsByTagName('svg')[0]\n svg_attributes = dom2dict(svg)\n\n defs = g = ''\n for i, tag in enumerate(svg.childNodes):\n if tag.localName == 'defs':\n defs = tag.toxml()\n if tag.localName == 'g':\n g = tag.toxml()\n\n doc.unlink()\n\n return defs, g, svg_attributes", "def load(filename, imageprops):\n with gzip.open(filename, 'rb') as f:\n file_content = f.read()\n return parse_svg.parse_svg_string(file_content, imageprops, \"en\")", "def loadImgDrawing(self, target, name, fileName, textureSize = None):\r\n fileName = self.resource.fileName(fileName)\r\n if os.path.exists(fileName):\r\n drawing = self.resource.load(target, name, lambda: ImgDrawing(self.svg, fileName), synch = True)\r\n if textureSize:\r\n drawing.convertToTexture(textureSize[0], textureSize[1])\r\n else:\r\n raise IOError(fileName)\r\n return drawing", "def render_svg(svg):\n b64 = base64.b64encode(svg.encode('utf-8')).decode(\"utf-8\")\n html = r'<img src=\"data:image/svg+xml;base64,%s\"/>' % b64\n st.write(html, unsafe_allow_html=True)", "def getSvgHtml(svgFile, width, height):\n html = '<object type=\"image/svg+xml\" data=\"%s\" width=\"%s\" height=\"%s\"/>'\n return html % (svgFile, width, height)", "def create_svg(svg_tag, img_width, img_height, out_path):\n script_dir = utils.get_script_dir()\n svg_template_path = utils.join_paths_str(script_dir, \"./templates/template.svg\")\n with open(svg_template_path, \"rt\") as fin:\n with open(out_path, \"wt\") as fout:\n for line in fin:\n fout.write(\n line.replace(\"INSERT_WIDTH\", str(img_width))\n .replace(\"INSERT_HEIGHT\", str(img_height))\n .replace(\"INSERT_OBJECT\", svg_tag)\n )", "def sauver_svg(pcanvas,pgrille,pliste_cubes,pcube_visu):\n global grille\n grille = pgrille\n\n fichier = filedialog.asksaveasfilename(\n defaultextension=\".svg\",\n filetypes=((\"SVG files\", \".svg\"),(\"All files\", \".*\")))\n if not fichier:\n return # l'utilisateur a annule ou ferme la fenetre\n try:\n f = open(fichier, \"w\", encoding = \"utf-8\")\n except FileNotFoundError:\n messagebox.showerror(title=\"Error\",\n message=\"Erreur fichier non trouvé\")\n except IOError:\n messagebox.showerror(title=\"Error\",\n message=\"Le fichier n'existe pas\")\n else:\n # Ecriture du header xml, puis d'une viewbox,\n # qui est en realite comme notre canvas\n f.write(\"<?xml version=\\\"1.0\\\" encoding=\\\"UTF-8\\\"?>\"\n + \"<!DOCTYPE svg PUBLIC \\\"-//W3C//DTD SVG 1.1//EN\\\" \"\n + \"\\\"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd\\\">\\n\")\n f.write(\"<svg viewBox=\" + \"\\\"0 0 \"\n + str(pcanvas.cget(\"width\")) + \" \"\n + str(pcanvas.cget(\"height\")) + \"\\\" \"\n + \"xmlns=\\\"http://www.w3.org/2000/svg\\\">\\n\")\n _dessiner_grille_svg(f)\n \n # pas optimise, mais facultatif\n \n liste = pcanvas.find_all()[grille.taille_x+grille.taille_y+3:]\n # a partir du nb de lignes+1 jusqu'a la fin : les faces des cubes\n # note : les id commencent a 1\n \n for i in range(0,len(liste),3):\n if liste[i] != pcube_visu.id:\n # on a un id de cube, il nous faut l'objet pour ses coordonnees\n for c in pliste_cubes:\n if c.id == liste[i]:\n cube = c\n break\n # cube est le cube correspondant a 
l'id i\n coords2D = grille.canvas_to_grille(cube.coords)\n _dessiner_cube_svg(coords2D,f,cube.h,cube.couleur)\n f.write(\"</svg>\")\n f.close()", "def loadImage(self, file_name):\n self.surf = pygame.image.load(file_name)\n self.draw_angle = 0 # In degrees\n self.bullets = []", "def simplestExample():\n\n my_svg = drawSVG.SVG()\n return my_svg", "def json2svg(json_f, path_out, pathway_iri, wp_id, pathway_version, theme):\n\n dir_out = path.dirname(path_out)\n # example base_out: 'WP4542.svg'\n base_out = path.basename(path_out)\n [stub_out, ext_out_with_dot] = path.splitext(base_out)\n\n pvjs_cmd = f\"pvjs --theme {theme}\"\n with open(json_f, \"r\") as f_in:\n with open(path_out, \"w\") as f_out:\n pvjs_ps = subprocess.Popen(\n shlex.split(pvjs_cmd), stdin=f_in, stdout=f_out, shell=False\n )\n pvjs_ps.communicate()[0]\n\n tree = ET.parse(path_out, parser=parser)\n root = tree.getroot()\n\n #############################\n # SVG > .svg\n #############################\n\n # TODO: make the stand-alone SVGs work for upload to WM Commons:\n # https://www.mediawiki.org/wiki/Manual:Coding_conventions/SVG\n # https://commons.wikimedia.org/wiki/Help:SVG\n # https://commons.wikimedia.org/wiki/Commons:Commons_SVG_Checker?withJS=MediaWiki:CommonsSvgChecker.js\n # W3 validator: http://validator.w3.org/#validate_by_upload+with_options\n\n # WM says: \"the recommended image height is around 400–600 pixels. When a\n # user views the full size image, a width of 600–800 pixels gives\n # them a good close-up view\"\n # https://commons.wikimedia.org/wiki/Help:SVG#Frequently_asked_questions\n root.set(\"width\", \"800px\")\n root.set(\"height\", \"600px\")\n\n # TODO: verify that all of the following cases are now correctly handled in pvjs\n for style_el in root.findall(\".//style\"):\n if not style_el.text == \"\":\n raise Exception(\"Expected empty style sheets.\")\n for el in root.findall(\".//pattern[@id='PatternQ47512']\"):\n raise Exception(\"Unexpected pattern.\")\n\n edge_warning_sent = False\n for el in root.xpath(\n \".//svg:g/svg:g[contains(@class,'Edge')]/svg:g\", namespaces=SVG_NS\n ):\n if not edge_warning_sent:\n print(\"TODO: update pvjs to avoid having nested g elements for edges.\")\n edge_warning_sent = True\n # raise Exception(\"Unexpected nested g element for edge.\")\n\n for el in root.xpath(\n \"/svg:svg/svg:g/svg:g[contains(@class,'Edge')]/svg:path/@style\",\n namespaces=SVG_NS,\n ):\n raise Exception(\n \"Unexpected style attribute on path element for edge.\", namespaces=SVG_NS\n )\n\n for el in root.xpath(\n \"/svg:svg/svg:defs/svg:g[@id='jic-defs']/svg:svg/svg:defs\", namespaces=SVG_NS\n ):\n raise Exception(\"Unexpected nested svg for defs.\")\n\n for el in root.findall(\".//defs/g[@id='jic-defs']/svg/defs\"):\n raise Exception(\"Unexpected nested svg for defs.\")\n\n for el in root.xpath(\n \".//svg:g/svg:g[contains(@class,'Edge')]/svg:path/@style\", namespaces=SVG_NS\n ):\n raise Exception(\"Unexpected style attribute on path element for edge.\")\n\n # TODO: should any of this be in pvjs instead?\n style_selector = (\n \"[@style='color:inherit;fill:inherit;fill-opacity:inherit;stroke:inherit;stroke-width:inherit']\"\n )\n for el_parent in root.findall(f\".//*{style_selector}/..\"):\n stroke_width = el_parent.attrib.get(\"stroke-width\", 1)\n for el in root.findall(f\".//*{style_selector}\"):\n el.set(\n \"style\",\n f\"color:inherit;fill:inherit;fill-opacity:inherit;stroke:inherit;stroke-width:{str(stroke_width)}\",\n )\n\n for el in 
root.findall(\".//*[@filter='url(#kaavioblackto000000filter)']\"):\n el.attrib.pop(\"filter\", None)\n\n for image_parent in root.findall(\".//*image/..\"):\n images = image_parent.findall(\"image\")\n for image in images:\n image_parent.remove(image)\n\n # TODO: do the attributes \"filter\" \"fill\" \"fill-opacity\" \"stroke\" \"stroke-dasharray\" \"stroke-width\"\n # on the top-level g element apply to the g elements for edges?\n\n # TODO: do the attributes \"color\" \"fill\" \"fill-opacity\" \"stroke\" \"stroke-dasharray\" \"stroke-width\"\n # on the top-level g element apply to the path elements for edges?\n\n # TODO: Which of the following is correct?\n # To make the SVG file independent of Arial, change all occurrences of\n # font-family: Arial to font-family: 'Liberation Sans', Arial, sans-serif\n # https://commons.wikimedia.org/wiki/Help:SVG#fallback\n # vs.\n # Phab:T64987, Phab:T184369, Gnome #95; font-family=\"'font name'\"\n # (internally quoted font family name) does not work\n # (File:Mathematical_implication_diagram-alt.svg, File:T184369.svg)\n # https://commons.wikimedia.org/wiki/Commons:Commons_SVG_Checker?withJS=MediaWiki:CommonsSvgChecker.js\n\n # Liberation Sans is the open replacement for Arial, but its kerning\n # has some issues, at least as processed by librsvg.\n # An alternative that is also supported MW is DejaVu Sans. Using\n # transform=\"scale(0.92,0.98)\"\n # might yield better kerning and take up about the same amount of space.\n\n # Long-term, should we switch our default font from Arial to something prettier?\n # It would have to be a well-supported font.\n # This page <https://commons.wikimedia.org/wiki/Help:SVG#fallback> says:\n # On Commons, librsvg has the fonts listed in:\n # https://meta.wikimedia.org/wiki/SVG_fonts#Latin_(basic)_fonts_comparison\n # ...\n # In graphic illustrations metric exact text elements are often important\n # and Arial can be seen as de-facto standard for such a feature.\n\n for el in root.xpath(\".//*[contains(@font-family,'Arial')]\", namespaces=SVG_NS):\n el.set(\"font-family\", \"'Liberation Sans', Arial, sans-serif\")\n\n # TODO: do we need to specify fill=currentColor for any elements?\n\n for el in root.xpath(\".//svg:defs//svg:marker//*[not(@fill)]\", namespaces=SVG_NS):\n el.set(\"fill\", \"currentColor\")\n\n for el in root.xpath(\".//svg:text[@stroke-width='0.05px']\", namespaces=SVG_NS):\n el.attrib.pop(\"stroke-width\", None)\n\n for el in root.xpath(\".//svg:text[@overflow]\", namespaces=SVG_NS):\n el.attrib.pop(\"overflow\", None)\n\n for el in root.xpath(\".//svg:text[@dominant-baseline]\", namespaces=SVG_NS):\n el.attrib.pop(\"dominant-baseline\", None)\n\n for el in root.xpath(\".//svg:text[@clip-path]\", namespaces=SVG_NS):\n el.attrib.pop(\"clip-path\", None)\n\n FONT_SIZE_RE = re.compile(r\"^([0-9.]*)px$\")\n # TRANSLATE_RE = re.compile(r\"^translate[(]([0-9.]*),([0-9.]*)[)]$\")\n TRANSLATE_RE = re.compile(r\"^translate\\(([0-9.]*),([0-9.]*)\\)$\")\n # We are pushing the text down based on font size.\n # This is needed because librsvg doesn't support attribute \"alignment-baseline\".\n\n for el in root.xpath(\".//svg:text[@font-size]\", namespaces=SVG_NS):\n font_size_full = el.attrib.get(\"font-size\")\n font_size_matches = re.search(FONT_SIZE_RE, font_size_full)\n if font_size_matches:\n font_size = float(font_size_matches.group(1))\n\n if not font_size:\n font_size = 5\n\n x_translation = None\n y_translation = None\n transform_full = el.attrib.get(\"transform\")\n if transform_full:\n translate_matches 
= re.search(TRANSLATE_RE, transform_full)\n if translate_matches:\n x_translation = float(translate_matches.group(1))\n y_translation_uncorrected = float(translate_matches.group(2))\n\n if not x_translation:\n x_translation = 0\n y_translation_uncorrected = 0\n\n y_translation_corrected = font_size / 3 + y_translation_uncorrected\n el.set(\"transform\", f\"translate({x_translation},{y_translation_corrected})\")\n\n # Add link outs\n WIKIDATA_CLASS_RE = re.compile(\"Wikidata_Q[0-9]+\")\n for el in root.xpath(\".//*[contains(@class,'DataNode')]\", namespaces=SVG_NS):\n wikidata_classes = list(\n filter(WIKIDATA_CLASS_RE.match, el.attrib.get(\"class\").split(\" \"))\n )\n if len(wikidata_classes) > 0:\n # if there are multiple, we just link out to the first\n wikidata_id = wikidata_classes[0].replace(\"Wikidata_\", \"\")\n el.tag = \"{http://www.w3.org/2000/svg}a\"\n # linkout_base = \"https://www.wikidata.org/wiki/\"\n linkout_base = \"https://scholia.toolforge.org/\"\n el.set(\"{http://www.w3.org/1999/xlink}href\", linkout_base + wikidata_id)\n\n # make linkout open in new tab/window\n el.set(\"target\", \"_blank\")\n\n ###########\n # Run SVGO\n ###########\n\n pre_svgo_svg_f = f\"{dir_out}/{stub_out}.pre_svgo.svg\"\n tree.write(pre_svgo_svg_f)\n\n tree.write(path_out)\n args = shlex.split(\n f'svgo --multipass --config \"{SCRIPT_DIR}/svgo-config.json\" {path_out}'\n )\n subprocess.run(args)\n\n #########################################\n # Future enhancements for pretty version\n #########################################\n\n # TODO: convert the following bash code into Python\n\n # Glyphs from reactome\n # TODO: how about using these: https://reactome.org/icon-lib\n # for example, mitochondrion: https://reactome.org/icon-lib?f=cell_elements#Mitochondrion.svg\n # They appear to be CC-4.0, which might mean we can't upload them to WM Commons?\n\n # Glyphs from SMILES\n # metabolite_patterns_css_f = (\n # f\"{dir_out}/{bare_stub_out}.metabolite-patterns-uri.css\"\n # )\n # metabolite_patterns_svg_f = (\n # f\"{dir_out}/{bare_stub_out}.metabolite-patterns-uri.svg\"\n # )\n #\n # if path.exists(metabolite_patterns_svg_f) and path.exists(\n # metabolite_patterns_css_f\n # ):\n # print(\n # f\"{metabolite_patterns_svg_f} & {metabolite_patterns_css_f} already exist. 
To overwrite, delete them & try again.\"\n # )\n # else:\n # # If only one of them exists, we recreate both\n # if path.exists(metabolite_patterns_svg_f):\n # os.remove(metabolite_patterns_svg_f)\n # elif path.exists(metabolite_patterns_css_f):\n # os.remove(metabolite_patterns_css_f)\n #\n # metabolite_patterns_svg_tree = ET.parse(\n # \"<svg><defs></defs></svg>\", parser=parser\n # )\n # metabolite_patterns_svg_root = metabolite_patterns_svg_tree.getroot()\n #\n # # TODO convert the following sh script to Python\n # \"\"\"\n # jq -r '[.entitiesById[] | select(.type | contains([\"Metabolite\"]))] | unique_by(.type)[] | [.xrefDataSource, .xrefIdentifier, [.type[] | select(startswith(\"wikidata:\"))][0], [.type[] | select(startswith(\"hmdb:\") and length == 14)][0]] | @tsv' \"$json_f\" | \\\n # while IFS=$'\\t' read -r data_source identifier wikidata_id hmdb_id; do\n # wikidata_identifier=$(echo \"$wikidata_id\" | sed 's/wikidata://');\n # bridgedb_request_uri=\"http://webservice.bridgedb.org/Human/attributes/$data_source/$identifier?attrName=SMILES\"\n # if [ -z \"$data_source\" ] || [ -z \"$identifier\" ]; then\n # echo \"Missing Xref data source and/or identifier in $stub_out\";\n # continue;\n # fi\n #\n # smiles=$(curl -Ls \"$bridgedb_request_uri\")\n # bridgedb_request_status=$?\n #\n # if [ \"$bridgedb_request_status\" != 0 ] || [ -z \"$smiles\" ] || [[ \"$smiles\" =~ 'The server has not found anything matching the request URI' ]]; then\n # # if [ \"$bridgedb_request_status\" != 0 ]; then\n # # echo \"Failed to get SMILES string for $stub_out:$data_source:$identifier from $bridgedb_request_uri (status code: $bridgedb_request_status)\";\n # # elif [ -z \"$smiles\" ]; then\n # # echo \"Failed to get SMILES string for $stub_out:$data_source:$identifier from $bridgedb_request_uri (nothing returned)\";\n # # elif [[ \"$smiles\" =~ 'The server has not found anything matching the request URI' ]]; then\n # # echo \"Failed to get SMILES string for $stub_out:$data_source:$identifier from $bridgedb_request_uri\";\n # # echo '(The server has not found anything matching the request URI)'\n # # fi\n #\n # # If the DataSource and Identifier specified don't get us a SMILES string,\n # # it could be because BridgeDb doesn't support queries for that DataSource.\n # # For example, WP396_97382 has a DataNode with PubChem-compound:3081372,\n # # http://webservice.bridgedb.org/Human/attributes/PubChem-compound/3081372?attrName=SMILES\n # # doesn't return anything. However, that DataNode can be mapped to HMDB:HMDB61196, and\n # # the url http://webservice.bridgedb.org/Human/attributes/HMDB/HMDB61196\n # # does return a SMILES string.\n # # Note that BridgeDb currently requires us to use the 5 digit HMDB identifier,\n # # even though there is another format that uses more digits.\n #\n # if [ ! 
-z \"$hmdb_id\" ]; then\n # hmdb_identifier=\"HMDB\"${hmdb_id:(-5)};\n # bridgedb_request_uri_orig=\"$bridgedb_request_uri\"\n # bridgedb_request_uri=\"http://webservice.bridgedb.org/Human/attributes/HMDB/$hmdb_identifier?attrName=SMILES\"\n # #echo \"Trying alternate bridgedb_request_uri: $bridgedb_request_uri\"\n # smiles=$(curl -Ls \"$bridgedb_request_uri\")\n # bridgedb_request_status=$?\n # if [ \"$bridgedb_request_status\" != 0 ]; then\n # echo \"Failed to get SMILES string for $stub_out:$data_source:$identifier from both $bridgedb_request_uri_orig and alternate $bridgedb_request_uri (status code: $bridgedb_request_status)\";\n # continue;\n # elif [ -z \"$smiles\" ]; then\n # echo \"Failed to get SMILES string for $stub_out:$data_source:$identifier from both $bridgedb_request_uri_orig and alternate $bridgedb_request_uri (nothing returned)\";\n # continue;\n # elif [[ \"$smiles\" =~ 'The server has not found anything matching the request URI' ]]; then\n # echo \"Failed to get SMILES string for $stub_out:$data_source:$identifier from both $bridgedb_request_uri_orig and alternate $bridgedb_request_uri\";\n # echo '(The server has not found anything matching the request URI)'\n # continue;\n # fi\n # else\n # continue;\n # fi\n # fi\n #\n # smiles_url_encoded=$(echo \"$smiles\" | jq -Rr '@uri')\n # cdkdepict_url=\"http://www.simolecule.com/cdkdepict/depict/bow/svg?smi=$smiles_url_encoded&abbr=on&hdisp=bridgehead&showtitle=false&zoom=1.0&annotate=none\"\n #\n # cat >> \"$css_out\" <<EOF\n # [typeof~=\"wikidata:$wikidata_identifier\"]:hover > .Icon {\n # cursor: default;\n # fill: url(#Pattern$wikidata_identifier);\n # transform-box: fill-box;\n # transform: scale(2, 3);\n # transform-origin: 50% 50%;\n # }\n # [typeof~=\"wikidata:$wikidata_identifier\"]:hover > .Text {\n # font-size: 0px;\n # }\n # EOF\n #\n # # TODO: do we want to disable the clip-path on hover?\n # #[typeof~=wikidata:$wikidata_identifier]:hover > .Icon {\n # # clip-path: unset;\n # # rx: unset;\n # # ry: unset;\n # # cursor: default;\n # # fill: url(#Pattern$wikidata_identifier);\n # # transform-box: fill-box;\n # # transform: scale(2, 3);\n # # transform-origin: 50% 50%;\n # #}\n #\n # # \"transform-box: fill-box\" is needed for FF.\n # # https://bugzilla.mozilla.org/show_bug.cgi?id=1209061\n #\n # xmlstarlet ed -L \\\n # -s \"/svg/defs\" -t elem -n \"pattern\" -v \"\" \\\n # --var prevpattern '$prev' \\\n # -s '$prevpattern' -t elem -n \"image\" -v \"\" \\\n # --var previmage '$prev' \\\n # -i '$prevpattern' -t attr -n \"id\" -v \"Pattern$wikidata_identifier\" \\\n # -i '$prevpattern' -t attr -n \"width\" -v \"100%\" \\\n # -i '$prevpattern' -t attr -n \"height\" -v \"100%\" \\\n # -i '$prevpattern' -t attr -n \"patternContentUnits\" -v \"objectBoundingBox\" \\\n # -i '$prevpattern' -t attr -n \"preserveAspectRatio\" -v \"none\" \\\n # -i '$prevpattern' -t attr -n \"viewBox\" -v \"0 0 1 1\" \\\n # -i '$previmage' -t attr -n \"width\" -v \"1\" \\\n # -i '$previmage' -t attr -n \"height\" -v \"1\" \\\n # -i '$previmage' -t attr -n \"href\" -v \"$cdkdepict_url\" \\\n # -i '$previmage' -t attr -n \"preserveAspectRatio\" -v \"none\" \\\n # \"$svg_out\"\n # done\n #\n # sed -i '/<style.*>/{\n # r '\"$metabolite_patterns_css_f\"'\n # }' \"$path_out\"\n #\n # sed -i '/<g id=\"jic-defs\">/{\n # r /dev/stdin\n # }' \"$path_out\" < <(xmlstarlet sel -t -c '/svg/defs/*' \"$metabolite_patterns_svg_f\")\n # \"\"\"", "def __init__(self,file_name):\n \n #Load the graphic\n self.sprite_sheet = pygame.image.load(file_name).convert()", 
"def image(surface, node):\n url = node.get(\"{http://www.w3.org/1999/xlink}href\")\n if not url:\n return\n if url.startswith(\"data:\"):\n image_bytes = open_data_url(url)\n else:\n base_url = node.get(\"{http://www.w3.org/XML/1998/namespace}base\")\n if base_url:\n url = urlparse.urljoin(base_url, url)\n if node.url:\n url = urlparse.urljoin(node.url, url)\n if urlparse.urlparse(url).scheme:\n input_ = urlopen(url)\n else:\n input_ = open(url, 'rb') # filename\n image_bytes = input_.read()\n\n if len(image_bytes) < 5:\n return\n\n x, y = size(surface, node.get(\"x\"), \"x\"), size(surface, node.get(\"y\"), \"y\")\n width = size(surface, node.get(\"width\"), \"x\")\n height = size(surface, node.get(\"height\"), \"y\")\n surface.context.rectangle(x, y, width, height)\n surface.context.clip()\n\n if image_bytes[:4] == b\"\\x89PNG\":\n png_bytes = image_bytes\n elif image_bytes[:5] in (b\"\\x3csvg \", b\"\\x3c?xml\"):\n surface.context.save()\n surface.context.translate(x, y)\n if \"x\" in node:\n del node[\"x\"]\n if \"y\" in node:\n del node[\"y\"]\n if \"viewBox\" in node:\n del node[\"viewBox\"]\n tree = Tree(url=url, bytestring=image_bytes)\n tree_width, tree_height, viewbox = node_format(surface, tree)\n if not tree_width or not tree_height:\n tree_width = tree[\"width\"] = width\n tree_height = tree[\"height\"] = height\n node.image_width = tree_width or width\n node.image_height = tree_height or height\n scale_x, scale_y, translate_x, translate_y = \\\n preserve_ratio(surface, node)\n surface.set_context_size(*node_format(surface, tree))\n surface.context.translate(*surface.context.get_current_point())\n surface.context.scale(scale_x, scale_y)\n surface.context.translate(translate_x, translate_y)\n surface.draw(tree)\n surface.context.restore()\n # Restore twice, because draw does not restore at the end of svg tags\n surface.context.restore()\n return\n else:\n try:\n from pystacia import read_blob\n png_bytes = read_blob(image_bytes).get_blob('png')\n except:\n # No way to handle the image\n return\n\n image_surface = cairo.ImageSurface.create_from_png(BytesIO(png_bytes))\n\n node.image_width = image_surface.get_width()\n node.image_height = image_surface.get_height()\n scale_x, scale_y, translate_x, translate_y = preserve_ratio(surface, node)\n\n surface.context.rectangle(x, y, width, height)\n pattern_pattern = cairo.SurfacePattern(image_surface)\n surface.context.save()\n surface.context.translate(*surface.context.get_current_point())\n surface.context.scale(scale_x, scale_y)\n surface.context.translate(translate_x, translate_y)\n surface.context.set_source(pattern_pattern)\n surface.context.fill()\n surface.context.restore()", "def read_svg(svg_string, workspace, tolerance, forced_dpi=None, optimize=True):\n svgReader = SVGReader(tolerance, workspace)\n res = svgReader.parse(svg_string, forced_dpi)\n # {'boundarys':b, 'dpi':d, 'lasertags':l, 'rasters':r}\n\n # create an dba job from res\n # TODO: reader should generate an dba job to begin with\n job = {'head':{}, 'passes':[], 'items':[], 'defs':[]}\n if 'rasters' in res:\n for raster in res['rasters']:\n job['defs'].append({\"kind\":\"image\",\n \"data\":raster['data'] ,\n \"pos\":raster['pos'] ,\n \"size\": raster['size']})\n job['items'].append({\"def\":len(job['defs'])-1})\n\n if 'boundarys' in res:\n if 'dpi' in res:\n job['head']['dpi'] = res['dpi']\n for color,path in res['boundarys'].items():\n if optimize:\n pathoptimizer.optimize(path, tolerance)\n job['defs'].append({\"kind\":\"path\",\n \"data\":path})\n 
job['items'].append({\"def\":len(job['defs'])-1, \"color\":color})\n if optimize:\n job['head']['optimized'] = tolerance\n\n if 'lasertags' in res:\n # format: [('12', '2550', '', '100', '%', ':#fff000', ':#ababab', ':#ccc999', '', '', '')]\n # sort lasertags by pass number\n # def _cmp(a, b):\n # if a[0] < b[0]: return -1\n # elif a[0] > b[0]: return 1\n # else: return 0\n res['lasertags'].sort()\n # add tags ass passes\n for tag in res['lasertags']:\n if len(tag) == 11:\n idxs = []\n for colidx in range(5,10):\n color = tag[colidx]\n i = 0\n for item in job['items']:\n if 'color' in item and item['color'] == color:\n idxs.append(i)\n i += 1\n if \"passes\" not in job:\n job[\"passes\"] = []\n job[\"passes\"].append({\n \"items\": idxs,\n \"feedrate\": tag[1],\n \"intensity\": tag[3]\n })\n return job", "def send_svg():\n state = request.get_json()\n path = os.path.dirname(__file__).replace('core', 'resources/tmp')\n filename = path + \"/\" + now_date(str=True) + \"-roast.png\"\n cairosvg.svg2png(bytestring=state['svg'], write_to=filename)\n return jsonify({'success': True})", "def load_image(filename):\r\n image = pygame.image.load(filename)\r\n image = image.convert_alpha()\r\n return image, image.get_rect()", "def save_svg(string, file_name):\n file_handle = file(file_name, \"w\")\n file_handle.write(string)\n file_handle.close()", "def write_svg(\n self,\n outfile,\n scaling=10,\n style=None,\n fontstyle=None,\n background=\"#222\",\n pad=\"5%\",\n precision=None,\n ):\n bb = self.get_bounding_box()\n if bb is None:\n return\n close = True\n if hasattr(outfile, \"__fspath__\"):\n outfile = open(outfile.__fspath__(), \"w\")\n elif isinstance(outfile, (basestring, Path)):\n outfile = open(outfile, \"w\")\n else:\n close = False\n if style is None:\n style = {}\n if fontstyle is None:\n fontstyle = {}\n bb *= scaling\n x = bb[0, 0]\n y = -bb[1, 1]\n w = bb[1, 0] - bb[0, 0]\n h = bb[1, 1] - bb[0, 1]\n if background is not None:\n if isinstance(pad, basestring):\n if pad[-1] == \"%\":\n pad = max(w, h) * float(pad[:-1]) / 100\n else:\n pad = float(pad)\n x -= pad\n y -= pad\n w += 2 * pad\n h += 2 * pad\n outfile.write(\n \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<svg xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\"\n width=\"{}\" height=\"{}\" viewBox=\"{} {} {} {}\">\n<defs>\n<style type=\"text/css\">\n\"\"\".format(\n numpy.format_float_positional(w, trim=\"0\", precision=precision),\n numpy.format_float_positional(h, trim=\"0\", precision=precision),\n numpy.format_float_positional(x, trim=\"0\", precision=precision),\n numpy.format_float_positional(y, trim=\"0\", precision=precision),\n numpy.format_float_positional(w, trim=\"0\", precision=precision),\n numpy.format_float_positional(h, trim=\"0\", precision=precision),\n )\n )\n ldkeys, ltkeys = self.get_svg_classes()\n for k in ldkeys:\n l, d = k\n if k in style:\n style_dict = style[k]\n else:\n c = \"rgb({}, {}, {})\".format(\n *[\n int(255 * c + 0.5)\n for c in colorsys.hsv_to_rgb(\n (l % 3) / 3.0 + (l % 6 // 3) / 6.0 + (l // 6) / 11.0,\n 1 - ((l + d) % 8) / 12.0,\n 1 - (d % 3) / 4.0,\n )\n ]\n )\n style_dict = {\"stroke\": c, \"fill\": c, \"fill-opacity\": \"0.5\"}\n outfile.write(\".l{}d{} {{\".format(l, d))\n outfile.write(\" \".join(\"{}: {};\".format(*x) for x in style_dict.items()))\n outfile.write(\"}\\n\")\n for k in ltkeys:\n l, t = k\n if k in fontstyle:\n style_dict = fontstyle[k]\n else:\n c = \"rgb({}, {}, {})\".format(\n *[\n int(255 * c + 0.5)\n for c in 
colorsys.hsv_to_rgb(\n (l % 3) / 3.0 + (l % 6 // 3) / 6.0 + (l // 6) / 11.0,\n 1 - ((l + t) % 8) / 12.0,\n 1 - (t % 3) / 4.0,\n )\n ]\n )\n style_dict = {\"stroke\": \"none\", \"fill\": c}\n outfile.write(\".l{}t{} {{\".format(l, t))\n outfile.write(\" \".join(\"{}: {};\".format(*x) for x in style_dict.items()))\n outfile.write(\"}\\n\")\n outfile.write(\"</style>\\n\")\n for cell in self.get_dependencies(True):\n cell.to_svg(outfile, scaling, precision, \"\")\n outfile.write(\"</defs>\\n\")\n if background is not None:\n outfile.write(\n '<rect x=\"{}\" y=\"{}\" width=\"{}\" height=\"{}\" fill=\"{}\" stroke=\"none\"/>\\n'.format(\n numpy.format_float_positional(x, trim=\"0\", precision=precision),\n numpy.format_float_positional(y, trim=\"0\", precision=precision),\n numpy.format_float_positional(w, trim=\"0\", precision=precision),\n numpy.format_float_positional(h, trim=\"0\", precision=precision),\n background,\n )\n )\n self.to_svg(outfile, scaling, precision, 'transform=\"scale(1 -1)\"')\n outfile.write(\"</svg>\")\n if close:\n outfile.close()", "def loadimg(filename, rect=False):\n filename = os.path.join('data', filename)\n try:\n img = pygame.image.load(filename)\n if img.get_alpha is None:\n img = img.convert()\n else:\n img = img.convert_alpha()\n except pygame.error, message:\n print \"Impossible de charger l'image : \", filename\n raise SystemExit, message\n if rect:\n return img, img.get_rect()\n else:\n return img", "def _check_svg_file(svg_file):\n if isinstance(svg_file, str):\n try:\n svg = sg.fromfile(svg_file)\n except Exception as exc:\n raise Exception('Error reading svg file {}.'.format(svg_file)) from exc\n else:\n return svg\n\n if isinstance(svg_file, sg.SVGFigure):\n return svg_file\n\n raise ValueError('Expected `svg_file` to be `str` or `svgutils.SVG`, got {}.'.format(type(svg_file)))", "def load_tile(path, tile_size):\n img = pyglet.resource.image(path)\n img.width = tile_size\n img.height = tile_size\n return img", "def loadCustomFont(path,name,size):\n\n fullname = os.path.join(path,name)\n f = pygame.font.Font(fullname,size)\n return f", "def load_png(name):\n fullname = os.path.join('data', name)\n try:\n image = pygame.image.load(fullname)\n if image.get_alpha is None:\n image = image.convert()\n else:\n image = image.convert_alpha()\n except pygame.error, message:\n print 'Cannot load image:', fullname\n raise SystemExit, message\n return image, image.get_rect()", "def draw(self, stats=[]):\n clear_output(wait=True)\n svg_html = self.to_html(stats)\n display(svg_html)", "def show_svg(tmp_path = DEFAULT_PATH): \n global show_counter\n file_name = tmp_path + \"show_tmp_file_{}.svg\".format(show_counter)\n plt.savefig(file_name)\n os.system(\"open {}\".format(file_name))\n show_counter += 1\n plt.close()", "def image_svg():\n data = resource(\"images/svg_logo.svg\")\n return Response(data, headers={\"Content-Type\": \"image/svg+xml\"})", "def cairo_surface(filename, width, height):\n surface = cairo.PDFSurface(filename, width, height)\n return surface", "def load_img(name):\n\tpath = os.path.join(IMG_DIR, name)\n\timage = pygame.image.load(path)\n\tif image.get_alpha is None:\n\t\timage = image.convert()\n\telse:\n\t\timage = image.convert_alpha()\n\timage_rect = image.get_rect()\n\treturn image, image_rect", "def load_image(self, filename):\n return pygame.image.load(os.path.join('images', filename))", "def _load(self):\r\n\t\t\r\n\t\tself.image.blit(self.sheet.sheet, (0,0), (self.x, self.y, self.size, self.size))" ]
[ "0.6418509", "0.618921", "0.61643314", "0.58858806", "0.5492147", "0.5443901", "0.54266787", "0.53245497", "0.5302827", "0.5243275", "0.5228394", "0.5216369", "0.520375", "0.5180269", "0.5172298", "0.5142751", "0.513323", "0.51182353", "0.5066457", "0.5053668", "0.504966", "0.5044753", "0.5041556", "0.5016602", "0.5015532", "0.50007105", "0.49634996", "0.49575508", "0.49489808", "0.49156073" ]
0.8696741
0
Screen.load_fonts() load all required fonts Loads all fonts defined in Screen.fonts by name and size as pygame.Font object.
def load_fonts(self): for key, font in enumerate(self.fonts): self.fonts[key]['font'] = load_font(font['name'], font['size']) checkpoint('fonts')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_fonts():\r\n return pygame.font.get_fonts()", "def loadCustomFont(path,name,size):\n\n fullname = os.path.join(path,name)\n f = pygame.font.Font(fullname,size)\n return f", "def set_fonts(cls, fonts={}):\n for font in fonts:\n if font not in cls._fonts:\n cls._fonts[font] = _Font()\n cls._fonts[font].replace(cls._create_font(fonts[font], 16))\n\n if not cls._fonts[\"widget\"]:\n cls._fonts[\"widget\"].replace(cls._create_font(\"Arial\", 16))\n if not cls._fonts[\"title\"]:\n name = fonts[\"widget\"] if (\"widget\" in fonts) else \"Arial\"\n cls._fonts[\"title\"].replace(cls._create_font(name, 30))\n if not cls._fonts[\"mono\"]:\n cls._fonts[\"mono\"].replace(cls._create_font(\n \"Ubuntu Mono, FreeMono, Monospace\", 16))\n\n #if SCREEN._opengl:\n # cls.mono_w = cls[\"mono\"].font.Advance(\"e\")\n #else:\n cls.mono_w = cls[\"mono\"].render(\"e\", False, (0,0,0)).get_width()", "def loadSystemFont(name, size):\n\n try:\n f = pygame.font.SysFont(name,size)\n except error, message:\n print \"Cannot load font: \", name\n raise SystemExit, message\n return f", "def load_font(fontname, fontsize):\n # system fonts\n if pygame.font.get_fonts().count(fontname) == 1:\n return pygame.font.SysFont(fontname, fontsize)\n # standard MS fonts\n if os.path.exists('/usr/share/fonts/truetype/msttcorefonts/'+fontname+'.ttf'):\n return pygame.font.Font('/usr/share/fonts/truetype/msttcorefonts/'+fontname+'.ttf', fontsize)\n # search /usr/share/fonts/\n for root, dirs, files in os.walk('/usr/share/fonts'):\n if fontname+'.ttf' in files:\n return pygame.font.Font(os.path.join(root, fontname+'.ttf'), fontsize)\n # search in working dir\n if os.exists('./'+fontname+'.ttf'):\n return pygame.font.Font(fontname+'.ttf', fontsize)\n # last resort: return default font\n return pygame.font.Font(None, fontsize)", "def load_font(fontSize):\n f1='/usr/share/fonts/corefonts/arialbd.ttf' \n f2='/usr/share/fonts/truetype/msttcorefonts/Arial_Bold.ttf'\n if os.path.isfile(f1): font=ImageFont.truetype(f1,fontSize)\n if os.path.isfile(f2): font=ImageFont.truetype(f2,fontSize)\n return font", "def getAvailableFonts():\n return list(AVAILABLE_FONTS)", "def load_all_fonts(directory, accept=(\".ttf\",)):\n return load_all_music(directory, accept)", "def load_all_fonts(directory, accept=(\".ttf\",)):\n return load_all_music(directory, accept)", "def get_fonts():\n\n fontpath = \"/usr/share/fonts/truetype/freefont/\"\n font1 = \"FreeSansBold.ttf\"\n pfont = {}\n pfont['big'] = ImageFont.truetype(fontpath + font1, 120)\n pfont['medium'] = ImageFont.truetype(fontpath + font1, 70)\n pfont['small'] = ImageFont.truetype(fontpath + font1, 25)\n pfont['time'] = ImageFont.truetype(fontpath + font1, 160)\n \n return pfont", "def initialize():\n #carga las fuente del usuario\n for family in USER_FONTS:\n for font in USER_FONTS[family]:\n name, path = USER_FONTS[family][font]\n pdfmetrics.registerFont(TTFont(name, path))", "def generate_fonts_doc() -> None:\n text = 'pygame menu'\n save_font_image(pygame_menu.font.FONT_8BIT, text, '_static/font_8bit.png')\n save_font_image(pygame_menu.font.FONT_BEBAS, text, '_static/font_bebas.png')\n save_font_image(pygame_menu.font.FONT_COMIC_NEUE, text, '_static/font_comic_neue.png')\n save_font_image(pygame_menu.font.FONT_DIGITAL, text, '_static/font_digital.png')\n save_font_image(pygame_menu.font.FONT_FIRACODE, text, '_static/font_firacode.png')\n save_font_image(pygame_menu.font.FONT_FIRACODE_BOLD, text, '_static/font_firacode_bold.png')\n save_font_image(pygame_menu.font.FONT_FIRACODE_BOLD_ITALIC, 
text, '_static/font_firacode_bold_italic.png')\n save_font_image(pygame_menu.font.FONT_FIRACODE_ITALIC, text, '_static/font_firacode_italic.png')\n save_font_image(pygame_menu.font.FONT_FRANCHISE, text, '_static/font_franchise.png')\n save_font_image(pygame_menu.font.FONT_HELVETICA, text, '_static/font_helvetica.png')\n save_font_image(pygame_menu.font.FONT_MUNRO, text, '_static/font_munro.png')\n save_font_image(pygame_menu.font.FONT_NEVIS, text, '_static/font_nevis.png')\n save_font_image(pygame_menu.font.FONT_OPEN_SANS, text, '_static/font_open_sans.png')\n save_font_image(pygame_menu.font.FONT_OPEN_SANS_BOLD, text, '_static/font_open_sans_bold.png')\n save_font_image(pygame_menu.font.FONT_OPEN_SANS_ITALIC, text, '_static/font_open_sans_italic.png')\n save_font_image(pygame_menu.font.FONT_OPEN_SANS_LIGHT, text, '_static/font_open_sans_light.png')\n save_font_image(pygame_menu.font.FONT_PT_SERIF, text, '_static/font_pt_serif.png')", "def loadDefaultFont(size):\n\n try:\n f = pygame.font.Font(None,size)\n except error, message:\n print \"Cannot load the default font\"\n raise SystemExit, message\n return f", "def font(size=20, name=None):\n name = name or \"regular\"\n path = ROOT_DIR / \"wclib\" / \"assets\" / (name + \".ttf\")\n return pygame.font.Font(path, size)", "def _load_font(file: str) -> None:\n\n pyglet.font.add_file(Config.RES_DIR + \"font\" + Config.FILE_SEPARATOR + file)\n pyglet.font.load(\"Munro\")", "def load_font(self, file):\n self.font = []\n with open(file, 'rb') as f:\n while True:\n buf = f.read(FONT_HEIGHT)\n if not buf:\n break\n self.font.append(buf)", "def system_font_demo(my_canvas, fonts):\n pos_y = 750\n for font in fonts:\n try:\n ttf = TTFont(font, fonts[font])\n except:\n # Skip this font\n continue\n\n pdfmetrics.registerFont(ttf)\n\n my_canvas.setFont(font, 12)\n my_canvas.drawString(30, pos_y, font)\n pos_y -= 10\n if pos_y < 40:\n my_canvas.showPage()\n pos_y = 750", "def getfonts(self):\n return self.vffile.getfonts()", "def available_text_fonts():\n bad = [u'acalc',\n u'acb',\n u'aco',\n u'acp']\n all = available_fonts()\n fonts = []\n for f in all:\n if (f == u'Series 60 ZDigi'):\n continue\n for b in bad:\n try:\n if (f.lower().startswith(b) and f[len(b)].isdigit()):\n break\n except IndexError:\n pass\n else:\n fonts.append(f)\n\n\n\n def compare(a, b):\n return -(a.lower() < b.lower())\n\n\n fonts.sort(compare)\n return fonts", "def get_fonts(self):\n\n font_path = self.execute_shell([\"figlet\", \"-I2\"])\n\n # get the font files installed in font_path,\n # and clean them up for printing\n fonts = [os.path.split(x)[1].split(\".\")[0] \\\n for x in self.execute_shell([\"find\",\n font_path, \"-iname\", \"*.flf\"]).split(\"\\n\")]\n\n return fonts", "def _create_font(cls, font, size):\n if font[-4:] in (\".ttf\", \".otf\"):\n return pygame.font.Font(font, size)\n else:\n return pygame.font.SysFont(font, size)", "def get_fonts():\n fonts = [f.name for f in matplotlib.font_manager.fontManager.ttflist]\n fonts.append([f.name for f in matplotlib.font_manager.fontManager.afmlist])\n\n fonts = sorted(list(set(fonts[:-1])))\n\n return fonts", "def get_fonts(folder=None):\n fonts = {}\n if folder:\n cmd = ['fc-scan', '--format', '\"%{file}:%{family}:style=%{style}\\n\"', folder]\n else:\n cmd = ['fc-list', ':', 'file', 'family', 'style']\n for line in subprocess.check_output(cmd).decode('utf-8').split(\"\\n\"):\n logger.debug(line)\n line.strip()\n if not line: continue\n if 'otf' not in line and 'ttf' not in line: continue\n parts = line.split(':')\n 
path = parts[0]\n families = parts[1].strip().split(',')\n styles = parts[2].split('=')[1].split(',')\n if len(families) == 1 and len(styles) > 1:\n families = [families[0]] * len(styles)\n elif len(families) > 1 and len(styles) == 1:\n styles = [styles[0]] * len(families)\n if len(families) != len(styles):\n logger.debug(\"Problem with this font: \" + line)\n continue\n for i in range(len(families)):\n try: fonts[families[i]]\n except: fonts[families[i]] = dict()\n fonts[families[i]][styles[i]] = path\n logger.debug(\"Added this font: \" + str((families[i], styles[i], path)))\n return fonts", "def process_fonts():\n fonts_path = os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/fonts/')\n static_fonts = os.path.join(settings.BASE_DIR, 'static/CMESH/fonts/')\n\n copy_files(fonts_path, static_fonts)", "def load_font(self, path: str, font_family: str, mode: str='n') -> None:\n font = PDFTrueTypeFont('F'+str(self.index), path)\n if not font_family in self.fonts:\n self.fonts[font_family] = {'n': font}\n self.fonts[font_family][mode] = font\n self.index += 1", "def CreateFont(name, size):\r\n\ttry:\r\n\t\tf = pygame.font.Font(name, size)\r\n\t\treturn f\r\n\texcept IOError:\r\n\t\treturn pygame.font.SysFont(name, size)", "def fontDialog(*args, FontList: bool=True, scalable: bool=True, **kwargs)->AnyStr:\n pass", "def setHardwareFont():\n dislin.hwfont()", "def get_font_dict(*a, **kw):\n return get_font_dict(*a, **kw)", "def get_font_dict(f):\n return tk_font.Font(font=f).actual()" ]
[ "0.79402465", "0.69677985", "0.6945662", "0.69180137", "0.6783275", "0.66593045", "0.6651047", "0.65976846", "0.65976846", "0.6560783", "0.6481212", "0.6473694", "0.6451046", "0.641963", "0.6212922", "0.6205454", "0.61772597", "0.61706495", "0.6160708", "0.6146837", "0.61234784", "0.60909164", "0.59685194", "0.59653276", "0.5932562", "0.5929475", "0.588632", "0.5876712", "0.58526427", "0.58045226" ]
0.82232374
0
Screen.deactivate_screensaver(): deactivate a running screensaver
def deactivate_screensaver(self): # TODO: support xscreensaver and maybe others (kscreensaver?) try: try: self.session_bus = dbus.SessionBus() self.scrsvr = self.session_bus.get_object( 'org.gnome.ScreenSaver', '/org/gnome/ScreenSaver' ) self.scrsvr_cookie = self.scrsvr.Inhibit( 'cmus-status', 'Showing played track info' ) except dbus.exceptions.DBusException: pass except NameError: pass # TODO: doesn't belong here f = file(os.path.expanduser(os.path.join('~', '.cmus', 'inhibit-osd')), 'w') f.close() checkpoint('screensaver')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def activate_screensaver(self):\n # TODO: support xscreensaver and maybe others (kscreensaver?)\n try:\n try:\n self.scrsvr.UnInhibit(self.scrsvr_cookie)\n except dbus.exceptions.DBusException:\n pass\n except (NameError, AttributeError):\n pass", "def quit(self):\n pygame.display.quit()\n os.unlink(os.path.expanduser(os.path.join('~', '.cmus', 'inhibit-osd')))\n if hasattr(self, 'lircsock'):\n pylirc.exit()\n if self.thread and self.queue:\n self.queue.put('quit', False)\n self.queue.join()\n self.activate_screensaver()", "def AutoUnlockScreen(self):\n try:\n adb_unlock_args = _UNLOCK_SCREEN_KEYEVENT % {\n \"adb_bin\": self._adb_command,\n \"device_serial\": self._device_serial}\n subprocess.check_call(adb_unlock_args.split())\n except subprocess.CalledProcessError:\n utils.PrintColorString(\"Failed to unlock screen.\"\n \"(adb_port: %s)\" % self._adb_port,\n utils.TextColors.WARNING)", "def lockScreensaver(display, screensaver=\"xscreensaver\"):\n result = subprocess.run(['tsdiscon.exe', 'console'], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, check=False)\n logger.info(\"Running command {}\".format(result.args))\n for line in iter(result.stdout.splitlines()):\n logger.debug(line)\n for line in iter(result.stderr.splitlines()):\n logger.warning(line)\n # hopefully the user was disconnected.\n return 0", "def deactivate(self):\n self._glir.command('FRAMEBUFFER', self._id, False)", "def deactivate(self):\n self.in_foreground = False\n self.o.noCursor()\n logging.info(\"{} deactivated\".format(self.name))", "def end_screen(win):\n\tpass", "def unlock(self):\n self.shell(\"input keyevent MENU\")\n self.shell(\"input keyevent BACK\")", "def stopComponent(self, opts):\n screen = self.findScreen(opts.verbose)\n\n if screen == None:\n print(\"No screen session named %s to stop\" % self._screenName, file=sys.stderr)\n return False\n\n print(\"Stopping screen instance %s\" % screen)\n\n (autoRestartPid, comm) = execCmd(\"ps --ppid %s -o pid=,comm=\" % screen.split(\".\")[0], opts.verbose).split()\n\n # Any uncaught signal sent to mono (including SIGUSR1) will kill it\n if comm == \"bash\":\n os.kill(int(autoRestartPid), signal.SIGUSR1)\n\n execCmd(\"%s -S %s -p 0 -X stuff quit$(printf \\r)\" % (self._screenPath, screen), opts.verbose)\n\n timeElapsed = 0\n\n while timeElapsed < self._pollingTimeMax:\n time.sleep(self._pollingInterval)\n timeElapsed += self._pollingInterval\n\n screen = self.findScreen(opts.verbose)\n\n if screen == None:\n print(\"Screen instance %s terminated.\" % self._screenName)\n return True\n\n if timeElapsed % self._pollingNotificationInterval == 0:\n print(\"Waited %s seconds for screen named %s to terminate\" % (timeElapsed, self._screenName))\n\n print(\"Screen %s has not terminated after %s seconds. 
Please investigate.\" % (self._screenName, self._pollingTimeMax), file=sys.stderr)\n return False", "def deactivate(self):\n super(Pixiv_bot, self).deactivate()", "def return_screen_to_normal():\n curses.endwin()", "def close(self):\n if(screen == self):\n screen = None", "def transition_back():\n SCREEN_MANAGER.current = MAIN_SCREEN_NAME", "def Stopped(ss):\n ss.IsRunning = False\n if ss.Win != 0:\n vp = ss.Win.WinViewport2D()\n if ss.ToolBar != 0:\n ss.ToolBar.UpdateActions()\n vp.SetNeedsFullRender()\n ss.UpdateClassView()", "def deactivate(self):\r\n self.activated = False", "def deactivate(self):\n pass", "def deactivate(self):\n pass", "def deactivate(self):\n self.active = False", "def deactivate(self):\n self.active = False", "def deactivate(self):\r\n self.update_enrollment(is_active=False)", "def _turn_off(self):\n self._turn_display('OFF')", "def erase(self):\n command = \"export STLINK_DEVICE=\" + self.stlink.port + \"; st-flash erase\"\n subprocess.run(command, shell=True)\n time.sleep(1)", "def quit_alternate_screen(self) -> None:\n if self._in_alternate_screen:\n stdout = HANDLE(\n self._winapi(windll.kernel32.GetStdHandle, STD_OUTPUT_HANDLE)\n )\n self._winapi(windll.kernel32.SetConsoleActiveScreenBuffer, stdout)\n self._winapi(windll.kernel32.CloseHandle, self.hconsole)\n self.hconsole = stdout\n self._in_alternate_screen = False", "def deactivate():\n deactivate_connection_with_mainloop(get_uuid())", "def stop_monitor(self):\n self._logger.info(\"Stopping monitor...\")\n if self.monitor_lc:\n self.monitor_lc.cancel()\n if self.monitor_process:\n self.monitor_process.terminate()\n os.system(\"pkill -f listenblock\") # To kill the spawned Go run subprocess", "def snap_deactivate(mnode, snapname):\n\n cmd = \"gluster snapshot deactivate %s --mode=script\" % snapname\n return g.run(mnode, cmd)", "def shutdown_capture(self):\n\n if self._hook_screenshot is not None:\n keyboard.unhook(self._hook_screenshot)", "def unregister(self):\r\n self.__screen.unregister_asteroid(self)", "def deactivate(self):\n self._is_active = False", "def deactivate(self):\n if self._parser.env.get('current_program', 0) != 0:\n self._parser.env['current_program'] = 0\n gl.glUseProgram(0)" ]
[ "0.7479362", "0.63613266", "0.63610584", "0.633496", "0.6135051", "0.5999619", "0.59912586", "0.5932093", "0.58503425", "0.5755786", "0.5723827", "0.5719525", "0.57059157", "0.5682629", "0.5667251", "0.5652389", "0.5652389", "0.5630067", "0.5630067", "0.5618864", "0.5607908", "0.55957144", "0.5594603", "0.55722547", "0.5571841", "0.55694723", "0.55268836", "0.55207956", "0.55111325", "0.5510498" ]
0.83224225
0
Screen.activate_screensaver(): activate the screensaver. Reactivates a previously deactivated screensaver.
def activate_screensaver(self): # TODO: support xscreensaver and maybe others (kscreensaver?) try: try: self.scrsvr.UnInhibit(self.scrsvr_cookie) except dbus.exceptions.DBusException: pass except (NameError, AttributeError): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def deactivate_screensaver(self):\n # TODO: support xscreensaver and maybe others (kscreensaver?)\n try:\n try:\n self.session_bus = dbus.SessionBus()\n self.scrsvr = self.session_bus.get_object(\n 'org.gnome.ScreenSaver',\n '/org/gnome/ScreenSaver'\n )\n self.scrsvr_cookie = self.scrsvr.Inhibit(\n 'cmus-status',\n 'Showing played track info'\n )\n except dbus.exceptions.DBusException:\n pass\n except NameError:\n pass\n # TODO: doesn't belong here\n f = file(os.path.expanduser(os.path.join('~', '.cmus', 'inhibit-osd')), 'w')\n f.close()\n checkpoint('screensaver')", "def lockScreensaver(display, screensaver=\"xscreensaver\"):\n result = subprocess.run(['tsdiscon.exe', 'console'], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, check=False)\n logger.info(\"Running command {}\".format(result.args))\n for line in iter(result.stdout.splitlines()):\n logger.debug(line)\n for line in iter(result.stderr.splitlines()):\n logger.warning(line)\n # hopefully the user was disconnected.\n return 0", "def enter_alternate_screen(self) -> None:\n if not self._in_alternate_screen:\n GENERIC_READ = 0x80000000\n GENERIC_WRITE = 0x40000000\n\n # Create a new console buffer and activate that one.\n handle = HANDLE(\n self._winapi(\n windll.kernel32.CreateConsoleScreenBuffer,\n GENERIC_READ | GENERIC_WRITE,\n DWORD(0),\n None,\n DWORD(1),\n None,\n )\n )\n\n self._winapi(windll.kernel32.SetConsoleActiveScreenBuffer, handle)\n self.hconsole = handle\n self._in_alternate_screen = True", "def set_current_screen(self, screen):\n\t\tself.current_screen = screen\n\t\tscreen.screen_manager = self", "def isScreensaverOn(display, screensaver=\"xscreensaver\"):\n # note that because of windows - only if the lockscreen shows no users is it considered as \"on\"\n # if you are in the user selection section, the selected user will be reported as active, even if you haven't logged in with that account!\n try:\n getUserForDisplay(getCurrentDisplay())\n logger.debug(\"We have a user logged on console.\")\n return False\n except Exception as e:\n #no user logged on, we can consider that the screensaver is on\n logger.debug(\"Received exception \"+str(e)+\". Assuming lock screen is on...\")\n return True", "def saveActivate():\n save()\n activate(block=\"true\")", "def _set_SS_State(self,newState):\r\n try:\r\n win32gui.SystemParametersInfo(win32con.SPI_SETSCREENSAVEACTIVE,newState,win32con.SPIF_UPDATEINIFILE)\r\n except:\r\n self.__error = True", "def AutoUnlockScreen(self):\n try:\n adb_unlock_args = _UNLOCK_SCREEN_KEYEVENT % {\n \"adb_bin\": self._adb_command,\n \"device_serial\": self._device_serial}\n subprocess.check_call(adb_unlock_args.split())\n except subprocess.CalledProcessError:\n utils.PrintColorString(\"Failed to unlock screen.\"\n \"(adb_port: %s)\" % self._adb_port,\n utils.TextColors.WARNING)", "def set_keepawake(keep_screen_awake=False):\r\n flags = ES_CONTINUOUS | ES_SYSTEM_REQUIRED\r\n if keep_screen_awake:\r\n flags |= ES_DISPLAY_REQUIRED\r\n\r\n ctypes.windll.kernel32.SetThreadExecutionState(flags)", "def enable_screen_and_show_control_buttons(self):\n event_logger.debug(\"Activating display\")\n rpi_utils.toggle_screen_state(\"on\")\n self.show_control_buttons()", "def _activate(self):\n self.active = True", "def activate(self):\r\n self.update_enrollment(is_active=True)", "def start_system_restore(self):\n confirmation = input(\"Do you want to system restore? 
(Y or N)\\n\")\n if confirmation in ('Y', 'y'):\n return self.mycam.devicemgmt.StartSystemRestore()\n return None", "def activate(self):\n self.active = True", "def activate(self):\n self.active = True", "def gamemode_startscreen(self) -> None:\n self.__draw_startscreen()", "def setScreenMode(mode='normal'):\n screendict = {'normal':'REVERS', 'black':'NOREV'}\n dislin.scrmod(screendict[mode])", "def activate(self):\n self._is_active = True", "def transition_back():\n SCREEN_MANAGER.current = MAIN_SCREEN_NAME", "def _get_SS_State(self):\r\n try :\r\n state = win32gui.SystemParametersInfo(win32con.SPI_GETSCREENSAVEACTIVE)\r\n return state\r\n except:\r\n self.__error = True\r\n return False", "def init(stdscr):\n # Ensures a clean visual space.\n stdscr.clear()\n curses.curs_set(False)\n\n # Set the background of the app to the secondary color.\n stdscr.bkgd(' ', curses.color_pair(1))\n stdscr.refresh()", "def set_lock_in_full_screen_mode(self, bValue):\n\t\tcall_sdk_function('PrlVmCfg_SetLockInFullScreenMode', self.handle, bValue)", "def early_interact():\n\n global fullscreen\n global current_movie\n\n fullscreen = True\n current_movie = None", "def do_activate(self, *args, **kwargs):\n self.register_signals()\n self.perform_setup()\n assert self.main_window\n self.main_window.show()\n self.hold()", "def change_focus(window):\n set_active_window_checked(window).check()\n sleep(0.01)", "def lib_screen(self, lib_screen):\n self.logger.debug(\"In 'lib_screen' setter.\")\n\n self._lib_screen = lib_screen", "def screen_open(self, sname=None):\n if sname:\n xtitle = sname\n else:\n xtitle = 'msh_' + ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(8))\n\n self.run(\"screen -S %s\" % (xtitle))\n sret = self.run(\"echo $STY\")\n\n return sret", "def SetLockScreenSettings(device):\n if device.build_type not in _COMPATIBLE_BUILD_TYPES:\n logging.warning('Unable to disable lockscreen on %s builds.',\n device.build_type)\n return\n\n def get_lock_settings(table):\n return [(table, 'lockscreen.disabled', '1'),\n (table, 'lockscreen.password_type', PASSWORD_QUALITY_UNSPECIFIED),\n (table, 'lockscreen.password_type_alternate',\n PASSWORD_QUALITY_UNSPECIFIED)]\n\n if device.FileExists(_LOCK_SCREEN_SETTINGS_PATH):\n db = _LOCK_SCREEN_SETTINGS_PATH\n locksettings = get_lock_settings('locksettings')\n columns = ['name', 'user', 'value']\n generate_values = lambda k, v: [k, '0', v]\n elif device.FileExists(_ALTERNATE_LOCK_SCREEN_SETTINGS_PATH):\n db = _ALTERNATE_LOCK_SCREEN_SETTINGS_PATH\n locksettings = get_lock_settings('secure') + get_lock_settings('system')\n columns = ['name', 'value']\n generate_values = lambda k, v: [k, v]\n else:\n logging.warning('Unable to find database file to set lock screen settings.')\n return\n\n for table, key, value in locksettings:\n \n values = generate_values(key, value)\n\n cmd = \"\"\"begin transaction;\ndelete from '%(table)s' where %(primary_key)s='%(primary_value)s';\ninsert into '%(table)s' (%(columns)s) values (%(values)s);\ncommit transaction;\"\"\" % {\n 'table': table,\n 'primary_key': columns[0],\n 'primary_value': values[0],\n 'columns': ', '.join(columns),\n 'values': ', '.join([\"'%s'\" % value for value in values])\n }\n output_msg = device.RunShellCommand('sqlite3 %s \"%s\"' % (db, cmd),\n as_root=True)\n if output_msg:\n logging.info(' '.join(output_msg))", "def set_use_desktop(self, bEnabled):\n\t\tcall_sdk_function('PrlVmCfg_SetUseDesktop', self.handle, bEnabled)", "def unlock(self):\n self.shell(\"input keyevent MENU\")\n 
self.shell(\"input keyevent BACK\")" ]
[ "0.71691304", "0.6425529", "0.57666975", "0.5484504", "0.5466138", "0.53428775", "0.52931684", "0.5235076", "0.51869184", "0.5186303", "0.5072131", "0.50614804", "0.5037811", "0.5016691", "0.5016691", "0.5003856", "0.49859083", "0.49517408", "0.4933769", "0.48208684", "0.48007408", "0.4798402", "0.4784577", "0.47716957", "0.4758028", "0.47343597", "0.47258574", "0.4718407", "0.47140068", "0.4708377" ]
0.82880116
0
compute weights for a nearest-neighbor sampling between low and high resolution grids; llat, llon specify the lat and longitude grids on the low resolution grid; hlat, hlon specify the lat and longitude grids on the high resolution grid; returns a low-res grid with values of n high-res points per grid cell
def gen_weights(hlat,hlon,llat,llon,mask=None,verbose=False): if len(llat.shape)==1: llon,llat=np.meshgrid(llon,llat) if len(hlat.shape)==1: hlon,hlat=np.meshgrid(hlon,hlat) output=np.zeros(llon.shape) if mask==None: mask=np.ones(hlat.shape,dtype=bool) search=2 if verbose: print("Total={}".format(hlat.shape[0])) for i in range(hlat.shape[0]): dists=(llat-hlat[i,0])**2 + (llon-hlon[i,0])**2 lastx,lasty=np.unravel_index(np.argmin(dists),dists.shape) if verbose: print(i,end=" ") sys.stdout.flush() for j in range(hlat.shape[1]): sx,ex=lastx-search,lastx+search sy,ey=lasty-search,lasty+search dists=(llat[sx:ex,sy:ey]-hlat[i,j])**2 + (llon[sx:ex,sy:ey]-hlon[i,j])**2 subx,suby=np.unravel_index(np.argmin(dists),dists.shape) curx=subx+sx cury=suby+sy if mask[i,j]: output[curx,cury]+=1 lastx=curx lasty=cury return output
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lat_weights_regular_grid(lat): \n dlat = np.abs(np.diff(lat))\n np.testing.assert_almost_equal(dlat, dlat[0])\n w = np.abs(np.sin(np.radians(lat + dlat[0] / 2.)) - np.sin(np.radians(lat - dlat[0] / 2.)))\n\n if np.abs(lat[0]) > 89.9999: \n w[0] = np.abs(1. - np.sin(np.radians(np.pi / 2 - dlat[0])))\n\n if np.abs(lat[-1]) > 89.9999:\n w[-1] = np.abs(1. - np.sin(np.radians(np.pi / 2 - dlat[0])))\n\n return w", "def _buildWeights(self):\r\n # Compute the spatial tree\r\n kd = spatial.cKDTree(self.XYin)\r\n \r\n # Perform query on all of the points in the grid\r\n dist,self.ind=kd.query(self.XYout,distance_upper_bound=self.maxdist,k=self.NNear)\r\n \r\n self.Nc = np.size(self.ind,axis=0)\r\n print '%d interpolation points.'%self.Nc\r\n # Now loop through and get the weights for each point\r\n self.W = np.zeros((self.NNear,self.Nc))\r\n\r\n # Print percentages\r\n p0=0\r\n pstep=5\r\n for ii in range(0,self.Nc):\r\n \r\n if self.verbose:\r\n pfinish = float(ii)/float(self.Nc)*100.0\r\n if pfinish> p0:\r\n print '%3.1f %% complete...'%pfinish\r\n p0+=pstep\r\n \r\n W = self.getWeights(dist[ii,:],self.XYin[self.ind[ii,:],0],self.XYin[self.ind[ii,:],1])\r\n self.W[:,ii] = W.T", "def get_grid_weights(lookup, u):\n locations = np.arange(\n int(np.floor(u - lookup.W / 2)) + 1,\n int(np.floor(u + lookup.W / 2) + 1))\n nu_C = (u - lookup.W / 2) - np.floor(u - lookup.W / 2)\n rev = (nu_C > 0.5)\n if rev:\n nu_C = 1.0 - nu_C\n loc = lookup.Ms * nu_C\n pt = int(np.floor(loc)) if lookup.degree > 0 else int(np.round(loc))\n ft = loc - pt\n # Perform polynomial interpolation\n weights = lookup.table[0][:, pt].copy()\n factor = 1\n for k in range(lookup.degree):\n factor *= (ft - k) / (k + 1)\n weights += lookup.table[k + 1][:, pt] * factor\n if rev:\n weights = weights[::-1]\n return locations, weights", "def zonal_avg2(data,Log=False):\n print 'setting up the destination grid'\n # get lat and lon for new regular grid\n# fpin = Nio.open_file('/home/ivan/Python/data/lat_t.nc','r')\n fpin = Nio.open_file('/home/emunoz/Python/mapping/model_grid/lat_t.nc','r')\n lat_t = fpin.variables['lat_t'][:]\n lat_t_edges = fpin.variables['lat_t_edges'][:]\n fpin.close()\n# fpin = Nio.open_file('/home/ivan/Python/data/gx3v5.nc','r')\n fpin = Nio.open_file('/home/emunoz/Python/mapping/model_grid/gx3v5.nc','r')\n lon_t = N.sort(fpin.variables['TLONG'][0,:])\n ulon = N.sort(fpin.variables['ULONG'][0,:])\n lon_t_edges = N.concatenate((ulon,ulon[0,N.newaxis]+360.),0)\n # get gx3v5 lat and lon\n tlon = fpin.variables['TLONG'][:]\n tlat = fpin.variables['TLAT'][:]\n fpin.close()\n\n # compute area of cells in new regular grid\n area = grid_area(lon_t_edges,lat_t_edges)\n\n nlat = lat_t.shape[0]\n nlon = lon_t.shape[0]\n\n print 'computing weights for grid cell'\n ilist = []\n jlist = []\n wghts2D = []\n wghts3D = []\n for i in range(nlat):\n for j in range(nlon):\n i_inds, j_inds = find_stn_idx(lon_t[j], lat_t[i], tlon, tlat)\n ilist.append(i_inds)\n jlist.append(j_inds)\n dist = gc_dist(lon_t[i], lat_t[i], tlon, tlat)\n # make weights=0 on land\n work2D = 1./MA.array(dist,mask=data[0,...].mask)\n wghts2D.append(MA.filled(N.take(N.take(work2D,i_inds,0),j_inds,1)\n ,0))\n\n work3D = 1./MA.array(N.resize(dist,data.shape),mask=data.mask)\n wghts3D.append(MA.filled(N.take(N.take(work3D,i_inds,-2),j_inds,-1)\n ,0))\n\n #print 'computing zonal average'\n return lon_t, lat_t, ilist, jlist, wghts2D, wghts3D", "def test_nearest_neighbour_regular_1d():\n # test with regular grid and 1d coords\n grid_lon = np.arange(100)\n grid_lat = 
np.arange(50)\n data = np.zeros((50, 100))\n\n # the four nearest values for the first point\n data[20:22, 10:12] = 7\n\n # the four nearest values for the second point\n data[17:19, 13:15] = 8\n\n # the actual test\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, (10.2, 13.2), (20.2, 17.2), npoints=4)(data)\n np.testing.assert_array_almost_equal(res, [7, 8])\n\n # same test, but with 3d-data (e.g., level, lat, lon)\n data2 = np.zeros((10, 50, 100))\n for i in range(10):\n data2[i, :, :] = data + i\n\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, (10.2, 13.2), (20.2, 17.2), npoints=4)(data2)\n np.testing.assert_array_almost_equal(res, np.asarray([np.arange(7, 17, 1), np.arange(8, 18, 1)]).transpose())\n\n # same test with only one neighbour or only one target point\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, (10.2, 13.2), (20.2, 17.2), npoints=1)(data)\n np.testing.assert_array_almost_equal(res, [7, 8])\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, 10.2, 20.2, npoints=1)(data)\n np.testing.assert_array_almost_equal(res, 7)\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, 13.2, 17.2, npoints=1)(data2)\n np.testing.assert_array_almost_equal(res, np.arange(8, 18, 1).reshape(10, 1))", "def gen_distortion_weights(grid='equiangular', n_side=64):\r\n if grid == \"equiangular\":\r\n if n_side % 2 != 0:\r\n raise ValueError('Need an even number of points on the latitude')\r\n\r\n weight = torch.zeros(1, 1, n_side, n_side)\r\n theta_range = torch.linspace(0, np.pi, steps=n_side + 1)\r\n dphi = 2 * np.pi / n_side\r\n for theta_idx in range(n_side):\r\n area = dphi * abs((math.cos(theta_range[theta_idx]) - math.cos(theta_range[theta_idx+1])))\r\n weight[:, :, theta_idx, :] = area\r\n weight = weight.flatten(-2)\r\n\r\n elif grid == \"healpix\":\r\n n = 12 * n_side ** 2\r\n area = hp.nside2pixarea(n_side)\r\n weight = torch.ones(1, 1, n) * area\r\n\r\n else:\r\n raise NotImplementedError\r\n\r\n weight = torch.nn.Parameter(weight, requires_grad=False)\r\n\r\n return weight", "def regrid(old_grid):\n bins = np.floor((np.log10(old_grid) - l_min) / dl).astype(int)\n w = (bins >= 0) & (bins < nbins)\n\n return bins, w", "def test_nearest_neighbour_regular_2d():\n # test with regular grid and 2d coords\n grid_lon, grid_lat = np.meshgrid(np.arange(100), np.arange(50), indexing=\"ij\")\n data = np.zeros((100, 50))\n\n # the four nearest values for the first point\n data[10:12, 20:22] = 7\n\n # the four nearest values for the second point\n data[13:15, 17:19] = 8\n\n # the actual test\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, (10.2, 13.2), (20.2, 17.2), npoints=4)(data)\n np.testing.assert_array_almost_equal(res, [7, 8])\n\n # same test, but with 3d-data (e.g., level, lon, lat)\n data2 = np.zeros((10, 100, 50))\n for i in range(10):\n data2[i, :, :] = data + i\n\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, (10.2, 13.2), (20.2, 17.2), npoints=4)(data2)\n np.testing.assert_array_almost_equal(res, np.asarray([np.arange(7, 17, 1), np.arange(8, 18, 1)]).transpose())\n\n # same test with one neighbour point\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, 10.2, 20.2, npoints=1)(data2)\n np.testing.assert_array_almost_equal(res, np.arange(7, 17, 1).reshape(10, 1))\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, (10.2, 13.2), (20.2, 17.2), npoints=1)(data2)\n np.testing.assert_array_almost_equal(res, 
np.asarray([np.arange(7, 17, 1), np.arange(8, 18, 1)]).transpose())", "def initialize_weights_and_bias(self, X_train):\n n_samples, n_features = np.shape(X_train)\n n_output = 1 \n \n # This is the numeber of gridcells and we want to make one prediction pr cell. \n # It this doesn't work calculate the number of griddcells.\n\n self.b_h = [] #np.ones((self.n_hidden_layers, self.n_hidden[0]))\n self.W_h = []\n\n for i in range(len(self.n_hidden)):\n if (i == 0):\n self.W_h.append(self.random.normal(loc=0.0, scale=0.1, size=(n_features, self.n_hidden[0])))\n self.b_h.append(np.ones(self.n_hidden[0]))\n else:\n self.W_h.append(self.random.normal(loc=0.0, scale=0.1, size=(self.n_hidden[i-1], self.n_hidden[i])))\n self.b_h.append(np.ones(self.n_hidden[i])) \n \n self.b_out = [1]\n self.W_out = self.random.normal(loc=0.0, scale=0.1, size=(self.n_hidden[-1], n_output))", "def test_nearest_neighbour_unstructured():\n # create coordinates\n grid_lon = np.arange(100)\n grid_lat = np.ones(100)\n data = np.zeros(100)\n\n # the nearest 3 points\n data[10:13] = 7\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, (11.2, 2.2), (11.2, 13.2), npoints=3, src_grid=\"unstructured\")(data)\n np.testing.assert_array_almost_equal(res, [7, 0])\n\n # same test, but with 2d-data (e.g., level, ncell)\n data2 = np.zeros((10, 100))\n for i in range(10):\n data2[i, :] = data + i\n\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, (11.2, 2.2), (11.2, 13.2), npoints=3, src_grid=\"unstructured\")(data2)\n np.testing.assert_array_almost_equal(res, np.asarray([np.arange(7, 17, 1), np.arange(0, 10, 1)]).transpose())\n\n # only one point\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, 11.2, 13.2, npoints=3, src_grid=\"unstructured\")(data)\n np.testing.assert_almost_equal(res, 7)\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, 11.2, 13.2, npoints=3, src_grid=\"unstructured\")(data2)\n np.testing.assert_array_almost_equal(res, np.arange(7, 17, 1).reshape(10, 1))\n\n # same test with one one neighbour point\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, 11.2, 13.2, npoints=1, src_grid=\"unstructured\")(data)\n np.testing.assert_almost_equal(res, 7)\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, 11.2, 13.2, npoints=1, src_grid=\"unstructured\")(data2)\n np.testing.assert_almost_equal(res, np.arange(7, 17, 1).reshape(10, 1))", "def get_neighbours(lat, long):\n # ns = north east, ew = east west (ratio between 1 feet and degree) \n # its different on diferent places on earth (sphere)!!\n ns = 0.0025\n ew = 0.0025\n walk = []\n for i in range(-2, 3):\n for j in range(-2, 3):\n thiscell = CellId.from_lat_lng(LatLng.from_degrees(lat + ns*i, long + ew*j)).parent(S2_CELL_LEVEL)\n if abs(i * j) < 4:\n walk.append(thiscell.id())\n return sorted(walk)", "def test_weighting(self):\n dset = self.dset.spec.sel(\n lons=self.lons_inexact, lats=self.lats_inexact, method=\"idw\"\n )\n for stat in [\"hs\", \"tp\"]:\n idw = dset.spec.stats([stat])[stat].values\n site0 = self.dset.isel(site=[0]).spec.stats([stat])[stat].values\n site1 = self.dset.isel(site=[1]).spec.stats([stat])[stat].values\n lower = np.array([min(s1, s2) for s1, s2 in zip(site0, site1)])\n upper = np.array([max(s1, s2) for s1, s2 in zip(site0, site1)])\n assert (upper - idw > 0).all() and (idw - lower > 0).all()", "def get_neigh_demand(city):\n\n # get station set S with more than 10 charge equipment\n static_file_path = exp_data_path + os.sep + 'static' + 
os.sep + 'static_feature_{}.csv'.format(city)\n static_feature = pd.read_csv(static_file_path, header=0)\n station_set = set(static_feature[static_feature.num >= 10].index)\n\n # calculate 10 nearest neighborhoods for each station, sort by distance and store their index, get a map\n neighbor_distance_map = {}\n matrix_distance = np.load(exp_data_path + os.sep + 'similarity' + os.sep + 'similarity_distance_{}_numpy.npy'.format(city), allow_pickle=True)\n all_distance_map = {i: [] for i in range(station_count[city])}\n for i in range(station_count[city]):\n if i not in station_set:\n continue\n for j in range(station_count[city]):\n if j not in station_set:\n continue\n all_distance_map[i].append((j, matrix_distance[i][j]))\n all_distance_map[i].sort(key=lambda x : x[1], reverse=True)\n neighbor_distance_map[i] = [idx for idx, distance in all_distance_map[i][:10]]\n\n # 11 times header, get static neighborhood feature for each station(in S), get csv: neighbor_feature_{city}.csv\n ALL_HEADER = ['index']\n ALL_HEADER.extend(GENERAL_HEADER)\n for i in range(10):\n for j in GENERAL_HEADER:\n ALL_HEADER.append('{}_{}'.format(j, i))\n\n raw_data = np.empty((len(neighbor_distance_map), len(ALL_HEADER)))\n for i, idx in enumerate(neighbor_distance_map.keys()):\n raw_data[i][0] = idx\n raw_data[i][1:1+len(GENERAL_HEADER)] = static_feature.iloc[idx]['num':'mall']\n for j in range(10):\n neighbor_idx = neighbor_distance_map[idx][j]\n raw_data[i][1+len(GENERAL_HEADER)*(j+1):1+len(GENERAL_HEADER)*(j+2)] = static_feature.iloc[neighbor_idx]['num':'mall']\n neighbor_feature_data = pd.DataFrame(raw_data, columns=ALL_HEADER)\n print('neighbor feature')\n print(neighbor_feature_data)\n\n neighbor_feature_path = exp_data_path + os.sep + 'static' + os.sep + 'static_neighor_feature_{}.csv'.format(city)\n if os.path.exists(neighbor_feature_path):\n os.remove(neighbor_feature_path)\n neighbor_feature_data.to_csv(neighbor_feature_path)\n\n # create final csv(11 times header with basic info(time_index + time_embed_index))\n # if index in S, fill basic info, neighbor_feature and demand\n\n demand = np.load(exp_data_path + os.sep + 'station' + os.sep + 'demand_{}.npy'.format(city), allow_pickle=True)\n time_count = demand.shape[1]\n\n DEMAND_HEADER = []\n DEMAND_HEADER.extend(ALL_HEADER)\n DEMAND_HEADER.extend(['time_index', 'time_embed', 'demand'])\n neighbor_demand_raw_data = np.empty(((len(neighbor_distance_map)*time_count, len(DEMAND_HEADER))))\n\n # get time map like {\"0800\": 1, \"0830\": 2, ....}\n time_index_map = np.load(exp_data_path + os.sep + 'station_list' + os.sep + 'time_index.npy', allow_pickle=True)\n time_index_map = dict(time_index_map.tolist())\n time_map = {t: i for i, t in enumerate(sorted(set([k[-4:] for k in time_index_map['rev_index'].keys()])))}\n\n cur_idx = 0\n for time_idx in range(time_count):\n time_embed_idx = time_map[time_index_map['index'][time_idx][-4:]]\n for station_idx in station_set:\n neighbor_demand_raw_data[cur_idx][0:len(ALL_HEADER)] = neighbor_feature_data.loc[neighbor_feature_data['index']==station_idx, 'index':'mall_9']\n neighbor_demand_raw_data[cur_idx][len(ALL_HEADER)] = time_idx\n neighbor_demand_raw_data[cur_idx][len(ALL_HEADER)+1] = time_embed_idx\n neighbor_demand_raw_data[cur_idx][len(ALL_HEADER)+2] = demand[station_idx][time_idx][-1]\n # todo add slow demand and quick demand here\n cur_idx = cur_idx + 1\n print(cur_idx, neighbor_demand_raw_data.shape)\n\n neighbor_demand_data = pd.DataFrame(neighbor_demand_raw_data, columns=DEMAND_HEADER)\n print('neighbor 
demand')\n print(neighbor_demand_data)\n\n neighbor_demand_path = exp_data_path + os.sep + 'static' + os.sep + 'neighbor_demand_{}.csv'.format(city)\n if os.path.exists(neighbor_demand_path):\n os.remove(neighbor_demand_path)\n neighbor_demand_data.to_csv(neighbor_demand_path)", "def get_hardwired_speed_weights(self):\n \n phase_shift=self.speed_phase_shift\n \n # row 1 has the weights of speed cells to grid cell 1\n self.W_speed_east=np.zeros_like(self.W_ee) \n self.W_speed_west=np.zeros_like(self.W_ee) \n self.W_speed_north=np.zeros_like(self.W_ee) \n self.W_speed_south=np.zeros_like(self.W_ee) \n\n if self.use_eight_directions is True:\n self.W_speed_north_east=np.zeros_like(self.W_ee) \n self.W_speed_north_west=np.zeros_like(self.W_ee) \n self.W_speed_south_east=np.zeros_like(self.W_ee) \n self.W_speed_south_west=np.zeros_like(self.W_ee) \n\n\n for phase_idx,phase in enumerate(self.gp.phases):\n shifted_north_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(np.pi/2.),self.gp.phases)\n shifted_south_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi/2.),self.gp.phases)\n shifted_east_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(0),self.gp.phases)\n shifted_west_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi),self.gp.phases)\n\n self.W_speed_north[phase_idx,:]=self.W_ee[shifted_north_phase_idx,:]\n self.W_speed_south[phase_idx,:]=self.W_ee[shifted_south_phase_idx,:]\n self.W_speed_east[phase_idx,:]=self.W_ee[shifted_east_phase_idx,:]\n self.W_speed_west[phase_idx,:]=self.W_ee[shifted_west_phase_idx,:] \n \n if self.use_eight_directions is True:\n shifted_north_east_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(np.pi/4),self.gp.phases)\n shifted_north_west_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(np.pi*3/4),self.gp.phases)\n shifted_south_east_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi/4),self.gp.phases)\n shifted_south_west_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi*3/4),self.gp.phases)\n \n self.W_speed_north_east[phase_idx,:]=self.W_ee[shifted_north_east_phase_idx,:]\n self.W_speed_north_west[phase_idx,:]=self.W_ee[shifted_north_west_phase_idx,:]\n self.W_speed_south_east[phase_idx,:]=self.W_ee[shifted_south_east_phase_idx,:]\n self.W_speed_south_west[phase_idx,:]=self.W_ee[shifted_south_west_phase_idx,:]", "def initializeWeights(n_in,n_out):\r\n \r\n epsilon = sqrt(6) / sqrt(n_in + n_out + 1);\r\n W = (np.random.rand(n_out, n_in + 1)*2* epsilon) - epsilon;\r\n return W", "def initializeWeights(n_in,n_out):\r\n \r\n epsilon = sqrt(6) / sqrt(n_in + n_out + 1);\r\n W = (np.random.rand(n_out, n_in + 1)*2* epsilon) - epsilon;\r\n return W", "def initializeWeights(n_in,n_out):\n \n epsilon = sqrt(6) / sqrt(n_in + n_out + 1);\n W = (np.random.rand(n_out, n_in + 1)*2* epsilon) - epsilon;\n return W", "def calculateWeights(self):\n return self.distances #En lo que encontramos una funcion que represente", "def initializeWeights(n_in,n_out):\n epsilon = sqrt(6) / sqrt(n_in + n_out + 1);\n W = (np.random.rand(n_out, n_in + 1)*2* epsilon) - epsilon;\n return W", "def initializeWeights(n_in,n_out):\n epsilon = sqrt(6) / sqrt(n_in + n_out + 1);\n W = (np.random.rand(n_out, n_in + 1)*2* epsilon) - epsilon;\n return W", "def initializeWeights(n_in,n_out):\n epsilon = sqrt(6) / sqrt(n_in + n_out + 1);\n W = (np.random.rand(n_out, n_in + 1)*2* epsilon) - epsilon;\n return W", "def swath_from_lonlat_grid(grid_lons, grid_lats, lons, lats, data,\n radius_of_influence):\n\n valid_index = get_valid_index_from_lonlat_grid(\n 
grid_lons, grid_lats, lons, lats, radius_of_influence)\n\n lons = lons[valid_index]\n lats = lats[valid_index]\n data = data[valid_index]\n\n return lons, lats, data", "def test_test_nearest_neighbour_dmean():\n # test with regular grid and 1d coords\n grid_lon = np.arange(100)\n grid_lat = np.arange(50)\n data = np.zeros((50, 100))\n\n # the four nearest values for the first point\n data[20, 10] = 7\n\n # the four nearest values for the second point\n data[17, 13] = 8\n\n # the actual test\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, (10, 13), (20, 17), npoints=2, method=\"d-mean\")(data)\n np.testing.assert_array_almost_equal(res, [5.6, 6.4])", "def score_grid():\r\n\t\r\n\tp = 'results\\\\mnist_filter'\r\n\t(tr_x, tr_y), (te_x, te_y) = load_mnist()\r\n\t\r\n\t# Get the SPs\r\n\tsps = [load(os.path.join(p, sp)) for sp in os.listdir(p) if sp[2] == '0']\r\n\tsp2 = load(os.path.join(p, 'sp1-0.pkl'))\r\n\t\r\n\tnwindows = 26 ** 2\r\n\tnfeat = 100 * nwindows\r\n\t\r\n\t# w = [sp2.p[sp2.syn_map == j] for j in xrange(nfeat)]\r\n\t# ms = max(wi.shape[0] for wi in w)\r\n\t# with open(os.path.join(p, 'data.pkl'), 'wb') as f:\r\n\t\t# cPickle.dump((w, ms), f, cPickle.HIGHEST_PROTOCOL)\r\n\twith open(os.path.join(p, 'data.pkl'), 'rb') as f:\r\n\t\tw, ms = cPickle.load(f)\r\n\t\r\n\t# Get training data\r\n\ttr_x2 = np.zeros((tr_x.shape[0], nfeat))\r\n\tfor i, x in enumerate(tr_x):\r\n\t\tnx = extract_patches_2d(x.reshape(28, 28), (3, 3)).reshape(\r\n\t\t\tnwindows, 9)\r\n\t\tx = np.array(np.zeros(nfeat), dtype='bool')\r\n\t\tfor j, (xi, sp) in enumerate(izip(nx, sps)):\r\n\t\t\tsp.step(xi)\r\n\t\t\tx[j*100:(j*100)+100] = sp.y[:, 0]\r\n\t\t\r\n\t\ty = sp2.p * x[sp2.syn_map]\r\n\t\tw = np.zeros((nfeat, ms))\r\n\t\tfor j in xrange(nfeat):\r\n\t\t\ta = y[sp2.syn_map == j]\r\n\t\t\tw[j][:a.shape[0]] = a\r\n\t\ttr_x2[i] = np.mean(w, 1)\r\n\t\r\n\t# Get testing data\r\n\tte_x2 = np.zeros((te_x.shape[0], nfeat))\r\n\tfor i, x in enumerate(te_x):\r\n\t\tnx = extract_patches_2d(x.reshape(28, 28), (3, 3)).reshape(\r\n\t\t\tnwindows, 9)\r\n\t\tx = np.array(np.zeros(nfeat), dtype='bool')\r\n\t\tfor j, (xi, sp) in enumerate(izip(nx, sps)):\r\n\t\t\tsp.step(xi)\r\n\t\t\tx[j*100:(j*100)+100] = sp.y[:, 0]\r\n\t\t\r\n\t\ty = sp2.p * x[sp2.syn_map]\r\n\t\tw = np.zeros((nfeat, ms))\r\n\t\tfor j in xrange(nfeat):\r\n\t\t\ta = y[sp2.syn_map == j]\r\n\t\t\tw[j][:a.shape[0]] = a\r\n\t\tte_x2[i] = np.mean(w, 1)\r\n\t\r\n\t# Classify\r\n\tclf = LinearSVC(random_state=123456789)\r\n\tclf.fit(tr_x2, tr_y)\r\n\tprint 'SVM Accuracy : {0:2.2f} %'.format(clf.score(te_x2, te_y) * 100)", "def get_random_inhibitory_weights(self):\n \n self.W_ei=np.zeros((self.N_e,self.N_i))\n self.W_ie=np.zeros((self.N_i,self.N_e)) \n self. 
W_ii=np.zeros((self.N_i,self.N_i))\n\n \n # connections to the excitatory neurons \n for row_idx in xrange(self.N_e):\n \n # from ihibitory\n all_idxs_ei=np.arange(self.N_i)\n np.random.shuffle(all_idxs_ei)\n self.W_ei[row_idx,all_idxs_ei[0:self.num_conns_ei]]=self.W_max_ei \n \n # connections to inhibitory neurons\n for row_idx in range(self.N_i):\n \n # from exitatory \n all_idxs_ie=np.arange(self.N_e)\n np.random.shuffle(all_idxs_ie)\n self.W_ie[row_idx,all_idxs_ie[0:self.num_conns_ie]]=self.W_max_ie\n \n # from inhibitory\n all_idxs_ii=np.arange(self.N_i)\n np.random.shuffle(all_idxs_ii)\n self.W_ii[row_idx,all_idxs_ii[0:self.num_conns_ii]]=self.W_max_ii\n \n \n self.W[:self.N_e,self.N_e:]=self.W_ei\n self.W[self.N_e:,:self.N_e]=self.W_ie\n self.W[self.N_e:,self.N_e:]=self.W_ii", "def CreateTargetGeoMap(latS, latN, lonW, lonE, latlen, lonlen):\n\n lat_grid = np.linspace(latS, latN, latlen)\n lon_grid = np.linspace(lonW, lonE, lonlen)\n\n return lat_grid,lon_grid", "def calculate_supp_pcp_weights(supplemental_precip, id_tmp, tmp_file, config_options, mpi_config,\n lat_var=\"latitude\", lon_var=\"longitude\"):\n ndims = 0\n if mpi_config.rank == 0:\n ncvar = id_tmp.variables[supplemental_precip.netcdf_var_names[0]]\n ndims = len(ncvar.dimensions)\n if ndims == 3:\n latdim = 1\n londim = 2\n elif ndims == 2:\n latdim = 0\n londim = 1\n else:\n latdim = londim = -1\n config_options.errMsg = \"Unable to determine lat/lon grid size from \" + tmp_file\n err_handler.err_out(config_options)\n\n try:\n supplemental_precip.ny_global = id_tmp.variables[supplemental_precip.netcdf_var_names[0]].shape[latdim]\n except (ValueError, KeyError, AttributeError, Exception) as err:\n config_options.errMsg = \"Unable to extract Y shape size from: \" + \\\n supplemental_precip.netcdf_var_names[0] + \" from: \" + \\\n tmp_file + \" (\" + str(err) + \", \" + type(err) + \")\"\n err_handler.err_out(config_options)\n try:\n supplemental_precip.nx_global = id_tmp.variables[supplemental_precip.netcdf_var_names[0]].shape[londim]\n except (ValueError, KeyError, AttributeError, Exception) as err:\n config_options.errMsg = \"Unable to extract X shape size from: \" + \\\n supplemental_precip.netcdf_var_names[0] + \" from: \" + \\\n tmp_file + \" (\" + str(err) + \", \" + type(err) + \")\"\n err_handler.err_out(config_options)\n\n # mpi_config.comm.barrier()\n\n # Broadcast the forcing nx/ny values\n supplemental_precip.ny_global = mpi_config.broadcast_parameter(supplemental_precip.ny_global,\n config_options, param_type=int)\n supplemental_precip.nx_global = mpi_config.broadcast_parameter(supplemental_precip.nx_global,\n config_options, param_type=int)\n # mpi_config.comm.barrier()\n\n try:\n # noinspection PyTypeChecker\n supplemental_precip.esmf_grid_in = ESMF.Grid(np.array([supplemental_precip.ny_global,\n supplemental_precip.nx_global]),\n staggerloc=ESMF.StaggerLoc.CENTER,\n coord_sys=ESMF.CoordSys.SPH_DEG)\n except ESMF.ESMPyException as esmf_error:\n config_options.errMsg = \"Unable to create source ESMF grid from temporary file: \" + \\\n tmp_file + \" (\" + str(esmf_error) + \")\"\n err_handler.err_out(config_options)\n # mpi_config.comm.barrier()\n\n try:\n supplemental_precip.x_lower_bound = supplemental_precip.esmf_grid_in.lower_bounds[ESMF.StaggerLoc.CENTER][1]\n supplemental_precip.x_upper_bound = supplemental_precip.esmf_grid_in.upper_bounds[ESMF.StaggerLoc.CENTER][1]\n supplemental_precip.y_lower_bound = supplemental_precip.esmf_grid_in.lower_bounds[ESMF.StaggerLoc.CENTER][0]\n 
supplemental_precip.y_upper_bound = supplemental_precip.esmf_grid_in.upper_bounds[ESMF.StaggerLoc.CENTER][0]\n supplemental_precip.nx_local = supplemental_precip.x_upper_bound - supplemental_precip.x_lower_bound\n supplemental_precip.ny_local = supplemental_precip.y_upper_bound - supplemental_precip.y_lower_bound\n except (ValueError, KeyError, AttributeError) as err:\n config_options.errMsg = \"Unable to extract local X/Y boundaries from global grid from temporary \" + \\\n \"file: \" + tmp_file + \" (\" + str(err) + \")\"\n err_handler.err_out(config_options)\n # mpi_config.comm.barrier()\n\n # Check to make sure we have enough dimensionality to run regridding. ESMF requires both grids\n # to have a size of at least 2.\n if supplemental_precip.nx_local < 2 or supplemental_precip.ny_local < 2:\n config_options.errMsg = \"You have either specified too many cores for: \" + supplemental_precip.productName + \\\n \", or your input forcing grid is too small to process. Local grid \" \\\n \"must have x/y dimension size of 2.\"\n err_handler.log_critical(config_options, mpi_config)\n err_handler.check_program_status(config_options, mpi_config)\n\n lat_tmp = lon_tmp = None\n if mpi_config.rank == 0:\n # Process lat/lon values from the GFS grid.\n if len(id_tmp.variables[lat_var].shape) == 3:\n # We have 2D grids already in place.\n lat_tmp = id_tmp.variables[lat_var][0, :]\n lon_tmp = id_tmp.variables[lon_var][0, :]\n elif len(id_tmp.variables[lon_var].shape) == 2:\n # We have 2D grids already in place.\n lat_tmp = id_tmp.variables[lat_var][:]\n lon_tmp = id_tmp.variables[lon_var][:]\n elif len(id_tmp.variables[lat_var].shape) == 1:\n # We have 1D lat/lons we need to translate into\n # 2D grids.\n lat_tmp = np.repeat(id_tmp.variables[lat_var][:][:, np.newaxis], supplemental_precip.nx_global, axis=1)\n lon_tmp = np.tile(id_tmp.variables[lon_var][:], (supplemental_precip.ny_global, 1))\n # mpi_config.comm.barrier()\n\n # Scatter global GFS latitude grid to processors..\n if mpi_config.rank == 0:\n var_tmp = lat_tmp\n else:\n var_tmp = None\n var_sub_lat_tmp = mpi_config.scatter_array(supplemental_precip, var_tmp, config_options)\n # mpi_config.comm.barrier()\n\n if mpi_config.rank == 0:\n var_tmp = lon_tmp\n else:\n var_tmp = None\n var_sub_lon_tmp = mpi_config.scatter_array(supplemental_precip, var_tmp, config_options)\n # mpi_config.comm.barrier()\n\n try:\n supplemental_precip.esmf_lats = supplemental_precip.esmf_grid_in.get_coords(1)\n except ESMF.GridException as ge:\n config_options.errMsg = \"Unable to locate latitude coordinate object within supplemental precip ESMF grid: \" \\\n + str(ge)\n err_handler.err_out(config_options)\n # mpi_config.comm.barrier()\n\n try:\n supplemental_precip.esmf_lons = supplemental_precip.esmf_grid_in.get_coords(0)\n except ESMF.GridException as ge:\n config_options.errMsg = \"Unable to locate longitude coordinate object within supplemental precip ESMF grid: \" \\\n + str(ge)\n err_handler.err_out(config_options)\n # mpi_config.comm.barrier()\n\n supplemental_precip.esmf_lats[:, :] = var_sub_lat_tmp\n supplemental_precip.esmf_lons[:, :] = var_sub_lon_tmp\n del var_sub_lat_tmp\n del var_sub_lon_tmp\n del lat_tmp\n del lon_tmp\n\n # Create a ESMF field to hold the incoming data.\n supplemental_precip.esmf_field_in = ESMF.Field(supplemental_precip.esmf_grid_in,\n name=supplemental_precip.productName + \"_NATIVE\")\n\n # mpi_config.comm.barrier()\n\n # Scatter global grid to processors..\n if mpi_config.rank == 0:\n if ndims == 3:\n var_tmp = 
id_tmp[supplemental_precip.netcdf_var_names[0]][0, :]\n elif ndims == 2:\n var_tmp = id_tmp[supplemental_precip.netcdf_var_names[0]][:]\n else:\n var_tmp = None\n # Set all valid values to 1.0, and all missing values to 0.0. This will\n # be used to generate an output mask that is used later on in downscaling, layering,\n # etc.\n var_tmp[:] = 1.0\n else:\n var_tmp = None\n var_sub_tmp = mpi_config.scatter_array(supplemental_precip, var_tmp, config_options)\n mpi_config.comm.barrier()\n\n # Place temporary data into the field array for generating the regridding object.\n supplemental_precip.esmf_field_in.data[:] = var_sub_tmp\n # mpi_config.comm.barrier()\n\n supplemental_precip.regridObj = ESMF.Regrid(supplemental_precip.esmf_field_in,\n supplemental_precip.esmf_field_out,\n src_mask_values=np.array([0]),\n regrid_method=ESMF.RegridMethod.BILINEAR,\n unmapped_action=ESMF.UnmappedAction.IGNORE)\n\n # Run the regridding object on this test dataset. Check the output grid for\n # any 0 values.\n supplemental_precip.esmf_field_out = supplemental_precip.regridObj(supplemental_precip.esmf_field_in,\n supplemental_precip.esmf_field_out)\n supplemental_precip.regridded_mask[:] = supplemental_precip.esmf_field_out.data[:]", "def get_grid_mappings(weighed_sum,\n use_defaults=True,min_index_row_peaks =40):\n # Detect peaks\n col_peaks = indices_highest_peaks(weighed_sum, 0)\n row_peaks = indices_highest_peaks(weighed_sum, 1)\n \n # Map col_peaks to Hz values\n if len(col_peaks) == len(HZ):\n mapping_Hz = dict(zip(HZ,col_peaks)) \n \n # Map adjusted col_peaks to Hz values\n else:\n try: \n col_peaks =adjust_arr_peaks(weighed_sum,col_peaks,len(HZ),0)\n mapping_Hz = dict(zip(HZ,col_peaks)) \n \n # Map adjusted HZ values to default coordinates if need be\n if use_defaults:\n for i,key in enumerate(HZ):\n if mapping_Hz[key] > UPPER_LIMIT_HZ_COORD[i] or mapping_Hz[key] < LOWER_LIMIT_HZ_COORD[i]:\n mapping_Hz[key] = DEFAULT_HZ_COORD[i]\n except:\n if use_defaults:\n mapping_Hz = dict(zip(HZ,DEFAULT_HZ_COORD )) \n else: \n return np.nan,np.nan,np.nan,np.nan\n \n \n row_peaks = row_peaks[row_peaks > min_index_row_peaks]\n \n try:\n row_100 = row_peaks[0] #Should be around 30 for 100 km\n row_200 = row_peaks[1] #Should be around 30 for 100 km\n except:\n if use_defaults:\n row_100 = KM_DEFAULT_100\n row_200 = KM_DEFAULT_200 \n else: \n return np.nan,np.nan,np.nan,np.nan\n \n if use_defaults:\n if abs(row_100 - KM_DEFAULT_100) > abs(KM_DEFAULT_200 - KM_DEFAULT_100):\n row_100 = KM_DEFAULT_100\n if abs(row_200 - KM_DEFAULT_200) > abs(KM_DEFAULT_200 - KM_DEFAULT_100):\n row_200 = KM_DEFAULT_200 \n \n mapping_km = {100:row_100,200:row_200}\n\n return col_peaks,row_peaks,mapping_Hz, mapping_km", "def test_weighting_implementation():\n\n # generate two locusts of points\n npts = 100\n epsilon = 0.05\n # cluster 1\n coords1 = generate_locus_of_3d_points(npts, 0.1, 0.1, 0.1, epsilon=epsilon)\n # cluster 2\n coords2 = generate_locus_of_3d_points(npts, 0.9, 0.9, 0.9, epsilon=epsilon)\n\n # generate orientation vectors for cluster 1\n vectors1 = generate_aligned_vectors(len(coords1))\n\n # generate a random index value to check for each cluster\n idx = np.random.randint(npts)\n idx2 = np.random.randint(npts)\n\n # calculate dot product between vectors1 and cluster 2\n r = np.sqrt((0.9 - 0.1) ** 2 + (0.9 - 0.1) ** 2 + (0.9 - 0.1) ** 2)\n # s, vector between coords1 and cluster2\n s = np.zeros((3))\n s[0] = coords2[idx2, 0] - coords1[idx, 0]\n s[1] = coords2[idx2, 1] - coords1[idx, 1]\n s[2] = coords2[idx2, 2] - 
coords1[idx, 2]\n\n # calculate dot product between orientation and direction between cluster 1 and 2\n angles = angles_between_list_of_vectors(vectors1[idx], s)\n costheta = np.cos(angles) # dot product between vectors\n\n idx_costheta = costheta\n\n # define radial bins\n rbins = np.array([0.0, 0.1, r + 2.0 * epsilon])\n\n # define weights appropiate for weighting function\n weights1 = np.zeros((npts, 4))\n weights1[idx] = 1.0\n weights1[:, 1] = vectors1[:, 0]\n weights1[:, 2] = vectors1[:, 1]\n weights1[:, 3] = vectors1[:, 2]\n weights2 = np.zeros(npts)\n weights2[idx2] = 1.0\n\n # calculate weighted counts\n\n # weighting 1\n # calculate weighted counts\n weighted_counts, counts = positional_marked_npairs_3d(\n coords1,\n coords2,\n rbins,\n period=None,\n weights1=weights1,\n weights2=weights2,\n weight_func_id=1,\n num_threads=1,\n )\n\n msg = \"weighted counts do not match expected result given the weighting function\"\n assert np.isclose(weighted_counts[-1], idx_costheta, rtol=0.01 / npts), msg", "def island_loss_of_weight(self):\n for y in self.island_map:\n for cell in y:\n cell.loss_of_weight()" ]
[ "0.6848878", "0.6793101", "0.62360746", "0.62186825", "0.6205327", "0.60586023", "0.60279727", "0.58803076", "0.57930404", "0.57810956", "0.573738", "0.5684344", "0.5633059", "0.56270546", "0.5623209", "0.5623209", "0.56100583", "0.55861974", "0.55694896", "0.55694896", "0.55694896", "0.5538802", "0.5535589", "0.5521797", "0.55083585", "0.55071366", "0.54797477", "0.5478848", "0.5468525", "0.54524094" ]
0.7154857
0
Compare WRF, SNODAS, and Lidar data on the lidar domain
def main(): snowdensity=0.35 #from May 1 2010 SNOTEL (2011,2013 were similar, 2014 was 0.4), at the saddle in May 1 2010 it was 0.4 snodasyears=[2010,2004,2005] wdata=[wrf.load("wrf/SWE_daily.nc",extractday=212+5+int(np.round(365.25*year))) for year in [3,4]] wdata.extend([wrf.load("wrf/SWE_daily.nc",extractday=212+20+int(np.round(365.25*year))) for year in [3,4]]) print(len(wdata)) sdata=[snodas.load("snodas/SWE_Daily0600UTC_WesternUS_{}.dat".format(year),extractday=125) for year in snodasyears] sdata.extend([snodas.load("snodas/SWE_Daily0600UTC_WesternUS_{}.dat".format(year),extractday=140) for year in snodasyears]) print(len(sdata)) # sdata=[snodas.load("snodas/SWE_Daily0600UTC_WesternUS_{}.dat".format(year),extractday=120) for year in range(2004,2013)] # sdata.insert(0,sdata.pop(6)) #move year 2010 to the begining of the list ldata=lidar.load_fast(loc="lidar/",geofile="snow-on-dem.nc",decimation_factor=10) print("Calculating WRF weights") try: wrfweights=mygis.read_nc("wrf2lidar_weights.nc").data except: wrfweights =gen_weights(ldata.lat,ldata.lon,wdata[0].lat,wdata[0].lon,mask=(ldata.dem>1500)) mygis.write("wrf2lidar_weights.nc",wrfweights) # wrfbounds =find_bounds(wrfweights) print("Calculating SNODAS weights") try: snodasweights=mygis.read_nc("snodas2lidar_weights.nc").data except: snodasweights=gen_weights(ldata.lat,ldata.lon,sdata[0].lat,sdata[0].lon,mask=(ldata.dem>1500)) mygis.write("snodas2lidar_weights.nc",snodasweights) # snodasbounds =find_bounds(snodasweights) wdata[0].lc[wrfweights==0]=0 sdata[0].lc[snodasweights==0]=0 print("Binning by elevations...") #dx=4000) #note use dx=lidar_dx because weights are lidar gridcells... wrfbyz=[bin_by_elevation(w.data,w.dem,wdata[0].lc,weights=wrfweights,dz=200,dx=10) for w in wdata] print("Binning by elevations...") snodasbyz=[bin_by_elevation(s.data,sdata[0].dem,sdata[0].lc,weights=snodasweights,dz=150,dx=10) for s in sdata]#dx=926) print("Binning by elevations...") lidarbyz=bin_by_elevation(ldata.data*snowdensity,ldata.dem,ldata.lc,dz=100,dx=10) print("Plotting") plot_volumes(wrfbyz,snodasbyz,lidarbyz) snodasyears=[2010,2004,2005,2010.2,2004.2,2005.2] for i in range(len(snodasbyz)): plot_elevation_bands(snodasbyz[i],outputfile="SNODAS_swe_by_z_{}.png".format(snodasyears[i]),title="SNODAS SWE {}".format(snodasyears[i]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compare_river_directions_with_dynriver_corrs_and_MERIThydro_derived_corrs(self):\n ref_base_dir = (\"/Users/thomasriddick/Documents/data/lake_analysis_runs\"\n \"/lake_analysis_one_21_Jun_2021\")\n data_base_dir = (\"/Users/thomasriddick/Documents/data/lake_analysis_runs/\"\n \"lake_analysis_two_26_Mar_2022\")\n ref_filename=os.path.join(ref_base_dir,\n \"rivers/results/diag_version_29_date_0\",\n \"30min_flowtocell.nc\")\n data_filename=os.path.join(data_base_dir,\n \"rivers/results/diag_version_0_date_0\",\n \"30min_flowtocell.nc\")\n lsmask_filename=os.path.join(self.plots_data_dir,\n \"ls_mask_extract_ls_mask_from_corrected_HD_rdirs_20160504_142435.nc\")\n ref_catchment_filename=os.path.join(ref_base_dir,\n \"rivers/results/diag_version_29_date_0\",\n \"30min_catchments.nc\")\n data_catchment_filename=os.path.join(data_base_dir,\n \"rivers/results/diag_version_0_date_0\",\n \"30min_catchments.nc\")\n ref_rdirs_filename=os.path.join(ref_base_dir,\n \"rivers/results/diag_version_29_date_0\",\n \"30min_rdirs.nc\")\n reference_rmouth_outflows_filename=os.path.join(ref_base_dir,\n \"rivers/results/diag_version_29_date_0\",\n \"30min_flowtorivermouths.nc\")\n data_rmouth_outflows_filename=os.path.join(data_base_dir,\n \"rivers/results/diag_version_0_date_0\",\n \"30min_flowtorivermouths.nc\")\n #glacier_mask_filename=os.path.join(self.orog_data_directory,\"ice5g_v1_2_21_0k_10min.nc\")\n self.FlowMapTwoColourComparisonWithCatchmentsHelper(ref_flowmap_filename=ref_filename,\n data_flowmap_filename=data_filename,\n ref_catchment_filename=\\\n ref_catchment_filename,\n data_catchment_filename=\\\n data_catchment_filename,\n ref_rdirs_filename=\\\n ref_rdirs_filename,\n data_rdirs_filename=None,\n reference_rmouth_outflows_filename=\\\n reference_rmouth_outflows_filename,\n data_rmouth_outflows_filename=\\\n data_rmouth_outflows_filename,\n lsmask_filename=lsmask_filename,\n minflowcutoff=80,\n flip_data=False,\n rotate_data=False,\n flip_ref=False,\n rotate_ref=False,\n lsmask_has_same_orientation_as_ref=False,\n flip_lsmask=False,rotate_lsmask=False,\n invert_ls_mask=False,\n first_datasource_name=\"old\",\n second_datasource_name=\"MERIT derived\",\n matching_parameter_set='extensive',\n catchment_and_outflows_mods_list_filename=\\\n None,\n #\"catch_and_outflow_mods_ice6g_vs_ice5g_lgm.txt\",\n #additional_matches_list_filename=\\\n #\"additional_matches_ice6g_vs_ice5g_lgm.txt\",\n use_single_color_for_discrepancies=True,\n use_only_one_color_for_flowmap=False,\n use_title=False,remove_antartica=True,\n difference_in_catchment_label=\"Difference\",\n grid_type='HD')", "def compare_river_directions_with_dynriver_corrs_and_MERIThydro_derived_corrs_original_ts(self):\n ref_base_dir = (\"/Users/thomasriddick/Documents/data/lake_analysis_runs\"\n \"/lake_analysis_one_21_Jun_2021\")\n data_base_dir = (\"/Users/thomasriddick/Documents/data/lake_analysis_runs/\"\n \"lake_analysis_two_26_Mar_2022\")\n ref_filename=os.path.join(ref_base_dir,\n \"rivers/results/default_orog_corrs/diag_version_29_date_0_original_truesinks\",\n \"30min_flowtocell.nc\")\n data_filename=os.path.join(data_base_dir,\n \"rivers/results/diag_version_0_date_0_original_truesinks\",\n \"30min_flowtocell.nc\")\n lsmask_filename=os.path.join(self.plots_data_dir,\n \"ls_mask_extract_ls_mask_from_corrected_HD_rdirs_20160504_142435.nc\")\n ref_catchment_filename=os.path.join(ref_base_dir,\n \"rivers/results/default_orog_corrs/diag_version_29_date_0_original_truesinks\",\n \"30min_catchments.nc\")\n 
data_catchment_filename=os.path.join(data_base_dir,\n \"rivers/results/diag_version_0_date_0_original_truesinks\",\n \"30min_catchments.nc\")\n ref_rdirs_filename=os.path.join(ref_base_dir,\n \"rivers/results/default_orog_corrs/\"\n \"diag_version_29_date_0_original_truesinks\",\n \"30min_rdirs.nc\")\n reference_rmouth_outflows_filename=os.path.join(ref_base_dir,\n \"rivers/results/default_orog_corrs/\"\n \"diag_version_29_date_0_original_truesinks\",\n \"30min_rmouth_flowtocell.nc\")\n data_rmouth_outflows_filename=os.path.join(data_base_dir,\n \"rivers/results/diag_version_0_date_0_original_truesinks\",\n \"30min_rmouth_flowtocell.nc\")\n #glacier_mask_filename=os.path.join(self.orog_data_directory,\"ice5g_v1_2_21_0k_10min.nc\")\n self.FlowMapTwoColourComparisonWithCatchmentsHelper(ref_flowmap_filename=ref_filename,\n data_flowmap_filename=data_filename,\n ref_catchment_filename=\\\n ref_catchment_filename,\n data_catchment_filename=\\\n data_catchment_filename,\n ref_rdirs_filename=\\\n ref_rdirs_filename,\n data_rdirs_filename=None,\n reference_rmouth_outflows_filename=\\\n reference_rmouth_outflows_filename,\n data_rmouth_outflows_filename=\\\n data_rmouth_outflows_filename,\n lsmask_filename=lsmask_filename,\n minflowcutoff=80,\n flip_data=False,\n rotate_data=False,\n flip_ref=False,\n rotate_ref=False,\n lsmask_has_same_orientation_as_ref=False,\n flip_lsmask=False,rotate_lsmask=False,\n invert_ls_mask=False,\n first_datasource_name=\"old\",\n second_datasource_name=\"MERIT derived\",\n matching_parameter_set='default',\n catchment_and_outflows_mods_list_filename=\\\n None,\n #additional_matches_list_filename=\\\n #\"additional_matches_ice6g_vs_ice5g_lgm.txt\",\n use_single_color_for_discrepancies=True,\n use_only_one_color_for_flowmap=False,\n use_title=False,remove_antartica=True,\n difference_in_catchment_label=\"Difference\",\n grid_type='HD')", "def compare_river_directions_with_dynriver_corrs_and_MERIThydro_derived_corrs_new_ts_10min(self):\n ref_base_dir = (\"/Users/thomasriddick/Documents/data/lake_analysis_runs/\"\n \"lake_analysis_one_21_Jun_2021/rivers/results/\"\n \"default_orog_corrs/\"\n \"diag_version_29_date_0_original_truesinks\")\n data_base_dir = (\"/Users/thomasriddick/Documents/data/lake_analysis_runs/\"\n \"lake_analysis_two_26_Mar_2022/\"\n \"rivers/results/diag_version_32_date_0_with_truesinks\")\n ref_filename=os.path.join(ref_base_dir,\"10min_flowtocell.nc\")\n data_filename=os.path.join(data_base_dir,\"10min_flowtocell.nc\")\n #lsmask_filename=os.path.join(self.plots_data_dir,\n # \"ls_mask_extract_ls_mask_from_corrected_HD_rdirs_20160504_142435.nc\")\n lsmask_filename=None\n ref_catchment_filename=os.path.join(ref_base_dir,\n \"10min_catchments_ext.nc\")\n data_catchment_filename=os.path.join(data_base_dir,\n \"10min_catchments_ext.nc\")\n ref_rdirs_filename=os.path.join(ref_base_dir,\n \"10min_rdirs.nc\")\n data_rdirs_filename=os.path.join(data_base_dir,\n \"10min_rdirs.nc\")\n reference_rmouth_outflows_filename=os.path.join(ref_base_dir,\n \"10min_rmouth_flowtocell.nc\")\n data_rmouth_outflows_filename=os.path.join(data_base_dir,\n \"10min_rmouth_flowtocell.nc\")\n #glacier_mask_filename=os.path.join(self.orog_data_directory,\"ice5g_v1_2_21_0k_10min.nc\")\n self.FlowMapTwoColourComparisonWithCatchmentsHelper(ref_flowmap_filename=ref_filename,\n data_flowmap_filename=data_filename,\n ref_catchment_filename=\\\n ref_catchment_filename,\n data_catchment_filename=\\\n data_catchment_filename,\n ref_rdirs_filename=\\\n ref_rdirs_filename,\n 
data_rdirs_filename=None,\n reference_rmouth_outflows_filename=\\\n reference_rmouth_outflows_filename,\n data_rmouth_outflows_filename=\\\n data_rmouth_outflows_filename,\n lsmask_filename=lsmask_filename,\n minflowcutoff=80*9/3,\n flip_data=False,\n rotate_data=False,\n flip_ref=False,\n rotate_ref=False,\n lsmask_has_same_orientation_as_ref=False,\n flip_lsmask=False,rotate_lsmask=False,\n invert_ls_mask=False,\n first_datasource_name=\"old\",\n second_datasource_name=\"MERIT derived\",\n matching_parameter_set='minimal',\n additional_matches_list_filename=\n \"/Users/thomasriddick/Documents/data/HDdata/\"\n \"addmatches/additional_matches_10min_upscaled_\"\n \"MERIT_rdirs_vs_modern_day_ext.txt\",\n additional_truesink_matches_list_filename=\n \"/Users/thomasriddick/Documents/data/HDdata/\"\n \"addmatches/addmatches_truesinks/\"\n \"additional_truesinks_matches_10min_upscaled_\"\n \"MERIT_rdirs_vs_modern_day.txt\",\n catchment_and_outflows_mods_list_filename=\\\n \"/Users/thomasriddick/Documents/data/HDdata/\"\n \"catchmods/catch_and_outflow_mods_10min_upscaled_MERIT_rdirs_vs_modern_day.txt\",\n use_single_color_for_discrepancies=True,\n use_only_one_color_for_flowmap=False,\n use_title=False,remove_antartica=False,\n difference_in_catchment_label=\"Difference\",\n grid_type='LatLong10min')\n self.FlowMapTwoColourComparisonWithCatchmentsHelper(ref_flowmap_filename=ref_filename,\n data_flowmap_filename=data_filename,\n ref_catchment_filename=\\\n ref_catchment_filename,\n data_catchment_filename=\\\n data_catchment_filename,\n ref_rdirs_filename=\\\n ref_rdirs_filename,\n data_rdirs_filename=None,\n reference_rmouth_outflows_filename=\\\n reference_rmouth_outflows_filename,\n data_rmouth_outflows_filename=\\\n data_rmouth_outflows_filename,\n lsmask_filename=lsmask_filename,\n minflowcutoff=20*9/3,\n flip_data=False,\n rotate_data=False,\n flip_ref=False,\n rotate_ref=False,\n lsmask_has_same_orientation_as_ref=False,\n flip_lsmask=False,rotate_lsmask=False,\n invert_ls_mask=False,\n first_datasource_name=\"old\",\n second_datasource_name=\"MERIT derived\",\n matching_parameter_set='minimal',\n additional_matches_list_filename=\n \"/Users/thomasriddick/Documents/data/HDdata/\"\n \"addmatches/additional_matches_10min_upscaled_\"\n \"MERIT_rdirs_vs_modern_day_ext.txt\",\n additional_truesink_matches_list_filename=\n \"/Users/thomasriddick/Documents/data/HDdata/\"\n \"addmatches/addmatches_truesinks/\"\n \"additional_truesinks_matches_10min_upscaled_\"\n \"MERIT_rdirs_vs_modern_day.txt\",\n catchment_and_outflows_mods_list_filename=\\\n \"/Users/thomasriddick/Documents/data/HDdata/\"\n \"catchmods/catch_and_outflow_mods_10min_upscaled_MERIT_rdirs_vs_modern_day.txt\",\n use_single_color_for_discrepancies=True,\n use_only_one_color_for_flowmap=False,\n use_title=False,remove_antartica=False,\n difference_in_catchment_label=\"Difference\",\n grid_type='LatLong10min')\n self.FlowMapTwoColourComparisonWithCatchmentsHelper(ref_flowmap_filename=ref_filename,\n data_flowmap_filename=data_filename,\n ref_catchment_filename=\\\n ref_catchment_filename,\n data_catchment_filename=\\\n data_catchment_filename,\n ref_rdirs_filename=\\\n ref_rdirs_filename,\n data_rdirs_filename=\\\n data_rdirs_filename,\n reference_rmouth_outflows_filename=\\\n reference_rmouth_outflows_filename,\n data_rmouth_outflows_filename=\\\n data_rmouth_outflows_filename,\n lsmask_filename=lsmask_filename,\n minflowcutoff=20*9/3,\n flip_data=False,\n rotate_data=False,\n flip_ref=False,\n rotate_ref=False,\n 
lsmask_has_same_orientation_as_ref=False,\n flip_lsmask=False,rotate_lsmask=False,\n invert_ls_mask=False,\n first_datasource_name=\"old\",\n second_datasource_name=\"MERIT derived\",\n matching_parameter_set='minimal',\n additional_matches_list_filename=\n \"/Users/thomasriddick/Documents/data/HDdata/\"\n \"addmatches/additional_matches_10min_upscaled_\"\n \"MERIT_rdirs_vs_modern_day_ext.txt\",\n additional_truesink_matches_list_filename=\n \"/Users/thomasriddick/Documents/data/HDdata/\"\n \"addmatches/addmatches_truesinks/\"\n \"additional_truesinks_matches_10min_upscaled_\"\n \"MERIT_rdirs_vs_modern_day.txt\",\n catchment_and_outflows_mods_list_filename=\\\n \"/Users/thomasriddick/Documents/data/HDdata/\"\n \"catchmods/catch_and_outflow_mods_10min_upscaled_MERIT_rdirs_vs_modern_day.txt\",\n use_single_color_for_discrepancies=True,\n use_only_one_color_for_flowmap=False,\n use_title=False,remove_antartica=False,\n show_true_sinks=True,\n difference_in_catchment_label=\"Difference\",\n grid_type='LatLong10min')\n self.FlowMapTwoColourComparisonWithCatchmentsHelper(ref_flowmap_filename=ref_filename,\n data_flowmap_filename=data_filename,\n ref_catchment_filename=\\\n ref_catchment_filename,\n data_catchment_filename=\\\n data_catchment_filename,\n ref_rdirs_filename=\\\n ref_rdirs_filename,\n data_rdirs_filename=\\\n None,\n reference_rmouth_outflows_filename=\\\n reference_rmouth_outflows_filename,\n data_rmouth_outflows_filename=\\\n data_rmouth_outflows_filename,\n lsmask_filename=lsmask_filename,\n minflowcutoff=5*9/3,\n flip_data=False,\n rotate_data=False,\n flip_ref=False,\n rotate_ref=False,\n lsmask_has_same_orientation_as_ref=False,\n flip_lsmask=False,rotate_lsmask=False,\n invert_ls_mask=False,\n first_datasource_name=\"old\",\n second_datasource_name=\"MERIT derived\",\n matching_parameter_set='minimal',\n additional_matches_list_filename=\n \"/Users/thomasriddick/Documents/data/HDdata/\"\n \"addmatches/additional_matches_10min_upscaled_\"\n \"MERIT_rdirs_vs_modern_day_ext.txt\",\n additional_truesink_matches_list_filename=\n \"/Users/thomasriddick/Documents/data/HDdata/\"\n \"addmatches/addmatches_truesinks/\"\n \"additional_truesinks_matches_10min_upscaled_\"\n \"MERIT_rdirs_vs_modern_day.txt\",\n catchment_and_outflows_mods_list_filename=\\\n \"/Users/thomasriddick/Documents/data/HDdata/\"\n \"catchmods/catch_and_outflow_mods_10min_upscaled_MERIT_rdirs_vs_modern_day.txt\",\n use_single_color_for_discrepancies=True,\n use_only_one_color_for_flowmap=False,\n use_title=False,remove_antartica=False,\n show_true_sinks=False,\n difference_in_catchment_label=\"Difference\",\n grid_type='LatLong10min')\n self.FlowMapTwoColourComparisonWithCatchmentsHelper(ref_flowmap_filename=ref_filename,\n data_flowmap_filename=data_filename,\n ref_catchment_filename=\\\n ref_catchment_filename,\n data_catchment_filename=\\\n data_catchment_filename,\n ref_rdirs_filename=\\\n ref_rdirs_filename,\n data_rdirs_filename=\\\n data_rdirs_filename,\n reference_rmouth_outflows_filename=\\\n reference_rmouth_outflows_filename,\n data_rmouth_outflows_filename=\\\n data_rmouth_outflows_filename,\n lsmask_filename=lsmask_filename,\n minflowcutoff=5*9/3,\n flip_data=False,\n rotate_data=False,\n flip_ref=False,\n rotate_ref=False,\n lsmask_has_same_orientation_as_ref=False,\n flip_lsmask=False,rotate_lsmask=False,\n invert_ls_mask=False,\n first_datasource_name=\"old\",\n second_datasource_name=\"MERIT derived\",\n matching_parameter_set='minimal',\n additional_matches_list_filename=\n 
\"/Users/thomasriddick/Documents/data/HDdata/\"\n \"addmatches/additional_matches_10min_upscaled_\"\n \"MERIT_rdirs_vs_modern_day_ext.txt\",\n additional_truesink_matches_list_filename=\n \"/Users/thomasriddick/Documents/data/HDdata/\"\n \"addmatches/addmatches_truesinks/\"\n \"additional_truesinks_matches_10min_upscaled_\"\n \"MERIT_rdirs_vs_modern_day.txt\",\n catchment_and_outflows_mods_list_filename=\\\n \"/Users/thomasriddick/Documents/data/HDdata/\"\n \"catchmods/catch_and_outflow_mods_10min_upscaled_MERIT_rdirs_vs_modern_day.txt\",\n use_single_color_for_discrepancies=True,\n use_only_one_color_for_flowmap=False,\n use_title=False,remove_antartica=False,\n show_true_sinks=True,\n difference_in_catchment_label=\"Difference\",\n grid_type='LatLong10min')\n self.FlowMapTwoColourComparisonWithCatchmentsHelper(ref_flowmap_filename=ref_filename,\n data_flowmap_filename=data_filename,\n ref_catchment_filename=\\\n ref_catchment_filename,\n data_catchment_filename=\\\n data_catchment_filename,\n ref_rdirs_filename=\\\n ref_rdirs_filename,\n data_rdirs_filename=\\\n data_rdirs_filename,\n reference_rmouth_outflows_filename=\\\n reference_rmouth_outflows_filename,\n data_rmouth_outflows_filename=\\\n data_rmouth_outflows_filename,\n lsmask_filename=lsmask_filename,\n minflowcutoff=2*9/3,\n flip_data=False,\n rotate_data=False,\n flip_ref=False,\n rotate_ref=False,\n lsmask_has_same_orientation_as_ref=False,\n flip_lsmask=False,rotate_lsmask=False,\n invert_ls_mask=False,\n first_datasource_name=\"old\",\n second_datasource_name=\"MERIT derived\",\n matching_parameter_set='minimal',\n additional_matches_list_filename=\n \"/Users/thomasriddick/Documents/data/HDdata/\"\n \"addmatches/additional_matches_10min_upscaled_\"\n \"MERIT_rdirs_vs_modern_day_ext.txt\",\n additional_truesink_matches_list_filename=\n \"/Users/thomasriddick/Documents/data/HDdata/\"\n \"addmatches/addmatches_truesinks/\"\n \"additional_truesinks_matches_10min_upscaled_\"\n \"MERIT_rdirs_vs_modern_day.txt\",\n catchment_and_outflows_mods_list_filename=\\\n \"/Users/thomasriddick/Documents/data/HDdata/\"\n \"catchmods/catch_and_outflow_mods_10min_upscaled_MERIT_rdirs_vs_modern_day.txt\",\n use_single_color_for_discrepancies=True,\n use_only_one_color_for_flowmap=False,\n use_title=False,remove_antartica=False,\n show_true_sinks=True,\n difference_in_catchment_label=\"Difference\",\n grid_type='LatLong10min')", "def load_data():\n\t\t# load the data\n\t\tDATPATH = \"../data/\"\n\t\t#fnino = DATPATH + \"nino3.csv\" # 1871-2000\n\t\tfnino = DATPATH + \"tas_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_nino3_tseries.csv\" # 1871-2016\n\t\t#fnino = DATPATH + \"nino34.long.data\"\n\t\t#nc_data_nino3 = netCDF4.Dataset(fnino)\n\t\t#nino3_load = nc_data_nino3.variables['tas'][:]\n\t\t#dnino = nino3_load.flatten()\n\n\t\tdnino = np.genfromtxt(fnino, delimiter=\",\", dtype=float).flatten()\n\t\t#fismr = DATPATH + \"ismr.csv\" # 1871-2000\n\t\t#fismr = DATPATH + \"psl_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_1_india_goswami_2002_tseries.csv\" # 1871-2016\n\t\tfismr = DATPATH + \"pr_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_goswami_india_tseries.csv\" # 1871-2016\n\t\tdismr = np.genfromtxt(fismr, delimiter=\",\", dtype=float).flatten()\n\t\t#fvolc = DATPATH + \"robock.txt\" # 1871-2000\n\t\tfvolc = DATPATH + \"sigl.txt\" # 1871-2016\n\t\tdvolc = np.genfromtxt(fvolc, delimiter=\",\", dtype=float).flatten()\n\n\t\tfvolc_source = DATPATH + \"volc_source_850_1850.csv\" # 1871-2016\n\t\tdvolc_source = np.genfromtxt(fvolc_source, 
delimiter=\",\", dtype=float).flatten()\n\t\t# simple check for data consistency\n\t\tassert dnino.shape == dismr.shape, \"Data sets are unequal!\"\n\t\tassert int(dismr.shape[0]/12) == dvolc.shape[0], \"Data sets are unequal\"\n\t\treturn dnino, dismr, dvolc, dvolc_source", "def merge_refl_data(gridradfiles):\n # Read data\n dats = [xr.open_dataset(file) for file in gridradfiles]\n inds = dats[0][\"index\"]\n time = dats[0].Analysis_time\n lats = dats[0][\"Latitude\"]\n lons = dats[0][\"Longitude\"]-360.\n z = dats[0][\"Altitude\"]\n refl = dats[0][\"Reflectivity\"].values\n # Reshape reflectivity data\n refl_vals = np.zeros(len(z.values)*len(lats.values)*len(lons.values))*np.NaN\n refl_vals[inds.values] = refl\n refl_reshape = refl_vals.reshape((len(z.values), len(lats.values),\n len(lons.values)))\n # Create reflectivity xarray DataArray obj\n dat = xr.DataArray(refl_reshape, coords=[z, lats, lons])\n dat.name = \"Reflectivity\"\n # For each time stamp in the dataset, pull reflectivity and concatenate\n # onto DataArray object\n for i in range(1,len(dats)):\n # Pull reflectivity information and post-process\n ds = dats[i]\n inds = ds[\"index\"]\n time = ds.Analysis_time\n lats = ds[\"Latitude\"]\n lons = ds[\"Longitude\"]-360.\n z = ds[\"Altitude\"]\n refl = ds[\"Reflectivity\"].values\n refl_vals = np.zeros(len(z.values)*len(lats.values)*len(lons.values))*np.NaN\n refl_vals[inds.values] = refl\n refl_reshape = refl_vals.reshape((len(z.values), len(lats.values),\n len(lons.values)))\n # Initialize DataArray to concatenate\n da = xr.DataArray(refl_reshape, coords=[z, lats, lons])\n da.attrs['Analysis_Endtime'] = time\n da.name = \"Reflectivity\"\n # Concatenate new DataArray onto original\n dat = xr.concat([dat, da], dim='Hours')\n # Add the end datetime object as an attribute\n dat.attrs[\"Analysis_Endtime\"] = time\n return dat", "def merge_refl_data(gridradfiles):\n # Read data\n dats = [xr.open_dataset(file) for file in gridradfiles]\n inds = dats[0][\"index\"]\n time = dats[0].Analysis_time\n lats = dats[0][\"Latitude\"]\n lons = dats[0][\"Longitude\"]-360.\n z = dats[0][\"Altitude\"]\n refl = dats[0][\"Reflectivity\"].values\n # Reshape reflectivity data\n refl_vals = np.zeros(len(z.values)*len(lats.values)*len(lons.values))*np.NaN\n refl_vals[inds.values] = refl\n refl_reshape = refl_vals.reshape((len(z.values), len(lats.values),\n len(lons.values)))\n # Create reflectivity xarray DataArray obj\n dat = xr.DataArray(refl_reshape, coords=[z, lats, lons])\n dat.name = \"Reflectivity\"\n # For each time stamp in the dataset, pull reflectivity and concatenate\n # onto DataArray object\n for i in range(1,len(dats)):\n # Pull reflectivity information and post-process\n ds = dats[i]\n inds = ds[\"index\"]\n time = ds.Analysis_time\n lats = ds[\"Latitude\"]\n lons = ds[\"Longitude\"]-360.\n z = ds[\"Altitude\"]\n refl = ds[\"Reflectivity\"].values\n refl_vals = np.zeros(len(z.values)*len(lats.values)*len(lons.values))*np.NaN\n refl_vals[inds.values] = refl\n refl_reshape = refl_vals.reshape((len(z.values), len(lats.values),\n len(lons.values)))\n # Initialize DataArray to concatenate\n da = xr.DataArray(refl_reshape, coords=[z, lats, lons])\n da.attrs['Analysis_Endtime'] = time\n da.name = \"Reflectivity\"\n # Concatenate new DataArray onto original\n dat = xr.concat([dat, da], dim='Hours')\n # Add the end datetime object as an attribute\n dat.attrs[\"Analysis_Endtime\"] = time\n return dat", "def dataIdentify(self, in_nc):\r\n data_nc = NET.Dataset(in_nc)\r\n time = 
data_nc.variables['time'][:]\r\n diff = NUM.unique(NUM.diff(time))\r\n data_nc.close()\r\n #time_interval_highres = NUM.array([1.0,3.0,6.0],dtype=float)\r\n #time_interval_lowres_full = NUM.array([3.0, 6.0],dtype=float)\r\n #time_interval_lowres = NUM.array([6.0],dtype=float)\r\n #time_interval_lowres_3Hr = NUM.array([3.0],dtype=float)\r\n\t\t\r\n time_interval_HRES1 = NUM.array([1.0],dtype=float) # Line Added/Modified CJB 20190108\r\n time_interval_HRES13 = NUM.array([1.0,3.0],dtype=float) # Line Added/Modified CJB 20190108\r\n time_interval_HRES136 = NUM.array([1.0,3.0,6.0],dtype=float) # Line Added/Modified CJB 20190108\r\n time_interval_ENS3 = NUM.array([3.0],dtype=float) # Line Added/Modified CJB 20190108\r\n time_interval_ENS36 = NUM.array([3.0,6.0],dtype=float) # Line Added/Modified CJB 20190108\r\n time_interval_ENS6 = NUM.array([6.0],dtype=float) # Line Added/Modified CJB 20190108\r\n\r\n\r\n #print \"SDR - diff:\", diff, time_interval_highres, time_interval_lowres_full, time_interval_lowres\r\n #if NUM.array_equal(diff, time_interval_highres):\r\n # return \"HighRes\"\r\n #elif NUM.array_equal(diff, time_interval_lowres_full):\r\n # return \"LowResFull\"\r\n #elif NUM.array_equal(diff, time_interval_lowres):\r\n # return \"LowRes\"\r\n #elif NUM.array_equal(diff, time_interval_lowres_3Hr):\r\n # return \"Low3HrRes\"\r\n #else:\r\n # return None\r\n\t\t\t\r\n if NUM.array_equal(diff, time_interval_HRES1): # Line Added/Modified CJB 20190108\r\n return \"HRES1\" # Line Added/Modified CJB 20190108\r\n elif NUM.array_equal(diff, time_interval_HRES13): # Line Added/Modified CJB 20190108\r\n return \"HRES13\" # Line Added/Modified CJB 20190108\r\n elif NUM.array_equal(diff, time_interval_HRES136): # Line Added/Modified CJB 20190108\r\n return \"HRES136\" # Line Added/Modified CJB 20190108\r\n elif NUM.array_equal(diff, time_interval_ENS3): # Line Added/Modified CJB 20190108\r\n return \"ENS3\" # Line Added/Modified CJB 20190108\r\n elif NUM.array_equal(diff, time_interval_ENS36): # Line Added/Modified CJB 20190108\r\n return \"ENS36\" # Line Added/Modified CJB 20190108\r\n elif NUM.array_equal(diff, time_interval_ENS6): # Line Added/Modified MJS, CJB 20190108\r\n return \"ENS6\" # Line Added/Modified CJB 20190108\r\n else: # Line Added/Modified CJB 20190108\r\n return None # Line Added/Modified CJB 20190108\r", "def compare_at_site(mr1='waroona_run2', mr2='waroona_run2uc', latlon = plotting._latlons_['AWS_wagerup']):\n print(\"TBD\")", "def compare_displacements(ds1,ds2):\n # Obteniendo los datos para BP\n t1 = ds1['t']\n t1 = t1[:n_im-1]\n t1 = mplt.dates.date2num(t1)\n d1 = ds1['d_t']\n # Obteniendo los datos para RMA\n t2 = ds2['t']\n t2 = t2[:n_im-1]\n t2 = mplt.dates.date2num(t2)\n d2 = ds2['d_t']\n\n # Graficando las 2 curvas juntas\n formatter = DateFormatter(\"%d/%m - %H:%M\")\n for i in range(len(d1)):\n # Hallando el valor promedio final x zona\n mean_bp = d1[i].mean()\n mean_rma = d2[i].mean()\n print(\"Valor promedio BP_zona\"+str(i)+\": \",mean_bp)\n print(\"Valor promedio RMA_zona\"+str(i)+\": \",mean_rma)\n print(\"\")\n # Graficando\n direction = 'desplazamientosPromedios_dset'+str(i_o)+'-'+str(i_o+n_im-1)+'_zona'+str(i)\n\n fig, ax= plt.subplots(figsize=(10,7))\n ax.plot_date(t1,d1[i],'b',marker='',markerfacecolor='b',markeredgecolor='b',label='Back Projection')\n ax.plot_date(t2,d2[i],'r',marker='',markerfacecolor='r',markeredgecolor='r',label='RMA')\n ax.set(xlabel='Tiempo',ylabel='Desplazamiento(mm)',title=\"Desplazamientos promedios\\n(Zona \"+str(i)+')')\n 
ax.xaxis.set_major_formatter(formatter)\n ax.xaxis.set_tick_params(rotation=20)\n #ax.set_xlim([R.min(),R.max()])\n ax.set_ylim([-c*1000*4/(4*fc),c*1000*4/(4*fc)])\n ax.grid(linestyle='dashed')\n ax.legend()\n plt.show()\n fig.savefig(os.getcwd()+\"/Results/Desplazamientos/\"+direction,orientation='landscape')\n\n return 'Ok'", "def compare_present_day_and_lgm_river_directions_with_catchments_virna_data_plus_tarasov_style_orog_corrs_for_both(self):\n ref_filename=os.path.join(self.plots_data_dir,\n \"flowmap_ten_minute_data_from_virna_0k_ALG4_sinkless_no_true_sinks_oceans\"\n \"_lsmask_plus_upscale_rdirs_tarasov_orog_corrs_20170422_195301_upscaled_updated.nc\")\n data_filename=os.path.join(self.plots_data_dir,\n \"flowmap_ten_minute_data_from_virna_lgm_ALG4_sinkless_no_true_sinks_oceans_lsmask\"\n \"_plus_upscale_rdirs_tarasov_orog_corrs_20170422_195436_upscaled_updated.nc\")\n lsmask_filename=os.path.join(self.plots_data_dir,\n \"ls_mask_ten_minute_data_from_virna_lgm_ALG4_sinkless_no_true_sinks_oceans_lsmask\"\n \"_plus_upscale_rdirs_tarasov_orog_corrs_20170422_195436_HD_transf.dat\")\n ref_catchment_filename=(\"catchmentmap_ten_minute_data_from_virna_0k_ALG4_sinkless_no_true_sinks_oceans_lsmask\"\n \"_plus_upscale_rdirs_tarasov_orog_corrs_20170422_195301_upscaled_updated.nc\")\n data_catchment_filename=(\"catchmentmap_ten_minute_data_from_virna_lgm_ALG4_sinkless_no_true_sinks_oceans_\"\n \"lsmask_plus_upscale_rdirs_tarasov_orog_corrs_20170422_195436_upscaled_updated.nc\")\n ref_rdirs_filename=(\"upscaled_rdirs_ten_minute_data_from_virna_0k_ALG4_sinkless_no_true_\"\n \"sinks_oceans_lsmask_plus_upscale_rdirs_tarasov_orog_corrs_20170422_195301_upscaled_\"\n \"updated_transf.dat\")\n reference_rmouth_outflows_filename=(self.plots_data_dir + \"/rmouthmap_ten_minute_\"\n \"data_from_virna_0k_ALG4_sinkless_no_true_sinks_oceans_lsmask_plus_upscale\"\n \"_rdirs_tarasov_orog_corrs_20170422_195301_upscaled_updated.nc\")\n data_rmouth_outflows_filename=(self.plots_data_dir + \"/rmouthmap_ten_minute_\"\n \"data_from_virna_lgm_ALG4_sinkless_no_true_sinks_oceans_lsmask_plus_upscale\"\n \"_rdirs_tarasov_orog_corrs_20170422_195436_upscaled_updated.nc\")\n glacier_mask_filename=os.path.join(self.orog_data_directory,\"ice5g_v1_2_21_0k_10min.nc\")\n self.FlowMapTwoColourComparisonWithCatchmentsHelper(ref_flowmap_filename=ref_filename,\n data_flowmap_filename=data_filename,\n ref_catchment_filename=\\\n ref_catchment_filename,\n data_catchment_filename=\\\n data_catchment_filename,\n ref_rdirs_filename=\\\n ref_rdirs_filename,\n data_rdirs_filename=None,\n reference_rmouth_outflows_filename=\\\n reference_rmouth_outflows_filename,\n data_rmouth_outflows_filename=\\\n data_rmouth_outflows_filename,\n lsmask_filename=lsmask_filename,\n minflowcutoff=100,\n flip_data=False,\n rotate_data=True,\n flip_ref=False,\n rotate_ref=True,\n lsmask_has_same_orientation_as_ref=False,\n invert_ls_mask=True,\n first_datasource_name=\"Present day\",\n second_datasource_name=\"LGM\",\n matching_parameter_set='extensive',\n catchment_and_outflows_mods_list_filename=\\\n \"catch_and_outflow_mods_lgm_vs_present_day.txt\",\n additional_matches_list_filename=\\\n \"additional_matches_10min_upscaled_lgm_vs_present.txt\",\n use_single_color_for_discrepancies=True,\n use_only_one_color_for_flowmap=False,\n use_title=False,remove_antartica=True,\n difference_in_catchment_label=\"Difference\",\n glacier_mask_filename=glacier_mask_filename,\n glacier_mask_grid_type='LatLong10min',\n flip_glacier_mask=True,\n rotate_glacier_mask=True,\n 
grid_type='HD')", "def find_missing_lsoas():\n\n df = pd.read_excel(os.path.join(census_data_fpath,\"london\",\"deprivation_london.xls\"),sheet_name=\"Sub domains\")\n lsoas_all = df[\"LSOA code (2011)\"].tolist()\n lsoas = dill.load(open(os.path.join(output_path_files,\"mobility\",\"antenna_lsoa_london_only.dill\"),\"rb\"))\n print len(set(lsoas.values()))\n temp = list(set(lsoas_all) - set(lsoas.values()))\n print 'total number of lsoas: {0}'.format(len(lsoas_all))\n print \"number of missing lsoas: {0} {1}\".format(len(temp), float(len(temp))/len(lsoas_all))", "def process_lidar(radial_file, scan_file, wind_file, site, period, netcdf_path):\n lidar = rasp.lidar_from_csv(radial_file, scan_file, wind=wind_file)\n # remove status==0 data (if we have the whole data)\n if 'Status' in lidar.data_vars:\n lidar['CNR'] = lidar['CNR'].where(lidar['Status'])\n lidar['DRWS'] = lidar['DRWS'].where(lidar['Status'])\n # remove unneeded variables if they exist\n to_drop = list(set(lidar.data_vars) & set(['Status', 'Error', 'Confidence', 'RWS']))\n lidar = lidar.drop(to_drop)\n lidar = lidar.rasp.cf_compliant()\n lidar.to_netcdf(netcdf_path)", "def getDataForLBMPvsLoadComparisons(self):\n\n\t\t# Variables\n\t\tlbmp_data = self.getDataForLBMPZonalComparison()[14] # Getting CAPITL zone\n\t\tload_data = self.getLoadData()\n\t\tfinal_data = []\n\t\tlbmp_dict = {}\n\t\tload_dict = {}\n\t\tload_values = []\n\t\tdates = []\n\t\tprice_values = []\n\n\t\t# Getting needed lbmp_data\n\t\tkey = \"LBMP ($/MWHr) in \" + lbmp_data['key']\n\t\tfor value in lbmp_data['values']:\n\t\t\tdates_and_prices = []\n\t\t\tdates_and_prices.append(value['x'])\n\t\t\tdates.append(value['x'])\n\t\t\tdates_and_prices.append(value['y'])\n\t\t\tprice_values.append(dates_and_prices)\n\t\tlbmp_dict['key'] = key\n\t\tlbmp_dict['values'] = price_values\n\t\tfinal_data.append(lbmp_dict)\n\t\tdata_dict = {}\n\n\t\t# Getting needed load data\n\t\tyesterday = self.helper.getYesterday()\n\t\tloads = load_data[yesterday[0]][int(yesterday[1])][int(yesterday[2])]\n\t\tfor i in range(0,len(loads)):\n\t\t\tdates_and_loads = []\n\t\t\tdates_and_loads.append(dates[i])\n\t\t\tdates_and_loads.append(loads[i])\n\t\t\tload_values.append(dates_and_loads)\n\t\tload_dict['key'] = lbmp_data['key'] + \" Area Loads\"\n\t\tload_dict['values'] = load_values\n\t\tload_dict['bar'] = 'True'\n\t\tfinal_data.append(load_dict)\n\t\treturn final_data", "def compare_plane_data(pd1, pd2):\n raise NotImplementedError", "def lidar_callback(self, data):\n try:\n lidar_data = list()\n lidar_degrees = list()\n overlay_lidar_data = list()\n overlay_lidar_degrees = list()\n\n for i in range(len(data.ranges)):\n degree = utils.rad2deg(data.angle_min + data.angle_increment * i)\n # Select the distance for the angle from min_lidar_angle to max_lidar_angle set as per the\n # lidar configuration of the model selected\n if degree >= self.lidar_configuration[constants.LidarConfigurationKeys.MIN_LIDAR_ANGLE] and \\\n degree <= self.lidar_configuration[constants.LidarConfigurationKeys.MAX_LIDAR_ANGLE]:\n # If the distance value if \"inf\", ignore and set it to be the max lidar dist\n if data.ranges[i] == float(\"inf\"):\n lidar_data.append(self.max_lidar_dist)\n else:\n # Select the distance values in the min to max range of distance\n # as per the lidar configuration of the model selected\n lidar_data.append(min(max(data.ranges[i],\n self.lidar_configuration[\n constants.\n LidarConfigurationKeys.\n MIN_LIDAR_DIST]),\n self.max_lidar_dist))\n lidar_degrees.append(degree)\n if 
data.ranges[i] == float(\"inf\"):\n overlay_lidar_data.append(self.lidar_overlay_configuration[\n constants.\n LidarOverlayConfigurationKeys.\n MAX_LIDAR_DIST])\n else:\n # Select the distance values in the min to max range of distance\n # as per the lidar configuration of the model selected\n overlay_lidar_data.append(min(max(data.ranges[i],\n self.lidar_overlay_configuration[\n constants.\n LidarOverlayConfigurationKeys.\n MIN_LIDAR_DIST]),\n self.lidar_overlay_configuration[\n constants.\n LidarOverlayConfigurationKeys.\n MAX_LIDAR_DIST]))\n overlay_lidar_degrees.append(degree)\n\n # interpolate the lidar data\n desired_lidar_degrees = np.linspace(self.lidar_configuration[\n constants.\n LidarConfigurationKeys.\n MIN_LIDAR_ANGLE],\n self.lidar_configuration[\n constants.\n LidarConfigurationKeys.\n MAX_LIDAR_ANGLE],\n num=self.lidar_configuration[\n constants.\n LidarConfigurationKeys.\n NUM_LIDAR_VALUES])\n\n # numpy array\n lidar_data_interp = np.interp(desired_lidar_degrees, lidar_degrees, lidar_data)\n\n self.lidar_buffer.put(self.lidar_preprocessing_obj.preprocess_data(lidar_data_interp).tolist())\n\n # interpolate lidar data for overlay\n desired_overlay_lidar_degrees = np.linspace(constants.\n DEFAULT_LIDAR_OVERLAY_CONFIGURATION[\n constants.\n LidarConfigurationKeys.\n MIN_LIDAR_ANGLE],\n constants.\n DEFAULT_LIDAR_OVERLAY_CONFIGURATION[\n constants.\n LidarConfigurationKeys.\n MAX_LIDAR_ANGLE],\n num=constants.\n DEFAULT_LIDAR_OVERLAY_CONFIGURATION[\n constants.\n LidarConfigurationKeys.\n NUM_LIDAR_VALUES])\n\n # numpy array\n overlay_lidar_data_interp = np.interp(desired_overlay_lidar_degrees,\n overlay_lidar_degrees,\n overlay_lidar_data)\n self.overlay_lidar_buffer.put(\n self.lidar_overlay_preprocessing_obj.preprocess_data(overlay_lidar_data_interp))\n except Exception as ex:\n self.get_logger().error(f\"Error in LiDAR callback: {ex}\")", "def compare(self, other, enforce_mask=False, enforce_grid=False,\n enforce_area=False, enforce_aream=False, enforce_all=False):\n eps_mask = 1.0e-6\n eps_grid = 1.0e-2\n eps_area = 1.0e-1\n\n # Do a global gather to create a non-distributed attribute vector\n debugPrint( \"self.lgrid:\\n\",self.lgrid )\n debugPrint( \"other.lgrid:\\n\",other.lgrid )\n gGrid1 = attributevector.AttributeVector(self.ifields, self.rfields, self.lsize())\n gGrid1.initv(self.lgrid, self.lgrid.lsize())\n gGrid1.gather(self.lgrid, self.gsMap, comm.world_pe0, comm.component_pid, comm.local_comm) \n gGrid2 = attributevector.AttributeVector(other.ifields, other.rfields, other.lsize())\n gGrid2.initv( other.lgrid, other.lgrid.lsize() )\n gGrid2.gather(other.lgrid, self.gsMap,comm.world_pe0, comm.component_pid, comm.local_comm)\n\n # From here on, everything is done by the root pe\n if( comm.component_pid != comm.world_pe0 ):\n return\n\n # Compare size of domain\n npts1 = gGrid1.lsize()\n npts2 = gGrid2.lsize()\n npts = npts1\n\n if ( npts1 == npts2 ):\n debugPrint( \"the domain size is \",npts )\n else:\n debugPrint( \"domain size #1 = \", npts1 )\n debugPrint( \"domain size #2 = \", npts2 )\n debugPrint( \"ERROR: domain size mis-match\" )\n # call shr_sys_abort(subName // \"ERROR: domain size mis-match\")\n # Exceptions?\n\n # If there was no problem, continue:\n # Compare Domain masks:\n debugPrint(\"gData1:\\n\",gGrid1)\n debugPrint(\"gData2:\\n\",gGrid2)\n data1,data1_size = gGrid1.exportRAttr(\"mask\")#rcode)?\n data2,data2_size = gGrid2.exportRAttr(\"mask\")#rcode)?\n \n ndiff = 0\n debugPrint( \"npts:\",npts )\n debugPrint( \"length of data1:\",data1_size 
)\n for n in xrange(0,npts-1):\n if ( (( (abs(data1[n])) > eps_mask ) and (abs(data1[n]) < eps_mask )) or \n ( (( abs(data1[n])) < eps_mask ) and (( abs(data1[n])) > eps_mask) ) ):\n ndiff = ndiff + 1\n\n # Enforce consistency: \n # Nested function declaration\n def enforce_consistency(msg,exception=None):\n if (enforce_mask or enforce_all):\n if (ndiff > 0):\n debugPrint( msg )\n # Raise Exception\n \n enforce_consistency(\"ERROR: incompatible domain masks\")\n \n # Compute Maximum Latitude and Longitude Differences\n mask = data1\n ndiff = 0\n data1,data1_size = gGrid1.exportRAttr(\"lat\")#,rcode))\n data2,data2_size = gGrid2.exportRAttr(\"lat\")#,rcode))\n diff = 0\n max_diff = 0.0\n for n in xrange(npts):\n if( abs( mask[n] ) > eps_mask ):\n diff = abs( data1[n] - data2[n] )\n max_diff = max(max_diff, diff)\n if( diff > eps_grid ):\n ndiff = ndiff + 1\n debugPrint( \"Maximum latitude difference = \",max_diff )\n\n data1,data1_size = gGrid1.exportRAttr(\"lon\")#,rcode))\n data2,data2_size = gGrid2.exportRAttr(\"lon\")#,rcode))\n max_diff = 0.0\n\n for n in xrange(npts):\n if( abs( mask[n] ) > eps_mask ):\n x1 = data1[n]\n x2 = data2[n]\n if( x1 > x2 ): #make sure x1 < x2\n # swap(x1,x2)\n x1 = data2[n]\n x2 = data1[n]\n while( (x1+360.0) < (x2+180.0) ):#longitude is periodic\n x1 = x1 + 360.0\n diff = abs( x2 - x1 )\n max_diff = max(max_diff,diff)\n \n if (diff > eps_grid):\n ndiff = ndiff + 1\n debugPrint( \"Maximum longitude difference = \",max_diff )\n\n enforce_consistency(\"ERROR: incompatible domain grid coordinates!\")\n\n # Compare Area:\n data1,data1_size = gGrid1.exportRAttr( \"area\" )#, rcode )\n data2,data2_size = gGrid2.exportRAttr( \"area\" )#, rcode )\n\n ndiff = 0\n max_diff = 0.0\n\n for n in xrange(npts):\n if( abs( mask[n] ) > eps_mask ):\n if( data2[n] != 0.0 ):\n diff = abs( (data2[n] - data1[n]) / data2[n] )\n max_diff = max(max_diff,diff)\n if( diff > eps_area ):\n ndiff = ndiff + 1\n debugPrint( \"Maxium relative error of area (model) = \", max_diff )\n\n enforce_consistency(\"ERROR: icompatible domain area(model)\")\n\n # Compare aream\n data1,data1_size = gGrid1.exportRAttr(\"aream\")#,rcode))\n data2,data2_size = gGrid2.exportRAttr(\"aream\")#,rcode))\n\n ndiff = 0\n max_diff = 0.0\n for n in xrange(npts):\n if ( abs( mask[n] ) > eps_mask ):\n if( data2[n] != 0.0 ):\n diff = abs((data2[n] - data1[n])/data2[n])\n max_diff = max(max_diff,diff)\n if( diff > eps_area ):\n ndiff = ndiff + 1\n debugPrint( \"maximum relative error of area(map) = \",max_diff )\n\n enforce_consistency(\"ERROR: incompatible domain area (map)\")\n\n # Clean up, we're finished!\n return", "def test_lat_not_loc_1(self):\n patient = Semiology('lat_not_loc', Laterality.LEFT, Laterality.LEFT)\n patient.data_frame = self.df\n lat_not_loc_all_combined_gifs = patient.query_lateralisation(\n one_map_dummy)\n\n # inspect result\n lat_not_loc_result, num_query_loc = patient.query_semiology()\n\n self.assertIs(type(lat_not_loc_all_combined_gifs), pd.DataFrame)\n assert not lat_not_loc_all_combined_gifs.empty\n\n # drop the zero entries as these are from the CL/IL zeros:\n lat_not_loc_all_combined_gifs = lat_not_loc_all_combined_gifs[['Gif Parcellations', 'pt #s']].astype(\n {'Gif Parcellations': 'int32', 'pt #s': 'int32'})\n lat_not_loc_all_combined_gifs.set_index(\n 'Gif Parcellations', inplace=True)\n lat_not_loc_gifsclean = lat_not_loc_all_combined_gifs.loc[\n lat_not_loc_all_combined_gifs['pt #s'] != 0, :]\n # now we know only the CL data remains in this dummy data, which is on the RIGHT.\n 
gifs_right, gifs_left = gifs_lat_factor()\n lat_not_loc_gifsclean_rights = (\n lat_not_loc_gifsclean.index.isin(gifs_right).all()\n )\n\n # inspect result assertions\n assert(lat_not_loc_result.Localising.sum() == 0)\n assert(lat_not_loc_result['Lateralising'].sum() == 1)\n\n # all_combined_gifs assertions\n assert((\n lat_not_loc_gifsclean_rights == True)\n )\n assert(\n (\n lat_not_loc_gifsclean.index.isin(gifs_left)).any() == False\n )\n assert (lat_not_loc_gifsclean['pt #s'].sum()\n == lat_not_loc_gifsclean.shape[0])\n\n # test MTG on right 155 gif # gives 1:\n heatmap, _ = patient.get_num_datapoints_dict(method='minmax')\n assert 156 not in heatmap # left\n assert heatmap[155] == 1 # right", "def compare_lgm_river_directions_with_catchments_virna_data_with_vs_without_tarasov_style_orog_corrs(self):\n tarasov_upscaled_data_datetime=\"20170518_193949\"\n ref_filename=os.path.join(self.plots_data_dir,\n \"flowmap_ten_minute_data_from_virna_lgm_ALG4_sinkless\"\n \"_no_true_sinks_oceans_lsmask_plus_upscale_rdirs_20170127\"\n \"_163957_upscaled_updated.nc\")\n data_filename=os.path.join(self.plots_data_dir,\n \"flowmap_ten_minute_data_from_virna_lgm_ALG4_sinkless_no_true_sinks_oceans_lsmask\"\n \"_plus_upscale_rdirs_tarasov_orog_corrs_{0}_upscaled_updated.nc\".\\\n format(tarasov_upscaled_data_datetime))\n lsmask_filename=os.path.join(self.plots_data_dir,\n \"ls_mask_ten_minute_data_from_virna_lgm_ALG4_sinkless_no_true_sinks_oceans_lsmask\"\n \"_plus_upscale_rdirs_tarasov_orog_corrs_{0}_HD_transf.dat\".\\\n format(tarasov_upscaled_data_datetime))\n ref_catchment_filename=(\"catchmentmap_ten_minute_data_from_virna_lgm_ALG4_sinkless_no_true_sinks_oceans_\"\n \"lsmask_plus_upscale_rdirs_20170127_163957_upscaled_updated.nc\")\n data_catchment_filename=(\"catchmentmap_ten_minute_data_from_virna_lgm_ALG4_sinkless_no_true_sinks_oceans_\"\n \"lsmask_plus_upscale_rdirs_tarasov_orog_corrs_{0}_upscaled_updated.nc\".\\\n format(tarasov_upscaled_data_datetime))\n ref_rdirs_filename=(\"upscaled_rdirs_ten_minute_data_from_virna_0k_ALG4_sinkless_no_true\"\n \"_sinks_oceans_lsmask_plus_upscale_rdirs_20170123_165707.nc\")\n reference_rmouth_outflows_filename=(self.plots_data_dir + \"/rmouthmap_ten_\"\n \"minute_data_from_virna_0k_ALG4_sinkless_no_true_sinks_oceans_lsmask_plus\"\n \"_upscale_rdirs_20170123_165707_upscaled_updated.nc\")\n data_rmouth_outflows_filename=(self.plots_data_dir + \"/rmouths/rmouthmap_ten_minute_\"\n \"data_from_virna_lgm_ALG4_sinkless_no_true_sinks_oceans_lsmask_plus_upscale\"\n \"_rdirs_tarasov_orog_corrs_{0}_upscaled_updated.nc\".\\\n format(tarasov_upscaled_data_datetime))\n glacier_mask_filename=os.path.join(self.orog_data_directory,\"ice5g_v1_2_21_0k_10min.nc\")\n self.FlowMapTwoColourComparisonWithCatchmentsHelper(ref_flowmap_filename=ref_filename,\n data_flowmap_filename=data_filename,\n ref_catchment_filename=\\\n ref_catchment_filename,\n data_catchment_filename=\\\n data_catchment_filename,\n ref_rdirs_filename=\\\n ref_rdirs_filename,\n data_rdirs_filename=None,\n reference_rmouth_outflows_filename=\\\n reference_rmouth_outflows_filename,\n data_rmouth_outflows_filename=\\\n data_rmouth_outflows_filename,\n lsmask_filename=lsmask_filename,\n minflowcutoff=100,\n flip_data=False,\n rotate_data=True,\n flip_ref=False,\n rotate_ref=True,\n lsmask_has_same_orientation_as_ref=False,\n invert_ls_mask=True,\n first_datasource_name=\"Present day\",\n second_datasource_name=\"LGM\",\n matching_parameter_set='extensive',\n catchment_and_outflows_mods_list_filename=\\\n 
\"catch_and_outflow_mods_lgm_vs_present_day.txt\",\n additional_matches_list_filename=\\\n \"additional_matches_10min_upscaled_lgm_vs_present.txt\",\n use_single_color_for_discrepancies=True,\n use_only_one_color_for_flowmap=False,\n use_title=False,remove_antartica=True,\n difference_in_catchment_label=\"Difference\",\n glacier_mask_filename=glacier_mask_filename,\n glacier_mask_grid_type='LatLong10min',\n flip_glacier_mask=True,\n rotate_glacier_mask=True,\n grid_type='HD')", "def evaluate_field_diff(smdf1, smdf2, fieldname, orbitnameone, orbitnametwo, vmin=-1, vmax=1, xaxis='Latitude', save_fig_directory=None):\n logging.debug(\"Evaluating difference between 2 dataframes for field '{}'...\".format(fieldname))\n logging.debug('The difference runs from 1 -> 2, ie. {} -> {}, 2 subtract 1'.format(orbitnameone, orbitnametwo))\n logging.debug('Dataset 1: {}'.format(orbitnameone))\n logging.debug('Dataset 2: {}'.format(orbitnametwo))\n\n # Exclude NaN records (reported as fieldname = -999.0)\n frame1 = smdf1[smdf1[fieldname] != -999.0]\n frame2 = smdf2[smdf2[fieldname] != -999.0]\n\n # Print record counts\n logging.debug('Dataset 1 contains {}/{} valid datarows'.format(len(frame1.index), len(smdf1)))\n logging.debug('Dataset 2 contains {}/{} valid datarows'.format(len(frame2.index), len(smdf2)))\n\n # Get records in common\n common = pd.merge(frame1, frame2, how='inner', on=['Days', 'Seconds', 'Microseconds', 'Grid_Point_ID'])\n common.rename(columns={'Latitude_x': 'Latitude', 'Longitude_x': 'Longitude'}, inplace=True)\n common.drop('Latitude_y', axis=1, inplace=True)\n common.drop('Longitude_y', axis=1, inplace=True)\n common[fieldname+'_Diff'] = common[fieldname+'_y'] - common[fieldname+'_x']\n common.reset_index(inplace=True)\n\n # Outer merge ready for getting new records\n outer = pd.merge(frame1, frame2, how='outer', on=['Days', 'Seconds', 'Microseconds', 'Grid_Point_ID'],\n indicator=True)\n # Get records in 1 but not 2\n leftonly = outer[outer['_merge'] == 'left_only'].copy()\n leftonly.rename(columns={'Latitude_x': 'Latitude', 'Longitude_x': 'Longitude', fieldname+'_x': fieldname},\n inplace=True)\n leftonly.drop('Latitude_y', axis=1, inplace=True)\n leftonly.drop('Longitude_y', axis=1, inplace=True)\n leftonly.drop(fieldname+'_y', axis=1, inplace=True)\n leftonly.drop('_merge', axis=1, inplace=True)\n\n # Get records in 2 but not 1\n rightonly = outer[outer['_merge'] == 'right_only'].copy()\n rightonly.rename(columns={'Latitude_x': 'Latitude', 'Longitude_x': 'Longitude', fieldname+'_y': fieldname},\n inplace=True)\n rightonly.drop('Latitude_y', axis=1, inplace=True)\n rightonly.drop('Longitude_y', axis=1, inplace=True)\n rightonly.drop(fieldname+'_x', axis=1, inplace=True)\n rightonly.drop('_merge', axis=1, inplace=True)\n\n non_zero_diff = common[common[fieldname+'_Diff'] != 0]\n\n logging.debug('Dataset analysis:')\n logging.debug('{} rows common to both datasets.'.format(len(common.index)))\n logging.debug('{}/{} common rows have non-zero differences.'.format(len(non_zero_diff.index), len(common.index)))\n logging.debug('{} rows in dataset 1 only.'.format(len(leftonly.index)))\n logging.debug('{} rows in dataset 2 only.'.format(len(rightonly.index)))\n\n # Get records in common that are same/diff\n\n # Make plots only if we have differences\n if non_zero_diff.empty:\n logging.debug('No differences to plot')\n else:\n # First plot (geographic map)\n plot_sm_difference(common, orbitnameone, orbitnametwo, fieldname=fieldname+'_Diff', vmin=vmin, vmax=vmax, 
save_fig_directory=save_fig_directory)\n\n #fig2, ax2 = plt.subplots(1)\n ## plot each difference against the index grid point id\n #common.plot(x=xaxis, y=fieldname+'_Diff', ax=ax2, legend=False, rot=90,\n # fontsize=8, clip_on=False, style='o')\n #ax2.set_ylabel(fieldname + ' Diff')\n #ax2.axhline(y=0, linestyle=':', linewidth='0.5', color='k')\n #fig2.tight_layout()\n\n # Second plot (scatter plot, diff by lat)\n # Plot only the ones with a non-zero difference\n fig3, ax3 = plt.subplots(1)\n plt.title('{} : ({}) subtract ({})'.format(fieldname.replace('_',' '), orbitnametwo, orbitnameone), wrap=True)\n non_zero_diff.plot(x=xaxis, y=fieldname+'_Diff', ax=ax3, legend=False,\n rot=90, fontsize=8, clip_on=False, style='o')\n ax3.axhline(y=0, linestyle=':', linewidth='0.5', color='k')\n ax3.set_ylabel(fieldname + ' Diff')\n fig3.tight_layout()\n\n if (save_fig_directory != None):\n # Requested to save the figure\n save_name = 'diff-scatter-({})-subtr-({})-field-({})-{}.png'.format(orbitnametwo, orbitnameone, fieldname.replace(' ', ''), datetime.now().strftime('%Y%m%d-%H%M%S'))\n logging.debug('Attempting to save figure with name \"{}\"'.format(save_name))\n plt.savefig(os.path.join(save_fig_directory, save_name))\n plt.close()\n else:\n plt.show()", "def test_latnotloc_and_latandloc_2(self):\n patient = Semiology('lat_', Laterality.LEFT, Laterality.LEFT)\n patient.data_frame = self.df\n lat_not_loc_all_combined_gifs = patient.query_lateralisation(\n one_map_dummy)\n\n # inspect result\n lat_not_loc_result, _ = patient.query_semiology()\n\n self.assertIs(type(lat_not_loc_all_combined_gifs), pd.DataFrame)\n assert not lat_not_loc_all_combined_gifs.empty\n\n # drop the zero entries - should be only the IL left ones which aren't MTG of TL:\n lat_not_loc_all_combined_gifs = lat_not_loc_all_combined_gifs[['Gif Parcellations', 'pt #s']].astype(\n {'Gif Parcellations': 'int32', 'pt #s': 'int32'})\n lat_not_loc_all_combined_gifs.set_index(\n 'Gif Parcellations', inplace=True)\n lat_not_loc_gifsclean = lat_not_loc_all_combined_gifs.loc[\n lat_not_loc_all_combined_gifs['pt #s'] != 0, :]\n\n gifs_right, gifs_left = gifs_lat_factor()\n lat_not_loc_gifsclean_rights = (\n lat_not_loc_gifsclean.drop(index=156).index.isin(gifs_right).all()\n )\n\n # inspect result assertions\n assert(lat_not_loc_result.Localising.sum() == 1)\n assert(lat_not_loc_result['Lateralising'].sum() == 2)\n\n # all_combined_gifs assertions\n # all except GIF 156 (L MTG) are in the right GIFs:\n assert((\n lat_not_loc_gifsclean_rights == True)\n )\n assert(\n (\n lat_not_loc_gifsclean.index.isin(gifs_left)).any() == True\n )\n # assert using shape as all pt #s are 1:\n assert (lat_not_loc_gifsclean['pt #s'].sum()\n == lat_not_loc_gifsclean.shape[0])\n\n # check that latnotloc gives 1 and latandloc adds zero to right MTG GIF #155\n heatmap, _ = patient.get_num_datapoints_dict(method='minmax')\n assert heatmap[155] == 1 # right", "def preprocess_data(self, lidar_data):\n lidar_data = np.min(lidar_data.reshape(-1, self.num_sectors), axis=1)\n return (lidar_data < self.max_lidar_dist).astype(float)", "def compare(self, *args):\n return _ida_hexrays.lvar_locator_t_compare(self, *args)", "def compare_present_day_river_directions_with_catchments_virna_data_with_vs_without_tarasov_style_orog_corrs(self):\n ref_filename=os.path.join(self.plots_data_dir,\n \"flowmap_ten_minute_data_from_virna_0k_ALG4_sinkless_no_true_sinks_oceans\"\n \"_lsmask_plus_upscale_rdirs_tarasov_orog_corrs_20170422_195301_upscaled_updated.nc\")\n 
data_filename=os.path.join(self.plots_data_dir,\n \"flowmap_ten_minute_data_from_virna_0k_ALG4_sinkless\"\n \"_no_true_sinks_oceans_lsmask_plus_upscale_rdirs_20170123\"\n \"_165707_upscaled_updated.nc\")\n lsmask_filename=os.path.join(self.plots_data_dir,\n \"ls_mask_ten_minute_data_from_virna_0k_ALG4_sinkless_no_true_sinks_oceans_lsmask\"\n \"_plus_upscale_rdirs_tarasov_orog_corrs_20170422_195301_HD_transf.nc\")\n ref_catchment_filename=(\"catchmentmap_ten_minute_data_from_virna_0k_ALG4_sinkless_no_true_sinks_oceans_lsmask\"\n \"_plus_upscale_rdirs_tarasov_orog_corrs_20170422_195301_upscaled_updated.nc\")\n data_catchment_filename=(\"catchmentmap_ten_minute_data_from_virna_0k_ALG4_sinkless\"\n \"_no_true_sinks_oceans_lsmask_plus_upscale_rdirs_20170123_165707_upscaled_updated.nc\")\n ref_rdirs_filename=(\"upscaled_rdirs_ten_minute_data_from_virna_0k_ALG4_sinkless_no_true_\"\n \"sinks_oceans_lsmask_plus_upscale_rdirs_tarasov_orog_corrs_20170422_195301_upscaled_\"\n \"updated_transf.dat\")\n reference_rmouth_outflows_filename=(self.plots_data_dir + \"/rmouthmap_ten_minute_\"\n \"data_from_virna_0k_ALG4_sinkless_no_true_sinks_oceans_lsmask_plus_upscale\"\n \"_rdirs_tarasov_orog_corrs_20170422_195301_upscaled_updated.nc\")\n data_rmouth_outflows_filename=(self.plots_data_dir + \"/rmouths/rmouthmap_ten_\"\n \"minute_data_from_virna_0k_ALG4_sinkless_no_true_sinks_oceans_lsmask_plus\"\n \"_upscale_rdirs_20170123_165707_upscaled_updated.nc\")\n glacier_mask_filename=os.path.join(self.orog_data_directory,\"ice5g_v1_2_21_0k_10min.nc\")\n self.FlowMapTwoColourComparisonWithCatchmentsHelper(ref_flowmap_filename=ref_filename,\n data_flowmap_filename=data_filename,\n ref_catchment_filename=\\\n ref_catchment_filename,\n data_catchment_filename=\\\n data_catchment_filename,\n ref_rdirs_filename=\\\n ref_rdirs_filename,\n data_rdirs_filename=None,\n reference_rmouth_outflows_filename=\\\n reference_rmouth_outflows_filename,\n data_rmouth_outflows_filename=\\\n data_rmouth_outflows_filename,\n lsmask_filename=lsmask_filename,\n minflowcutoff=100,\n flip_data=False,\n rotate_data=True,\n flip_ref=False,\n rotate_ref=True,\n lsmask_has_same_orientation_as_ref=False,\n invert_ls_mask=True,\n first_datasource_name=\"Present day\",\n second_datasource_name=\"LGM\",\n matching_parameter_set='extensive',\n catchment_and_outflows_mods_list_filename=\\\n \"catch_and_outflow_mods_lgm_vs_present_day.txt\",\n additional_matches_list_filename=\\\n \"additional_matches_10min_upscaled_lgm_vs_present.txt\",\n use_single_color_for_discrepancies=True,\n use_only_one_color_for_flowmap=False,\n use_title=False,remove_antartica=True,\n difference_in_catchment_label=\"Difference\",\n glacier_mask_filename=glacier_mask_filename,\n glacier_mask_grid_type='LatLong10min',\n flip_glacier_mask=True,\n rotate_glacier_mask=True,\n grid_type='HD')", "def load_nir_alcohol():\n\n # loading matlab data set\n raw_data = sio.loadmat(__data_set_path)\n\n # ----------------\n\n # getting all variable/features labels\n var_labels_all = raw_data['var_labels_all'].tolist()\n\n # getting spectra labels\n var_labels_spectra = [int(l) for l in var_labels_all[3:]]\n\n # getting properties labels\n var_labels_properties = var_labels_all[:3]\n\n # ----------------\n\n # getting all data (train-new-msc)\n data_all = raw_data['data_all']\n\n # getting spectra data\n data_spectra = data_all[:, 3:]\n\n # getting properties data\n data_properties = data_all[:, :3]\n\n # ----------------\n\n obj_labels_train = raw_data['obj_labels_train'].tolist()\n 
data_train = data_spectra[:27, :]\n other_cols_train = {p_name: p_data for p_name, p_data in zip(var_labels_properties, data_properties[:27, :].T)}\n\n ds_train = utils.build_data_set(data_train, obj_labels_train, var_labels_spectra, extra_cols=other_cols_train)\n\n # ----------------\n\n obj_labels_new = raw_data['obj_labels_new'].tolist()\n data_new = data_spectra[27:40, :]\n other_cols_new = {p_name: p_data for p_name, p_data in zip(var_labels_properties, data_properties[27:40, :].T)}\n\n ds_new = utils.build_data_set(data_new, obj_labels_new, var_labels_spectra, extra_cols=other_cols_new)\n\n # ----------------\n\n obj_labels_msc = raw_data['obj_labels_mscorrected'].tolist()\n data_msc = data_spectra[40:, :]\n other_cols_msc = {p_name: p_data for p_name, p_data in zip(var_labels_properties, data_properties[40:, :].T)}\n\n ds_msc = utils.build_data_set(data_msc, obj_labels_msc, var_labels_spectra, extra_cols=other_cols_msc)\n\n # ----------------\n\n # training/validation data sets and labels\n data_sets = [ds_train, ds_new, ds_msc]\n labels = ['train', 'new', 'msc']\n\n # actually building the joint data set\n ds = pd.concat(data_sets, keys=labels)\n\n # returning the final data set\n return ds", "def test_gleckler_index(self):\n\n # generate sample data\n # sample data\n tmp = np.zeros((5, 3, 1))\n tmp[:,0,0] = np.ones(5)*1.\n tmp[:,1,0] = np.ones(5)*2.\n tmp[:,2,0] = np.ones(5)*5.\n\n # The data is like ...\n #| 1 | 2 | 5 |\n #| 1 | 2 | 5 |\n #| 1 | 2 | 5 |\n #| 1 | 2 | 5 |\n #| 1 | 2 | 5 |\n\n x = self.D.copy()\n x._temporal_subsetting(0, 4)\n\n x.data = np.ma.array(tmp, mask=tmp!=tmp)\n x.std = np.ones(x.data.shape)\n x.time[0] = pl.datestr2num('2000-02-15')\n x.time[1] = pl.datestr2num('2000-03-15')\n x.time[2] = pl.datestr2num('2000-04-15')\n x.time[3] = pl.datestr2num('2000-05-15')\n x.time[4] = pl.datestr2num('2000-06-15')\n\n y = self.D.copy()\n y._temporal_subsetting(0, 4)\n tmp = np.ones(x.data.shape) # sample data 2\n y.data = np.ma.array(tmp, mask=tmp!=tmp)\n y.time[0] = pl.datestr2num('2000-02-15')\n y.time[1] = pl.datestr2num('2000-03-15')\n y.time[2] = pl.datestr2num('2000-04-15')\n y.time[3] = pl.datestr2num('2000-05-15')\n y.time[4] = pl.datestr2num('2000-06-15')\n\n # Case 1: same area weights\n # cell area\n tmp = np.ones((3, 1))\n x.cell_area = tmp*1.\n\n #| 1-1 | 2-1 | 5-1 |\n #| 1-1 | 2-1 | 5-1 |\n #| 1-1 | 2-1 | 5-1 |\n #| 1-1 | 2-1 | 5-1 |\n #| 1-1 | 2-1 | 5-1 |\n #===================\n #| 0 | 5 | 5*4**2=5*16. = 80 |\n #==> E2 = sqrt(85./(15.))\n D = GlecklerPlot()\n r = D.calc_index(x, y, 'a', 'b', time_weighting=False)\n\n wt = np.ones(5) / 5.\n ref = np.sqrt(((85./15.) * wt).sum())\n t = np.abs(1. - r / ref)\n self.assertLess(t, 0.000001) # relative error\n\n D = GlecklerPlot()\n r = D.calc_index(x, y, 'a', 'b')\n\n wt = np.asarray([29., 31., 30., 31., 30.])\n wt = wt / wt.sum()\n ref = np.sqrt(((85./15.) * wt).sum())\n t = np.abs(1. 
- r / ref)\n self.assertLess(t, 0.000001) # relative error\n\n\n\n # Case 2: Different area weights\n # cell area\n tmp = np.ones((3, 1))\n tmp[1, 0] = 2.\n x.cell_area = tmp*1.\n\n #| 1-1=0 | 2-1=1 | 5-1=16 |\n #| 1-1=0 | 2-1=1 | 5-1=16 |\n #| 1-1=0 | 2-1=1 | 5-1=16 |\n #| 1-1=0 | 2-1=1 | 5-1=16 |\n #| 1-1=0 | 2-1=1 | 5-1=16 |\n #--------------------------\n # w = 0.25 w = 0.5 w=0.25|\n #--------------------------\n\n # 0.25*0 + 0.5 * 1 + 0.25 * 16 = 0 + 0.5 + 4 = 4.5\n # the mean of that is 4.5 for each timestep\n # mean because the overall weights are calculated as such that\n # they give a total weight if 1\n\n # diagnostic\n D = GlecklerPlot()\n r = D.calc_index(x, y, 'a', 'b', time_weighting=False)\n\n wt = np.ones(5) / 5.\n ref = np.sqrt((4.5 * wt).sum())\n t = np.abs(1. - r / ref)\n self.assertLess(t, 0.000001) # relative error\n\n wt = np.asarray([29., 31., 30., 31., 30.])\n wt = wt / wt.sum()\n ref = np.sqrt((4.5 * wt).sum())\n t = np.abs(1. - r / ref)\n self.assertLess(t, 0.000001) # relative error\n\n # Case 3: use different std\n x.std = np.ones(x.data.shape)\n x.std[:, 2, 0] = 0.5\n\n #| 1-1=0 | 2-1=1 | 5-1=16 / 0.5 |\n #| 1-1=0 | 2-1=1 | 5-1=16 / 0.5 |\n #| 1-1=0 | 2-1=1 | 5-1=16 / 0.5 |\n #| 1-1=0 | 2-1=1 | 5-1=16 / 0.5 |\n #| 1-1=0 | 2-1=1 | 5-1=16 / 0.5 |\n #--------------------------------\n # w = 0.25 w = 0.5 w=0.25|\n # 0 + 0.5 + 0.25*32 = 0.5 + 8 = 8.5\n\n D = GlecklerPlot()\n r = D.calc_index(x, y, 'a', 'b', time_weighting=False)\n\n wt = np.ones(5) / 5.\n ref = np.sqrt((8.5 * wt).sum())\n t = np.abs(1. - r / ref)\n self.assertLess(t, 0.000001) # relative error\n\n wt = np.asarray([29., 31., 30., 31., 30.])\n wt = wt / wt.sum()\n ref = np.sqrt((8.5 * wt).sum())\n t = np.abs(1. - r / ref)\n self.assertLess(t, 0.000001) # relative error", "def compare_present_day_and_lgm_river_directions_with_catchments_ICE6G_plus_tarasov_style_orog_corrs_for_both(self):\n present_day_data_datetime = \"20170612_202721\"\n lgm_data_datetime = \"20170612_202559\"\n ref_filename=os.path.join(self.plots_data_dir,\n \"flowmap_ICE6g_0k_ALG4_sinkless_no_true_sinks_oceans_lsmask_plus_upscale_rdirs_tarasov\"\n \"_orog_corrs_{0}_upscaled_updated.nc\".\\\n format(present_day_data_datetime))\n data_filename=os.path.join(self.plots_data_dir,\n \"flowmap_ICE6g_lgm_ALG4_sinkless_no_true_sinks_oceans_lsmask_plus_upscale_rdirs_\"\n \"tarasov_orog_corrs_{0}_upscaled_updated.nc\".\\\n format(lgm_data_datetime))\n lsmask_filename=os.path.join(self.plots_data_dir,\n \"ls_mask_ICE6g_lgm_ALG4_sinkless_no_true_sinks_oceans_lsmask_plus_upscale_rdirs\"\n \"_tarasov_orog_corrs_{0}_HD_transf.nc\".\\\n format(lgm_data_datetime))\n extra_lsmask_filename=os.path.join(self.plots_data_dir,\n \"ls_mask_ICE6g_0k_ALG4_sinkless_no_true_sinks_oceans_lsmask_plus_upscale\"\n \"_rdirs_tarasov_orog_corrs_{0}_HD_transf.nc\".\\\n format(present_day_data_datetime))\n ref_catchment_filename=(\"catchmentmap_ICE6g_0k_ALG4_sinkless_no_true_sinks_oceans_lsmask_plus_upscale_rdirs_\"\n \"tarasov_orog_corrs_{0}_upscaled_updated.nc\".\\\n format(present_day_data_datetime))\n data_catchment_filename=(\"catchmentmap_ICE6g_lgm_ALG4_sinkless_no_true_sinks_oceans_lsmask_plus_upscale_rdirs\"\n \"_tarasov_orog_corrs_{0}_upscaled_updated.nc\".\\\n format(lgm_data_datetime))\n ref_rdirs_filename=(\"upscaled_rdirs_ICE6g_0k_ALG4_sinkless_no_true_sinks_oceans_lsmask_\"\n \"plus_upscale_rdirs_tarasov_orog_corrs_{0}_upscaled_\"\n \"updated.nc\".format(present_day_data_datetime))\n reference_rmouth_outflows_filename=os.path.join(self.plots_data_dir,\n 
\"rmouthflows_ICE6g_0k_ALG4_sinkless_no_true_sinks_oceans_\"\n \"lsmask_plus_upscale_rdirs_tarasov_orog_corrs\"\n \"_{0}_upscaled_updated.nc\".\\\n format(present_day_data_datetime))\n data_rmouth_outflows_filename=os.path.join(self.plots_data_dir,\n \"rmouthflows_ICE6g_lgm_ALG4_sinkless_no_true_sinks_oceans_\"\n \"lsmask_plus_upscale_rdirs_tarasov_orog_corrs_\"\n \"{0}_upscaled_updated.nc\".\\\n format(lgm_data_datetime))\n glacier_mask_filename=os.path.join(self.orog_data_directory,\"Ice6g_c_VM5a_10min_21k.nc\")\n self.FlowMapTwoColourComparisonWithCatchmentsHelper(ref_flowmap_filename=ref_filename,\n data_flowmap_filename=data_filename,\n ref_catchment_filename=\\\n ref_catchment_filename,\n data_catchment_filename=\\\n data_catchment_filename,\n ref_rdirs_filename=\\\n ref_rdirs_filename,\n data_rdirs_filename=None,\n reference_rmouth_outflows_filename=\\\n reference_rmouth_outflows_filename,\n data_rmouth_outflows_filename=\\\n data_rmouth_outflows_filename,\n lsmask_filename=lsmask_filename,\n minflowcutoff=75,\n flip_data=False,\n rotate_data=True,\n flip_ref=False,\n rotate_ref=True,\n lsmask_has_same_orientation_as_ref=False,\n invert_ls_mask=True,\n first_datasource_name=\"Present day\",\n second_datasource_name=\"LGM\",\n matching_parameter_set='extensive',\n catchment_and_outflows_mods_list_filename=\\\n \"ice6g_catch_and_outflow_mods_lgm_vs_present_day.txt\",\n additional_matches_list_filename=\\\n \"ice6g_additional_matches_10min_upscaled_lgm_vs_present.txt\",\n use_single_color_for_discrepancies=True,\n use_only_one_color_for_flowmap=False,\n use_title=False,remove_antartica=True,\n difference_in_catchment_label=\"Difference\",\n rivers_to_plot=[(216,433),(117,424),(112,380),(146,327),\n (132,496),(120,176),(251,638),(115,603),\n (33,571),(34,571),(36,660),(181,256),\n (120,457),(77,365),(258,235),(167,361),\n (219,598)],\n rivers_to_plot_alt_color=[(237,393),(192,384),(169,371),\n (119,399),(72,640),(126,165),\n (87,112),(88,419),(160,237),\n (60,35),(147,552),(245,635),\n (86,460),(33,603),\n (247,243),(41,682),(185,276),\n (147,522),(244,612)],\n rivers_to_plot_secondary_alt_color=[(230,427),(170,376),\n (180,446),(143,327),\n (201,287),(136,538),\n (100,467),(116,130),\n (160,572),(32,614),\n (50,712),(210,619),\n (179,445),(212,384),\n (261,230),(85,438)],\n glacier_mask_filename=glacier_mask_filename,\n extra_lsmask_filename=extra_lsmask_filename,\n glacier_mask_grid_type='LatLong10min',\n flip_glacier_mask=True,\n rotate_glacier_mask=True,\n grid_type='HD')", "def dop_comp(field) :\n dop = fits.open(field+'/'+field+'_rv.fits')\n r13 = apload.ApLoad(apred='r13')\n old = r13.apField(field)\n\n i1,i2 = match.match(dop[1].data['APOGEE_ID'],old[1].data['APOGEE_ID'])\n print(len(dop[1].data),len(old[1].data),len(i1))\n\n fig,ax=plots.multi(1,1)\n plots.plotc(ax,dop[1].data['RV_TEFF'][i1],dop[1].data['VHELIO_AVG'][i1]-old[1].data['VHELIO_AVG'][i2],dop[1].data['VSCATTER'][i1])\n\n j=np.argsort(np.abs(dop[1].data['VHELIO_AVG'][i1]-old[1].data['VHELIO_AVG'][i2],dop[1].data['VSCATTER'][i1]))\n\n plots._data = dop[1].data\n plots._id_cols=['APOGEE_ID']\n plots.event(fig)\n key=' '\n sf,sax=plots.multi(1,2,sharex=True,hspace=0.001)\n while key != 'e' :\n x,y,key,index = plots.mark(fig,index=True)\n obj = dop[1].data['APOGEE_ID'][i1[index]]\n #jv = np.where(dop[2].data['APOGEE_ID'] == dop[1].data['APOGEE_ID'][i1])[0]\n out=pickle.load(open(field+'/'+obj+'_out.pkl','rb'))\n print(obj,old[1].data['APOGEE_ID'][i2[index]])\n print(out[0])\n sax[0].cla()\n 
spec=old[2].data['SPEC'][i2[index]]\n plots.plotl(sax[0],old[3].data['WAVE'][0,:],spec/convolve(spec,np.ones(500)/500,mode='same'),xr=[15000,17000],yr=[0.5,1.5])\n for mod,obs in zip(out[2],out[3]) :\n sax[1].cla()\n for chip in range(3) :\n plots.plotl(sax[1],obs.wave[:,chip],obs.flux[:,chip],color='k',yr=[0.5,1.5])\n gd = np.where(obs.mask[:,chip] == False)[0]\n plots.plotl(sax[1],obs.wave[gd,chip],obs.flux[gd,chip],color='g')\n plots.plotl(sax[1],mod.wave[:,chip],mod.flux[:,chip],color='r')\n plt.draw()\n input('hit a key: ')", "def read_lads():\n lad_shapes = os.path.join(\n DATA_RAW, 'shapes', 'lad_uk_2016-12.shp'\n )\n\n with fiona.open(lad_shapes, 'r') as lad_shape:\n return [lad for lad in lad_shape if\n not lad['properties']['name'].startswith((\n 'E06000053',\n 'S12000027',\n 'N09000001',\n 'N09000002',\n 'N09000003',\n 'N09000004',\n 'N09000005',\n 'N09000006',\n 'N09000007',\n 'N09000008',\n 'N09000009',\n 'N09000010',\n 'N09000011',\n ))]", "def test_lfc_and_el_below_lcl():\n dewpoint = [264.5351, 261.13443, 259.0122, 252.30063, 248.58017, 242.66582] * units.kelvin\n temperature = [273.09723, 268.40173, 263.56207, 260.257, 256.63538,\n 252.91345] * units.kelvin\n pressure = [1017.16, 950, 900, 850, 800, 750] * units.hPa\n el_pressure, el_temperature = el(pressure, temperature, dewpoint)\n lfc_pressure, lfc_temperature = lfc(pressure, temperature, dewpoint)\n assert_nan(lfc_pressure, pressure.units)\n assert_nan(lfc_temperature, temperature.units)\n assert_nan(el_pressure, pressure.units)\n assert_nan(el_temperature, temperature.units)", "def check_latlon(self):\n\n for station in list(self.station_list.values()):\n station_def = self.station_definitions[station.name]\n lat = float(station.get_obs('LAT')[0])\n lon = float(station.get_obs('LON')[0])\n lat_diff = abs(lat - station_def['lat'])\n lon_diff = abs(lon - station_def['lon'])\n if lat_diff > .1:\n qc_error.all_qc_errors.append(\n qce(\n station_name=station.name,\n error_code=9000,\n old_data_value=lat,\n explanation=\"lats are different for: \" + station.name +\n \". Old value : \" + str(station_def['lat'])\n ))\n if lon_diff > .1:\n qc_error.all_qc_errors.append(\n qce(\n station_name=station.name,\n error_code=9000,\n old_data_value=lon,\n explanation=\"lons are different for: \" + station.name +\n \". Old value : \" + str(station_def['lon'])\n ))" ]
[ "0.5914553", "0.58950686", "0.58319104", "0.5644557", "0.56231236", "0.56231236", "0.55532014", "0.55138475", "0.55106986", "0.551042", "0.54737157", "0.5461819", "0.54479355", "0.53951114", "0.5363747", "0.5324874", "0.53132623", "0.5280498", "0.5247656", "0.52454746", "0.5236143", "0.5196184", "0.5125795", "0.5079289", "0.5061536", "0.5058586", "0.50571626", "0.504627", "0.5040402", "0.50251716" ]
0.5927307
0
Store main graph and all topic graphs into collection.
def store(self, graphs, start_date, end_date): documents = [{'topic_id': key, 'graph': graph, 'start_date': start_date, 'end_date': end_date} for key, graph in graphs.items()] self.collection.insert_many(documents)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _initilise_graph_db(self):\n for collector in self.collectors:\n collector.init_graph_db()", "def populate_graph(self):", "def _store(self, cuds_object):\n assert cuds_object.session == self\n self._registry.put(cuds_object)\n for t in cuds_object._graph:\n self.graph.add(t)\n cuds_object._graph = self.graph\n if self.root is None:\n self.root = cuds_object.uid", "def populate_graph(self):\n if self.edges and self.vertices:\n graph = Graph()\n for edge in self.edges:\n graph.add_edge(edge)\n self.graph = graph\n else:\n print(\"Populate edges & vertices first, then populate graph!\")", "def store_edgelists(self, train_path, test_path):\n stt.store_edgelists(train_path, test_path, self.train_edges, self.test_edges)", "def build(self):\n self.logger.info('Rebuilding adjacency information')\n self.edges = collections.defaultdict(list)\n\n topic_to_publisher = collections.defaultdict(list)\n topic_to_subscribers = collections.defaultdict(list)\n node_to_missing_deps = collections.defaultdict(list)\n\n result = True\n\n for node in self.nodes.values():\n for topic in node.provided_topics.keys():\n topic_to_publisher[topic].append(node)\n\n for topic in node.required_topics:\n topic_to_subscribers[topic].append(node)\n\n for dep in node.additional_dependencies:\n if dep not in self.nodes:\n node_to_missing_deps[node].append(dep)\n\n if len(node_to_missing_deps) > 0:\n result = False\n msg = io.StringIO()\n print('Found [{}] managed processes with missing dependencies'.format(len(node_to_missing_deps)), file=msg)\n fmt = ' Managed process [{}] is missing [{}]'\n\n for (node, missing) in node_to_missing_deps.items():\n print(fmt.format(node.name, ', '.join(missing)), file=msg)\n self.logger.error(msg.getvalue())\n\n missing_publishers = []\n for topic in topic_to_subscribers.keys():\n if topic not in topic_to_publisher:\n missing_publishers.append(topic)\n\n if len(missing_publishers) > 0:\n result = False\n msg = io.StringIO()\n print('Found [{}] topics that do not have publishers'.format(len(missing_publishers)), file=msg)\n fmt = ' Topic [{}] with subscribers [{}]'\n\n for topic in missing_publishers:\n print(fmt.format(topic, ', '.join([x.name for x in topic_to_subscribers[topic]])), file=msg)\n self.logger.error(msg.getvalue())\n\n if not result:\n self.logger.error('Found errors when building adjacency information')\n raise GraphBuildError(\n 'Found errors when building adjacency information / graph edges. Check log for details')\n\n # Now we have enough information to build our edges. 
Phase 1: pub/sub stuff\n for (topic, subscribers) in topic_to_subscribers.items():\n publishers = topic_to_publisher[topic]\n\n for p in publishers:\n for s in subscribers:\n self.edges[p].append(s)\n\n # Phase 2: additional dependencies\n for node in self.nodes.values():\n for dep in node.additional_dependencies:\n src = self.nodes[dep]\n self.edges[src].append(node)", "def initGraphs(self):\n \n self.graph = ConjunctiveGraph()\n # Create a separate graph for annotations\n self.annotationGraph = ConjunctiveGraph()\n \n self.log.debug('Adding namespaces to graphs')\n # Bind namespaces to graphs\n for namespace in self.namespaces:\n self.graph.namespace_manager.bind(namespace, self.namespaces[namespace])\n\n # Same for annotation graph\n for namespace in self.annotationNamespaces:\n self.annotationGraph.namespace_manager.bind(namespace, self.annotationNamespaces[namespace])\n \n # Add schema information\n self.log.debug('Adding some schema information (dimension and measure properties) ')\n self.addDataCellProperty()\n\n # Add dimensions \n self.graph.add((self.namespaces['tablink']['dimension'], RDF.type, self.namespaces['qb']['DimensionProperty']))\n \n #self.graph.add((self.namespaces['tablink']['label'], RDF.type, RDF['Property']))", "def store(self):\n articles = []\n for entry in self.feed():\n key = self.datastore_client.key(self.DATASTORE_KIND, entry['id'])\n article = datastore.Entity(key=key)\n article.update(entry)\n articles.append(article)\n self.datastore_client.put_multi(articles)", "def publish(self):\n # Create a public collection with the same uuid and same fields\n public_collection = Collection.get_collection(self.session, self.id, CollectionVisibility.PUBLIC)\n if public_collection:\n public_collection.update(\n **self.to_dict(remove_attr=(\"update_at\", \"created_at\", \"visibility\", \"id\"), remove_relationships=True)\n )\n else:\n public_collection = Collection(\n clone(self.db_object, primary_key=dict(id=self.id, visibility=CollectionVisibility.PUBLIC))\n )\n self.session.add(public_collection)\n\n # Copy over relationships\n for link in self.links:\n link.collection_visibility = CollectionVisibility.PUBLIC\n for dataset in self.datasets:\n if dataset.original_id:\n \"skip modified datasets\"\n continue # TODO: expand to support tombstone and refresh corpora-data-portal/1177\n else:\n dataset.collection_visibility = CollectionVisibility.PUBLIC\n dataset.published = True\n self.session.commit()\n self.delete()\n self.db_object = public_collection.db_object", "def __saveEdges(self, edges):", "def create_wiki_graph(self):\n\n print 'Creating wiki corpus graph representation'\n\n for path, subdirs, files in os.walk(self.wk_path):\n\n here = os.path.split(path)[1]\n parent = os.path.split(os.path.split(path)[0])[1]\n\n self.categories.add_edge(parent, here)\n\n self.categories[parent][\"path\"] = path\n self.categories[here][\"path\"] = path\n\n for name in files:\n if fnmatch(name, \"*.yaml\") and \"Index\" not in name and \"index\" not in name: # check if there is a text file\n \n category_name = name[0:-5]\n yaml_file_path = os.path.join(\n path, category_name + \".yaml\")\n\n # yaml\n yaml_file = open(yaml_file_path, \"r\")\n docs = yaml.load_all(yaml_file)\n\n # category_name\n for doc in docs:\n cat_parent = doc[\"CategoryPath\"][0]\n\n self.categories.add_edge(\n slugify(cat_parent), slugify(category_name))\n self.categories[slugify(cat_parent)][\"path\"] = path\n self.categories[slugify(category_name)][\"path\"] = path\n\n for cat in 
doc[\"Categories\"][0][self.language]:\n self.categories.add_edge(\n slugify(category_name), slugify(cat))\n self.categories[slugify(cat)][\"path\"] = path\n\n print(\"The categories graph %s has %d nodes with %d edges\"\n % (self.categories.name,\n nx.number_of_nodes(self.categories),\n nx.number_of_edges(self.categories)))\n for node in nx.nodes(self.categories):\n self.get_corpus_from_node(node)\n\n pickle.dump(self.categories, open(self.graph_path, 'w'))\n\n print \"Graph saved as %s\"%(self.graph_path)", "def _setup_graph(self):\n pass", "def _setup_graph(self):\n pass", "def store(self) -> None:\n # Store the centroids\n if self._centroids != {}:\n with open(self._path_model / f\"{self}\", 'w') as file:\n json.dump({k: v.tolist() for k, v in self._centroids.items()}, file, sort_keys=True)\n else:\n print(\"No centroids created yet to store!\")\n \n # Store the (validation) clusters\n with open(self._path_data / f\"{self}-train\", 'w') as file:\n json.dump(self._clusters, file, indent=2, sort_keys=True)\n with open(self._path_data / f\"{self}-val\", 'w') as file:\n json.dump(self._clusters_val, file, indent=2, sort_keys=True)", "def prepare_to_store(self, storage):\n if self._referent:\n self._referent.store_refs(storage)", "def __init__(self):\r\n self.vertices = col.defaultdict()\r\n self.edges = col.defaultdict(list)", "def build_inference_graph(self):\n self.build_train_graph()", "def _store_graph(self, graph, name=None):\n if graph == self:\n raise ValueError(\"Cannot add graph into itself\")\n if name is None:\n root = self\n else:\n # When a name is specified, create or merge into a new subgraph\n try:\n root = self._items[name]\n if isinstance(root, NodeDef):\n raise ValueError(\"Graph already contains node by name %s\" % name)\n except KeyError:\n root = SubGraph(self._prefix + (name,), self._downstream, self._upstream, self._root)\n self._items[name] = root\n\n # Function for recursively copying over the source graph structure to the target\n def _copy_structure(source_graph, target_graph, prefix):\n for key, value in source_graph._items.items():\n if isinstance(value, NodeDef):\n target_graph._store_node(value)\n else:\n new_prefix = prefix + (key,)\n\n try:\n new_graph = target_graph._items[key]\n except KeyError:\n target_graph._items[key] = new_graph = SubGraph(new_prefix,\n self._downstream,\n self._upstream,\n self._root)\n\n _copy_structure(value, new_graph, new_prefix)\n\n _copy_structure(graph, root, root._prefix)\n\n def _copy_edges(source_edges, target_edges):\n for source, targets in source_edges.items():\n target_edge = target_edges[root._prefix + source]\n\n # Filter out edge definitions already present.\n # This needs to be done in two passes because there can be deliberately duplicate edge definitions\n # for nodes.\n rebased_targets = []\n for target in targets:\n edge = EdgeDef(root._prefix + target.node, target.param)\n if edge not in target_edge:\n rebased_targets.append(edge)\n\n target_edge.extend(rebased_targets)\n\n _copy_edges(graph._downstream, self._downstream)\n _copy_edges(graph._upstream, self._upstream)", "def __init__(self):\n self._graph = DirectedGraph()\n self._graph_copies = []", "def __init__(self):\n self.data_graph = self._initialise_data()\n\n self.messages_sent = []\n self.messages_received = []\n\n self.answered_true = set()\n self.implied_true = set()\n\n self.current_subgraph = set()", "def update(self):\r\n self.g = self.create_graph()", "def complete_graph(self):\n root_nodes = set()\n\n for name, a_block in 
self.wf['action'].items():\n\n a_block['name'] = name\n\n for n in a_block.get('needs', []):\n if not self.wf['action'][n].get('next', None):\n self.wf['action'][n]['next'] = set()\n self.wf['action'][n]['next'].add(name)\n\n if not a_block.get('needs', None):\n root_nodes.add(name)\n\n self.wf['root'] = root_nodes", "def generate(self):\n self.graph_repl = self.master.graph_repl", "def store_graph(self, output_file='links.json'):\n\n links = {}\n for word in self.words:\n links[word] = []\n word_ind = self.index[word]\n for i in self.graph[word_ind]:\n links[word].append(self.words[i])\n with open(output_file, 'w') as f_out:\n json.dump(links, f_out, indent=4)", "def flush_graph(self) -> Any:\n raise NotImplementedError", "def finalize_graph(self) -> None:\n digraph = nx.MultiDiGraph()\n\n for node in self.graph.iternodes():\n attributes = self.get_attributes(node)\n attributes[\"schema\"] = node.type.name\n if node.caption is not None:\n attributes[\"label\"] = node.caption\n if node.is_entity and node.schema is not None:\n attributes[\"schema\"] = node.schema.name\n digraph.add_node(node.id, **attributes)\n\n for edge in self.graph.iteredges():\n attributes = self.get_attributes(edge)\n attributes[\"schema\"] = edge.type_name\n attributes[\"weight\"] = str(edge.weight)\n digraph.add_edge(edge.source_id, edge.target_id, key=edge.id, **attributes)\n\n for line in generate_gexf(digraph, prettyprint=True):\n self.fh.write(line)\n self.fh.write(\"\\n\")", "def clean_local_memory(self):\n self.namespaces = self._rdf_builder.namespaces\n self.dataset = self._rdf_builder.dataset\n\n self.ontology_graph = self._rdf_builder.ontology_graph\n self.instance_graph = self._rdf_builder.instance_graph\n self.claim_graph = self._rdf_builder.claim_graph\n self.perspective_graph = self._rdf_builder.perspective_graph\n self.interaction_graph = self._rdf_builder.interaction_graph", "def __init__(self):\n\t\tself.edges = defaultdict(list)\n\t\tself.weights = {}\n\t\tself.connections = {}", "def build_graph(self):\n\t\tself._create_placeholders()\n\t\tself._create_embedding()\n\t\tself._create_recurrent_layers()\n\t\tself._create_de_embedding()\n\t\tself._create_loss()\n\t\tself._create_optimizer()\n\t\tself._create_summaries()", "def _store(self, item, name=None):\n if isinstance(item, SubGraph):\n raise ValueError(\"Merging sub-graphs is not supported\")\n elif isinstance(item, Graph):\n self._store_graph(item, name=name)\n else:\n self._store_node(item, name=name)" ]
[ "0.6284722", "0.60069364", "0.59175754", "0.574157", "0.5713566", "0.56346995", "0.56080204", "0.55680335", "0.556138", "0.550367", "0.543818", "0.5379034", "0.5379034", "0.53369987", "0.5320596", "0.5315405", "0.5294312", "0.5285918", "0.52664405", "0.52534753", "0.52492744", "0.52464366", "0.52420855", "0.52337444", "0.52218354", "0.51978624", "0.51859087", "0.5179569", "0.5167488", "0.5140959" ]
0.63743895
0
Check if the local toon is allowed to enter.
def allowedToEnter(self): if base.cr.isPaid(): return True place = base.cr.playGame.getPlace() myHoodId = ZoneUtil.getCanonicalHoodId(place.zoneId) if myHoodId in \ (ToontownGlobals.ToontownCentral, ToontownGlobals.MyEstate, ToontownGlobals.GoofySpeedway, ): # trialer going to TTC/Estate/Goofy Speedway, let them through return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_telescope_on_allowed(self):\n handler = self.get_command_object(\"TelescopeOn\")\n return handler.check_allowed()", "def __TipIsForbiddenToEnter(self, vial, tipId = None):\n if tipId == None:\n tipId = self.__m_Platform.CurrentTipID()[1]\n if vial.getLabel() in self.__forbiddenTipList.keys():\n result = tipId in self.__forbiddenTipList[vial.getLabel()]\n else:\n result = False\n return result", "async def control_checks(self, ctx):\n server_id = ctx.message.server.id\n requester = ctx.message.author\n #silently drop if not in voice\n if not self.in_voice(server_id):\n return False\n #refuse if user not in the same channel\n if not self.user_in_channel(server_id, requester):\n vcname = self.get_server_dict(server_id)['voice'].channel.name\n await ctx.bot.send_message(ctx.message.channel, \"You can't control me outside of {}.\".format(vcname))\n return False\n return True", "async def __local_check(self, ctx):\n if not isinstance(ctx.channel, discord.TextChannel):\n raise InvalidChannelCheck(ctx.command)\n me = ctx.me.guild_permissions\n perms = (me.manage_messages, me.manage_nicknames, me.ban_members, me.kick_members)\n if not all(perms):\n raise BotPermissionsCheck(ctx.command)\n else:\n return True", "def check_allowed(self):\n if self.state_model.op_state in [\n DevState.FAULT,\n DevState.UNKNOWN,\n DevState.DISABLE,\n ]:\n return False\n\n return True", "def verify_local_token(self, token):\n return token == self.master_local_token.get_token()", "def can_exist_outside_of_game(self):\n return True", "def can_exist_outside_of_game(self):\n return True", "async def should_handle(self):\n return self.main.base_amount > 4 and self.main.can_build_unique(UnitTypeId.INFESTATIONPIT, self.main.pits)", "def is_legal(self, start, end) -> bool:\n return self.board(end) == 0 \\\n and self.board(start) > 0 \\\n and self._check_zone_locks(start, end) \\\n and self.exists_path(start, end)", "def allow_entry(self, mover):\n return True", "def is_only_valid_allowed(self) -> bool:\n return self.get_allow_scope() is TxAllowScope.VALID", "def valid_for_send(self, app):\n return (\n (self.to is not None) and\n (self.next_hop is not None) and\n (self.source is not None) and\n (self.command is not None) and\n (self.handler is not None) and\n (self.kind is not None) and\n (self.time_to_live is not None) and\n (self.time_to_live >= app.tick)\n )", "def is_local_administrator(self):\n\t\treturn bool(call_sdk_function('PrlUsrCfg_IsLocalAdministrator', self.handle))", "def local_is_up(self, target):\n try:\n check_address(target)\n except ValueError:\n self.logger.warning('Target must be a tuple (IP, port), where IP '\n 'is a string (i.e. \"192.168.0.1\") and port is '\n 'an integer (i.e. 40000). 
Alternatively '\n 'target can be a valid UNIX domain socket.')\n return False\n\n self.check_tunnels()\n return self.tunnel_is_up.get(target, True)", "def is_allowed(self) -> bool:\n return self.effect == ALLOW_ACCESS", "def _check_local_constraints(launcher: str, batch: bool) -> None:\n if launcher == \"local\" and batch:\n msg = \"Local orchestrator can not be launched with batch=True\"\n raise SmartSimError(msg)", "def still_valid(self) -> bool:\n return self._data.player_alive(self._data.player_turn)", "def local_network_check():\n return (\n network.show_active() in LOCAL_BLOCKCHAIN_ENVINROMENTS\n or network.show_active() in FORKED_LOCAL_ENVIRONMENTS\n )", "def _client_allowed(self):\r\n client_ip = self._client_address[0]\r\n if not client_ip in self._settings.allowed_clients and \\\r\n not 'ALL' in self._settings.allowed_clients:\r\n self._send_content('Access from host %s forbidden.' % client_ip, 'text/html')\r\n return False\r\n return True", "def is_controlled(self):\n return False if self._remote_controller == \"\" else True", "def is_allowed(self, cpos):\n if self.step is None:\n return True\n \n # has the player clicked on one of the allowed cells?\n if (cpos in self.step.toclick):\n # mark step as finished\n self.step.finished = True\n return True\n return False", "def validate_can_enter(self, user, contest_pool):\n\n # the contest attempting to be joined\n target_skill_level = contest_pool.skill_level\n if target_skill_level.enforced == False:\n return # the skill level of this contest is not enforced -- anyone can join no matter what\n\n # find any enforced skill_levels we have an entry in not matching our target.\n # if any are found, that means we cant join and must raise exception\n entries = Entry.objects.filter(\n user=user,\n contest_pool__draft_group=contest_pool.draft_group,\n contest_pool__skill_level__enforced=True\n ).exclude(contest_pool__skill_level=target_skill_level)\n\n if entries.count() > 0:\n raise self.CanNotEnterSkillLevel()", "def can_pickup(self):\n return False", "def is_accessible(self):\n if self._is_accessible:\n return self._is_accessible\n\n check_host_cmd = '/usr/rift/bin/ssh_root {ip} -q -n -o BatchMode=yes -o StrictHostKeyChecking=no ls > /dev/null'\n rc = subprocess.call(check_host_cmd.format(ip=self._ip), shell=True)\n logger.info(\"Checking if {} is accessible\".format(self._ip))\n\n\n\n if rc != 0:\n return False\n\n self._is_accessible = True\n return self._is_accessible", "def gatekeeper():\n\n if user.name in GATEKEEPERS:\n return True\n\n return False", "def checkInTerritory(self):\n\t\treturn AI.checkInTerritory(self)", "def is_telescope_standby_allowed(self):\n handler = self.get_command_object(\"TelescopeStandby\")\n return handler.check_allowed()", "def validate_login(self, request):\n\n if 'id' not in request.session or 'steam_id' not in request.session:\n raise PermissionDenied('You need to login')\n\n # if self.mode9:\n # if 'team' not in PlayerList[request.session['id']]:\n # raise PermissionDenied('Player is not in a team!')", "def is_allowed(self, request, credentials: dict = None) -> bool:\n\n if self.is_admin_site(request):\n return True\n\n if self.is_blacklisted(request, credentials):\n return False\n\n if self.is_whitelisted(request, credentials):\n return True\n\n if self.is_locked(request, credentials):\n return False\n\n return True" ]
[ "0.6684113", "0.62243545", "0.61877936", "0.61838484", "0.61765593", "0.611133", "0.60216933", "0.60216933", "0.59972715", "0.59768903", "0.58596575", "0.58082616", "0.5807725", "0.58012944", "0.5789078", "0.5779336", "0.57534045", "0.57501376", "0.5747623", "0.5714134", "0.570829", "0.5702368", "0.5692986", "0.5692796", "0.5669489", "0.5658105", "0.56567824", "0.5654172", "0.56413376", "0.56330246" ]
0.7630505
0
Check if the library supports the language.
def language_supported(self, iso_lang="ca-ES"): # -> bool test_lang = "" if len(iso_lang) == 0: return False try: for sep in ["-", "_"]: if sep in iso_lang: test_lang = iso_lang.split(sep)[0] break except (AttributeError, NameError): return False try: for _test in [iso_lang, test_lang]: if _test in gtts.tts.tts_langs(): return True except NameError: pass return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def can_handle_language(cls, language: Hashable) -> bool:\n\n # if language_list is set to `None` it means: support all languages\n if language is None or cls.language_list is None:\n return True\n\n return language in cls.language_list", "def compare_language(language):\n if language in module.availableLanguages:\n return True\n else:\n return False", "def validate_lang(lang):\n if lang in LANGUAGE_OPTIONS.keys():\n return True", "def is_installed(cls, language=None):\n if language == 'python':\n return True\n return False", "def language_supported(self,\n _iso_lang=\"en-US\",\n alt_local_url=\"\"): # -> bool\n _found_name = \"\"\n if alt_local_url.startswith(\"http\"):\n self.url = alt_local_url\n if self.ok:\n return self.ok\n if not bool(self.verified_voices):\n self.update_rhvoice_checklist()\n if not bool(self.verified_voices):\n self.ok = False\n return False\n self.ok = False\n for _search in [_iso_lang.lower(), _iso_lang.split(\"-\")[0].lower()]:\n for item in self.checklist:\n if item[0].lower().startswith(_search):\n self.checked_lang = item[0]\n self.ok = True\n break\n if len(self.checked_lang) != 0:\n break\n if len(self.checked_lang) != 0:\n for item in self.checklist:\n if bool(self.common.debug):\n print(item)\n if item[2] == _iso_lang.lower():\n self.checked_lang = item[0]\n self.ok = True\n break\n if self.ok:\n help_heading = self.help_heading\n help_url = self.help_url\n print(f\"\"\"\nChecking {help_heading} voices for `{_iso_lang}`\n========================================\n\n<{help_url}>\n\"\"\")\n return self.ok", "def supported_languages(self):\n return SUPPORT_LANGUAGES", "def has_language(lang):\n kn = _get_keyboard_names()\n return kn.has_language(lang)", "def detect_language(self):\n if not self.clean:\n self._text_clean()\n if not self.clean:\n return\n self.payload = \"q={}\".format(self.text)\n resp = requests.request('POST', self.url_language, data=self.payload.encode('utf-8'),\n headers=self.translate_headers)\n try:\n self.language = json.loads(resp.text)['data']['detections'][0][0]['language']\n except KeyError:\n return", "def is_valid_language(self, file):\n if not self.languages or get_file_type(file[\"path\"]) in self.languages:\n return True\n return False", "def is_valid_language(self, file):\n if not self.languages or get_file_type(file[\"path\"]) in self.languages:\n return True\n return False", "def requires_matching_languages(self):\n return self._requires_matching_languages", "def has_languages(self):\n return bool(self._languages)", "def check_supported_features(self):", "def Locale_IsAvailable(*args, **kwargs):\n return _gdi_.Locale_IsAvailable(*args, **kwargs)", "def test_langs(self):\n self._api.Init(lang=\"eng\")\n lang = self._api.GetInitLanguagesAsString()\n self.assertEqual(lang, \"eng\")\n langs = self._api.GetLoadedLanguages()\n self.assertEqual(langs, [\"eng\"])\n self.assertIn(\"eng\", self._api.GetAvailableLanguages())", "def is_supported_locale(locale):\n\n en_name = get_english_name(locale)\n return bool(en_name)", "def not_supported_languages() -> Optional[List[Text]]:\n return [\"zh\", \"ja\", \"th\"]", "def IsUnicodeSupported(self):\n return self._common_type.IsUnicodeSupported()", "def isEditorInfoSupported(self, language):\n documentationViewer = self.ui.documentationViewer()\n if documentationViewer:\n return documentationViewer.isSupportedLanguage(language)\n else:\n return False", "def is_forced(self, lang):\r\n return False", "def IsAvailable(*args, **kwargs):\n return _gdi_.Locale_IsAvailable(*args, **kwargs)", 
"def detect(text, format='plain'):\n if format not in FORMATS:\n raise TypeError('The format should be one of %s' % (FORMATS,))\n params = {'text': text, 'format': format}\n r = requests.get('http://translate.yandex.net/api/v1/tr.json/detect', params=params)\n code = r.json['code']\n if code == 200:\n lang = r.json['lang']\n if lang:\n return lang\n raise LanguageNotDetected\n else:\n raise TranslationError(code)", "def test_lang_is_not_supported(app):\n rv = app.test_client().post('/tokenize', json={\n 'text':'这是中文'})\n json_data = rv.get_json()\n msg = json_data['message']\n assert msg == 'Language not supported'", "def platform_supported(self):\n return platform.system().lower() in self.platforms if self.platforms else False", "def is_installed(cls, language=None):\n # Filesystem is implied\n return True", "def is_valid_language(args, skip=False):\n if (is_valid_file_and_directory(args) and is_valid_comments(args)) or skip:\n if args.language is not None:\n return True\n return False", "def detect(text):\n try:\n return langdetect.detect(text)\n except LangDetectException:\n return None", "def language(self, target):\n self._check_target(target)\n return target.language or self._default_language", "def detect_languages(text):\n try:\n return langdetect.detect_langs(text)\n except LangDetectException:\n return None", "def testLanguage(self):\n if self.language in tools.LANGUAGES:\n self.assertEqual(\n self.language,\n self.config.language\n )\n else:\n self.assertNotEqual(\n self.language,\n self.config.language\n )\n self.assertEqual(\n tools.LANGUAGE_DEFAULT,\n self.config.language\n )" ]
[ "0.73111284", "0.72048944", "0.714278", "0.70415515", "0.6972099", "0.690896", "0.6857913", "0.67120904", "0.66275656", "0.66275656", "0.6623935", "0.6518069", "0.64795995", "0.64205503", "0.640512", "0.63772917", "0.63161093", "0.6273078", "0.6245229", "0.61978394", "0.6196551", "0.6169984", "0.6163703", "0.6163105", "0.612885", "0.61110836", "0.61097497", "0.61029977", "0.6070391", "0.6047618" ]
0.75273556
0
This method will sweep through the range of standard ids given from low to high. This will actively filter for 6 ids at a time and sniff for the given amount of time in seconds. If at least one message is read in then it will go individually through the 6 ids and sniff only for that id for the given amount of time. This does not save any sniffed packets.
def filterStdSweep(self, freq, low, high, time = 5): msgIDs = [] self.client.serInit() self.client.MCPsetup() for i in range(low, high+1, 6): print "sniffing id: %d, %d, %d, %d, %d, %d" % (i,i+1,i+2,i+3,i+4,i+5) comment= "sweepFilter: " #comment = "sweepFilter_%d_%d_%d_%d_%d_%d" % (i,i+1,i+2,i+3,i+4,i+5) description = "Running a sweep filer for all the possible standard IDs. This run filters for: %d, %d, %d, %d, %d, %d" % (i,i+1,i+2,i+3,i+4,i+5) count = self.sniff(freq=freq, duration = time, description = description,comment = comment, standardid = [i, i+1, i+2, i+3, i+4, i+5]) if( count != 0): for j in range(i,i+5): comment = "sweepFilter: " #comment = "sweepFilter: %d" % (j) description = "Running a sweep filer for all the possible standard IDs. This run filters for: %d " % j count = self.sniff(freq=freq, duration = time, description = description,comment = comment, standardid = [j, j, j, j]) if( count != 0): msgIDs.append(j) return msgIDs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sweepRandom(self, freq, number = 5, time = 5):\n msgIDs = [] #standard IDs that we have observed during run\n ids = [] #standard IDs that have been tried\n self.client.serInit()\n self.client.MCPsetup()\n for i in range(0,number+1,6):\n idsTemp = []\n comment = \"sweepFilter: \"\n for j in range(0,6,1):\n id = randrange(2047)\n #comment += \"_%d\" % id\n idsTemp.append(id)\n ids.append(id)\n #print comment\n description = \"Running a sweep filer for all the possible standard IDs. This runs the following : \" + comment\n count = self.sniff(freq=freq, duration=time, description=description, comment = comment, standardid = idsTemp)\n if( count != 0):\n for element in idsTemp:\n #comment = \"sweepFilter: %d\" % (element)\n comment=\"sweepFilter: \"\n description = \"Running a sweep filer for all the possible standard IDs. This run filters for: %d \" % element\n count = self.sniff(freq=freq, duration = time, description = description,comment = comment, standardid = [element, element, element])\n if( count != 0):\n msgIDs.append(j)\n return msgIDs, ids", "def rtrSweep(self,freq,lowID,highID, attempts = 1,duration = 1, verbose = True):\n #set up file for writing\n now = datetime.datetime.now()\n datestr = now.strftime(\"%Y%m%d\")\n path = self.DATA_LOCATION+datestr+\"_rtr.csv\"\n filename = path\n outfile = open(filename,'a');\n dataWriter = csv.writer(outfile,delimiter=',');\n dataWriter.writerow(['# Time Error Bytes 1-13']);\n dataWriter.writerow(['#' + \"rtr sweep from %d to %d\"%(lowID,highID)])\n if( verbose):\n print \"started\"\n #self.client.serInit()\n #self.spitSetup(freq)\n \n #for each id\n for i in range(lowID,highID+1, 1):\n self.client.serInit()\n self.spitSetup(freq) #reset the chip to try and avoid serial timeouts\n #set filters\n standardid = [i, i, i, i]\n self.addFilter(standardid, verbose = True)\n \n #### split SID into different areas\n SIDlow = (standardid[0] & 0x07) << 5; # get SID bits 2:0, rotate them to bits 7:5\n SIDhigh = (standardid[0] >> 3) & 0xFF; # get SID bits 10:3, rotate them to bits 7:0\n #create RTR packet\n packet = [SIDhigh, SIDlow, 0x00,0x00,0x40]\n dataWriter.writerow([\"#requested id %d\"%i])\n #self.client.poke8(0x2C,0x00); #clear the CANINTF register; we care about bits 0 and 1 (RXnIF flags) which indicate a message is being held \n #clear buffer\n packet1 = self.client.rxpacket();\n packet2 = self.client.rxpacket();\n #send in rtr request\n self.client.txpacket(packet)\n ## listen for 2 packets. 
one should be the rtr we requested the other should be\n ## a new packet response\n starttime = tT.time()\n while ((time.time() - starttime) < duration): #listen for the given duration time period\n packet = self.client.rxpacket()\n if( packet == None):\n continue\n # we have sniffed a packet, save it\n row = []\n row.append(\"%f\"%time.time()) #timestamp\n row.append(0) #error flag (not checkign)\n row.append(\"rtrRequest_%d\"%i) #comment\n row.append(duration) #sniff time\n row.append(1) # filtering boolean\n for byte in packet:\n row.append(\"%02x\"%ord(byte));\n dataWriter.writerow(row)\n print self.client.packet2parsedstr(packet)\n trial= 2;\n # for each trial repeat\n while( trial <= attempts):\n print \"trial: \", trial\n self.client.MCPrts(TXB0=True);\n starttime = time.time()\n # this time we will sniff for the given amount of time to see if there is a\n # time till the packets come in\n while( (time.time()-starttime) < duration):\n packet=self.client.rxpacket();\n if( packet == None):\n continue\n row = []\n row.append(\"%f\"%time.time()) #timestamp\n row.append(0) #error flag (not checking)\n row.append(\"rtrRequest_%d\"%i) #comment\n row.append(duration) #sniff time\n row.append(1) # filtering boolean\n for byte in packet:\n row.append(\"%02x\"%ord(byte));\n dataWriter.writerow(row)\n print self.client.packet2parsedstr(packet)\n trial += 1\n print \"sweep complete\"\n outfile.close()", "def _filterTimes(self):\n print(self.tRange)\n idT = np.where((self.tRange[0] > np.array(self.rawD['Epoch'][:])) & \n (self.tRange[1] < np.array(self.rawD['Epoch'][:])))[0]\n #print(self.rawD['Epoch'][:100])\n print(idT)\n # Filter data\n for key in filter(lambda x: ('Epoch' in x or \n ('Counts' in x and x[-1] == 's')), self.rawD.keys()):\n self.d[key] = self.rawD[key].copy()[idT]\n return", "def split_to_cycles(self, msg_type_filter = None, separation = 0.5):\n\n ids = {msg_type.get_message_id() for msg_type in msg_type_filter}\n\n if not len(ids):\n class _Everything:\n def __contains__(self, x):\n return True\n ids = _Everything()\n\n out = []\n last_msg_time = float(\"nan\")\n while True:\n data = self._read_binary_sirf_msg()\n\n if sirf.bytes_to_message_id(data) in ids:\n out.append(sirf.from_bytes(data))\n\n if self.last_msg_time - last_msg_time > separation:\n yield out\n out = []\n\n last_msg_time = self.last_msg_time", "def __process(self, int_data):\n attack_map_key = hash(str(int_data))\n logger.debug('Attack map key - [%s]', attack_map_key)\n if not self.count_map.get(attack_map_key):\n self.count_map[attack_map_key] = list()\n\n curr_time = datetime.datetime.now()\n self.count_map.get(attack_map_key).append(curr_time)\n times = self.count_map.get(attack_map_key)\n count = 0\n for eval_time in times:\n delta = (curr_time - eval_time).total_seconds()\n if delta > self.sample_interval:\n times.remove(eval_time)\n else:\n count += 1\n\n if count > self.packet_count:\n logger.debug('Attack detected - count [%s] with key [%s]',\n count, attack_map_key)\n\n attack_dict = dict(\n src_mac=int_data['devMac'],\n src_ip=int_data['devAddr'],\n dst_ip=int_data['dstAddr'],\n dst_port=int_data['dstPort'],\n packet_size=int_data['packetLen'],\n attack_type='UDP Flood')\n\n # Send to SDN\n last_attack = self.attack_map.get(attack_map_key)\n if not last_attack or time.time() - last_attack > 1:\n logger.info('Calling SDN, last attack sent - [%s]',\n last_attack)\n try:\n self.attack_map[attack_map_key] = time.time()\n self._send_attack(**attack_dict)\n return True\n except Exception as e:\n 
logger.error('Unexpected error [%s]', e)\n return False\n else:\n logger.debug(\n 'Not calling SDN as last attack notification for %s'\n ' was only %s seconds ago',\n attack_dict, time.time() - last_attack)\n return True\n else:\n logger.debug('No attack detected - count [%s]', count)\n return False", "def sniffing():\n sniff(store=False, prn=lambda p: threading.Thread(target=next, args=(p,)).start(), iface=IFACE)", "def detection_algorithm(f_blacklist, f_seconds, f_spikes):\n blacklist = create_blacklist_dict()\n filtered_traces_user_dict = defaultdict(list)\n\n file_type = get_file_type(f_blacklist, f_seconds, f_spikes)\n\n inspection_interval = 60*5\n\n bucket_list = [1, 5, 10, 30, 60]\n traces_file_1 = open('final_files/user_packets_1_%s'%(file_type), 'w')\n traces_file_5 = open('final_files/user_packets_5_%s'%(file_type), 'w')\n traces_file_10 = open('final_files/user_packets_10_%s'%(file_type), 'w')\n traces_file_30 = open('final_files/user_packets_30_%s'%(file_type), 'w')\n traces_file_60 = open('final_files/user_packets_bucket_60_%s'%(file_type), 'w')\n packets_file = open('final_files/user_packets_true_false_%s'%(file_type), 'w') \n\n for user in users:\n devids = []\n for d in user.devices:\n devids.append(str(d.id))\n\n devs = {}\n for d in user.devices:\n devs[d.id] = d.platform\n\n for elem_id in devids:\n sql_userid = \"\"\"SELECT login FROM devices WHERE id =:d_id\"\"\"\n user_id = ses.execute(text(sql_userid).bindparams(d_id = elem_id)).fetchone()\n idt = user_id[0]\n\n print idt\n packets_file.write(str(idt)+'\\n')\n\n if idt != 'bowen.laptop':\n continue\n\n #list contains Traces -> timestamp, url\n http_traces_list, dns_traces_list = get_test_data(elem_id)\n print len(http_traces_list)\n print len(dns_traces_list)\n\n cont = 0\n packets_true = defaultdict(list)\n packets_false = defaultdict(list)\n for packet in http_traces_list:\n print cont\n packets_list = get_packets_in_interval(packet, http_traces_list, inspection_interval)\n pkt_user_gen = filter_packet(packet, packets_list, blacklist, f_blacklist, f_seconds, f_spikes, packets_true, packets_false)\n packets_file.write(str(packet.timst) + ' ' + str(pkt_user_gen) + '\\n')\n if pkt_user_gen:\n filtered_traces_user_dict[idt].append(packet.timst)\n cont+=1\n\n packets_true = defaultdict(list)\n packets_false = defaultdict(list)\n for packet in dns_traces_list:\n packets_list = get_packets_in_interval(packet, dns_traces_list, inspection_interval)\n pkt_user_gen = filter_packet(packet, packets_list, blacklist, f_blacklist, f_seconds, f_spikes, packets_true, packets_false)\n packets_file.write(str(packet.timst) + ' ' + str(pkt_user_gen) + '\\n')\n if pkt_user_gen:\n filtered_traces_user_dict[idt].append(packet.timst)\n\n for bucket in bucket_list:\n print bucket\n traces_bucket = []\n traces_bucket = get_interval_list_predefined_gap(sorted(filtered_traces_user_dict[idt]), bucket)\n if bucket == 1:\n traces_file_1.write(idt + '\\n')\n elif bucket == 5:\n traces_file_5.write(idt + '\\n')\n elif bucket == 10:\n traces_file_10.write(idt + '\\n')\n elif bucket == 30:\n traces_file_30.write(idt + '\\n')\n elif bucket == 60:\n traces_file_60.write(idt + '\\n')\n\n print len(traces_bucket)\n for timst in traces_bucket:\n if bucket == 1:\n traces_file_1.write(str(timst) + '\\n')\n elif bucket == 5:\n traces_file_5.write(str(timst) + '\\n')\n elif bucket == 10:\n traces_file_10.write(str(timst) + '\\n')\n elif bucket == 30:\n traces_file_30.write(str(timst) + '\\n')\n elif bucket == 60:\n traces_file_60.write(str(timst) + '\\n')\n\n 
traces_file_1.close()\n traces_file_5.close()\n traces_file_10.close()\n traces_file_30.close()\n traces_file_60.close()", "def update_timestamp_values(min_id=0, max_id=1000):\n lower_limit = 0\n min_time = 75\n max_time = 80\n for upper_limit in range(min_id, max_id+10, 100): \n stmt = sqlalchemy.select([sqlalchemy.func.current_timestamp() - datetime.timedelta(\n hours=random.randint(min_time, max_time))])\n timestamp = execute_command(stmt).fetchall()[0][0]\n stmt = _READING_TABLE.update().values(ts=timestamp, user_ts=timestamp).where(\n _READING_TABLE.c.id <= upper_limit).where(_READING_TABLE.c.id > lower_limit)\n execute_command(stmt)\n lower_limit = upper_limit \n min_time = max_time\n max_time = max_time+5", "def sniffing():\n sniff(store=False, prn=lambda p: threading.Thread(target=sendHash, args=(p,)).start(), iface=IFACE)", "def aws_waits ( func, matching_ids ) :\n done = False\n found_ids = []\n while not done :\n found_ids = []\n time.sleep( 1 )\n items = func( )\n for item in items :\n for matching_id in matching_ids :\n if item.id == matching_id :\n found_ids.append( item )\n break\n\n if len( found_ids ) == len( matching_ids ) :\n done = True\n break\n\n return found_ids", "def preprocess_scans(self, scan_ids, width, height, depth, clipping=True, loop=False,\n shuffle=False):\n while True:\n if shuffle:\n random.shuffle(scan_ids)\n for scan_id in scan_ids:\n ct_scan, origin, spacing = self.get_scan(scan_id, resample=True)\n if clipping:\n scan, origin, spacing = self.rescale_scan(\n ct_scan, origin, spacing, width, height, depth, normalize=True\n )\n yield self.clip_scan(scan), origin, spacing\n else:\n yield self.rescale_scan(\n ct_scan, origin, spacing, width, height, depth, normalize=True\n )\n if not loop:\n break", "def generationFuzzer(self,freq, standardIDs, dbLimits, period, writesPerFuzz, Fuzzes):\n #print \"Fuzzing on standard ID: %d\" %standardId\n self.client.serInit()\n self.spitSetup(freq)\n packet = [0,0,0x00,0x00,0x08,0,0,0,0,0,0,0,0] #empty packet template\n \n\n #get folder information (based on today's date)\n now = datetime.datetime.now()\n datestr = now.strftime(\"%Y%m%d\")\n path = self.DATA_LOCATION+\"InjectedData/\"+datestr+\"_GenerationFuzzedPackets.csv\"\n filename = path\n outfile = open(filename,'a');\n dataWriter = csv.writer(outfile,delimiter=',');\n #dataWriter.writerow(['# Time Error Bytes 1-13']);\n #dataWriter.writerow(['#' + description])\n \n numIds = len(standardIDs)\n fuzzNumber = 0; #: counts the number of packets we have generated\n while( fuzzNumber < Fuzzes):\n id_new = standardIDs[random.randint(0,numIds-1)]\n print id_new\n #### split SID into different regs\n SIDhigh = (id_new >> 3) & 0xFF; # get SID bits 10:3, rotate them to bits 7:0\n SIDlow = (id_new & 0x07) << 5; # get SID bits 2:0, rotate them to bits 7:5\n packet[0] = SIDhigh\n packet[1] = SIDlow\n \n #generate a fuzzed packet\n for i in range(0,8): # for each data byte, fuzz it\n idx = \"db%d\"%i\n limits = dbLimits[idx]\n value = random.randint(limits[0],limits[1]) #generate pseudo-random integer value\n packet[i+5] = value\n print packet\n #put a rough time stamp on the data and get all the data bytes \n row = [tT.time(), id_new,8] # could make this 8 a variable \n msg = \"Injecting: \"\n for i in range(5,13):\n row.append(packet[i])\n msg += \" %d\"%packet[i]\n #print msg\n dataWriter.writerow(row)\n self.client.txpacket(packet)\n tT.sleep(period/1000)\n \n #inject the packet the given number of times. 
\n for i in range(1,writesPerFuzz):\n self.client.MCPrts(TXB0=True)\n tT.sleep(period/1000)\n fuzzNumber += 1\n print \"Fuzzing Complete\" \n SIDhigh = (1056 >> 3) & 0xFF; # get SID bits 10:3, rotate them to bits 7:0\n SIDlow = (1056 & 0x07) << 5; # get SID bits 2:0, rotate them to bits 7:5\n packet = [SIDhigh, SIDlow, 0, 0, 8, 65, 255, 32, 120, 0, 0, 1, 247]\n self.client.txpacket(packet)\n for i in range(0,100):\n self.client.MCPrts(TXB0=True)\n tT.sleep(.01)\n outfile.close()", "def isolate_self_reporting_cases(self, time: int):", "def get_sampled_ids(self):\n seed = 123\n #initiate two lists, to save randomly picked positive and negative cases respectively\n positiveIds = []\n negativeIds = []\n i = 0\n print \"==> resampling ... \",\n while len(positiveIds)+len(negativeIds)<self.ntotal:\n # start a loop from 0 to total size of the new sampe\n # if it catches a number divisable by the sought ratio, update the list of positive cases ids\n # otherwise keep update the list of negative cases ids\n try:\n if i%int(100 / self.posRate) == 0: \n positiveIds.append(self.posId.next())\n else:\n negativeIds.append(self.negId.next())\n except:\n print \"Enter posRate higher than the initial rate\"\n break\n i+=1\n print \"Done sampling\"\n print \"positive:\", len(positiveIds)\n print \"negative:\", len(negativeIds)\n print \"final size:\", len(positiveIds)+len(negativeIds)\n #return sorted list of the two list of ids combined\n return sorted(positiveIds+negativeIds)", "def snip_audio(data, snip_length=4, cutoff=0.25, min_snips=None, max_snips=None,\n num_jitters=None, jitter=0.25,\n rate=44100, log=False):\n if max_snips is None:\n if min_snips is None:\n min_snips = 1\n max_snips = max(min_snips, int((float(len(data))/rate)/3.0))\n # Pad data with (snip_length * rate / 2) zeros.\n chop = np.lib.pad(data, int(snip_length*rate/2), 'constant')\n if log:\n logging.info(\"Data padded with %.1f s of zeros.\" %\n (float(snip_length)/2))\n snips = []\n logs = []\n max_sum = 0\n count = 0\n sum_ratio = 1\n\n while True:\n current_sum, start_idx, end_idx = find_loudest_subset(chop, snip_length,\n rate=rate)\n max_sum = max(max_sum, current_sum)\n sum_ratio = float(current_sum) / max_sum\n if sum_ratio < cutoff:\n break\n collection = []\n if num_jitters is None:\n collection.append(np.copy(chop[start_idx : end_idx]))\n else:\n for j in xrange(num_jitters):\n offset = int(jitter * rate * random.uniform(-1, 1))\n try:\n collection.append(np.copy(chop[start_idx+offset : end_idx+offset]))\n except IndexError:\n collection.append(np.copy(chop[start_idx : end_idx]))\n logs.append((sum_ratio, max_sum, start_idx, end_idx))\n chop[start_idx : end_idx] = 0\n snips.append(collection)\n count += 1\n if count >= max_snips:\n break\n return snips, logs", "def stats(containerids, stream):\n click.echo('*** MONITORING IS INITIATED')\n if(stream):\n while True:\n for x in containerids:\n index = slice(12)\n monitoring(containerid=x[index])\n else:\n for x in containerids:\n index = slice(12)\n monitoring(containerid=x[index])", "def sniff_ip(time_to_sniff):\r\n ip_dict = dict()\r\n port_dict = dict()\r\n packets = sniff(timeout=time_to_sniff, filter=\"ip\")\r\n\r\n for i in packets:\r\n sport = 0\r\n src = i['IP'].src\r\n\r\n if \"TCP\" in i:\r\n sport = i['TCP'].sport\r\n\r\n elif \"UDP\" in i:\r\n sport = i['UDP'].sport\r\n\r\n if not src in ip_dict.keys():\r\n ip_dict[src] = 1\r\n\r\n else:\r\n ip_dict[src] += 1\r\n\r\n if sport:\r\n if not sport in port_dict.keys():\r\n port_dict[sport] = 1\r\n\r\n else:\r\n 
port_dict[sport] += 1\r\n\r\n return ip_dict, port_dict", "def run_forever(self):\n scapy.sniff(prn=self.arp_cb, filter=\"arp\", store=0, count=0)", "def batch_process(minID, maxID, side='blue', **kwargs):\r\n\r\n if side == 'both':\r\n sides = ['blue','red']\r\n else:\r\n sides = [side]\r\n for side in sides:\r\n for i in range(minID, maxID+1, 1):\r\n filename = '%s%04d.fits' % (side, i)\r\n if os.path.exists(filename):\r\n try:\r\n extract1D(i, side=side, **kwargs)\r\n except iraf.IrafError:\r\n # some errors just require you to try again...\r\n print 'Hit error, retrying...'\r\n extract1D(i, side=side, **kwargs)", "def unsend_scheduled_messages_after(self, time_cutoff):\n for user_id in self.user_id_to_scheduled_message_ts:\n for scheduled_ts in list(self.user_id_to_scheduled_message_ts[user_id]):\n if scheduled_ts >= time_cutoff:\n # The below if statement is likley redundant\n if scheduled_ts in self.user_id_to_scheduled_message_ts[user_id]:\n self.user_id_to_scheduled_message_ts[user_id].remove(scheduled_ts)", "def main():\n with open (\"input.txt\", 'r') as f:\n read_data = f.readlines()\n\n guards = dict()\n\n # Sort the data since we have to look at shifts.\n for code in sorted(read_data):\n month, date, minute = split_code(code)\n # The sleep/wake is based on most recent guard.\n # So we can just pick the guard first and then pick times.\n # This is limited by _very strict_ input validation.\n if ('Guard' in code):\n guard = (code.split('#')[1]).split(' ')[0]\n if guard not in guards:\n guards[guard] = 60 * [0]\n\n if ('sleep' in code):\n start_minute = minute\n\n if ('wakes' in code):\n stop_minute = minute\n for i in range(int(start_minute),int(stop_minute)):\n guards[guard][i] += 1\n\n # These variable names could be better.\n guard_max_slept = 0;\n max_slept_guard = 0;\n max_slept_min = 0;\n min_max_slept = 0;\n max_min_slept_guard = 0;\n\n for i in guards:\n if sum(guards[i]) > guard_max_slept:\n max_slept_guard = i;\n guard_max_slept = sum(guards[i])\n\n if max(guards[i]) > min_max_slept:\n max_min_slept_guard = i\n max_slept_min = guards[i].index(max(guards[i]))\n min_max_slept = max(guards[i])\n\n\n chosen_id_part_2 = int(max_min_slept_guard) * int(max_slept_min)\n max_slept_min = guards[max_slept_guard].index(max(guards[max_slept_guard]))\n chosen_id_part_1 = int(max_slept_guard) * int(max_slept_min)\n print (\"PART 1: ID of chosen guard = \", chosen_id_part_1)\n print (\"PART 2: ID of chosen guard = \", chosen_id_part_2)", "def sniff_traffic(hs, count, timeout, recipient_type, pkt_type,\n exp_src, exp_dst, testlog):\n iface = hs.ports['eth1']\n\n # If host is NVP, sniff using a filter that checks for UDP packets\n if (\"NVP\" in recipient_type):\n packet = hs.libs.scapy.sniff2(\"iface='{}', count={}, timeout={}, \"\n \" filter='port 4789 and (!icmp or !ip6)', \"\n \" prn=lambda x:x.show()\".format(\n iface, count, timeout), True)\n parse_packet(packet, recipient_type, pkt_type,\n exp_src, exp_dst, testlog)\n # If host is AVP, sniff using a filter that checks for Ethernet packets\n elif (\"AVP\" in recipient_type):\n packet = hs.libs.scapy.sniff2(\"iface='{}', count={}, \"\n \" timeout={}, filter='!icmp or !ip6', \"\n \" prn=lambda x:x.show()\".format(\n iface, count, timeout), True)\n parse_packet(packet, recipient_type, pkt_type,\n exp_src, exp_dst, testlog)", "def part2(puzzle_input):\n\n puzzle_input_arr = puzzle_input.split('\\n')\n seconds_to_action = {} # {timestamp: (datetime, action)\n for line in puzzle_input_arr:\n m = re.match(r'\\[(\\d+)-(\\d+)-(\\d+) 
(\\d+):(\\d+)\\](.*)', line)\n dt = datetime(1970, int(m.group(2)), int(m.group(3)), int(m.group(4)), int(m.group(5)))\n seconds = dt.timestamp()\n seconds_to_action[seconds] = (dt, m.group(6))\n seconds_sorted = sorted(seconds_to_action.keys())\n guard_to_minutes_slept = {} # {Guard ID: number of minutes slept}\n guard_to_minute = {} # {Guard Id: [5, 6, 7, 8, 9...24, 30, 31...54, 24, 25, 26, 27, 28]}\n minute_to_guard_slept= {} # {minute: [guard IDs]}\n guard_id = 0\n sleep_time = None\n for second in seconds_sorted:\n dt, action = seconds_to_action[second]\n if \"begins\" in action:\n guard_id = int(re.match(r' Guard #(\\d+)', action).group(1))\n if guard_id not in guard_to_minutes_slept:\n guard_to_minutes_slept[guard_id] = 0\n guard_to_minute[guard_id] = []\n elif \"falls\" in action:\n sleep_time = dt\n elif \"wakes\" in action:\n difference_in_minutes = int((dt.timestamp() - sleep_time.timestamp()) // 60)\n guard_to_minutes_slept[guard_id] += difference_in_minutes\n for i in range(difference_in_minutes):\n if (sleep_time.minute + i) % 60 not in minute_to_guard_slept:\n minute_to_guard_slept[(sleep_time.minute + i) % 60] = [guard_id]\n else:\n minute_to_guard_slept[(sleep_time.minute + i) % 60].append(guard_id)\n most_frequent_number_of_occurrences, sleepiest_guard_id = (0, 0)\n sleepiest_minute = 0\n for minute in minute_to_guard_slept:\n c = collections.Counter(minute_to_guard_slept[minute])\n if c.most_common(1)[0][1] > most_frequent_number_of_occurrences:\n sleepiest_guard_id, most_frequent_number_of_occurrences = c.most_common(1)[0]\n sleepiest_minute = minute\n return sleepiest_guard_id * sleepiest_minute", "def test_fetch_with_max_id_and_since_id(self):\n # when from_date/to_date are not set, max result len = DIRECT_MESSAGES_LIMIT\n FakeTwitterApi.restore_settings()\n api = FakeTwitterApi()\n last_id, _ = api.DM[0]\n first_id, _ = api.DM[DirectMessagesFetcher.DIRECT_MESSAGES_LIMIT - 1]\n\n res = DirectMessagesFetcher(api)\n statuses = list(res.fetch())\n self.assertEqual(len(statuses), DirectMessagesFetcher.DIRECT_MESSAGES_LIMIT)\n self.assertEqual(statuses[0]['id'], last_id)\n self.assertEqual(statuses[-1]['id'], first_id)\n\n # let set max_id to 51th item\n FakeTwitterApi.restore_settings()\n api = FakeTwitterApi()\n id_50, _ = api.DM[50] # go to 51th dm\n\n res = DirectMessagesFetcher(api, **{\"max_id\": id_50})\n statuses = list(res.fetch())\n self.assertEqual(len(statuses), DirectMessagesFetcher.DIRECT_MESSAGES_LIMIT - 50)\n self.assertTrue(all(s['id'] <= id_50 for s in statuses))\n self.assertEqual(statuses[0]['id'], id_50)\n\n # let check since_id\n FakeTwitterApi.restore_settings()\n api = FakeTwitterApi()\n since_id, _ = api.DM[100]\n max_id, _ = api.DM[20]\n\n res = DirectMessagesFetcher(api, **{'max_id': max_id, 'since_id': since_id})\n statuses = list(res.fetch())\n self.assertEqual(len(statuses), 100 - 20)", "def test_stream_loop(self):\n chans, gains, scans, rate = (10,10,10,10), (1,2,4,5), 1024, 500\n v = [v[0] for v in self.l.stream_sync(\n channels=chans, gains=gains,\n num_scans=scans, rate=rate)]\n for vi in v:\n for r in vi:\n self.assertTrue(abs(r-2.5) < .1,\n \"%s should be cal, 2.5v\" % vi[0])", "def update_live_data(n, last_time, id1, id2, power):\n if power:\n raise PreventUpdate\n\n timer_start = perf_counter()\n # 1 sec delay so server has time to add live data\n end_time = datetime.now(timezone.utc) - timedelta(seconds=1)\n\n # Initialization and lag prevention\n if last_time is None or end_time - strptime_fix(last_time) > timedelta(seconds=3):\n 
logging.warning('Falling behind! Start %s End %s', last_time, end_time)\n return dash.no_update, dash.no_update, end_time.isoformat(), dash.no_update\n\n # Query data from SMIP\n logging.info(f'start_time {last_time} end_time {end_time}')\n timer_query_start = perf_counter()\n r = conn.get_data(last_time, end_time.isoformat(),\n [id1, id2], timeout=1)\n timer_query_end = perf_counter()\n response_json: dict = r.json()\n logging.debug(response_json.keys())\n if 'errors' in response_json:\n logging.error(response_json)\n raise Exception()\n data = response_json['data']['getRawHistoryDataWithSampling']\n logging.info('Got %s responses in %s seconds', len(\n data), timer_query_end - timer_query_start)\n\n # Used for measuring performance\n start_processing = perf_counter()\n\n # Unpack data\n def unpack(id: int):\n \"\"\"Unpacks return data into time and value lists\"\"\"\n id = int(id)\n time_list = [i['ts'] for i in data if int(i['id']) == id]\n val_list = [i['floatvalue'] for i in data if int(i['id']) == id]\n # SMIP always returns one entry before the start time for each ID, we don't need this\n if len(time_list) < 2 or len(val_list) < 2:\n return dash.no_update\n time_list.pop(0)\n val_list.pop(0)\n # Measure sampling rate\n rate = nan\n if len(time_list) > 1:\n rate = (strptime_fix(time_list[1])\n - strptime_fix(time_list[0])).total_seconds()\n return {'time_list': time_list, 'val_list': val_list, 'rate': rate}\n\n # Used for measuring performance\n data_processed = perf_counter()\n logging.info('Total %s Query %s Processing %s', data_processed - timer_start, timer_query_end - timer_query_start,\n data_processed - start_processing)\n\n return unpack(id1), unpack(id2), end_time.isoformat(), \\\n [f'Last updated {end_time.astimezone()},',\n html.Br(),\n f'received {len(data)} samples in {round(data_processed - timer_start, 3)} seconds']", "def filtered_messages(self, msg_type_set):\n ids = {msg_type.get_message_id() for msg_type in msg_type_set}\n\n while True:\n data = self._read_binary_sirf_msg()\n if sirf.bytes_to_message_id(data) in ids:\n yield sirf.from_bytes(data)", "def smoothing(time, heart, pace):\n\n heart_filt = []\n pace_filt = []\n for ind in range(60, len(time)):\n segment = (heart[(ind-60):ind])\n if (max(segment)-min(segment)) < 15:\n print \"got one!\"\n heart_filt.append(heart[ind-30]) # TODO improvement: use the average\n pace_filt.append(pace[ind-30])\n return (heart_filt, pace_filt)", "def delete_by_time():\n while True:\n try:\n now = time.time()\n for user in get_time_start():\n ip, start, protocol = str(user[0][0]), user[1][0], str(user[2][0])\n \n if now - start >= 60 and ip not in BLACK_LIST:\n delete_ip(ip)\n\n except Exception as e:\n logging.info(e)", "def pick_ids(self, nb_obj, nb_rounds):\n self.file.seek(0)\n ids=set()\n \n nb = 0\n \n while nb < nb_rounds:\n nb +=1\n \n while len(ids) < nb*nb_obj:\n cur_line = self.file.readline()\n cur_values = cur_line.split()\n try:\n ids.add(cur_values[1])\n except Exception:\n if not cur_line:\n print 'eof', nb, len(ids)\n self.file.seek(0)\n \n \n time_1 = int(cur_values[0])\n \n while int(cur_values[0]) is time_1:\n cur_line = self.file.readline()\n cur_values = cur_line.split()\n \n self.file.seek(0)\n #print ids\n print len(ids)\n return ids" ]
[ "0.6650663", "0.60136783", "0.5309716", "0.5241036", "0.5212497", "0.5179154", "0.5103345", "0.5085045", "0.50332683", "0.49909282", "0.49524868", "0.49447545", "0.49435183", "0.4942979", "0.49187246", "0.49124974", "0.48578656", "0.48059002", "0.4800731", "0.47861597", "0.4763296", "0.47399947", "0.47287792", "0.47162947", "0.46989596", "0.46909845", "0.46782547", "0.46713674", "0.4663959", "0.46558735" ]
0.6956039
0
This method will choose random values to listen out of all the possible standard ids up to the given number. It will sniff for the given amount of time on each set of ids on the given frequency. Sniffs in groups of 6 but when at least one message is read in it will go through all six individually before continuing. This does not save any sniffed packets.
def sweepRandom(self, freq, number = 5, time = 5): msgIDs = [] #standard IDs that we have observed during run ids = [] #standard IDs that have been tried self.client.serInit() self.client.MCPsetup() for i in range(0,number+1,6): idsTemp = [] comment = "sweepFilter: " for j in range(0,6,1): id = randrange(2047) #comment += "_%d" % id idsTemp.append(id) ids.append(id) #print comment description = "Running a sweep filer for all the possible standard IDs. This runs the following : " + comment count = self.sniff(freq=freq, duration=time, description=description, comment = comment, standardid = idsTemp) if( count != 0): for element in idsTemp: #comment = "sweepFilter: %d" % (element) comment="sweepFilter: " description = "Running a sweep filer for all the possible standard IDs. This run filters for: %d " % element count = self.sniff(freq=freq, duration = time, description = description,comment = comment, standardid = [element, element, element]) if( count != 0): msgIDs.append(j) return msgIDs, ids
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generationFuzzer(self,freq, standardIDs, dbLimits, period, writesPerFuzz, Fuzzes):\n #print \"Fuzzing on standard ID: %d\" %standardId\n self.client.serInit()\n self.spitSetup(freq)\n packet = [0,0,0x00,0x00,0x08,0,0,0,0,0,0,0,0] #empty packet template\n \n\n #get folder information (based on today's date)\n now = datetime.datetime.now()\n datestr = now.strftime(\"%Y%m%d\")\n path = self.DATA_LOCATION+\"InjectedData/\"+datestr+\"_GenerationFuzzedPackets.csv\"\n filename = path\n outfile = open(filename,'a');\n dataWriter = csv.writer(outfile,delimiter=',');\n #dataWriter.writerow(['# Time Error Bytes 1-13']);\n #dataWriter.writerow(['#' + description])\n \n numIds = len(standardIDs)\n fuzzNumber = 0; #: counts the number of packets we have generated\n while( fuzzNumber < Fuzzes):\n id_new = standardIDs[random.randint(0,numIds-1)]\n print id_new\n #### split SID into different regs\n SIDhigh = (id_new >> 3) & 0xFF; # get SID bits 10:3, rotate them to bits 7:0\n SIDlow = (id_new & 0x07) << 5; # get SID bits 2:0, rotate them to bits 7:5\n packet[0] = SIDhigh\n packet[1] = SIDlow\n \n #generate a fuzzed packet\n for i in range(0,8): # for each data byte, fuzz it\n idx = \"db%d\"%i\n limits = dbLimits[idx]\n value = random.randint(limits[0],limits[1]) #generate pseudo-random integer value\n packet[i+5] = value\n print packet\n #put a rough time stamp on the data and get all the data bytes \n row = [tT.time(), id_new,8] # could make this 8 a variable \n msg = \"Injecting: \"\n for i in range(5,13):\n row.append(packet[i])\n msg += \" %d\"%packet[i]\n #print msg\n dataWriter.writerow(row)\n self.client.txpacket(packet)\n tT.sleep(period/1000)\n \n #inject the packet the given number of times. \n for i in range(1,writesPerFuzz):\n self.client.MCPrts(TXB0=True)\n tT.sleep(period/1000)\n fuzzNumber += 1\n print \"Fuzzing Complete\" \n SIDhigh = (1056 >> 3) & 0xFF; # get SID bits 10:3, rotate them to bits 7:0\n SIDlow = (1056 & 0x07) << 5; # get SID bits 2:0, rotate them to bits 7:5\n packet = [SIDhigh, SIDlow, 0, 0, 8, 65, 255, 32, 120, 0, 0, 1, 247]\n self.client.txpacket(packet)\n for i in range(0,100):\n self.client.MCPrts(TXB0=True)\n tT.sleep(.01)\n outfile.close()", "def generalFuzz(self,freq, Fuzzes, period, writesPerFuzz):\n #print \"Fuzzing on standard ID: %d\" %standardId\n self.client.serInit()\n self.spitSetup(freq)\n packet = [0,0,0x00,0x00,0x08,0,0,0,0,0,0,0,0] #empty template\n \n #get folder information (based on today's date)\n now = datetime.datetime.now()\n datestr = now.strftime(\"%Y%m%d\")\n path = self.DATA_LOCATION+\"InjectedData/\"+datestr+\"_GenerationFuzzedPackets.csv\"\n filename = path\n outfile = open(filename,'a');\n dataWriter = csv.writer(outfile,delimiter=',');\n #dataWriter.writerow(['# Time Error Bytes 1-13']);\n #dataWriter.writerow(['#' + description])\n \n fuzzNumber = 0; #: counts the number of packets we have generated\n while( fuzzNumber < Fuzzes):\n #generate new random standard id in the full range of possible values\n id_new = random.randint(0,4095) \n #print id_new\n #### split SID into different regs\n SIDhigh = (id_new >> 3) & 0xFF; # get SID bits 10:3, rotate them to bits 7:0\n SIDlow = (id_new & 0x07) << 5; # get SID bits 2:0, rotate them to bits 7:5\n packet[0] = SIDhigh\n packet[1] = SIDlow\n \n #generate a fuzzed packet\n for i in range(0,8): # for each data byte, fuzz it\n idx = \"db%d\"%i\n \n value = random.randint(0, 255) #generate pseudo-random integer value\n packet[i+5] = value\n #print packet\n #put a rough time stamp on the data and 
get all the data bytes \n row = [time.time(), id_new,8] \n \"\"\"@todo: allow for varied packet lengths\"\"\"\n msg = \"Injecting: \"\n for i in range(5,13):\n row.append(packet[i])\n msg += \" %d\"%packet[i]\n #print msg\n dataWriter.writerow(row)\n self.client.txpacket(packet)\n time.sleep(period/1000)\n \n #inject the packet the given number of times. \n for i in range(1,writesPerFuzz):\n self.client.MCPrts(TXB0=True)\n time.sleep(period/1000)\n fuzzNumber += 1\n print \"Fuzzing Complete\" \n outfile.close()", "def randomNumberGenerator(self):\n #infinite loop of magical random numbers\n print(\"Making random numbers\")\n while not thread_stop_event.isSet():\n number = random.randint(10000,99999)\n print(number)\n socketio.emit('newQrCode', str(number), namespace='/test')\n time.sleep(5)", "def randomNumberGenerator(self):\n # infinite loop of magical random numbers\n print(\"Making random numbers\")\n while not thread_stop_event.isSet():\n number = round(random() * 10, 3)\n print(number)\n self.socketio.emit('newnumber', {'number': number}, namespace='/test')\n sleep(self.delay)", "def data_feeder_2():\n return random.sample(range(100), 10)", "def sample_n_unique(sampling_f, n):\n res = []\n while len(res) < n:\n candidate = sampling_f()\n if candidate not in res:\n res.append(candidate)\n return res", "def generate_number_of_events(max_number):\n\n return randint(1, max_number)", "def get_sampled_ids(self):\n seed = 123\n #initiate two lists, to save randomly picked positive and negative cases respectively\n positiveIds = []\n negativeIds = []\n i = 0\n print \"==> resampling ... \",\n while len(positiveIds)+len(negativeIds)<self.ntotal:\n # start a loop from 0 to total size of the new sampe\n # if it catches a number divisable by the sought ratio, update the list of positive cases ids\n # otherwise keep update the list of negative cases ids\n try:\n if i%int(100 / self.posRate) == 0: \n positiveIds.append(self.posId.next())\n else:\n negativeIds.append(self.negId.next())\n except:\n print \"Enter posRate higher than the initial rate\"\n break\n i+=1\n print \"Done sampling\"\n print \"positive:\", len(positiveIds)\n print \"negative:\", len(negativeIds)\n print \"final size:\", len(positiveIds)+len(negativeIds)\n #return sorted list of the two list of ids combined\n return sorted(positiveIds+negativeIds)", "def add_silent_values(noiseValues):\n for i in range(0, groupSize):\n noiseRecord = random.choice(noiseValues)\n noiseValue = noiseRecord.audio\n newSample = AudioData(\n noiseValue,\n \"W6\",\n 1,\n \"Washing Room\"\n )\n newSample.processedValue = noiseValue\n newSample.noiseValue = noiseValue\n db.session.add(newSample)\n db.session.commit()", "def randomNumberGenerator(self):\n #infinite loop of magical random numbers\n print \"Making random numbers\"\n while not thread_stop_event.isSet():\n global counter\n #if counter == 8:\n # emit('done', {'data': 'finito'})\n # break\n if (counter == 20):\n socketio.emit('done', {'data': 'Connected'},namespace='/test')\n counter = 0\n break\n number = round(random()*10, 3)\n print number\n print counter\n counter = counter +1\n socketio.emit('my response', {\n 'data': number,\n 'message': 'Commit message!',\n 'hash': number,\n 'url': 'http://andyzg.github.io',\n }, namespace='/test')\n sleep(self.delay)", "def generate_fake_ping_data(random_state, size):\n values = random_state.random_integers(low=5, high=20, size=size)\n picked_low_latency_values_indexes = random_state.choice(\n size, round(0.001 * len(values)), replace=False\n )\n\n # 
Sets the picked value to a random low ping (e.g.: [100, 200]),\n # and sets the direct close values to a ping between 40 and 80ms\n for index in picked_low_latency_values_indexes:\n if index - 1 >= 0:\n values[index - 1] = random_state.random_integers(40, 80)\n\n values[index] = random_state.random_integers(100, 200)\n\n if index + 1 < size:\n values[index + 1] = random_state.random_integers(40, 80)\n\n return values.tolist()", "def generate_packets():\n num_packets = randrange(10)\n temp_packets = []\n for i in range(num_packets):\n temp_packets.append(randrange(1000))\n return temp_packets", "def rtrSweep(self,freq,lowID,highID, attempts = 1,duration = 1, verbose = True):\n #set up file for writing\n now = datetime.datetime.now()\n datestr = now.strftime(\"%Y%m%d\")\n path = self.DATA_LOCATION+datestr+\"_rtr.csv\"\n filename = path\n outfile = open(filename,'a');\n dataWriter = csv.writer(outfile,delimiter=',');\n dataWriter.writerow(['# Time Error Bytes 1-13']);\n dataWriter.writerow(['#' + \"rtr sweep from %d to %d\"%(lowID,highID)])\n if( verbose):\n print \"started\"\n #self.client.serInit()\n #self.spitSetup(freq)\n \n #for each id\n for i in range(lowID,highID+1, 1):\n self.client.serInit()\n self.spitSetup(freq) #reset the chip to try and avoid serial timeouts\n #set filters\n standardid = [i, i, i, i]\n self.addFilter(standardid, verbose = True)\n \n #### split SID into different areas\n SIDlow = (standardid[0] & 0x07) << 5; # get SID bits 2:0, rotate them to bits 7:5\n SIDhigh = (standardid[0] >> 3) & 0xFF; # get SID bits 10:3, rotate them to bits 7:0\n #create RTR packet\n packet = [SIDhigh, SIDlow, 0x00,0x00,0x40]\n dataWriter.writerow([\"#requested id %d\"%i])\n #self.client.poke8(0x2C,0x00); #clear the CANINTF register; we care about bits 0 and 1 (RXnIF flags) which indicate a message is being held \n #clear buffer\n packet1 = self.client.rxpacket();\n packet2 = self.client.rxpacket();\n #send in rtr request\n self.client.txpacket(packet)\n ## listen for 2 packets. 
one should be the rtr we requested the other should be\n ## a new packet response\n starttime = tT.time()\n while ((time.time() - starttime) < duration): #listen for the given duration time period\n packet = self.client.rxpacket()\n if( packet == None):\n continue\n # we have sniffed a packet, save it\n row = []\n row.append(\"%f\"%time.time()) #timestamp\n row.append(0) #error flag (not checkign)\n row.append(\"rtrRequest_%d\"%i) #comment\n row.append(duration) #sniff time\n row.append(1) # filtering boolean\n for byte in packet:\n row.append(\"%02x\"%ord(byte));\n dataWriter.writerow(row)\n print self.client.packet2parsedstr(packet)\n trial= 2;\n # for each trial repeat\n while( trial <= attempts):\n print \"trial: \", trial\n self.client.MCPrts(TXB0=True);\n starttime = time.time()\n # this time we will sniff for the given amount of time to see if there is a\n # time till the packets come in\n while( (time.time()-starttime) < duration):\n packet=self.client.rxpacket();\n if( packet == None):\n continue\n row = []\n row.append(\"%f\"%time.time()) #timestamp\n row.append(0) #error flag (not checking)\n row.append(\"rtrRequest_%d\"%i) #comment\n row.append(duration) #sniff time\n row.append(1) # filtering boolean\n for byte in packet:\n row.append(\"%02x\"%ord(byte));\n dataWriter.writerow(row)\n print self.client.packet2parsedstr(packet)\n trial += 1\n print \"sweep complete\"\n outfile.close()", "def test(model,gen,n_id,threshold=0.5,verbose=True,print_every_n=10):\n t_start = time()\n ce_avg,tp,tn,fp,fn = 0.,0.,0.,0.,0.\n for i in range(n_id):\n X_test,y_test = gen.next()\n y_test = y_test.mean()\n Y_pred = model.predict(X_test)\n y_pred = Y_pred.max(axis=0)\n ce = np.mean(- y_test * np.log(y_pred) - (1-y_test) * np.log(1-y_pred))\n ce_avg += ce/n_id\n y_predr = y_pred.round()\n tp += sum((y_test == 1) & (y_predr == 1))\n tn += sum((y_test == 0) & (y_predr == 0))\n fp += sum((y_test == 0) & (y_predr == 1))\n fn += sum((y_test == 1) & (y_predr == 0))\n if i % print_every_n == 0:\n print(i)\n prec,recall,acc = tp/(tp+fp+1e-15),tp/(tp+fn+1e-15),(tp+tn)/n_id\n F1 = 2*tp/(2*tp+fp+fn)\n if verbose:\n print('Valid F1 %.3f tp %.3f tn %.3f fp %.3f fn %.3f' % (F1,tp,tn,fp,fn))\n print('Took %.1fs' % (time()-t_start))\n return(ce_avg,prec,recall,F1,acc,tp,tn,fp,fn)", "def sample(self, n):\n raise NotImplementedError", "def run(self, match_payloads=False):\n\n self.responses.clear_responses()\n\n identifier = self.seed_id\n sequence = 1\n for payload in self.provider:\n payload_bytes_sent = self.send_ping(identifier, sequence, payload)\n\n if not match_payloads:\n self.responses.append(self.listen_for(identifier, self.timeout))\n else:\n self.responses.append(self.listen_for(identifier, self.timeout, payload_bytes_sent))\n\n sequence = self.increase_seq(sequence)", "def generate_rand_7():\n\n while(True):\n # This generates a random number uniformly distributed between 1 and 24.\n # The first term is 5 times a rand num between 1 - 4, yielding {5, 10,\n # 15, 20}. The second is a rand num between 1 - 4.\n # Since the two numbers are *independent*, adding them gives a rand num\n # uniformly distributed between 1 - 24.\n # The test then rejects any number that is 21 or above. This is then\n # divided into 7 numbers between 1 - 7 using % 7. 
Since there are 21\n # numbers in the interval [1, 21] and 21 is divisble by 7, the numbers\n # between 1 and 7 will occur with equal probability.\n num = 5 * (np.random.uniform(1, 5, 1) - 1) +\\\n (np.random.uniform(1, 5, 1) - 1)\n if num[0] < 21:\n return int(num[0] % 7 + 1)", "def run(self, repetitions, **kwargs):\n\t\tself.sampler.sample(repetitions, **kwargs)", "def fiducial_snapshots(Nsnap=5):\n \n times = np.linspace(0,0.5,Nsnap)\n \n for t in times:\n generate_fiducial(time=t)", "def Chose_rand():\r\n total_list=list(range(1,467681))\r\n select=13788\r\n random_selected= random.sample(total_list,select)\r\n return (random_selected)", "def test_sample(system_generator):\n\n name, test = system_generator()\n print(name)\n\n w_F, w_R, N_k = test.sample([10, 8], mode=\"wFwR\")\n w_F, w_R, N_k = test.sample([1, 1], mode=\"wFwR\")\n w_F, w_R, N_k = test.sample([10, 0], mode=\"wFwR\")\n w_F, w_R, N_k = test.sample([0, 5], mode=\"wFwR\")", "def test_measure_nondeterministic_with_sampling(self):\n shots = 2000\n circuits = ref_measure.measure_circuits_nondeterministic(allow_sampling=True)\n targets = ref_measure.measure_counts_nondeterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots)\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)", "def silent_n_random_genes(genome, n):\n genes_to_silence = randint(0, len(genome), n)\n for i in genes_to_silence:\n genome[i].is_silent = True", "def random_sampling(predictions, number):\n return random.sample(range(len(predictions)), number)", "def filterStdSweep(self, freq, low, high, time = 5):\n msgIDs = []\n self.client.serInit()\n self.client.MCPsetup()\n for i in range(low, high+1, 6):\n print \"sniffing id: %d, %d, %d, %d, %d, %d\" % (i,i+1,i+2,i+3,i+4,i+5)\n comment= \"sweepFilter: \"\n #comment = \"sweepFilter_%d_%d_%d_%d_%d_%d\" % (i,i+1,i+2,i+3,i+4,i+5)\n description = \"Running a sweep filer for all the possible standard IDs. This run filters for: %d, %d, %d, %d, %d, %d\" % (i,i+1,i+2,i+3,i+4,i+5)\n count = self.sniff(freq=freq, duration = time, description = description,comment = comment, standardid = [i, i+1, i+2, i+3, i+4, i+5])\n if( count != 0):\n for j in range(i,i+5):\n comment = \"sweepFilter: \"\n #comment = \"sweepFilter: %d\" % (j)\n description = \"Running a sweep filer for all the possible standard IDs. 
This run filters for: %d \" % j\n count = self.sniff(freq=freq, duration = time, description = description,comment = comment, standardid = [j, j, j, j])\n if( count != 0):\n msgIDs.append(j)\n return msgIDs", "def test_repeated_simuations_with_fixed_seed(self):\n random.seed(175203)\n expected_results = {-1: 5, 0: 36, 1: 43, 2: 16}\n self._setup_random_gen([0.01, 0.3, 0.58, 0.1, 0.01], [-1, 0, 1, 2, 3])\n\n simulation_results = Counter()\n for _ in range(100):\n simulation_results[self._random_gen.next_num()] += 1\n\n self.assertDictEqual(simulation_results, expected_results)", "def packetRespond(self,freq, time, repeats, period, responseID, respondPacket,listenID, listenPacket = None):\n \n \n self.client.serInit()\n self.spitSetup(freq)\n \n #formulate response packet\n SIDhigh = (responseID >> 3) & 0xFF; # get SID bits 10:3, rotate them to bits 7:0\n SIDlow = (responseID & 0x07) << 5; # get SID bits 2:0, rotate them to bits 7:5\n #resPacket[0] = SIDhigh\n #resPacket[1] = SIDlow\n resPacket = [SIDhigh, SIDlow, 0x00,0x00, # pad out EID regs\n 0x08, # bit 6 must be set to 0 for data frame (1 for RTR) \n # lower nibble is DLC \n respondPacket[0],respondPacket[1],respondPacket[2],respondPacket[3],respondPacket[4],respondPacket[5],respondPacket[6],respondPacket[7]]\n #load packet/send once\n \"\"\"@todo: make this only load the data onto the chip and not send \"\"\"\n self.client.txpacket(resPacket) \n self.addFilter([listenID,listenID,listenID,listenID, listenID, listenID]) #listen only for this packet\n startTime = tT.time()\n packet = None\n while( (tT.time() - startTime) < time):\n packet = self.client.rxpacket()\n if( packet != None):\n print \"packet read in, responding now\"\n # assume the ids already match since we are filtering for the id\n \n #compare packet received to desired packet\n if( listenPacket == None): # no packets given, just want the id\n for i in range(0,repeats):\n self.client.MCPrts(TXB0=True)\n tT.sleep(period/1000)\n else: #compare packets\n sid = ord(packet[0])<<3 | ord(packet[1])>>5\n print \"standard id of packet recieved: \", sid #standard ID\n msg = \"\"\n for i in range(0,8):\n idx = 5 + i\n byteIn = ord(packet[idx])\n msg += \" %d\" %byteIn\n compareIn = listenPacket[i]\n print byteIn, compareIn\n if( byteIn != compareIn):\n packet == None\n print \"packet did not match\"\n break\n print msg\n if( packet != None ):\n self.client.MCPrts(TXB0=True)\n tT.sleep(period/1000)\n print \"Response Listening Terminated.\"", "def take_spectra(self, datagroup):\n N = 0\n self.taking_spectra = True\n try:\n while N < self.number_of_spectra and self.taking_spectra: \n if N!=0: # starting data collection immediately\n time.sleep(self.time_interval_seconds) \n ds = datagroup.create_dataset(\"spectrum_%d\", \n data=self.spectrometer.read_spectrum(bundle_metadata=True),\n attrs=self.spectrometer.get_metadata(),\n )\n ds.attrs.create(\"time_interval\", self.time_interval_seconds)\n ds.attrs.create(\"information\", self.info_string)\n datagroup.file.flush()\n N += 1\n print \"Spectra %d of %d recorded\" % (N,self.number_of_spectra)\n print \"Done!\\n\"\n finally:\n self.taking_spectra = False", "def sdd(events,probs):\n \n import random\n nprobs=[x*1000 for x in probs] #so, here i multiply each float in 'probs' by 1000 and store the products in 'nprobs'\n newlist=[]\n for a in range(len(events)) : #then, in this loop, i create a list (newlist), in which each event appears 1000*its probability times\n b=nprobs[a]\n b=int(b)\n for c in range(b) :\n newlist.append(events[a]) \n 
return (random.choice(newlist)) #and finally, i ramdonly sample ", "def generate_synth_data(n):" ]
[ "0.600158", "0.5833272", "0.5673142", "0.56084114", "0.5455134", "0.5315466", "0.52986", "0.52894807", "0.5211797", "0.51809776", "0.5144543", "0.5077216", "0.5045132", "0.5042023", "0.5041329", "0.5027075", "0.50139046", "0.5008723", "0.5007171", "0.50008476", "0.49974468", "0.4991392", "0.4958771", "0.49500754", "0.49473104", "0.4940757", "0.49206904", "0.49170327", "0.49137026", "0.49117175" ]
0.73567533
0
This method will sweep through the range of IDs given by lowID to highID, send a remote transmission request (RTR) to each ID, and then listen for a response. The RTR will be repeated for the given number of attempts, and the method will sniff for the given duration before continuing to the next ID. Any messages that are sniffed will be saved to a CSV file. The output file will be stored in the DATA_LOCATION folder with a filename of the form (YYYYMMDD)_rtr.csv, where the prefix is the current date. If the file already exists, new rows are appended to the end of it.
def rtrSweep(self,freq,lowID,highID, attempts = 1,duration = 1, verbose = True): #set up file for writing now = datetime.datetime.now() datestr = now.strftime("%Y%m%d") path = self.DATA_LOCATION+datestr+"_rtr.csv" filename = path outfile = open(filename,'a'); dataWriter = csv.writer(outfile,delimiter=','); dataWriter.writerow(['# Time Error Bytes 1-13']); dataWriter.writerow(['#' + "rtr sweep from %d to %d"%(lowID,highID)]) if( verbose): print "started" #self.client.serInit() #self.spitSetup(freq) #for each id for i in range(lowID,highID+1, 1): self.client.serInit() self.spitSetup(freq) #reset the chip to try and avoid serial timeouts #set filters standardid = [i, i, i, i] self.addFilter(standardid, verbose = True) #### split SID into different areas SIDlow = (standardid[0] & 0x07) << 5; # get SID bits 2:0, rotate them to bits 7:5 SIDhigh = (standardid[0] >> 3) & 0xFF; # get SID bits 10:3, rotate them to bits 7:0 #create RTR packet packet = [SIDhigh, SIDlow, 0x00,0x00,0x40] dataWriter.writerow(["#requested id %d"%i]) #self.client.poke8(0x2C,0x00); #clear the CANINTF register; we care about bits 0 and 1 (RXnIF flags) which indicate a message is being held #clear buffer packet1 = self.client.rxpacket(); packet2 = self.client.rxpacket(); #send in rtr request self.client.txpacket(packet) ## listen for 2 packets. one should be the rtr we requested the other should be ## a new packet response starttime = tT.time() while ((time.time() - starttime) < duration): #listen for the given duration time period packet = self.client.rxpacket() if( packet == None): continue # we have sniffed a packet, save it row = [] row.append("%f"%time.time()) #timestamp row.append(0) #error flag (not checkign) row.append("rtrRequest_%d"%i) #comment row.append(duration) #sniff time row.append(1) # filtering boolean for byte in packet: row.append("%02x"%ord(byte)); dataWriter.writerow(row) print self.client.packet2parsedstr(packet) trial= 2; # for each trial repeat while( trial <= attempts): print "trial: ", trial self.client.MCPrts(TXB0=True); starttime = time.time() # this time we will sniff for the given amount of time to see if there is a # time till the packets come in while( (time.time()-starttime) < duration): packet=self.client.rxpacket(); if( packet == None): continue row = [] row.append("%f"%time.time()) #timestamp row.append(0) #error flag (not checking) row.append("rtrRequest_%d"%i) #comment row.append(duration) #sniff time row.append(1) # filtering boolean for byte in packet: row.append("%02x"%ord(byte)); dataWriter.writerow(row) print self.client.packet2parsedstr(packet) trial += 1 print "sweep complete" outfile.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generationFuzzer(self,freq, standardIDs, dbLimits, period, writesPerFuzz, Fuzzes):\n #print \"Fuzzing on standard ID: %d\" %standardId\n self.client.serInit()\n self.spitSetup(freq)\n packet = [0,0,0x00,0x00,0x08,0,0,0,0,0,0,0,0] #empty packet template\n \n\n #get folder information (based on today's date)\n now = datetime.datetime.now()\n datestr = now.strftime(\"%Y%m%d\")\n path = self.DATA_LOCATION+\"InjectedData/\"+datestr+\"_GenerationFuzzedPackets.csv\"\n filename = path\n outfile = open(filename,'a');\n dataWriter = csv.writer(outfile,delimiter=',');\n #dataWriter.writerow(['# Time Error Bytes 1-13']);\n #dataWriter.writerow(['#' + description])\n \n numIds = len(standardIDs)\n fuzzNumber = 0; #: counts the number of packets we have generated\n while( fuzzNumber < Fuzzes):\n id_new = standardIDs[random.randint(0,numIds-1)]\n print id_new\n #### split SID into different regs\n SIDhigh = (id_new >> 3) & 0xFF; # get SID bits 10:3, rotate them to bits 7:0\n SIDlow = (id_new & 0x07) << 5; # get SID bits 2:0, rotate them to bits 7:5\n packet[0] = SIDhigh\n packet[1] = SIDlow\n \n #generate a fuzzed packet\n for i in range(0,8): # for each data byte, fuzz it\n idx = \"db%d\"%i\n limits = dbLimits[idx]\n value = random.randint(limits[0],limits[1]) #generate pseudo-random integer value\n packet[i+5] = value\n print packet\n #put a rough time stamp on the data and get all the data bytes \n row = [tT.time(), id_new,8] # could make this 8 a variable \n msg = \"Injecting: \"\n for i in range(5,13):\n row.append(packet[i])\n msg += \" %d\"%packet[i]\n #print msg\n dataWriter.writerow(row)\n self.client.txpacket(packet)\n tT.sleep(period/1000)\n \n #inject the packet the given number of times. \n for i in range(1,writesPerFuzz):\n self.client.MCPrts(TXB0=True)\n tT.sleep(period/1000)\n fuzzNumber += 1\n print \"Fuzzing Complete\" \n SIDhigh = (1056 >> 3) & 0xFF; # get SID bits 10:3, rotate them to bits 7:0\n SIDlow = (1056 & 0x07) << 5; # get SID bits 2:0, rotate them to bits 7:5\n packet = [SIDhigh, SIDlow, 0, 0, 8, 65, 255, 32, 120, 0, 0, 1, 247]\n self.client.txpacket(packet)\n for i in range(0,100):\n self.client.MCPrts(TXB0=True)\n tT.sleep(.01)\n outfile.close()", "def __send_reports__(self,config,mockdb):\n numbers = config.get('Flowcell_reports','numbers').split(',')\n for number in numbers:\n flowcell_report_key = getattr(self,'flowcell_report_' + str(number) + '_key')\n if flowcell_report_key is None:\n continue\n report = mockdb['FlowcellStatisticReport'].objects[flowcell_report_key]\n if report.report_sent is True: #If the report is already sent, next.\n continue\n if not report.__is_complete__(): #If the qsub script is still running, next.\n continue\n if self.sequencing_run_type == 'RapidRun' and str(number) == '16':\n recipients = config.get('Flowcell_reports','last_recipients')\n subject, body = report.__generate_flowcell_report_text__(config,mockdb,report_type=\"last_report\")\n #Add samples to the all sample list\n sample_keys = self.__completed_samples_list__(mockdb)\n write_list_file(sample_keys,config.get('Filenames','all_samples'),original_list_file=config.get('Filenames','all_samples'))\n self.__finish__()\n elif self.sequencing_run_type == 'HighThroughputRun' and str(number) == '64':\n recipients = config.get('Flowcell_reports','last_recipients')\n subject, body = report.__generate_flowcell_report_text__(config,mockdb,report_type=\"last_report\")\n #Add samples to the all sample list\n sample_keys = self.__completed_samples_list__(mockdb)\n 
write_list_file(sample_keys,config.get('Filenames','all_samples'),original_list_file=config.get('Filenames','all_samples'))\n self.__finish__()\n else:\n recipients = config.get('Flowcell_reports','subset_recipients')\n subject, body = report.__generate_flowcell_report_text__(config,mockdb,report_type=\"subset_report\")\n files = []\n files.append(report.report_pdf)\n files.append(report.full_report)\n files.append(report.current_report)\n send_email(subject,body,recipients=recipients,files=files)\n report.__finish__()\n report.report_sent = True\n return 1", "def main():\n\tif len(sys.argv) > 1:\n\t\tfilename = sys.argv[1]\n\t\twith open(filename, 'r') as myfile:\n\t\t\tdata = myfile.read().replace('\\n', '').replace('\\t','')\n\telse:\n\t\tdata = sys.stdin.read().replace('\\n', '').replace('\\t','')\n\n\totp = \"\"\n\tif len(sys.argv) == 3:\n\t\totp = sys.argv[2]\n\t\tprint(\"otp : \" + otp)\n\t\tprint\n\n\n\tPATH = \"report/\"\n\tidtemplate = \"$UNIQUE_IDENTIFIER_\"\n\tsock = create_socket()\n\n\tconn= connecthttps(\"https://kryptus.dyndns.biz\", 49193)\n\texpectedResults = \"\"\n\tresults = \"\"\n\ttestcase = ElementTree.fromstring(data)\n\tidStore = {}\n\tfor i in range(0,len(testcase), 2):\n\t\t# Get XML request and expected response.\n\t\tereq = testcase[i]\n\t\teres = testcase[i+1]\n\t\texpectedResults += parse_xml_to_pretty_string(ereq)\n\t\texpectedResults += parse_xml_to_pretty_string(eres)\n\t\texpectedResults += \"\\n\\\\newpage\\n\"\n\t\t# Append expected req and resp to string for report\n\n\t\t#parse req for ID\n\t\tprint('\\033[92m'+parse_xml_to_pretty_string(ereq)+'\\033[0m')\n\t\tparse_xml_unique_id(ereq, idStore, idtemplate)\n\t\tparse_xml_timestamp(ereq)\n\t\tif len(otp) > 0:\n\t\t\tparse_xml_otp(ereq, otp)\n\t\tresults += parse_xml_to_pretty_string(ereq)\n\t\tprint(idStore)\n\n\t\t#Parse xml to TTLV and send to HSM\n\t\tsend = parse_xml_to_pretty_string(ereq)\n\t\treceived = send_receive_https(conn, send)\n\t\t\n\t\t#Parse response to store IDs and append to report\n\t\tresponse = ElementTree.fromstring(received)\n\t\tprint('\\033[94m'+parse_xml_to_pretty_string(response)+'\\033[0m')\n\t\tparse_xml_unique_id(response, idStore, idtemplate)\n\t\tparse_xml_unique_id(eres, idStore, idtemplate)\n\t\tresults += parse_xml_to_pretty_string(response)\n\t\tresults += \"\\n\\\\newpage\\n\"\n\t\t\n\t\tprint(idStore)\n\t\tdisconnect(sock)\n\t\twriteToFile(expectedResults.replace(\"$\", \"\"), PATH, \"expected.tex\")\n\t\twriteToFile(results, PATH, \"results.tex\")\n\texit()", "def runner(id = -1, filename = 'twitter.log', direction = 'past'):\n if os.path.isfile(filename):\n fp = open(filename, 'a+')\n else:\n fp = open(filename, 'w+')\n fp.write(xml_header)\n fp.close()\n fp = open(filename, 'a+')\n try:\n xml = 'initial string...'\n if id==-1:\n print '...done'\n return False\n else:\n print direction + \" : \" + str(id)\n if direction == 'past':\n xml = _past_retreiver(id)\n elif direction == 'future':\n xml = _future_retreiver(id)\n else:\n return id\n fp.write(xml)\n fp.close()\n\n min_id = minimum_id(xml)\n print 'minimum id : ' + min_id\n \n sleep(interval)\n print 'passing ' + str(int(min_id)-1)\n return int(min_id)-1\n\n # Exception is for \"Twitter is over capacity\"\n except IndexError, e:\n print xml + ' -> ' + str(e)\n fp.close()\n sleep(interval)\n return id\n \n except ValueError, e:\n print xml + ' -> ' + str(e)\n fp.close()\n sleep(interval)\n return False\n\n except Exception, e:\n print xml + ' -> ' + str(e)\n fp.close()\n sleep(interval)\n return id", "def 
packetRespond(self,freq, time, repeats, period, responseID, respondPacket,listenID, listenPacket = None):\n \n \n self.client.serInit()\n self.spitSetup(freq)\n \n #formulate response packet\n SIDhigh = (responseID >> 3) & 0xFF; # get SID bits 10:3, rotate them to bits 7:0\n SIDlow = (responseID & 0x07) << 5; # get SID bits 2:0, rotate them to bits 7:5\n #resPacket[0] = SIDhigh\n #resPacket[1] = SIDlow\n resPacket = [SIDhigh, SIDlow, 0x00,0x00, # pad out EID regs\n 0x08, # bit 6 must be set to 0 for data frame (1 for RTR) \n # lower nibble is DLC \n respondPacket[0],respondPacket[1],respondPacket[2],respondPacket[3],respondPacket[4],respondPacket[5],respondPacket[6],respondPacket[7]]\n #load packet/send once\n \"\"\"@todo: make this only load the data onto the chip and not send \"\"\"\n self.client.txpacket(resPacket) \n self.addFilter([listenID,listenID,listenID,listenID, listenID, listenID]) #listen only for this packet\n startTime = tT.time()\n packet = None\n while( (tT.time() - startTime) < time):\n packet = self.client.rxpacket()\n if( packet != None):\n print \"packet read in, responding now\"\n # assume the ids already match since we are filtering for the id\n \n #compare packet received to desired packet\n if( listenPacket == None): # no packets given, just want the id\n for i in range(0,repeats):\n self.client.MCPrts(TXB0=True)\n tT.sleep(period/1000)\n else: #compare packets\n sid = ord(packet[0])<<3 | ord(packet[1])>>5\n print \"standard id of packet recieved: \", sid #standard ID\n msg = \"\"\n for i in range(0,8):\n idx = 5 + i\n byteIn = ord(packet[idx])\n msg += \" %d\" %byteIn\n compareIn = listenPacket[i]\n print byteIn, compareIn\n if( byteIn != compareIn):\n packet == None\n print \"packet did not match\"\n break\n print msg\n if( packet != None ):\n self.client.MCPrts(TXB0=True)\n tT.sleep(period/1000)\n print \"Response Listening Terminated.\"", "def send(self, seq_number, *voltage_list):\r\n\r\n timestamp = time.perf_counter()\r\n volt_list = list()\r\n for volt in voltage_list:\r\n volt_list.append(volt)\r\n try:\r\n self.sock.sendto(struct.pack(packet.H2R_PACKET_FORMAT, seq_number, packet.time2int(timestamp), *volt_list),\r\n (self.robot_ip, self.robot_port))\r\n\r\n self.tx_cntr.inc()\r\n\r\n self.tastx_ks[seq_number + 1] = timestamp\r\n\r\n try:\r\n tsr_k = self.tsr_ks[seq_number]\r\n tsstx_k = self.tsstx_ks[seq_number]\r\n tssrx_k = self.tssrx_ks[seq_number]\r\n tastx_k = self.tastx_ks[seq_number]\r\n tasrx_k = self.tasrx_ks[seq_number]\r\n taw_k = self.taw_ks[seq_number]\r\n\r\n if packet.time2int(tasrx_k) is not 0:\r\n self.tsr_k_logger.timestamp(timestamp=tsr_k, value=tsr_k)\r\n self.tsstx_k_logger.timestamp(timestamp=tsstx_k, value=tsstx_k)\r\n\r\n self.tssrx_k_logger.timestamp(timestamp=tssrx_k, value=tssrx_k)\r\n self.tastx_k_logger.timestamp(timestamp=tastx_k, value=tastx_k)\r\n\r\n self.tasrx_k_logger.timestamp(timestamp=tasrx_k, value=tasrx_k)\r\n self.taw_k_logger.timestamp(timestamp=taw_k, value=taw_k)\r\n\r\n del self.tsr_ks[seq_number - 1]\r\n del self.tsstx_ks[seq_number - 1]\r\n\r\n del self.tasrx_ks[seq_number - 1]\r\n del self.taw_ks[seq_number - 1]\r\n\r\n except KeyError:\r\n logging.debug(\"Packet not found\")\r\n\r\n except socket.error:\r\n logging.error('Tx error')\r\n return", "def write_file(origin,dest):\n file_name = 'traveltimes.csv'\n f = open('traveltimes.csv','w') #changed from 'wb' to 'w'\n wr = csv.writer(f)\n\n wr.writerow( 
('Timestamp','Origin_Lat','Origin_Lon','Dest_Lat','Dest_Lon','Travel_Time_Str','Travel_Time_Sec','Distance_Str','Distance_M') )\n\n while True:\n\n start = time.time()\n\n for node_o in origin:\n for node_d in dest:\n\n try:\n timeStamp = int(time.time())\n (timeStr,timeSec,distStr,distM) = getTravelTime(node_o.lat,node_o.lon,node_d.lat,node_d.lon)\n print((timeStamp,node_o.lat,node_o.lon,node_d.lat,node_d.lon,timeStr,timeSec,distStr,distM))\n wr.writerow( (timeStamp,node_o.lat,node_o.lon,node_d.lat,node_d.lon,timeStr,timeSec,distStr,distM) )\n except:\n print ('No response received...') #added parentheses\n continue\n \n time.sleep(1)\n \n while time.time() - start < 3600:\n time.sleep(2)\n print(('Waiting: %f') % ((time.time() - start)/60)) #added parentheses'''\n break\n break\n return file_name", "def _sample_failed_report(imissfile: str=\"plink.imiss\", lmissfile: str=\"plink.lmiss\",\n sexcheckfile: str=\"plink.sexcheck\",\n hetfailedfile: str=\"het_fail_ind.txt\",\n ibdfile: str=\"pihat_min0.2_in_founders.genome\",\n write: bool=True):\n\n ids = {}\n ids_list = []\n\n # SNP missingness\n imiss = pd.read_csv(imissfile, delimiter=\" \", skipinitialspace=True)\n lmiss = pd.read_csv(lmissfile, delimiter=\" \", skipinitialspace=True)\n ind_missing_filtered = calculate_missingness(imiss, 'F_MISS', 0.2)\n missing_ids = get_sample_ids(imiss, 'IID', ind_missing_filtered)\n\n ids['missing'] = missing_ids.tolist()\n ids_list.append(missing_ids.tolist())\n\n # mismatched sex\n sex = pd.read_csv(\"plink.sexcheck\", delimiter=\" \", skipinitialspace=True)\n sex_mismatches = sex.loc[sex['STATUS'] == \"PROBLEM\"]\n sex_mismatches_counts = sex['STATUS'].value_counts()\n sex_mismatches_ids = sex_mismatches['IID'].tolist()\n\n ids['sex_mismatches'] = sex_mismatches_ids\n ids_list.append(sex_mismatches_ids)\n\n # outlying heterozygosity\n het_failed = pd.read_csv(\"het_fail_ind.txt\", delimiter=\" \")\n het_failed_ids = het_failed['IID'].tolist()\n\n ids['het_failed'] = het_failed_ids\n ids_list.append(het_failed_ids)\n\n # high IBD - pi_hat threshold\n ibd = pd.read_csv(\"pihat_min0.2_in_founders.genome\", delimiter=\" \", skipinitialspace=True)\n ibd_ids = ibd['IID1'].tolist()\n\n ids['relatedness_failed'] = ibd_ids\n ids_list.append(ibd_ids)\n\n # graph everything\n tests = ['SNP Missingness', 'Sex Mismatches', 'Outlying Heterozygosity', 'Cryptic Relatedness']\n fail_counts = [len(missing_ids), len(sex_mismatches_ids), len(het_failed_ids), len(ibd_ids)]\n total_fails = set(x for l in ids_list for x in l)\n print(\"total samples failed: {}/{}\".format(len(total_fails), imiss.shape[0]))\n\n fig = plt.figure(figsize=(8,6))\n plt.tight_layout()\n plt.bar(x=tests, height=fail_counts)\n plt.title(\"Samples failing QC checks (total: {}/{})\".format(len(total_fails), imiss.shape[0]))\n plt.xlabel(\"QC Test\")\n plt.ylabel(\"Number of samples\")\n plt.tick_params(axis='x', rotation=90)\n\n if write:\n write_fail_file(ids, \"failed_sample_ids\")\n return fig", "def Conn_analysis(self, hazard, path_to_guids, retrofit_key, eretrofit, n_samples):\n\t\tfor fast in range(2):\n\t\t\tif fast == 0:\n\t\t\t\tn_workers = 32\n\t\t\t\tfast_mult = 1.\n\t\t\telif fast == 1: \n\t\t\t\tn_workers = 64\n\t\t\t\tfast_mult = 0.5\n\t\t\t\"\"\" using the probability of failure, rather than leak/break.\n\t\t\t\tassuming that the repair rate is the average of the leak/break\n\t\t\t\trepair rates from hazus.\n\t\t\t\t\t\tbreak \tleak\tavg.\n\t\t\t\t> 20\" - 0.33 \t0.66\t0.5\n\t\t\t\t< 20\"\t0.5\t\t1.0 
\t0.75\n\t\t\t\"\"\"\n\t\t\tpipe_reprate = [0.5, 0.75]\t# Fixed pipes per Day per Worker (>20\", <20\" diameter)\n\n\t\t\t# repair time parameters for roads\n\t\t\tif hazard == 'eq':\n\t\t\t\twtp_rep_time_mu = np.array([0.9, 1.9, 32, 95])*fast_mult # mean repair time for water treatement plants for DS2-DS5\n\t\t\t\twtp_rep_time_std = np.array([0.3, 1.2, 31, 65])*fast_mult # std for repair time\t\t\n\n\t\t\t\twps_rep_time_mu = np.array([0.9, 3.1, 13.5, 35])*fast_mult # mean repair time for water treatement plants for DS2-DS5\n\t\t\t\twps_rep_time_std = np.array([0.3, 2.7, 10, 18])*fast_mult # std for repair time\t\t\n\n\t\t\telif hazard == 'tsu':\n\t\t\t\twtp_rep_time_mu = np.array([1, 6, 20, 90])*fast_mult # mean repair time for water treatement plants for DS2-DS5\n\t\t\t\twtp_rep_time_std = np.array([1, 6, 20, 90])*fast_mult # std for repair time\t\t\n\n\t\t\t\twps_rep_time_mu = np.array([1, 6, 20, 240])*fast_mult # mean repair time for water treatement plants for DS2-DS5\n\t\t\t\twps_rep_time_std = np.array([1, 6, 20, 120])*fast_mult # std for repair time\t\t\n\n\t\t\telif hazard == 'cumulative':\n\t\t\t\t\"\"\" assuming that the repair time parameters for cumulative \n\t\t\t\t\tdamage are the max of eq and tsu. \"\"\"\n\t\t\t\twtp_rep_time_mu = np.array([1, 6, 32, 95])*fast_mult\n\t\t\t\twtp_rep_time_std = np.array([1, 6, 31, 65])*fast_mult\n\n\t\t\t\twps_rep_time_mu = np.array([1, 6, 20, 240])*fast_mult\n\t\t\t\twps_rep_time_std = np.array([1, 6, 20, 120])*fast_mult\n\n\t\t\twtp_rep_time_cov = wtp_rep_time_std/wtp_rep_time_mu # COV of repiar time\n\t\t\twtp_rep_time_log_med = np.log(wtp_rep_time_mu/np.sqrt(wtp_rep_time_cov**2+1)) # lognormal parameters for repair time model\n\t\t\twtp_rep_time_beta = np.sqrt(np.log(wtp_rep_time_cov**2+1))\n\t\t\twtp_rep_time_covm = wtp_rep_time_beta[:,None]*wtp_rep_time_beta\n\n\t\t\twps_rep_time_cov = wps_rep_time_std/wps_rep_time_mu # COV of repiar time\n\t\t\twps_rep_time_log_med = np.log(wps_rep_time_mu/np.sqrt(wps_rep_time_cov**2+1)) # lognormal parameters for repair time model\n\t\t\twps_rep_time_beta = np.sqrt(np.log(wps_rep_time_cov**2+1))\n\t\t\twps_rep_time_covm = wps_rep_time_beta[:,None]*wps_rep_time_beta\n\n\t\t\trts = [100, 250, 500, 1000, 2500, 5000, 10000]\n\t\t\tcolumn_keys = ['iter_{}' .format(i) for i in range(n_samples)]\n\t\t\tguids = os.listdir(path_to_guids)\n\n\t\t\tbldg_dataset_id = \"5df40388b9219c06cf8b0c80\" # building dataset\n\t\t\tpipe_dataset_id = \"5d2666b5b9219c3c5595ee65\" # water pipes\n\t\t\twterfclty_dataset_id = \"5d266507b9219c3c5595270c\"\n\t\t\tbldg_to_network_id = \"5f171ffbc98cf43417c21381\" # links buildings to road edges\n\t\t\t\n\t\t\t\"\"\" the way critical nodes is setup is best given through an example:\n\t\t\t\twith the setup below, the connectivity analysis\n\t\t\t\tdetermines whether each tax-lot is connected to:\n\t\t\t\t\t- (node 229 OR node 230) AND (node 300)\n\t\t\t\t\n\t\t\t\tso the nodes in each inner lists undergo a logical_or \n\t\t\t\tstatement, whereas these results undergo a logical_and.\n\n\t\t\t\"\"\"\n\n\t\t\tconn = WterConnectivity(self.client)\n\n\t\t\tconn.load_remote_input_dataset(\"buildings\", bldg_dataset_id)\n\t\t\tconn.load_remote_input_dataset(\"pipe_dataset\", pipe_dataset_id)\n\t\t\tconn.load_remote_input_dataset(\"wterfclty_dataset\", wterfclty_dataset_id)\n\t\t\tconn.load_remote_input_dataset(\"building_to_network\", bldg_to_network_id)\n\n\t\t\tconn.set_parameter('n_workers', n_workers)\n\t\t\tconn.set_parameter('pipe_reprate', 
pipe_reprate)\n\t\t\tconn.set_parameter('wtp_rep_time_log_med', wtp_rep_time_log_med)\n\t\t\tconn.set_parameter('wtp_rep_time_covm', wtp_rep_time_covm)\n\t\t\tconn.set_parameter('wps_rep_time_log_med', wps_rep_time_log_med)\n\t\t\tconn.set_parameter('wps_rep_time_covm', wps_rep_time_covm)\n\n\t\t\tfor efast in range(2):\n\t\t\t\t# --- performing connectivity analysis\n\t\t\t\tfunc = {}\n\t\t\t\trep = {}\n\t\t\t\tfor rt_i, rt in enumerate(rts):\n\t\t\t\t\tprint_msg = '\\tconn_analysis: {}, rt_{}, {}, fast{}, eretrofit{}, efast{}:' \\\n\t\t\t\t\t\t\t\t\t.format(hazard, rt, retrofit_key, fast, eretrofit, efast)\n\t\t\t\t\t\n\t\t\t\t\tconn.set_parameter('prnt_msg', print_msg)\n\n\t\t\t\t\twter2elec_func = 'func_cumulative_{}yr_wter2elec_eretrofit{}_efast{}.csv' \\\n\t\t\t\t\t\t\t\t\t.format(rt, eretrofit, efast)\n\t\t\t\t\twter2elec_func = os.path.join(self.output_path,'..','wter2elec',wter2elec_func)\n\n\t\t\t\t\twter2elec_rept = 'reptime_cumulative_{}yr_wter2elec_eretrofit{}_efast{}.csv' \\\n\t\t\t\t\t\t\t\t\t.format(rt, eretrofit, efast)\n\t\t\t\t\twter2elec_rept = os.path.join(self.output_path,'..','wter2elec',wter2elec_rept)\n\n\t\t\t\t\tpipe_dmg_file = 'pipe_DS_{}_{}yr_{}.csv' .format(hazard, rt, retrofit_key)\n\t\t\t\t\tpipe_dmg_file = os.path.join(self.mc_path, pipe_dmg_file)\n\t\t\t\t\t\n\t\t\t\t\twterfclty_dmg_file = 'wterfclty_DS_{}_{}yr_{}.csv' .format(hazard, rt, retrofit_key)\n\t\t\t\t\twterfclty_dmg_file = os.path.join(self.mc_path, wterfclty_dmg_file)\n\n\t\t\t\t\t# ---\n\t\t\t\t\twter2elec_func_dset = Dataset.from_file(wter2elec_func, \"ergo:DamageInventory\")\n\t\t\t\t\tconn.set_input_dataset(\"wter2elec_func\", wter2elec_func_dset)\n\n\t\t\t\t\twter2elec_rept_dset = Dataset.from_file(wter2elec_rept, \"ergo:DamageInventory\")\n\t\t\t\t\tconn.set_input_dataset(\"wter2elec_rep\", wter2elec_rept_dset)\n\t\t\t\t\t\n\t\t\t\t\tpipe_dmg_dset = Dataset.from_file(pipe_dmg_file, \"ergo:DamageInventory\")\n\t\t\t\t\tconn.set_input_dataset(\"pipe_dmg\", pipe_dmg_dset)\n\n\t\t\t\t\twterfclty_damage_dataset = Dataset.from_file(wterfclty_dmg_file, \"ergo:DamageInventory\")\n\t\t\t\t\tconn.set_input_dataset(\"wterfclty_dmg\", wterfclty_damage_dataset)\n\t\t\t\t\t\n\t\t\t\t\tfunc[rt], rep[rt] = conn.WterConn_run()\n\t\t\t\t\t\n\t\t\t\t\t# temp_func = func[rt].head(5)\n\t\t\t\t\t# temp_rep = rep[rt].head(5)\n\t\t\t\t\t# print(temp_func.mean(axis=1))\n\t\t\t\t\t# print(temp_rep.mean(axis=1))\n\n\t\t\t\t# --- writing results for each guid\n\t\t\t\tfor guid_i, guid in enumerate(guids):\n\t\t\t\t\tprnt_msg = 'writing {} guids' .format(len(guids))\n\t\t\t\t\tself.print_percent_complete(prnt_msg, guid_i, len(guids))\n\n\t\t\t\t\to_path = os.path.join(path_to_guids, \n\t\t\t\t\t\t\t\t\t\t guid, \n\t\t\t\t\t\t\t\t\t\t 'mc_results', \n\t\t\t\t\t\t\t\t\t\t 'water',\n\t\t\t\t\t\t\t\t\t\t )\n\t\t\t\t\tif not os.path.exists(o_path):\n\t\t\t\t\t\tos.makedirs(o_path)\n\n\t\t\t\t\to_file_func = os.path.join(o_path, \n\t\t\t\t\t\t\t\t\t\t 'func_{}_wter_{}_fast{}_eretrofit{}_efast{}.gz' \n\t\t\t\t\t\t\t\t\t\t .format(hazard, retrofit_key, fast, eretrofit, efast))\n\t\t\t\t\t\n\t\t\t\t\to_file_rep = os.path.join(o_path, \n\t\t\t\t\t\t\t\t\t\t 'reptime_{}_wter_{}_fast{}_eretrofit{}_efast{}.gz' \n\t\t\t\t\t\t\t\t\t\t .format(hazard, retrofit_key, fast, eretrofit, efast))\n\n\t\t\t\t\ttemp_data_func = np.zeros((len(rts), n_samples))\n\t\t\t\t\ttemp_data_rep = np.zeros((len(rts), n_samples))\n\t\t\t\t\tfor rt_i, rt in enumerate(rts):\n\t\t\t\t\t\ttemp_data_func[rt_i] = 
func[rt].loc[guid]\n\t\t\t\t\t\ttemp_data_rep[rt_i] = rep[rt].loc[guid]\n\n\t\t\t\t\to_df_func = pd.DataFrame(temp_data_func, index=rts, columns=column_keys)\n\t\t\t\t\to_df_func.to_csv(o_file_func, compression='gzip')\n\n\t\t\t\t\to_df_rep = pd.DataFrame(temp_data_rep, index=rts, columns=column_keys)\n\t\t\t\t\to_df_rep.to_csv(o_file_rep, compression='gzip')", "def download_assignments(opener, fasta_fname, interval=3):\n params = {\"file\" : open(fasta_fname, \"rb\") }\n #submit and refresh until processed\n result = opener.open(rdp_base+servlet, params)\n while is_processing(result):\n sleep(interval)\n result = opener.open(rdp_base + check_page)\n\n #download the detailed text result\n result = opener.open(rdp_base + get_download_url(result))\n return result", "def open_service_loop(self):\n\t\n\tprint \"Attempting to receive file\", self.file_read, \"from\", self.ip, \"at port\", self.port, \".\" \n\trecv_data = None\n\tnum_retransmits = 0\n\t#Start timer, retransmit after each timeout of one second. If receive response within the timer, move on to next step. \n\t#Limit number of retransmits to 60 so as not to enter infinite loop.\n\twhile(num_retransmits < 60):\n\t num_retransmits += 1\n\t self.send_open_request()\n\n\t input_socket = [self.client_socket]\n\t inputready,outputready,exceptready = select.select(input_socket,[],[], 1)\n\t #if timer expires without input becoming ready, empty list is returned. So go to next iteration of loop (retransmit)\n\t if (inputready == []):\n\t\tcontinue\n\t else:\n\t\ttry:\n\t\t recv_data = self.client_socket.recv(self.buffer_)\n\t\texcept Exception as exception_:\n\t\t print(\"Wrong port number or IP address provided, or server is not available at the moment.\")\n\t\t sys.exit()\n\t\tprint(\"Received a packet.\")\n\t\t\n\t\t#Generate a random number between 0 and 1 with uniform distribution to simulate packet loss.\n\t\tif (random.uniform(0,1) < self.p):\n\t\t recv_data = None\n\t\t print(\"Packet dropped randomly to simulate packet losses\")\n\t\t continue\n\t\t\n\t\tbit_signature = recv_data[0:4]\n\t\tresponse_type = recv_data[4:8]\n\t\trecv_payload = recv_data[8:]\n\n\t\t#Check that bit signature is valid (packet is from our network)\n\t\tif bit_signature != \"\\x00\\x00\\x00\\r\": \n\t\t recv_invalid_response(recv_data, \"bit_signature\")\n\t\t continue\n\t\telse:\n\t\t #We have only ever sent a open_request, so the only viable response at this point is an open_response. \n\t\t #If this field contains anything else, it is an invalid packet. Retransmit request.\n\t\t if response_type != \"\\x00\\x00\\x00\\x08\": \n\t\t\tself.recv_invalid_response(recv_data, \"response_type\")\n\t\t\tcontinue\t\t\n\t\t else:\n\t\t\t#Bit signature and response type fields are both valid.\n\t\t\tprint(\"Received open response from server...\")\n\t\t\tself.recv_open_response(recv_payload)\n\t\t\tbreak\n\t\n\tif (num_retransmits >= 60):\n\t print (\"Exceeded number of retransmissions allowed. 
Exiting program.\")\n\t sys.exit()\t\n\treturn", "def send_pulses(index):\n path = str(DAL.__file__[:-7])\n conn = DAL.connect(path + r'\\DBProject.db')\n patients = [item[0] for item in DAL.get_patient(conn)]\n\n while get_running():\n time.sleep(1)\n thread_input = get_thread_input(index)\n with PRINT_LOCK:\n print(\"Thread num:\", index, \", input: \", thread_input)\n data = {}\n #TO DO-complete thread_input =1,2,3\n data[\"input\"] = thread_input\n data[\"client_num\"] = patients[index]\n data[\"position\"] = \"123\"\n data[\"event_time\"] = datetime.datetime.now()\n data[\"value\"] = \"123\"\n\n\n\n #{\"input\": thread_input, \"client num\": index, \"start_time\": \"123\"}\n requests.post(\n f\"http://{SERVER_IP}:{SERVER_PORT}/add_data\", data)\n if (thread_input==b\"2\"):\n change_thread_input(index)", "def worker_file_write(self):\r\n\r\n \"\"\" time-based file naming\"\"\"\r\n time_now = time.ctime().lower()\r\n str_time = time_now[4:7] + time_now[8:10] + '_' + time_now[11:13] + time_now[14:16] + '_'\r\n # file format: mrr_mmmdd_hhmm_filname.dat\r\n full_f_name = 'C:\\\\work\\\\rcube_extract\\\\demo_project\\\\captured_data\\\\soheil_rcube\\\\demo_' + str_time + 'out' + self.data_file_name + '.dat'\r\n \r\n while self.active_flag.is_set():\r\n self.data_recieved_flag.wait()\r\n \r\n file_dat = open(full_f_name,'ab+')\r\n # file_dat = open('captured_data\\Record_' + self.radar_name + '_' + str_time + '.dat', 'ab+')\r\n \r\n # self.buffer_busy_flag.wait();\r\n self.buffer_busy_flag.clear()\r\n tmpdat = self.recieved_data\r\n # self.recieved_data=b'';\r\n self.buffer_busy_flag.set()\r\n file_dat.write(tmpdat)\r\n # print(tmpdat)\r\n # file_dat.write(self.recieved_data)\r\n file_dat.close()\r\n # self.recieved_data=b'';\r\n self.data_recieved_flag.clear()\r\n return", "def generalFuzz(self,freq, Fuzzes, period, writesPerFuzz):\n #print \"Fuzzing on standard ID: %d\" %standardId\n self.client.serInit()\n self.spitSetup(freq)\n packet = [0,0,0x00,0x00,0x08,0,0,0,0,0,0,0,0] #empty template\n \n #get folder information (based on today's date)\n now = datetime.datetime.now()\n datestr = now.strftime(\"%Y%m%d\")\n path = self.DATA_LOCATION+\"InjectedData/\"+datestr+\"_GenerationFuzzedPackets.csv\"\n filename = path\n outfile = open(filename,'a');\n dataWriter = csv.writer(outfile,delimiter=',');\n #dataWriter.writerow(['# Time Error Bytes 1-13']);\n #dataWriter.writerow(['#' + description])\n \n fuzzNumber = 0; #: counts the number of packets we have generated\n while( fuzzNumber < Fuzzes):\n #generate new random standard id in the full range of possible values\n id_new = random.randint(0,4095) \n #print id_new\n #### split SID into different regs\n SIDhigh = (id_new >> 3) & 0xFF; # get SID bits 10:3, rotate them to bits 7:0\n SIDlow = (id_new & 0x07) << 5; # get SID bits 2:0, rotate them to bits 7:5\n packet[0] = SIDhigh\n packet[1] = SIDlow\n \n #generate a fuzzed packet\n for i in range(0,8): # for each data byte, fuzz it\n idx = \"db%d\"%i\n \n value = random.randint(0, 255) #generate pseudo-random integer value\n packet[i+5] = value\n #print packet\n #put a rough time stamp on the data and get all the data bytes \n row = [time.time(), id_new,8] \n \"\"\"@todo: allow for varied packet lengths\"\"\"\n msg = \"Injecting: \"\n for i in range(5,13):\n row.append(packet[i])\n msg += \" %d\"%packet[i]\n #print msg\n dataWriter.writerow(row)\n self.client.txpacket(packet)\n time.sleep(period/1000)\n \n #inject the packet the given number of times. 
\n for i in range(1,writesPerFuzz):\n self.client.MCPrts(TXB0=True)\n time.sleep(period/1000)\n fuzzNumber += 1\n print \"Fuzzing Complete\" \n outfile.close()", "async def _async_refresh_device_detail_by_ids(self, device_ids_list):\n for device_id in device_ids_list:\n try:\n await self._async_refresh_device_detail_by_id(device_id)\n except asyncio.TimeoutError:\n _LOGGER.warning(\n \"Timed out calling august api during refresh of device: %s\",\n device_id,\n )\n except (ClientResponseError, CannotConnect) as err:\n _LOGGER.warning(\n \"Error from august api during refresh of device: %s\",\n device_id,\n exc_info=err,\n )", "def transfers(league_name, league_id, start, stop):\r\n try:\r\n for i in range(start, stop + 1):\r\n league_transfers = []\r\n season_id = str(i)\r\n for window in ['e', 'i']:\r\n league_transfers.append(scrape_season_transfers(league_name, league_id, season_id, window))\r\n sleep(3)\r\n df = pd.concat(league_transfers)\r\n df = df[~df['Name'].isna()]\r\n df.reset_index(drop=True, inplace=True)\r\n export_csv(df, season_id, league_name, league_id)\r\n except TypeError:\r\n print(\"Make sure league parameters are STRINGS and years are INTEGERS.\")", "def collect_data(table_name):\n global NTU2_Med\n global NTU3_Med\n # Start date for data collection, should be fifteen minutes in the past\n start_date_form = datetime.now() - timedelta(minutes=15)\n\n # End date for data collection, should be now, to complete our 15 minute interval\n end_date_form = datetime.now()\n \n # Check which platform program is running on, if windows treat as binary\n if platform == 'Linux':\n table_file = os.open(table_name + '.csv', os.O_WRONLY | os.O_APPEND | os.O_CREAT)\n else:\n table_file = os.open(table_name + '.csv', os.O_BINARY | os.O_WRONLY | os.O_APPEND | os.O_CREAT)\n \n #Pull data from table on logger\n table_data = device.get_data(table_name, start_date_form, end_date_form)\n\n # Get 15 minute medians\n if table_name == \"Table15min\":\n # Iterate through table data, and set medians\n for i in table_data:\n NTU2_Med = int(i['TurbNTU2_Med'])\n output = \"NTU2_Med: \" + str(i['TurbNTU2_Med']) + \"\\n\" \n print(output)\n os.write(log_file, output)\n NTU3_Med = int(i['TurbNTU3_Med'])\n output = \"NTU3_Med: \" + str(i['TurbNTU3_Med']) + \"\\n\"\n print(output)\n os.write(log_file, output)\n # Set headers if applicable and convert dictionary to csv file\n if has_ran:\n output = \"Script has already ran at least once\\n\"\n os.write(log_file, output)\n table_csv = utils.dict_to_csv(table_data, \",\", header=False)\n else:\n output = \"Script has not already ran\\n\"\n os.write(log_file, output)\n table_csv = utils.dict_to_csv(table_data, \",\", header=True)\n\n output = \"Writing file to local storage\\n\"\n os.write(log_file, output)\n\n # Write table file to system\n os.write(table_file, table_csv.encode('UTF-8'))\n\n #Close file descriptor\n os.close(table_file)\n\n output = \"uploading file to server\\n\"\n os.write(log_file, output)\n\n # Upload/Append data to server\n put_data(table_name)\n\n output = \"Wrote file to server\\n\"\n os.write(log_file, output)\n\n return 0", "def receiver_chain(self,h):\n\n self.host = h\n\n n = self.data.get_rx_number()\n beacon_packet = self.data.get_beacon_packet()\n time_base = self.data.get_timestamp_base()\n\n # lists containing data for all current teams\n team_id = self.data.get_rx_team_id()\n location = self.data.get_rx_location()\n if ENABLE_LOCATION_HISTORY:\n self.record_location_history(location)\n tof = self.data.get_rx_time_delay()\n\n 
if self.DEBUG:\n print \"\\n\\n\\n\\n\\n\\nretrieve location: \", location\n print ''\n print \"type(tof): \", type(tof)\n\n\n\n conn = psycopg2.connect(host = self.host,\n user = \"sdrc_user\",\n password = \"sdrc_pass\",\n database = \"sdrc_db\")\n\n cur = conn.cursor()\n\n\n for i in range(n):\n f = open('data_in.data', 'a')\n\n (rx_pktno,) = struct.unpack('!H', beacon_packet[0:2])\n (beacon_ID,) = struct.unpack('!H', beacon_packet[2:4])\n\n # packet number\n payload1 = struct.pack('!H', self.packet_number & 0xffff)\n f.write(str(self.packet_number) + ';')\n\n # team id\n ident = team_id[i]\n payload2 = struct.pack('!H', ident & 0xffff)\n f.write(str(ident) + ';')\n\n # location\n if (self.iterator == 1):\n loc = location[i]\n else:\n # old_loc = location[i]\n # loc = alex_random.random_move(old_loc)\n loc = alex_random.get_random_coord()\n self.data.set_rx_location(i,loc)\n\n f.write(str(loc)+';')\n\n self.iterator += 1\n payload3 = new_sim_utils.pack_loc(loc)\n \n\n # toa\n t = tof[i]\n toa = time_base + t\n # if (ENABLE_JITTER):\n # jitter = self.random_timing_jitter()\n # toa = toa+jitter\n # else:\n # pass\n if self.DEBUG:\n print \"t = tof[i]: \", repr(t)\n print \"type(t): \", type (t)\n print \"toa = time_base + t: \", repr(toa)\n print \"type(toa): \", type(toa)\n payload4 = new_sim_utils.pack_time(toa)\n\n f.write(repr(toa)+';')\n\n\n # beacon payload\n payload5 = struct.pack('!H', rx_pktno & 0xffff)\n f.write(str(rx_pktno) + ';')\n payload6 = struct.pack('!H', beacon_ID & 0xffff)\n f.write(str(beacon_ID) + '\\n')\n f.close()\n # check if packet dropped\n drop = self.drop_packet()\n # this if evaluates true even if drop == False\n # if (ENABLE_DROPPED_PACKETS and drop): # if drop == 'True'\n # print 'ENABLE_DROPPED_PACKETS ', ENABLE_DROPPED_PACKETS\n # print 'drop ', drop\n # print (ENABLE_DROPPED_PACKETS and drop)\n # print 'packet dropped'\n # payload = ''\n if ENABLE_DROPPED_PACKETS:\n print 'ENABLE_DROPPED_PACKETS ', ENABLE_DROPPED_PACKETS\n print 'drop ', drop\n if drop: # if drop == 'True'\n print 'drop ', drop\n print 'packet dropped'\n payload = ''\n else: # if drop == 'False'\n payload = (payload1 + payload2 +\n payload3 + payload4 +\n payload5 + payload6)\n else: # if drop == 'False'\n payload = (payload1 + payload2 +\n payload3 + payload4 +\n payload5 + payload6)\n\n\n print \"len(payload): \", len(payload)\n cur.execute(\"INSERT INTO blob_table (field_1) VALUES (%s)\", (psycopg2.Binary(payload),))\n\n\n conn.commit()\n cur.close() \n conn.close()\n\n self.packet_number += 1", "def on_slave_report(client_id, data):", "def write_report(self):\r\n self.end_time = time.strftime('%Y-%m-%d_%H:%M:%S')\r\n server_log.info('')\r\n server_log.info('=========================================================')\r\n server_log.info('All test clients completed!')\r\n server_log.info(' Start time: {}'.format(self.start_time))\r\n server_log.info(' End time: {}'.format(self.end_time))\r\n server_log.info('')\r\n server_log.info('Total of {} client(s) ran. 
Data for each client:'.format(len(self.client_list)))\r\n for client in self.client_list.values():\r\n server_log.info('---------------------------------------------------------')\r\n server_log.info(' Client {}'.format(client.client_id))\r\n server_log.info(' Test status: {}'.format(client.status))\r\n server_log.info(' Time ran: {:.2f} sec'.format(client.time_ran)) \r\n server_log.info(' Avg CPU usage: {:.2f}%'.format(client.cpu_avg))\r\n server_log.info(' Avg MEM usage: {:.2f}%'.format(client.mem_avg))\r\n server_log.info(' Files written: {}'.format(client.files_written))\r\n server_log.info(' File size: {}'.format(client.file_size))\r\n server_log.info(' Chunk size: {}'.format(client.chunk_size))\r\n server_log.info('=========================================================')\r\n server_log.info('')", "def get_intent_response(date_start_slot,date_end_slot):\n print(\"here\",date_start_slot,date_end_slot) \n if date_start_slot != 'NA' and date_end_slot != 'NA':\n speechOutput = re.sub(' +',' ','Parsing error Logs from '+ date_start_slot + ' to '+date_end_slot)\n\n elif date_start_slot == 'NA' and date_end_slot != 'NA':\n speechOutput = 'Parsing error logs at '+date_end_slot\n\n elif date_start_slot != 'NA' and date_end_slot == 'NA':\n speechOutput = 'Parsing error logs at '+date_start_slot\n\n else:\n speechOutput = 'Start and end times are unrecognizable'\n \n return_value = parseLogs(date_start_slot,date_end_slot,dataframe)\n logs = return_value[0]\n stats = return_value[1]\n \n with open('logs.txt','w') as f:\n for each in logs:\n f.write(str(each))\n\n with open('log_stats.txt','w') as f:\n with open('log_numbers.txt','w') as f_new:\n for each_key,each_value in stats.items():\n f.write(\"Time : \"+str(each_key)+'\\t'+\"Number of logs : \"+str(len(each_value))+\"\\n\\n\")\n f_new.write(\"Time : \"+str(each_key)+'\\t'+\"Number of logs : \"+str(len(each_value))+\"\\n\\n\")\n for each_logs in each_value:\n f.write(each_logs+\"\\n\")\n f.write(\"\\n\\n\\n\\n\")\n\n os.system(\"nohup python send_mail.py &\")\n \n return response(speech_response(speechOutput, True))", "def _route_chunk(data, host_url, annotations='duration', retries=10, extra_params=None):\n\t# offsets are used to make correct indice of the result dataframe\n\tsources, destinations, sources_offset, destinations_offset = data\n\tsources_count = len(sources)\n\tdestinations_count = len(destinations)\n\n\t# OSRM takes all points as one list, and then numbers of sources & dests in it\n\tall_points = sources + destinations\n\tencoded = encode_poly([(p.y, p.x) for p in all_points])\n\n\t# numerate sources & dests. sources come first\n\tsource_numbers = ';'.join(map(str, range(sources_count)))\n\tdestination_numbers = ';'.join(map(str,\n\t\trange(sources_count, sources_count + destinations_count)))\n\n\n\textra_params = extra_params or {}\n\tparams = {\n\t\t'sources': source_numbers,\n\t\t'destinations': destination_numbers,\n\t\t'generate_hints': 'false',\n\t\t'annotations': annotations,\n\t\t**extra_params\n\t}\n\n\tencoded_params = urllib.parse.quote_plus(urllib.parse.urlencode(params))\n\t# if we pass url and params separately to requests.get, it will make a malformed URL\n\tencoded_url = f'{host_url}/table/v1/driving/polyline({encoded})?{encoded_params}'\n\tresp = get_retry(encoded_url, {}, retries)\n\n\tif resp.status_code != 200:\n\t\traise RuntimeError(f'OSRM server responded with {resp.status_code} code. 
Content: {resp.content}')\n\n\tresp_data = resp.json()\n\tif resp_data.get('code', 'Ok') != 'Ok':\n\t\traise RuntimeError(f'OSRM server responded with error message: {resp_data[\"message\"]}')\n\n\t# if 'duration' is requested, then take resp_data['durations'], or resp_data['distances'] if distances.\n\t# also, 'duration,distance' might be requested, then take both and concatenate results (= join columns)\n\tresults = []\n\t\n\tfor key in annotations.split(','):\n\t\tdf = pd.DataFrame(resp_data[f'{key}s']).reset_index().rename(columns={'index': 'source'}).melt(id_vars='source', var_name='destination', value_name=key)\n\t\tdf[key] = df[key].astype(float)\n\t\tif len(results) > 0:\n\t\t\t# only append the data column\n\t\t\tresults.append(df[[key]])\n\t\telse:\n\t\t\tresults.append(df)\n\n\tresult_df = pd.concat(results, axis=1)\n\n\t# snapping distances\n\tresult_df['source_snap'] = result_df.source.map(pd.DataFrame(resp_data['sources'])['distance'])\n\tresult_df['destination_snap'] = result_df.destination.map(pd.DataFrame(resp_data['destinations'])['distance'])\n\n\t# instead of join/merge lookup\n\tresult_df['geometry'] = result_df['source'].map({i: g for i, g in enumerate(sources)})\n\tresult_df['geometry_dest'] = result_df['destination'].map({i: g for i, g in enumerate(destinations)})\n\n\t# shift back by the given offset\n\tresult_df['destination'] = result_df['destination'].astype(int) + destinations_offset\n\tresult_df['source'] = result_df['source'].astype(int) + sources_offset\n\treturn result_df", "def runIDS(date):\n createDirectories(date)\n input_bucket = 'emg-author-subreddit-pairs'\n output_bucket = 'emg-author-subreddit-pairs-ids'\n df = streamBlob(input_bucket, date)\n df = df.reset_index().astype({'author':str,'subreddit':str,'num_comments':int})\n\n print(\"getting subreddit ids\")\n subIds = sortedIds(df['subreddit'])\n df['subreddit_id'] = df['subreddit'].map(lambda x: subIds[x])\n\n print(\"getting author ids\")\n authorIds = sortedIds(df['author'])\n df['author_id']=df['author'].map(lambda x: authorIds[x])\n\n print(\"storing dataset w/ ids\")\n\n filename = cachePath(f\"\"\"{date}/author-subbreddit-pairs-IDs.gzip\"\"\")\n df.to_csv(filename,compression='gzip')\n\n uploadCommands(filename, output_bucket, date)", "def send_logs(self):\n for i in range(30):\n with open('{}-{}.log'.format(self._log_file_path, i), 'a') as log_file:\n for _ in range(self._log_rate):\n log_file.write(self._log_record + '\\n')", "def request_heartbeat_loop(serialport, seriallock, idlist_file):\n #Read list of slave IDs to get heartbeat from\n try:\n idfile = open(idlist_file,'r')\n except FileNotFoundError:\n raise\n\n idlist = []\n while True:\n idline = idfile.readline()\n #if EOF\n if len(idline) == 0:\n break\n #if line only contains whitespace\n elif idline.isspace():\n continue\n\n idline_list = idline.split('.')\n #if line starts with '#'\n if '#' in idline_list[0]:\n continue\n\n idline_list = list(map(int, idline_list))\n idlist.append( idline_list )\n\n idfile.close()\n success_slave = 0\n\n logging.debug(\"Heartbeat - Acquiring lock..\")\n seriallock.acquire()\n try:\n logging.debug(\"Heartbeat - Lock Acquired..\")\n time.sleep(2)\n for i in range(len(idlist)):\n group = idlist[i][0]\n unique = idlist[i][1]\n #get heartbeat from slave\n ret = request_heartbeat(serialport, group, unique)\n\n ids = str(group) + '.' 
+ str(unique)\n if ret == True:\n success_slave += 1\n idlist[i].append('Alive')\n logging.debug(ids + \" alive\")\n else:\n idlist[i].append('Dead')\n logging.debug(ids + \" dead\")\n finally:\n seriallock.release()\n logging.debug(\"Heartbeat - Released lock..\")\n\n #log slave status\n datetoday = str(datetime.date.today().strftime(\"%y%m%d\"))\n datetoday = datetoday + '.log'\n logging.debug(\"Logging to \" + datetoday)\n\n logfile = open('./log/'+ datetoday, 'a')\n logfile.write('\\n'+datetime.datetime.now().isoformat(' ') + '\\n')\n for i in range(len(idlist)):\n line = str(idlist[i][0])+'.'+str(idlist[i][1])+' \\t'+idlist[i][2]+'\\n'\n logfile.write(line)\n logfile.close()\n logging.debug(\"Heartbeat - Done. Ending thread.\")", "def SERIAL_RTRV_record(self):\n # Assumes records are separated by a '>' prompt.\n # Must be connected & operational\n if self.State == 0:\n # a slightly more informative result might help\n return []\n # max seconds to wait for data\n max_wait = 3\n # seconds to wait before trying again\n try_wait = 0.1\n tries = max_wait / try_wait\n # how much we have waited so far\n waited = 0\n # RECV\n raw_record = []\n # raw_record is a list of non-empty strings, \n # each string is a line of info from the reader\n word = ''\n linebuf = []\n while len(raw_record) < 1 :\n # we need to have something to reply.. \n #print \"chars waiting:\", self.Port.inWaiting()\n #sys.stdout.flush()\n while self.Port.inWaiting() > 0:\n while 1:\n # read 1 char at a time \n # until we get to the '>' prompt\n # \n c = self.Port.read(1)\n # \n if self.RecordTrace == 1:\n self.tf_out.write(c)\n # \n # we are done once we see the prompt\n if c == '>':\n if self.debug > 2 :\n print \"Raw Record: \",\n pprint.pprint(raw_record)\n return raw_record\n # \\r = CR , \\n = LF \n # (serial device uses CR + optionally LF, unix text only uses LF)\n # new array entry but only if there is something to add\n elif c == '\\r' or c == '\\n':\n if word != '':\n linebuf.append(word)\n word = ''\n if linebuf != []:\n raw_record.append(linebuf)\n linebuf = []\n # split line into words\n elif c == ' ':\n if word != '':\n linebuf.append(word)\n word = ''\n # all other chars\n else : \n word = word + c\n \n # wait a bit for the serial line to respond\n if self.debug > 1 :\n print \"NO DATA TO READ!!\"\n if waited < max_wait :\n waited += try_wait\n time.sleep(try_wait)\n else:\n self.recwaiting = 0\n return []", "def write_and_response(self, vsr, address_h, address_l, value_h, value_l, masked=True, delay=False):\n resp, reply = self.read_and_response(vsr, address_h, address_l)\n # print(\" WaR. RD1 reply: {} (resp: {})\".format(reply, resp))\n resp = resp[2:-1] # Extract payload\n if masked:\n value_h, value_l = self.mask_aspect_encoding(value_h, value_l, resp)\n # print(\" WaR Write: {0:X} {1:X} {2:X} {3:X} {4:X}\".format(vsr, address_h, address_l, value_h, value_l))\n self.send_cmd([vsr, 0x40, address_h, address_l, value_h, value_l])\n if delay:\n time.sleep(0.2)\n resp = self.read_response() # ie resp = [42, 144, 48, 49, 13]\n if delay:\n time.sleep(0.2)\n reply = resp[4:-1] # Omit start char, vsr & register addresses, and end char\n reply = \"{}\".format(''.join([chr(x) for x in reply])) # Turn list of integers into ASCII string\n # print(\" WR. 
RD2 reply: {} (resp: {})\".format(reply, resp)) # ie reply = '01'\n return resp, reply", "def receiver(ip, port, target_loc=None):\n sct = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sct.bind((ip, port))\n sct.listen(5)\n initialed = False\n thd_list = []\n total = None\n while True:\n conn, addr = sct.accept()\n if not initialed:\n ctrl_conn = conn\n buf = []\n end = -1\n while end == -1:\n bys = ctrl_conn.recv(4096)\n end = bys.find(b'\\n')\n buf.append(bys)\n file_name, total, _ = b\"\".join(buf).decode(\"utf8\").split(sep=',')\n total_thd = total = int(total)\n temp_dir = pathlib.Path(tempfile.mkdtemp(), file_name)\n os.mkdir(str(temp_dir))\n file_need = set(str(x) for x in range(0, total))\n initialed = True\n ctrl_conn.send(b\"\\n\")\n # print(file_need,total_thd)\n # sys.exit(0)\n else:\n thd = threading.Thread(group=None, target=receive,\n args=(conn, temp_dir))\n thd.start()\n thd_list.append(thd)\n\n if len(thd_list) >= total_thd:\n while True:\n if len(file_received) == total:\n break\n evnt.wait()\n file_remain = file_need - file_received - file_receiving\n # print(total_thd, \":\", file_received, \",\", file_receiving,\n # \",\", file_remain)\n # time.sleep(180)\n if file_remain:\n total_thd = len(file_remain)\n file_remain = \",\".join(file_remain)\n file_remain += \"\\n\"\n ctrl_conn.send(file_remain.encode())\n break\n else:\n for i in thd_list:\n i.join()\n thd_list.remove(i)\n if len(file_received) == total:\n break\n\n ctrl_conn.send(b\"\\n\")\n ctrl_conn.close()\n sct.close()\n with open(pathlib.Path(target_loc, file_name), 'wb') as f:\n for i in range(0, total):\n with open(pathlib.Path(temp_dir, str(i)), 'rb') as j:\n f.write(j.read())\n shutil.rmtree(str(temp_dir))", "def get_seq_and_id(fasta_file, promoter_seq, promoter_ids, threshold, scores_file, delimiter):\n\n map_txt = \"DDB_DDB_G/DDB-GeneID-UniProt.txt\"\n df = pd.read_csv(map_txt, sep=\"\\t\")\n ddb_id = list(df['DDBDDB ID'].as_matrix())\n ddb_g_id = list(df['DDB_G ID'].as_matrix())\n\n all_valid_records = get_data_target.get_ids(scores_file, delimiter, 'ID')\n print(all_valid_records)\n sequences = []\n record_ids = []\n for record in SeqIO.parse(fasta_file, \"fasta\"):\n record_id = str(record.id)\n end = record_id.find('|')\n record_id_short = record_id\n if end != -1:\n record_id_short = record_id[:end]\n print(record_id_short)\n try:\n ddbg_record_id_short = ddb_g_id[ddb_id.index(record_id_short)]\n except ValueError:\n ddbg_record_id_short = record_id_short\n if ddbg_record_id_short in all_valid_records:\n record_ids.append(ddbg_record_id_short)\n seq = str(record.seq)[-threshold:]\n sequences.append(seq)\n data_record_ids = pd.DataFrame({\"record_id\": record_ids})\n data_sequences = pd.DataFrame({\"record_sequence\": sequences})\n data_record_ids.to_csv(promoter_ids, index=False, header=False)\n data_sequences.to_csv(promoter_seq, index=False, header=False)", "def main():\n #refreshes mbta route list, isn't ideal, but simplest implementation at the moment\n fetch_mbta_routes()\n subway_route_list = mbta_route_list()\n #delays for 10 seconds to prevent exceeding api request limit\n time.sleep(10)\n\n for x in subway_route_list:\n #gets system time, system time is assumed to be in EST/EDT\n year = datetime.now().strftime('%Y')\n month = datetime.now().strftime('%m')\n day = datetime.now().strftime('%d')\n hour = datetime.now().strftime('%H')\n minute = datetime.now().strftime('%M')\n sec = datetime.now().strftime('%S')\n\n save_path = year + '/' + month + '/' + day + '/'\n file_name = hour + '.' 
+ minute + '.' + sec + '.txt'\n complete_path = save_path + file_name\n\n train_route = fetch_trains_per_route(x)\n\n if train_route == None:\n pass\n else:\n #checks if save path exists, creates save path if it doesn't exist\n if os.path.exists(save_path):\n f = open(complete_path, 'a')\n f.write(str(train_route))\n f.close()\n #delays loop for 10 seconds to prevent exceeding api request limit\n time.sleep(10)\n else:\n os.makedirs(save_path)\n f = open(complete_path, 'a')\n f.write(str(train_route))\n f.close()\n #delays loop for 10 seconds to prevent exceeding api request limit\n time.sleep(10)" ]
[ "0.5480776", "0.54575336", "0.53615844", "0.53565055", "0.51353186", "0.5096511", "0.5049164", "0.4956873", "0.49354088", "0.48810115", "0.4833697", "0.48231986", "0.48105225", "0.4800622", "0.47857952", "0.47641757", "0.47444397", "0.47235665", "0.47144476", "0.47072086", "0.47017467", "0.47007304", "0.46910137", "0.4656707", "0.46545422", "0.4646098", "0.4640146", "0.4632684", "0.46237117", "0.4618318" ]
0.77570313
0
This method will perform generation based fuzzing on the bus. The method will inject properly formatted, randomly generated messages at a given period for a I{writesPerFuzz} number of times. The packets that are injected into the bus will all be saved in the following path DATALOCATION/InjectedData/(today's date (YYYYMMDD))_GenerationFuzzedPackets.csv. An example filename would be 20130222_GenerationFuzzedPackets.csv Where DATALOCATION is provided when the class is initiated. The data will be saved as integers.
def generationFuzzer(self,freq, standardIDs, dbLimits, period, writesPerFuzz, Fuzzes): #print "Fuzzing on standard ID: %d" %standardId self.client.serInit() self.spitSetup(freq) packet = [0,0,0x00,0x00,0x08,0,0,0,0,0,0,0,0] #empty packet template #get folder information (based on today's date) now = datetime.datetime.now() datestr = now.strftime("%Y%m%d") path = self.DATA_LOCATION+"InjectedData/"+datestr+"_GenerationFuzzedPackets.csv" filename = path outfile = open(filename,'a'); dataWriter = csv.writer(outfile,delimiter=','); #dataWriter.writerow(['# Time Error Bytes 1-13']); #dataWriter.writerow(['#' + description]) numIds = len(standardIDs) fuzzNumber = 0; #: counts the number of packets we have generated while( fuzzNumber < Fuzzes): id_new = standardIDs[random.randint(0,numIds-1)] print id_new #### split SID into different regs SIDhigh = (id_new >> 3) & 0xFF; # get SID bits 10:3, rotate them to bits 7:0 SIDlow = (id_new & 0x07) << 5; # get SID bits 2:0, rotate them to bits 7:5 packet[0] = SIDhigh packet[1] = SIDlow #generate a fuzzed packet for i in range(0,8): # for each data byte, fuzz it idx = "db%d"%i limits = dbLimits[idx] value = random.randint(limits[0],limits[1]) #generate pseudo-random integer value packet[i+5] = value print packet #put a rough time stamp on the data and get all the data bytes row = [tT.time(), id_new,8] # could make this 8 a variable msg = "Injecting: " for i in range(5,13): row.append(packet[i]) msg += " %d"%packet[i] #print msg dataWriter.writerow(row) self.client.txpacket(packet) tT.sleep(period/1000) #inject the packet the given number of times. for i in range(1,writesPerFuzz): self.client.MCPrts(TXB0=True) tT.sleep(period/1000) fuzzNumber += 1 print "Fuzzing Complete" SIDhigh = (1056 >> 3) & 0xFF; # get SID bits 10:3, rotate them to bits 7:0 SIDlow = (1056 & 0x07) << 5; # get SID bits 2:0, rotate them to bits 7:5 packet = [SIDhigh, SIDlow, 0, 0, 8, 65, 255, 32, 120, 0, 0, 1, 247] self.client.txpacket(packet) for i in range(0,100): self.client.MCPrts(TXB0=True) tT.sleep(.01) outfile.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generalFuzz(self,freq, Fuzzes, period, writesPerFuzz):\n #print \"Fuzzing on standard ID: %d\" %standardId\n self.client.serInit()\n self.spitSetup(freq)\n packet = [0,0,0x00,0x00,0x08,0,0,0,0,0,0,0,0] #empty template\n \n #get folder information (based on today's date)\n now = datetime.datetime.now()\n datestr = now.strftime(\"%Y%m%d\")\n path = self.DATA_LOCATION+\"InjectedData/\"+datestr+\"_GenerationFuzzedPackets.csv\"\n filename = path\n outfile = open(filename,'a');\n dataWriter = csv.writer(outfile,delimiter=',');\n #dataWriter.writerow(['# Time Error Bytes 1-13']);\n #dataWriter.writerow(['#' + description])\n \n fuzzNumber = 0; #: counts the number of packets we have generated\n while( fuzzNumber < Fuzzes):\n #generate new random standard id in the full range of possible values\n id_new = random.randint(0,4095) \n #print id_new\n #### split SID into different regs\n SIDhigh = (id_new >> 3) & 0xFF; # get SID bits 10:3, rotate them to bits 7:0\n SIDlow = (id_new & 0x07) << 5; # get SID bits 2:0, rotate them to bits 7:5\n packet[0] = SIDhigh\n packet[1] = SIDlow\n \n #generate a fuzzed packet\n for i in range(0,8): # for each data byte, fuzz it\n idx = \"db%d\"%i\n \n value = random.randint(0, 255) #generate pseudo-random integer value\n packet[i+5] = value\n #print packet\n #put a rough time stamp on the data and get all the data bytes \n row = [time.time(), id_new,8] \n \"\"\"@todo: allow for varied packet lengths\"\"\"\n msg = \"Injecting: \"\n for i in range(5,13):\n row.append(packet[i])\n msg += \" %d\"%packet[i]\n #print msg\n dataWriter.writerow(row)\n self.client.txpacket(packet)\n time.sleep(period/1000)\n \n #inject the packet the given number of times. \n for i in range(1,writesPerFuzz):\n self.client.MCPrts(TXB0=True)\n time.sleep(period/1000)\n fuzzNumber += 1\n print \"Fuzzing Complete\" \n outfile.close()", "def generate_random_testing(file_name, nb_points):\n file_name = _format_file_extension(file_name)\n acoustic_data = _generate_random_acoustic(nb_points)\n data = pd.DataFrame(acoustic_data, columns=[fmd.COLUMN_NAME[0]])\n data.to_csv(file_name, index=False)", "def test_save_and_load_generation(logger):\n\n generations = 2\n\n options = {}\n options[\"population_size\"] = 10000\n options[\"in-trees\"] = 0\n options[\"out-trees\"] = 1\n options[\"in-actions\"] = 0\n options[\"out-actions\"] = 3\n options[\"library\"] = False\n options[\"seed\"] = None\n\n for generation_index in range(generations):\n population = []\n population_str = ''\n\n # Generate random strategies to initialize the population\n for i in range(options[\"population_size\"]):\n p = evolve.generate_strategy(logger, options[\"in-trees\"], options[\"out-trees\"], options[\"in-actions\"],\n options[\"out-actions\"],\n options[\"seed\"], environment_id=None)\n actions.utils.parse(str(p), logger)\n population.append(p)\n if i == options[\"population_size\"] - 1:\n population_str += str(p)\n else:\n population_str += str(p) + \"\\n\"\n\n # Write the generation file\n filename = os.path.join(test_files_directory, \"generation\" + str(generation_index))\n evolve.write_generation(filename, population)\n\n check_one_file(logger, options, filename, population)", "def fakedata():\n if User.query.filter_by(email='[email protected]').first():\n print ('fake data already generated')\n else:\n generate_test_confs() # load testing confs and tracks\n generate_fake_tickets() # create fake tickets\n generate_test_users() # create named fake users\n # generate_fake_users(100) # create random users\n # 
add_self_follows() # create self-follows for all users\n generate_fake_papers(100) # create random papers\n generate_fake_reviews() # create random reviews\n generate_fake_transactions() # create fake tickets\n generate_fake_schedule()\n generate_default_addons()", "def test_back_fill(self):\n self.driver.start_sampling()\n\n # step 2 contains 3 blocks (4 records), start with this and get both since we used them\n # separately in other tests\n self.create_sample_data_set_dir(\"node59p1_step2.dat\", TELEM_DIR, \"node59p1.dat\",\n copy_metadata=False)\n self.assert_data((PhsenParserDataParticle,PhsenControlDataParticle),\n 'test_data_1-2.txt.result.yml', count=4)\n\n # This file has had a section of data replaced with 0s (14171-14675),\n # replacing PH1236501_01D6u51F11341_5D_E538\n self.create_sample_data_set_dir('node59p1_step3.dat', TELEM_DIR, \"node59p1.dat\",\n copy_metadata=False)\n self.assert_data(PhsenParserDataParticle, 'test_data_3.txt.result.yml',\n count=5)\n\n # Now fill in the zeroed section from step3, this should just return the new\n # data \n self.create_sample_data_set_dir('node59p1_step4.dat', TELEM_DIR, \"node59p1.dat\",\n copy_metadata=False)\n self.assert_data(PhsenParserDataParticle, 'test_data_4.txt.result.yml',\n count=1)\n\n # start over now using step 4\n self.driver.stop_sampling()\n # Reset the driver with no memento\n self.driver = self._get_driver_object(memento=None)\n self.driver.start_sampling()\n\n self.clear_async_data()\n self.create_sample_data_set_dir('node59p1_step4.dat', TELEM_DIR, \"node59p1.dat\",\n copy_metadata=False)\n self.assert_data((PhsenParserDataParticle,PhsenControlDataParticle),\n 'test_data_1-4.txt.result.yml', count=10)", "def test_file_gen(num, date_from, date_to, encoding, out_dir_path):\n # Validate requested encoding\n if encoding not in ['utf-8', 'utf-16']:\n print('Invalid encoding!')\n return\n\n # Create descriptive file name\n file_name = 'test_{}_{}.csv'.format(num, encoding[4:])\n file_path = os.path.join(out_dir_path, file_name)\n\n # Get list of all state names in pycountry\n states = [subdiv.name for subdiv in pycountry.subdivisions]\n num_states = len(states)\n\n # Create test file\n with open(file_path, 'w', encoding=encoding) as f:\n for i in range(num):\n state = states[randint(0, num_states - 1)]\n clicks = randint(0, 2000)\n ctr = randint(0, 200) / 100 # percentage\n date = random_date(date_from, date_to)\n line = '{},{},{},{}%\\n'.format(date, state, clicks, ctr)\n f.write(line)\n print('Test csv file created!')", "def run_random(self):\n manager = Manager()\n self.all_probabilities = manager.list()\n self.total_iterations = manager.Value('d', 0)\n num_counterexamples = manager.Value('d', 0)\n counter_lock = manager.Lock()\n all_probabilities_lock = manager.Lock()\n\n file_q = manager.Queue()\n\n self.mug_pipeline.set_folder_names(self.folder_name)\n self.mug_pipeline.set_optimizer_type(OptimizerType.RANDOM)\n pool = Pool(self.num_processes + 1, maxtasksperchild=60)\n\n filename = '{}/logs/results_{}.csv'.format(self.folder_name, self.trial_folder)\n watcher = Process(target=self.listener, args=(file_q, filename))\n watcher.start()\n\n iter_num = 0\n start_time = time.time()\n max_time_per_map = 60*60\n\n try:\n # TODO: change this from while true to terminate by timeout (try/except)\n while ((self.retrain_with_random and self.total_iterations.value < self.max_added_to_training) or\n (self.retrain_with_counterexamples and num_counterexamples.value < self.max_added_to_training)):\n result = None\n\n while result is 
None:\n try:\n result = func_timeout(max_time_per_map, pool.starmap,\n args=(self.mug_pipeline.run_inference,\n zip(self.generate_all_mug_initial_poses(), \n range(iter_num, iter_num + self.num_processes),\n repeat(self.all_probabilities), repeat(all_probabilities_lock), \n repeat(self.total_iterations), repeat(num_counterexamples),\n repeat(counter_lock), repeat(file_q), repeat(False), repeat(False))))\n except FunctionTimedOut:\n print('FUNCTION TIMED OUT, MORE THAN {} SECONDS!!!!'.format(max_time_per_map))\n\n # all_mug_initial_poses = []\n # for j in range(self.num_processes):\n # mug_initial_poses = []\n # for i in range(self.num_mugs):\n # mug_initial_poses += \\\n # RollPitchYaw(np.random.uniform(0.0, 2.0*np.pi, size=3)).ToQuaternion().wxyz().tolist() + \\\n # [np.random.uniform(-0.1, 0.1), np.random.uniform(-0.1, 0.1), np.random.uniform(0.1, 0.2)]\n # all_mug_initial_poses.append(mug_initial_poses)\n\n # result = pool.starmap(self.mug_pipeline.run_inference,\n # zip(all_mug_initial_poses, \n # range(iter_num, iter_num + self.num_processes),\n # repeat(self.all_probabilities), repeat(all_probabilities_lock), \n # repeat(self.total_iterations), repeat(num_counterexamples),\n # repeat(counter_lock), repeat(file_q), repeat(False), repeat(False)))\n\n iter_num += self.num_processes\n print('new iter_num: {}'.format(iter_num), flush=True)\n total_min = (time.time() - start_time)/60.0\n print('avg min/image: {}, total minutes: {}'.format(total_min/(iter_num + 1), total_min))\n print('------------------------------------------------', flush=True)\n sys.stdout.flush()\n except Exception as e:\n raise e\n\n pool.close()\n pool.join()\n\n sys.stdout.flush()", "def fuzz(self):\n self.total_mutant_index = 0\n self.total_num_mutations = self.num_mutations()\n\n self._main_fuzz_loop(self._iterate_protocol())", "def main():\r\n # handle arguments\r\n parser = argparse.ArgumentParser()\r\n\r\n parser.add_argument('-t', '--time', help = 'start time', default = \"2018-12-26 18:11:08.509654\")\r\n parser.add_argument('-bd', '--min_duration', type = int, help = 'minimum duration', default = 25)\r\n parser.add_argument('-td', '--max_duration', type = int, help = 'maximum duration', default = 70)\r\n parser.add_argument('-e', '--events', type = int, help = 'how many events to generate', default = 1000)\r\n\r\n args = parser.parse_args()\r\n\r\n f = open(f\"tests/test_1.json\", \"a\")\r\n\r\n string_time = \"2019-07-08 10:40:00.423123\"\r\n\r\n current_time = datetime.datetime.strptime(string_time, '%Y-%m-%d %H:%M:%S.%f')\r\n\r\n for i in range(0, args.events):\r\n\r\n duration = random.randint(args.min_duration, args.max_duration)\r\n\r\n json = \"{\\\"timestamp\\\": \\\"\" \\\r\n + str(current_time) \\\r\n + \"\\\", \\\"translation_id\\\": \\\"5aa5b2f39f7254a75aa5\\\", \" \\\r\n \"\\\"source_language\\\": \\\"en\\\",\\\"target_language\\\":\" \\\r\n \" \\\"fr\\\",\\\"client_name\\\": \\\"easyjet\\\",\\\"event_name\\\":\" \\\r\n \"\\\"translation_delivered\\\",\\\"nr_words\\\": 30, \\\"duration\\\": \"\\\r\n + str(duration) + \"}\\n\"\r\n\r\n f.write(json)\r\n\r\n minutes = random.randint(0, 59)\r\n seconds = random.randint(0, 59)\r\n\r\n current_time += datetime.timedelta(minutes=minutes, seconds=seconds)\r\n\r\n print(f\"New file is located at inputs/{args.events}.json\")", "def _generate_data(self, codec='deflate'):\n _logger.info('generating fake data')\n (desc, path) = mkstemp()\n os.close(desc)\n os.remove(path)\n try:\n call([\n 'node', osp.join(DPATH, os.pardir, os.pardir, 'scripts', 'random'),\n 
self.path, str(self.n_records), path\n ])\n yield path\n finally:\n if osp.exists(path):\n os.remove(path)", "def test_all_good(self):\n self.driver.start_sampling()\n\n self.create_sample_data_set_dir(\n \"node59p1_all_good1.dat\",\n TELEM_DIR,\n \"node59p1.dat\"\n )\n self.assert_data(\n (DostadParserTelemeteredDataParticle, DostadParserTelemeteredMetadataDataParticle),\n 'test_data_1-2.txt.result.yml',\n count=3\n )\n\n self.create_sample_data_set_dir(\n \"node59p1_all_good.dat\",\n TELEM_DIR,\n \"node59p1.dat\"\n )\n self.assert_data(\n DostadParserTelemeteredDataParticle,\n 'test_data_all_good.txt.result.yml',\n count=1\n )", "def gen_test_data(filename, nelems, concat):\n\n start = time.time()\n\n # The file just contains a sequentially\n # increasing list of numbers\n\n print('Generating test data ({} elems, {} bytes -> {})'.format(\n nelems,\n nelems * 8,\n filename))\n\n # Generate the data as a numpy memmap array.\n # Allocate at most 128MB at a time\n toWrite = nelems\n offset = 0\n writeBlockSize = min(16777216, nelems)\n\n datafile = '{}_temp'.format(filename)\n\n open(datafile, 'wb+').close()\n data = np.memmap(datafile, dtype=np.uint64, shape=nelems)\n idx = 0\n\n while toWrite > 0:\n\n if idx % 10 == 0:\n print('Generated to {}...'.format(offset))\n\n thisWrite = min(writeBlockSize, toWrite)\n\n vals = np.arange(offset, offset + thisWrite, dtype=np.uint64)\n\n data[offset:offset + thisWrite] = vals\n\n toWrite -= thisWrite\n offset += thisWrite\n idx += 1\n data.flush()\n\n if not concat: maxBufSize = -1\n else: maxBufSize = 8 * min(16777216, nelems // 50)\n\n compress(datafile, filename, maxBufSize)\n\n end = time.time()\n del data\n os.remove(datafile)\n\n print('Done in {:0.2f} seconds'.format(end - start))", "def postprocess(self, attempt_number, buffer_info, data, data_file, missed_packet=False):\n\t\t#print \"postprocessing {}\".format(packet_number)\n\n\t\tpp_start = time.clock()\n\t\tif missed_packet:\n\t\t\tprint \"supposedly missed packet {}\".format(attempt_number)\n\t\t\tattempt_number = -1 * attempt_number\n\n\t\twith open(data_file, 'a') as f:\n\t\t\t# postprocessing\n\t\t\tindex, prev_sample, latency = 0, 0, 0\n\t\t\tfor sample in data:\n\t\t\t\tif (index > buffer_info[0]):\n\t\t\t\t\t# we want to check every sample if test harness thinks packet was missed\n\t\t\t\t\tif latency == 4.096:\n\t\t\t\t\t\tprint \"only took 4096 samples?\"\n\t\t\t\t\tbreak\n\n\t\t\t\tif ((prev_sample ^ sample) != 0):\n\t\t\t\t\t# if sample is zero something wrong happened when reading from the buffer\n\n\t\t\t\t#if ((prev_sample ^ sample) & self.bits_to_monitor) != 0:\n\t\t\t\t\t# one or more of the bits to monitor have changed\n\n\t\t\t\t\t#TODO: add check to see if sample is weird. 
every output bit other than button press must always be high.\n\t\t\t\t\t#sample_output_str = str(get_bit(sample, self.packet_received_pos))\n\t\t\t\t\t#sample_output_str += str(get_bit(sample, self.packet_created_pos))\n\t\t\t\t\t#sample_output_str += str(get_bit(sample, self.button_press_mirror_pos))\n\t\t\t\t\tsample_output_str = binary_num_str(sample, split=True)\n\t\t\t\t\t#print sample_output_str\n\t\t\t\t\tlatency = index * self.period_ms\n\t\t\t\t\tf.write(\"{}, {}, {}, {}\\n\".format(attempt_number, index, latency, sample_output_str))\n\n\t\t\t\tindex += 1\n\t\t\t\tprev_sample = sample\n\n\t\tpp_stop = time.clock()\n\t\tif missed_packet:\n\t\t\tprint \"postprocessing took {} seconds\".format(pp_stop - pp_start)\n\t\telse:\t\t\n\t\t\tprint \"{} {}\".format(attempt_number, latency)\n\t\t#print \"cSamples: {}, cLost: {}, cCorrupted: {}\".format(buffer_info[0], buffer_info[1], buffer_info[2])\n\t\t#print \"took {} samples for packet {}\".format(index, packet_number)\n\t\t#print \"postprocessing took {} seconds\".format(pp_stop - pp_start)\n\t\t#print \"\\n\"\n\t\treturn", "def generate(self):\n super().generate()\n records = random.random((self._dimension, self._dimension, self.num_samples))\n record_labels = [0] * self.num_samples\n prev_out_spec =\"\"\n count = 0\n for i in range(0, int(self.num_files)):\n if i % self.comm_size == self.my_rank:\n progress(i+1, self.num_files, \"Generating NPZ Data\")\n out_path_spec = \"{}_{}_of_{}.npz\".format(self._file_prefix, i, self.num_files)\n if count == 0:\n prev_out_spec = out_path_spec\n if self.compression != Compression.ZIP:\n np.savez(out_path_spec, x=records, y=record_labels)\n else:\n np.savez_compressed(out_path_spec, x=records, y=record_labels)\n count += 1\n else:\n copyfile(prev_out_spec, out_path_spec)", "def genAndSaveMoving1DMassData(saveName='movingPointMassData/testPointMassData000.pkl',Iterations=10):\n #How many iterations we want to include\n #Iterations = 10 # No more hard coding! 
This is now an input into the function.\n xmin = 0.0\n xmax = 20.0\n vmin = 1.0\n vmax = 5.0\n amin = 1.0\n amax = 2.0\n dt = .001\n tmax = 20.0\n dataOut = []\n for i in range(Iterations):\n funcOuts = movingPointMass1D.randomMassMovement(xmin, xmax, vmin, vmax, amin, amax, dt, tmax)\n dataOut.append(funcOuts)\n toSave = [dataOut, xmin, xmax, vmin, vmax, amin, amax, dt, tmax]\n outputFile = open(saveName, \"wb\")\n pickle.dump(toSave,outputFile)\n outputFile.close()", "def test_report(self):\n # Delete some ghost waters so they can be written out\n ghosts = [3054, 3055, 3056, 3057, 3058]\n base_gcmc_sampler.deleteGhostWaters(ghostResids=ghosts)\n\n # Report\n base_gcmc_sampler.report(base_gcmc_simulation)\n\n # Check the output to the ghost file\n assert os.path.isfile(os.path.join(outdir, 'bpti-ghost-wats.txt'))\n # Read which ghosts were written\n with open(os.path.join(outdir, 'bpti-ghost-wats.txt'), 'r') as f:\n n_lines = 0\n lines = f.readlines()\n for line in lines:\n if len(line.split()) > 0:\n n_lines += 1\n assert n_lines == 1\n ghosts_read = [int(resid) for resid in lines[0].split(',')]\n assert all(np.isclose(ghosts, ghosts_read))\n\n return None", "def generation(Duration, amount):\n # Generate group sizes, the total group number is \"amount\", the number of people in each group is between 1 and 6\n size = np.random.randint(1, 7, amount)\n # Generate vip situation, based on the probability of 8%\n vip = []\n for i in range(amount):\n num = np.random.randint(0, 101, 1)\n if (num >= 0) & (num <= 8):\n vip.append(True)\n else:\n vip.append(False)\n # Generate the registration time for each group\n timestamp_list = mod_pert_random(0, Duration // 2, Duration, samples=amount).astype(int)\n timestamp_list = list(timestamp_list)\n\n counter = 0\n queue_2 = Queue()\n queue_4 = Queue()\n queue_6 = Queue()\n\n table_2, table_4, table_6 = tablesSetting(6, 4, 2) # Initializing tables\n\n total_timeR_2 = [] # For calculating total average waiting time\n nextGroup_endTime_2 = {} # {No. 
of table: the ending time of the table}\n\n total_timeR_4 = []\n nextGroup_endTime_4 = {}\n total_timeR_6 = []\n nextGroup_endTime_6 = {}\n\n groupNumb = 0 # all group have their unique ID\n\n for i in range(Duration):\n while i in timestamp_list:\n if size[counter] == 1 or size[counter] == 2:\n queue_2.add_queue(Group(i, 2, vip[counter], groupNumb))\n counter += 1\n groupNumb += 1\n elif size[counter] == 3 or size[counter] == 4:\n queue_4.add_queue(Group(i, 4, vip[counter], groupNumb))\n counter += 1\n groupNumb += 1\n elif size[counter] == 5 or size[counter] == 6:\n queue_6.add_queue(Group(i, 6, vip[counter], groupNumb))\n counter += 1\n groupNumb += 1\n timestamp_list.remove(i) # Deal with the situation that several groups arrive at the same time point\n\n # Run the simulation\n simulation(i, table_2, Duration, queue_2, total_timeR_2, nextGroup_endTime_2)\n simulation(i, table_4, Duration, queue_4, total_timeR_4, nextGroup_endTime_4)\n simulation(i, table_6, Duration, queue_6, total_timeR_6, nextGroup_endTime_6)\n\n # Summary\n if i == Duration-1:\n print(\"Total groups served (groups who finished their meal or on the table currently):\",\n len(total_timeR_2)+len(total_timeR_4)+len(total_timeR_6))\n avg=(sum(total_timeR_2)+sum(total_timeR_4)+sum(total_timeR_6))/(len(total_timeR_2)+len(total_timeR_4)+len(total_timeR_6))\n print('Average waiting time for groups served: {0:.2f}'.format(avg), \"minute(s)\")", "def write_data(self, file_path, success_cutoff):\n agg_df = pd.DataFrame(columns=tf.Move)\n for game in self.game_list:\n agg_df = agg_df.add(game, fill_value = 0)\n agg_df.to_csv(file_path)\n pass", "def generateRandomInput(filename, num_people, travel_db):\n import random\n routes = []\n for i in range(num_people):\n route = travel_db.randomRoute()\n route.insert(0,\"Person \" + str(i)) # Add a name for each route.\n routes.append(route)\n if FileHandler.writeRoutesCSV(filename,routes): # If it's successful writing the file\n print(\"File {0} created successfully with {1} people.\".format(filename, num_people))\n else:\n print(\"File {0} could not be created.\".format(filename))", "def test_shuffled(self):\n self.setup_flags()\n self.io_args.matches = os.path.join(\n self.io_args.output_root, \"shuffled\", \"matches.json\"\n )\n self._calibration_error_test(\"shuffled\", \"GeometricCalibration\")", "def generate(self):\n t_0 = time()\n\n if self.random_towers:\n self.towers = np.random.rand(self.number_towers, 2)\n else:\n step = np.ceil(np.sqrt(self.number_towers)).astype('int')\n\n if step ** 2 != self.number_towers:\n self.number_towers = step ** 2\n print(f'WARNING: number of towers changed to {self.number_towers}')\n\n X, Y = np.mgrid[0:1:step * 1j, 0:1:step * 1j]\n positions = np.vstack([X.ravel(), Y.ravel()])\n self.towers = positions.swapaxes(1, 0)\n\n self.towers_manager = TowersManager(self.towers, self.vel_friction)\n\n self.distances = self.towers_manager.generate_distances()\n self.print(f'Took {time() - t_0} to create distrances matrix')\n\n t = time()\n self.probabilities = self.generate_probabilities()\n self.print(f'Took {time() - t} to create probabilities matrix')\n\n t = time()\n self.traces = self.generate_weighted_users_traces()\n self.print(f'Took {time() - t} to create user traces')\n\n t = time()\n self.aggregated_data = self.generate_aggregate_data()\n self.print(f'Took {time() - t} to build aggregated data')\n\n self.print(f'Took {time() - t_0} to generate all')", "def produce_data2(self, filename):\n filepath = f'data/{filename}-sorted.csv'\n\n 
time_by_guards = defaultdict(int)\n guard = None\n asleep = None\n\n with open(filepath) as in_file:\n line = in_file.readline()\n while line:\n line = line.replace('\\n', '')\n if line == '':\n break\n _, time, occurrence = line.split(',')\n time = int(time)\n if '#' in occurrence:\n guard = int(occurrence[1:])\n asleep = None\n elif occurrence == 'FA':\n asleep = time\n elif occurrence == 'WU':\n if asleep is None:\n # exceptional case\n breakpoint()\n line = in_file.readline()\n # need to readline() because is exiting this iteration\n continue\n for t in range(asleep, time):\n time_by_guards[(guard, t)] += 1\n\n line = in_file.readline()\n\n return time_by_guards", "def __create_test_file(self):\n self.test_file = os.path.join(os.path.dirname(self.server_path), \"data\")\n with open(self.test_file, \"ab+\") as f:\n n_blocks = int(self.args.size) // self.max_block_size\n for i in range(n_blocks):\n f.write(bytearray(os.urandom(self.max_block_size)))\n remaining = int(self.args.size) % self.max_block_size\n if remaining > 0:\n f.write(bytearray(os.urandom(remaining)))\n self.assertEqual(int(self.args.size), os.path.getsize(self.test_file))", "def test_bos_forwardeuler(bos_config):\n bos_config[\"PhysicsModules\"][\"BlockOnSpring\"][\"pusher\"] = \"ForwardEuler\"\n bos_config[\"Diagnostics\"][\"directory\"] = (\"test_data/test_output/\"\n \"output_ForwardEuler/\")\n sim = Simulation(bos_config)\n sim.run()\n for filename in ['block_p', 'block_x', 'time']:\n ref_data = np.genfromtxt('test_data/reference_output/'\n f'output_ForwardEuler/{filename}.csv',\n delimiter=',')\n tmp_data = np.genfromtxt('test_data/test_output/'\n f'output_ForwardEuler/{filename}.csv',\n delimiter=',')\n assert np.allclose(ref_data, tmp_data)", "def gen_int(filename):\n random.seed()\n random.randint(-100,100)\n with open(filename, \"w\") as f:\n for i in range(1000):\n f.write(str(random.randint(-100,100)))\n f.write(\" \")\n # f.write(\"hello\")", "def make_test_data(self):\r\n\r\n \r\n\r\n print (\"Creating Test Sample:\")\r\n\r\n print (' Period, rate, reps, phases: ', self.period, self.framerate, self.nrepetitions, self.nPhases)\r\n\r\n nframes = int(self.period * self.framerate * self.nrepetitions)\r\n\r\n print (' nframes: ', nframes)\r\n\r\n if self.bkgdNoise > 0.:\r\n\r\n d = np.random.normal(size=(nframes,self.imageSize[0],self.imageSize[1]),\r\n\r\n loc=self.bkgdIntensity, scale=self.bkgdNoise).astype('float32')\r\n\r\n else:\r\n\r\n d = self.bkgdIntensity*np.ones((nframes,self.imageSize[0],self.imageSize[1])).astype('float32')\r\n\r\n \r\n\r\n ds = d.shape\r\n\r\n print (' data shape: ', ds)\r\n\r\n dx = int(ds[2]/4)\r\n\r\n xc = int(ds[2]/2)\r\n\r\n xo = [xc-dx, xc+dx]\r\n\r\n ywidth = int(ds[2]/(self.nPhases+2))\r\n\r\n framedelay = 4\r\n\r\n\r\n\r\n if not self.mode:\r\n\r\n self.phasex = []\r\n\r\n self.phasey = []\r\n\r\n for i in range(0,self.nPhases):\r\n\r\n dy = int((i+1)*ds[2]/(self.nPhases+2)) # each phase is assigned to a region\r\n\r\n self.resp = np.zeros((nframes,))\r\n\r\n self.resp = np.cos(\r\n\r\n np.linspace(0, 2.0*np.pi*nframes/(self.period*self.framerate), nframes-framedelay)+i*np.pi/8 - np.pi/2.0)\r\n\r\n self.resp = np.concatenate((np.zeros(framedelay), self.resp))\r\n\r\n d[:, xo[0]:xo[1], dy:dy+ywidth ] += self.resp[:, np.newaxis, np.newaxis]\r\n\r\n self.phasey.append( (2+(dy+int(ds[2]/self.nPhases))/2))\r\n\r\n self.phasex.append((6+int(ds[1]/2)/2)) # make the signal equivalent of digitized one (baseline 3000, signal at 1e-4 of baseline)\r\n\r\n else:\r\n\r\n self.nPhases = 
4\r\n\r\n self.spotsize = 16\r\n\r\n nrpts = 20\r\n\r\n nsites = 4\r\n\r\n one_rep = int(self.period*self.framerate)\r\n\r\n isi = int(self.period*self.framerate/self.nPhases)\r\n\r\n print('period, isi: ', self.period, isi)\r\n\r\n r = np.arange(0, nrpts, 1.)\r\n\r\n alpha = 4.\r\n\r\n A = r/alpha *np.exp(-(r-alpha)/alpha) # scaled alpha function\r\n\r\n self.spot= self.gauss_spot(self.spotsize, 3.) # the 2d spot\r\n\r\n sigsize = np.random.normal(size=self.nPhases, loc=self.signal_size, scale=self.signal_size*2)\r\n\r\n sigsize = [np.abs(s) for s in sigsize] # restrict to positive amplitudes\r\n\r\n print ('sigsize: ', sigsize)\r\n\r\n for j in range(self.nrepetitions):\r\n\r\n for i in range(self.nPhases):\r\n\r\n self.resp = np.zeros((nrpts, self.spot.shape[0], self.spot.shape[1]))\r\n\r\n for k in range(nrpts):\r\n\r\n self.resp[k,:,:] += sigsize[i]*A[k] * self.spot # make response an alpha time course of gaussian spot\r\n\r\n start = j*one_rep + i*isi + framedelay\r\n\r\n stop = start + nrpts\r\n\r\n dy = int((i+1)*ds[2]/(self.nPhases+2)) # location for phase\r\n\r\n #dy = dy + 2*z\r\n\r\n# print ('start, stop: ', start, stop)\r\n\r\n for z in range(nsites):\r\n\r\n #self.resp = np.concatenate((np.zeros(framedelay), self.resp))\r\n\r\n xp = xo[0] + i*10 - 10*z\r\n\r\n yp = dy - i*10 + 10*z\r\n\r\n d[start:stop, xp:xp+self.spotsize, yp:yp+self.spotsize ] += self.resp\r\n\r\n self.imageData = d # reduce to a 16-bit map to match camera data type\r\n\r\n self.nFrames = self.imageData.shape[0]\r\n\r\n self.times = np.arange(0, nframes/self.framerate, 1.0/self.framerate)\r\n\r\n print( \" Test Image Created\")\r\n\r\n # imv = pg.ImageView()\r\n\r\n # imv.show()\r\n\r\n # imv.setImage(self.imageData)\r\n\r\n\r\n\r\n if self.layout is not None:\r\n\r\n self.layout.addWidget(imv, 0, 0)\r\n\r\n\r\n\r\n avgImage = np.mean(self.imageData, axis=0)\r\n\r\n ima = pg.ImageView()\r\n\r\n ima.setImage(avgImage)\r\n\r\n self.layout.addWidget(ima, 0, 1)\r\n\r\n self.adjust_image_data()\r\n\r\n self.avgimg = np.mean(self.imageData, axis=0) # get mean image for reference later: average across all time\r\n\r\n print (' Test file, original Image Info: ')\r\n\r\n self.print_image_info()\r\n\r\n self.rebin_image()\r\n\r\n #self.clean_windowerrors()\r\n\r\n # pg.image(self.imageData)\r\n\r\n # pg.show()\r\n\r\n # mpl.figure(1)\r\n\r\n # mpl.show()\r\n\r\n if not self.mode: # FFT analysis\r\n\r\n self.analysis_fourier_map(target=1, mode=0)\r\n\r\n self.plot_maps(mode=2, gfilter=self.gfilter)\r\n\r\n else:\r\n\r\n self.analysis_dFF_map()\r\n\r\n mpl.show()", "def test_generate_all_training(self):\n facade = ChatetteFacade.get_or_create()\n\n input_dir_path = \"tests/system-testing/inputs/generate-all/\"\n input_filenames = [\n \"simplest.chatette\", \"only-words.chatette\",\n \"words-and-groups.chatette\", \"alias.chatette\", \"include.chatette\",\n \"slot.chatette\", \"slotrolegroup.chatette\"\n ]\n for filename in input_filenames:\n file_path = os.path.join(input_dir_path, filename)\n facade.run(file_path)\n if not TestSystem.check_no_duplicates(facade.train_examples):\n pytest.fail(\n \"Some examples were generated several times \" +\n \"when dealing with file '\" + filename + \"'.\\nGenerated: \" + \\\n str(facade.train_examples)\n )\n legal_examples = TestSystem.get_legal_examples(file_path)\n for ex in facade.train_examples:\n formatted_ex = {\"intent\": ex.intent_name, \"text\": ex.text}\n if formatted_ex not in legal_examples:\n pytest.fail(\n str(formatted_ex) + \" is not a legal example for '\" + \\\n 
file_path + \"'\"\n )\n if len(legal_examples) != len(facade.train_examples):\n training_texts = [ex.text for ex in facade.train_examples]\n for legal_ex in legal_examples:\n if legal_ex[\"text\"] not in training_texts:\n pytest.fail(\n \"Example '\" + legal_ex[\"text\"] + \\\n \"' was not generated.\"\n )\n pytest.fail(\n \"An unknown example was not generated (\" + \\\n str(len(facade.train_examples)) + \\\n \" generated instead of \" + str(len(legal_examples)) + \\\n \").\\nGenerated: \" + str(facade.train_examples)\n )\n legal_syn = TestSystem.get_legal_synonyms(file_path)\n if legal_syn is not None:\n synonyms = AST.get_or_create().get_entities_synonyms()\n for key in synonyms:\n if key not in legal_syn:\n pytest.fail(\n \"'\" + key + \"' shouldn't have any synonyms.\"\n )\n for syn in synonyms[key]:\n if syn not in legal_syn[key]:\n pytest.fail(\n \"'\" + syn + \"' shouldn't be a synonym of '\" + \\\n key + \"'\"\n )", "def test_back_fill(self):\n self.driver.start_sampling()\n\n # step 2 contains 2 blocks, start with this and get both since we used them\n # separately in other tests \n self.clear_async_data()\n self.create_sample_data_set_dir(\n \"node59p1_step2.dat\",\n TELEM_DIR,\n \"node59p1.dat\",\n copy_metadata=False\n )\n self.assert_data(\n (DostadParserTelemeteredDataParticle, DostadParserTelemeteredMetadataDataParticle),\n 'test_data_1-2.txt.result.yml',\n count=3\n )\n\n # This file has had a section of DO data replaced with 0s\n self.clear_async_data()\n self.create_sample_data_set_dir(\n 'node59p1_step3.dat',\n TELEM_DIR,\n \"node59p1.dat\",\n copy_metadata=False\n )\n self.assert_data(\n DostadParserTelemeteredDataParticle,\n 'test_data_3.txt.result.yml',\n count=3\n )\n\n # Now fill in the zeroed section from step3, this should just return the new\n # data\n self.clear_async_data()\n self.create_sample_data_set_dir(\n 'node59p1_step4.dat',\n TELEM_DIR,\n \"node59p1.dat\",\n copy_metadata=False\n )\n self.assert_data(\n DostadParserTelemeteredDataParticle,\n 'test_data_4.txt.result.yml',\n count=1\n )\n\n # start over now, using step 4\n self.driver.stop_sampling()\n\n # Reset the driver with no memento\n self.memento = None\n self.driver = MflmDOSTADDataSetDriver(\n self._driver_config()['startup_config'],\n self.memento,\n self.data_callback,\n self.state_callback,\n self.event_callback,\n self.exception_callback)\n self.driver.start_sampling()\n\n self.clear_async_data()\n self.create_sample_data_set_dir(\n 'node59p1_step4.dat',\n TELEM_DIR,\n \"node59p1.dat\",\n copy_metadata=False\n )\n self.assert_data(\n (DostadParserTelemeteredDataParticle, DostadParserTelemeteredMetadataDataParticle),\n 'test_data_1-4.txt.result.yml',\n count=7\n )", "def fake_destination_data():\n\n for _ in range(0, 35):\n user_id = random.randint(1, 8)\n\n print (str(user_id) + '|' + fake.company() + '|' + fake.street_address()\n + '|' + fake.city() + '|' + fake.state_abbr() + '|' +\n fake.postalcode())", "def log_file1D(fast5_data , basecall_stat):\n\n version, flowcell_id, hostname, numMinion, run_id = fast5_data\n\n #Retrieve the dataframe with statitstics such as the quartile or std\n #Retrieve the dictionary from albacore summary log\n\n num_called_template, mean_qscore_template = basecall_stat.stat_generation()\n\n counter_template, total_nucleotide_template = basecall_stat.counter()\n\n occupancy_pore = basecall_stat.occupancy_pore()\n\n completeName = os.path.join('/home/ferrato/Documents/fast5', \"fichier_aozan.txt\")\n\n with open(completeName, 'w') as file_data:\n\n for index, 
element in num_called_template.iteritems():\n file_data.write(\"num.called.template.{}={}\\n\".format(index, element))\n\n for index, element in num_called_template.iteritems():\n file_data.write(\"mean.qscore.template.{}={}\\n\".format(index, element))\n\n for nucleotide, count in counter_template.items():\n file_data.write(\"nucleotide.{}.template={}\\n\".format(nucleotide,count))\n if nucleotide == 'total':\n continue\n calcul = float(count) / float(total_nucleotide_template)\n file_data.write(\"nucleotide.{}.proportion={}\\n\".format(nucleotide, calcul))\n\n\n file_data.write(\"total.number.of.sequence={}\\n\".format(basecall_stat.fast5_tot))\n\n for index, value in occupancy_pore.items():\n file_data.write(\"pore.occupancy.{}={}\\n\".format(index, value))\n\n\n file_data.write(\"flowcell.serial.number={}\\n\".format(flowcell_id))\n file_data.write(\"minknown.version={}\\n\".format(version))\n file_data.write(\"hostname={}\\n\".format(hostname))\n file_data.write(\"minion.serial.number={}\\n\".format(numMinion))\n file_data.write((\"run.id={}\\n\".format(run_id)))\n\n for index, element in basecall_stat.statistics_read_size().iteritems():\n file_data.write(\"Read.fastq.length.{}={}\\n\".format(index, element))" ]
[ "0.8051458", "0.54595256", "0.53430116", "0.5291932", "0.52867705", "0.52761495", "0.52647626", "0.5254595", "0.52455837", "0.5188657", "0.5176086", "0.5169036", "0.51669353", "0.5132858", "0.5126628", "0.5115051", "0.5114212", "0.5111231", "0.510516", "0.50725013", "0.5072439", "0.50687855", "0.5060421", "0.50596", "0.50507027", "0.5041776", "0.50349426", "0.5025772", "0.5019891", "0.5015183" ]
0.8079699
0
The method will inject properly formatted, randomly generated messages at a given period for a I{writesPerFuzz} number of times. A new random standard id will be chosen with each newly generated packet. IDs will be chosen from the full range of potential ids ranging from 0 to 4095. The packets that are injected into the bus will all be saved in the following path DATALOCATION/InjectedData/(today's date (YYYYMMDD))_GenerationFuzzedPackets.csv. An example filename would be 20130222_GenerationFuzzedPackets.csv Where DATALOCATION is provided when the class is initiated. The data will be saved as integers.
def generalFuzz(self,freq, Fuzzes, period, writesPerFuzz): #print "Fuzzing on standard ID: %d" %standardId self.client.serInit() self.spitSetup(freq) packet = [0,0,0x00,0x00,0x08,0,0,0,0,0,0,0,0] #empty template #get folder information (based on today's date) now = datetime.datetime.now() datestr = now.strftime("%Y%m%d") path = self.DATA_LOCATION+"InjectedData/"+datestr+"_GenerationFuzzedPackets.csv" filename = path outfile = open(filename,'a'); dataWriter = csv.writer(outfile,delimiter=','); #dataWriter.writerow(['# Time Error Bytes 1-13']); #dataWriter.writerow(['#' + description]) fuzzNumber = 0; #: counts the number of packets we have generated while( fuzzNumber < Fuzzes): #generate new random standard id in the full range of possible values id_new = random.randint(0,4095) #print id_new #### split SID into different regs SIDhigh = (id_new >> 3) & 0xFF; # get SID bits 10:3, rotate them to bits 7:0 SIDlow = (id_new & 0x07) << 5; # get SID bits 2:0, rotate them to bits 7:5 packet[0] = SIDhigh packet[1] = SIDlow #generate a fuzzed packet for i in range(0,8): # for each data byte, fuzz it idx = "db%d"%i value = random.randint(0, 255) #generate pseudo-random integer value packet[i+5] = value #print packet #put a rough time stamp on the data and get all the data bytes row = [time.time(), id_new,8] """@todo: allow for varied packet lengths""" msg = "Injecting: " for i in range(5,13): row.append(packet[i]) msg += " %d"%packet[i] #print msg dataWriter.writerow(row) self.client.txpacket(packet) time.sleep(period/1000) #inject the packet the given number of times. for i in range(1,writesPerFuzz): self.client.MCPrts(TXB0=True) time.sleep(period/1000) fuzzNumber += 1 print "Fuzzing Complete" outfile.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generationFuzzer(self,freq, standardIDs, dbLimits, period, writesPerFuzz, Fuzzes):\n #print \"Fuzzing on standard ID: %d\" %standardId\n self.client.serInit()\n self.spitSetup(freq)\n packet = [0,0,0x00,0x00,0x08,0,0,0,0,0,0,0,0] #empty packet template\n \n\n #get folder information (based on today's date)\n now = datetime.datetime.now()\n datestr = now.strftime(\"%Y%m%d\")\n path = self.DATA_LOCATION+\"InjectedData/\"+datestr+\"_GenerationFuzzedPackets.csv\"\n filename = path\n outfile = open(filename,'a');\n dataWriter = csv.writer(outfile,delimiter=',');\n #dataWriter.writerow(['# Time Error Bytes 1-13']);\n #dataWriter.writerow(['#' + description])\n \n numIds = len(standardIDs)\n fuzzNumber = 0; #: counts the number of packets we have generated\n while( fuzzNumber < Fuzzes):\n id_new = standardIDs[random.randint(0,numIds-1)]\n print id_new\n #### split SID into different regs\n SIDhigh = (id_new >> 3) & 0xFF; # get SID bits 10:3, rotate them to bits 7:0\n SIDlow = (id_new & 0x07) << 5; # get SID bits 2:0, rotate them to bits 7:5\n packet[0] = SIDhigh\n packet[1] = SIDlow\n \n #generate a fuzzed packet\n for i in range(0,8): # for each data byte, fuzz it\n idx = \"db%d\"%i\n limits = dbLimits[idx]\n value = random.randint(limits[0],limits[1]) #generate pseudo-random integer value\n packet[i+5] = value\n print packet\n #put a rough time stamp on the data and get all the data bytes \n row = [tT.time(), id_new,8] # could make this 8 a variable \n msg = \"Injecting: \"\n for i in range(5,13):\n row.append(packet[i])\n msg += \" %d\"%packet[i]\n #print msg\n dataWriter.writerow(row)\n self.client.txpacket(packet)\n tT.sleep(period/1000)\n \n #inject the packet the given number of times. \n for i in range(1,writesPerFuzz):\n self.client.MCPrts(TXB0=True)\n tT.sleep(period/1000)\n fuzzNumber += 1\n print \"Fuzzing Complete\" \n SIDhigh = (1056 >> 3) & 0xFF; # get SID bits 10:3, rotate them to bits 7:0\n SIDlow = (1056 & 0x07) << 5; # get SID bits 2:0, rotate them to bits 7:5\n packet = [SIDhigh, SIDlow, 0, 0, 8, 65, 255, 32, 120, 0, 0, 1, 247]\n self.client.txpacket(packet)\n for i in range(0,100):\n self.client.MCPrts(TXB0=True)\n tT.sleep(.01)\n outfile.close()", "def gen_int(filename):\n random.seed()\n random.randint(-100,100)\n with open(filename, \"w\") as f:\n for i in range(1000):\n f.write(str(random.randint(-100,100)))\n f.write(\" \")\n # f.write(\"hello\")", "def gen_random_fightID():\n pass", "def random_fuzz(static_arb_id, static_payload, logging=0, filename=None, id_length=MAX_ID_LENGTH - 1,\n payload_length=MAX_PAYLOAD_LENGTH):\n # Define a callback function which will handle incoming messages\n def response_handler(msg):\n print(\"Directive: \" + arb_id + \"#\" + payload)\n print(\" Received Message: \" + str(msg))\n\n log = [None] * logging\n counter = 0\n while True:\n arb_id = (static_arb_id if static_arb_id is not None else get_random_id(id_length))\n payload = (static_payload if static_payload is not None else get_random_payload(payload_length))\n\n directive_send(arb_id, payload, response_handler)\n\n counter += 1\n if logging != 0:\n log[counter % logging] = arb_id + \"#\" + payload\n\n if filename is not None:\n write_directive_to_file(filename, arb_id, payload)", "def write_rand(self, report_id):\n reports = _REPORT_TEMPLATE.copy()\n reports[_REPORT_KEY] = map(DataGen.__make_json, self.__randomize_data())\n rand_filename, _ = filename(report_id)\n rand_file = open(rand_filename, 'w')\n rand_file.write(json.dumps(reports, indent=2))\n rand_file.close()", "def 
simulate_logging(self):\n data = self.generator.generate_data()\n data_json = json.dumps(data)\n\n part_id = self.get_highest_id() + 1\n component_id = 1\n\n # Add new part\n insert_command = \"INSERT INTO data (time, part_id, component_id, processed, classified, data)\" \\\n \" VALUES ('{}', {}, {}, {}, '{}', '{}')\".format('now()',\n part_id,\n component_id,\n 'False',\n 'False',\n data_json)\n\n self.cursor.execute(insert_command)\n\n string = \"Neuer Datenbankeintrag: time={},\" \\\n \" part_id={}, component_id={}, data={}\".format(datetime.datetime,\n part_id,\n component_id,\n data_json)\n\n return string", "def generate_random_testing(file_name, nb_points):\n file_name = _format_file_extension(file_name)\n acoustic_data = _generate_random_acoustic(nb_points)\n data = pd.DataFrame(acoustic_data, columns=[fmd.COLUMN_NAME[0]])\n data.to_csv(file_name, index=False)", "def write_numbers(file_path):\n count = random.randint(20, 40)\n try:\n with open(file_path, 'w') as f:\n for _ in range(count):\n f.write(' '.join([str(x) for x in random.sample(range(10, 90), random.randint(4, 12))]))\n f.write('\\n')\n except Exception as err:\n print('Unexpected error:', err)", "def fake_destination_data():\n\n for _ in range(0, 35):\n user_id = random.randint(1, 8)\n\n print (str(user_id) + '|' + fake.company() + '|' + fake.street_address()\n + '|' + fake.city() + '|' + fake.state_abbr() + '|' +\n fake.postalcode())", "def _make_random_file(self, dir, num_chars=10000):\n filename = os.path.join(dir, \"f-%d\" % random.randint(1, 2**63 - 1))\n content = \"\".join([random.choice(\"0123456789abcdefghijklmnopqrstuvwxyz\\n\") for _ in range(num_chars)])\n with open(filename, \"w\") as f:\n f.writelines(content)\n return filename", "def main():\r\n # handle arguments\r\n parser = argparse.ArgumentParser()\r\n\r\n parser.add_argument('-t', '--time', help = 'start time', default = \"2018-12-26 18:11:08.509654\")\r\n parser.add_argument('-bd', '--min_duration', type = int, help = 'minimum duration', default = 25)\r\n parser.add_argument('-td', '--max_duration', type = int, help = 'maximum duration', default = 70)\r\n parser.add_argument('-e', '--events', type = int, help = 'how many events to generate', default = 1000)\r\n\r\n args = parser.parse_args()\r\n\r\n f = open(f\"tests/test_1.json\", \"a\")\r\n\r\n string_time = \"2019-07-08 10:40:00.423123\"\r\n\r\n current_time = datetime.datetime.strptime(string_time, '%Y-%m-%d %H:%M:%S.%f')\r\n\r\n for i in range(0, args.events):\r\n\r\n duration = random.randint(args.min_duration, args.max_duration)\r\n\r\n json = \"{\\\"timestamp\\\": \\\"\" \\\r\n + str(current_time) \\\r\n + \"\\\", \\\"translation_id\\\": \\\"5aa5b2f39f7254a75aa5\\\", \" \\\r\n \"\\\"source_language\\\": \\\"en\\\",\\\"target_language\\\":\" \\\r\n \" \\\"fr\\\",\\\"client_name\\\": \\\"easyjet\\\",\\\"event_name\\\":\" \\\r\n \"\\\"translation_delivered\\\",\\\"nr_words\\\": 30, \\\"duration\\\": \"\\\r\n + str(duration) + \"}\\n\"\r\n\r\n f.write(json)\r\n\r\n minutes = random.randint(0, 59)\r\n seconds = random.randint(0, 59)\r\n\r\n current_time += datetime.timedelta(minutes=minutes, seconds=seconds)\r\n\r\n print(f\"New file is located at inputs/{args.events}.json\")", "def test_token_replacement(eventgen_test_helper):\n events = eventgen_test_helper(\"eventgen_token_replacement.conf\").get_events()\n # assert the events size is 10 since end = 1\n assert len(events) == 10\n\n with open(os.path.join(base_dir, \"sample\", \"id.csv\"), \"rt\") as f:\n id_content = f.read()\n with 
open(os.path.join(base_dir, \"sample\", \"ip.csv\"), \"rt\") as f:\n ip_content = f.read()\n with open(os.path.join(base_dir, \"sample\", \"cp.csv\"), \"rt\") as f:\n cp_content = f.read()\n with open(os.path.join(base_dir, \"sample\", \"city.csv\"), \"rt\") as f:\n reader = csv.reader(f)\n country = []\n city = []\n latitude = []\n longitude = []\n for row in reader:\n country.append(row[0])\n city.append(row[1])\n latitude.append(row[3])\n longitude.append(row[4])\n\n integer_id_seed = 1\n for event in events:\n try:\n event_obj = json.loads(event)\n except ValueError:\n raise Exception(\"Token replacement error\")\n\n # assert replacementType = integerid\n assert int(event_obj[\"ppcustomdata\"][\"receiver_id\"]) == integer_id_seed\n integer_id_seed += 1\n\n # assert replacementType = file\n assert event_obj[\"id\"] in id_content\n assert event_obj[\"cp\"] in cp_content\n assert event_obj[\"message\"][\"cliIP\"] in ip_content\n\n # assert replacementType = static\n assert event_obj[\"netPerf\"][\"lastByte\"] == \"0\"\n\n # assert replacementType = random and replacement = integer[<start>:<end>]\n assert 5000 >= int(event_obj[\"message\"][\"bytes\"]) > 40\n\n # assert replacementType = random and replacement = float[<start>:<end>]\n assert 3.0 >= float(event_obj[\"netPerf\"][\"lastMileRTT\"]) >= -3.0\n\n # assert replacementType = random and replacement = ipv4 | ipv6 | mac\n ipv4_pattern = re.compile(r\"^(?:[0-9]{1,3}\\.){3}[0-9]{1,3}$\")\n ipv6_pattern = re.compile(r\"^([A-Fa-f0-9]{1,4}:){7}[A-Fa-f0-9]{1,4}$\")\n mac_pattern = re.compile(r\"^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$\")\n\n assert ipv4_pattern.match(event_obj[\"akadebug\"][\"Ak_IP\"]) is not None\n assert (\n ipv6_pattern.match(event_obj[\"akadebug\"][\"forward-origin-ip\"]) is not None\n )\n assert mac_pattern.match(event_obj[\"akadebug\"][\"end-user-ip\"]) is not None\n\n # assert replacementType = file | mvfile and replacement = <replacement file name>:<column number>\n assert event_obj[\"geo\"][\"city\"] in city\n assert event_obj[\"geo\"][\"country\"] in country\n assert event_obj[\"geo\"][\"lat\"] in latitude\n assert event_obj[\"geo\"][\"long\"] in longitude", "def update_seed_parameters(parameters, samples):\n\n with open(\"../../output/seed.tmp\", \"w\") as f:\n f.write(f\"{parameters[0]+parameters[1]}\\n\")\n f.write(f\"{samples}\")", "def test_insertRandomWater(self):\n # Insert a random water\n new_positions, wat_id, atom_ids = gcmc_system_sampler.insertRandomWater()\n\n # Check that the indices returned are integers - may not be type int\n assert wat_id == int(wat_id)\n assert all([i == int(i) for i in atom_ids])\n\n # Check that the new positions are different to the old positions\n assert all([any([new_positions[i][j] != gcmc_system_sampler.positions[i][j] for j in range(3)])\n for i in atom_ids])\n assert all([all([new_positions[i][j] == gcmc_system_sampler.positions[i][j] for j in range(3)])\n for i in range(len(new_positions)) if i not in atom_ids])\n\n return None", "def test_save_and_load_generation(logger):\n\n generations = 2\n\n options = {}\n options[\"population_size\"] = 10000\n options[\"in-trees\"] = 0\n options[\"out-trees\"] = 1\n options[\"in-actions\"] = 0\n options[\"out-actions\"] = 3\n options[\"library\"] = False\n options[\"seed\"] = None\n\n for generation_index in range(generations):\n population = []\n population_str = ''\n\n # Generate random strategies to initialize the population\n for i in range(options[\"population_size\"]):\n p = evolve.generate_strategy(logger, 
options[\"in-trees\"], options[\"out-trees\"], options[\"in-actions\"],\n options[\"out-actions\"],\n options[\"seed\"], environment_id=None)\n actions.utils.parse(str(p), logger)\n population.append(p)\n if i == options[\"population_size\"] - 1:\n population_str += str(p)\n else:\n population_str += str(p) + \"\\n\"\n\n # Write the generation file\n filename = os.path.join(test_files_directory, \"generation\" + str(generation_index))\n evolve.write_generation(filename, population)\n\n check_one_file(logger, options, filename, population)", "def sweepRandom(self, freq, number = 5, time = 5):\n msgIDs = [] #standard IDs that we have observed during run\n ids = [] #standard IDs that have been tried\n self.client.serInit()\n self.client.MCPsetup()\n for i in range(0,number+1,6):\n idsTemp = []\n comment = \"sweepFilter: \"\n for j in range(0,6,1):\n id = randrange(2047)\n #comment += \"_%d\" % id\n idsTemp.append(id)\n ids.append(id)\n #print comment\n description = \"Running a sweep filer for all the possible standard IDs. This runs the following : \" + comment\n count = self.sniff(freq=freq, duration=time, description=description, comment = comment, standardid = idsTemp)\n if( count != 0):\n for element in idsTemp:\n #comment = \"sweepFilter: %d\" % (element)\n comment=\"sweepFilter: \"\n description = \"Running a sweep filer for all the possible standard IDs. This run filters for: %d \" % element\n count = self.sniff(freq=freq, duration = time, description = description,comment = comment, standardid = [element, element, element])\n if( count != 0):\n msgIDs.append(j)\n return msgIDs, ids", "def generateRandomInput(filename, num_people, travel_db):\n import random\n routes = []\n for i in range(num_people):\n route = travel_db.randomRoute()\n route.insert(0,\"Person \" + str(i)) # Add a name for each route.\n routes.append(route)\n if FileHandler.writeRoutesCSV(filename,routes): # If it's successful writing the file\n print(\"File {0} created successfully with {1} people.\".format(filename, num_people))\n else:\n print(\"File {0} could not be created.\".format(filename))", "def test_back_fill(self):\n self.driver.start_sampling()\n\n # step 2 contains 3 blocks (4 records), start with this and get both since we used them\n # separately in other tests\n self.create_sample_data_set_dir(\"node59p1_step2.dat\", TELEM_DIR, \"node59p1.dat\",\n copy_metadata=False)\n self.assert_data((PhsenParserDataParticle,PhsenControlDataParticle),\n 'test_data_1-2.txt.result.yml', count=4)\n\n # This file has had a section of data replaced with 0s (14171-14675),\n # replacing PH1236501_01D6u51F11341_5D_E538\n self.create_sample_data_set_dir('node59p1_step3.dat', TELEM_DIR, \"node59p1.dat\",\n copy_metadata=False)\n self.assert_data(PhsenParserDataParticle, 'test_data_3.txt.result.yml',\n count=5)\n\n # Now fill in the zeroed section from step3, this should just return the new\n # data \n self.create_sample_data_set_dir('node59p1_step4.dat', TELEM_DIR, \"node59p1.dat\",\n copy_metadata=False)\n self.assert_data(PhsenParserDataParticle, 'test_data_4.txt.result.yml',\n count=1)\n\n # start over now using step 4\n self.driver.stop_sampling()\n # Reset the driver with no memento\n self.driver = self._get_driver_object(memento=None)\n self.driver.start_sampling()\n\n self.clear_async_data()\n self.create_sample_data_set_dir('node59p1_step4.dat', TELEM_DIR, \"node59p1.dat\",\n copy_metadata=False)\n self.assert_data((PhsenParserDataParticle,PhsenControlDataParticle),\n 'test_data_1-4.txt.result.yml', count=10)", "def 
set_random_festivals(self, num):\n try:\n self.cursor.execute(\"insert into festivals (place_id, name, date) \"\n \"select rand.place_id, rand.name, rand.date \"\n \"from (select places.id as place_id, \"\n \"md5(random()::text) as name, \"\n \"((current_date - '70 years'::interval) + trunc(random() * 365) * '1 day'::interval + trunc(random() * 3) * '1 year'::interval ) as date \"\n f\"from generate_series(1, 1), places ORDER BY random() limit {num}) as rand\")\n self.connection.commit()\n if self.cursor.rowcount:\n return \"generated festivals\"\n else:\n return \"NULL\"\n except(Exception, psycopg2.Error) as error:\n self.connect.rollback()\n print(\"error in generate\", error)", "def write_test_data(sql):\n for fname in sorted(glob.glob(\"mock_data/*.csv\")):\n print(fname)\n with open(fname, 'r', encoding='utf8') as csvfile:\n reader = csv.reader(csvfile, delimiter=\",\", quoting=csv.QUOTE_MINIMAL)\n i = 0\n for row in reader:\n if i == 0:\n if row != '' and ''.join(row) != '':\n sql.write(\"INSERT INTO \" + \"_\".join(fname.split('_')[2:])[:-4] + commajoin(row, [], 0) + \" VALUES\\n\")\n else:\n sql.write(\"INSERT INTO \" + \"_\".join(fname.split('_')[2:])[:-4] + \" VALUES\\n\")\n i += 1\n continue\n if row == '' or ''.join(row) == '':\n continue\n if i > 1:\n sql.write(\",\\n\")\n sql.write(commajoin(row, list(range(len(row))), 4))\n i += 1\n sql.write(\";\\n\\n\")", "def generate_ev_file(id_test):\n print(\"generate_ev_file\")\n \n ev_output_file_name=id_test+\".ev\"\n ev_input_file_name=id_test+\"_events.csv\"\n f_output = io.open(INPUT_PARSER_RESULTS_DIR+ev_output_file_name, \"w\",newline='\\n')\n f_input = io.open(AGRODEVS_INPUT_DIR+ev_input_file_name, \"r\")\n \n input_reader = csv.reader(f_input, delimiter=',')\n field_names_list = next(input_reader)\n if (field_names_list[0]!=\"campaign\"):\n print(\"First field of events file input should be 'campaing' but is:\"+field_names_list[0])\n print(\"Cannot generate event file\")\n return\n else:\n print(field_names_list)\n for line in input_reader:\n #generate timestamp for campaign\n #campania = int(int(ms)/100)+int(ss)*10+int(mm)*600+int(hh)*36000\n campaign = int(line[0])\n ms = (campaign*100)%1000\n ss = ((campaign*100)//1000)%60\n mm = ((campaign*100)//60000)%60\n hh = ((campaign*100)//360000)\n timeFormat = \"{:0>2d}\"\n msFormat = \"{:0>3d}\"\n timestamp_begin_event = str(timeFormat.format(hh))+\":\"+ \\\n str(timeFormat.format(mm))+\":\"+ \\\n str(timeFormat.format(ss))+\":\"+ \\\n str(msFormat.format(ms))\n timestamp_end_event = str(timeFormat.format(hh))+\":\"+ \\\n str(timeFormat.format(mm))+\":\"+ \\\n str(timeFormat.format(ss))+\":\"+ \\\n str(msFormat.format(ms+1))\n \n print(\"timestamp generated: \"+timestamp_begin_event)\n \n #generate events\n #begin events\n \n \n port_idx =0\n for event_port in line[1:]:\n port_idx=port_idx+1\n #print(\"processing port: \"+str(field_names_list[port_idx]))\n begin_event=CELL_DEVS_EXTERNAL_EVENT_BEGIN+ \\\n field_names_list[port_idx]+ \\\n \" \"+str(line[port_idx])\n \n f_output.write(timestamp_begin_event+\" \"+begin_event+\"\\n\")\n \n #end events\n port_idx=0\n for event_port in line[1:]:\n port_idx=port_idx+1\n #print(\"processing port: \"+str(field_names_list[port_idx]))\n end_event=CELL_DEVS_EXTERNAL_EVENT_ENDS+ \\\n field_names_list[port_idx]+ \\\n \" \"+str(line[port_idx])\n f_output.write(timestamp_end_event+\" \"+end_event+\"\\n\")\n \n \n \n f_input.close()\n f_output.close()", "def insert_test( hash, random, seq ):\n try:\n with open(os.path.join(SEED_DIRECTORY, 
\"%s_%s\" % (hash, 0)), \"w+\") as f:\n record_used('seeds', hash)\n pickle.dump({'hash': hash, 'random': random, 'seq': seq }, f)\n except IOError:\n if not os.environ.get('CALIENDO_TEST_SUITE', None):\n logger.warning( \"Failed to open %s\" % hash)", "def main():\n sleep(3)\n diceresult = randint(1, 6)\n if diceresult == 6:\n raise Exception('The \"data retrieval\" flaked out. :(')\n else:\n share_dir = os.environ['CYLC_WORKFLOW_SHARE_DIR']\n cycle_point = os.environ['CYLC_TASK_CYCLE_POINT']\n fp = Path(f'{share_dir}/{cycle_point}.dat')\n fp.write_text(f'diceresult = {diceresult}')", "def test_all_good(self):\n self.driver.start_sampling()\n\n self.create_sample_data_set_dir(\n \"node59p1_all_good1.dat\",\n TELEM_DIR,\n \"node59p1.dat\"\n )\n self.assert_data(\n (DostadParserTelemeteredDataParticle, DostadParserTelemeteredMetadataDataParticle),\n 'test_data_1-2.txt.result.yml',\n count=3\n )\n\n self.create_sample_data_set_dir(\n \"node59p1_all_good.dat\",\n TELEM_DIR,\n \"node59p1.dat\"\n )\n self.assert_data(\n DostadParserTelemeteredDataParticle,\n 'test_data_all_good.txt.result.yml',\n count=1\n )", "def create_msgs():\n getcontext().prec = 3 # will round to 3 decimal places\n orig_times = sorted(dat)\n for n in range(len(dat) - 1):\n linfun = interp1d([orig_times[n], orig_times[n+1]], \\\n [dat[orig_times[n]], dat[orig_times[n+1]]])\n dt = orig_times[n+1] - orig_times[n] # current\n freq = 1/dt # current\n if dt < (1/desHz):\n print('found instance where Freq already at/above desired Freq')\n else:\n new_dt = dt*freq/desHz\n new_times = linspace(orig_times[n],orig_times[n+1],floor(dt/new_dt))\n # print(new_times)\n new_values = linfun(new_times)\n # rounded_values = [float(Decimal(\"%.3f\" % e)) for e in new_values]\n rounded_times = [float(Decimal(\"%.3f\" % e)) for e in new_times]\n for m in range(len(rounded_times)):\n # this_time = int(new_times[m]*100000)/100000 # 5 decimal places in timstamp\n self.outData[sens][meas][rounded_times[m]] = new_values[m]", "def generate(count):\n lst = []\n with open('data.txt', 'w+') as f:\n for i in range(0, count):\n st = str(random.random())\n f.write(st+\"\\n\")\n lst.append(st)\n return lst", "def generate(methods,variables_file,seed_string,length):\n saver = tf.train.Saver()\n tf.get_default_graph().finalize()\n with tf.Session() as sess:\n for method in methods:\n meas.reset(method.meas)\n for var in tf.global_variables():\n sess.run(var.initializer)\n saver.restore(sess,variables_file) \n meas.update(method.meas)\n for mea in method.meas:\n cur_state = sess.run(mea.zero_state)\n X = seed_string\n Y = \" \" * STEPS\n mea.ydata[:] = stringToNumber(Y)\n start = len(X) - STEPS\n while len(X) <= length:\n #print start\n mea.xdata[:] = stringToNumber(X[start:start+STEPS])\n mea.feed_dict[mea.init_state] = cur_state\n cur_loss, cur_state = sess.run((mea.loss, mea.final_state),\n feed_dict=mea.feed_dict)\n probability = []\n for item in cur_loss[-1]:\n probability.append(item/sum(cur_loss[-1]))\n temp = np.random.choice(np.arange(50),p=probability)\n X += numberToString([temp])\n start += 1\n print X", "def gen_test_data(filename, nelems, concat):\n\n start = time.time()\n\n # The file just contains a sequentially\n # increasing list of numbers\n\n print('Generating test data ({} elems, {} bytes -> {})'.format(\n nelems,\n nelems * 8,\n filename))\n\n # Generate the data as a numpy memmap array.\n # Allocate at most 128MB at a time\n toWrite = nelems\n offset = 0\n writeBlockSize = min(16777216, nelems)\n\n datafile = 
'{}_temp'.format(filename)\n\n open(datafile, 'wb+').close()\n data = np.memmap(datafile, dtype=np.uint64, shape=nelems)\n idx = 0\n\n while toWrite > 0:\n\n if idx % 10 == 0:\n print('Generated to {}...'.format(offset))\n\n thisWrite = min(writeBlockSize, toWrite)\n\n vals = np.arange(offset, offset + thisWrite, dtype=np.uint64)\n\n data[offset:offset + thisWrite] = vals\n\n toWrite -= thisWrite\n offset += thisWrite\n idx += 1\n data.flush()\n\n if not concat: maxBufSize = -1\n else: maxBufSize = 8 * min(16777216, nelems // 50)\n\n compress(datafile, filename, maxBufSize)\n\n end = time.time()\n del data\n os.remove(datafile)\n\n print('Done in {:0.2f} seconds'.format(end - start))", "def test_large_import(self):\n # the original file (from the IDD) is a previous version of the file from\n # the data server for the gp03flmb platform\n self.create_sample_data_set_dir('node59p1_orig.dat', TELEM_DIR, 'node59p1.dat')\n self.assert_initialize()\n # one bad sample in here:\n # PH1236501_01D5u51F361E0_EC_162E has non ascii bytes at the end and is missing \\r\n result = self.data_subscribers.get_samples(DataParticleType.CONTROL, 1, 60)\n result = self.data_subscribers.get_samples(DataParticleType.SAMPLE, 49, 100)\n\n # this file is the more recent file off the data server for gp03flmb/d00001\n # this file appends more data to that in node59p1_orig\n self.create_sample_data_set_dir('node59p1.dat', TELEM_DIR)\n # several bad samples in here:\n # PH1236501_01D5u521208B4_A1_D274 doesn't have enough bytes (469 not 470)\n # PH1236501_01D5u52461BDC_CF_55BD doesn't have enough bytes (469 not 470)\n # PH1236501_01D5u5266BCF1_DA_6466 doesn't have enough bytes (469 not 470)\n # PH1236501_01DAu5288AF85_C9_7365, PH1236501_01DAu529E1BDF_42_4835\n # have extra bytes after the sample, not an error anymore\n # PH1236501_01D5u52B090DA_BA_8CC1 doesn't have enough bytes (469 not 470)\n # PH1236501_01DAu52B38839_BB_4134, PH1236501_01DAu52C8F493_34_3FC2\n # PH1236501_01DAu52ECE16B_79_F727, PH1236501_01DAu53024DC6_F2_7EC9 \n # have extra bytes after sample, not an error anymore\n result = self.data_subscribers.get_samples(DataParticleType.SAMPLE, 751, 430)", "def data_feeder_2():\n return random.sample(range(100), 10)" ]
[ "0.80933756", "0.56880844", "0.54426545", "0.54172146", "0.53790885", "0.5184654", "0.5158563", "0.51489717", "0.51385695", "0.5079167", "0.5074281", "0.5022694", "0.5019261", "0.5008377", "0.5006981", "0.498719", "0.49849373", "0.49766207", "0.49589202", "0.49365693", "0.4929201", "0.49213076", "0.4900595", "0.4889536", "0.4876637", "0.48729977", "0.4859017", "0.48453012", "0.48436934", "0.4828344" ]
0.79087734
1
This method will allow the user to listen for a specific packet and then respond with a given message. If no listening packet is included, the method will only listen for the ID and respond with the specified packet when it receives a message from that ID. This process will continue for the given amount of time (in seconds), and with each message received that matches the listenPacket and ID, the transmit message will be sent I{repeats} times at the specified I{period}. This method assumes a packet length of 8 for both messages, although the listenPacket can be None
def packetRespond(self,freq, time, repeats, period, responseID, respondPacket,listenID, listenPacket = None): self.client.serInit() self.spitSetup(freq) #formulate response packet SIDhigh = (responseID >> 3) & 0xFF; # get SID bits 10:3, rotate them to bits 7:0 SIDlow = (responseID & 0x07) << 5; # get SID bits 2:0, rotate them to bits 7:5 #resPacket[0] = SIDhigh #resPacket[1] = SIDlow resPacket = [SIDhigh, SIDlow, 0x00,0x00, # pad out EID regs 0x08, # bit 6 must be set to 0 for data frame (1 for RTR) # lower nibble is DLC respondPacket[0],respondPacket[1],respondPacket[2],respondPacket[3],respondPacket[4],respondPacket[5],respondPacket[6],respondPacket[7]] #load packet/send once """@todo: make this only load the data onto the chip and not send """ self.client.txpacket(resPacket) self.addFilter([listenID,listenID,listenID,listenID, listenID, listenID]) #listen only for this packet startTime = tT.time() packet = None while( (tT.time() - startTime) < time): packet = self.client.rxpacket() if( packet != None): print "packet read in, responding now" # assume the ids already match since we are filtering for the id #compare packet received to desired packet if( listenPacket == None): # no packets given, just want the id for i in range(0,repeats): self.client.MCPrts(TXB0=True) tT.sleep(period/1000) else: #compare packets sid = ord(packet[0])<<3 | ord(packet[1])>>5 print "standard id of packet recieved: ", sid #standard ID msg = "" for i in range(0,8): idx = 5 + i byteIn = ord(packet[idx]) msg += " %d" %byteIn compareIn = listenPacket[i] print byteIn, compareIn if( byteIn != compareIn): packet == None print "packet did not match" break print msg if( packet != None ): self.client.MCPrts(TXB0=True) tT.sleep(period/1000) print "Response Listening Terminated."
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def listen_for(self, packet_id, timeout, payload_pattern=None):\n\n time_left = timeout\n response = ICMPPacket()\n while time_left > 0:\n raw_received, address, time_left = self.socket.receive_packet(time_left)\n\n if raw_received != b'':\n response.unpack(raw_received)\n\n if response.identifier == packet_id and response.message_type != Types.EchoRequest.type_id:\n if payload_pattern is None:\n payload_matched = True\n else:\n payload_matched = (payload_pattern == response.payload)\n\n if payload_matched:\n return Response(Message('', response, address[0]), (timeout - time_left))\n\n return Response(None, timeout)", "def datagramReceived(self, datagram_, address):\n #if DEBUG: print \"Datagram received from \"+ repr(address) \n datagram = simplejson.loads(datagram_)\n if not hasattr(datagram,'keys'):\n if DEBUG: print \"unknown UDP message:\\n\", datagram\n pdb.set_trace()\n return\n if 'loop_started' in datagram.keys():\n return\n if 'shotnumber_started' in datagram.keys():\n #dc.get('_exp_sync').shotnumber = datagram['shotnumber_started']\n #return\n self.server.pxi_time = float(datagram['time'])\n self.server.pxi_time_server_time = float(datagram['time']) - float(time.time())#Make this so that it synchronizes the clocks CP\n\n msg = {\"data_context\": 'PXI',\n \"shotnumber\":datagram['shotnumber_started']}\n msg = simplejson.dumps(msg, ensure_ascii = False).encode('utf8')\n self.server.broadcast(msg) \n if DEBUG: print datagram\n \n self.server.active_parser_ip = datagram['server_ip_in_charge']#Make this so that it synchronizes the clocks CP\n self.server.active_parser_port = datagram['server_port_in_charge']#Make this so that it synchronizes the clocks CP\n dc = self.server.command_library.__determineContext__({'data_context':'PXI'}) \n if not dc.dict.has_key('_exp_sync'):\n exp_sync = sync.Experiment_Sync_Group(self.server, dc.name)\n dc.update({'_exp_sync':exp_sync})\n dc.get('_exp_sync').shotnumber = int(datagram['shotnumber_started'])\n print \"Shot started:\", datagram['shotnumber_started'], \"pxi_time:\", self.server.pxi_time, \"time.time():\", float(time.time())\n return\n \n \n if 'fake_shotnumber_started' in datagram.keys():\n if self.server.ip == '10.1.1.124':\n return\n print datagram\n msg = {\"data_context\": datagram['data_context'],\n \"shotnumber\":datagram['fake_shotnumber_started']}\n msg = simplejson.dumps(msg, ensure_ascii = False).encode('utf8')\n self.server.broadcast(msg) \n dc = self.server.command_library.__determineContext__(datagram) \n if not dc.dict.has_key('_exp_sync'):\n exp_sync = sync.Experiment_Sync_Group(self.server, dc.name)\n dc.update({'_exp_sync':exp_sync})\n dc.get('_exp_sync').shotnumber = int(datagram['fake_shotnumber_started'])\n if DEBUG: print \"Fake Shot started:\", datagram['fake_shotnumber_started'], \"pxi_time:\", datagram['time'], \"time.time():\", float(time.time())\n dc.update({'Test_instrument':glab_instrument.Glab_Instrument(params={'server':self.server,'create_example_pollcallback':True})})\n return\n \n try:\n datagram[\"server_ping\"] \n except KeyError:\n if DEBUG: print \"unknown UDP message:\\n\", datagram\n return\n ping_command = commands.ServerCommand(self.server, self.server.catch_ping, datagram)\n self.server.command_queue.add(ping_command)", "def listen(self):\n while not self.stop.is_set():\n rlist, _, _ = select([self.udpsock], [], [], LISTEN_TIMEOUT)\n if self.udpsock in rlist:\n try:\n data, addr = self.udpsock.recvfrom(4096)\n if should_respond(data.decode(\"ascii\")):\n response_str = self.response.format(\n 
strftime(\"%a, %d %b %Y %H:%M:%S GMT\", gmtime())\n )\n self.udpsock.sendto(\n bytearray(response_str, \"ascii\"),\n addr\n )\n except Exception as e:\n logging.error(e)\n self._shutdown()", "def run(self, match_payloads=False):\n\n self.responses.clear_responses()\n\n identifier = self.seed_id\n sequence = 1\n for payload in self.provider:\n payload_bytes_sent = self.send_ping(identifier, sequence, payload)\n\n if not match_payloads:\n self.responses.append(self.listen_for(identifier, self.timeout))\n else:\n self.responses.append(self.listen_for(identifier, self.timeout, payload_bytes_sent))\n\n sequence = self.increase_seq(sequence)", "def __call__(self):\n hub.sleep(random.randint(1, self.interval))\n while True:\n self.send_req()\n self.reply_pending = True\n hub.sleep(self.interval)\n if self.reply_pending:\n self.no_response()", "def sendData(packet: FrameStruct, repeats: int) -> NoReturn:\n ftype = b'\\x08\\x00'\n dur = b'\\x00\\x00'\n # random hex stream, could be used as additional space of bits\n src = b'\\x08\\x00\\x27\\x8e\\x75\\x44'\n # broadcast address is used to stop certain drivers retransmitting frames\n dst = b'\\xff\\xff\\xff\\xff\\xff\\xff'\n bssid = src\n # semi unique id, annoyingly not usable due to lack of bits for this appli\n sn = (random.randint(0, 4096))\n sn = sn << 4\n seq = sn.to_bytes(4, 'little')\n\n # generate 80211 header\n header80211 = ftype + dur + dst + src + bssid + seq\n\n # combine header with other data to create valid frame\n data = globaldat.RADIO_TAP + header80211 + b\"\\x72\\x6f\\x62\\x6f\\x74\" + \\\n packet # attach radiotap headers, 80211 headers and yodel payload\n #globaldat.bytesPrint(data)\n #print(repeats)\n for i in range(repeats): # re-transmmit message a couple times\n globaldat.yodelSocket.send(data) # send the data", "def _send_packet(self, pkt, port):\n self._open_socket()\n\n for attempt in range(self.retries):\n if attempt and pkt.code == PacketCode.ACCOUNTING_REQUEST:\n if \"Acct-Delay-Time\" in pkt:\n pkt[\"Acct-Delay-Time\"] = \\\n pkt[\"Acct-Delay-Time\"][0] + self.timeout\n else:\n pkt[\"Acct-Delay-Time\"] = self.timeout\n\n now = time.time()\n waitto = now + self.timeout\n\n self._socket.sendto(pkt.create_raw_request(), (self.server, port))\n\n while now < waitto:\n ready = self._poll.poll((waitto - now) * 1000)\n\n if ready:\n rawreply = self._socket.recv(4096)\n else:\n now = time.time()\n continue\n\n try:\n reply = pkt.create_reply(packet=rawreply)\n if pkt.verify_reply(reply, rawreply):\n return reply\n except PacketCode.PacketError:\n pass\n\n now = time.time()\n\n raise Timeout", "def receive(self, packet, time):\n raise NotImplementedError", "def _send(self,msg):\n attempts = 3\n while attempts > 0:\n self.sock.sendto(msg, self.ip_port)\n ready = select.select([self.sock], [], [], self.timeout)\n if ready[0]:\n data, ip_port = self.sock.recvfrom(60)\n if ip_port != self.ip_port: continue\n return decode(data)\n attempts -= 1\n print(\"Retrying send\")\n return None", "def listenRtp(self):\r\n\t\twhile True:\r\n\t\t\tstartTime = time()\r\n\t\t\tdata, address = self.rtpSocket_client.recvfrom(16384)\r\n\t\t\tendTime = time()\r\n\r\n\t\t\tif (data):\r\n\t\t\t\tself.recvRtpPacket.decode(data)\r\n\t\t\t\tself.cacheFile = self.writeFrame(self.recvRtpPacket.getPayload())\r\n\t\t\t\tself.updateMovie(self.cacheFile)\r\n\r\n\t\t\t\tcurrentFrameNbr = self.recvRtpPacket.seqNum()\r\n\t\t\t\tcurrent = self.totalTime - 0.05 * currentFrameNbr\r\n\t\t\t\tcurrMin = current / 60\r\n\t\t\t\tcurrSec = current % 
60\r\n\t\t\t\t\r\n\t\t\t\tself.progress['value'] = 0.05 * currentFrameNbr\r\n\r\n\t\t\t\tif currMin < 10:\r\n\t\t\t\t\tself.time.configure(text=\"Time Left: 0%d:%d\" % (currMin, currSec), width=12, heigh=2)\r\n\t\t\t\t\tif currSec < 10:\r\n\t\t\t\t\t\tself.time.configure(text=\"Time Left: 0%d:0%d\" % (currMin, currSec), width=12, heigh=2)\r\n\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.time.configure(text=\"Time Left: %d:%d\" % (currMin, currSec), width=12, heigh=2)\r\n\t\t\t\t\tif currSec < 10:\r\n\t\t\t\t\t\tself.time.configure(text=\"Time Left: %d:0%d\" % (currMin, currSec), width=12, heigh=2)\r\n\r\n\t\t\t\tself.networkStat.receivedPacketCount += 1\r\n\t\t\t\tself.networkStat.totalADR += (sys.getsizeof(data) / (endTime - startTime))\r\n\t\t\t\r\n\t\t\telse:\r\n\t\t\t\tcontinue", "def _schedule_send_in_order(self, rudp_packet, timeout):\n final_packet = self._finalize_packet(rudp_packet)\n seqnum = rudp_packet.sequence_number\n timeout_cb = REACTOR.callLater(0, self._do_send_packet, seqnum)\n self._sending_window[seqnum] = self.ScheduledPacket(\n final_packet,\n timeout,\n timeout_cb,\n 0\n )", "def _on_message(self, packet: Packet, channel_id: str):\n live_run = self.get_live_run()\n # TODO(#102) this method currently assumes that the packet's subject_id will\n # always be a valid agent in our list of agent_infos. This isn't always the case\n # when relaunching with the same URLs.\n with PACKET_PROCESSING_LATENCY.labels(packet_type=packet.type).time():\n if packet.type == PACKET_TYPE_SUBMIT_ONBOARDING:\n self._on_submit_onboarding(packet, channel_id)\n elif packet.type == PACKET_TYPE_SUBMIT_UNIT:\n self._on_submit_unit(packet, channel_id)\n self.log_metrics_for_packet(packet)\n self.last_submission_time = time.time()\n elif packet.type == PACKET_TYPE_SUBMIT_METADATA:\n self._on_submit_metadata(packet)\n elif packet.type == PACKET_TYPE_MEPHISTO_BOUND_LIVE_UPDATE:\n update_id = packet.data.get(\"update_id\")\n if update_id is not None and update_id in self.seen_update_ids:\n return # Processing duplicated packet\n self._on_live_update(packet, channel_id)\n self.log_metrics_for_packet(packet)\n self.seen_update_ids.add(update_id)\n elif packet.type == PACKET_TYPE_REGISTER_AGENT:\n self._register_agent(packet, channel_id)\n elif packet.type == PACKET_TYPE_RETURN_STATUSES:\n # Record this status response\n live_run.worker_pool.handle_updated_agent_status(packet.data)\n self.log_metrics_for_packet(packet)\n elif packet.type == PACKET_TYPE_ERROR:\n self._log_frontend_error(packet)\n self.log_metrics_for_packet(packet)\n else:\n # PACKET_TYPE_REQUEST_STATUSES, PACKET_TYPE_ALIVE,\n # PACKET_TYPE_CLIENT_BOUND_LIVE_UPDATE, PACKET_TYPE_AGENT_DETAILS\n raise Exception(f\"Unexpected packet type {packet.type}\")", "def receive_one_ping(mySocket, myID, timeout):\n timeLeft = timeout/1000\n\n while True: # Loop enquanto aguarda o pacote ou o timeout \n startedSelect = default_timer() # Essa função retorna o tempo de espera junto com o tempo da CPU e depende da plataforma. \n\n whatReady = select.select([mySocket], [], [], timeLeft)\n howLongInSelect = (default_timer() - startedSelect)\n if whatReady[0] == []: # timeout\n return None, 0, 0, 0, 0\n\n timeReceived = default_timer() # Essa função retorna o tempo de espera junto com o tempo da CPU e depende da plataforma. \n\n \"\"\"\n Receba dados do soquete. 
O valor de retorno é um par (bytes, endereço) em que bytes é um objeto de bytes que representa\n os dados recebidos e endereço é o endereço do soquete que envia os dados \n \"\"\"\n recPacket, addr = mySocket.recvfrom(ICMP_MAX_RECV) \n\n \"\"\"\n struct.unpack( fmt , string ) \n Descompacte a sequência (presumivelmente empacotada por ) de acordo com o formato fornecido. \n O resultado é uma tupla, mesmo que contenha exatamente um item.\n \"\"\"\n ipHeader = recPacket[:20]\n iphVersion, iphTypeOfSvc, iphLength, \\\n iphID, iphFlags, iphTTL, iphProtocol, \\\n iphChecksum, iphSrcIP, iphDestIP = struct.unpack(\n \"!BBHHHBBHII\", ipHeader\n )\n\n icmpHeader = recPacket[20:28]\n icmpType, icmpCode, icmpChecksum, \\\n icmpPacketID, icmpSeqNumber = struct.unpack(\n \"!BBHHH\", icmpHeader\n )\n\n if icmpPacketID == myID:\n dataSize = len(recPacket) - 28\n # retorna o tempo de resposta, o tamanho dado, o ping pingado, o numero de seq, id e timeout \n return timeReceived, (dataSize+8), iphSrcIP, icmpSeqNumber, iphTTL\n\n timeLeft = timeLeft - howLongInSelect\n if timeLeft <= 0:\n return None, 0, 0, 0, 0 # retorna nada ", "def open_service_loop(self):\n\t\n\tprint \"Attempting to receive file\", self.file_read, \"from\", self.ip, \"at port\", self.port, \".\" \n\trecv_data = None\n\tnum_retransmits = 0\n\t#Start timer, retransmit after each timeout of one second. If receive response within the timer, move on to next step. \n\t#Limit number of retransmits to 60 so as not to enter infinite loop.\n\twhile(num_retransmits < 60):\n\t num_retransmits += 1\n\t self.send_open_request()\n\n\t input_socket = [self.client_socket]\n\t inputready,outputready,exceptready = select.select(input_socket,[],[], 1)\n\t #if timer expires without input becoming ready, empty list is returned. So go to next iteration of loop (retransmit)\n\t if (inputready == []):\n\t\tcontinue\n\t else:\n\t\ttry:\n\t\t recv_data = self.client_socket.recv(self.buffer_)\n\t\texcept Exception as exception_:\n\t\t print(\"Wrong port number or IP address provided, or server is not available at the moment.\")\n\t\t sys.exit()\n\t\tprint(\"Received a packet.\")\n\t\t\n\t\t#Generate a random number between 0 and 1 with uniform distribution to simulate packet loss.\n\t\tif (random.uniform(0,1) < self.p):\n\t\t recv_data = None\n\t\t print(\"Packet dropped randomly to simulate packet losses\")\n\t\t continue\n\t\t\n\t\tbit_signature = recv_data[0:4]\n\t\tresponse_type = recv_data[4:8]\n\t\trecv_payload = recv_data[8:]\n\n\t\t#Check that bit signature is valid (packet is from our network)\n\t\tif bit_signature != \"\\x00\\x00\\x00\\r\": \n\t\t recv_invalid_response(recv_data, \"bit_signature\")\n\t\t continue\n\t\telse:\n\t\t #We have only ever sent a open_request, so the only viable response at this point is an open_response. \n\t\t #If this field contains anything else, it is an invalid packet. Retransmit request.\n\t\t if response_type != \"\\x00\\x00\\x00\\x08\": \n\t\t\tself.recv_invalid_response(recv_data, \"response_type\")\n\t\t\tcontinue\t\t\n\t\t else:\n\t\t\t#Bit signature and response type fields are both valid.\n\t\t\tprint(\"Received open response from server...\")\n\t\t\tself.recv_open_response(recv_payload)\n\t\t\tbreak\n\t\n\tif (num_retransmits >= 60):\n\t print (\"Exceeded number of retransmissions allowed. 
Exiting program.\")\n\t sys.exit()\t\n\treturn", "def loop_udp(imu, poll_interval, fields):\n config = configparser.ConfigParser()\n config.read('config.ini')\n host = str(config['CLIENTUDP']['HOST'])\n port = int(config['CLIENTUDP']['PORT'])\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n printCounter = 0\n while True:\n\n if imu.IMURead():\n data = imu.getIMUData()\n selected_data = [data.get(key) for key in fields]\n sock.sendto(str(selected_data), (host, port))\n\n if printCounter % 100 == 0:\n print selected_data\n\n time.sleep(poll_interval*1.0/1000.0)\n printCounter += 1", "def run(self): \n \n while self.event.is_set():\n \n if self.isEnabled:\n \n try:\n \n # Get and clear all the messages from the listener \n buffered_messages = self.a_listener.get_messages() \n self.a_listener.clear_messages()\n \n #grabs the unique ID's \n unique_ids = list({m.arbitration_id for m in buffered_messages}) \n \n #iterates through the ID's and gets the first instance of each unique one\n for i in unique_ids:\n loop_msg = next( obj for obj in buffered_messages if obj.arbitration_id == i)\n date_str = loop_msg.timestamp\n if date_str not in self.unique_messages:\n self.unique_messages[date_str] = []\n \n # store the unique messages in a dictionary with timestamp index\n self.unique_messages[date_str].append(loop_msg) \n \n except:\n print(\"Unexpected error:\", sys.exc_info()[0])\n self.ReEstablishConnection()\n \n time.sleep(self.timer)", "def poll_data(self):\n with s.socket(s.AF_INET, s.SOCK_DGRAM) as sock:\n sock.bind(('', self.__port))\n while True:\n message, address = sock.recvfrom(1024)\n self.__address = address\n logging.debug('Received: {}'.format(message))\n self.process_data(message)", "def directive_send(arb_id, payload, response_handler):\n arb_id = \"0x\" + arb_id\n send_msg = payload_to_str_base(payload)\n with CanActions(int_from_str_base(arb_id)) as can_wrap:\n # Send the message on the CAN bus and register a callback\n # handler for incoming messages\n can_wrap.send_single_message_with_callback(list_int_from_str_base(send_msg), response_handler)\n # Letting callback handler be active for CALLBACK_HANDLER_DURATION seconds\n sleep(CALLBACK_HANDLER_DURATION)\n # can_wrap.clear_listeners()", "def run(self):\r\n waiting_packet = None\r\n while True:\r\n if waiting_packet is not None:\r\n packet = waiting_packet\r\n waiting_packet = None\r\n else:\r\n packet = yield self.buffer.get()\r\n self.channel.add_sender(self)\r\n yield self.env.timeout(packet.size/self.service_rate)\r\n self.channel.remove_sender(self)\r\n packet.output_timestamp= env.now\r\n if self.destination is None:\r\n self.packet_list.append(packet)\r\n if (not self.collision):\r\n if self.destination is not None:\r\n self.destination.put(packet)\r\n self.channel.packet_list.append(packet)\r\n else:\r\n if self.debug:\r\n print(\"Packet %d is discarded. 
Reason: Collision\" \r\n % (packet.id))\r\n self.packets_drop += 1\r\n waiting_packet = packet\r\n self.collision = False\r\n yield self.env.timeout(self.random_delay())", "def receive_one_ping(my_socket, id, timeout):\n time_left = timeout\n while True:\n started_select = time.time()\n what_ready = select.select([my_socket], [], [], time_left)\n how_long_in_select = (time.time() - started_select)\n if what_ready[0] == []: # Timeout\n return\n\n time_received = time.time()\n rec_packet, addr = my_socket.recvfrom(1024)\n icmp_header = rec_packet[20:28]\n icmp_type, code, checksum, packet_id, sequence = struct.unpack(\n \"bbHHh\", icmp_header\n )\n # Filters out the echo request itself. \n # This can be tested by pinging 127.0.0.1 \n # You'll see your own request\n if icmp_type != 8 and packet_id == id:\n bytes_in_double = struct.calcsize(\"d\")\n time_sent = struct.unpack(\"d\", rec_packet[28:28 + bytes_in_double])[0]\n return time_received - time_sent\n\n time_left = time_left - how_long_in_select\n if time_left <= 0:\n return", "async def packets(self):\n\n async def registrator_task(sock):\n while True:\n try:\n await self._send(sock, \"reglistener\")\n _LOGGER.info(\n \"Registered self as listener for device at %s\",\n self._address,\n )\n except OSError: # e.g. Network is unreachable\n # just retry\n _LOGGER.warning(\"Could not send registration packet\")\n pass\n await asyncio.sleep(REGISTRATION_INTERVAL.seconds)\n\n with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.bind((\"\", COMMAND_PORT))\n sock.setblocking(0)\n loop = asyncio.get_event_loop()\n loop.create_task(registrator_task(sock))\n while True:\n try:\n response, address = await sock_recvfrom(sock, 1024)\n _LOGGER.debug(\"Got packet from %s\", address)\n if address == self._address:\n yield response.decode(\"ascii\")\n else:\n _LOGGER.warning(\n \"Got unknown response from %s: %s\",\n address,\n response,\n )\n except OSError as e:\n _LOGGER.warning(\"Could not receive from socket: %s\", e)", "def IRC_send_called_every_three_seconds(self):\n\n if (self.ircMessageBuffer):\n try:\n # print(\"Buffered\")\n stringToSend = str(self.ircMessageBuffer.popleft())\n print(\"string to send : \" + stringToSend)\n if self.ircSocket:\n self.ircSocket.send((stringToSend).encode('utf8'))\n except Exception as e:\n logging.error(\"IRC send error:\")\n logging.error(\"In IRCSendCalledEveryThreeSeconds\")\n logging.error(str(e))\n logging.exception(\"Exception : \")", "def ListenToMessage(self, expectedTone, interrupt=1, duration=0, promptDuration=25):\n self.CheckMessage(expectedTone, interrupt, duration, promptDuration)\n self.getMailBoxDN().MessageRetrieved()\n for owner in self.getMailBoxDN().owners:\n owner.CheckMWI()", "def sendAndReceive(self, request):\n count = 0\n while count < 100: # 5 seconds\n try:\n count += 1\n self.sock.sendto(request, self.server_addr)\n reply, _ = self.sock.recvfrom(1024)\n return reply\n except:\n pass", "def REP_watcher(self):\n while True:\n sleep(self.patience) # how often to check\n try:\n recent_REQ_sent_time = self.REQ_sent_time.popleft()\n # if we got here; we have a recent_REQ_sent_time\n sleep(1.0) # allow time for receipt of a REP\n try:\n recent_REP_recd_time = self.REP_recd_time.popleft()\n except IndexError: # there was a REQ, but no REP was received\n self.fix_comm_link()\n # if we got here; we have a recent_REP_recd_time\n interval = recent_REP_recd_time - recent_REQ_sent_time\n if interval.total_seconds() <= 
0.0:\n # recent_REP_recd_time is not later than recent_REQ_sent_time\n self.fix_comm_link()\n except IndexError: # there wasn't a time in REQ_sent_time\n # so there is no REP expected,\n # ... so continue to loop until there is a time in REQ_sent_time\n pass", "def receive_ping(my_socket, ID, timeout):\n start_time = timeout\n while True:\n start_select = time.clock()\n # select.select(rlist, wlist, xlist[, timeout])\n # wait until ready for read / write / exceptional condition\n # The return value is a triple of lists\n what_ready = select.select([my_socket], [], [], start_time)\n how_long = (time.clock() - start_select)\n if what_ready[0] == []: #timeout\n return\n\n time_received = time.clock()\n # socket.recvfrom(bufsize[, flags])\n # The return value is a pair (string, address)\n rec_packet, addr = my_socket.recvfrom(1024)\n icmp_header = rec_packet[20 : 28]\n ip_type, code, checksum, packet_ID, sequence = struct.unpack(\"bbHHh\", icmp_header)\n if ip_type != 8 and packet_ID == ID: # ip_type should be 0\n byte_in_double = struct.calcsize(\"d\")\n time_sent = struct.unpack(\"d\", rec_packet[28 : 28 + byte_in_double])[0]\n return time_received - time_sent\n\n start_time = start_time - how_long\n if start_time <= 0:\n return", "def _do_send_packet(self, seqnum):\n sch_packet = self._sending_window[seqnum]\n if sch_packet.retries >= constants.MAX_RETRANSMISSIONS:\n self.shutdown()\n else:\n self._proto.send_datagram(sch_packet.rudp_packet, self.relay_addr)\n sch_packet.timeout_cb = REACTOR.callLater(\n sch_packet.timeout,\n self._do_send_packet,\n seqnum\n )\n sch_packet.retries += 1\n self._cancel_ack_timeout()", "def voip(self):\n logging.debug(\n \">>> VoIP started for '%s'\" % self.session_key\n )\n while True:\n try:\n talk_data = self.talk_queue.get(True)\n except Queue.Empty:\n pass\n else:\n if not self.participants:\n logging.debug(\n \">>> Data present, but nobody is listening: '%s'\" % talk_data.data\n )\n for conn in self.participants:\n if conn != talk_data.address:\n logging.debug(\">>> Sending data '%s' from %s to %s\" % (talk_data.data, talk_data.address, conn))\n try:\n self.udp_server.sendto(talk_data.data, conn)\n except TypeError:\n logging.exception(\">>> Invalid data?: '%s'\" % talk_data.data)\n logging.debug(\n \">>> VoIP ended for '%s'\" % self.session_key\n )", "def tcp_server_thread(id, tcpPort):\n global server\n \n # TCP Connection\n conn,adrr = server.accept()\n received_time_stamp = struct.unpack('!f', conn.recv(4096))[0]\n \n exchange = daemon_thread_builder(CalcDelay, args=(received_time_stamp, id,tcpPort ))\n exchange.start()\n exchange.join()\n\n pass", "def REP_watcher():\n global REQ_sent_time, REP_recd_time, pid, patience_seconds\n while True:\n time.sleep(patience_seconds) # how often to check\n try:\n recent_REQ_sent_time = REQ_sent_time.popleft()\n # if we got here; we have a recent_REQ_sent_time\n time.sleep(patience_seconds) # allow time for receipt of the REP\n try:\n recent_REP_recd_time = REP_recd_time.popleft()\n # if we got here; we have a recent_REP_recd_time\n interval = recent_REP_recd_time - recent_REQ_sent_time\n if interval.total_seconds() <= 0.0:\n # recent_REP_recd_time is not later than recent_REQ_sent_time\n print('After image send in REP_watcher test,')\n print('No REP received within', patience_seconds, 'seconds.')\n print('Ending sending program.')\n os.kill(pid, signal.SIGTERM)\n pass\n continue # Got REP after REQ so continue to next REQ\n except IndexError: # there was a REQ, but no timely REP\n print('After image send in 
REP_watcher test,')\n print('No REP received within', patience_seconds, 'seconds.')\n print('Ending sending program.')\n os.kill(pid, signal.SIGTERM)\n pass\n except IndexError: # there wasn't a time in REQ_sent_time\n # so there is no REP expected,\n # ... continue to loop until there is a time in REQ_sent_time\n pass" ]
[ "0.60647124", "0.55641264", "0.5346171", "0.52303034", "0.5217701", "0.51774013", "0.51698893", "0.5100064", "0.5037555", "0.4994777", "0.49564165", "0.49480218", "0.49438927", "0.49380833", "0.49273694", "0.49032032", "0.4898641", "0.48416683", "0.48128486", "0.47973782", "0.4784589", "0.47640234", "0.4748435", "0.47392052", "0.47339705", "0.4724216", "0.47240233", "0.47172308", "0.47036228", "0.46674287" ]
0.7383639
0
Derive the file name of a model-specific config file
def modelconfigfile(modelfile): return os.path.splitext(modelfile)[0] + '.vars'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_model_filename(config):\n base = os.path.splitext(config['corpus'])[0]\n return '%s--%dT.model' % (base, config['T'])", "def _setupFilename(self):\n try:\n os.mkdir('./.netModel')\n except:\n pass # hope it's already there...\n filenames = os.listdir('./.netModel')\n configNum = 1\n i = 0\n configNumString = '%(c)04d' % {'c':configNum}\n while i < len(filenames):\n configNumString = '%(c)04d' % {'c':configNum}\n if (filenames[i][:4]==configNumString):\n configNum += 1\n i = 0\n else:\n i += 1\n return os.path.realpath('.')+'/.netModel/'+configNumString", "def configFilename(self):\n return self.name()+'.py'", "def filename(self):\n return self.config.get('filename', self.id) + f'_{self.file_suffix}'", "def get_config_file_name(self):\n argv = sys.argv\n config_type = \"dev\" # default configuration type\n if None != argv and len(argv) > 1 :\n config_type = argv[1]\n config_file = config_type + \".cfg\"\n logger.info(\"get_config_file_name() return : \" + config_file)\n return config_file", "def get_config_filepath(config: configs.Config) -> str:\n return os.path.join(config.model_training.dir_out, configs.DEFAULT_FILENAME_CONFIG)", "def filename():\n\tglobal _cfgfn\n\treturn _cfgfn", "def config_file_name(self):\n return self._config_file_name", "def getGenericConfigFileName(self):\n executePkgDir = lsst.utils.getPackageDir('ctrl_execute')\n\n name = \"config_with_%s.py.template\" % self.setup_using\n genericConfigName = os.path.join(executePkgDir,\n \"etc\", \"templates\", self.manager, name)\n if os.path.exists(genericConfigName):\n return genericConfigName\n raise RuntimeError(\"File %s not found; check etc/templates.\" %\n genericConfigName)", "def _make_config_file_name(environment, out=False):\n return os.path.join(PH_HOME_DIR, \"etc/config\", \"%s.conf\" % environment) if out else \\\n os.path.join(PH_HOME_DIR, \"config\", \"%s.conf.in\" % environment)", "def get_model_name(file_path_model):\n\n tmp = parse_file_path(file_path_model)[1]\n model_name = tmp[:len(tmp) - len('.h5')]\n\n return model_name", "def get_model_filepath(config: configs.Config) -> str:\n return os.path.join(config.model_training.dir_out, models.DEFAULT_FILENAME_MODEL)", "def GetModelName(filename, model):\n\n is_srn_model = translator.IsSrnModel(model)\n if(is_srn_model):\n model_name = filename + \"SrnModel\"\n else:\n model_name = filename + \"CellCycleModel\"\n\n return model_name", "def getConfigFileName(self):\n return self._configFileName", "def get_conf_filename (self, directory):\n return os.path.join(directory, \"_%s_configdata.py\" % self.get_name())", "def filename(self):\n return f'{self._peer.interface}.conf'", "def filename(self):\n # Just the name of the file\n filename = self.use_name\n if self.extension:\n filename = \"{0}.{1}\".format(self.use_name, self.extension)\n # Architecture sub-folder\n arch_folder_conf = spack.config.get(\"modules:%s:arch_folder\" % self.conf.name, True)\n if arch_folder_conf:\n # include an arch specific folder between root and filename\n arch_folder = str(self.spec.architecture)\n filename = os.path.join(arch_folder, filename)\n # Return the absolute path\n return os.path.join(self.dirname(), filename)", "def extractFileName(fileType, modelName, modelVersion, modelState):\n fileName = '{}_{}_{}'.format(modelName, modelVersion, fileType) if modelState == 'national' else '{}_{}_{}_{}'.format(modelName, modelVersion, modelState, fileType)\n return fileName", "def _get_file_name(name: types.TSeedName) -> str:\n return f\"{name}.yml\"", "def 
_getConfigName(self):\n pass", "def _get_config_fname():\n directory = _get_vispy_app_dir()\n if directory is None:\n return None\n fname = op.join(directory, 'vispy.json')\n if os.environ.get('_VISPY_CONFIG_TESTING', None) is not None:\n fname = op.join(_TempDir(), 'vispy.json')\n return fname", "def _get_filename():\n dirname = os.path.dirname(__file__)\n return os.path.join(dirname, 'occulttraining.txt')", "def id(self):\n if settings.env_root:\n retpath = self.filename[len(settings.cases_dir):]\\\n .lstrip(os.path.sep)\n base = os.path.splitext(retpath)[0]\n else:\n base = os.path.splitext(os.path.basename(self.filename))[0]\n return base.replace(os.path.sep, '.')", "def get_model_name_from_raw_file(yaml_file: str) -> str:\n pattern = re.compile(r'^model:\\s*(?P<model>\\w+)')\n entries = find_all_entries(\n yaml_file=yaml_file, pattern=pattern, pattern_keyword='model')\n\n if not entries:\n logging.error(f\"Unable to find the model name in {yaml_file}\")\n entries.append('')\n\n return entries[0]", "def generate_name(config):\n\n name = basename(config.name)\n if config.prepro is not None:\n name += \"_\" + config.prepro\n if config.extract_pos:\n name += \"_pos\"\n return name", "def model_path(model_file):\n return os.path.join(MODEL, model_file)", "def _get_config_filename():\n return 'pylidc.conf' if sys.platform.startswith('win') else '.pylidcrc'", "def _get_image_name(self) -> str:\n dirname = os.path.basename(os.getcwd())\n default_image_name = f\"{dirname}_{self.config_name}\"\n image_name = self.config_options.get(\"image\", default_image_name)\n return image_name", "def config_identifier(converter, model_name):\n return model_name.lower().replace('-', '_') + '_' + converter", "def file_name(self) -> str:\n if self.service == \"all\":\n service_string = \"\"\n else:\n service_string = f\"_{self.service.lower().strip()}\"\n\n if self.no_params:\n return f\"no_params{service_string}.tf\"\n elif self.params_optional:\n return f\"params_optional{service_string}.tf\"\n elif self.params_required:\n return f\"params_required{service_string}.tf\"" ]
[ "0.7968517", "0.75344723", "0.73829776", "0.73329455", "0.7190793", "0.709332", "0.70396525", "0.7021086", "0.7006523", "0.69161147", "0.68957627", "0.6892056", "0.6885437", "0.6775761", "0.6737451", "0.67183787", "0.6651527", "0.65063626", "0.6502298", "0.648453", "0.64746475", "0.64541936", "0.6453348", "0.6433681", "0.64267415", "0.6393122", "0.6392025", "0.638892", "0.63497925", "0.6347392" ]
0.7901898
1
Apply postprocessing `methods` to `preds` of shape (items, classes, time).
def postprocess(preds, methods): for method in methods: if method == 'sigmoid': preds = torch.as_tensor(preds).sigmoid().numpy() else: raise ValueError("Unknown postprocessing method %s" % method) return preds
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process(self, method):\n process_dicts = []\n for d in self.data_dicts:\n dd = copy.deepcopy(d)\n for ap in self.aps:\n dd[ap] = method(d[ap])\n process_dicts.append(dict2str(dd))\n\n # print(process_dicts)\n # print(type(process_dicts[0]))\n return Dataset(process_dicts)", "def apply_method_to_multiple_sinograms(data, method, para, ncore=None,\n prefer=\"threads\"):\n if ncore is None:\n ncore = np.clip(mp.cpu_count() - 1, 1, None)\n else:\n ncore = np.clip(ncore, 1, None)\n if not isinstance(para, list):\n para = tuple(list([para]))\n else:\n para = tuple(para)\n (depth, height, width) = data.shape\n if method in dir(remo):\n method_used = getattr(remo, method)\n elif method in dir(filt):\n method_used = getattr(filt, method)\n elif method in dir(rec):\n method_used = getattr(rec, method)\n else:\n raise ValueError(\"Can't find the method: '{}' in the namespace\"\n \"\".format(method))\n data_out = Parallel(n_jobs=ncore, prefer=prefer)(\n delayed(method_used)(data[:, i, :], *para) for i in range(height))\n data_out = np.moveaxis(np.asarray(data_out), 0, 1)\n return data_out", "def postprocessing(self, postprocessing):\n\n self._postprocessing = postprocessing", "def _post_transform(self):\n # Reclassify strategy post __init__, if needed.\n for (reclassifier, args, kwargs) in self._reclassifiers:\n self.classifier = reclassifier(self.classifier, *args, **kwargs)", "def apply_preprocessing(\n data: List[np.ndarray],\n sample_rate: Union[int, List[int]],\n funcs: List[str],\n *,\n kwargs: dict = {},\n) -> np.ndarray:\n if isinstance(sample_rate, int):\n sample_rate = [sample_rate] * len(data)\n try:\n assert len(data) == len(sample_rate)\n except AssertionError:\n raise ValueError(\n \"The number of sample rates must be the same as the number of data\"\n )\n for func in funcs:\n if hasattr(preprocessing, func):\n preproc: Callable = getattr(preprocessing, func, **kwargs)\n data = p_map(preproc, data, sample_rate,\n desc=f\"Applying {func}...\")\n else:\n raise ValueError(f\"{func} is not a valid preprocessing function\")\n return data", "def _create_methods(self):\n\n logger.debug('call %s presets._create_methods()', self._device.name)\n for preset_type in self._paths.keys():\n add, add_here = self._make_add(preset_type)\n self._register_method(self, 'add_' + preset_type, add)\n self._register_method(self, 'add_here_' + preset_type, add_here)\n for preset_type, data in self._cache.items():\n for name, info in data.items():\n if info['active']:\n mv, umv = self._make_mv_pre(preset_type, name)\n wm = self._make_wm_pre(preset_type, name)\n self._register_method(self._device, 'mv_' + name, mv)\n self._register_method(self._device, 'umv_' + name, umv)\n self._register_method(self._device, 'wm_' + name, wm)\n setattr(self.positions, name,\n PresetPosition(self, preset_type, name))", "def preprocess(self, instances, stats=None, **kwargs):\n pass", "def post_processor(self):", "def _post_process(self, preds) -> List[Dict]:\n if isinstance(preds, tuple):\n dets = preds[0]\n segms = preds[1]\n else:\n dets = preds\n segms = [[]] * len(dets)\n\n classes = self.model.CLASSES\n if isinstance(classes, str):\n classes = (classes, )\n\n assert len(dets) == len(classes)\n assert len(segms) == len(classes)\n\n objects = []\n\n for i, (label, bboxes, masks) in enumerate(zip(classes, dets, segms)):\n\n for bbox, mask in zip_longest(bboxes, masks):\n if bbox[4] < self.bbox_thr:\n continue\n obj = {\n 'class_id': i,\n 'label': label,\n 'bbox': bbox,\n 'mask': mask,\n 'det_model_cfg': self.model.cfg\n }\n 
objects.append(obj)\n\n return objects", "def post_process_wrapper(cls: Type[T]) -> Type[T]:\n\n class _Wrapper(cls):\n def __init__(self, table, *additional_tables, **kwargs):\n postprocessors = kwargs.pop(\"postprocessors\", dict())\n if not hasattr(postprocessors, \"get\"):\n postprocessors = {0: postprocessors}\n for key, value in list(postprocessors.items()):\n value = tuple(\n alias_factory_subclass_from_arg(PostProcessor, postprocessor)\n for postprocessor in value\n )\n postprocessors[key] = value\n self.postprocessors = postprocessors\n postprocess_axis = kwargs.pop(\"postprocess_axis\", -1)\n if not hasattr(postprocess_axis, \"__len__\"):\n postprocess_axis = (postprocess_axis,)\n if not hasattr(postprocess_axis, \"get\"):\n post_dict = dict()\n for key in postprocessors:\n post_dict[key] = postprocess_axis\n postprocess_axis = post_dict\n self.postprocess_axis = postprocess_axis\n super(_Wrapper, self).__init__(table, *additional_tables, **kwargs)\n\n def batch_generator(self, repeat=False):\n subsamples = self.num_sub != 1\n for batch in super(_Wrapper, self).batch_generator(repeat=repeat):\n if subsamples:\n cur_batch = []\n for sub_batch_idx, sub_batch in enumerate(batch):\n for postprocessor, axis in zip(\n self.postprocessors.get(sub_batch_idx, tuple()),\n cycle(self.postprocess_axis.get(sub_batch_idx, tuple())),\n ):\n sub_batch = postprocessor.apply(\n sub_batch, axis=axis, in_place=True\n )\n cur_batch.append(sub_batch)\n yield tuple(cur_batch)\n else:\n for postprocessor, axis in zip(\n self.postprocessors[0], cycle(self.postprocess_axis[0])\n ):\n batch = postprocessor.apply(batch, axis=axis, in_place=True)\n yield batch\n\n _Wrapper.__doc__ = cls.__doc__ + post_process_wrapper.WRAPPED_DATA_DOC\n return _Wrapper", "def process_method_on_list(method_to_run, items):\n all_items = []\n if items is not None:\n if SUPPORTS_POOL:\n pool = ThreadPool()\n try:\n all_items = pool.map(method_to_run, items)\n except Exception:\n # catch exception to prevent threadpool running forever\n log_msg(format_exc(sys.exc_info()))\n log_msg(\"Error in %s\" % method_to_run)\n pool.close()\n pool.join()\n else:\n try:\n all_items = [method_to_run(item) for item in list(items)]\n except Exception:\n log_msg(format_exc(sys.exc_info()))\n log_msg(\"Error in %s with %s\" % method_to_run, items)\n all_items = filter(None, all_items)\n return all_items", "def apply_on_all(seq, method, *args, **kwargs):\n if seq:\n for obj in seq:\n getattr(obj, method)(*args, **kwargs)", "def postprocess(\n self,\n preds: Any,\n visualization: List[np.ndarray],\n return_datasample=False,\n **kwargs,\n ) -> dict:", "def get_optimal_postprocess(loaders=None, runner=None, logdir: str = \"\"):\n loaders[\"infer\"] = loaders[\"valid\"]\n\n runner.infer(\n model=runner.model,\n loaders=loaders,\n callbacks=[\n CheckpointCallback(resume=f\"{logdir}/checkpoints/best.pth\"),\n InferCallback(),\n ],\n )\n valid_masks = []\n probabilities = np.zeros((2220, 350, 525))\n for i, (batch, output) in enumerate(\n zip(loaders[\"infer\"].dataset, runner.callbacks[0].predictions[\"logits\"])\n ):\n image, mask = batch\n for m in mask:\n if m.shape != (350, 525):\n m = cv2.resize(m, dsize=(525, 350), interpolation=cv2.INTER_LINEAR)\n valid_masks.append(m)\n\n for j, probability in enumerate(output):\n if probability.shape != (350, 525):\n probability = cv2.resize(\n probability, dsize=(525, 350), interpolation=cv2.INTER_LINEAR\n )\n probabilities[i * 4 + j, :, :] = probability\n\n class_params = {}\n for class_id in range(4):\n 
print(class_id)\n attempts = []\n for t in range(0, 100, 10):\n t /= 100\n for ms in [\n 0,\n 100,\n 1000,\n 5000,\n 10000,\n 11000,\n 14000,\n 15000,\n 16000,\n 18000,\n 19000,\n 20000,\n 21000,\n 23000,\n 25000,\n 27000,\n 30000,\n 50000,\n ]:\n masks = []\n for i in range(class_id, len(probabilities), 4):\n probability = probabilities[i]\n predict, num_predict = post_process(sigmoid(probability), t, ms)\n masks.append(predict)\n\n d = []\n for i, j in zip(masks, valid_masks[class_id::4]):\n if (i.sum() == 0) & (j.sum() == 0):\n d.append(1)\n else:\n d.append(dice(i, j))\n\n attempts.append((t, ms, np.mean(d)))\n\n attempts_df = pd.DataFrame(attempts, columns=[\"threshold\", \"size\", \"dice\"])\n\n attempts_df = attempts_df.sort_values(\"dice\", ascending=False)\n print(attempts_df.head())\n best_threshold = attempts_df[\"threshold\"].values[0]\n best_size = attempts_df[\"size\"].values[0]\n\n class_params[class_id] = (best_threshold, int(best_size))\n\n print(class_params)\n return class_params", "def multi_label_cls_head__post_process(ctx, self, pred, **kwargs):\n return pred", "def hpost(self, *p):\n raise UnsupportedMethodError('post')", "def _apply_method(self, X, method):\n n_epochs, n_channels, n_times = X.shape\n # trial as time samples\n X = np.transpose(X, [1, 0, 2])\n X = np.reshape(X, [n_channels, n_epochs * n_times]).T\n # apply method\n method = getattr(self.estimator, method)\n X = method(X)\n # put it back to n_epochs, n_dimensions\n X = np.reshape(X.T, [-1, n_epochs, n_times]).transpose([1, 0, 2])\n return X", "def _postprocessor(self) :\n\t\tlogging.debug(\"nning AR post processor\")\n\t\tpass", "def post_processing(\n cfg: CfgNode, y: torch.Tensor, orig_img_size: torch.Tensor, transformed_labels: torch.Tensor\n) -> Tuple[Tuple[List[np.array], List[np.array]], float]:\n post_processing_start_time = time.time()\n pruned_preds_batch = post_process_prediction(y, orig_img_size, cfg)\n post_processing_end_time = time.time()\n processed_labels_batch = post_process_labels(transformed_labels, orig_img_size, cfg)\n\n return (pruned_preds_batch, processed_labels_batch), (post_processing_end_time - post_processing_start_time)", "def run_methods(self):\n results = {}\n methods = self.converter.available_methods[:] # a copy !\n\n if self.include_dummy:\n methods += ['dummy']\n\n if self.to_include:\n methods = [x for x in methods if x in self.to_include]\n elif self.to_exclude:\n methods = [x for x in methods if x not in self.to_exclude]\n\n for method in methods:\n print(\"\\nEvaluating method %s\" % method)\n times = []\n pb = Progress(self.N)\n for i in range(self.N):\n with Timer(times):\n self.converter(method=method)\n pb.animate(i+1)\n results[method] = times\n self.results = results", "def post_processing(self) -> Optional[Callable]:\n if (\n \"transforms\" not in self._spec\n or \"post\" not in self._spec[\"transforms\"]\n ):\n # Passthrough\n return lambda x: x\n f = find_class(self._spec[\"transforms\"][\"post\"])\n return f(self.options)", "def predict(self, instances, stats=None, **kwargs):\n\n stats = stats or Stats()\n self._validate_kwargs(kwargs)\n\n with stats.time(PREPROCESS_TIME):\n preprocessed = self.preprocess(instances, stats=stats, **kwargs)\n with stats.time(ENGINE_RUN_TIME):\n predicted_outputs = self._client.predict(\n preprocessed, stats=stats, **kwargs)\n with stats.time(POSTPROCESS_TIME):\n postprocessed = self.postprocess(\n predicted_outputs, original_input=instances, stats=stats, **kwargs)\n return postprocessed", "def post_process(cls, *args, 
**kwargs):\n pass", "def post_process(cls, *args, **kwargs):\n pass", "def post_process(cls, *args, **kwargs):\n pass", "def post_process(cls, *args, **kwargs):\n pass", "def predict(self, corpus, preprocessed_information, timeline_properties, params):\n sents = [sent for doc in corpus for sent in doc]\n\n random.shuffle(sents)\n\n post_processed = post_processing.post_process(\n sents,\n None,\n timeline_properties.daily_summary_length,\n timeline_properties.num_dates,\n timeline_properties.start,\n timeline_properties.end\n )\n\n return post_processed", "def setup_postprocessors(self) -> List[Callable[[], None]]:\n return [\n self._get_time,\n ]", "def get_preprocessing_methods(self):\n\t\treturn self.preprocessing_methods", "def populate_processing_methods(self):\n self.FIELDS_PRE_PROCESSING_METHODS = {\n 'publisher': self.join_all,\n 'description': self.join_all,\n 'format': self.join_all,\n 'language': self.join_all,\n 'type': self.get_alignment,\n 'rights': self.join_all,\n 'date': self.get_alignment,\n 'relation': self.join_all,\n 'source': self.join_all,\n 'coverage': self.get_alignment,\n 'contributor': self.join_all,\n 'title': self.join_all,\n 'identifier': self.join_all,\n 'subject': self.get_alignment,\n 'creator': self.get_alignment\n }" ]
[ "0.56834024", "0.565969", "0.53935933", "0.53585386", "0.53097564", "0.5251589", "0.5239719", "0.5229128", "0.5157222", "0.5151197", "0.5110115", "0.5072888", "0.5009041", "0.4979346", "0.49096116", "0.48874912", "0.4867647", "0.48647955", "0.48600864", "0.48440596", "0.48146597", "0.4747903", "0.47377613", "0.47377613", "0.47377613", "0.47377613", "0.47297597", "0.47189498", "0.4694815", "0.4689765" ]
0.6608731
0
Returns whether the object was closed. This includes both thrown exceptions and clean exits.
def closed(self): return self.__closeEvent.is_set()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def closed(self):\n return self._close_state.is_set()", "def closed(self) -> bool:\n return self._closed", "def closed(self) -> bool:\n return self._closed", "def is_closed(self) -> bool:\n return self._closed", "def is_closed(self) -> bool:\n return self._closed", "def is_closing(self):\n return self._is_closing", "def is_closing(self) -> bool:\n return self._state == STATE_CLOSING", "def is_closed(self):\n return self.__com.has_quit()", "def isClosed(self) -> bool:\r\n\r\n return self.__is_closed", "def is_close(self) -> bool:\n return not self.open", "def is_closed(self) -> bool:\n return self._closed or super().is_closed", "def closed(self):\n\n return not self.open", "def is_closed(self) -> bool:\n raise NotImplementedError() # pragma: nocover", "def is_closed(self) -> bool:", "def is_closed(self) -> bool | None:\n if self._is_open is None:\n return None\n return not self._is_open", "def closed(self) -> bool:\n return self._out_of_scope or self._consumed", "def is_closed(self):\n return self.state == CLOSED or self.state == CLOSING", "def _is_closed(self) -> bool:\n return self._status == Status.CLOSED", "def closed(self):\n return self.state == \"CLOSED\"", "def is_closed(self) -> bool:\n return self._http_connection is None", "def isClosed(self) -> bool:\n return self._connection is None", "def isClosed(self):\n pass", "def closed(self):\n return not self._file.is_open()", "def closed(self):\n return self._stream is None", "def is_closed(self):\n return None", "def is_closed(self):\n raise NotImplementedError", "def closed(self):\r\n return self._closed", "def is_closed(self): # -> bool | Any:\n ...", "def closed(self) -> bool:\n return self.pgconn.status == ConnStatus.BAD", "def close(self) -> bool:\n return True" ]
[ "0.80961", "0.78408647", "0.78408647", "0.7829112", "0.7829112", "0.77884793", "0.77741545", "0.77116525", "0.7690002", "0.7661896", "0.76284987", "0.7603319", "0.754891", "0.75396085", "0.7534947", "0.75115013", "0.7438865", "0.74171317", "0.7379942", "0.73350513", "0.73201287", "0.73044807", "0.7299074", "0.726384", "0.7256172", "0.7220233", "0.7193472", "0.7003478", "0.69919413", "0.6963524" ]
0.8093589
1
Imports the symbol defined by 'symbol_path'. 'symbol_path' is a string in the form 'foo.bar.baz' which is turned into an import statement 'from foo.bar import baz' (i.e. the last component of the name is the symbol name, the rest is the package/module path to load it from).
def _import_symbol(symbol_path): components = symbol_path.split(".") module_name = ".".join(components[:-1]) symbol_name = components[-1] module = __import__(module_name, globals(), locals(), [symbol_name]) symbol = getattr(module, symbol_name) return symbol
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def import_from_path(module: str, path: str, name: str):\n\n spec = importlib.util.spec_from_file_location(module, path)\n foo = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(foo)\n return getattr(foo, name)", "def import_from_string(import_path: str) -> Any:\n\n import_classname = import_path.split(\".\")[-1]\n import_module = \".\".join(import_path.split(\".\")[:-1])\n\n module = importlib.import_module(import_module)\n return getattr(module, import_classname)", "def import_module(name, path):\n spec = importlib.util.spec_from_file_location(name, path)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n return module", "def _import_by_path(path):\n module_path, attr_name = path.rsplit('.', 1)\n module = import_module(module_path)\n return getattr(module, attr_name)", "def _load_module(modulepath):\n\n mod = __import__(modulepath)\n path = []\n for token in modulepath.split(\".\")[1:]:\n path.append(token)\n mod = getattr(mod, token)\n return mod", "def import_by_path(name, path_list):\n try:\n # Handle submodules and additional paths\n path_index = len(sys.path)\n sys.path.extend(path_list)\n # Attempt the actual import\n return __import__(name)\n finally:\n # Safely remove paths\n for path in path_list:\n if sys.path.pop(path_index) != path:\n raise ImportError('Returned path entry from sys.path does not match appended path')", "def import_grammar(path):\n grammar_name = os.path.basename(path).replace(\".py\", \"\")\n grammar_file = f'restler_grammar_{grammar_name}_{os.getpid()}.py'\n\n # import req_collection from given grammar\n sys.path.append(os.path.dirname(path))\n grammar = importlib.import_module(grammar_name)\n req_collection = getattr(grammar, \"req_collection\")\n # copy grammar inside experiment's folder (for debugging purposes mainly)\n try:\n target_path = os.path.join(logger.EXPERIMENT_DIR, grammar_file)\n shutil.copyfile(path, target_path)\n except shutil.Error:\n pass\n\n return req_collection", "def import_module_from_module_path(path):\n return SourceFileLoader('', path).load_module()", "def DynamicImport(import_path, alias=dict(), log=None):\n if import_path not in alias and ':' not in import_path:\n raise ValueError(\n 'import_path should be one of {} or '\n 'include \":\", e.g. \"locata_wrapper.utils.music:MUSIC\" : '\n '{}'.format(set(alias), import_path))\n if ':' not in import_path:\n import_path = alias[import_path]\n\n module_name, objname = import_path.split(':')\n try:\n m = importlib.import_module(module_name)\n except Exception as e: # NOQA\n log.error('Function specified by my_alg_name not found!')\n sys.exit(1)\n return getattr(m, objname)", "def import_module_from(mod_path):\n if '.' 
in mod_path:\n bits = mod_path.split('.')\n mod_name = bits.pop()\n mod_path = '.'.join(bits)\n return import_module(mod_path, mod_name)\n else:\n return import_module(mod_path)", "def importFromPath(filename):\n try:\n path, name = os.path.split(filename)\n name, ext = os.path.splitext(name)\n file, filename, data = imp.find_module(name, [path])\n importedModule = imp.load_module(name, file, filename, data)\n except Exception as ae:\n raise Exception('Importing module '+ filename + ' at ' + path + os.sep + name + ' failed with error '+ str(ae))\n return importedModule", "def import_by_source(path: str):\n\n module = splitext(basename(path))[0]\n\n sys.path.append(dirname(path))\n\n spec = importlib.util.spec_from_file_location(module, path)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n\n sys.path.pop()\n\n return module", "def import_file(name: Text, file_path: Text):\n\n spec = spec_from_file_location(f\"luh3417.{name}\", file_path)\n module = module_from_spec(spec)\n spec.loader.exec_module(module)\n\n return module", "def import_module(module_name,module_path):\n try:\n if not isinstance(module_path,list):\n module_path = [module_path]\n file,filename,desc = imp.find_module(module_name,module_path)\n globals()[module_name] = imp.load_module(module_name, file, filename, desc)\n return\n except Exception as err:\n print 'import_module error', err\n traceback.print_exc()\n\n sys.exit()", "def load(self,path):\n try:\n # Darwin requires dlopen to be called with mode RTLD_GLOBAL instead\n # of the default RTLD_LOCAL. Without this, you end up with\n # libraries not being loadable, resulting in \"Symbol not found\"\n # errors\n if sys.platform == 'darwin':\n return ctypes.CDLL(path, ctypes.RTLD_GLOBAL)\n else:\n return ctypes.cdll.LoadLibrary(path)\n except OSError,e:\n raise ImportError(e)", "def load(self,path):\n try:\n # Darwin requires dlopen to be called with mode RTLD_GLOBAL instead\n # of the default RTLD_LOCAL. Without this, you end up with\n # libraries not being loadable, resulting in \"Symbol not found\"\n # errors\n if sys.platform == 'darwin':\n return ctypes.CDLL(path, ctypes.RTLD_GLOBAL)\n else:\n return ctypes.cdll.LoadLibrary(path)\n except OSError,e:\n raise ImportError(e)", "def import_from_dotted_path(dotted_names, path=None):\n next_module, remaining_names = dotted_names.split('.', 1)\n file, pathname, description = imp.find_module(next_module, path)\n module = imp.load_module(next_module, file, pathname, description)\n\n if hasattr(module, remaining_names):\n return getattr(module, remaining_names)\n\n if '.' 
not in remaining_names:\n return module\n\n return import_from_dotted_path(remaining_names, path=module.__path__)", "def _imports(graph: mapry.Graph, py: mapry.Py) -> str:\n # pylint: disable=too-many-branches\n # pylint: disable=too-many-statements\n stdlib_block = {'import typing'}\n\n third_party_block = set() # type: Set[str]\n\n if mapry.needs_type(a_type=graph, query=mapry.Path):\n if py.path_as == 'str':\n pass\n elif py.path_as == \"pathlib.Path\":\n stdlib_block.add(\"import pathlib\")\n else:\n raise NotImplementedError(\n \"Unhandled path_as: {!r}\".format(py.path_as))\n\n if mapry.needs_type(a_type=graph, query=mapry.TimeZone):\n if py.timezone_as == 'str':\n pass\n\n elif py.timezone_as == 'pytz.timezone':\n third_party_block.update(\n ('import pytz', 'import pytz.exceptions # type: ignore'))\n\n else:\n raise NotImplementedError(\n 'Unhandled timezone_as: {}'.format(py.timezone_as))\n\n # yapf: disable\n if any(mapry.needs_type(a_type=graph, query=query)\n for query in\n (mapry.Date, mapry.Time, mapry.Datetime, mapry.Duration)):\n # yapf: enable\n stdlib_block.add('import datetime')\n\n if mapry.needs_type(a_type=graph, query=mapry.Map):\n stdlib_block.add(\"import collections\")\n\n if len(graph.classes) > 0:\n stdlib_block.add(\n 'import collections'\n ) # needed for the initialization of class registries\n\n ##\n # Needs regex?\n ##\n\n import_re = False\n for a_type, _ in mapry.iterate_over_types(graph=graph):\n if isinstance(a_type, (mapry.String, mapry.Path)) and a_type.pattern:\n import_re = True\n break\n\n if isinstance(a_type, mapry.Duration):\n import_re = True\n break\n\n for cls in graph.classes.values():\n if cls.id_pattern is not None:\n import_re = True\n break\n\n if import_re:\n stdlib_block.add(\"import re\")\n\n ##\n # First party\n ##\n\n first_party_block = {\n 'import {}'.format(py.module_name),\n 'import {}.parse'.format(py.module_name)\n }\n\n block_strs = [] # type: List[str]\n if len(stdlib_block) > 0:\n block_strs.append('\\n'.join(sorted(stdlib_block)))\n\n if len(third_party_block) > 0:\n block_strs.append('\\n'.join(sorted(third_party_block)))\n\n if len(first_party_block) > 0:\n block_strs.append('\\n'.join(sorted(first_party_block)))\n\n return '\\n\\n'.join(block_strs)", "def __import_locustfile__(filename, path):\n try:\n # Python 3 compatible\n source = importlib.machinery.SourceFileLoader(os.path.splitext(locustfile)[0], path)\n imported = source.load_module()\n except AttributeError:\n # Python 2.7 compatible\n import imp\n imported = imp.load_source(os.path.splitext(locustfile)[0], path)\n\n return imported", "def import_module(module_name, path):\n file, path, description = imp.find_module(module_name, [path])\n # Close the .so file after load.\n with file:\n return imp.load_module(module_name, file, path, description)", "def import_string(dotted_path):\n try:\n module_path, class_name = dotted_path.rsplit('.', 1)\n except ValueError as err:\n raise ImportError(\"%s doesn't look like a module path\" % dotted_path) from err\n\n module = importlib.import_module(module_path)\n\n try:\n return getattr(module, class_name)\n except AttributeError as err:\n raise ImportError('Module \"%s\" does not define a \"%s\" attribute/class' % (\n module_path, class_name)\n ) from err", "def import_file(path: Union[PurePath, str]) -> Generator[ModuleType, None, None]:\n\n pathdir = os.path.dirname(path)\n if pathdir in sys.path:\n added_to_sys_path = False\n else:\n sys.path.insert(0, pathdir)\n added_to_sys_path = True\n try:\n name = 
os.path.basename(path).split(\".\")[0]\n spec = spec_from_file_location(name, str(path))\n assert isinstance(spec, ModuleSpec)\n module = module_from_spec(spec)\n assert isinstance(spec.loader, Loader)\n loader: Loader = spec.loader\n try:\n loader.exec_module(module)\n except Exception as error:\n log.bad(error)\n raise\n yield module\n finally:\n if added_to_sys_path:\n sys.path.remove(pathdir)", "def reload_import(path, hard = True):\r\n\r\n # in case the path is not present in the\r\n # system modules no need to reload\r\n if not path in sys.modules: return\r\n\r\n # in case the hard approach for reloading is\r\n # taken the system modules should be changed\r\n if hard:\r\n # retrieves the module for the given path from\r\n # system module and then removes it from the system\r\n # modules and then deletes it from the virtual\r\n # machine environment\r\n module = sys.modules[path]\r\n del sys.modules[path]\r\n del module\r\n # otherwise the \"soft\" reload provides the normal\r\n # module reload method\r\n else:\r\n # retrieves the module for the given path from\r\n # system module and then forces a reload on the\r\n # module (to flush the contents)\r\n module = sys.modules[path]\r\n legacy.reload(module)", "def import_string(dotted_path):\n try:\n module_path, class_name = dotted_path.rsplit('.', 1)\n module = import_module(module_path)\n\n try:\n return getattr(module, class_name)\n except AttributeError:\n msg = 'Module \"%s\" does not define a \"%s\" attribute/class' % (\n module_path, class_name)\n six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])\n\n except ValueError:\n msg = \"%s doesn't look like a module path\" % dotted_path\n six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])", "def run_import(path: Path) -> None:\n if not (path / \"__main__.py\").exists():\n return\n try:\n subprocess.check_call(\n [sys.executable, \"-m\", \"pip\", \"install\", \"--no-input\", path.parent.as_posix()],\n stdout=subprocess.DEVNULL,\n )\n if (path / \"__main__.py\").exists():\n subprocess.check_call(\n [sys.executable, \"-c\", f\"import {path.name}\"],\n stdout=subprocess.DEVNULL,\n )\n subprocess.check_call(\n [sys.executable, \"-m\", \"pip\", \"uninstall\", \"--no-input\", \"-y\", path.name],\n stdout=subprocess.DEVNULL,\n )\n except subprocess.CalledProcessError as e:\n raise SnapshotMismatchError(f\"Path {path} cannot be imported: {e}\") from None", "def _importer(name, root_package=False, relative_globals=None, level=0):\n return __import__(name, locals=None, # locals has no use\n globals=relative_globals,\n fromlist=[] if root_package else [None],\n level=level)", "def import_string(dotted_path):\n try:\n module_path, class_name = dotted_path.rsplit('.', 1)\n except ValueError as err:\n raise ImportError(\"%s doesn't look like a module path\" % dotted_path) from err\n\n module = import_module(module_path)\n\n try:\n return getattr(module, class_name)\n except AttributeError as err:\n raise ImportError('Module \"%s\" does not define a \"%s\" attribute/class' % (\n module_path, class_name)) from err", "def load_module(name, path):\n loader = importlib.machinery.SourceFileLoader(name, path)\n module = types.ModuleType(loader.name)\n loader.exec_module(module)\n return module", "def importfile(path):\n path = getpath(path, custom=True)\n assert _os.path.isfile(path) == True\n\n file_handler = _SourceFileLoader(*path.splitpath())\n return file_handler", "def load_module(path: os.PathLike):\n path = Path(path)\n pwd = Path(os.getcwd())\n os.chdir(path.parent)\n try:\n 
mod = import_module(path.stem)\n except ModuleNotFoundError as err:\n raise err\n finally:\n os.chdir(pwd)\n return mod" ]
[ "0.60043275", "0.57454276", "0.5705168", "0.5638744", "0.5635315", "0.55979246", "0.5471517", "0.54315394", "0.54311717", "0.5304224", "0.530391", "0.52805257", "0.5262261", "0.5223097", "0.5155832", "0.5155832", "0.51526827", "0.5150379", "0.50937927", "0.5057649", "0.50040865", "0.500351", "0.49576858", "0.4956577", "0.49426195", "0.49359718", "0.4912444", "0.49091578", "0.4898265", "0.48911455" ]
0.88491184
0
Figuring out if a type is a named tuple is not as trivial as one may expect
def type_is_namedtuple(t) -> bool: try: return issubclass(t, tuple) and hasattr(t, "_fields") except TypeError: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_namedtuple(v) -> bool:\n try:\n return isinstance(v, tuple) and hasattr(v, \"_fields\")\n except TypeError:\n return False", "def is_namedtuple(obj):\n return isinstance(obj, tuple) and hasattr(obj, '_asdict')", "def isnamedtuple(obj):\n return isinstance(obj, tuple) \\\n and hasattr(obj, \"_fields\") \\\n and hasattr(obj, \"_asdict\") \\\n and callable(obj._asdict)", "def _verify_named_tuple(named_tuple):\n\n if not bool(\n isclass(named_tuple)\n and issubclass(named_tuple, tuple)\n and callable(named_tuple)\n and hasattr(named_tuple, \"_fields\")\n ):\n raise TypeError(\n \"named_tuple parameter should be a tuple subclass created \"\n \"by the collections.namedtuple factory function, or a \"\n \"subclass of typing.NamedTuple.\"\n )", "def is_namedtuple(data):\n data_type = type(data)\n bases = data_type.__bases__\n if len(bases) != 1 or bases[0] != tuple:\n return False\n fields = getattr(data_type, \"_fields\", None)\n if not isinstance(fields, tuple):\n return False\n return all(isinstance(member, str) for member in fields)", "def is_tuple(obj):\n return type(obj) is tuple", "def isTuple(data):\n\ttry:\n\t\tfrom types import TupleType\n\t\tif type(data) == TupleType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type((0,0)):\n\t\t\treturn True\n\treturn False", "def _check_namedtuple(self) -> PossibleResult[T]:\n if isinstance(self.constructor, NamedTupleType):\n if not isinstance(self.obj, Mapping):\n raise DeserializeError(\n Mapping, self.obj, self.new_depth, self.key\n )\n parameters = inspect.signature(self.constructor).parameters\n return self.constructor(\n **{\n name: Deserialize(\n obj=self.obj.get(name, UNDEFINED),\n constructor=_type,\n depth=self.new_depth,\n convert_primitives=self.convert_primitives,\n key=name,\n ).run()\n for name, _type in get_type_hints(self.constructor).items()\n if not (\n name not in self.obj\n and name in parameters\n and parameters[name].default != inspect.Signature.empty\n )\n }\n ) # type: ignore\n return NO_RESULT", "def test_tuples():\n\n @type_checked\n def _run_test(something:(str, int, bool)):\n assert isinstance(something[0], str)\n assert isinstance(something[1], int)\n assert isinstance(something[2], bool)\n\n _run_test(something=(None, \"12\", 1))", "def is_tuple_of(seq, expected_type):\n return is_seq_of(seq, expected_type, seq_type=tuple)", "def testTypTagsTupleEnforced(self) -> None:\n fake_typ_tuple = typing.cast(tuple, ['win', 'x86'])\n with self.assertRaises(AssertionError):\n _ = data_types.Result('test', fake_typ_tuple, (1, 10), 'build_id')", "def __type_correct_tuple(self):\n\n strTestName = 'Type (tuple) is given (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddOpt('parameter1', 'type \\'tuple\\' parameter')\n RxCSObject.paramType('parameter1', (tuple))\n RxCSObject.parameter1 = (1, 4)\n\n RxCSObject.paramAddOpt('parameter2', 'type \\'list\\' parameter')\n RxCSObject.paramType('parameter2', (list))\n RxCSObject.parameter2 = [10, 40]\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def is_typed_tuple(tpl: object, obj_type: type, allow_none: bool = False, allow_empty: bool = True) -> bool:\n assert isinstance(tpl, object)\n assert isinstance(obj_type, type)\n assert isinstance(allow_none, bool)\n assert isinstance(allow_empty, bool)\n\n if allow_none and tpl is None:\n return True\n\n if not isinstance(tpl, tuple):\n return False\n\n if not allow_empty and len(tpl) == 0:\n return False\n\n for obj in tpl:\n if not isinstance(obj, obj_type):\n return False\n\n 
return True", "def tuple_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, (tuple, collections.abc.Sequence)):\n name = type(var).__name__\n raise TupleError(\n 'Function {} expected tuple, {} got instead.'.format(func, name))", "def _is_well_formed(l):\n\tif _is_symbol(l):\n\t\treturn 1\n\tif (type(l) == types.TupleType and len(l) == 2\n\t\t\tand l[0] == neg and _is_well_formed(l[1])):\n\t\treturn 1\n\tif (type(l) == types.TupleType and len(l) == 3\n\t\t\tand _is_binary(l[1])\n\t\t\tand _is_well_formed(l[0]) and _is_well_formed(l[2])):\n\t\treturn 1\n\treturn 0", "def __type_of_elements_incorrect_dicts_in_tuple(self):\n strTestName = 'Elements (dicts) given in a tuple (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'type \\'tuple\\' parameter #1')\n RxCSObject.paramType('parameter1', (tuple))\n\n RxCSObject.paramAddMan('parameter2', 'type \\'tuple\\' parameter #2')\n RxCSObject.paramType('parameter2', (tuple))\n RxCSObject.paramTypeEl('parameter2', (int))\n\n RxCSObject.parameter1 = (1, 10)\n dD1 = {}\n dD2 = {}\n RxCSObject.parameter2 = (dD1, dD2)\n\n self.__parametersCheck_error(RxCSObject, ElementTypeError, strTestName)", "def is_Tuple_ellipsis(tpl):\n try:\n return tpl.__tuple_use_ellipsis__\n except AttributeError:\n try:\n if tpl.__args__ is None:\n return False\n # Python 3.6\n if tpl.__args__[-1] is Ellipsis:\n return True\n except AttributeError:\n pass\n return False", "def test_complex_to_tuple():\n\n @type_checked\n def _run_test(thing:(complex,)):\n assert thing == (complex(15, 2),)\n\n _run_test(complex(15, 2))", "def __type_incorrect_tuple_lists(self):\n\n strTestName = 'Type (dict instead of tuple or list) is given (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddOpt('parameter1', 'type \\'tuple\\' parameter')\n RxCSObject.paramType('parameter1', (tuple))\n RxCSObject.parameter1 = (1, 4)\n\n RxCSObject.paramAddOpt('parameter2', 'type \\'tuple or list\\' parameter')\n RxCSObject.paramType('parameter2', (tuple, list))\n RxCSObject.parameter2 = {}\n\n self.__parametersCheck_error(RxCSObject, ParameterTypeError, strTestName)", "def is_pyxb_d1_type_name(pyxb_obj, expected_pyxb_type_name):\n try:\n return pyxb_get_type_name(pyxb_obj) == expected_pyxb_type_name\n except AttributeError:\n return False", "def single_element_tuple():\n single = (1,)\n print(type(single)) # <type 'tuple'>", "def _decompose_type(_type: Type[Any]) -> Tuple[Type[Any], bool]:\n\n if not typing_inspect.is_optional_type(_type):\n return _type, True\n\n args: Set[Type[Any]] = set(typing_inspect.get_args(_type))\n args.remove(NoneType)\n\n if len(args) != 1:\n _type.__args__ = tuple(args)\n return _type, False\n\n return args.pop(), False", "def tuple_namer(name,tupl):\n tupl_templ = collections.namedtuple(name, 'battery status neighbour')\n named = tupl_templ(battery = tupl[0], status = tupl[1], neighbour = tupl[2])\n return named", "def is_tuple_consists_of_strings(an_array):\n return isinstance(an_array, tuple) and is_array_type(an_array, str)", "def is_sequence_of_tuple(items):\n return all(isinstance(item, tuple) for item in items)", "def _match_entry_type_tuple(code_entry, type_tuple):\n entry_type = code_entry['type']\n return entry_type in type_tuple", "def valid_tuple(obj):\r\n try:\r\n assert isinstance(obj, tuple)\r\n assert isinstance(obj[0], str)\r\n assert isinstance(obj[1], str)\r\n except:\r\n raise Invalid(\"{} is not a valid key tuple\".format(obj))\r\n return obj", "def 
is_unknown(t):\n if isinstance(t, (pytd.ClassType, pytd.NamedType, pytd.Class, StrictType)):\n return escape.is_unknown(t.name)\n elif isinstance(t, str):\n return escape.is_unknown(t)\n else:\n return False", "def type(name):", "def empty_tuple():\n empty = ()\n print(type(empty)) # <type 'tuple'>" ]
[ "0.7847265", "0.77559423", "0.76825786", "0.76751727", "0.7643817", "0.6838801", "0.6626756", "0.6541308", "0.63720953", "0.6256021", "0.61520916", "0.61458665", "0.6096234", "0.60237575", "0.58922905", "0.58663714", "0.5831151", "0.5825571", "0.57641757", "0.5760455", "0.5751342", "0.5741397", "0.5735952", "0.5719128", "0.570869", "0.56641036", "0.5648087", "0.56332856", "0.56017464", "0.5589628" ]
0.82478565
0
Determine if value is a subclass of type_. Will work even if value is not a class
def issubclass_safe(value, type_): try: return issubclass(value, type_) except (TypeError, AttributeError): # Cannot perform issubclass on some types return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_type(value):\n if isinstance(value, type):\n return issubclass(value, Type)\n return isinstance(value, Type)", "def isinstance_safe(value, type_):\n try:\n return isinstance(value, type_)\n except TypeError:\n # Cannot perform isinstance on some types\n return False", "def is_type(obj):\n return type(obj) is type or type(obj) is types.ClassType", "def issubclass_(type_, dtype):\n if not isinstance(type_, typing.Type):\n return False\n return typing.is_subclass(type_, dtype)", "def is_instance_of_type(object_a, type_a):\n\n return is_type_subclass_of_type(type(object_a), type_a)", "def is_type(self, typ):\n return typ == self.__class__.__name__", "def _isinstancetype(an_obj):\n if an_obj is None: return False\n if not PY3K:\n return isinstance(an_obj, types.InstanceType)\n typstr = str(type(an_obj))\n # the following logic works, as PyRAF users expect, in both v2 and v3\n return typstr==\"<type 'instance'>\" or \\\n (typstr.startswith(\"<class '\") and ('.' in typstr))", "def verify_type(self, obj):\n return isinstance(obj, self.type_)", "def check_class(instance, type):\n\tif not issubclass(instance, type):\n\t\traise TypeError('Subclass expected type {0}, but got: {1}', type(type), type(instance))", "def _isinstance(cls, x):\n return isinstance(x, cls.PYTHON_TYPE_CHECK)", "def is_type(obj: Any) -> bool:\n return type(obj).__name__ == \"type\"", "def isclass(object):\r\n return isinstance(object, (type, types.ClassType))", "def subclassof(c, b):\n try:\n return issubclass(c, b)\n except TypeError:\n return False", "def check_type(instance, type):\n\tif not isinstance(instance, type):\n\t\traise TypeError('Instance expected type {0}, but got: {1}', type(type), type(instance))", "def _is_valid(self, value):\n\n # Entities have an istypeof method that can perform more sophisticated\n # type checking.\n if hasattr(self._type, \"istypeof\"):\n return self._type.istypeof(value)\n else:\n return isinstance(value, self._type)", "def inherits_from(obj, a_class):\n if a_class == type(obj):\n return False\n return isinstance(obj, a_class)", "def _isinstance(self, value: Any, typ: Any) -> bool:\n typ_args = getattr(typ, '__args__', ())\n if hasattr(typ, '__origin__'):\n # Drop subscripted extra type parameters from generic type.\n # (e.g. 
Dict[str, str].__origin__ == dict)\n # See https://www.python.org/dev/peps/pep-0585 for more information.\n typ = typ.__origin__\n if typ == Union:\n return any(self._isinstance(value, t) for t in typ_args)\n else:\n return isinstance(value, typ)", "def _is_typevar(typeval: Type) -> bool:\n return isinstance(typeval, TypeVar) # type: ignore", "def is_(t, x):\n return type(x) is t", "def match(self, cls):\n return isinstance(self, cls)", "def _check_value_type(self, value):\n if value is not None and self.value_type is not None:\n valid = isinstance(value, self.value_type)\n if not valid:\n return False\n return True", "def is_kind_of_class(obj, a_class):\n return(isinstance(obj, a_class))", "def is_kind_of_class(obj, a_class):\n if isinstance(obj, a_class):\n return True\n else:\n return False", "def is_kind_of_class(obj, a_class):\n if isinstance(obj, a_class):\n return True\n else:\n return False", "def is_kind_of_class(obj, a_class):\n if isinstance(obj, a_class) is True:\n return True\n else:\n return False", "def checkType(self, value):\n pass", "def inherits_from(obj, a_class):\n if type(obj) is not a_class:\n return(issubclass(type(obj), a_class))\n else:\n return False", "def has_exactly_type(obj, tpe):\r\n return type(obj) == tpe", "def is_kind_of_class(obj, a_class):\n return (isinstance(obj, a_class))", "def inherits_from(obj, a_class):\n if issubclass(type(obj), a_class) and not type(obj) == a_class:\n return True\n else:\n return False" ]
[ "0.7880763", "0.7379336", "0.7272714", "0.72481436", "0.72121257", "0.7209383", "0.6984322", "0.68737435", "0.68694156", "0.6789055", "0.67275107", "0.6715976", "0.670521", "0.66198355", "0.6616635", "0.6616613", "0.6574762", "0.6546906", "0.64954215", "0.6469347", "0.64501196", "0.6447857", "0.6445621", "0.6445621", "0.6443805", "0.6432457", "0.64301413", "0.6423312", "0.6422392", "0.6420341" ]
0.7830925
1
Parse a typing hint into its type and arguments.
def parse_hint(hint: Type) -> Tuple[Type, Optional[List]]: if hasattr(hint, "__origin__"): # This is a type hint (eg typing.Union) # Filter out TypeVars such as KT & VT_co (they generally # indicate that no explicit hint was given) hint_args = [a for a in getattr(hint, "__args__", []) if not isinstance(a, TypeVar)] return hint.__origin__, hint_args or None else: # This is something other than a type hint # (e.g. an int or datetime) return hint, None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_type_arg(self, parse_input):\n with pytest.warns(SyntaxWarning, match=\"Only keyword options of the form\"):\n parse_input(\"name testname\\nversion 1.0\\ntarget example (6)\\ntype example (42)\")", "def parse(cls, buf: memoryview, params: Params) \\\n -> tuple[AnyParseable, memoryview]:\n for data_type in params.expected:\n try:\n return data_type.parse(buf, params)\n except NotParseable:\n pass\n raise UnexpectedType(buf)", "def _parse_and_validate(self, val):\n if self._is_parameter_type:\n val = self._parse(val) if isinstance(val, str) else val\n self._validate_or_throw(val)\n return val", "def parse_params(txt):\n res = list()\n # First, slipt with stuff looking like \\TYPE:\n splitted = re.split(r'\\s*\\\\(\\w+)\\s*:', txt)\n # We now have a list looking like:\n # ['', 'flag', '....', 'param', '...']\n i = 1\n while i < len(splitted) - 1:\n type = splitted[i]\n rest = splitted[i+1]\n if type == \"argn\":\n name = \"remaining args\"\n desc = rest\n else:\n # first word is the name, the rest is the description:\n match = re.match(r'\\s*(\\w+)\\s*(.*)', rest, re.DOTALL)\n if not match:\n print(\"warning, failed to parse parameters\")\n print(\"near\", rest)\n break\n (name, desc) = match.groups()\n desc = clean_indent(desc)\n res.append((type, name, desc))\n i += 2\n return res", "def type(self, tokens):\n if len(tokens) != 1:\n raise Exception(\"Unexpected argument counts\")\n return tokens[0].value", "def parse_arguments(args):", "def _consume_type(self):\n try:\n self._consume(self.VARIABLE_TYPES)\n except CompilationEngineError:\n self._consume(TokenTypes.IDENTIFIER) # Class name", "def parse(self, input):\n pass", "def test_short_form_multi():\n from typing import Any, AnyStr\n\n def func(arg1, arg2):\n # type: (AnyStr, int) -> Any\n pass\n\n assert get_type_hints(func, globals(), locals()) == {\n 'return': Any,\n 'arg1': AnyStr,\n 'arg2': int\n }", "def parse_as(val, *types):\n for typ in types:\n try:\n return typ(val)\n except ValueError:\n pass\n return quote(val)", "def _preprocess_typecheck(argSig, argspecs, slf_or_clsm=False):\n # todo: Maybe move also slf-logic here\n vargs = argspecs.varargs\n try:\n kw = argspecs.keywords\n except AttributeError:\n kw = argspecs.varkw\n try:\n kwonly = argspecs.kwonlyargs\n except AttributeError:\n kwonly = None\n if not vargs is None or not kw is None:\n arg_type_lst = list(get_Tuple_params(argSig))\n if not vargs is None:\n vargs_pos = (len(argspecs.args)-1) \\\n if slf_or_clsm else len(argspecs.args)\n # IndexErrors in this section indicate that a child-method was\n # checked against a parent's type-info with the child featuring\n # a more wider type on signature level (e.g. 
adding vargs)\n try:\n vargs_type = typing.Sequence[arg_type_lst[vargs_pos]]\n except IndexError:\n vargs_type = typing.Sequence[typing.Any]\n try:\n arg_type_lst[vargs_pos] = vargs_type\n except IndexError:\n arg_type_lst.append(vargs_type)\n if not kw is None:\n kw_pos = len(argspecs.args)\n if slf_or_clsm:\n kw_pos -= 1\n if not vargs is None:\n kw_pos += 1\n if not kwonly is None:\n kw_pos += len(kwonly)\n try:\n kw_type = typing.Dict[str, arg_type_lst[kw_pos]]\n except IndexError:\n kw_type = typing.Dict[str, typing.Any]\n try:\n arg_type_lst[kw_pos] = kw_type\n except IndexError:\n arg_type_lst.append(kw_type)\n return typing.Tuple[tuple(arg_type_lst)]\n else:\n return argSig", "def test_parameters_with_mixed_inferred_and_declared_types(self):\n with self.assertRaises(parser.JavaSyntaxError):\n parse.parse(setup_java_class(\"(x, int y) -> x+y;\"))", "def parse(self):\n\n special_vars = {'amplification', 'copy number loss', \n 'epigenetic silencing', 'overexpression'}\n\n special_terms = ['dna binding domain', 'egfrv', 'truncating mutation',\n 'fusion', 'mutation', 'deletion', 'duplication', 'insertion',\n 'hypermethylation']\n\n var = self.var.lower()\n\n # Check if the stop sign '*' in the variation\n if '*' in var:\n self.stop_sign = True\n \n # Type \"exact match with special pre-difined variations\"\n if var in special_vars:\n self.type = var\n return\n \n # Type \"with special term\"\n for term in special_terms:\n if term in var:\n self.type = term\n return\n\n # Type \"point\": A123B or A123* or A123\n if re.match('^[a-z][0-9]+[a-z|*]?$', var):\n split = re.split('[0-9]+', var)\n self.type = 'point'\n self.start_amino = split[0]\n self.end_amino = split[1]\n s = re.search('[0-9]+', var)\n self.pos = int(s.group())\n return\n\n # Type \"del/ins/trunc/splice/dup/fs\": A123del or A123_B234del\n for suffix in ['del', 'ins', 'trunc', 'splice', 'dup', 'fs']:\n if suffix in var:\n self.type = self.alias_dict.get(suffix, suffix)\n self._parse_suffix(var, suffix)\n return\n\n print('[INFO] variation cannot be parsed: %s' % self.var)", "def parse(arg: Tuple[str, str, str, str, str]) -> Tuple[str, str, str]:\n return (arg[2], arg[3], arg[4])", "def parse(self, buf: memoryview, params: Params) \\\n -> tuple[ParseableTypeT_co, memoryview]:\n ...", "def parse(cls: type[Parseable[ParsedT]], buf: memoryview, params: Params) \\\n -> tuple[Parseable[ParsedT], memoryview]:\n ...", "def typed_line(line, parser):\n user, item, rating = parser(line)\n return int(user), int(item), float(rating)", "def parse_arguments():\n parser = argparse.ArgumentParser(description=\"Parse library type information.\")\n parser.add_argument(\"input_file\", help=\"Salmon library type information file.\")\n return parser.parse_args()", "def __parse_docstring(docstring):\n\t\tif docstring is None or docstring == \"\":\n\t\t\treturn {}\n\t\tlines = docstring.replace(\"\\t\", \"\").split(\"\\n\")\n\t\tresult = {}\n\t\thelp_line = \"\"\n\t\targuments = {}\n\n\t\ts_argument = False\n\t\twhile lines != []:\n\t\t\tline = lines.pop(0).strip()\n\n\t\t\tif line.strip() == \"\":\n\t\t\t\tcontinue\n\n\t\t\telse:\n\t\t\t\tif not s_argument:\n\t\t\t\t\tif line == \"Arguments:\":\n\t\t\t\t\t\ts_argument = True\n\t\t\t\t\telse:\n\t\t\t\t\t\thelp_line += \" \" + line\n\t\t\t\telse:\n\t\t\t\t\tif line[0] in [\"@\", \"#\"]:\n\t\t\t\t\t\topt = line[0]\n\t\t\t\t\t\targ = line[1:]\n\t\t\t\t\t\tvariable, _, values = arg.partition(\" = \")\n\t\t\t\t\t\tname, _, typ = variable.partition(':')\n\n\t\t\t\t\t\tif typ in 
Command.TYPES:\n\t\t\t\t\t\t\ttyp = Command.TYPES[typ]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\traise CommandTypeError(\"{typ} not supported by commandparse\".format(typ))\n\n\t\t\t\t\t\talias = name[0]\n\t\t\t\t\t\targuments[name] = {\n\t\t\t\t\t\t\t\"alias\": \"-{alias}\".format(alias=alias),\n\t\t\t\t\t\t\t\"name\": \"--{name}\".format(name=name),\n\t\t\t\t\t\t\t\"type\": typ,\n\t\t\t\t\t\t\t\"help_line\": \"\",\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif values:\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\tv = literal_eval(values)\n\t\t\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\t\t\traise CommandDefaultValueError(\"Incorret value(s) in a placeholder: {v}\".format(v=values))\n\t\t\t\t\t\t\tif isinstance(v, list):\n\t\t\t\t\t\t\t\targuments[name][\"values\"] = v\n\t\t\t\t\t\t\telif isinstance(v, str) or isinstance(v, int) or isinstance(v, float):\n\t\t\t\t\t\t\t\targuments[name][\"value\"] = v\n\n\t\t\t\t\t\tif opt == \"#\":\n\t\t\t\t\t\t\targuments[name][\"pos\"] = True\n\t\t\t\t\t\telif opt == \"@\":\n\t\t\t\t\t\t\targuments[name][\"pos\"] = False\n\n\t\t\t\t\telif line: # if no prefix is found, read the help line of the previous argument.\n\t\t\t\t\t\tif not arguments[name][\"help_line\"]:\n\t\t\t\t\t\t\targuments[name][\"help_line\"] = line\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\targuments[name][\"help_line\"] += \" \" + line\n\n\t\treturn {\"help_line\": help_line.strip(), \"arguments\": arguments}", "def unparse_type(type_str):\n if not type_str.startswith('array'):\n return type_str\n arg_dim = type_str.lstrip('array')[0]\n data_type = type_str.lstrip('array')[1:]\n arg_type = \"vizgen.ndarray('\" + data_type + \"', \" + arg_dim + \")\"\n return arg_type", "def _parse_parameters(args: Iterable[Any]) -> Generator[TypeVar, None, None]:\n for i in args:\n if hasattr(i, \"__parameters__\"):\n yield from i.__parameters__\n elif isinstance(i, TypeVar):\n yield i", "def convert_type_hint(self, hint: Any, unsupported: ProperType = ANY) -> ProperType:\n # We must handle a lot of special cases, so try to give an example for each one.\n if hint is Any or hint is None:\n # typing.Any or empty\n return ANY\n if hint is type(None):\n # None\n return NONE_TYPE\n if hint is tuple:\n # tuple\n # TODO(fk) Tuple without size. Should use tuple[Any, ...] ?\n # But ... 
(ellipsis) is not a type.\n return TupleType((ANY,), unknown_size=True)\n if get_origin(hint) is tuple:\n # Type is `tuple[int, str]` or `typing.Tuple[int, str]` or `typing.Tuple`\n args = self.__convert_args_if_exists(hint, unsupported=unsupported)\n if not args:\n return TupleType((ANY,), unknown_size=True)\n return TupleType(args)\n if is_union_type(hint) or isinstance(hint, types.UnionType):\n # Type is `int | str` or `typing.Union[int, str]`\n # TODO(fk) don't make a union including Any.\n return UnionType(\n tuple(\n sorted(self.__convert_args_if_exists(hint, unsupported=unsupported))\n )\n )\n if isinstance(hint, _BaseGenericAlias | types.GenericAlias):\n # `list[int, str]` or `List[int, str]` or `Dict[int, str]` or `set[str]`\n result = Instance(\n self.to_type_info(hint.__origin__),\n self.__convert_args_if_exists(hint, unsupported=unsupported),\n )\n # TODO(fk) remove this one day.\n # Hardcoded support generic dict, list and set.\n return self._fixup_known_generics(result)\n\n if isinstance(hint, type):\n # `int` or `str` or `MyClass`\n return self._fixup_known_generics(Instance(self.to_type_info(hint)))\n # TODO(fk) log unknown hints to so we can better understand what\n # we should add next\n # Remove this or log to statistics?\n _LOGGER.debug(\"Unknown type hint: %s\", hint)\n # Should raise an error in the future.\n return unsupported", "def injectTypes (g):\n\tself=__module__\n\ts=g.symbols\n\tg.token('TYPE_VAR', '_|[A-Z][A-Z0-9]*')\n\tg.rule('TypeParameter', s.LSB, listOf(g.agroup(s.TYPE_VAR, s.FQNAME), s.COMMA, g), s.RSB)\n\tg.rule('TypeReference', s.FQNAME._as('name'), s.TypeParameter.optional()._as('parameters'))\n\tg.group('TypeValue')\n\tg.rule('TypeExpression')\n\tg.rule('TypeUnionSuffix', s.PIPE, s.TypeValue)\n\tg.group('TypePrefix', s.TypeReference)\n\tg.group('TypeSuffix', s.TypeUnionSuffix)\n\tg.rule('TypeExpression', s.TypePrefix, s.TypeSuffix.zeroOrMore())\n\tg.rule('TypeParens', s.LP, listOf(s.TypeExpression, s.COMMA, g), s.RP)\n\ts.TypeValue.set(s.TypeParens, s.TypeExpression)\n\tg.rule('TypeSlot', s.CheckIndent, g.aword('@slot'), s.NAME._as('name'), g.arule(s.COLON, s.TypeValue).optional()._as('value'), s.EOL, s.Documentation.optional()._as('documentation'))\n\tg.group('TypeLine', s.TypeSlot)\n\tg.group('TypeCode', s.COMMENT, s.TypeLine)\n\tg.rule('TypeBody', s.Indent, s.TypeCode.zeroOrMore(), s.Dedent)\n\tg.rule('Type', s.CheckIndent, g.aword('@type'), s.TypeReference._as('name'), g.arule(s.COLON, s.TypeValue).optional()._as('value'), s.EOL, s.Documentation.optional()._as('documentation'), s.TypeBody.optional())", "def parse(args, query):\n\n global query_type\n\n # Deal first with requests for definition or pronunciation\n # 1. Make the code easier to read\n first_word = args[0]\n second_word = args[1] if len(args) > 1 else \"\"\n third_word = args[2] if len(args) > 2 else \"\"\n fourth_word = args[3] if len(args) > 3 else \"\"\n # we use the teranary operator (this if ____ else that) to avoid an IndexError\n # IndexError would be raised if we tried to access the second element (args[1])\n # in a list which contained only one item (eg args == [\"lonely\"])\n # the teranary operator (in most languages it looks like \"____ ? this : that\")\n # returns \"this\" when the if is true and \"that\" when the if is false\n # meaning, if len(args) is NOT greater than 1, second_word == \"\"\n\n # 2. Check for keywords in the list of arguments\n # Example: nostrum defined\n # Example: pronunciation of otolaryngology\n if first_word == \"define\":\n # e.g. 
if the first word is \"define\" we'll add the second word to the query\n query = {\"sp\": second_word, \"md\": \"d\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n # the query is a dictionary of GET parameters for the http request, eg\n # https://api.datamuse.com/words?max=1&sp=SECOND_WORD_HERE&qe=sp&md=d&ipa=1\n elif second_word == \"defined\" or second_word == \"definition\":\n query = {\"sp\": first_word, \"md\": \"d\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n # this one uses string interpolation (the f\"\" stuff)\n elif f\"{second_word} {third_word}\" == \"means what\":\n query = {\"sp\": first_word, \"md\": \"d\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n elif f\"{second_word} {third_word} {fourth_word}\" == \"is said how\":\n query = {\"sp\": first_word, \"md\": \"r\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n # this one uses regular expressions -- i.e. if the second_word is \"of\" or \"for\"\n elif first_word == \"definition\" and re.match(r'(of)|(for)',second_word):\n query = {\"sp\": third_word, \"md\": \"d\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n # the is_pronounced function returns true if first_word is a (mis)spelling of pronounced\n elif re.match(r'(of)|(for)',second_word) and is_pronounced(first_word):\n query = {\"sp\": third_word, \"md\": \"r\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n # the ordering in the above list is not entirely random\n # since an if-elif-else statement won't keep evaluating after it finds a match\n # it makes sense to put the most computationally complex clauses at the end\n # >>> import timeit\n # >>> timeit.timeit('from word_helpers import is_pronounced; is_pronounced(\"pronounced\")', number=10000)\n # 0.022870146989589557\n # >>> timeit.timeit('args = [\"defined\"]; args[0] == \"defined\"', number=10000)\n # 0.002359684993280098\n # it takes 2 milliseconds to compare a string in a list 10,000 times\n # -- versus 2 centiseconds to run is_pronounced 10,000 times\n # (on my Intel Core i5 2.67GHz CPU -- obviously speed depends on the processor)\n # it's also worth noting that readability counts more than speed optimization (most of the time!)\n\n # Quick way to check if any of the above if statements matched\n if \"sp\" in query:\n # if so, we are done in this function\n if query[\"md\"] == \"r\": query_type = \"PRO\"\n if query[\"md\"] == \"d\": query_type = \"DEF\"\n return query\n\n # these will be useful later\n STOP_WORDS = (\"and\", \"meaning\", \"means\", \"max\", \"about\", \"which\", \"that\")\n\n # Parse more complicated requests for synonyms, etc\n # 0 is false in python, so this loop will run until we've removed all the args\n while len(args):\n # we must reset these vars each time the loop starts\n # in case we've deleted items from the args list\n first_word = args[0]\n second_word = args[1] if len(args) > 1 else \"\"\n third_word = args[2] if len(args) > 2 else \"\"\n # we use the teranary operator (this if ____ else that) to avoid an IndexError\n # IndexError would be raised if we tried to access the second element (args[1])\n # in a list which contained only one item (eg args == [\"lonely\"])\n # the teranary operator (in most languages it looks like \"____ ? 
this : that\")\n # returns \"this\" when the if is true and \"that\" when the if is false\n # meaning, if len(args) is NOT greater than 1, second_word == \"\"\n\n # Disambiguate homonym requests from spelling correction requests\n # Example: sounding like tung\n # Example: sounds like doe but spelled differently\n if re.match(r'sound((s)|(ing)) like',f\"{first_word} {second_word}\"):\n\n # again, use len(args) to avoid an IndexError\n if len(args) >= 6 and \\\n re.match(r'((but)|(except)) spelled different(ly)?',f\"{args[3]} {args[4]} {args[5]}\"):\n # but instead of teranary operator,\n # use \"short circuit logic\" -- when python sees \"if __A__ and __B__ \",\n # it knows that if A is false, the whole thing will be false\n # (you can't have \"ice cream and potatoes\" for dinner if you don't have ice cream)\n # and it won't waste time evaluating B, so re.match won't run and args[4]\n # won't be accessed and no IndexError will be raised, yay!\n # regex explained: ? means the prior thing matched zero or one times\n # different(ly)? matches \"different\" and \"differently\"\n query[\"rel_hom\"] = third_word\n # now, delete 6 items from args, starting at item 0\n del args[0:6]\n else:\n query[\"sl\"] = third_word\n del args[0:3]\n\n # Example: spelled like 'cens?r'\n elif re.match(r'spell((ed)|(ing)) like',f\"{first_word} {second_word}\"):\n # two stars (**) means \"unpack\" a dictionary\n # just like unpacking a suitcase, we've dumped the old contents of query\n # into a new dictionary (which we are saving with the same variable name!)\n query = {**query,\"sp\": third_word}\n # query[\"sp\"] = third_word also works fine\n # just showing off how to combine two dictionaries :)\n del args[0:3]\n\n # Example: rhymes with culminate\n elif len(args) > 2 and second_word == \"with\" and is_rhymes(first_word):\n query[\"rel_rhy\"] = third_word\n del args[0:3]\n\n # Example: almost rhymes with culminate\n elif len(args) > 3 and \\\n f\"{first_word} {third_word}\" == \"almost with\" and \\\n is_rhymes(second_word):\n query[\"rel_nry\"] = args[3] # fourth_word\n del args[0:4]\n\n # Example: comes after sea\n elif f\"{first_word} {second_word}\" == \"comes after\":\n query[\"lc\"] = third_word\n del args[0:3]\n elif first_word == \"follows\":\n query[\"lc\"] = second_word\n del args[0:2]\n elif f\"{first_word} {second_word}\" == \"comes before\":\n query[\"rc\"] = third_word\n del args[0:3]\n elif first_word == \"preceeds\":\n query[\"rc\"] = second_word\n del args[0:2]\n\n # Example: describes paint\n elif first_word == \"describes\":\n query[\"rel_jjb\"] = second_word\n del args[0:2]\n\n # Example: associated with feet\n elif f\"{first_word} {second_word}\" == \"associated with\" or \\\n f\"{first_word} {second_word}\" == \"triggered by\":\n query[\"rel_trg\"] = third_word\n del args[0:3]\n\n # Example: meaning feeling tired\n elif first_word in [\"means\",\"meaning\",\"like\"]:\n # get rid of first_word\n del args[0]\n # now short circuit logic again, plus using the tuple from ealier\n # b/c if we have \"meaning deer and sounds like roe\" we don't want\n # query[\"ml\"] == \"deer and sounds like roe\" -- it should be just \"deer\"\n while len(args) and args[0] not in STOP_WORDS:\n # teranary operator prevents KeyError if \"ml\" not already in query dictionary\n query[\"ml\"] = f\"{query['ml']} {args[0]}\" if \"ml\" in query else args[0]\n del args[0]\n # an example with the previous code to make things clearer\n # say args == [\"means\", \"egg\", \"beater\", \"and\", \"max\", \"35\"]\n # first_word IS 
in [\"means\",\"meaning\",\"like\"]\n # del first_word, args is now [\"egg\", \"beater\", \"and\", \"max\", \"35\"]\n # len(args) == 5, args[0] is NOT in STOP_WORDS\n # \"ml\" is NOT in query, so teranary returns args[0] (\"egg\")\n # args[0] is copied to query[\"ml\"] (query is now {ml: \"egg\"})\n # del args[0], args is now [\"beater\", \"and\", \"max\", \"35\"]\n # return to top of while loop, len(args) == 4, args[0] is NOT in STOP_WORDS\n # \"ml\" IS in query, so teranary returns f\"{query['ml']} {args[0]}\" (\"egg beater\") \n # f\"{query['ml']} {args[0]}\" is copied to query[\"ml\"]\n # (query is now {ml: \"egg beater\"})\n # del args[0], args is now [\"and\", \"max\", \"35\"]\n # return to top of while loop, len(args) == 3,\n # args[0] IS in STOP_WORDS (args[0] == \"and\")\n # DO NOT enter the while loop, continue past this code block\n\n # Discover the topic of our query\n elif first_word == \"about\":\n del args[0]\n count = 0\n # Datamuse allows a max of five topic words\n while len(args) and args[0] not in STOP_WORDS and count <= 5:\n query[\"topics\"] = f\"{query['topics']} {args[0]}\" if \"topics\" in query else args[0]\n del args[0]\n # count += 1 is the same as count = count + 1\n count += 1\n\n # How many results to return (max 1000)\n elif first_word in [\"max\", \"maximum\", \"only\"]:\n user_max = convert_num(second_word)\n if user_max and int(user_max) <= 1000:\n query[\"max\"] = user_max\n del args[0:2]\n\n # Remove filler words if they weren't parsed out above\n elif first_word in [\"that\",\"which\",\"and\",\"like\",\"is\"]:\n del args[0]\n\n # Add anything not otherwise parsable to the ml parameter\n else:\n query[\"ml\"] = f\"{query['ml']} {first_word}\" if \"ml\" in query else first_word\n del args[0]\n\n # this is the bottom of that massive while loop\n # if args is not empty by now, we'll start over from the top ^\n\n return query\n # and this is the end of the \"def parse(args, query)\" function\n # whew!", "def cmd_type(args):", "def parser(line):\n # Remove comment and whitespace\n line = re.sub(r'//.*', '' , line) # remove comment\n line = line.strip() # remove whitespace\n\n # Parse A instruction, return int or string\n if line.find('@') == 0:\n try:\n parsed = int(line[1:])\n flag = \"A_DECIMAL\"\n except:\n parsed = line[1:]\n flag = \"A_INSTRUCTION\"\n\n elif line.startswith(\"(\") and line.endswith(\")\"):\n parsed = line[1:-1]\n flag = \"GOTO_INSTRUCTION\"\n else:\n # Parse C instruction, return tuple\n if line.find(';') != -1:\n comp, jump = line.split(';') # comp ; jump\n dest = \"null\"\n if comp.find('=') != -1:\n dest, comp = comp.split('=') # dest = comp ; jump\n parsed = comp, dest, jump\n flag = \"C_INSTRUCTION\"\n\n elif line.find('=') != -1:\n dest, comp = line.split('=') # dest = comp\n jump = \"null\"\n parsed = comp, dest, jump\n flag = \"C_INSTRUCTION\"\n else:\n parsed = None\n flag = None\n\n return parsed, flag", "def parse(token):\n\n pass", "def _parse_arguments():\n parser = get_base_arguments(get_parser())\n parser = get_tc_arguments(parser)\n args, unknown = parser.parse_known_args()\n return args, unknown", "def parse_in_argument_lines(\n argument_line: str, file_import: Optional[Any] = None\n ) -> Tuple[Optional[InputArgument], Any]:\n regex_args_with_type = r\"^(?: *|\\t)(?P<name>\\*{0,4}(\\w+|\\w+\\s|\\w+\\.\\w+\\s)\\((?P<type>.*)\\)):(?P<desc>(\\s|\\S)*)\"\n argument_sections = re.findall(\n regex_args_with_type, argument_line, re.MULTILINE\n )\n if len(argument_sections) < 1:\n regex_args_no_type = r\"^(?: 
*|\\t)(?P<name>)(\\w+|\\w+\\s|\\w+\\.\\w+\\s|\\w+\\.\\w+):(?P<desc>(\\s|\\S)*)\"\n argument_sections = re.findall(\n regex_args_no_type, argument_line, re.MULTILINE\n )\n if len(argument_sections) < 1:\n return None, None\n else:\n name = argument_sections[0][1].strip()\n description = argument_sections[0][2].strip()\n return InputArgument(name=name, description=description), None\n else:\n name = argument_sections[0][1].strip()\n description = argument_sections[0][3].strip()\n input_type_str = argument_sections[0][2]\n try:\n if file_import and input_type_str in dir(file_import):\n input_type = file_import.__getattribute__(input_type_str)\n else:\n input_type = eval(input_type_str)\n except Exception as err:\n logger.debug(\n f\"[yellow]Problems parsing input type {input_type_str}, setting isArray=False.\"\n f\"Error was: {err}[/yellow]\"\n )\n input_type = None\n\n return InputArgument(name=name, description=description), input_type", "def _parse_name_type_pairs(self, array, types):\n pred_list = []\n if len(array)%3 != 0:\n print(\"Expected predicate to be typed \" + str(array))\n sys.exit()\n for i in range(0, int(len(array)/3)):\n if array[3*i+1] != '-':\n print(\"Expected predicate to be typed\")\n sys.exit()\n if array[3*i+2] in types:\n pred_list.append((array[3*i], array[3*i+2]))\n else:\n print(\"PARSING ERROR {} not in types list\".format(array[3*i+2]))\n print(\"Types list: {}\".format(self.type_list))\n sys.exit()\n return pred_list" ]
[ "0.6011422", "0.5526414", "0.54611135", "0.53997856", "0.5341764", "0.53028625", "0.5287242", "0.52829146", "0.528289", "0.5266029", "0.5262158", "0.52545506", "0.52489644", "0.5247131", "0.52150744", "0.52064526", "0.5188259", "0.51840985", "0.5177085", "0.5135521", "0.51304114", "0.5128835", "0.51240194", "0.5109414", "0.50875187", "0.5054045", "0.5047146", "0.5040012", "0.5038182", "0.50181854" ]
0.7484519
0
Get the default value for a property with name `property_name` on class `type_`
def get_property_default(type_: Type, property_name: str) -> ...: if issubclass_safe(type_, tuple): # namedtuple if hasattr(type_, "_field_defaults"): default = type_._field_defaults.get(property_name, inspect.Parameter.empty) else: default = inspect.Parameter.empty else: # everything else default = getattr(type_, property_name, inspect.Parameter.empty) if callable(default) or isinstance(default, property): default = inspect.Parameter.empty return default
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_property_default_value(property):\n return _get_default_value(get_type_name(property.type),\n property.type.is_simple,\n property.is_iterative,\n property.is_required)", "def get_property_default(self, name, default):\n if (not name in self.properties):\n return default\n return self.properties[name]", "def _get_default_value(type_name, is_simple, is_iterative, is_required):\n # Iterables: convert via pre-defined mappings.\n if is_iterative:\n if is_required:\n return _get_iterative_default_value()\n else:\n return _get_iterative_null_value()\n # Simple types: convert via pre-defined mappings.\n elif is_simple:\n if is_required:\n return _get_simple_default_value(type_name)\n else:\n return _get_simple_null_value(type_name)\n # Complex types: convert via pre-defined mappings.\n else:\n if is_required:\n return _get_complex_default_value(type_name)\n else:\n return _get_complex_null_value(type_name)", "def _get_simple_default_value(simple):\n return _SIMPLE_DEFAULT_VALUES[simple]", "def get(node_key:str, property_name:str, default=None):\r\n node_names = split_node_key(node_key)\r\n node = root\r\n try:\r\n property = node.properties[property_name]\r\n except KeyError:\r\n property = default\r\n for node_name in node_names:\r\n try:\r\n node = node.nodes[node_name]\r\n except KeyError:\r\n break\r\n try:\r\n property = node.properties[property_name]\r\n except KeyError:\r\n pass\r\n return property", "def get(self, key, default=None, type=None):\n if key not in self:\n return default\n value = self[key]\n if type is not None:\n value = type(value)\n return value", "def _get_model_db_property(self, property_name, default_value=None):\n model_db_config = self._resource_config.get(\"shared_resource\").get(\"model_db\")\n return model_db_config.get(property_name, default_value)", "def _default_field_value(field):\n return field.default or ([field.value_cls()] if field.is_list else field.value_cls())", "def get_property(self, key: str, default_value=None) -> Any:\n return_value = default_value\n if \"properties\" in self._node[\"app_data\"]:\n return_value = self._node[\"app_data\"][\"properties\"].get(key, default_value)\n\n return return_value", "def get(self, name, default=UNDEFINED):\n try:\n return self.__getattr__(name)\n except AttributeError:\n return default", "def default_value(self):\n return self.properties.get('DefaultValue', None)", "def getValue(name, default=None):", "def interpret(self, value):\n if value is not PROPERTY_SENTINEL:\n if self.kind is not None:\n # The config system handles type checking for us here.\n self.kind.set_val(value)\n return value\n\n if self.default is not PROPERTY_SENTINEL:\n return self.default\n\n raise ValueError(\n \"No default specified and no value provided for '{}' from {} '{}'\".format(\n self.name, self.__property_type, self.module))", "def get_dynamic_property(vim, mobj, type, property_name):\n properties = get_dynamic_properties(vim, mobj, [property_name], type)\n property_value = None\n if property_name in properties:\n property_value = properties.get(property_name)\n return property_value", "def get(self, key: str, default=None):\n\n if not hasattr(self, key):\n if key in LIMB_KEY_LOOKUP:\n key = LIMB_KEY_LOOKUP[key]\n else:\n raise KeyError('\"{}\" not a valid Property key'.format(key))\n\n out = getattr(self, key)\n return default if out is None else out", "def get_default_value(self):\n pass", "def get_default_value_of_type(self, primitive_type):\n if primitive_type == primitives.FUZZABLE_STRING:\n return 'fuzzstring'\n elif 
primitive_type == primitives.FUZZABLE_INT:\n return '0'\n elif primitive_type == primitives.FUZZABLE_BOOL:\n return 'false'\n elif primitive_type == primitives.FUZZABLE_OBJECT:\n return '{ \"fuzz\" : false }'\n else:\n logger.raw_network_logging(f'Unknown type {primitive_type} for default')\n return 'null'", "def get_default_value(self, name):\n return self.get_attribute_schema(name).default", "def get(self, name, default=None):\n try:\n return self.__getattribute__(name, default)\n except AttributeError:\n return default", "def _get(self):\n var = self.variables.get(prop_name)\n if var is None:\n result = default() if callable(default) else default\n if objtype:\n self.variables[prop_name] = result\n return result\n if not isinstance(var, prop_type):\n raise ValueError(\"unexpected property type\")\n if objtype:\n return cast(T, var)\n return cast(T, var.value)", "def getvalue(self, name, *default):\n try:\n return self.getattr(name).value\n except KeyError:\n if default:\n return default[0]\n raise", "def get_instance_attr(obj, name, default=None) :\n class_value = get_class_attr(obj, name)\n obj_value = getattr(obj, name, default)\n \n # If there was no instance attribute by that name, getattr will return the\n # class_value if it exists, which we do not want.\n if has_value(class_value) and obj_value is class_value :\n return default\n \n return obj_value", "def _infer_default_value_type(default_value):\n if default_value is Missing:\n return DefaultValue.missing\n elif default_value is Self:\n return DefaultValue.object\n elif isinstance(default_value, TraitListObject):\n return DefaultValue.trait_list_object\n elif isinstance(default_value, TraitDictObject):\n return DefaultValue.trait_dict_object\n elif isinstance(default_value, TraitSetObject):\n return DefaultValue.trait_set_object\n elif isinstance(default_value, list):\n return DefaultValue.list_copy\n elif isinstance(default_value, dict):\n return DefaultValue.dict_copy\n else:\n return DefaultValue.constant", "def __get__(self, instance, owner):\n attr_name = self.get_attr_name(instance)\n return instance.__dict__.get('_%s_typed' % attr_name, self.default)", "def _get_default_column_value(column_type):\n type_schema = {\n 'datetime': None,\n 'big_integer': 0,\n 'integer': 0,\n 'string': ''\n }\n\n if isinstance(column_type, sa_sql.type_api.Variant):\n return _get_default_column_value(column_type.impl)\n\n return type_schema[column_type.__visit_name__]", "def _get_resource_property(self, resource_name, property_name, default_value=None):\n if resource_name == \"ExperimentDb\":\n return self._get_experiment_db_property(property_name, default_value)\n elif resource_name == \"ModelDb\":\n return self._get_model_db_property(property_name, default_value)\n elif resource_name == \"JoinDb\":\n return self._get_join_db_property(property_name, default_value)\n elif resource_name == \"IAMRole\":\n return self._get_iam_role_property(property_name, default_value)\n else:\n return None", "def get_attr_default(self, attr_name):\n for defaults in (self._ATTRIBUTE_DEFAULTS.get(self.field_type, {}),\n self._ATTRIBUTE_DEFAULTS['*']):\n try:\n return defaults[attr_name]\n except KeyError:\n continue\n\n return None", "def get_attr_default(self, attr_name):\n for defaults in (self._ATTRIBUTE_DEFAULTS.get(self.field_type, {}),\n self._ATTRIBUTE_DEFAULTS['*']):\n try:\n return defaults[attr_name]\n except KeyError:\n continue\n\n return None", "def getdefault(self, option, type=str, default=None):\r\n return self.get(Config.DEFAULT_SECTION, option, 
type, default=default)", "def get_prop(prop, config_type=\"\", config_path=\"\"):\n\n paths = []\n\n if len(config_type):\n paths = [get_config_path(config_type, config_path)]\n else:\n paths = [get_global_config_path()]\n\n user_path = get_user_config_path()\n if os.path.exists(user_path):\n paths.append(user_path)\n\n return get_property_value(prop, paths)" ]
[ "0.8010006", "0.7963241", "0.71551555", "0.6325218", "0.63015527", "0.6284296", "0.6281772", "0.6276523", "0.62745297", "0.6232446", "0.6232106", "0.62256825", "0.6220216", "0.6199206", "0.6176899", "0.6167932", "0.6161065", "0.61468476", "0.6142605", "0.6139256", "0.6119665", "0.60997283", "0.6095409", "0.6085421", "0.6085332", "0.607201", "0.6054589", "0.6054589", "0.6038982", "0.60275257" ]
0.85256344
0
Parse website URL from proxy page
def get_website_url(self, proxy):
    body = BeautifulSoup(proxy_get_url(proxy))
    refresh_meta = body.find('meta', attrs={'http-equiv': 'refresh'})['content']
    return refresh_meta.split('=')[-1].strip()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse_source(self, response):\n return response.url", "def _parse_source(self, response) -> str:\n return response.url", "def _parseurl(url):\n tracker1=url\n port=int(re.findall(\"[0-9]+\",tracker1)[0])\n host=re.findall(\"[^0-9]+\",tracker1)[0]\n host=host[:-1]\n host=host[6:]\n return host,port", "def extract_real_link(self, text):\n if text.startswith('https://www.google.com/url?'):\n return parse_qs(urlparse(text).query)['url'][0]\n\n return text", "def proxy_ref_info(request):\n ref = request.headers.get('referer')\n if ref:\n _, _, uri = split_url(ref)\n if uri.find(\"/\") < 0:\n return None\n first, rest = uri.split(\"/\", 1)\n if first in \"pd\":\n parts = rest.split(\"/\", 1)\n r = (parts[0], parts[1]) if len(parts) == 2 else (parts[0], \"\")\n print(\"Referred by proxy host, uri: %s, %s\", r[0], r[1])\n return r\n return None", "def _get_url(self, url, proxies=None):\n scraper = cloudscraper.create_scraper()\n try:\n html_rsp = scraper.get(url, proxies=proxies).text\n if html_rsp is None:\n logging.info('Error in SBScraper._get_url with url %s and proxy %s.', url, proxies)\n logging.info('Web response had NoneType.')\n self.html_response = False\n return\n self.html_response = html_rsp\n return\n # General exception as there are lots of errors with cloudflare. Every exception is handled via return values.\n except Exception as err: # pylint: disable=broad-except\n logging.info('Error in SBScraper._get_url with url %s and proxy %s.', url, proxies)\n logging.info('Error message was: %s', err)\n self.html_response = False\n return", "def parse(self, html):\n \n result =json.loads(html)\n if result['code'] != 0:\n return\n MAX_PAGE = int(result['data']['last_page'])\n hosts_ports = result['data']['data']\n for ip_address in hosts_ports:\n if(ip_address):\n host = ip_address['ip']\n port = ip_address['port']\n yield Proxy(host=host, port=port)", "async def _free_proxy_list_parser(content):\n page = lxml.html.fromstring(content)\n proxies = []\n for row in page.cssselect('tbody tr'):\n tds = row.cssselect('td')\n if 'yes' in tds[6].text_content():\n proxies.append('https://%s:%s' % (tds[0].text_content(), tds[1].text_content()))\n else:\n proxies.append('http://%s:%s' % (tds[0].text_content(), tds[1].text_content()))\n\n return proxies", "def get_random_proxy():\n url=requests.get(proxypool_url).text.strip()\n #logger.info(\"now url is\",url)\n return url", "def parse(html, url, bases): \n\n soup = BeautifulSoup(html, 'lxml')\n htmlBody = soup.find('body').get_text().strip()\n links = [urljoin(url, l.get('href')) for l in soup.findAll('a')]\n links = [l for l in links if urlparse(l).netloc in bases]\n return url, htmlBody, links", "def get_info_of_url(url):\n pass", "def parse(self, url):\n pass", "def parse_urls(record):\n url_list = []\n try:\n page_url = record['WARC-Header-Metadata']['WARC-Target-URI']\n x = urlparse.urlparse(page_url)\n url_list += [(x.netloc, x.path)]\n except:\n pass\n try: \n links = record['Payload-Metadata']['HTTP-Response-Metadata']['HTML-Metadata']['Links']\n for url in links:\n x = urlparse.urlparse(url['url'])\n url_list += [(x.netloc, x.path)]\n except:\n pass\n \n return url_list", "def __parse_urls_from_response(cls, target_str):\n\t\ttree = html.fromstring(target_str)\n\t\turls = tree.xpath('//a[@class=\"layer-link\"]/@href')\n\t\treturn urls", "def getDomain(url):\n domain = string.replace(url,\"https://www.\",\"\")\n domain = string.replace(domain,\"http://www.\",\"\")\n domain = string.replace(domain,\"http://\",\"\")\n domain = 
string.replace(domain,\".com/\",\"\")\n domain = string.replace(domain,\".com\",\"\")\n return domain", "def follow_redirects(self, url):\n try:\n return requests.get(url).url\n except requests.RequestException:\n return None", "def extract_link(url):\n\theaders = {\"Host\": \"www.zomato.com\",\n\t \"User-Agent\": \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:50.0) Gecko/20100101 Firefox/50.0\",\n\t \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\",\n\t \"Accept-Language\": \"en-US,en;q=0.5\",\n\t \"Accept-Encoding\": \"gzip, deflate, br\",\n\t \"Referer\": \"https://www.zomato.com/\",\n\t \"Connection\": \"keep-alive\"}\n\n\tif url.startswith('file'):\n\t\twith open(url.replace('file:\\\\\\\\', ''), encoding='utf-8') as fp:\n\t \t\tpage_source = fp.read()\n\n\telse:\n\t\tr = requests.get(url, headers=headers)\n\t\tif r.status_code == 404:\n\t\t\treturn None\n\t\tpage_source = r.text\n\n\tpage_source = re.sub('<br>', '', page_source)\n\tpage_source = re.sub('<br />', '', page_source)\n\tpage_source = re.sub('<br/>', '', page_source)\n\tsoup = BeautifulSoup(page_source, 'html.parser')\n\n\treturn soup", "def parse_start_url(self, response):\n self.parse_obj(response)", "async def _proxy_list_parser_http(content):\n return ['http://%s' % proxy_string for proxy_string in content.split(\"\\r\\n\") if proxy_string]", "def _parsing_url(self, base_url):\n url2 = f\"{self.location}?apikey={self.api_key}&details=true\"\n absolute_url = urljoin(base_url, url2)\n return absolute_url", "def _urlopen(url):\n headers = config.get(\"extra_headers\",{})\n headers['User-Agent'] = config.user_agent\n\n type, host, selector = split_type_host(url)\n\n if type.lower() == \"https\":\n conn = ProxyHTTPSConnection(host, url=url)\n else:\n conn = ProxyHTTPConnection(host, url=url)\n\n conn.request(\"GET\", selector, headers=headers)\n return conn.getresponse()", "def fetch_url(session, url):\n return session.get(url).text", "def url(result):\n return result.entities.get(u'urls')", "def GetUrlFirst(self):\n self.url = \"https://www.taobao.com/\"\n self.host = \"www.taobao.com\"\n self.referer = \"https://www.taobao.com/\"\n content = self.GetContent()\n __clear__ = '<a href=.*?</a>'\n match = open(self.base_dir_url+\"url_first.html\", 'w')\n try:\n all_link = re.findall(__clear__, content, re.S)\n print \"All links of the web page is: \", len(all_link)\n self.DealUrlFirst(match, all_link)\n except:\n print \"Something wrong is happening!\"\n finally:\n match.close()\n match.close()", "def _real_extract(self, url):\n pass", "def processUrl(url):\n domain = 'http://www.gsmarena.com/'\n if domain not in url:\n url = urllib.parse.urljoin(domain, url)\n return url", "def __ParseUrl(url):\n return urlparse(url)", "def scrap_site(link):\n pass # Scrapy or BeautifulSoup", "def proxy_url(self):\n return self.__proxy_url", "def _parse_proxy(proxy):\r\n scheme, r_scheme = _splittype(proxy)\r\n if not r_scheme.startswith(\"/\"):\r\n # authority\r\n scheme = None\r\n authority = proxy\r\n else:\r\n # URL\r\n if not r_scheme.startswith(\"//\"):\r\n raise ValueError(\"proxy URL with no authority: %r\" % proxy)\r\n # We have an authority, so for RFC 3986-compliant URLs (by ss 3.\r\n # and 3.3.), path is empty or starts with '/'\r\n end = r_scheme.find(\"/\", 2)\r\n if end == -1:\r\n end = None\r\n authority = r_scheme[2:end]\r\n userinfo, hostport = _splituser(authority)\r\n if userinfo is not None:\r\n user, password = _splitpasswd(userinfo)\r\n else:\r\n user = password = None\r\n return scheme, 
user, password, hostport" ]
[ "0.64632994", "0.6277717", "0.6234803", "0.6130124", "0.6128327", "0.6115169", "0.60802186", "0.6072408", "0.59983116", "0.59591407", "0.59323686", "0.5928596", "0.590725", "0.5872435", "0.5863363", "0.58452016", "0.5842432", "0.58215463", "0.5814262", "0.57957", "0.577748", "0.5767904", "0.5766858", "0.57588184", "0.5751538", "0.5750791", "0.5749256", "0.57486784", "0.5731561", "0.5729455" ]
0.65610504
0
Get a list of all the categories existing in the database
def get_all() -> list:
    categorias = []
    conn = GenericDao.connect()
    cursor = conn.execute("SELECT * FROM categorias")
    for row in cursor:
        categoria = Categoria(row[1], row[0])
        categorias.append(categoria)
        if debug:
            print(str(categoria))

    conn.close()
    return categorias
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def categories(self):\n pass", "def categories(self):\n game_categories = self.game_categories.all()\n return [ gc.category for gc in game_categories ]", "def categories(self):\n cur = self.con.execute('select category from cc');\n return [d[0] for d in cur]", "def get_used():\r\n sql = text('''\r\n SELECT category.* FROM category, app\r\n WHERE app.category_id=category.id GROUP BY category.id\r\n ''')\r\n results = db.engine.execute(sql)\r\n categories = []\r\n for row in results:\r\n category = dict(id=row.id, name=row.name, short_name=row.short_name,\r\n description=row.description)\r\n categories.append(category)\r\n return categories", "def _get_all_categories() -> dict:\n categories = database.fetchall(\"Categories\", \"id\", \"name_ua\")\n return categories", "def get_all_categorizations():\n try:\n query = run_select_query('SELECT id, category_name, category_item_type, name \\\n FROM categorization \\\n JOIN item ON (categorization.item_id = item.id)')\n return [{'id': obj[0], 'category_name': obj[1], 'category_item_type': obj[2].capitalize(), 'item_name': obj[3]} for obj in query]\n except (Exception, psycopg2.Error) as error:\n return {\"status\": \"error\", \"error\": error}", "def categories(self) -> List[Category]:\n return list(set(self.mapping.values()))", "def categories(self):\n\t\treturn (sorted(self.dictData.keys()))", "def list(self):\n return list(sorted(self.manager.data[\"category\"].keys()))", "def get_list(self):\n categories = []\n for attribut in self.attributes:\n attr = getattr(self, attribut, False)\n if attr is True:\n categories.append(attribut)\n if getattr(self, 'education') is True:\n categories.append(_(u'education'))\n if getattr(self, 'training') is True:\n categories.append(_(u'training'))\n if getattr(self, 'tutoring') is True:\n categories.append(_(u'tutoring'))\n\n return categories", "def Categories():\n cat = {\n \t \"Featured\": 0,\n \t \"All\": 1,\n \t \"Collectibles\": 2,\n \t \"Clothing\": 3,\n \t \"BodyParts\": 4,\n \t \"Gear\": 5,\n \t \"Models\": 6,\n \t \"Plugins\": 7,\n\t \"Decals\": 8,\n \t \"Audio\": 9,\n \t \"Meshes\": 10,\n\t \"Accessories\": 11,\n\t \"AvatarAnimations\": 12,\n\t \"CommunityCreations\": 13,\n\t \"Video\": 14,\n\t \"Recommended\": 15\n }\n return cat", "def list_categories(self):\n raise NotImplementedError()", "def categories(self):\n return self.env.categories", "def get_category_list():\n return Category.objects.filter(active=True)", "def print_categories_list():\n\n categories = []\n for item in data:\n cat = item[\"category\"]\n\n if cat not in categories:\n categories.append(cat)\n\n print(categories) # print the list", "def getCategories(self):\n return self.categories.keys()", "def getCategories(self):\r\n return self.categories", "def categories(self):\n\t\treturn self._categories", "def categories(self):\n return self._data[\"categories\"]", "def equipos_categoria(self):\n equipos = dict()\n qs = self.equipo_set.all()\n for equipo in qs:\n nombre = equipo.modelo.categoria.nombre\n if nombre in equipos:\n equipos[nombre].append(equipo)\n else:\n equipos[nombre] = [equipo]\n return equipos", "def get_categories(self):\n categories = self.session.query(Category).all()\n return categories", "def serialize_categories(md: Metadata) -> list:\n categories = []\n all_cat = md.categories.all()\n for cat in all_cat:\n category = OrderedDict()\n\n category[\"id\"] = cat.id\n category[\"type\"] = cat.type\n category[\"title_EN\"] = cat.title_EN\n category[\"description_EN\"] = cat.description_EN\n 
category[\"title_locale_1\"] = cat.title_locale_1\n category[\"description_locale_1\"] = cat.description_locale_1\n category[\"title_locale_2\"] = cat.title_locale_2\n category[\"description_locale_2\"] = cat.description_locale_2\n category[\"symbol\"] = cat.symbol\n category[\"online_link\"] = cat.online_link\n\n categories.append(category)\n\n return categories", "def categories(self):\n return self.__categories", "def category_list():\n categories = Category.objects.filter(active=True)\n return {'categories': categories}", "def get_categories():\n return session.query(Category)", "def getAllCategories(self):\n return self.categories", "def get_categories():\n categories = app.preprocessed.uniq_categs\n result = {\n 'success': True,\n 'data': {\n 'categories': categories\n }\n }\n return jsonify(result)", "def categories(self):\n return self._categories", "def Categories(self):\r\n return self._categories", "def search_categories(self):\n with Transaction().start(DBNAME, 1):\n categorieslist = self.Category.search(['parent', '=', 'Ingredients'])\n return tuple(i.name for i in categorieslist)" ]
[ "0.7249249", "0.7223673", "0.7196713", "0.7126999", "0.7086236", "0.69820863", "0.6896232", "0.6895573", "0.6892413", "0.6884656", "0.68071264", "0.6762062", "0.6733714", "0.6714556", "0.6665258", "0.6662005", "0.6654026", "0.6646227", "0.66089505", "0.6584195", "0.6580966", "0.6574989", "0.656291", "0.6559897", "0.6530247", "0.64809364", "0.64619935", "0.6451316", "0.6451079", "0.64473987" ]
0.77978975
0
Look up a single category in the database given its id
def get_id(idd: int) -> Categoria:
    conn = GenericDao.connect()
    cursor = conn.execute('SELECT * FROM categorias where categoria_id = ?', (str(idd),))
    row = cursor.fetchone()
    categoria = Categoria(row[1], row[0])
    if debug:
        print(str(categoria))

    conn.close()
    return categoria
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_id_categorie_in_database(self, db):\n\n try:\n select_query = \"SELECT id_categorie FROM categorie WHERE categorie_name='\"+self.categorie_name+\"';\"\n result = db.query(select_query)\n self.id_categorie = result[0][\"id_categorie\"]\n\n\n except IntegrityError as int_err:\n print(\"There was an integrity error while selecting id categorie\")\n print(int_err)\n\n except ProgrammingError as prg_err:\n print(\"There was a programming error while selecting id categorie\")\n print(prg_err)", "def get_id_nombre(categoria_nombre: str) -> str:\n conn = GenericDao.connect()\n cursor = conn.execute('SELECT categoria_id FROM categorias where categoria_nombre=?', (categoria_nombre,))\n row = cursor.fetchone()\n id_categoria = row[0]\n if debug:\n print(str(id_categoria))\n\n conn.close()\n return id_categoria", "def get_Category(id):\n category= Category.objects.get(id=id)\n return category", "def getCategory():", "def select_categories_database(self):\n # connection to the database\n self.cursor = self.data_base.cursor(MySQLCursorPrepared)\n self.cursor.execute(\"USE Purbeurre\")\n self.cursor.execute(\"SELECT id, categories FROM Category ORDER BY id\")\n id_name_categories = self.cursor.fetchall()\n id_name_categories = self.new_orm.transform_categories_to_object(id_name_categories)\n return id_name_categories", "def categories_choice(self):\n\n self.cursor.execute(\"\"\" SELECT id, name\n FROM category\n ORDER BY id LIMIT 5 OFFSET 0\"\"\")\n rows = self.cursor.fetchall()\n print(\"Choisissez votre catégorie :\")\n possible_choice = []\n while True:\n try:\n for row in rows:\n possible_choice.append(row[0])\n print(row[0], row[1])\n choice = int(input(\"Entrez votre choix: \\n\"))\n if choice in possible_choice:\n break\n except ValueError:\n continue\n\n return choice", "def get_all() -> list:\n categorias = []\n conn = GenericDao.connect()\n cursor = conn.execute(\"SELECT * FROM categorias\")\n for row in cursor:\n categoria = Categoria(row[1], row[0])\n categorias.append(categoria)\n if debug:\n print(str(categoria))\n\n conn.close()\n return categorias", "def buscar_categorias(id):\n categorias = buscar_categorias_produtos(id)\n categorias = [c.json() for c in categorias]\n\n return jsonify(categorias)", "def fill_category(self):\n cursor = self.conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n categories = dict()\n result = requests.get('https://fr.openfoodfacts.org/categories.json').json()\n for element in result['tags']:\n try:\n cursor.execute(\"INSERT INTO category (tag, name, url) VALUES (%s, %s, %s) RETURNING id, tag\",\n (element[\"id\"], element[\"name\"], element[\"url\"]))\n query_result = cursor.fetchone()\n categories.__setitem__(query_result[1], query_result[0])\n except self.conn.OperationalError:\n print(\"operation Error\")\n except self.conn.DataError:\n print(\"Data Error\")\n self.conn.commit()\n cursor.close()\n return categories", "def get_category_id(self):\n return self.cleaned_data['category_id']", "def food_choice(self, category_id):\n\n self.cursor.execute(\"\"\" SELECT food.id, food.name\n FROM food\n INNER JOIN category_food\n ON food.id = category_food.food_id\n WHERE category_food.category_id = %s && nutriscore > 'b'\n ORDER BY id LIMIT 8 OFFSET 0\"\"\", category_id)\n rows = self.cursor.fetchall()\n print(\"Choisissez votre aliment :\")\n possible_choice = []\n while True:\n try:\n for row in rows:\n possible_choice.append(row[0])\n print(row[0], row[1])\n choice = int(input(\"Entrez votre choix: \\n\"))\n if choice in possible_choice:\n 
break\n except ValueError:\n continue\n\n return choice", "def getcategory(self):\n\n response = requests.get(\"https://fr.openfoodfacts.org/categories.json\")\n\n data = response.json()\n\n self.rawcategorydata = data", "def insert(categoria: Categoria) -> int:\n conn = GenericDao.connect()\n cursor = conn.cursor()\n\n sql = 'INSERT INTO categorias(categoria_id) VALUES (?)'\n values = (int(categoria.categoria_id),\n categoria.categoria_nombre)\n cursor.execute(sql, values)\n conn.commit()\n conn.close()\n categoria.idd = cursor.lastrowid\n if debug:\n print(\"Categoria insertada: \" + str(categoria))\n return categoria.idd", "def get_category(id):\n\n category = get_db().execute('SELECT * FROM categories WHERE id = ?', (id, )).fetchone()\n\n if not category:\n abort(404, \"category id {0} does not exist.\".format(id))\n\n return category", "def search_categorie(input) :\n j = _jpdb()\n _input = _process_search_input(input)\n if not _input : return None\n f = j.base_format\n q = Query().select(f.categories, f.categories.id, f.categories.name)\n q.where().equal(f.categories.name, _input)\n categorie_data = j.executeQuery(q)\n\n if categorie_data: \n cat_id, cat_name = categorie_data[0]\n examples = _create_examples(j.list_word_by_categorie, cat_name)\n return SelectorResult('categorie', cat_id, cat_name, *examples)", "def __get_category_from_id(self, id, resp):\n # Getting ID and position of category in relation to response\n _id_list = [_.get('id') for _ in resp]\n idx = _id_list.index(id)\n return resp[idx]", "def select_foods_database(self, user_answer_id_category):\n self.cursor = self.data_base.cursor(MySQLCursorPrepared)\n self.cursor.execute(\"\"\"SELECT id, name_food\n FROM Food\n WHERE category_id = {}\"\"\".format(user_answer_id_category))\n id_name_food = self.cursor.fetchall()\n id_name_food = self.new_orm.transform_foods_to_object(id_name_food)\n return id_name_food", "def read_one(id):\r\n # Build the initial query\r\n category = Category.query.filter(Category.id == id).one_or_none()\r\n\r\n if category is not None:\r\n # Serialize the data for the response\r\n category_schema = CategorySchema()\r\n data = category_schema.dump(category)\r\n return data\r\n # Otherwise, nope, didn't find that category\r\n else:\r\n abort(404, f\"Category not found for id: {id}\")", "def __str__(self):\n \n return \"Category ID: %s %s\" % (self.category_id, self.name)", "async def get_category(cls, session: AsyncSession, id: int) -> Optional[Category]:\n\n stmt = select(Category).where(Category.id == id)\n result = await session.execute(stmt)\n return result.scalars().first()", "def get_category_name(self, selected_option):\r\n try:\r\n conn = self.create_connection()\r\n query = \"\"\"SELECT distinct category\r\n\t\t\t\t\t FROM categories\r\n\t\t\t\t\t WHERE sub_category = '%s'\"\"\"%(selected_option)\r\n equipment = pd.read_sql(query, conn).iloc[0, 0]\r\n conn.close()\r\n except (psycopg2.Error, ValueError):\r\n print(\"Error at get_category_name, check connection or query\")\r\n return equipment", "def get_id(category, name):\n item = Ingredients.query.filter_by(category=category).filter_by(name=name).first()\n if(item):\n return item.id \n else:\n return None", "def __str__(self):\n return self.category_name", "def category(self):\n return self._ctx.get(\"name\", self._ctx[\"id\"])", "def get_targeted_category(self):\n\n db.execute(\"\"\"\n SELECT Category.id,\n (\n SELECT COUNT(Product_per_category.product_id)\n FROM Category AS category_duplicate\n INNER JOIN Product_per_category\n ON 
Product_per_category.category_id = category_duplicate.id\n WHERE Category.id = category_duplicate.id\n ) AS products_count\n FROM Product\n INNER JOIN Product_per_category\n ON Product.id = Product_per_category.product_id\n INNER JOIN Category\n ON Category.id = Product_per_category.category_id\n WHERE Product.id = %s\n ORDER BY products_count\n \"\"\", (self.id,))\n try:\n self.category_id = db.fetch(True)[self.category_concordance][0]\n except IndexError:\n return", "def get_categories(self, categories):\r\n category, created = Categories.objects.get_or_create(name=categories)\r\n category.save()", "def get_categoria_cmd(categoria_id):\n return NodeSearch(categoria_id)", "def set_category(dbtype):\n # define object and object lots\n obj = eval(dbtype.capitalize())\n items = eval(dbtype.capitalize()+'Lot')\n\n # query the object items and object lots items\n cat = session.query(obj).order_by(obj.name).all()\n lots = session.query(items).all()\n\n # create a dict to associate object id with its respective object lot items\n lot_dict = {}\n for x in range(1, session.query(obj).count()+1):\n lot_dict[x] = (session.query(items)\n .filter(getattr(items, dbtype+'_id') == x)\n .order_by(items.date).all())\n return (cat, lot_dict, lots)", "def categories(data):\n if data:\n for i in data:\n category = CategoriesModel(categories=i['categories'])\n category.save()", "def test_view_category_by_id(self):\n rv = self.client().post('/categories/', data=self.category)\n self.assertEqual(rv.status_code, 201)\n result_in_json = json.loads(rv.data.decode('utf-8').replace(\"'\", \"\\\"\"))\n result = self.client().get(\n '/categories/{}'.format(result_in_json['category_name']))\n self.assertEqual(result.status_code, 200)\n self.assertIn('Stews', str(result.data))" ]
[ "0.7121691", "0.6554141", "0.6497267", "0.6486848", "0.604737", "0.599137", "0.5966648", "0.59002024", "0.58759624", "0.58522", "0.5840264", "0.58314836", "0.58063036", "0.5780173", "0.5775938", "0.56541747", "0.56518245", "0.56508154", "0.5645675", "0.5632842", "0.56258434", "0.5597908", "0.5588339", "0.55750036", "0.5526884", "0.55090827", "0.5508984", "0.54887205", "0.54724205", "0.5460079" ]
0.7208243
0
Look up the id of a category in the database given its name
def get_id_nombre(categoria_nombre: str) -> str:
    conn = GenericDao.connect()
    cursor = conn.execute('SELECT categoria_id FROM categorias where categoria_nombre=?', (categoria_nombre,))
    row = cursor.fetchone()
    id_categoria = row[0]
    if debug:
        print(str(id_categoria))

    conn.close()
    return id_categoria
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_id(idd: int) -> Categoria:\n conn = GenericDao.connect()\n cursor = conn.execute('SELECT * FROM categorias where categoria_id = ?', (str(idd),))\n row = cursor.fetchone()\n categoria = Categoria(row[1], row[0])\n if debug:\n print(str(categoria))\n\n conn.close()\n return categoria", "def find_id_categorie_in_database(self, db):\n\n try:\n select_query = \"SELECT id_categorie FROM categorie WHERE categorie_name='\"+self.categorie_name+\"';\"\n result = db.query(select_query)\n self.id_categorie = result[0][\"id_categorie\"]\n\n\n except IntegrityError as int_err:\n print(\"There was an integrity error while selecting id categorie\")\n print(int_err)\n\n except ProgrammingError as prg_err:\n print(\"There was a programming error while selecting id categorie\")\n print(prg_err)", "def getCategory():", "def category(self):\n return self._ctx.get(\"name\", self._ctx[\"id\"])", "def get_category_name(self, selected_option):\r\n try:\r\n conn = self.create_connection()\r\n query = \"\"\"SELECT distinct category\r\n\t\t\t\t\t FROM categories\r\n\t\t\t\t\t WHERE sub_category = '%s'\"\"\"%(selected_option)\r\n equipment = pd.read_sql(query, conn).iloc[0, 0]\r\n conn.close()\r\n except (psycopg2.Error, ValueError):\r\n print(\"Error at get_category_name, check connection or query\")\r\n return equipment", "def get_Category(id):\n category= Category.objects.get(id=id)\n return category", "def __str__(self):\n return self.category_name", "def __str__(self):\n \n return \"Category ID: %s %s\" % (self.category_id, self.name)", "def select_categories_database(self):\n # connection to the database\n self.cursor = self.data_base.cursor(MySQLCursorPrepared)\n self.cursor.execute(\"USE Purbeurre\")\n self.cursor.execute(\"SELECT id, categories FROM Category ORDER BY id\")\n id_name_categories = self.cursor.fetchall()\n id_name_categories = self.new_orm.transform_categories_to_object(id_name_categories)\n return id_name_categories", "def __str__(self):\n return self.cat_name", "def get_category_name(self):\n return self._ctx.get(\"name\", self._ctx[\"id\"])", "def return_cat_name(json_coco, category):\n for cat in json_coco['categories']:\n if cat['id'] == category:\n return cat['name']\n print(\"Categoria não encontrada: \", category)\n sys.exit()", "def get_id_by_name(self, name):\n\n for category in self.category_list:\n if name == category.name:\n return category.id\n response = self.client._get(f\"/site.json\")[\"categories\"]\n for dic in response:\n if dic[\"name\"] == name:\n return dic[\"id\"]\n raise ValueError(\"no Category with given name has been found\")", "def get_id(category, name):\n item = Ingredients.query.filter_by(category=category).filter_by(name=name).first()\n if(item):\n return item.id \n else:\n return None", "def search_categorie(input) :\n j = _jpdb()\n _input = _process_search_input(input)\n if not _input : return None\n f = j.base_format\n q = Query().select(f.categories, f.categories.id, f.categories.name)\n q.where().equal(f.categories.name, _input)\n categorie_data = j.executeQuery(q)\n\n if categorie_data: \n cat_id, cat_name = categorie_data[0]\n examples = _create_examples(j.list_word_by_categorie, cat_name)\n return SelectorResult('categorie', cat_id, cat_name, *examples)", "def get_name(self):\n return self.category_name", "def categories_choice(self):\n\n self.cursor.execute(\"\"\" SELECT id, name\n FROM category\n ORDER BY id LIMIT 5 OFFSET 0\"\"\")\n rows = self.cursor.fetchall()\n print(\"Choisissez votre catégorie :\")\n possible_choice = []\n while 
True:\n try:\n for row in rows:\n possible_choice.append(row[0])\n print(row[0], row[1])\n choice = int(input(\"Entrez votre choix: \\n\"))\n if choice in possible_choice:\n break\n except ValueError:\n continue\n\n return choice", "def select_foods_database(self, user_answer_id_category):\n self.cursor = self.data_base.cursor(MySQLCursorPrepared)\n self.cursor.execute(\"\"\"SELECT id, name_food\n FROM Food\n WHERE category_id = {}\"\"\".format(user_answer_id_category))\n id_name_food = self.cursor.fetchall()\n id_name_food = self.new_orm.transform_foods_to_object(id_name_food)\n return id_name_food", "def name(self) -> str:\n return str(self.category.value)", "def get_category_id(self):\n return self.cleaned_data['category_id']", "def get_name(self, id):\n\t\treturn self.name_by_index[id]", "def get_category(id):\n\n category = get_db().execute('SELECT * FROM categories WHERE id = ?', (id, )).fetchone()\n\n if not category:\n abort(404, \"category id {0} does not exist.\".format(id))\n\n return category", "def category_title(self):\n categories = {c[0]:c[1] for c in self.CATEGORY_CHOICES}\n if self.category in categories:\n return categories[self.category]", "def PrimaryCategory(self, default=None):\n return self.data.get('categories', [default])[0]", "def category_name(self):\n return self.category.name", "def get_categoria_cmd(categoria_id):\n return NodeSearch(categoria_id)", "def food_choice(self, category_id):\n\n self.cursor.execute(\"\"\" SELECT food.id, food.name\n FROM food\n INNER JOIN category_food\n ON food.id = category_food.food_id\n WHERE category_food.category_id = %s && nutriscore > 'b'\n ORDER BY id LIMIT 8 OFFSET 0\"\"\", category_id)\n rows = self.cursor.fetchall()\n print(\"Choisissez votre aliment :\")\n possible_choice = []\n while True:\n try:\n for row in rows:\n possible_choice.append(row[0])\n print(row[0], row[1])\n choice = int(input(\"Entrez votre choix: \\n\"))\n if choice in possible_choice:\n break\n except ValueError:\n continue\n\n return choice", "def get_by_name(self, name):\n category = Category.query.filter_by(name=name).first()\n\n return category", "def category(self) -> str:\n return pulumi.get(self, \"category\")", "def category(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"category\")" ]
[ "0.68540025", "0.6634235", "0.63437605", "0.6210306", "0.6125233", "0.6113372", "0.61019367", "0.6087021", "0.598077", "0.59659487", "0.5924591", "0.5912172", "0.5867398", "0.5840604", "0.57927936", "0.57337314", "0.5698949", "0.5683739", "0.5681899", "0.5614703", "0.56006783", "0.5559367", "0.5541696", "0.5533214", "0.55210197", "0.55101264", "0.5464586", "0.54435396", "0.54371536", "0.542767" ]
0.7045538
0
Get the network object from a given address and netmask
def get_network(address: str, netmask: str) -> IPv4Network:
    net = IPv4Network(f"{address}/{netmask}", strict=False)
    return net
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dotted_netmask(mask):\n mask = int(mask)\n bits = 0xffffffff ^ (1 << 32 - mask) - 1\n return socket.inet_ntoa(struct.pack('>I', bits))", "def get_net_obj(host, object_type, name, refresh=False):\n objs = get_net_objs(host=host, object_type=object_type, refresh=refresh)\n obj_name = name.lower()\n if objs is not None:\n for obj in objs:\n if object_type == \"portgroup\" or object_type == \"proxyswitch\":\n if obj.spec.name.lower() == obj_name:\n return obj\n elif object_type == \"pnic\" or object_type == \"vnic\":\n if obj.device.lower() == obj_name:\n return obj\n elif obj.name.lower() == obj_name:\n return obj\n return None", "def getnetwork(ipaddr):\n return '192.168.1.0/24'", "def get_net_item(host, object_type, name):\n if name:\n return get_net_obj(host, object_type, name)\n else:\n return get_net_objs(host, object_type)[0]", "def IpNetwork(address, version=None):\n\n if version:\n if version == 4:\n return Ipv4Network(address)\n elif version == 6:\n return Ipv6Network(address)\n\n try:\n return Ipv4Network(address)\n except (ValueError):\n pass\n\n try:\n return Ipv6Network(address)\n except (ValueError):\n pass\n\n raise ValueError('%r does not appear to be an IPv4 or IPv6 network' % address)", "def filter_netmask(prefix):\n try:\n prefix_str = unicode(prefix)\n except NameError as ex:\n prefix_str = str(prefix)\n return IPv4Network(\"1.0.0.0/\"+prefix_str).netmask", "def __init__(self, address, netmask=None):\n\n if netmask:\n ip = Ipv4Address(address)\n address = \"%s/%s\" % (ip,netmask)\n\n google.ipaddr.IPv4Network.__init__(self, address, strict=False)", "def add_network(self, addr, netmask):\n\n if len(addr) == 4:\n return ipset.ipset_ipv4_add_network(self.set, addr, netmask)\n\n elif len(addr) == 16:\n return ipset.ipset_ipv6_add_network(self.set, addr, netmask)\n\n else:\n raise ValueError(\"Invalid address\")", "def get_address_without_netmask(device, interface, address_family,\r\n return_all=False):\r\n ip_addr_with_mask = get_interface_ip_address(\r\n device=device,\r\n interface=interface, \r\n address_family=address_family)\r\n\r\n if ip_addr_with_mask:\r\n return ip_addr_with_mask.split('/')[0]\r\n\r\n return None", "def __init__(self, address, netmask=None):\n\n if netmask:\n ip = Ipv6Address(address)\n address = \"%s/%s\" % (ip,netmask)\n\n google.ipaddr.IPv6Network.__init__(self, address, strict=False)", "def network(self):\n address = unicode(\"%s/%s\" % (self.address, _get_cidr(self.netmask)))\n return IPv4Network(address, strict=False)", "def _get_network(name):\n\n if name not in _NAME_TO_NETS:\n raise ValueError('Network name [%s] not recognized.' 
% name)\n return _NAME_TO_NETS[name].model", "def filter_ipnet_broadcast(network_cidr):\n try:\n network_cidr_str = unicode(network_cidr)\n except NameError as ex:\n network_cidr_str = str(network_cidr)\n try:\n return IPv4Network(network_cidr_str).broadcast_address\n except ValueError as ex:\n logging.error(network_cidr_str + \" is not a valid network address\")\n raise", "def network(ip):\n ip, prefix = netParse(ip)\n return \"{}/{}\".format(\n ipStr(ip & (0xffffffff << (32 - prefix))),\n prefix\n )", "def __get_addr_grp(self, objtype, address=None):\n if address is not None:\n return self.get(\"/%s/entry[@name='%s']\"%(objtype, address))\n else:\n return self.get(\"/%s/entry\"%objtype)", "def lookup_socket(self, address): # TODO: optimize me\n\n net_tuple = self.read_nodestate(0)\n for item in net_tuple:\n discovered_address = item[1]\n if address == discovered_address:\n return item[0]", "def cidr_to_netmask(cidr):\n net_bits = cidr\n host_bits = 32 - int(net_bits)\n netmask = socket.inet_ntoa(struct.pack('!I', (1 << 32) - (1 << host_bits)))\n return netmask", "def filter_ipnet_hostmin(network_cidr):\n try:\n network_cidr_str = unicode(network_cidr)\n except NameError as ex:\n network_cidr_str = str(network_cidr)\n try:\n return IPv4Network(network_cidr_str)[1]\n except ValueError as ex:\n logging.error(network_cidr_str + \" is not a valid network address\")\n raise", "def isIpv4AddrWithNetmask(string):\n return (True)", "def get_network_on_vc(options):\n datacenter = get_datacenter(options)\n networks = datacenter.network\n\n name = get_network_name(options)\n for network in networks:\n if re.search(name, network.name):\n return network", "def netmask_to_cidr(value):\n return netaddr.IPAddress(value).netmask_bits()", "def filter_ipnet_hostmax(network_cidr):\n try:\n network_cidr_str = unicode(network_cidr)\n except NameError as ex:\n network_cidr_str = str(network_cidr)\n try:\n return IPv4Network(network_cidr_str)[-2]\n except ValueError as ex:\n logging.error(network_cidr_str + \" is not a valid network address\")\n raise", "def get_network_info() -> tuple:\n # Getting LAN IP adress\n # A big part of the code here has been extracted from the question of this man.\n # https://stackoverflow.com/questions/41625274/extracting-subnet-mask-from-my-computer-python\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n lan_ip = s.getsockname()[0]\n s.close()\n\n # Checking network interfaces for a convincing Gateway and Mask\n for i in netifaces.interfaces():\n try:\n\n pc_ip = netifaces.ifaddresses(i)[netifaces.AF_INET][0]['addr']\n mask = netifaces.ifaddresses(i)[netifaces.AF_INET][0]['netmask']\n gateway = netifaces.gateways()['default'][netifaces.AF_INET][0]\n\n if pc_ip == lan_ip:\n break\n except:\n pass\n\n else:\n # mask and gateway not found, using default values\n mask = DEFAULT_NETMASK\n gateway = str(lan_ip)\n\n # If invalid netmask we put the default netmask\n if mask == '255.255.255.255': mask = DEFAULT_NETMASK\n\n # Now we need to set to zero the host ports.\n splitted_ip = gateway.split('.')\n splitted_mask = mask.split('.')\n\n for i in range(4):\n if splitted_mask[i] == '0':\n splitted_ip[i] = '0'\n elif splitted_mask[i] != '255':\n num = bin(int(splitted_ip[i]))[2:]\n pat = bin(int(splitted_mask[i]))[2:]\n\n # Adding 0s if needed\n while len(num) < 8:\n num = '0' + num\n while len(pat) < 8:\n pat = '0' + pat\n\n for i in range(8):\n if pat[i] == '0':\n num = num[:i] + '0' + num[i+1:]\n\n splitted_ip[i] = str(int(num, 2))\n\n\n correct_ip = 
'.'.join(splitted_ip)\n return correct_ip, mask", "def filter_ipnet_netmask(network_cidr):\n try:\n network_cidr_str = unicode(network_cidr)\n except NameError as ex:\n network_cidr_str = str(network_cidr)\n try:\n return IPv4Network(network_cidr_str).netmask\n except ValueError as ex:\n logging.error(network_cidr_str + \" is not a valid network address\")\n raise", "def IP(address):\n for klass in (V4Address, V6Address):\n try:\n ip = klass(address)\n except ValueError, e:\n error = e\n else:\n return ip\n\n raise error", "def set_network(self, addr, netmask, value):\n\n if len(addr) == 4:\n ipset.ipmap_ipv4_set_network(self.map, addr, netmask, value)\n return\n\n elif len(addr) == 16:\n ipset.ipmap_ipv6_set_network(self.map, addr, netmask, value)\n return\n\n else:\n raise ValueError(\"Invalid address\")", "def url_cidr_to_mask():\n res = {\n \"message\": os.environ.get(\"MESSAGE\", \"nothing\"),\n }\n return jsonify(res)", "def _parse_inet(line):\n tokens = line.split()\n return netaddr.IPNetwork(tokens[1])", "def filter_ipnet_hostaddr(network_cidr, index):\n try:\n network_cidr_str = unicode(network_cidr)\n except NameError as ex:\n network_cidr_str = str(network_cidr)\n try:\n return IPv4Network(network_cidr_str)[index]\n except ValueError as ex:\n logging.error(network_cidr_str + \" is not a valid network address\")\n raise\n except IndexError as ex:\n logging.error(network_cidr_str + \" has not enough range for \"\n + str(index) + \" host IPs.\")\n raise", "def get_network_by_id(self, id):\n return self.network.get_network(id)" ]
[ "0.63848794", "0.6259637", "0.61794454", "0.6175556", "0.6134683", "0.6117697", "0.59853756", "0.59502894", "0.59267926", "0.5924149", "0.59040844", "0.58833677", "0.5831049", "0.5792555", "0.5789383", "0.5780396", "0.5660327", "0.5654742", "0.56288487", "0.56134087", "0.55994076", "0.559173", "0.5521871", "0.55166", "0.5483201", "0.54732895", "0.5461731", "0.54570127", "0.54543126", "0.54434335" ]
0.7700801
0
Check if a given IP address is within a given network
def check_network_contains_ip(network: IPv4Network, address: str) -> bool:
    ip = IPv4Address(address)
    if ip in network:
        return True
    else:
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def address_exists_in_network(ip_address, net_n_bits):\n ip_address = struct.unpack('<L', socket.inet_aton(ip_address))[0]\n net, bits = net_n_bits.split('/')\n net_address = struct.unpack('<L', socket.inet_aton(net))[0]\n net_mask = ((1L << int(bits)) - 1)\n return ip_address & net_mask == net_address & net_mask", "def checklan(ipaddr, network):\n return True", "def address_in_network(ip, net):\n full_path = os.path.join(root, \"file.txt\")\n \n ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0]\n netaddr, bits = net.split('/')\n netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0]\n network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask\n return (ipaddr & netmask) == (network & netmask)", "def ipcalc(self, ipaddr, subnet):\n if IPNetwork(ipaddr) in IPNetwork(subnet):\n return True\n else:\n return False", "def __contains__(self, address):\n return any([\n ipaddress.ip_address(address) in network\n for network in self.networks\n ])", "def match(self, _ip):\n try:\n return bool(ip_address(_ip) in self.network)\n except ValueError:\n return False", "def test_ipv4_in_net_internal_v6(self):\n test_ip = ip_address.IPAddress(\"192.168.178.4\")\n assert test_ip.in_network(\"192.168.178.0/24\")\n assert test_ip.in_network(\"192.168.178.0/29\")\n \n test_ip = ip_address.IPAddress(\"192.168.178.4/2\")\n assert test_ip.in_network(\"192.0.0.0/2\")\n\n test_ip = ip_address.IPAddress(\"192.168.178.4\")\n assert test_ip.in_network(\"10.0.11.0/4\") == False\n assert test_ip.in_network(\"192.169.178.0/24\") == False\n \n \n test_ip = ip_address.IPAddress(\"192.168.67.3\")\n assert test_ip.in_network(\"192.168.0.0/16\")", "def valid_ip_network(network):\n try:\n ipaddr.IPNetwork(network)\n except ValueError:\n return False\n\n return True", "def test_ipv4_in_net(self):\n test_ip = ip_address.IPAddress(\"192.168.178.4\", force_v4=True)\n assert test_ip.in_network(\"192.168.178.0/24\")\n assert test_ip.in_network(\"192.168.178.0/29\")\n \n test_ip = ip_address.IPAddress(\"192.168.178.4/2\", force_v4=True)\n assert test_ip.in_network(\"192.0.0.0/2\")\n\n test_ip = ip_address.IPAddress(\"192.168.178.4\", force_v4=True)\n assert test_ip.in_network(\"10.0.11.0/4\") == False\n assert test_ip.in_network(\"192.169.178.0/24\") == False\n \n \n test_ip = ip_address.IPAddress(\"192.168.67.3\")\n assert test_ip.in_network(\"192.168.0.0/16\")", "def checkNet(net,mask,ipAddr):\n\tbinNet = ''\n\tbinIPaddr = ''\n\tfor i in net.split('.'):\n\t\tbinNet += bin(int(i))[2:].zfill(8)\n\tfor j in ipAddr.split('.'):\n\t\tbinIPaddr += bin(int(j))[2:].zfill(8)\n\tfor m in range(mask):\n\t\tif binNet[m] != binIPaddr[m]:\n\t\t\treturn False\n\treturn True", "def ip_in_subnetwork(ip_address, subnetwork):\n \n (ip_integer, version1) = ip_to_integer(ip_address)\n (ip_lower, ip_upper, version2) = subnetwork_to_ip_range(subnetwork)\n \n if version1 != version2:\n raise ValueError(\"incompatible IP versions\")\n \n return (ip_lower <= ip_integer <= ip_upper)", "def check_if_ip(address):\n address_list = map(lambda x: int(x), address.split('.'))\n\n if len(address_list) != 4:\n return False\n for octet in address_list:\n if not 0 <= octet <= 255:\n return False\n if address_list[0] in [0, 10, 127, 255]:\n return False\n return True", "def _is_valid_ip(self, address):\r\n try:\r\n # Is this an valid ip address?\r\n ipaddr.IPNetwork(address)\r\n except ValueError:\r\n return False\r\n return True", "def test_ipv6_in_net(self):\n test_ip = 
ip_address.IPAddress(\"2001:0db8:85a3:08d3:1319:8a2e:0370:7344/24\")\n assert test_ip.in_network(\"2001:0d00::/24\")\n assert test_ip.in_network(\"2001:0d00::/29\")", "def isIP(ipToTest):\n \n try:\n socket.inet_aton(ipToTest)\n return True\n except socket.error:\n return False", "def is_ip_addr(addr: str, strict: bool = True) -> bool:\n\n try:\n ipaddress.ip_network(addr, strict=strict)\n return True\n except ValueError:\n return False", "def filter_ipnet_contains_ip(network_cidr, ip_address):\n try:\n network_cidr_str = unicode(network_cidr)\n ip_address_str = unicode(ip_address)\n except NameError as ex:\n network_cidr_str = str(network_cidr)\n ip_address_str = str(ip_address)\n try:\n return IPv4Address(ip_address_str) in IPv4Network(network_cidr_str)\n except ValueError as ex:\n logging.error(network_cidr_str + \" is not a valid network address\")\n raise", "def is_valid_ip(ip):\n ...", "def is_reserved(ip):\n if ip_between(ip, \"0.0.0.0\", \"0.255.255.255\"):\n return True\n elif ip_between(ip, \"10.0.0.0\", \"10.255.255.255\"):\n return True\n elif ip_between(ip, \"100.64.0.0\", \"100.127.255.255\"):\n return True\n elif ip_between(ip, \"127.0.0.0\", \"127.255.255.255\"):\n return True\n elif ip_between(ip, \"169.254.0.0\", \"169.254.255.255\"):\n return True\n elif ip_between(ip, \"172.16.0.0\", \"172.31.255.255\"):\n return True\n elif ip_between(ip, \"192.0.0.0\", \"192.0.0.255\"):\n return True\n elif ip_between(ip, \"192.0.2.0\", \"192.0.2.255\"):\n return True\n elif ip_between(ip, \"192.88.99.0\", \"192.88.99.255\"):\n return True\n elif ip_between(ip, \"192.168.0.0\", \"192.168.255.255\"):\n return True\n elif ip_between(ip, \"198.18.0.0\", \"198.19.255.255\"):\n return True\n elif ip_between(ip, \"198.51.100.0\", \"198.51.100.255\"):\n return True\n elif ip_between(ip, \"203.0.113.0\", \"203.0.113.255\"):\n return True\n elif ip_between(ip, \"224.0.0.0\", \"255.255.255.255\"):\n return True\n else:\n return False", "def is_valid_ip(address):\n return is_valid_ipv4_address(address) or is_valid_ipv6_address(address)", "def validIPAddress(ip):\n try:\n socket.inet_aton(ip)\n return True\n except socket.error:\n return False", "def bridge_network_check(ip, bridge_ip, bridge_netmask):\n# convert vars to unicode \n ip = unicode(ip)\n bridge_ip = unicode(bridge_ip)\n bridge_netmask = unicode(bridge_netmask)\n# by default ip is not in bridge network \n brctl = 0\n\n# bridge insterface ip network\n brdige_network = IPv4Interface('%s/%s' % (bridge_ip, bridge_netmask)).network\n\n# check if ip is from bridge network and return bridge control var (brctl) = true\n if IPv4Address(ip) in list(IPv4Network(brdige_network)):\n brctl = 1\n\n# return brctl and bridge ip network \n return brctl, brdige_network", "def is_actual_ip(self, ip_addr):\n try:\n socket.inet_aton(ip_addr)\n return True\n except socket.error:\n return False", "def containsip(url):\r\n try:\r\n if ip.ip_address(url):\r\n return 1\r\n except:\r\n return 0", "def is_ip(address):\n try:\n socket.inet_pton(socket.AF_INET, address)\n except socket.error:\n try:\n socket.inet_pton(socket.AF_INET6, address)\n except socket.error:\n return False\n return True", "def isIpv4AddrWithNetmask(string):\n return (True)", "def is_valid_ip(addr):\n\n try:\n socket.inet_aton(addr)\n except socket.error:\n return False\n return True", "def valid_ip(ip_address, strict=True):\n import socket\n try:\n socket.inet_aton(ip_address)\n return True\n except socket.error:\n if strict:\n raise ValueError(\"Invalid IP address\")\n return False", "def 
checkIPValid(self, ipAddr):\n try:\n socket.inet_aton(ipAddr)\n return True\n except socket.error:\n return False", "def isUseableIP(ip_add, mask=None):\n if _check_ip(ip_add):\n ip_split = ip_add.split('.')\n # 如果IP地址以0开头,则不可用\n if ip_split[0] == '0':\n return False\n # 如果IP地址以255开头,则不可用\n if ip_split[0] == '255':\n return False\n # 如果IP地址以127开头,则不可用\n if ip_split[0] == '127':\n return False\n # 如果IP地址以169.254开头,则不可用\n if ip_split[0] == '169' and ip_split[1] == '254':\n return False\n\n ip_num = ip2int(ip_add)\n # 2进制字符串,左补零,共32位\n ip_bit = bin(ip_num)[2:].zfill(32)\n # 过滤全零地址\n if ip_num == 0:\n return False\n # 如果是A类地址,则掩码为255.0.0.0\n if ip_bit[0] == '0':\n mask = mask or \"255.0.0.0\"\n # 如果是B类地址,则掩码为255.255.0.0\n elif ip_bit[:2] == '10':\n mask = mask or \"255.255.0.0\"\n # 如果是C类地址,则掩码为255.255.255.0\n elif ip_bit[:3] == '110':\n mask = mask or \"255.255.255.0\"\n # 其余地址全部不可用\n else:\n return False\n\n # 掩码不合法则不可用\n if not isValidMask(mask):\n return False\n\n # 根据掩码计算子网地址,如果IP为子网地址,则不可用\n subnet = calcSubnet(ip_add, mask)\n if ip_add == subnet:\n return False\n # 根据子网以及掩码计算广播地址,如果IP为广播地址,则不可用\n if ip_add == calcBroadcastBySubnet(subnet, mask):\n return False\n\n return True\n else:\n return False" ]
[ "0.75938654", "0.7511004", "0.7479366", "0.7437868", "0.73920935", "0.73480964", "0.7324643", "0.7290853", "0.7193748", "0.7108727", "0.7097012", "0.70429254", "0.6999613", "0.694608", "0.6937112", "0.69068915", "0.6902325", "0.6866631", "0.683682", "0.6792288", "0.6748791", "0.66555524", "0.66179127", "0.66116565", "0.6610721", "0.66005075", "0.6594828", "0.6583793", "0.65055656", "0.6485216" ]
0.81156
0