Dataset fields:

  query            string    lengths 9 to 9.05k
  document         string    lengths 10 to 222k
  metadata         dict
  negatives        sequence  length 30
  negative_scores  sequence  length 30
  document_score   string    lengths 4 to 10
  document_rank    string    2 classes
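Each row of this schema pairs a natural-language query with a matching code document, thirty mined negative snippets, their retrieval scores, and the rank of the positive document. Below is a minimal sketch of loading and inspecting a row with the Hugging Face datasets library; the repository id is a placeholder, not the dataset's actual Hub path.

from datasets import load_dataset

# Placeholder repo id -- substitute the dataset's real Hub path.
ds = load_dataset("org/code-search-triplets", split="train")

row = ds[0]
print(row["query"])                # natural-language description of the code
print(row["document"])             # the positive code snippet
print(len(row["negatives"]))       # 30 mined negative snippets
print(row["negative_scores"][:5])  # similarity scores for the first few negatives
print(row["document_score"], row["document_rank"])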
Return a Matplotlib Axes array to be used in all visualizations in the notebook. Provide a central point to control graph sizes. Adjust the size attribute to control how large the rendered images are.
def get_ax(rows=1, cols=1, size=16):
    _, ax = plt.subplots(rows, cols, figsize=(size * cols, size * rows))
    return ax
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_ax(rows=1, cols=1, size=16):\n _, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))\n return ax", "def get_ax(rows=1, cols=1, size=16):\n _, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))\n return ax", "def get_ax(rows=1, cols=1, size=8):\r\n _, ax = plt.subplots(rows, cols, figsize=(size * cols, size * rows))\r\n return ax", "def get_ax(rows=1, cols=1, size=32):\n _, ax = plt.subplots(rows, cols, figsize=(size * cols, size * rows))\n return ax", "def get_ax(rows=1, cols=1, size=16):\n size = 5\n _, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))\n return ax", "def get_ax(rows=1, cols=1, size=8):\n _, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))\n return ax", "def get_ax(rows=1, cols=1, size=8):\n _, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))\n return ax", "def get_ax(rows=1, cols=1, size=8):\n _, ax = plt.subplots(rows, cols, figsize=(size * cols, size * rows))\n return ax", "def get_ax(rows=1, cols=1, size=8):\n fig , ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))\n return fig,ax", "def init_axes(self):\n plt.switch_backend(\"cairo\")\n fig = plt.figure(figsize=(15,10))\n ax = fig.add_axes([0.05, 0.15, 0.9, 0.80,])\n return (fig, ax)", "def __init__(self, nx, ny, nxsize=5.4, nysize=6.2):\n self.nx = nx\n self.ny = ny\n self.n = 1\n plt.figure(figsize=(nysize*ny, nxsize*nx))\n plt.subplot(nx, ny, self.n)", "def setup_figure_1ax(x_label='', y_label='', size=(13, 9), shrink_ax=True):\n\n matplotlib.rcParams.update({'font.size': 20})\n fig, ax = plt.subplots()\n fig.set_size_inches(size)\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_visible(False)\n ax.spines['left'].set_visible(False)\n # Shrink current axis by 20%\n if shrink_ax:\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n ax.grid()\n return fig, ax", "def frame():\n fig = plt.figure(figsize = (6, 3))\n\n plt.subplots_adjust(left=.15, bottom=.2, right=.95, top=.9)\n ax = fig.add_subplot(111)\n \n ax.tick_params(axis=\"x\", labelsize=12)\n ax.tick_params(axis=\"y\", labelsize=12)\n\n return fig, ax", "def plot_all(self, cmap='Greys', size=(10,10)):\n\n fig, ((ax0, ax1), (ax2, ax3)) = plt.subplots(nrows=2,\n ncols=2,\n sharex=True,\n sharey=True)\n\n ax0.imshow(self.I, cmap=cmap)\n ax0.set_title(f'Original {self.I.shape}',\n fontsize=15)\n ax1.imshow(self.W, cmap=cmap)\n ax1.set_title(f'W Loadings {self.W.shape}',\n fontsize=15)\n ax2.imshow(self.H, cmap=cmap)\n ax2.set_title(f'H Loadings {self.H.shape}',\n fontsize=15)\n ax3.imshow(self.E, cmap=cmap)\n ax3.set_title(f'W * H with n={self._n_components} {self.E.shape}',\n fontsize=15)\n\n fig.set_figheight(size[0])\n fig.set_figwidth(size[1])\n fig.tight_layout()\n plt.show()", "def _InitAxes( self ):\n self.ax = self.fig.add_subplot( 111 )", "def common_set_up(ax_size):\n\n sns.set_style(\"whitegrid\")\n sns.set_style(\"ticks\",\n {'axes.grid': True,\n 'grid.color': '.99', # Very faint grey grid\n 'ytick.color': '.4', # Lighten the tick labels\n 'xtick.color': '.4'}\n )\n sns.set_context(\n \"poster\",\n font_scale=0.8,\n rc={\"figure.figsize\": ax_size,\n 'font.sans-serif': 'Gill Sans MT'}\n )", "def createFigure(self):\n\n SMALL_SIZE = 14\n MEDIUM_SIZE = 18\n BIGGER_SIZE = 36\n\n plt.rc('font', size=SMALL_SIZE) # controls default text sizes\n plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title\n plt.rc('axes', 
labelsize=MEDIUM_SIZE) # fontsize of the x and y labels\n plt.rc('xtick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels\n plt.rc('ytick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels\n plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize\n plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title\n\n fig, axes = plt.subplots()\n fig.set_size_inches(10, 6, forward=True)\n serialNumber = self.spectrometer.getSerialNumber()\n model = self.spectrometer.model\n fig.canvas.manager.set_window_title('Spectrometer [serial # {0}, model {1}]'.format(serialNumber, model))\n axes.set_xlabel(\"Wavelength [nm]\")\n axes.set_ylabel(\"Intensity [arb.u]\")\n return fig, axes", "def __init__(self):\n\n fig_width_pt = 800.0 \n pylab.rcParams.update(plot_params)", "def create_figure(self) -> None:\n plt.ion()\n self.fig = plt.figure(1)\n self.axis = self.fig.add_subplot(111, xlim=(0, 1), ylim=(0, 1))\n self.axis.grid(True)\n plt.xticks(np.linspace(0, 1, self._param[\"n_v\"] + 1))\n plt.yticks(np.linspace(0, 1, self._param[\"n_v\"] + 1))\n a_plt, = self.axis.plot([], [], 'bx', markersize=5)\n l_plt, = self.axis.plot([], [], 'r.', markersize=15)\n self.plots = [a_plt, l_plt]", "def figure():\n fig = plt.figure()\n ax = fig.add_subplot()\n ax.set_aspect('equal')\n return fig, ax", "def __init__(self):\n import matplotlib.pyplot as plt\n\n\n SMALL_SIZE = 12\n MEDIUM_SIZE = 14\n BIGGER_SIZE = 16\n\n plt.rc('font', size=SMALL_SIZE) # controls default text sizes\n plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title\n plt.rc('axes', labelsize=BIGGER_SIZE) # fontsize of the x and y labels\n plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels\n plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels\n plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize\n plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title", "def plot_insertsize():", "def embed_matplotlib(self):", "def initialize_figure(\n self,\n mosaic: Optional[List[List[str]]] = None,\n figsize: Tuple[int, int] = (10, 8),\n cmap: str = \"tab10\",\n return_ax: bool = False,\n ) -> None:\n if mosaic is None:\n mosaic = self.get_default_mosaic()\n\n self.cmap = plt.get_cmap(cmap)\n\n figure, axes = plt.subplot_mosaic(mosaic, figsize=figsize)\n if return_ax:\n return axes\n\n self.figure = figure\n self.axes = axes\n self.mosaic = mosaic", "def create_figure(self):\n plt.rcParams.update(general_utils.returnGraphConfigs(\"anim\"))\n self.fig = plt.figure()\n self.axes = plt.axes()\n self.axes.set_xlabel(\"Cells In X (Columns)\")\n self.axes.set_ylabel(\"Cells In Y (Rows)\")\n self.axes.set_xlim(0, self.dimensions - 1)\n self.axes.set_ylim(0, self.dimensions - 1)", "def set_axes(self, a):\r\n self.axes = a", "def create_plot():\n\n fig, ax = plt.subplots()\n return fig, ax", "def plot_IE(self, cmap='Greys', size=(10,10)):\n fig, (ax0, ax1) = plt.subplots(nrows=1,\n ncols=2,\n sharex=True,\n sharey=True)\n\n ax0.imshow(self.I, cmap=cmap)\n ax0.set_title(f'Original {self.I.shape}',\n fontsize=15)\n ax1.imshow(self.E, cmap=cmap)\n ax1.set_title(f'W * H with n={self._n_components} {self.E.shape}',\n fontsize=15)\n\n fig.set_figheight(size[0])\n fig.set_figwidth(size[1])\n fig.tight_layout()\n plt.show()", "def plot_main(self):\n\n f, axes = plt.subplots(2, 3, figsize=(16, 8))\n self.data_plot(ax=axes[0, 0])\n self.model_plot(ax=axes[0, 1])\n self.normalized_residual_plot(ax=axes[0, 2], v_min=-6, v_max=6)\n self.source_plot(ax=axes[1, 0], convolution=False, deltaPix_source=0.01, 
numPix=100)\n self.convergence_plot(ax=axes[1, 1], v_max=1)\n self.magnification_plot(ax=axes[1, 2])\n f.tight_layout()\n f.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0., hspace=0.05)\n return f, axes", "def _newax(ax=None):\n from matplotlib import pyplot as plt\n if ax is not None:\n return ax\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n return ax" ]
[ "0.6995708", "0.6995708", "0.69909465", "0.6967519", "0.69618386", "0.69150853", "0.69150853", "0.6912052", "0.6782566", "0.6779475", "0.63615865", "0.63033676", "0.62792975", "0.62403536", "0.6224871", "0.6140819", "0.6097152", "0.60868704", "0.604939", "0.6045033", "0.6036806", "0.601763", "0.60174084", "0.5974862", "0.59265107", "0.5925675", "0.5910351", "0.5876128", "0.5874489", "0.5853796" ]
0.69959086
0
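The document_score and negative_scores fields read as retrieval-model similarity scores, and document_rank records where the positive document falls among all candidates (0 meaning it was retrieved first). Here is a minimal sketch, assuming a sentence-transformers bi-encoder, of how such scores and a rank can be computed for a row; the model name is illustrative and not necessarily the encoder that produced the scores above.

from sentence_transformers import SentenceTransformer, util

# Illustrative encoder; the dataset does not state which model produced its scores.
model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")

def score_row(row):
    # Embed the query and all candidate documents (positive first, then the negatives).
    q_emb = model.encode(row["query"], convert_to_tensor=True)
    cand_embs = model.encode([row["document"]] + list(row["negatives"]), convert_to_tensor=True)
    sims = util.cos_sim(q_emb, cand_embs)[0]
    document_score = float(sims[0])
    negative_scores = [float(s) for s in sims[1:]]
    # Rank of the positive document: number of candidates scoring strictly higher.
    document_rank = int((sims > sims[0]).sum().item())
    return document_score, negative_scores, document_rank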
Open a SQL file and read the query result into a pandas DataFrame.
def read_sql_from_file(path, conn):
    with open(path, 'r', encoding='utf-8') as f:
        qu = f.read()
    df = read_sql(qu, conn)
    return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def query(self, sql):\n df = pd.read_sql(sql, self.conn)\n return df", "def query_to_df(db, sql):\n conn_string = return_connection(db)\n with pg2.connect(conn_string) as conn:\n return psql.read_sql(sql, conn)", "def read_sql_query(sql: str, con: RdsDataApi, database: Optional[str] = None) -> pd.DataFrame:\n return con.execute(sql, database=database)", "def read_sql(self, sql_query: str, **kwargs) -> pd.DataFrame:\n connection_infos = {\n param: getattr(self, param) for param in [\"host\", \"port\", \"dbname\", \"user\"]\n }\n connection_infos[\"password\"] = pgpasslib.getpass(**connection_infos)\n connection = pg.connect(**connection_infos)\n if self.schema:\n connection.cursor().execute(f\"SET SCHEMA '{self.schema}'\")\n\n df = pd.read_sql(sql_query, con=connection, **kwargs)\n\n connection.close()\n return df", "def read_sql(qu, conn):\n df = pd.read_sql(qu, conn)\n df.columns = [col.lower() for col in df.columns] \n \n return df", "def db_to_df(query):\n conn = loader.database._connection\n return sql.read_frame(query, conn)", "def get_df_from_db(self, query):\n cursor = self.conn.cursor()\n cursor.execute(\"set hive.execution.engine = tez\")\n cursor.execute(\"set tez.queue.name = sephora_internal\")\n cursor.execute(query)\n data = cursor.fetchall()\n col_des = cursor.description\n col_des = [tuple([x[0].split('.')[1] if '.' in x[0] else x[0]] + list(x[1:])) for x in col_des]\n col_name = [col_des[i][0] for i in range(len(col_des))]\n df = pd.DataFrame([list(i) for i in data], columns=col_name)\n return df", "def load_renter_data():\n return pd.read_sql_query(_sql_query, _con)", "def read_data(db_name, query_file):\r\n con = sqlite3.connect(db_name)\r\n cursor = con.cursor()\r\n\r\n sql = open(query_file,'r')\r\n query = sql.read()\r\n sql.close()\r\n\r\n data = pd.read_sql_query(query, con=con)\r\n data.drop_duplicates(subset=['Title'], inplace=True)\r\n data = data[data['Type']=='movie']\r\n data.set_index('imdbID', inplace=True)\r\n\r\n con.commit()\r\n con.close()\r\n\r\n return data", "def read(name, db):\n \n # Make connection with the database\n\tconn = sqlite3.connect(db)\n\tdf = pd.read_sql_query(\"select * from \" + name + ';', conn)\n \n # Print loaded data table name and return DataFrame\n\tprint(name + ': loaded')\n\treturn df", "def get_dataframe(q):\n cnx = create_engine(postgres_str)\n query = q\n return pd.read_sql_query(query, cnx)", "def get_df_from_db(self, query):\n cursor = self.conn.cursor()\n cursor.execute(query)\n data = cursor.fetchall()\n col_des = cursor.description\n col_des = [tuple([x[0].split('.')[1] if '.' 
in x[0] else x[0]] + list(x[1:])) for x in col_des]\n col_name = [col_des[i][0] for i in range(len(col_des))]\n ret_df = pd.DataFrame([list(i) for i in data], columns=col_name)\n return ret_df", "def open_data(table):\n engine = create_engine(myDB, encoding='latin1') \n conn = engine.connect()\n select = conn.execute('select * from ' + table)\n\n df = pd.DataFrame(select.fetchall()) \n df.columns = select.keys()\n\n conn.close()\n return df", "def load_file(self):\n\n self.df = self.sqlContext.read.csv(self.source, sep=self.sep, header=True, inferSchema=True)", "def read_file(fname: str) -> pd.DataFrame:\n raw_data = (\n pd.read_hdf(fname).to_frame().reset_index(level=[0, 1]).loc[ANALYSIS_DATE]\n )\n raw_data[\"date\"] = raw_data.index\n return raw_data", "def get_data_from_database(query, db_connection):\n\n dataframe = pandas.read_sql(query, con=db_connection)\n print(\"Data from database: \", dataframe.head(5))\n print(\"Size of dataframe from database: \", dataframe.shape)\n\n return dataframe", "def get_database_data(file_name=''):\n if not os.path.exists(file_name):\n raise IOError(\"File {} does not exist!\".format(file_name))\n df = pd.read_csv(file_name, header=1)\n return df", "def read_sql_from_file(self, filename):\n tmpLines = ''\n logger.info(\"Reading from {}\".format(filename))\n\n with open(filename, 'r') as fh:\n tmpLines = fh.readlines()\n \n sqlquery = \"\".join(tmpLines)\n cursor = self.conn.cursor()\n\n try:\n cursor.execute(sqlquery)\n except Exception as e:\n logger.info(e)\n sys.exit(1)\n return", "def load(file):\n return pq.read_table(file).to_pandas()", "def get_query_result_to_df(self, query):\r\n try:\r\n return pd.read_sql_query(query, self.conn)\r\n except pd.pandas.io.sql.DatabaseError:\r\n print('Execution failed. Database error')", "def read_sql(self):\n pass", "def get_db_data(self, sql_string):\n connection_string = f\"\"\"\n host='{self.host}' \n dbname='{self.db_name}' \n user='{self.user}' \n password='{self.password}' \n port='{self.port}'\n \"\"\"\n\n with psycopg2.connect(connection_string) as connection:\n cursor = connection.cursor()\n cursor.execute(sql_string)\n\n dataframe = pd.DataFrame(cursor.fetchall())\n dataframe.columns = [desc[0] for desc in cursor.description]\n\n return dataframe", "def sql_return_df(query, params, date_cols):\n conn = sqlite3.connect(db_filepath)\n df = pd.read_sql(query, conn, params=params, parse_dates=date_cols)\n conn.close()\n return df", "def OSW2df(osw_file, table_name):\n conn = connOSW(osw_file)\n df = pd.read_sql_query(\"SELECT * FROM \" + table_name, conn)\n conn.close()\n return df", "def sql_execute(sql_query, create_con_obj=None, n_row=0):\r\n\r\n if create_con_obj is None:\r\n create_con_obj = create_connection()\r\n print (sql_query)\r\n df = pd.read_sql(sql_query, create_con_obj)\r\n print (df.head(2))\r\n\r\n return df", "def acquire_only(db,query):\n url = get_connection(db)\n df = pd.read_sql(query, url)\n return df", "def sql_query_fetch_df(self,sql,primary_key = None):\n\t\tif not self.connected:\n\t\t\tprint ('db not connected yet. 
Do connect first')\n\t\t\treturn\n\t\tresults = pd.read_sql_query(sql,self.__engine,index_col = primary_key)\n\t\treturn results", "def load_data_sql(): \r\n conn = mysql.connect(**st.secrets[\"mysql\"])\r\n\r\n data = pd.read_sql('SELECT * FROM song_data', conn)\r\n lookup_table = pd.read_sql('SELECT * FROM lookup_table', conn)\r\n \r\n return data, lookup_table", "def read_as_pandas_dataframe(self, sql_query, params=None):\n return pandas.read_sql_query(sql_query, self._conn, params=params)", "def grr_osqueryi(line: Text) -> pd.DataFrame:\n args = grr_osqueryi.parser.parse_args(shlex.split(line))\n return magics_impl.grr_osqueryi_impl(args.sql)" ]
[ "0.7578803", "0.7521947", "0.7340611", "0.73007333", "0.71697307", "0.7145733", "0.7115768", "0.7113902", "0.71017885", "0.70614535", "0.7037162", "0.7007734", "0.6967868", "0.69536024", "0.6907001", "0.69034314", "0.6900759", "0.6888985", "0.6885968", "0.6831431", "0.68228275", "0.68096405", "0.67905277", "0.6768645", "0.670595", "0.66791695", "0.6590051", "0.65808773", "0.65554935", "0.653312" ]
0.7833117
0
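A hedged usage sketch for the read_sql_from_file document above, assuming a local SQLite database and a .sql file on disk (both names are illustrative). It also defines the read_sql helper the function depends on, which appears among the negatives as a thin wrapper over pandas.read_sql that lower-cases column names.

import sqlite3
import pandas as pd

def read_sql(qu, conn):
    # Helper assumed by read_sql_from_file: run the query and lower-case the column names.
    df = pd.read_sql(qu, conn)
    df.columns = [col.lower() for col in df.columns]
    return df

# Illustrative paths -- substitute a real database and query file.
conn = sqlite3.connect("example.db")
df = read_sql_from_file("monthly_report.sql", conn)
conn.close()
print(df.head())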
For the current target and dataframe (pre_clust_df), transform all features to WOE buckets and train a model.
def full_modeling(target, pre_clust_df, model_path, id_column):
    targets = [x for x in pre_clust_df.columns if x[:8] == 'default_']
    # folders for result saving
    folder_auc = model_path + '/pictures/roc_auc'
    folder_column_pics = model_path + '/pictures'
    folder_model_output = model_path + '/model_output'
    create_folder(folder_auc)
    create_folder(folder_model_output)
    # take only matured loans
    pre_clust_df = pre_clust_df[pre_clust_df[target] > -.5]
    pre_clust_df = pre_clust_df.set_index(id_column)
    # drop all target columns except the current target column
    drop_targets = [col for col in targets if col != target]
    drop_targets = list(set(drop_targets) & set(pre_clust_df))
    pre_clust_df = pre_clust_df.drop(drop_targets, 1)
    # transform continuous variables to bucket columns
    dfPreWoe, clustVarsInfo = sf.continuousVariables(pre_clust_df, columnLimit=10)
    # transform to WOE columns
    dfPostWoe, woeVarsInfo = sf.woeVariables(dfPreWoe, target)
    # look at information value of variables
    gg = sf.giniGrowth(dfPostWoe, woeVarsInfo, target)
    # choose best columns
    goodColumns, badColumns = sf.chooseColumnsFromIT(gg, badFlag=target, min_limit=0.01)
    # create logistic regression model
    model = sf.logReg(preLR=dfPostWoe[goodColumns], badFlag=target)
    # save roc_auc picture
    model.print_roc_curve(to_file=True, folder=folder_auc)
    # generate doc information about model and variables
    intercept, woeOut = sf.modelOutput(folder_model_output, woeVarsInfo, goodColumns, model, gg, rewrite=True)
    # generate and save pictures of feature distribution
    bad_columns = woe.save_pictures(woeVarsInfo, folder=folder_column_pics, badRateLimit=100)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def preprocess_feature(df):", "def transform_train_data(df):\n return df.rdd.map(\n lambda x: (\n Vectors.dense([x.amount, x.split, x.maintain4, x.maintain12]),\n x.intime\n )\n ).toDF([\"features\", \"label\"])", "def cluster_by_split(filtered_df):\n global features_in_range\n global table\n # make a copy of the entire data set\n unfiltered_df = table\n # get total number of robot faces in data set\n total_rows = len(unfiltered_df)\n\n # drop any column that is not included in our list of 11 features\n # 11 features = 16 features with no dependencies filtered via 20-80% range\n for col in unfiltered_df:\n if not unfiltered_df[col].name in features_in_range:\n unfiltered_df = unfiltered_df.drop(unfiltered_df[col].name, 1)\n\n # iterate over the dataframe of columns generated by the range\n for col in filtered_df:\n try:\n # for each column, call groupby() and calculate percentage\n check_for_20 = unfiltered_df.groupby(col).size().reset_index(name='count')\n check_for_20['as_percent'] = 100 * check_for_20['count'] / float(total_rows)\n # ignore feature values that represent less than 20% of all faces\n cluster_by_feature = check_for_20[check_for_20['as_percent'] >= 20]\n # if feature has values over 20%, iterate over\n # each feature_value and generate clusters\n if not cluster_by_feature.empty:\n # iterate over every value of the feature\n for index, row in cluster_by_feature.iterrows():\n # use feature value to call groupby() on the entire data set\n results = unfiltered_df[unfiltered_df[col] == row[0]]\n results = results \\\n .groupby(list(unfiltered_df)) \\\n .size() \\\n .reset_index(name='count')\n # calculate count as a percentage\n results['as_percent'] = 100 * results['count'] / float(total_rows)\n results = results.sort_values(by='as_percent', ascending=False)\n # store results in a .tsv file\n filename = str(col) + \"_\" + str(row[0]) + '_feature_cluster.tsv'\n results.to_csv(filename.replace(\"/\", \"-\"), header=True, sep='\\t')\n print(\"results written to file\")\n except:\n # 'count' and 'percentage' columns will generate errors\n # since they don't exist in the original data set\n pass", "def pre_processing_(data_df , serialized_objects):\n max_recency_acc_dig = serialized_objects['max_recency_acc_dig'] # These values are taken from trained model values\n max_recency_dig_2yr = serialized_objects['max_recency_dig_2yr'] # These values are taken from trained model values\n max_acc_recency_mf = serialized_objects['max_acc_recency_mf'] #These are values imported in training dataset. 
Same values needs to be used to impute missing values in unseen data\n\n data_df = data_df.na.fill({\n 'recency_acc_dig' : max_recency_acc_dig, # Filling missing values\n 'recency_dig_2yr' : max_recency_dig_2yr,\n 'acc_recency_mf' : max_acc_recency_mf\n })\n\n freq_acc_upg_2yrs_split = [-float('inf'), 0, 1, 2, float('inf')]\n bucketizer_freq_acc_upg_2yrs = Bucketizer(splits=freq_acc_upg_2yrs_split, inputCol='freq_acc_upg_acc_2yrs', outputCol='freq_acc_upg_acc_2yrs_bkt')\n data_df = bucketizer_freq_acc_upg_2yrs.setHandleInvalid('keep').transform(data_df) # Binning the freq_acc_upg_acc_2yrs column\n\n tot_purchase_split = [-float('inf'), 0, 1, 2, 3, float('inf')]\n bucketizer_tot_purchase = Bucketizer(splits=tot_purchase_split, inputCol='tot_accsry_purchse', outputCol='tot_accsry_purchse_bkt')\n data_df = bucketizer_tot_purchase.setHandleInvalid('keep').transform(data_df) # Binning the tot_accsry_purchse column\n\n del_cols_new = ['freq_acc_upg_acc_2yrs', 'tot_accsry_purchse']\n data_df = data_df.drop(*del_cols_new) # Dropping the older continuous columns\n return data_df", "def transform_data(data_df, target_df = None):\n rec_idx, rec_col, rec_data = create_recency_feature(data_df)\n freq_idx, freq_col, freq_data = create_frequency_feature(data_df)\n norm_idx, norm_col, norm_data = create_norm_feature(data_df)\n\n # with hstack function we are concatinating a sparse matrix and a dense matirx :)\n feat_df = hstack((rec_data, freq_data, norm_data))\n print('Final feature matrix shape:', feat_df.shape)\n \n # merge all the feature names\n feat_names = list(rec_col) + list(freq_col) + list(norm_col)\n \n if isinstance(target_df, pd.core.frame.DataFrame):\n # get +ve & -ve indices\n one_idx = target_df[target_df['outcome_flag'] == 1]['id'].index.tolist()\n zero_idx = target_df[target_df['outcome_flag'] == 0]['id'].index.tolist()\n \n # calculate fitness values of features\n rcdf = create_fitness_stats(rec_data, rec_col, one_idx, zero_idx, nans = True)\n fqdf = create_fitness_stats(freq_data, freq_col, one_idx, zero_idx, nans = False)\n nrdf = create_fitness_stats(norm_data, norm_col, one_idx, zero_idx, nans=False)\n fit_df = rcdf.append(fqdf).append(nrdf)\n fit_df.reset_index(drop=1)\n return feat_df, feat_names, fit_df\n \n return feat_df, feat_names", "def apply(self, df):\n encoded = []\n for feature_name, encoder in zip(self.feature_names, self.encoders):\n column = df[feature_name].to_numpy().reshape(-1, 1)\n encoded.append(pd.DataFrame(\n encoder.transform(column).todense(),\n index=df.index,\n columns=encoder.categories_[0]\n ))\n df = df.drop(columns=self.feature_names)\n df = pd.concat((df, *encoded), axis=1)\n return df", "def __create_cluster_profiles(self,\n clustered_dataframes,\n shrunken_df,\n numerical_features,\n le_map,\n output_path,\n find_nearest_on_cols=False,\n show=True):\n\n def find_nearest(numbers, target):\n \"\"\"\n Find the closest fitting number to the target number\n \"\"\"\n numbers = np.asarray(numbers)\n idx = (np.abs(numbers - target)).argmin()\n return numbers[idx]\n\n cluster_profiles_df = pd.DataFrame(columns=shrunken_df.columns).drop(\n 'Cluster_Name', axis=1)\n rows_count = 0\n for cluster_identfier, cluster_dataframe in \\\n clustered_dataframes.items():\n df = pd.DataFrame(columns=cluster_dataframe.columns)\n df = df.append(cluster_dataframe.mean(), ignore_index=True)\n df.index = [cluster_identfier]\n\n if cluster_dataframe.shape[0] <= 1:\n continue\n\n # Attempt to convert numbers found within the full set of data\n for col in 
cluster_dataframe.columns:\n if col not in numerical_features or find_nearest_on_cols:\n df[col] = find_nearest(numbers=shrunken_df[\n col].value_counts().index.tolist(),\n target=df[col].values[0])\n\n # Evaluate cluster dataframe by dataframe\n eval_df = pd.DataFrame(columns=cluster_dataframe.columns)\n eval_df = eval_df.append(\n cluster_dataframe.mean(), ignore_index=True)\n eval_df = eval_df.append(\n cluster_dataframe.min(), ignore_index=True)\n eval_df = eval_df.append(\n cluster_dataframe.median(),\n ignore_index=True)\n eval_df = eval_df.append(\n cluster_dataframe.max(), ignore_index=True)\n eval_df = eval_df.append(\n cluster_dataframe.std(), ignore_index=True)\n eval_df = eval_df.append(\n cluster_dataframe.var(), ignore_index=True)\n eval_df.index = [\"Mean\", \"Min\", \"Median\",\n \"Max\", \"Standard Deviation\", \"Variance\"]\n\n if show:\n print(\"Total found in {0} is {1}\".format(\n cluster_identfier, cluster_dataframe.shape[0]))\n self.__render_mpl_table(\n df,\n sub_dir=output_path,\n filename=cluster_identfier +\n \"_Means_Rounded_To_Nearest_Real_Numbers\",\n header_columns=0,\n col_width=4.0)\n\n self.__render_mpl_table(\n eval_df,\n sub_dir=output_path,\n filename=cluster_identfier +\n \"_Eval_Df\",\n header_columns=0,\n col_width=4.0)\n display(df)\n display(eval_df)\n self.__vertical_spacing(7)\n\n cluster_profiles_df = cluster_profiles_df.append(\n self.__decode_df(df, le_map))\n\n rows_count += cluster_dataframe.shape[0]\n\n return rows_count, cluster_profiles_df", "def training(df, type=None):\r\n df=dataCleaner(df[DISC_FEATURES_COL_TO_USE+CONT_FEATURES_COL_TO_USE+[DISC_TARGET_COL_TO_USE]])\r\n print(\"Using %d numbers of features\"%len(DISC_FEATURES_COL_TO_USE + CONT_FEATURES_COL_TO_USE))\r\n df_coded = trainEncode(df)\r\n df_coded = scalarNormalizer(df_coded)\r\n visualizeHistogram(df_coded)\r\n # visualizePCA(df_coded)\r\n df_shuffled = df_coded.sample(frac=1, random_state=100).reset_index(drop=True)\r\n X, y = df_shuffled[DISC_FEATURES_COL_TO_USE + CONT_FEATURES_COL_TO_USE], df_shuffled[DISC_TARGET_COL_TO_USE]\r\n X, y = resampling(X, y)\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = TEST_FR, random_state = 42)\r\n\r\n print(\"Training the classifier!\")\r\n if type=='LR':\r\n print(\"Using Logistic Regression Classifier\")\r\n cls=LogisticRegression(n_jobs=2, class_weight='balanced', tol=1e-4, C=1,random_state=111)\r\n elif type=='SVM':\r\n print(\"Using Support Vector Machine Classifier\")\r\n cls=SVC(class_weight='balanced', probability=True)\r\n elif type=='RF':\r\n print(\"Using Random Forst Classifier\")\r\n cls=RandomForestClassifier( n_jobs=3, n_estimators=8192, class_weight='balanced', max_depth=8,\r\n min_samples_leaf=1, random_state=24)\r\n elif type=='GBC':\r\n print(\"Using Gradient Boosting Classifier\")\r\n cls = GradientBoostingClassifier(n_estimators=2048, max_depth=4,\r\n subsample=0.8, learning_rate=0.004,\r\n random_state=34, min_samples_split=4,\r\n max_features=\r\n int(0.4*len(DISC_FEATURES_COL_TO_USE+\r\n CONT_FEATURES_COL_TO_USE)))\r\n else:\r\n print(\"Using Naive Bayes Classifier\")\r\n cls = GaussianNB()\r\n model = cls.fit(X_train, y_train)\r\n print (\"Cross-validated scores:\", cross_val_score(model, X_train, y_train, cv=10))\r\n print (\"Score:\", model.score(X_test, y_test))\r\n predict_test = model.predict(X_test)\r\n\r\n print('precision_score=%f\\nrecall_score=%f'%(precision_score(y_test, predict_test),recall_score(y_test, predict_test)))\r\n\r\n print(metrics.roc_auc_score(y_test, 
predict_test))\r\n\r\n cm=confusion_matrix(y_test, predict_test)\r\n print(\"Confusion matrix:\\n\" + str(cm))\r\n # showConfusionMatrix(cm)\r\n\r\n pickle.dump(model, open(MODEL_FILENAME, 'wb'))\r\n print(\"Model Created!\")", "def scale_continous(df):\n\n # locate the scaler object, and load it\n\n file_path = '/Users/davidodwyer/Desktop' # to the directory\n file_name = 'basic_mlr_145_scaler.joblib' \n the_file = os.path.join(file_path, file_name)\n\n scaler = load(the_file)\n\n # create a sub-dataframe of non-continuous features\n\n non_continuous_features = df[['stop_id', 'month', 'day', \\\n 'TIME_PERIOD_ARRIVAL', 'SCHOOL_OFF']] \n\n # create list of continuous feautres\n\n continuous_features = ['planned_arrival', 'rain', 'temp']\n\n # scale the continuous features, form as new dataframe\n\n scaled_continuous = pd.DataFrame(scaler.transform(df[continuous_features])\\\n , columns=continuous_features)\n\n # reset the sub-dataframes indices\n\n scaled_continuous.reset_index(drop=True, inplace=True)\n\n non_continuous_features.reset_index(drop=True, inplace=True)\n\n # join the sub-dataframes / rejoin to reform the \"original\"\n\n join = pd.concat([scaled_continuous, non_continuous_features]\\\n , axis=1)\n\n # reset the index\n\n join.reset_index(drop=True, inplace=True)\n\n return join", "def prep_func(data_dic):\n\n df_all = pd.DataFrame(columns=datasets[dataset_labels[0]].columns.tolist())\n\n # combine desired datasets into one dataframe\n for label in dataset_labels:\n df_all = pd.concat([df_all, data_dic[label]], ignore_index=True)\n\n df_all_columns = df_all.copy() # keep a copy of the original dataframes before dropping the trial names\n\n # dropping unused columns/features\n for col in ['Time', 'trial', 'maneuver']:\n if col in df_all.columns:\n df_all = df_all.drop(columns=[col])\n\n columns_all = df_all.columns.tolist()\n columns_torque = [col for col in df_all.columns.tolist() if 'Torque' in col] # all torque data\n\n # all torque features except for roc (mean/std/... & left/right/sum/diff)\n columns_2d_torque = [col for col in df_all.columns.tolist()\n if 'Torque_sum' in col or 'Torque_diff' in col and 'roc' not in col]\n\n # all torque features of left and right only (mean/std/... 
& left/right)\n columns_lr_torque = [col for col in df_all.columns.tolist()\n if ('Torque_L' in col or 'Torque_R' in col) and 'roc' not in col]\n\n columns_lr_torque_mean = ['Mean Torque_L', 'Mean Torque_R'] # mean torque left and right only\n columns_2d_torque_mean = ['Mean Torque_sum', 'Mean Torque_diff'] # mean torque left and right only\n\n # dictionary of list of feature subsets to be used for dimension_reduction or clustering\n featureSet_dic = {'ALL': columns_all, 'ALL_TORQUE': columns_torque,\n '2D_TORQUE': columns_2d_torque, '2D_TORQUE_MEAN': columns_2d_torque_mean,\n 'LR_TORQUE': columns_lr_torque, 'LR_TORQUE_MEAN': columns_lr_torque_mean}\n\n # Standardize features by removing the mean and scaling to unit variance\n scaler = StandardScaler()\n feat_all_stand = scaler.fit_transform(df_all.values)\n df_all_stand = pd.DataFrame(feat_all_stand, columns=data_columns) # normalized dataset\n\n return df_all_stand, df_all_columns, featureSet_dic", "def featureprepare(self,df):\n try:\n df = self.featureselection(df)\n emp_len_dict= pickleload(self.dict_employ_len) # Load emp len\n df['emp_length'] = df['emp_length'].map(emp_len_dict)\n df['Long_emp_length'] = df['emp_length'].apply(lambda x: 'Yes' if x == 10 else 'No') # creating new feature\n df[\"emp_title\"].fillna('Missing', inplace=True)\n\n # Handling missing numerical value\n dict_Mean_var = pickleload(self.dict_Mean_var)\n for col, mean_val in dict_Mean_var.items():\n df[col].fillna(mean_val, inplace=True)\n\n # Handling rare values\n Freqlabels = pickleload(self.Freqlabels)\n for variable, frequent_labels in Freqlabels.items():\n df[variable] = np.where(df[variable].isin(frequent_labels), df[variable], 'Rare')\n\n # Encoding Categorical features\n x = pickleload(self.labelEncoder)\n for features, labels in x.items():\n df.loc[:, features] = labels.transform(df.loc[:, features])\n return df\n except Exception as e:\n self._Logger.error(\"Error in feature preparation: {}\".format(e))", "def transform_features(context, params):\n\n input_features_ds = \"train/sales/features\"\n input_target_ds = \"train/sales/target\"\n\n artifacts_folder = DEFAULT_ARTIFACTS_PATH\n\n # load datasets\n train_X = load_dataset(context, input_features_ds)\n train_y = load_dataset(context, input_target_ds)\n\n cat_columns = train_X.select_dtypes(\"object\").columns\n num_columns = train_X.select_dtypes(\"number\").columns\n\n # Treating Outliers\n outlier_transformer = Outlier(method=params[\"outliers\"][\"method\"])\n train_X = outlier_transformer.fit_transform(\n train_X, drop=params[\"outliers\"][\"drop\"]\n )\n\n # NOTE: You can use ``Pipeline`` to compose a collection of transformers\n # into a single transformer. 
In this case, we are composing a\n # ``TargetEncoder`` and a ``SimpleImputer`` to first encode the\n # categorical variable into a numerical values and then impute any missing\n # values using ``most_frequent`` strategy.\n tgt_enc_simple_impt = Pipeline(\n [\n (\"target_encoding\", TargetEncoder(return_df=False)),\n (\"simple_impute\", SimpleImputer(strategy=\"most_frequent\")),\n ]\n )\n\n # NOTE: the list of transformations here are not sequential but weighted\n # (if multiple transforms are specified for a particular column)\n # for sequential transforms use a pipeline as shown above.\n features_transformer = ColumnTransformer(\n [\n # categorical columns\n (\n \"tgt_enc\",\n TargetEncoder(return_df=False),\n list(\n set(cat_columns)\n - set([\"technology\", \"functional_status\", \"platforms\"])\n ),\n ),\n (\n \"tgt_enc_sim_impt\",\n tgt_enc_simple_impt,\n [\"technology\", \"functional_status\", \"platforms\"],\n ),\n # numeric columns\n (\"med_enc\", SimpleImputer(strategy=\"median\"), num_columns),\n ]\n )\n\n # Check if the data should be sampled. This could be useful to quickly run\n # the pipeline for testing/debugging purposes (undersample)\n # or profiling purposes (oversample).\n # The below is an example how the sampling can be done on the train data if required.\n # Model Training in this reference code has been done on complete train data itself.\n sample_frac = params.get(\"sampling_fraction\", None)\n if sample_frac is not None:\n logger.warn(f\"The data has been sample by fraction: {sample_frac}\")\n sample_X = train_X.sample(frac=sample_frac, random_state=context.random_seed)\n else:\n sample_X = train_X\n sample_y = train_y.loc[sample_X.index]\n\n\n # Train the feature engg. pipeline prepared earlier. Note that the pipeline is\n # fitted on only the **training data** and not the full dataset.\n # This avoids leaking information about the test dataset when training the model.\n # In the below code train_X, train_y in the fit_transform can be replaced with\n # sample_X and sample_y if required. \n train_X = get_dataframe(\n features_transformer.fit_transform(train_X, train_y),\n get_feature_names_from_column_transformer(features_transformer),\n )\n\n # Note: we can create a transformer/feature selector that simply drops\n # a specified set of columns. 
But, we don't do that here to illustrate\n # what to do when transformations don't cleanly fall into the sklearn\n # pattern.\n curated_columns = list(\n set(train_X.columns.to_list())\n - set(\n [\n \"manufacturer\",\n \"inventory_id\",\n \"ext_grade\",\n \"source_channel\",\n \"tgt_enc_iter_impt_platforms\",\n \"ext_model_family\",\n \"order_no\",\n \"line\",\n \"inventory_id\",\n \"gp\",\n \"selling_price\",\n \"selling_cost\",\n \"invoice_no\",\n \"customername\",\n ]\n )\n )\n\n # saving the list of relevant columns and the pipeline.\n save_pipeline(\n curated_columns, op.abspath(op.join(artifacts_folder, \"curated_columns.joblib\"))\n )\n save_pipeline(\n features_transformer, op.abspath(op.join(artifacts_folder, \"features.joblib\"))\n )", "def fit(self):\n self.cluseter_agglomerative(n_clusters=20, linkage='average', iterate=5)\n self.sub_clustering(n_clusters=3, index_cluster=[79], linkage='complete')\n self.merge_clusters([[0,9,53],[1,83],[46,35,67],[88,23],[6,68]])\n self.merge_clusters([[6,33,52],[17,14]])\n self.sub_clustering(n_clusters=2, index_cluster=[0], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[2], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[85], linkage='average')\n self.sub_clustering(n_clusters=2, index_cluster=[14], linkage='complete')\n self.sub_clustering(n_clusters=2, index_cluster=[16], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[22], linkage='average')\n self.sub_clustering(n_clusters=2, index_cluster=[24], linkage='complete')\n self.sub_clustering(n_clusters=2, index_cluster=[26], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[28], linkage='ward')\n self.merge_clusters([[6,98,99]])\n self.merge_clusters([[35,80]])\n self.sub_clustering(n_clusters=4, index_cluster=[35], linkage='complete')\n self.merge_clusters([[76,98]])\n self.sub_clustering(n_clusters=3, index_cluster=[35], linkage='complete')\n self.merge_clusters([[39,42]])\n self.sub_clustering(n_clusters=3, index_cluster=[47], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[51], linkage='average')\n self.merge_clusters([[70,101]])\n self.sub_clustering(n_clusters=3, index_cluster=[51], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[61], linkage='ward')\n self.merge_clusters()\n return", "def dataClust(resAttrDF, infCol = 'Dollars', resName = None):\n \n if resName is None:\n raise Exception('**** RESTAURANT NAME WAS NOT PROVIDED ****')\n \n ## COPY AND PREPROCESS RESTAURANT ATTRIBUTE DATA\n print(f'\\n**** PREPROCESSING AND CLUSTERING DATA ACCORDING TO...{infCol.upper()} COLUMN ****')\n\n k_clust = resAttrDF.copy()\n k_clust = k_clust.reset_index(drop = True)\n \n labelEncoder = LabelEncoder()\n k_clust['Name'] = labelEncoder.fit_transform(k_clust['Name'])\n for col in k_clust.columns:\n if k_clust[col].dtypes == 'object':\n k_clust[col] = pd.to_numeric(k_clust[col])\n\n kprot_data = k_clust.copy()\n for c in k_clust.select_dtypes(exclude='object').columns:\n pt = PowerTransformer()\n kprot_data[c] = pt.fit_transform(np.array(kprot_data[c]).reshape(-1, 1))\n\n categorical_columns = [0] ## MAKE SURE TO SPECIFY CURRECT INDICES\n\n ## ACTUAL CLUSTERING\n if infCol != 'Dollars':\n kproto = KPrototypes(n_clusters= len(k_clust[infCol].unique()), init='Cao', n_jobs = 4)\n clusters = kproto.fit_predict(kprot_data, categorical=categorical_columns)\n else:\n kproto = KPrototypes(n_clusters= len(k_clust['Dollars'].unique()), init='Cao', n_jobs = 4)\n clusters = 
kproto.fit_predict(kprot_data, categorical=categorical_columns) \n\n ## PRINT COUNT OF EACH CLUSTER GROUP\n print('The count for each cluster group is printed below')\n pd.Series(clusters).value_counts()\n \n ## EVALUATE CLUSTER ACCURACY WITH LGBMCLASSIFIER\n clf_kp = lgb.LGBMClassifier(colsample_by_tree=0.8, random_state=1)\n cv_scores_kp = cross_val_score(clf_kp, k_clust, clusters, scoring='f1_weighted')\n print(f'CV F1 score for K-Prototypes clusters is {np.mean(cv_scores_kp)}')\n\n ## PLOT INFLUENTIAL COLOUMNS\n clf_kp.fit(k_clust, clusters)\n explainer_kp = shap.TreeExplainer(clf_kp)\n shap_values_kp = explainer_kp.shap_values(k_clust)\n shap.summary_plot(shap_values_kp, k_clust, plot_type=\"bar\", plot_size=(15, 10))\n\n ## ADD CLUSTERS TO ORIGINAL DATAFRAME AND INVERSE LABEL ENCODE RESTAURANT NAMES\n k_clust['Cluster'] = clusters\n k_clust['Name'] = labelEncoder.inverse_transform(k_clust['Name'])\n\n ## FILTER RESTAURNAT CLUSTER OF CHOICE\n clusterVal = clusters[list(k_clust['Name']).index(resName)]\n k_clust = k_clust[k_clust['Cluster'] == clusterVal]\n k_clust = k_clust.reset_index(drop = True)\n k_clust = k_clust[['Name', 'ZipCode', 'Dollars', 'Photos']]\n\n print('**** CLUSTERING COMPLETED AND SAVING CLUSTER DATAFRAME LOCALLY ****\\n')\n resFileName = resName.replace(' ', '_')\n fileName = f'{resFileName.upper()}_CLUSTER_DATA.csv'\n k_clust.to_csv(fileName)\n\n return k_clust", "def add_features(df_in, rolling_win_size,columns_to_treat):\n \n av_cols = [nm+'__av' for nm in columns_to_treat]\n sd_cols = [nm+'__sd' for nm in columns_to_treat]\n min_cols =[nm+'__min' for nm in columns_to_treat]\n max_cols =[nm+ '__max' for nm in columns_to_treat]\n \n df_out = pd.DataFrame()\n \n ws = rolling_win_size\n \n #calculate rolling stats for each engine (engine.id)\n \n for m_id in pd.unique(df_in['id.engine.id']):\n \n # get a subset for each engine sensors\n df_engine = df_in[df_in['id.engine.id'] == m_id]\n df_sub = df_engine[columns_to_treat]\n\n \n # get rolling mean for the subset\n av = df_sub.rolling(ws, min_periods=1).mean()\n av.columns = av_cols\n \n # get the rolling standard deviation for the subset\n sd = df_sub.rolling(ws, min_periods=1).std().fillna(0)\n sd.columns = sd_cols\n\n # get rolling rolling max for the subset\n max = df_sub.rolling(ws, min_periods=1).max()\n max.columns = max_cols\n \n # get the rolling standard deviation for the subset\n min = df_sub.rolling(ws, min_periods=1).min().fillna(0)\n min.columns = min_cols\n \n # combine the two new subset dataframes columns to the engine subset\n new_ftrs = pd.concat([df_engine,av,sd,min,max], axis=1)\n \n # add the new features rows to the output dataframe\n df_out = pd.concat([df_out,new_ftrs])\n \n return df_out", "def train(self, df):\n self.encoders = []\n self.feature_names = []\n for feature_name, column in df.iteritems():\n if column.dtype != object:\n continue\n self.feature_names.append(feature_name)\n column = column.to_numpy().reshape(-1, 1)\n ohe = sklearn.preprocessing.OneHotEncoder()\n ohe.fit(column)\n self.encoders.append(ohe)\n return self", "def update_features(df, config):\n coefs = np.ones(shape=df.shape[1])\n df_coefs = pd.DataFrame(np.expand_dims(coefs, 0), columns=df.columns)\n for key in config[\"clustering\"]:\n if key in (\"r\", \"g\", \"b\", \"z\"):\n df_coefs[key] = float(config[\"clustering\"][key])\n else:\n key_columns = df.columns.str.startswith(key)\n coefs[key_columns] = float(config[\"clustering\"][key])\n coefs = np.squeeze(np.array(df_coefs))\n for idx, column in 
enumerate(df.columns):\n if coefs[idx] != 1:\n logger.info(\"Multiply %s feature by %s\", column, coefs[idx])\n df[column] = coefs[idx] * df[column]", "def executeFeatures(dfIn, train = True):\n\n if train == True:\n dfOut = dfIn['TARGET'] #update this with numerical columns that don't need cleaning\n dfOut = standardizedIncome(dfIn, dfOut)\n dfOut = engineerDays(dfIn, dfOut)\n dfOut = createEncoders(dfIn, dfOut)\n dfOut = simplifyEducation(dfIn, dfOut)\n dfOut = simplifyFamily(dfIn, dfOut)\n dfOut = simplifyIncome(dfIn, dfOut)\n dfOut = addExtSources(dfIn, dfOut)\n dfOut = cleanNames(dfOut)\n dfOut = createPolyFeatures(dfOut)\n else:\n dfOut = dfIn['SK_ID_CURR'] ## tags from test set\n dfOut = standardizedIncome(dfIn, dfOut)\n dfOut = engineerDays(dfIn, dfOut)\n dfOut = createEncoders(dfIn, dfOut)\n dfOut = simplifyEducation(dfIn, dfOut)\n dfOut = simplifyFamily(dfIn, dfOut)\n dfOut = simplifyIncome(dfIn, dfOut)\n dfOut = addExtSources(dfIn, dfOut)\n dfOut = dfOut.drop('CODE_GENDER', axis = 1) ## Need to fix this\n #print(dfOut.columns)\n dfOut = cleanNamesTest(dfOut)\n dfOut = createPolyFeatures(dfOut)\n\n return dfOut", "def apply_catboost_model(float_features, cat_features=[], ntree_start=0, ntree_end=catboost_model.tree_count):\n if ntree_end == 0:\n ntree_end = catboost_model.tree_count\n else:\n ntree_end = min(ntree_end, catboost_model.tree_count)\n\n model = catboost_model\n\n assert len(float_features) >= model.float_feature_count\n assert len(cat_features) >= model.cat_feature_count\n\n # Binarise features\n binary_features = [0] * model.binary_feature_count\n binary_feature_index = 0\n\n for i in range(len(model.float_feature_borders)):\n for border in model.float_feature_borders[i]:\n binary_features[binary_feature_index] += 1 if (float_features[model.float_features_index[i]] > border) else 0\n binary_feature_index += 1\n transposed_hash = [0] * model.cat_feature_count\n for i in range(model.cat_feature_count):\n transposed_hash[i] = hash_uint64(cat_features[i])\n\n if len(model.one_hot_cat_feature_index) > 0:\n cat_feature_packed_indexes = {}\n for i in range(model.cat_feature_count):\n cat_feature_packed_indexes[model.cat_features_index[i]] = i\n for i in range(len(model.one_hot_cat_feature_index)):\n cat_idx = cat_feature_packed_indexes[model.one_hot_cat_feature_index[i]]\n hash = transposed_hash[cat_idx]\n for border_idx in range(len(model.one_hot_hash_values[i])):\n binary_features[binary_feature_index] |= (1 if hash == model.one_hot_hash_values[i][border_idx] else 0) * (border_idx + 1)\n binary_feature_index += 1\n\n if hasattr(model, 'model_ctrs') and model.model_ctrs.used_model_ctrs_count > 0:\n ctrs = [0.] 
* model.model_ctrs.used_model_ctrs_count;\n calc_ctrs(model.model_ctrs, binary_features, transposed_hash, ctrs)\n for i in range(len(model.ctr_feature_borders)):\n for border in model.ctr_feature_borders[i]:\n binary_features[binary_feature_index] += 1 if ctrs[i] > border else 0\n binary_feature_index += 1\n\n # Extract and sum values from trees\n result = 0.\n tree_splits_index = 0\n current_tree_leaf_values_index = 0\n for tree_id in range(ntree_start, ntree_end):\n current_tree_depth = model.tree_depth[tree_id]\n index = 0\n for depth in range(current_tree_depth):\n border_val = model.tree_split_border[tree_splits_index + depth]\n feature_index = model.tree_split_feature_index[tree_splits_index + depth]\n xor_mask = model.tree_split_xor_mask[tree_splits_index + depth]\n index |= ((binary_features[feature_index] ^ xor_mask) >= border_val) << depth\n result += model.leaf_values[current_tree_leaf_values_index + index]\n tree_splits_index += current_tree_depth\n current_tree_leaf_values_index += (1 << current_tree_depth)\n return model.scale * result + model.bias", "def cluster_by_range(df, lower_bound):\n global features_in_range\n # get total number of robot faces in data set\n total_rows = len(df)\n # determine the upper bound percentage\n upper_bound = 100 - lower_bound\n # lists of which columns to fix and which to cluster\n cols_to_fix = []\n cols_to_cluster = []\n # iterate over every column in dataframe\n for col in df:\n \"\"\"\n # drop any column we are ignoring\n if df[col].name in cols_to_ignore:\n df = df.drop(df[col].name, 1)\n continue\n \"\"\"\n if df[col].name not in cols_no_NA_option:\n df = df.drop(df[col].name, 1)\n continue\n # count & calculate percentage representation for each value of the column\n col_check = df.groupby(col).size().reset_index(name='count')\n col_check['as_percent'] = 100 * col_check['count'] / float(total_rows)\n # if percentage is over the upper bound, add it to list of features to fix\n if not col_check[col_check['as_percent'] >= upper_bound].empty:\n cols_to_fix.append(df[col].name)\n # if percentage is over the lower bound, add it to list of features to vary\n elif not col_check[col_check['as_percent'] >= lower_bound].empty \\\n and col_check[col_check['as_percent'] >= upper_bound].empty:\n cols_to_cluster.append(df[col].name)\n\n # generate clusters based on list of what features to vary\n groupby_result = df.groupby(cols_to_cluster).size().reset_index(name='count')\n groupby_result['as_percent'] = 100 * groupby_result['count'] / float(total_rows)\n groupby_result = groupby_result.sort_values(by='as_percent', ascending=False)\n\n # store list of features to test in a global list;\n # don't include count and percentage columns\n features_in_range = list(groupby_result)[:-2]\n\n cluster_by_split(groupby_result)\n\n \"\"\"\n # print results to file\n filename = str(lower_bound) + '_percent_clusters_noNA.tsv'\n groupby_result.to_csv(filename, header=True, sep='\\t')\n print(\"results written to file\")\n \"\"\"\n\n \"\"\"\n groupby_result = df.groupby(cols_to_fix).size().reset_index(name='count')\n groupby_result['as_percent'] = 100 * groupby_result['count'] / float(total_rows)\n groupby_result = groupby_result.sort_values(by='as_percent', ascending=False)\n print(\"cluster: %s, fix: %s, total: %s\"\n % (len(cols_to_cluster), len(cols_to_fix), count))\n print(\"RANGE: \", lower_bound, upper_bound)\n print(groupby_result.head(1))\n \"\"\"", "def feature_engineer_segment(self):\n df = self.get_all_data()\n\n # join with census\n census_df = 
self.get_df_census()\n\n # drop row that has null more than 20%\n thresh = len(df) * .8\n df = df.dropna(thresh = thresh, axis = 1)\n \n # select only numeric col\n c_list = [c for c in list(df.columns) if c not in ['idd', 'ft_data_dt', 'target']]\n for col in c_list:\n if not is_numeric_dtype(df[col]):\n df = df.drop(columns=[col])\n \n to_calculate_col = [c for c in list(df.columns) if c not in ['idd', 'ft_data_dt', 'target']]\n df = df.merge(census_df, on=['idd'], how='left')\n\n # join with lag target\n target_df = self.get_target_df()\n target_df = target_df.rename(columns = {'target' : 'lag_target'})\n target_df['lag_target'] = target_df['lag_target'].replace(np.nan, 0)\n df = df.merge(target_df, on = ['idd', 'ft_data_dt'], how='left')\n \"\"\"\n age segment will be calculated by //10\n \"\"\"\n def impute_age(age):\n return age//10\n df['age_group'] = df['PatientAge'].apply(lambda x:impute_age(x))\n for feature in to_calculate_col:\n if df[feature].std(ddof=1) == 0:\n continue\n df['zscore_age_' + feature] = df.groupby(\"age_group\")[feature].transform(lambda x : stats.zscore(x,ddof=1))\n df['zscore_sex_' + feature] = df.groupby(\"male\")[feature].transform(lambda x : stats.zscore(x,ddof=1))\n df['zscore_Diabetes_' + feature] = df.groupby(\"Diabetes\")[feature].transform(lambda x : stats.zscore(x,ddof=1))\n # deal with null value\n thresh = len(df) * .7\n df = df.dropna(thresh = thresh, axis = 1)\n for col in df.columns:\n if col in ['idd', 'ft_data_dt']:\n continue\n if col in ['male', 'Diabetes']:\n df[col] = df[col].replace(np.nan, 0)\n elif 'target' not in col:\n df[col] = df[col].replace(np.nan, df[col].median())\n self.set_all_data(df)", "def data_preprocessing(dat: pd.DataFrame, art='C', y=None, logger=None, remove=True):\n if logger == None:\n logging.basicConfig(\n level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n logger = logging.getLogger(__name__)\n \n logger.info('Start data preprocessing')\n # replace original indeices with default ones\n dat = dat.reset_index(drop=True)\n\n if art == 'C':\n logger.info('Start to label target feature y for classification task')\n dat.iloc[:, -1] = LabelEncoder().fit_transform(dat.iloc[:, -1])\n logger.info('End with label encoding the target feature')\n if remove:\n # remove columns with more than 1/2 na\n dat = dat.loc[:, dat.isna().sum()/len(dat) < .5]\n logger.info('Following features are removed from the dataframe because half of their value are NA: %s' %\n (dat.columns[dat.isna().sum()/len(dat) > .5].to_list()))\n # Encoding\n oe = OneHotEncoder(drop='first')\n # get categorical columns\n if y:\n dat_y = dat[[y]]\n cols = dat.columns.to_list()\n cols.remove(y)\n dat_x = dat[cols]\n else:\n dat_y = dat[[dat.columns[-1]]]\n dat_x = dat[dat.columns[:-1]]\n dat_categ = dat_x.select_dtypes(include=['object'])\n # get kterm of categ features\n for i in dat_categ.columns:\n # save output to dat\n tmp = dat_x[i].value_counts()\n dat_x[i + '_kterm'] = dat_x[i].map(lambda x: tmp[x] if x in tmp.index else 0)\n # float columns including the k term cols\n dat_numeric = dat_x.select_dtypes(include=['float32', 'float64', 'int32', 'int64'])\n # onehot encoding and label encoding\n dat_categ_onehot = dat_categ.iloc[:, dat_categ.apply(lambda x: len(x.unique())).values < 8]\n dat_categ_label = dat_categ.iloc[:, dat_categ.apply(lambda x: len(x.unique())).values >= 8]\n flag_onehot = False\n flag_label = False\n # oe\n if dat_categ_onehot.shape[1] > 0:\n logger.info('Start to do onehot to the following categoric 
features: %s' %\n (str(dat_categ_onehot.columns.to_list())))\n dat_onehot = pd.DataFrame(oe.fit_transform(dat_categ_onehot.astype(str)).toarray(),\n columns=oe.get_feature_names(dat_categ_onehot.columns))\n logger.info('End with onehot')\n flag_onehot = True\n else:\n dat_onehot = None\n # le\n if dat_categ_label.shape[1] > 0:\n logger.info('Start to do label encoding to the following categoric features: %s' %\n (str(dat_categ_label.columns.to_list())))\n dat_categ_label = dat_categ_label.fillna('NULL')\n dat_label = pd.DataFrame(columns=dat_categ_label.columns)\n for i in dat_categ_label.columns:\n dat_label[i] = LabelEncoder().fit_transform(dat_categ_label[i].astype(str))\n flag_label = True\n logger.info('End with label encoding')\n else:\n dat_label = None\n # scaling\n # combine\n dat_new = pd.DataFrame()\n if flag_onehot and flag_label:\n dat_new = pd.concat([dat_numeric, dat_onehot, dat_label], axis=1)\n elif flag_onehot:\n dat_new = pd.concat([dat_numeric, dat_onehot], axis=1)\n elif flag_label:\n dat_new = pd.concat([dat_numeric, dat_label], axis=1)\n else:\n dat_new = dat_numeric\n dat_new = pd.concat([dat_new, dat_y], axis=1)\n # imputation\n dat_new = dat_new.dropna(axis=1, how='all')\n if dat_new.isna().sum().sum() > 0:\n logger.info('Nan value exist, start to fill na with iterative imputer: ' +\n str(dat_new.isna().sum().sum()))\n # include na value, impute with iterative Imputer or simple imputer\n columns = dat_new.columns\n imp = IterativeImputer(max_iter=10, random_state=0)\n # imp = SimpleImputer(missing_values=np.nan, strategy='mean')\n dat_new = imp.fit_transform(dat_new)\n dat_new = pd.DataFrame(dat_new, columns=columns)\n dat_numeric = dat_new.iloc[:, :-1].select_dtypes(include=['float32', 'float64', 'int32', 'int64'])\n logger.info('End with fill nan')\n return dat_new, dat_numeric.columns", "def generate_clusters(df):\n\n df_size = df.shape[0]\n print(df_size)\n n_clusters = 0\n percent_min_pts = 0.105\n min_clusters = 3\n while (n_clusters != min_clusters):\n print(\"percent_min_pts\", percent_min_pts)\n min_cluster_pts = math.floor(df_size * percent_min_pts)\n print(\"min_cluster_pts\", min_cluster_pts)\n\n clusterer = hdbscan.HDBSCAN(min_cluster_size=min_cluster_pts)\n print(df.head())\n clusterer.fit(df)\n cluster_groups = {}\n labels = clusterer.labels_\n for i in labels:\n if cluster_groups.get(i):\n cluster_groups[i] = cluster_groups[i] + 1\n else:\n cluster_groups[i] = 1\n print(\"cluster_groups\", cluster_groups)\n n_clusters = len(set(labels))\n print(\"n_clusters\", n_clusters)\n multiplier = abs(n_clusters - min_clusters) * 0.001\n print(\"multiplier\", multiplier)\n if n_clusters > min_clusters:\n percent_min_pts += multiplier\n else:\n percent_min_pts -= multiplier\n print(\"percent_min_pts\", percent_min_pts)\n return labels", "def compute_training_features(train_df, df_config, feature_config_list, feature_map, max_horizon):\n pipeline_steps = []\n for feature_config in feature_config_list:\n feature_name, feature_args, featurizer = parse_feature_config(feature_config, feature_map)\n if feature_name in FEATURES_REQUIRE_MAX_HORIZON:\n feature_args[\"max_horizon\"] = max_horizon\n pipeline_steps.append((feature_name, featurizer(df_config=df_config, **feature_args)))\n\n feature_engineering_pipeline = Pipeline(pipeline_steps)\n feature_engineering_pipeline_fitted = feature_engineering_pipeline.fit(train_df)\n train_features = feature_engineering_pipeline_fitted.transform(train_df)\n\n return train_features, feature_engineering_pipeline_fitted", "def 
prep_tree_data(self, number: int):\n filename = \"data-before-normalization-{}-out-of-7.csv\".format(number)\n path = str(DATA_PATH.joinpath(\"data-splitted\", filename))\n df = pandas.read_csv(path)\n\n df.drop(df.columns[0], axis=1, inplace=True)\n assessments = [x for x in df.columns.values if x.split(\"_\")[0] == \"assessment\"]\n df['average_score'] = df[assessments].mean(skipna=True, axis=1)\n for assessment in assessments: # somehow he doesn't want to fillna in a batch?\n df[assessment].fillna(df['average_score'], inplace=True)\n clicks = [x for x in df.columns.values if x.split(\"_\")[0] == \"vle\"]\n df['vle_click_average'] = df[clicks].mean(skipna=True, axis=1)\n for click in clicks: # somehow he doesn't want to fillna in a batch?\n df[click].fillna(df['vle_click_average'], inplace=True)\n df.dropna()\n\n self.change_oh_cat(\"gender\", df)\n self.change_oh_cat(\"highest_education\", df)\n self.change_oh_cat(\"imd_band\", df)\n self.change_oh_cat(\"age_band\", df)\n self.change_oh_cat(\"disability\", df)\n result_order = {'final_result__Fail': 0, 'final_result__Withdrawn': 2,\n 'final_result__Pass': 1, 'final_result__Distinction': 3}\n self.change_oh_cat(\"final_result\", df, result_order)\n df[\"final_result\"].replace(2, 0, inplace=True)\n df[\"final_result\"].replace(3, 1, inplace=True)\n\n target = df[\"final_result\"]\n df.drop([\"final_result\"], axis=1, inplace=True)\n\n x_train, x_test, y_train, y_test = train_test_split(df, target, test_size=0.1,\n random_state=32, shuffle=True,\n stratify=target)\n\n return x_train, x_test, y_train, y_test", "def reweigh(self, **kwargs):\n if kwargs:\n bl_df = kwargs['bl_df']\n else:\n bl_df = self.binary_label_df\n\n rw = Reweighing(unprivileged_groups=self.unprivileged_groups,\n privileged_groups=self.privileged_groups)\n\n transformed_data = rw.fit_transform(bl_df)\n return transformed_data", "def prepareSplitClassifier(df, models, choice):\n\n\n def classificationOutput(clf, X, Y):\n \"\"\"\n Fit the model and print the classification results\n - confusion_matrix\n - avg scores etc\n \"\"\"\n n_samples = 36\n\n print \"\\n\\nClassifier: \\n %s\" % (clf)\n print \"#\" * 79\n # classifier_gnb = naive_bayes.GaussianNB() # initiating the classifier\n\n clf.fit(X[:n_samples], Y[:n_samples]) # train on first n_samples and test on last 10\n\n expected = Y[n_samples:]\n predicted = clf.predict(X[n_samples:])\n print(\"Classification report:\\n%s\\n\" % (metrics.classification_report(expected, predicted)))\n print(\"\\nConfusion matrix:\\n%s\" % metrics.confusion_matrix(expected, predicted))\n\n\n\n\n def splitclassify(cDf):\n \"\"\"\n Given the dataframe combined with equal fair and unfair apps,\n classify them\n \"\"\"\n cDf = cDf.reindex(np.random.permutation(cDf.index)) # shuffle the dataframe\n featCols = set(cDf.columns)\n featCols.remove('appLabel')\n\n features = cDf[list(featCols)].astype('float')\n\n ## Scale the features to a common range\n min_max_scaler = preprocessing.MinMaxScaler()\n X = min_max_scaler.fit_transform(features.values)\n\n Y = cDf['appLabel'].values\n\n\n if choice == 'all':\n for key in models:\n classifier = models[key]\n classificationOutput(classifier, X, Y)\n else:\n if choice in models:\n classifier = models[choice]\n classificationOutput(classifier, X, Y)\n else:\n print \"Incorrect Choice\"\n\n\n\n fairDf = df[df['appLabel'] == False]\n unfairDf = df[df['appLabel'] == True]\n\n\n # calculate total possible splits of fair data frame relatie to\n # size of unfair dataframe\n splits = len(fairDf) // 
len(unfairDf)\n\n for i in range(splits):\n clDf = fairDf[i : i+len(unfairDf)].append(unfairDf)\n\n # print fairDf.values, unfairDf.values\n print \"Classifying %d th split of fair apps with unfair app\" % (i)\n print \"-\" * 79\n splitclassify(clDf)\n print \"\\n\\n\"", "def clus_func(df_all, n_components, feat_subset):\n\n df = df_all[featureSet_dic[feat_subset]].copy()\n\n X = df.values\n\n # # Fit a Gaussian mixture with EM\n # gmm_model = mixture.GaussianMixture(n_components=n_components,\n # covariance_type=cv_type,\n # random_state=1,\n # n_init=10)\n # gmm_model = gmm_model.fit(X)\n\n model_path = os.path.join(CURR_PATH, 'clustering_model') # create directiry for the current time\n model_name = os.path.join(model_path, 'gmm.joblib')\n gmm_model = joblib.load(model_name)\n\n # predic labels & probabilities\n labels = gmm_model.predict(X)\n labels_prob = gmm_model.predict_proba(X)\n\n # adding all droped features (for plotting purposes) of the standardized dataframe\n added_feat = [feat for feat in data_columns if feat not in df.columns]\n df[added_feat] = df_all_stand[added_feat].copy()\n df = df[data_columns]\n\n # adding the labels to the dataframe\n df.insert(0, 'Clus_label', labels)\n\n for n in range(n_components):\n df['Prob_L'+str(n)] = labels_prob[:, n]\n\n return gmm_model, df # export all gmm models and a dictionary of all labeled datasets", "def prepare_class_data_for_prediction(dataframe, model_dict, user_keyword, task_name):\r\n test_tweets = dataframe.iloc[:, [0, 1, 2]]\r\n\r\n parent_dir = Path.cwd().parent\r\n pickle_dir = parent_dir.joinpath('default_results', 'pickle_files_feat_eng')\r\n feature_X_user = pd.DataFrame\r\n emo_X_test_dict = {}\r\n\r\n\r\n for emotion, model_prop in model_dict.items():\r\n preprocessed_X_user = Preprocessor.perform(test_tweets, emotion, user_keyword, task_name)\r\n feature_X_user = Feature_Transformer.perform(preprocessed_X_user, emotion, user_keyword, task_name)\r\n vectorizer = Dictionaries.vectorizer_dict[model_prop[2]]\r\n\r\n #Fit transform the vectorizer with the corresponding preprocessed training data\r\n if os.path.exists(pickle_dir.joinpath(emotion + '_c_train_preprocess_df.pkl')):\r\n preprocess_train_df = pd.read_pickle(pickle_dir.joinpath(emotion + '_c_train_preprocess_df.pkl'))\r\n train_vect = vectorizer.fit_transform(preprocess_train_df['preprocessed_text'].values)\r\n print(emotion + 'TRAIN', train_vect.shape)\r\n train_vect_df = pd.DataFrame(train_vect.toarray(), columns=vectorizer.get_feature_names())\r\n else:\r\n #If the file doesnt exist, exit the program with instructions\r\n print('\\nRequired files does not exist.\\n\\n Please, train the models first by running > Modelling.py')\r\n sys.exit(1)\r\n\r\n # Use the same vectorizer to transform test data and then perform the feature union\r\n vector_X = vectorizer.transform(preprocessed_X_user['preprocessed_text'].values)\r\n test_vect_df = pd.DataFrame(vector_X.toarray(), columns=vectorizer.get_feature_names())\r\n X_test = pd.concat([test_vect_df, feature_X_user], axis=1)\r\n emo_X_test_dict[emotion] = X_test\r\n print(emotion + 'TEST', test_vect_df.shape, X_test.shape)\r\n return emo_X_test_dict", "def topic_extraction(df, col_name):\n tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2,\n max_features=200,\n stop_words='english')\n tfidf = tfidf_vectorizer.fit_transform(df[col_name])\n\n tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2,\n max_features=200,\n stop_words='english')\n tf = tf_vectorizer.fit_transform(df[col_name])\n nmf = 
NMF(n_components=20, random_state=1,\n alpha=.1, l1_ratio=.5)\n tfidf_feature_names = tfidf_vectorizer.get_feature_names()\n nmf_w = nmf.fit_transform(tfidf)\n nmf_h = nmf.components_\n df['labels'] = nmf_w.argmax(axis=1) # this was the right code to get labels/clusters\n\n\n print(\"\\nTopics in NMF model:\")\n print_top_words(nmf, tfidf_feature_names)\n\n\n lda = LatentDirichletAllocation(n_topics=20, max_iter=5,\n learning_method='online',\n learning_offset=50.,\n random_state=0,\n n_jobs=-1)\n lda.fit(tf)\n doc_topic_distrib = lda.transform(tf)\n lda_labels = doc_topic_distrib.argmax(axis=1)\n print lda_labels[:100]\n df['lda_labels'] = lda_labels\n print(\"\\nTopics in LDA model:\")\n tf_feature_names = tf_vectorizer.get_feature_names()\n print_top_words(lda, tf_feature_names)\n return df" ]
[ "0.6235735", "0.6067175", "0.6045808", "0.6000694", "0.5986487", "0.5972561", "0.59375983", "0.5912315", "0.590315", "0.5823235", "0.58231467", "0.58022463", "0.5763611", "0.57518566", "0.57234585", "0.57188344", "0.57007515", "0.569017", "0.5686344", "0.56673455", "0.561648", "0.56091994", "0.5599795", "0.5582364", "0.55548334", "0.5551856", "0.5550337", "0.5524624", "0.55203265", "0.5504742" ]
0.6397659
0
Generate GradCAM at different layers of ResNet152
def demo2(image_paths, output_dir, cuda):
    device = get_device(cuda)

    # Synset words
    classes = get_classtable()

    # Model
    model = models.resnet152(pretrained=True)
    model.to(device)
    model.eval()

    # The stem ReLU and the four residual stages
    target_layers = ["relu", "layer1", "layer2", "layer3", "layer4"]
    target_class = 243  # "bull mastiff"

    # Images
    images, raw_images = load_images(image_paths)
    images = torch.stack(images).to(device)

    gcam = GradCAM(model=model)
    probs, ids = gcam.forward(images)
    ids_ = torch.tensor([[target_class]] * len(images), dtype=torch.long).to(device)
    gcam.backward(ids=ids_)

    for target_layer in target_layers:
        print("Generating Grad-CAM @{}".format(target_layer))

        # Grad-CAM
        regions = gcam.generate(target_layer=target_layer)

        for j in range(len(images)):
            print(
                "\t#{}: {} ({:.5f})".format(
                    j, classes[target_class], float(probs[ids == target_class])
                )
            )

            save_gradcam(
                filename=osp.join(
                    output_dir,
                    "{}-{}-gradcam-{}-{}.png".format(
                        j, "resnet152", target_layer, classes[target_class]
                    ),
                ),
                gcam=regions[j, 0],
                raw_image=raw_images[j],
            )
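A minimal usage sketch for the function above, assuming the GradCAM class and the helpers get_device, get_classtable, load_images, and save_gradcam are importable from the same grad-cam-pytorch style codebase; the image paths and CUDA flag below are placeholders, not values from the dataset record:

    # Hypothetical call: writes one Grad-CAM overlay per image, per target layer, into ./results
    demo2(image_paths=["samples/cat_dog.png"], output_dir="./results", cuda=True)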
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def grad_cam_batch(input_model, images, classes, layer_name):\n loss = tf.gather_nd(input_model.output, np.dstack([range(images.shape[0]), classes])[0])\n layer_output = input_model.get_layer(layer_name).output\n grads = K.gradients(loss, layer_output)[0]\n gradient_fn = K.function([input_model.input, K.learning_phase()], [layer_output, grads])\n\n conv_output, grads_val = gradient_fn([images, 0]) \n weights = np.mean(grads_val, axis=(1, 2))\n cams = np.einsum('ijkl,il->ijk', conv_output, weights)\n \n # Process CAMs\n new_cams = np.empty((images.shape[0], H, W))\n for i in range(new_cams.shape[0]):\n cam_i = cams[i] - cams[i].mean()\n cam_i = (cam_i + 1e-10) / (np.linalg.norm(cam_i, 2) + 1e-10)\n new_cams[i] = cv2.resize(cam_i, (H, W), cv2.INTER_LINEAR)\n new_cams[i] = np.maximum(new_cams[i], 0)\n new_cams[i] = new_cams[i] / new_cams[i].max()\n \n return new_cams", "def grad_cam(input_model, image, cls, layer_name):\n y_c = input_model.output[0, cls]\n conv_output = input_model.get_layer(layer_name).output\n grads = K.gradients(y_c, conv_output)[0]\n # Normalize if necessary\n # grads = normalize(grads)\n gradient_function = K.function([input_model.input], [conv_output, grads])\n\n output, grads_val = gradient_function([image])\n output, grads_val = output[0, :], grads_val[0, :, :, :]\n\n weights = np.mean(grads_val, axis=(0, 1))\n cam = np.dot(output, weights)\n\n # Process CAM\n cam = cv2.resize(cam, (H, W), cv2.INTER_LINEAR)\n cam = np.maximum(cam, 0)\n cam = cam / cam.max()\n return cam", "def grad_cam(input_model, image, clss, layer_name, H=180, W=180):\r\n y_c = input_model.output[0, clss]\r\n conv_output = input_model.get_layer(layer_name).output\r\n grads = K.gradients(y_c, conv_output)[0]\r\n\r\n gradient_function = K.function([input_model.input], [conv_output, grads])\r\n\r\n output, grads_val = gradient_function([image])\r\n output, grads_val = output[0, :], grads_val[0, :, :, :]\r\n\r\n weights = np.mean(grads_val, axis=(0, 1))\r\n cam = np.dot(output, weights)\r\n\r\n # Process CAM\r\n cam = cv2.resize(cam, (W, H), cv2.INTER_LINEAR)\r\n cam = np.maximum(cam, 0)\r\n cam = cam / cam.max()\r\n return cam", "def build(self):\n\n # bgr_ = bgr*255.0\n bgr_= self.X\n start_time = time.time()\n print(\"build model started\")\n\n # blue ,green, red = tf.split(axis=3, num_or_size_splits=3, value= bgr)\n red ,green, blue, = tf.split(axis=3, num_or_size_splits=3, value= bgr_)\n assert red.get_shape().as_list()[1:] == [224, 224, 1]\n assert green.get_shape().as_list()[1:] == [224, 224, 1]\n assert blue.get_shape().as_list()[1:] == [224, 224, 1]\n bgr = tf.concat(axis=3, values=[\n # blue - VGG_MEAN[0],\n # green - VGG_MEAN[1],\n # red - VGG_MEAN[2],\n\n red - VGG_MEAN[0],\n green - VGG_MEAN[1],\n blue - VGG_MEAN[2],\n ])\n assert bgr.get_shape().as_list()[1:] == [224, 224, 3]\n\n\n\n print(bgr.shape)\n\n self.conv1_1 = self.conv_layer(bgr, \"conv1_1\")\n self.conv1_2 = self.conv_layer(self.conv1_1, \"conv1_2\")\n self.pool1 = self.max_pool(self.conv1_2, 'pool1')\n\n self.conv2_1 = self.conv_layer(self.pool1, \"conv2_1\")\n self.conv2_2 = self.conv_layer(self.conv2_1, \"conv2_2\")\n self.pool2 = self.max_pool(self.conv2_2, 'pool2')\n\n\n\n\n self.conv3_1 = self.conv_layer(self.pool2, \"conv3_1\")\n self.conv3_2 = self.conv_layer(self.conv3_1, \"conv3_2\")\n self.conv3_3 = self.conv_layer(self.conv3_2, \"conv3_3\")\n self.pool3 = self.max_pool(self.conv3_3, 'pool3')\n\n self.conv4_1 = self.conv_layer(self.pool3, \"conv4_1\")\n self.conv4_2 = self.conv_layer(self.conv4_1, \"conv4_2\")\n 
self.conv4_3 = self.conv_layer(self.conv4_2, \"conv4_3\")\n self.pool4 = self.max_pool(self.conv4_3, 'pool4')\n\n\n\n\n\n self.conv5_1 = self.conv_layer(self.pool4, \"conv5_1\")\n self.conv5_2 = self.conv_layer(self.conv5_1, \"conv5_2\")\n self.conv5_3 = self.conv_layer(self.conv5_2, \"conv5_3\")\n self.pool5 = self.max_pool(self.conv5_3, 'pool5')\n\n self.fc6 = self.fc_layer(self.pool5, \"fc6\")\n assert self.fc6.get_shape().as_list()[1:] == [4096]\n self.relu6 = tf.nn.relu(self.fc6)\n\n self.fc7 = self.fc_layer(self.relu6, \"fc7\")\n self.relu7 = tf.nn.relu(self.fc7)\n\n self.fc8 = self.fc_layer(self.relu7, \"fc8\")\n\n # self.fc9 = self.fc_layer(self.fc8,'fc9')\n # self.relu9 = tf.nn.relu(self.fc9)\n\n\n\n\n relu8 = tf.nn.relu(self.fc8)\n fc9 = self.fc_layer(relu8, 'fc9')\n print((\"build model finished: %ds\" % (time.time() - start_time)))\n return fc9\n\n # self.prob = tf.nn.softmax(self.fc8, name=\"prob\")", "def CameraNet(features, is_training):\n with tf.variable_scope(\"pose_net\"):\n batch_norm_params = {\"is_training\": is_training}\n\n pyramid_src_img_1 = features[0]\n pyramid_tgt_img = features[1]\n pyramid_src_img_2 = features[2]\n input_batch = tf.concat(\n [pyramid_src_img_1[4], pyramid_tgt_img[4], pyramid_src_img_2[4]], axis=3\n )\n\n with tf.variable_scope(\"conv1_a\"):\n conv1_a = conv2d(\n input_batch,\n NUM_FEATURES * 8,\n 3,\n 1,\n normalizer_params=batch_norm_params,\n activation_fn=tf.nn.relu,\n )\n with tf.variable_scope(\"conv1_b\"):\n conv1_b = conv2d(\n conv1_a,\n NUM_FEATURES * 8,\n 3,\n 2,\n normalizer_params=batch_norm_params,\n activation_fn=tf.nn.relu,\n )\n with tf.variable_scope(\"conv2_a\"):\n conv2_a = conv2d(\n conv1_b,\n NUM_FEATURES * 16,\n 3,\n 1,\n normalizer_params=batch_norm_params,\n activation_fn=tf.nn.relu,\n )\n with tf.variable_scope(\"conv2_b\"):\n conv2_b = conv2d(\n conv2_a,\n NUM_FEATURES * 16,\n 3,\n 2,\n normalizer_params=batch_norm_params,\n activation_fn=tf.nn.relu,\n )\n\n # POSE ESTIMATOR\n with tf.variable_scope(\"pred\"):\n pose_pred = conv2d(\n conv2_b, 12, 1, 1, normalizer_fn=None, activation_fn=None\n )\n pose_avg = tf.reduce_mean(pose_pred, [1, 2])\n pose_final = POSE_SCALING * tf.reshape(pose_avg, [-1, 2, 6])\n\n # INTRINSIC ESTIMATOR\n s = tf.shape(pyramid_tgt_img[0])\n h = tf.to_float(s[1])\n w = tf.to_float(s[2])\n intrinsics_mat = _estimate_intrinsics(conv2_b, w, h)\n\n return pose_final, intrinsics_mat", "def forward(self, x, fm, camera_mat):\n\n batch_size = x.shape[0]\n # List of initial 3D coordinates (first item) and outputs of the layers\n out = list()\n\n initial_coordinates_expanded = self.initial_coordinates.expand(\n batch_size, -1, -1)\n out.append(initial_coordinates_expanded)\n \n # #######################\n # First Projection Block\n # Layer 0: 156 x feat_dim\n out.append(self.gp(initial_coordinates_expanded, fm, camera_mat))\n out.append(self.gc1(out[-1])) # Layer 1: 156 x hidden_dim\n for i in range(0, 12): # GraphConvs from and to 156 x hidden_dim\n val = self.gc2[i](out[-1])\n if (i % 2) == 1:\n # Add previous output (Restnet style)\n val = torch.add(val, out[-2]) * 0.5\n out.append(val)\n # Layer 14: Out of dim 156x3, will be used as outputs_2[1]\n out.append(self.gc3(out[-1]))\n\n # #######################\n # Second Projection Block\n # Layer 15: 156 x (hidden_dim + feat_dim)\n v = self.gp(out[-1], fm, camera_mat)\n v = torch.cat([v, out[-2]], dim=2)\n out.append(v)\n # Layer 16: 618x (hidden_dim + feat_dim)\n out.append(self.gup1(out[-1]))\n out.append(self.gc4(out[-1])) # Layer 17: 618 x 
hidden_dim\n for i in range(0, 12): # GraphConvs from and to 618 x hidden_dim\n val = self.gc5[i](out[-1])\n if (i % 2) == 1:\n # Add previous output (Restnet style)\n val = torch.add(val, out[-2]) * 0.5\n out.append(val)\n # Layer 30: 618 x 3, will be used as outputs_2[2]\n out.append(self.gc6(out[-1]))\n\n # #######################\n # Third Projection Block\n # Layer 31: 618 x hidden_dim + feat_dim\n v = self.gp(out[-1], fm, camera_mat) # 618 x feat_dim\n v = torch.cat([v, out[-2]], dim=2)\n out.append(v)\n # Layer 32: 2466 x hidden_dim + feat_dim\n out.append(self.gup2(out[-1]))\n out.append(self.gc7(out[-1])) # Layer 33: 2466 x hidden_dim\n for i in range(0, 13): # GraphConvs from and to 2466 x hidden_dim\n val = self.gc8[i](out[-1])\n if i % 2 == 1:\n # Add previous output (Restnet style)\n val = torch.add(val, out[-2]) * 0.5\n out.append(val)\n out.append(self.gc9(out[-1])) # Layer 47: 2466 x 3\n # 156 x hidden_dim, 618 x hidden_dim, 2466 x hidden_dim\n outputs = (out[15], out[31], out[-1])\n outputs_2 = (initial_coordinates_expanded,\n self.gup1(out[15]), self.gup2(out[31]))\n\n return outputs, outputs_2", "def create_cam_colored(dl: DatasetLoader, model, outname: str, im_width=256, n=8, s=256):\n\n heatmaps = []\n for i in range(0, dl.nb_classes):\n predict_input = (cv2.imread(dl.baseDirectory + \"/\" + dl.picker[i].directory + \"/\" +\n dl.picker[i].name, cv2.IMREAD_COLOR))\n base = Image.open(dl.baseDirectory + \"/\" + dl.picker[i].directory + \"/\" +\n dl.picker[i].name)\n predict_input = predict_input.astype('float32')\n predict_input = np.expand_dims(predict_input, axis=0)\n predict_input = preprocess_input(predict_input)\n\n output_generator = get_outputs_generator(model, 'CAM')\n layer_outputs = output_generator(predict_input)[0]\n\n inputs = model.input\n output_predict = model.get_layer('W').output\n fn_predict = K.function([inputs], [output_predict])\n prediction = fn_predict([predict_input])[0]\n value = np.argmax(prediction)\n\n w = model.get_layer(\"W\").get_weights()[0]\n heatmap = cv2.resize(layer_outputs[:, :, 0], (im_width, im_width), interpolation=cv2.INTER_CUBIC)\n heatmap *= w[0][value]\n for z in range(1, layer_outputs.shape[2]): # Iterate through the number of kernels\n img = cv2.resize(layer_outputs[:, :, z], (im_width, im_width), interpolation=cv2.INTER_CUBIC)\n heatmap += img * w[z][value]\n\n heatmap = cv2.applyColorMap(np.uint8(np.asarray(ImageOps.invert(toimage(heatmap)))), cv2.COLORMAP_JET)\n heatmap = cv2.putText(heatmap, str(dl.picker[i].img_class), (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 0),\n 2)\n heatmap = toimage(heatmap)\n heatmap = reduce_opacity(heatmap, 0.5)\n base.paste(heatmap, (0, 0), heatmap)\n heatmaps.append(base)\n\n result = Image.new(\"RGB\", (n * s, (len(heatmaps) // n + 1) * s))\n for index, img in enumerate(heatmaps):\n x = index % n * 256\n y = index // n * 256\n w, h = img.size\n print('pos {0},{1} size {2},{3}'.format(x, y, w, h))\n result.paste(img, (x, y, x + w, y + h))\n\n result.save(outname)", "def generate_maps(dl: DatasetLoader, model, map_out: str, graph, all_classes=True, batch_size=10, mode='cv2'):\n # o_generator = get_outputs_generator(model, 'CAM')\n input = model.input\n output = model.get_layer('CAM').output\n output_predict = model.get_layer('W').output\n output_fn = K.function([input], [output])\n fn_predict = K.function([input], [output_predict])\n\n o_resizer = tf.image.resize_images\n o_dot = K.dot\n\n # plot CAMs only for the validation data:\n k = 0\n counter = 0\n img_arr = []\n with 
K.get_session() as sess:\n in_place = tf.placeholder(tf.float32, [None, None, None, 512])\n size_place = tf.placeholder(tf.int32, [2])\n convert_place = tf.placeholder(tf.float32, [512, len(dl.directories)])\n first_func = o_resizer(in_place, size_place, ResizeMethod.BICUBIC)\n second_func = o_dot(in_place, convert_place)\n graph.finalize()\n\n for i in range(dl.number_of_imgs_for_train, dl.number_of_imgs):\n with graph.as_default() as gr:\n if i == dl.number_of_imgs - 1:\n k = batch_size - 1\n rpath = dl.baseDirectory + \"/\" + dl.imgDataArray[i].directory + \"/\" + dl.imgDataArray[i].name\n img = cv2.imread(rpath, cv2.IMREAD_COLOR)\n # print('!!!!!!!!debug', rpath, i)\n img_arr.append(img)\n k += 1\n if k == batch_size:\n start_time = time.time()\n predict_input = np.asarray(img_arr, dtype='float32')\n predict_input = preprocess_input(predict_input)\n\n k = 0\n img_arr = []\n\n layer_outputs = output_fn([predict_input])[0]\n predictions = fn_predict([predict_input])[0]\n\n if mode == 'cv2': # model, layer_outputs, nb_classes, im_width=256):\n maps_arr = cam_generate_cv2(model, layer_outputs, dl.nb_classes)\n else:\n maps_arr = cam_generate_tf_ops(model, layer_outputs, sess, first_func, second_func, in_place,\n size_place,\n convert_place)\n\n for l, prediction in enumerate(predictions):\n inc = i - batch_size + l + 1\n outpath = map_out + \"/\" + dl.imgDataArray[inc].directory + \"/\" + dl.imgDataArray[inc].name\n # print('[DEBUG]', outpath, inc, i, batch_size, l)\n\n try:\n os.makedirs(outpath)\n except OSError:\n continue\n\n value = np.argmax(prediction)\n if all_classes:\n a = 0\n b = dl.nb_classes\n else:\n a = value\n b = value + 1\n for j in range(a, b):\n outname = outpath + \"/\" + str(j) + '.tiff'\n if mode == 'cv2':\n Image.fromarray(maps_arr[l][j]).save(outname)\n else:\n Image.fromarray(maps_arr[l, :, :, j]).save(outname)\n with open(outpath + '/resuts.json', 'w') as outfile:\n json.dump({'predicted': str(value), \"true_label\": str(dl.imgDataArray[inc].img_class)},\n outfile)\n print(\"cam(\", counter, \"/\", dl.number_of_imgs_for_test, \"completed\")\n counter += 1\n print(\"got cams in\", time.time() - start_time)", "def build_resnet50(self):\n use_batch_norm = self.use_batch_norm\n\n imgs = tf.placeholder(tf.float32, [self.batch_size]+self.img_shape)\n is_train = tf.placeholder(tf.bool)\n\n conv1_feats = convolution(imgs, 7, 7, 64, 2, 2, 'conv1')\n conv1_feats = batch_norm(conv1_feats, 'bn_conv1', is_train, use_batch_norm)\n conv1_feats = nonlinear(conv1_feats, 'relu')\n pool1_feats = max_pool(conv1_feats, 3, 3, 2, 2, 'pool1')\n\n res2a_feats = self.basic_block(pool1_feats, 'res2a', 'bn2a', is_train, use_batch_norm, 64, 1)\n res2b_feats = self.basic_block2(res2a_feats, 'res2b', 'bn2b', is_train, use_batch_norm, 64)\n res2c_feats = self.basic_block2(res2b_feats, 'res2c', 'bn2c', is_train, use_batch_norm, 64)\n \n res3a_feats = self.basic_block(res2c_feats, 'res3a', 'bn3a', is_train, use_batch_norm, 128)\n res3b_feats = self.basic_block2(res3a_feats, 'res3b', 'bn3b', is_train, use_batch_norm, 128)\n res3c_feats = self.basic_block2(res3b_feats, 'res3c', 'bn3c', is_train, use_batch_norm, 128)\n res3d_feats = self.basic_block2(res3c_feats, 'res3d', 'bn3d', is_train, use_batch_norm, 128)\n\n res4a_feats = self.basic_block(res3d_feats, 'res4a', 'bn4a', is_train, use_batch_norm, 256)\n res4b_feats = self.basic_block2(res4a_feats, 'res4b', 'bn4b', is_train, use_batch_norm, 256)\n res4c_feats = self.basic_block2(res4b_feats, 'res4c', 'bn4c', is_train, use_batch_norm, 256)\n 
res4d_feats = self.basic_block2(res4c_feats, 'res4d', 'bn4d', is_train, use_batch_norm, 256)\n res4e_feats = self.basic_block2(res4d_feats, 'res4e', 'bn4e', is_train, use_batch_norm, 256)\n res4f_feats = self.basic_block2(res4e_feats, 'res4f', 'bn4f', is_train, use_batch_norm, 256)\n\n res5a_feats = self.basic_block(res4f_feats, 'res5a', 'bn5a', is_train, use_batch_norm, 512)\n res5b_feats = self.basic_block2(res5a_feats, 'res5b', 'bn5b', is_train, use_batch_norm, 512)\n res5c_feats = self.basic_block2(res5b_feats, 'res5c', 'bn5c', is_train, use_batch_norm, 512)\n\n res5c_feats_flat = tf.reshape(res5c_feats, [self.batch_size, 49, 2048])\n self.conv_feats = res5c_feats_flat\n self.conv_feat_shape = [49, 2048]\n self.num_ctx = 49 \n self.dim_ctx = 2048\n\n self.imgs = imgs\n self.is_train = is_train", "def resnet50():\n\n X = K.Input(shape=(224, 224, 3))\n init = K.initializers.he_normal(seed=None)\n\n conv1 = K.layers.Conv2D(\n filters=64,\n kernel_size=(\n 7,\n 7),\n padding='same',\n strides=2,\n kernel_initializer=init)(X)\n\n bn1 = K.layers.BatchNormalization(axis=3)(conv1)\n\n activation1 = K.layers.Activation('relu')(bn1)\n\n maxpool1 = K.layers.MaxPooling2D(\n pool_size=(\n 3, 3), strides=(\n 2, 2), padding='same',)(activation1)\n\n Projection1 = projection_block(maxpool1, [64, 64, 256], s=1)\n IdenBlock1 = identity_block(Projection1, [64, 64, 256])\n IdenBlock2 = identity_block(IdenBlock1, [64, 64, 256])\n\n Projection2 = projection_block(IdenBlock2, [128, 128, 512])\n IdenBlock3 = identity_block(Projection2, [128, 128, 512])\n IdenBlock4 = identity_block(IdenBlock3, [128, 128, 512])\n IdenBlock5 = identity_block(IdenBlock4, [128, 128, 512])\n\n Projection3 = projection_block(IdenBlock5, [256, 256, 1024])\n IdenBlock6 = identity_block(Projection3, [256, 256, 1024])\n IdenBlock7 = identity_block(IdenBlock6, [256, 256, 1024])\n IdenBlock8 = identity_block(IdenBlock7, [256, 256, 1024])\n IdenBlock9 = identity_block(IdenBlock8, [256, 256, 1024])\n IdenBlock10 = identity_block(IdenBlock9, [256, 256, 1024])\n\n Projection4 = projection_block(IdenBlock10, [512, 512, 2048])\n IdenBlock11 = identity_block(Projection4, [512, 512, 2048])\n IdenBlock12 = identity_block(IdenBlock11, [512, 512, 2048])\n\n avgpool = K.layers.AveragePooling2D(\n pool_size=(\n 1, 1), strides=(\n 7, 7), padding='same',)(IdenBlock12)\n\n SoftMax = K.layers.Dense(\n units=1000,\n kernel_initializer=init,\n activation='softmax',\n )(avgpool)\n\n Keras = K.Model(inputs=X, outputs=SoftMax)\n\n return Keras", "def init(self):\n self.reparam_layers = []\n if self.model_type == \"GCN\":\n for i in range(self.num_layers):\n if self.reparam_all_layers is True:\n is_reparam = True\n elif isinstance(self.reparam_all_layers, tuple):\n reparam_all_layers = tuple([kk + self.num_layers if kk < 0 else kk for kk in self.reparam_all_layers])\n is_reparam = i in reparam_all_layers\n else:\n raise\n if is_reparam:\n self.reparam_layers.append(i)\n setattr(self, \"conv{}\".format(i + 1),\n GCNConv(self.num_features if i == 0 else self.latent_size,\n self.latent_size if i != self.num_layers - 1 else self.num_classes,\n cached=True,\n reparam_mode=self.reparam_mode if is_reparam else None,\n prior_mode=self.prior_mode if is_reparam else None,\n sample_size=self.sample_size,\n bias=True if self.with_relu else False,\n val_use_mean=self.val_use_mean,\n normalize=self.normalize,\n ))\n # self.conv1 = ChebConv(self.num_features, 16, K=2)\n # self.conv2 = ChebConv(16, self.num_features, K=2)\n\n elif self.model_type == \"GAT\":\n latent_size = 
int(self.latent_size / 2) # Under the default setting, latent_size = 8\n for i in range(self.num_layers):\n if i == 0:\n input_size = self.num_features\n else:\n if self.struct_dropout_mode[0] == 'DNsampling' or (self.struct_dropout_mode[0] == 'standard' and len(self.struct_dropout_mode) == 3):\n input_size = latent_size * 8 * 2\n else:\n input_size = latent_size * 8\n if self.reparam_all_layers is True:\n is_reparam = True\n elif isinstance(self.reparam_all_layers, tuple):\n reparam_all_layers = tuple([kk + self.num_layers if kk < 0 else kk for kk in self.reparam_all_layers])\n is_reparam = i in reparam_all_layers\n else:\n raise\n if is_reparam:\n self.reparam_layers.append(i)\n setattr(self, \"conv{}\".format(i + 1), GATConv(\n input_size,\n latent_size if i != self.num_layers - 1 else self.num_classes,\n heads=8 if i != self.num_layers - 1 else 1, concat=True,\n reparam_mode=self.reparam_mode if is_reparam else None,\n prior_mode=self.prior_mode if is_reparam else None,\n val_use_mean=self.val_use_mean,\n struct_dropout_mode=self.struct_dropout_mode,\n sample_size=self.sample_size,\n ))\n if self.struct_dropout_mode[0] == 'DNsampling' or (self.struct_dropout_mode[0] == 'standard' and len(self.struct_dropout_mode) == 3):\n setattr(self, \"conv{}_1\".format(i + 1), GATConv(\n input_size,\n latent_size if i != self.num_layers - 1 else self.num_classes,\n heads=8 if i != self.num_layers - 1 else 1, concat=True,\n reparam_mode=self.reparam_mode if is_reparam else None,\n prior_mode=self.prior_mode if is_reparam else None,\n val_use_mean=self.val_use_mean,\n struct_dropout_mode=self.struct_dropout_mode,\n sample_size=self.sample_size,\n ))\n # On the Pubmed dataset, use heads=8 in conv2.\n \n else:\n raise Exception(\"Model_type {} is not valid!\".format(self.model_type))\n\n self.reparam_layers = sorted(self.reparam_layers)\n \n if self.model_type == \"GCN\":\n if self.with_relu:\n reg_params = [getattr(self, \"conv{}\".format(i+1)).parameters() for i in range(self.num_layers - 1)]\n self.reg_params = itertools.chain(*reg_params)\n self.non_reg_params = getattr(self, \"conv{}\".format(self.num_layers)).parameters()\n else:\n self.reg_params = OrderedDict()\n self.non_reg_params = self.parameters()\n else:\n self.reg_params = self.parameters()\n self.non_reg_params = OrderedDict()\n self.to(self.device)", "def model(x_crop, y_, reuse):\n with tf.variable_scope(\"model\", reuse=reuse):\n net = tl.layers.InputLayer(x_crop, name='input')\n output1 = tl.layers.Conv2d(net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', name='cnn1')\n net = tl.layers.MaxPool2d(output1, (3, 3), (2, 2), padding='SAME', name='pool1')\n output2 = tl.layers.Conv2d(net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', name='cnn2')\n net = tl.layers.MaxPool2d(output2, (3, 3), (2, 2), padding='SAME', name='pool2')\n net = tl.layers.FlattenLayer(net, name='flatten')\n output3 = tl.layers.DenseLayer(net, 384, act=tf.nn.relu, name='d1relu')\n output4 = tl.layers.DenseLayer(output3, 192, act=tf.nn.relu, name='d2relu')\n output5 = tl.layers.DenseLayer(output4, 10, act=None, name='output')\n\n return output1.outputs, output2.outputs, output3.outputs, output4.outputs, output5.outputs, output5", "def update_net(optimizer):\n assert kl_train_dataset.bp_mode\n frames_gen, frame_cnt, rel_props, prop_ticks, prop_scaling = kl_train_dataset[index]\n\n optimizer.zero_grad()\n \n num_crop = 1\n length = 3\n if args.modality == 'Flow':\n length = 10\n elif args.modality == 'RGBDiff':\n length = 18\n \n for frames in frames_gen:\n 
# frames.shape == [frame_batch_size * num_crop * 3, 224, 224]\n assert len(frames) == length * frame_cnt\n input_var = torch.autograd.Variable(frames.view(-1, length, frames.size(-2), frames.size(-1)).cuda())\n base_out = net(input_var, None, None, None, None)\n assert base_out.size(0) == frame_cnt and base_out.size(1) == base_out_dim\n step_features = base_out.mean(dim=0).unsqueeze(0)\n gate, glcu_task_pred = net.glcu(step_features)\n gate = gate.repeat(1, frame_cnt).view(frame_cnt, base_out_dim)\n assert glcu_task_pred.size(0) == 1\n glcu_task_pred = F.softmax(glcu_task_pred.squeeze(), dim=0)\n if net.additive_glcu:\n base_out = base_out + gate\n else:\n base_out = base_out * gate\n\n output = net.test_fc(base_out)\n assert output.size(0) == frame_cnt and output.size(1) == output_dim\n act_scores, comp_scores, reg_scores = reorg_stpp.forward(output, prop_ticks, prop_scaling, bp_mode=True)\n\n # Task Head\n combined_scores = F.softmax(act_scores[:, 1:], dim=1) * torch.exp(comp_scores)\n combined_scores = combined_scores.mean(dim=0).unsqueeze(0)\n task_pred = net.task_head(combined_scores)\n assert task_pred.size(0) == 1\n task_pred = F.softmax(net.task_head(combined_scores).squeeze(), dim=0)\n\n loss = KL(task_pred, glcu_task_pred)\n loss.backward()\n torch.cuda.empty_cache() # To empty the cache from previous iterations\n break\n\n optimizer.step()\n optimizer.zero_grad()\n torch.cuda.empty_cache()\n\n return float(loss.data), frame_cnt", "def mgcNetArchMax(outLayer, l2_val, **kwargs):\n\n def_vals = {\"input_img_rows\" : 72,\n \"input_img_cols\" : 72,\n \"channels\" : 1,\n \"nb_classes\" : 13\n } # default parameters value\n\n for k, v in def_vals.items():\n kwargs.setdefault(k, v)\n\n input_img_rows = kwargs['input_img_rows']\n input_img_cols = kwargs['input_img_cols']\n channels = kwargs['channels']\n nb_classes = kwargs['nb_classes']\n\n \n # Input: 72 x 72 x 1\n img_shape = layers.Input(shape = (input_img_rows, input_img_cols, channels))\n\n # Layer 1\n #------------------------\n conv1 = layers.Conv2D(filters=32, kernel_size=(2, 2), padding='same', kernel_regularizer=regularizers.l2(l2_val))(img_shape)\n conv1 = layers.Activation('relu')(conv1)\n conv1 = layers.MaxPooling2D(pool_size=(2, 2))(conv1)\n conv1 = layers.Dropout(0.4)(conv1)\n\n # Layer 2\n #------------------------\n conv2 = layers.Conv2D(filters=64, kernel_size=(2,2), padding='same', kernel_regularizer=regularizers.l2(l2_val))(conv1)\n conv2 = layers.Activation('relu')(conv2) \n conv2 = layers.MaxPooling2D(pool_size=(2, 2))(conv2)\n conv2 = layers.Dropout(0.4)(conv2)\n\n # Layer 3\n #------------------------\n conv3 = layers.Conv2D(filters=128, kernel_size=(2,2), padding='same', kernel_regularizer=regularizers.l2(l2_val))(conv2)\n conv3 = layers.Activation('relu')(conv3) \n conv3 = layers.MaxPooling2D(pool_size=(2, 2))(conv3)\n conv3 = layers.Dropout(0.4)(conv3)\n\n # Layer 4\n #------------------------\n conv4 = layers.Conv2D(filters=256, kernel_size=(2,2), padding='same', dilation_rate = (2, 2), kernel_regularizer=regularizers.l2(l2_val))(conv3)\n conv4 = layers.Activation('relu')(conv4)\n conv4 = layers.MaxPooling2D(pool_size=(2, 2))(conv4)\n conv4 = layers.Dropout(0.4)(conv4)\n\n # Layer 5\n #------------------------\n output = layers.Conv2D(filters=128, kernel_size=(2,2), padding='same', kernel_regularizer=regularizers.l2(l2_val))(conv3) # skip layer 4\n output = layers.Activation('relu')(output) \n output = layers.MaxPooling2D(pool_size=(2, 2))(output)\n output = layers.Dropout(0.4)(output)\n\n\n \n # FC Layer\n 
#------------------------\n outputmlp = layers.Flatten()(output)\n outputmlp = layers.Dense(64, activation = 'relu')(outputmlp)\n outputmlp = layers.Dropout(0.5)(outputmlp)\n\n predictionsMlp = layers.Dense(nb_classes, activation='softmax')(outputmlp)\n \n \n # global averaging\n weight_decay=1E-4\n concat_axis = 1\n \n x = BatchNormalization(axis=concat_axis,\n gamma_regularizer=regularizers.l2(weight_decay),\n beta_regularizer=regularizers.l2(weight_decay))(output)\n x = Activation('relu')(x)\n x = layers.Dropout(0.4)(x)\n x = GlobalAveragePooling2D(data_format=K.image_data_format())(x)\n \n predictionsGloAvg = layers.Dense(nb_classes,\n activation='softmax',\n kernel_regularizer=regularizers.l2(weight_decay),\n bias_regularizer=regularizers.l2(weight_decay))(x)\n \n if outLayer == \"gloAvg\":\n predictions = predictionsGloAvg\n elif outLayer == \"mlp\":\n predictions = predictionsMlp\n \n # prediction model\n model = Model(img_shape, predictions, name = 'cnn_max')\n\n return model", "def build_model(self):\n \n start_time = time.time()\n print(\"build model started\")\n # label\n self.FA = tf.placeholder(dtype=tf.int32, shape=[None])\n self.ges = tf.placeholder(dtype=tf.int32, shape=[None])\n self.obj = tf.placeholder(dtype=tf.int32, shape=[None])\n \n self.images = tf.placeholder(dtype=tf.float32, shape=[None, height, width, 3])\n batch_size = tf.shape(self.images)[0]\n rgb_scaled = self.images * 255.0\n\n # Convert RGB to BGR\n VGG_MEAN = [103.939, 116.779, 123.68]\n red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=rgb_scaled)\n assert red.get_shape().as_list()[1:] == [224, 224, 1]\n assert green.get_shape().as_list()[1:] == [224, 224, 1]\n assert blue.get_shape().as_list()[1:] == [224, 224, 1]\n bgr = tf.concat(axis=3, values=[\n blue - VGG_MEAN[0],\n green - VGG_MEAN[1],\n red - VGG_MEAN[2],\n ])\n assert bgr.get_shape().as_list()[1:] == [224, 224, 3]\n \n with tf.variable_scope(\"vgg19\"):\n self.conv1_1 = self.conv_layer(bgr, \"conv1_1\")\n self.conv1_2 = self.conv_layer(self.conv1_1, \"conv1_2\")\n self.pool1 = self.max_pool(self.conv1_2, 'pool1')\n\n self.conv2_1 = self.conv_layer(self.pool1, \"conv2_1\")\n self.conv2_2 = self.conv_layer(self.conv2_1, \"conv2_2\")\n self.pool2 = self.max_pool(self.conv2_2, 'pool2')\n\n self.conv3_1 = self.conv_layer(self.pool2, \"conv3_1\")\n self.conv3_2 = self.conv_layer(self.conv3_1, \"conv3_2\")\n self.conv3_3 = self.conv_layer(self.conv3_2, \"conv3_3\")\n self.conv3_4 = self.conv_layer(self.conv3_3, \"conv3_4\")\n self.pool3 = self.max_pool(self.conv3_4, 'pool3')\n\n self.conv4_1 = self.conv_layer(self.pool3, \"conv4_1\")\n self.conv4_2 = self.conv_layer(self.conv4_1, \"conv4_2\")\n self.conv4_3 = self.conv_layer(self.conv4_2, \"conv4_3\")\n self.conv4_4 = self.conv_layer(self.conv4_3, \"conv4_4\")\n self.pool4 = self.max_pool(self.conv4_4, 'pool4')\n\n self.conv5_1 = self.conv_layer(self.pool4, \"conv5_1\")\n self.conv5_2 = self.conv_layer(self.conv5_1, \"conv5_2\")\n self.conv5_3 = self.conv_layer(self.conv5_2, \"conv5_3\")\n self.conv5_4 = self.conv_layer(self.conv5_3, \"conv5_4\")\n self.pool5 = self.max_pool(self.conv5_4, 'pool5')\n\n \n shape = self.pool5.get_shape()\n size = 1\n for dim in shape[1:]:\n size *= dim.value\n \n # dense\n with tf.variable_scope('dense') as scope:\n # Move everything into depth so we can perform a single matrix multiply.\n reshape = tf.reshape(self.pool5, [-1, size])\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[size, 192]))\n biases = 
tf.get_variable('biases', [192], initializer=tf.constant_initializer(0.1))\n dense = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)\n\n\n # linear layer(WX + b),\n with tf.variable_scope('softmax_linear_FA') as scope:\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[192, 2]))\n biases = tf.get_variable('biases', [2], initializer=tf.constant_initializer(0.1))\n softmax_linear_FA = tf.add(tf.matmul(dense, weights), biases, name=scope.name)\n self.output_FA = tf.nn.softmax(softmax_linear_FA)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.FA, logits=softmax_linear_FA, name='cross_entropy')\n cross_entropy_mean_FA = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')\n\n with tf.variable_scope('softmax_linear_ges') as scope:\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[192, 13]))\n biases = tf.get_variable('biases', [13], initializer=tf.constant_initializer(0.1))\n softmax_linear_ges = tf.add(tf.matmul(dense, weights), biases, name=scope.name)\n self.output_ges = tf.nn.softmax(softmax_linear_ges)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.ges, logits=softmax_linear_ges, name='cross_entropy')\n cross_entropy_mean_ges = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')\n\n with tf.variable_scope('softmax_linear_obj') as scope:\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[192, 24]))\n biases = tf.get_variable('biases', [24], initializer=tf.constant_initializer(0.1))\n softmax_linear_obj = tf.add(tf.matmul(dense, weights), biases, name=scope.name)\n self.output_obj = tf.nn.softmax(softmax_linear_obj)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.obj, logits=softmax_linear_obj, name='cross_entropy')\n cross_entropy_mean_obj = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')\n\n self.loss = cross_entropy_mean_FA + cross_entropy_mean_ges + cross_entropy_mean_obj\n self.lr = tf.placeholder(tf.float32, [])\n self.global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n optimizer = tf.train.AdamOptimizer(self.lr)\n grads_and_vars = optimizer.compute_gradients(self.loss)\n self.train_op = optimizer.apply_gradients(grads_and_vars, global_step=self.global_step)\n self.data_dict = None\n print((\"build model finished: %ds\" % (time.time() - start_time)))", "def ResNet18(input_shape = (28, 28, 1), classes = 24):\n \n # Define the input as a tensor with shape input_shape\n X = X_input = Input(input_shape)\n\n \n # Zero-Padding\n X = ZeroPadding2D((3, 3))(X_input)\n \n # Stage 1\n X = Conv2D(64, (7, 7), strides = (2, 2), name = 'conv1', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = 'bn_conv1')(X)\n X = Activation('relu')(X)\n #X = MaxPooling2D((3, 3), strides=(2, 2))(X)\n\n # Stage 2\n X = convolutional_block(X, [64, 64], stage=2, block='a')\n X = identity_block(X, [64, 64], stage=2, block='b')\n\n # Stage 3\n X = convolutional_block(X, [128, 128], stage=3, block='a')\n X = identity_block(X, [128, 128], stage=3, block='b')\n\n # Stage 4\n X = convolutional_block(X, [256, 256], stage=4, block='a')\n X = identity_block(X, [256, 256], stage=4, block='b')\n\n # Stage 5\n X = convolutional_block(X, [512, 512], stage=5, block='a')\n X = identity_block(X, [512, 512], stage=5, block='b')\n\n # AVGPOOL\n # X = AveragePooling2D(pool_size=(2,2), name='avg_pool')(X)\n\n # output layer\n X = Flatten()(X)\n X = Dense(classes, 
activation='softmax', name='fc' + str(classes), kernel_initializer = glorot_uniform(seed=0))(X)\n \n # Create model\n model = Model(inputs = X_input, outputs = X, name='ResNet18')\n\n return model", "def build(imageWidth, imageHeight, imageDepth, classesNumber, finalAct=\"sigmoid\"):\n\n # inizializzo il modello come sequenziale\n model = Sequential()\n inputShape = (imageHeight, imageWidth, imageDepth)\n chanDim = -1\n\n # Primo blocco Conv2D, Relu, Normalization, MaxPool\n # Utilizzo 32 filtri 3*3\n model.add(Conv2D(filters=32, kernel_size=(3, 3), padding=\"same\", input_shape=inputShape))\n # con attivazione Rectified Linear Unit\n model.add(Activation(\"relu\"))\n # applico una batch normalization\n model.add(BatchNormalization(axis=chanDim))\n # un MaxPooling 3*3\n model.add(MaxPooling2D(pool_size=(3, 3)))\n # ed un 25% di dropout per ridurre overfitting\n model.add(Dropout(0.25))\n\n # Secondo blocco\n model.add(Conv2D(64, (3, 3), padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(axis=chanDim))\n model.add(Conv2D(64, (3, 3), padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(axis=chanDim))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n # Terzo blocco\n model.add(Conv2D(128, (3, 3), padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(axis=chanDim))\n model.add(Conv2D(128, (3, 3), padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(axis=chanDim))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n # Passo ai Fully Connected Layers\n # Trasformo il modello in un vettore\n model.add(Flatten())\n model.add(Dense(1024))\n model.add(Activation(\"sigmoid\"))\n model.add(BatchNormalization())\n model.add(Dropout(0.5))\n\n # Infine utilizzo l'attivazione per la rete\n model.add(Dense(classesNumber))\n model.add(Activation(finalAct))\n\n return model", "def build_resnet152(self):\n use_batch_norm = self.use_batch_norm\n\n imgs = tf.placeholder(tf.float32, [self.batch_size]+self.img_shape)\n is_train = tf.placeholder(tf.bool)\n\n conv1_feats = convolution(imgs, 7, 7, 64, 2, 2, 'conv1')\n conv1_feats = batch_norm(conv1_feats, 'bn_conv1', is_train, use_batch_norm)\n conv1_feats = nonlinear(conv1_feats, 'relu')\n pool1_feats = max_pool(conv1_feats, 3, 3, 2, 2, 'pool1')\n\n res2a_feats = self.basic_block(pool1_feats, 'res2a', 'bn2a', is_train, use_batch_norm, 64, 1)\n res2b_feats = self.basic_block2(res2a_feats, 'res2b', 'bn2b', is_train, use_batch_norm, 64)\n res2c_feats = self.basic_block2(res2b_feats, 'res2c', 'bn2c', is_train, use_batch_norm, 64)\n \n res3a_feats = self.basic_block(res2c_feats, 'res3a', 'bn3a', is_train, use_batch_norm, 128) \n temp = res3a_feats\n for i in range(1, 8):\n temp = self.basic_block2(temp, 'res3b'+str(i), 'bn3b'+str(i), is_train, use_batch_norm, 128)\n res3b7_feats = temp\n \n res4a_feats = self.basic_block(res3b7_feats, 'res4a', 'bn4a', is_train, use_batch_norm, 256)\n temp = res4a_feats\n for i in range(1, 36):\n temp = self.basic_block2(temp, 'res4b'+str(i), 'bn4b'+str(i), is_train, use_batch_norm, 256)\n res4b35_feats = temp\n\n res5a_feats = self.basic_block(res4b35_feats, 'res5a', 'bn5a', is_train, use_batch_norm, 512)\n res5b_feats = self.basic_block2(res5a_feats, 'res5b', 'bn5b', is_train, use_batch_norm, 512)\n res5c_feats = self.basic_block2(res5b_feats, 'res5c', 'bn5c', is_train, use_batch_norm, 512)\n\n res5c_feats_flat = tf.reshape(res5c_feats, [self.batch_size, 49, 2048])\n 
self.conv_feats = res5c_feats_flat\n self.conv_feat_shape = [49, 2048]\n self.num_ctx = 49 \n self.dim_ctx = 2048\n\n self.imgs = imgs\n self.is_train = is_train", "def genModel():\n inp = (160, 320, 3) # initial image size\n oup1 = (160, 320, 1) # gray image size\n oup2 = (80, 320, 1) # cropped image size\n\n model = Sequential()\n model.add(Lambda(color2gray, input_shape = inp, output_shape= oup1))\n # crop top 50 pixels, bottom 30 pixels, left/right 0 pixels\n model.add(Cropping2D(cropping=((50,30), (0,0))))\n # Preprocess incoming data, centered around zero with small standard deviation \n model.add(Lambda(lambda x: x/127.5 - 1., output_shape= oup2))\n model.add(Convolution2D(24,5,5,subsample=(1,2), activation=\"relu\"))\n model.add(Convolution2D(36,5,5,subsample=(2,2), activation=\"relu\"))\n model.add(Convolution2D(48,5,5,subsample=(2,2), activation=\"relu\"))\n model.add(Convolution2D(64,3,3, activation=\"relu\"))\n model.add(Convolution2D(64,3,3, activation=\"relu\"))\n model.add(Flatten())\n model.add(Dropout(0.3))\n model.add(Dense(180, activation=\"relu\"))\n model.add(Dense(60))\n model.add(Dense(10, activation=\"relu\"))\n model.add(Dense(1))\n # print layer size for each model layers\n for layer in model.layers:\n print(layer.get_output_at(0).get_shape().as_list())\n return model", "def mgcNetArchRes(outLayer, l2_val, **kwargs):\n\n def_vals = {\"input_img_rows\" : 72,\n \"input_img_cols\" : 72,\n \"channels\" : 1,\n \"nb_classes\" : 13\n } # default parameters value\n\n for k, v in def_vals.items():\n kwargs.setdefault(k, v)\n\n input_img_rows = kwargs['input_img_rows']\n input_img_cols = kwargs['input_img_cols']\n channels = kwargs['channels']\n nb_classes = kwargs['nb_classes']\n\n \n # Input: 72 x 72 x 1\n img_shape = layers.Input(shape = (input_img_rows, input_img_cols, channels))\n\n # Layer 1\n #------------------------\n conv1 = layers.Conv2D(filters=32, kernel_size=(2, 2), padding='same', kernel_regularizer=regularizers.l2(l2_val))(img_shape)\n conv1 = layers.Activation('relu')(conv1)\n conv1 = layers.MaxPooling2D(pool_size=(2, 2))(conv1)\n conv1 = layers.Dropout(0.4)(conv1)\n \n conv1 = residual_block(img_shape, 32, _strides=(1, 1), _project_shortcut=False)\n\n # Layer 2\n #------------------------\n conv2 = residual_block(conv1, 32, _strides=(1, 1), _project_shortcut=False)\n\n # Layer 3\n #------------------------\n conv3 = residual_block(conv2, 32, _strides=(1, 1), _project_shortcut=False)\n \n # Layer 4\n # -----------------------\n #residual = residual_block(conv3, 64, _strides=(1, 1), _project_shortcut=False)\n\n # Layer 5\n #------------------------\n output = layers.Conv2D(filters=128, kernel_size=(2,2), padding='same', kernel_regularizer=regularizers.l2(l2_val))(conv3) # skip layer 4\n output = layers.Activation('relu')(output)\n output = layers.Conv2D(filters=64, kernel_size=(2,2), padding='same', activation='relu', strides = 2)(output) \n output = layers.Dropout(0.4)(output)\n\n\n \n # FC Layer\n #------------------------\n outputmlp = layers.Flatten()(output)\n outputmlp = layers.Dense(64, activation = 'relu')(outputmlp)\n outputmlp = layers.Dropout(0.5)(outputmlp)\n\n predictionsMlp = layers.Dense(nb_classes, activation='softmax')(outputmlp)\n \n \n # global averaging\n weight_decay=1E-4\n concat_axis = 1\n \n x = BatchNormalization(axis=concat_axis,\n gamma_regularizer=regularizers.l2(weight_decay),\n beta_regularizer=regularizers.l2(weight_decay))(output)\n x = Activation('relu')(x)\n x = layers.Dropout(0.4)(x)\n x = 
GlobalAveragePooling2D(data_format=K.image_data_format())(x)\n \n predictionsGloAvg = layers.Dense(nb_classes,\n activation='softmax',\n kernel_regularizer=regularizers.l2(weight_decay),\n bias_regularizer=regularizers.l2(weight_decay))(x)\n \n if outLayer == \"gloAvg\":\n predictions = predictionsGloAvg\n elif outLayer == \"mlp\":\n predictions = predictionsMlp\n \n # prediction model\n model = Model(img_shape, predictions, name = 'resblock')\n\n return model", "def build_vgg16(self):\n use_batch_norm = self.use_batch_norm\n\n imgs = tf.placeholder(tf.float32, [self.batch_size]+self.img_shape)\n is_train = tf.placeholder(tf.bool)\n\n conv1_1_feats = convolution(imgs, 3, 3, 64, 1, 1, 'conv1_1')\n conv1_1_feats = nonlinear(conv1_1_feats, 'relu')\n conv1_2_feats = convolution(conv1_1_feats, 3, 3, 64, 1, 1, 'conv1_2')\n conv1_2_feats = nonlinear(conv1_2_feats, 'relu')\n pool1_feats = max_pool(conv1_2_feats, 2, 2, 2, 2, 'pool1')\n\n conv2_1_feats = convolution(pool1_feats, 3, 3, 128, 1, 1, 'conv2_1')\n conv2_1_feats = nonlinear(conv2_1_feats, 'relu')\n conv2_2_feats = convolution(conv2_1_feats, 3, 3, 128, 1, 1, 'conv2_2')\n conv2_2_feats = nonlinear(conv2_2_feats, 'relu')\n pool2_feats = max_pool(conv2_2_feats, 2, 2, 2, 2, 'pool2')\n\n conv3_1_feats = convolution(pool2_feats, 3, 3, 256, 1, 1, 'conv3_1')\n conv3_1_feats = nonlinear(conv3_1_feats, 'relu')\n conv3_2_feats = convolution(conv3_1_feats, 3, 3, 256, 1, 1, 'conv3_2')\n conv3_2_feats = nonlinear(conv3_2_feats, 'relu')\n conv3_3_feats = convolution(conv3_2_feats, 3, 3, 256, 1, 1, 'conv3_3')\n conv3_3_feats = nonlinear(conv3_3_feats, 'relu')\n pool3_feats = max_pool(conv3_3_feats, 2, 2, 2, 2, 'pool3')\n\n conv4_1_feats = convolution(pool3_feats, 3, 3, 512, 1, 1, 'conv4_1')\n conv4_1_feats = nonlinear(conv4_1_feats, 'relu')\n conv4_2_feats = convolution(conv4_1_feats, 3, 3, 512, 1, 1, 'conv4_2')\n conv4_2_feats = nonlinear(conv4_2_feats, 'relu')\n conv4_3_feats = convolution(conv4_2_feats, 3, 3, 512, 1, 1, 'conv4_3')\n conv4_3_feats = nonlinear(conv4_3_feats, 'relu')\n pool4_feats = max_pool(conv4_3_feats, 2, 2, 2, 2, 'pool4')\n\n conv5_1_feats = convolution(pool4_feats, 3, 3, 512, 1, 1, 'conv5_1')\n conv5_1_feats = nonlinear(conv5_1_feats, 'relu')\n conv5_2_feats = convolution(conv5_1_feats, 3, 3, 512, 1, 1, 'conv5_2')\n conv5_2_feats = nonlinear(conv5_2_feats, 'relu')\n conv5_3_feats = convolution(conv5_2_feats, 3, 3, 512, 1, 1, 'conv5_3')\n conv5_3_feats = nonlinear(conv5_3_feats, 'relu')\n\n conv5_3_feats_flat = tf.reshape(conv5_3_feats, [self.batch_size, 196, 512])\n self.conv_feats = conv5_3_feats_flat\n self.conv_feat_shape = [196, 512]\n self.num_ctx = 196 \n self.dim_ctx = 512\n\n self.imgs = imgs\n self.is_train = is_train", "def forward(self, x):\n for name, module in self.base._modules.items():\n if name == 'avgpool':\n break\n\n if name == 'layer3':\n l2 = Variable(x)\n\n x = Variable(module(x))\n l4 = Variable(x)\n\n \"\"\"for name, param in self.base.named_parameters():\n print(name, param.size())\n\n res50_model = self.base\n res50_conv2 = ResNet50Bottom(res50_model)\n for i,child in enumerate(self.base.children()):\n print(i)\n if i==8:\n l4=x\n break\n if i==6:\n l2=x\n x=res50_conv2(x.detach())\"\"\"\n\n s2 = l2.sum(1) #/ 100\n #\n s4 = l4.sum(1) #/ 1000\n\n\n sw2 = s2 / (s2.view(x.size(0), -1).sum(1)).unsqueeze(1).unsqueeze(2)\n\n sw4 = s4 / (s4.view(x.size(0), -1).sum(1)).unsqueeze(1).unsqueeze(2)\n\n\n l2 = l2 * sw2.unsqueeze(1)\n l4 = l4 * sw4.unsqueeze(1)\n\n \n c2 = self.inconv2(l2)\n c4 = self.inconv4(l4)\n c2 = 
self.bn2(c2)\n c4 = self.bn4(c4)\n \n n2 = F.softmax(torch.mean(torch.mean(c2, dim=2), dim=2), dim=1)\n n4 = F.softmax(torch.mean(torch.mean(c4, dim=2), dim=2), dim=1)\n nn2 = n2.data.cpu().numpy()\n nn4 = n4.data.cpu().numpy()\n cam2 = np.zeros((x.size(0), 28, 28), dtype=float)\n cam4 = np.zeros((x.size(0), 7, 7), dtype=float)\n\n\n for i in range(0, x.size(0)):\n for j in range(0, 2):\n temp1 = c2[i, j, :, :].data.cpu().numpy()\n temp1 = np.maximum(temp1, 0)\n temp1 = temp1 - np.min(temp1)\n temp1 = temp1 / (np.max(temp1)+1e-8)\n cam2[i] = cam2[i] + nn2[i, j] * temp1\n cam2 = torch.FloatTensor(cam2)\n l2 = l2 * (cam2.unsqueeze(1).cuda())\n l2 = self.stack1(l2)\n l2 = self.stack1_1(l2)\n\n for i in range(0, x.size(0)):\n for j in range(0, 8):\n temp2 = c4[i, j, :, :].data.cpu().numpy()\n temp2 = np.maximum(temp2, 0)\n temp2 = temp2 - np.min(temp2)\n temp2 = temp2 / (np.max(temp2)+1e-8)\n cam4[i] =cam4[i] + nn4[i, j] * temp2\n cam4 = torch.FloatTensor(cam4)\n l4 = l4 * cam4.unsqueeze(1).cuda()\n l4 = self.stack3(l4)\n X = l2.view(x.size(0), 512, 7 ** 2)\n Y = l4.view(x.size(0), 512, 7 ** 2)\n Z = self.cross_bilinear(X, Y)\n return n2, n4, Z", "def sweep_image_model():\n for c1 in [4, 8, 16]:\n for c2 in [2, 4]:\n for c3 in [2, 4]:\n for c4 in [1, 2]:\n flags = flag_reader.read_flag()\n print(c1)\n flags.channel_list = c1 * np.array([1, c2, c2*c3, c2*c3*c4])\n print('channel list = ', flags.channel_list)\n flags.last_dim = flags.channel_list[-1]\n flags.model_name = flags.data_set + '_channel_' + str(flags.channel_list).replace('[','').replace(']','').replace(' ','_') + \\\n '_dim_last_' + str(flags.last_dim) + '_ind_' + str(flags.comp_ind) + \\\n '_lr_{}_decay_{}_reg_{}_bs_{}'.format(flags.lr, flags.lr_decay_rate, flags.reg_scale, flags.batch_size)\n print(flags.model_name)\n training_from_flag(flags)", "def process(bayer_images, red_gains, blue_gains, cam2rgbs):\n bayer_images.shape.assert_is_compatible_with((None, None, None, 4))\n with tf.name_scope(None, 'process'):\n # White balance.\n bayer_images = apply_gains(bayer_images, red_gains, blue_gains)\n # Demosaic.\n bayer_images = tf.clip_by_value(bayer_images, 0.0, 1.0)\n images = demosaic(bayer_images)\n # Color correction.\n images = apply_ccms(images, cam2rgbs)\n # Gamma compression.\n images = tf.clip_by_value(images, 0.0, 1.0)\n images = gamma_compression(images)\n return images", "def define(self, optimizer = Adam(lr=1e-5)): \n \n self.optimizer = optimizer\n\n model = Sequential()\n\n #Layer 1\n model.add(Conv2D( filters = 96, \n kernel_size = (11,11), \n strides = 4, \n padding = 'same', \n activation = 'relu', \n input_shape = (224, 224, 3), \n kernel_initializer = 'he_normal'))\n model.add(MaxPooling2D( pool_size = (3,3), \n strides = (2,2), \n padding= 'same', \n data_format = None)) # overlapping pooling\n #Layer 2\n model.add(Conv2D( filters = 256, \n kernel_size = (5,5), \n strides = 1, \n padding = 'same', \n activation = 'relu', \n kernel_initializer = 'he_normal'))\n model.add(MaxPooling2D( pool_size = (3,3), \n strides = (2,2), \n padding= 'same', \n data_format = None)) \n #Layer 3\n model.add(Conv2D( filters = 384, \n kernel_size = (3,3), \n strides = 1, padding = 'same', \n activation = 'relu', kernel_initializer = 'he_normal'))\n #Layer 4\n model.add(Conv2D( filters = 384, \n kernel_size = (3,3), \n strides = 1, padding = 'same', \n activation = 'relu', \n kernel_initializer = 'he_normal'))\n #Layer 5\n model.add(Conv2D( filters = 256, \n kernel_size = (3,3), \n strides = 1, padding = 'same', \n activation = 
'relu', \n kernel_initializer = 'he_normal'))\n #Layer 6\n model.add(MaxPooling2D( pool_size = (3,3), \n strides = (2,2), \n padding= 'same', \n data_format = None))\n \n #Layer 7\n model.add(Flatten())\n \n #Layer 8\n model.add(Dense( units = 4096, activation = 'relu'))\n model.add(Dense( units = 1024, activation = 'relu'))\n model.add(Dense( units = 512, activation = 'relu'))\n model.add(Dense( units = 256, activation = 'relu'))\n model.add(Dense( units = 128, activation = 'relu'))\n \n #Layer end\n model.add(Dense( units = 3, activation = 'softmax'))\n model.summary()\n \n self.model = model", "def build_resnet_generator(self, model_shape, filters=32, k_size=3, last_act='tanh', summary=False, model_file=None, name='gan_g_'):\n if (model_file):\n \"\"\"\n Load pretreined model\n \"\"\"\n model = self.utils.build_pretrained_model(model_file)\n if (summary):\n model.summary()\n return model\n else:\n init = RandomNormal(stddev=0.02)\n n_rows = model_shape[0]\n n_cols = model_shape[1]\n in_c_dims = model_shape[2]\n out_c_dims = model_shape[3]\n \n n_rows_e1, n_rows_e2, n_rows_e4, n_rows_e8 = n_rows//1, n_rows//2, n_rows//4, n_rows//8\n rows_matching = np.equal([2*n_rows_e2, 2*n_rows_e4, 2*n_rows_e8], [n_rows_e1, n_rows_e2, n_rows_e4])\n index_rows = np.where(np.logical_not(rows_matching))[0]\n \n n_cols_e1, n_cols_e2, n_cols_e4, n_cols_e8 = n_cols//1, n_cols//2, n_cols//4, n_cols//8\n cols_matching = np.equal([2*n_cols_e2, 2*n_cols_e4, 2*n_cols_e8], [n_cols_e1, n_cols_e2, n_cols_e4])\n index_cols = np.where(np.logical_not(cols_matching))[0]\n \n input_shape = (n_rows, n_cols, in_c_dims)\n input_layer = Input(shape=input_shape, name=name+'_input')\n \n e1 = self.Conv2D_Block(input_layer, n_kernels=filters, k_size=7, strides=1, bn=False,name=name+'e1') # rows, cols\n e2 = self.Conv2D_Block(e1, 2*filters, k_size=k_size, bn_training=True, name=name+'e2') # rows/2, cols/2\n e3 = self.Conv2D_Block(e2, 4*filters, k_size=k_size, bn_training=True, name=name+'e3') # rows/4, cols/4\n e4 = self.Conv2D_Block(e3, 8*filters, k_size=k_size, bn=False, name=name+'e4') # rows/8, cols/8\n\n rb1 = self.residual_block(e4, n_kernels=8*filters, k_size=k_size, bn_training=True, name=name+'1_')\n rb2 = self.residual_block(rb1, n_kernels=8*filters, k_size=k_size, bn_training=True, name=name+'2_')\n rb3 = self.residual_block(rb2, n_kernels=8*filters, k_size=k_size, bn_training=True, name=name+'3_')\n rb3 = Dropout(rate=0.5, name=name+'drop_1')(rb3, training=True)\n \n rb4 = self.residual_block(rb3, n_kernels=8*filters, k_size=k_size, bn_training=True, name=name+'4_')\n rb4 = Dropout(rate=0.5, name=name+'drop_2')(rb4, training=True) \n \n rb5 = self.residual_block(rb4, n_kernels=8*filters, k_size=k_size, bn_training=True, name=name+'5_')\n rb5 = Dropout(rate=0.5, name=name+'drop_3')(rb5, training=True) \n \n d1 = self.Conv2DTranspose_Block(rb5, 4*filters, k_size=k_size, activation='linear', name=name+'d1') # rows/4, cols/4\n if index_rows==2 or index_cols==2:\n d1 = BilinearUpsampling(output_size=(n_rows//4, n_cols//4), name=name+'_bilinear')(d1)\n d1 = Concatenate(name=name+'conc_1')([d1, e3])\n d1 = Activation('relu', name=name+'_act_1')(d1)\n \n d2 = self.Conv2DTranspose_Block(d1, 2*filters, k_size=k_size, activation='linear', name=name+'d2') # rows/2, cols/2\n if index_rows==1 or index_cols==1:\n d2 = BilinearUpsampling(output_size=(n_rows//2, n_cols//2), name=name+'_bilinear')(d2)\n d2 = Concatenate(name=name+'conc_2')([d2, e2])\n d2 = Activation('relu', name=name+'_act_2')(d2)\n \n d3 = 
self.Conv2DTranspose_Block(d2, 1*filters, k_size=k_size, activation='linear', name=name+'d3') # rows, cols\n if index_rows==0 or index_cols==0:\n d3 = BilinearUpsampling(output_size=(n_rows, n_cols), name=name+'_bilinear')(d2)\n d3 = Concatenate(name=name+'conc_3')([d3, e1])\n d3 = Activation('relu', name=name+'act_3')(d3)\n\n output = Conv2DTranspose(out_c_dims, 7, strides=1, padding='same', kernel_initializer=init, name=name+'d_out')(d3) # rows, cols\n output = Activation(last_act, name=name+last_act)(output)\n\n model = Model(inputs=[input_layer], outputs=[output], name='Generator'+name[-3:])\n if (summary):\n model.summary()\n return model", "def main():\n args = get_arguments()\n \n # Create queue coordinator.\n coord = tf.train.Coordinator()\n \n # Load reader.\n with tf.name_scope(\"create_inputs\"):\n reader = ImageReader_MultiClass_Loss(\n args.data_dir,\n args.data_list,\n None, # No defined input size.\n RANDOM_SEED,\n False, # No random scale.\n False, # No random mirror.\n coord)\n image, l2_catg, binary_catg, hinge_catg = reader.image, reader.l2_catg, reader.binary_catg, reader.hinge_catg\n image_batch = tf.expand_dims(image, dim=0)\n binary_catg_batch = tf.expand_dims(binary_catg, dim=0)\n\n # Create network.\n net = DeepLabResNetModel({'data': image_batch}, is_training=False)\n\n # Which variables to load.\n restore_var = tf.global_variables()\n \n # Predictions.\n raw_output = net.layers['fc1_voc12']\n\n # Do the global average pooling\n raw_output_bcgd_rmvd = raw_output[:,:,:,1:]\n g_avg_pool = tf.reduce_mean(tf.reduce_mean(raw_output_bcgd_rmvd, axis=1, keep_dims=True),\\\n axis=2, keep_dims=True) # Avg across the width and height dimension -> [Bx21]\n g_avg_pool_sqzd = tf.squeeze(g_avg_pool, axis=[1, 2])\n pred = tf.nn.softmax(g_avg_pool_sqzd)\n\n # Get the class activation map\n raw_output_up = tf.image.resize_bilinear(raw_output_bcgd_rmvd, tf.shape(image_batch)[1:3,])\n raw_output_up = raw_output_up - tf.reduce_min(tf.reduce_min(raw_output_up, axis=1, keep_dims=True), axis=2, keep_dims=True) + EPSILON\n raw_output_up = raw_output_up / tf.reduce_max(tf.reduce_max(raw_output_up, axis=1, keep_dims=True), axis=2, keep_dims=True)\n cam_m_1 = tf.argmax(raw_output_up, dimension=3) + 1\n raw_output_catgs_rmvd = raw_output_up * tf.expand_dims(tf.expand_dims(binary_catg_batch, 1), 2)\n cam_m_2 = tf.argmax(raw_output_catgs_rmvd, dimension=3) + 1\n cam = tf.cast(tf.equal(cam_m_1, cam_m_2), tf.int64) * cam_m_1\n\n cam_batch = tf.expand_dims(cam, dim=3)\n\n # Set up tf session and initialize variables. 
\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n init = tf.global_variables_initializer()\n \n sess.run(init)\n sess.run(tf.local_variables_initializer())\n \n # Load weights.\n loader = tf.train.Saver(var_list=restore_var)\n if args.restore_from is not None:\n load(loader, sess, args.restore_from)\n \n # Start queue threads.\n threads = tf.train.start_queue_runners(coord=coord, sess=sess)\n \n # Iterate over training steps.\n for step in range(args.num_steps):\n preds, images, cams, bin_catg = sess.run([pred, image_batch, cam_batch, binary_catg])\n \"\"\"\n print(bin_catg)\n print(np.unique(np.unique(cams)))\n \"\"\"\n img = inv_preprocess(images)\n attMap = decode_labels(cams)\n output_dir = './output_maps_binary_without_norm/'\n img_name = output_dir + str(step) + '.jpg'\n map_name = output_dir + str(step) + '.png'\n misc.imsave(img_name, img[0,:,:,:])\n misc.imsave(map_name, attMap[0,:,:,:])\n coord.request_stop()\n coord.join(threads)", "def __init__(self, img_rows=400, img_cols=400, vgg_weights=\"imagenet\", inference_only=False, net_name='default', gpus=1, vgg_device=None):\n \n # Settings\n self.img_rows = img_rows\n self.img_cols = img_cols\n self.img_overlap = 30\n self.inference_only = inference_only\n self.net_name = net_name\n self.gpus = gpus\n self.vgg_device = vgg_device\n\n # Scaling for VGG input\n self.mean = [0.485, 0.456, 0.406]\n self.std = [0.229, 0.224, 0.225]\n \n #get PowerSpect_CMB\n reader = np.zeros((2507,))\n fp = open('./data/COM_PowerSpect_CMB-base-plikHM-TTTEEE-lowl-lowE-lensing-minimum-theory_R3.01.txt')\n \n for i,line in enumerate(fp):\n if i >= 1:\n reader[i-1] = line.split()[1]\n \n fp.close() \n readers = np.log(reader)\n self.cl = K.constant(readers)\n # Assertions\n assert self.img_rows >= 256, 'Height must be >256 pixels'\n assert self.img_cols >= 256, 'Width must be >256 pixels'\n\n # Set current epoch\n self.current_epoch = 0\n \n # VGG layers to extract features from (first maxpooling layers, see pp. 
7 of paper)\n self.vgg_layers = [3, 6, 10]\n\n # Instantiate the vgg network\n if self.vgg_device:\n with tf.device(self.vgg_device):\n self.vgg = self.build_vgg(vgg_weights)\n else:\n self.vgg = self.build_vgg(vgg_weights)\n \n # Create UNet-like model\n if self.gpus <= 1:\n self.model, inputs_mask= self.build_pconv_unet()\n self.compile_pconv_unet(self.model, inputs_mask) \n else:\n with tf.device(\"/cpu:0\"):\n self.model, inputs_mask = self.build_pconv_unet()\n self.model = multi_gpu_model(self.model, gpus=self.gpus)\n self.compile_pconv_unet(self.model, inputs_mask)", "def network_modified(input):\n\n up6 = upsample_and_concat( conv5, conv4, 256, 512 , 'up_conv1' )\n conv6=slim.conv2d(up6, 256,[3,3], rate=1, activation_fn=lrelu,scope='g_conv6_1')\n conv6=slim.conv2d(conv6,256,[3,3], rate=1, activation_fn=lrelu,scope='g_conv6_2')\n\n up7 = upsample_and_concat( conv6, conv3, 128, 256 , 'up_conv2' )\n conv7=slim.conv2d(up7, 128,[3,3], rate=1, activation_fn=lrelu,scope='g_conv7_1')\n conv7=slim.conv2d(conv7,128,[3,3], rate=1, activation_fn=lrelu,scope='g_conv7_2')\n\n up8 = upsample_and_concat( conv7, conv2, 64, 128 , 'up_conv3')\n conv8=slim.conv2d(up8, 64,[3,3], rate=1, activation_fn=lrelu,scope='g_conv8_1')\n conv8=slim.conv2d(conv8,64,[3,3], rate=1, activation_fn=lrelu,scope='g_conv8_2')\n\n up9 = upsample_and_concat( conv8, conv1, 32, 64 , 'up_conv4')\n conv9=slim.conv2d(up9, 32,[3,3], rate=1, activation_fn=lrelu,scope='g_conv9_1')\n conv9=slim.conv2d(conv9,32,[3,3], rate=1, activation_fn=lrelu,scope='g_conv9_2')\n\n conv10=slim.conv2d(conv9,12,[1,1], rate=1, activation_fn=None, scope='g_conv10')\n out = tf.depth_to_space(conv10,2)\n return out", "def inference(image,norm = True,phase_train = True):\n batch_size = image.shape[0]\n r,g,b = tf.split(axis = 3,num_or_size_splits = 3,value = image)\n p_image = tf.concat([r - 123.68,\n g - 116.78,\n b - 103.94],axis = 3)\n with tf.variable_scope('vgg_16'):\n with tf.variable_scope('conv1'):\n conv1_1 = layer.conv_layer('conv1_1',p_image,[3,3,3,64])\n conv1_2 = layer.conv_layer('conv1_2',conv1_1,[3,3,64,64])\n pool1 = layer.pool_layer('pool1',conv1_2)\n with tf.variable_scope('conv2'):\n conv2_1 = layer.conv_layer('conv2_1',pool1,[3,3,64,128])\n conv2_2 = layer.conv_layer('conv2_2',conv2_1,[3,3,128,128])\n pool2 = layer.pool_layer('pool2',conv2_2)\n with tf.variable_scope('conv3'):\n conv3_1 = layer.conv_layer('conv3_1',pool2,[3,3,128,256])\n conv3_2 = layer.conv_layer('conv3_2',conv3_1,[3,3,256,256])\n conv3_3 = layer.conv_layer('conv3_3',conv3_2,[3,3,256,256])\n pool3 = layer.pool_layer('pool3',conv3_3)\n with tf.variable_scope('conv4'):\n conv4_1 = layer.conv_layer('conv4_1',pool3,[3,3,256,512])\n conv4_2 = layer.conv_layer('conv4_2',conv4_1,[3,3,512,512])\n conv4_3 = layer.conv_layer('conv4_3',conv4_2,[3,3,512,512])\n pool4 = layer.pool_layer('pool4',conv4_3)\n with tf.variable_scope('conv5'):\n conv5_1 = layer.conv_layer('conv5_1',pool4,[3,3,512,512])\n conv5_2 = layer.conv_layer('conv5_2',conv5_1,[3,3,512,512])\n conv5_3 = layer.conv_layer('conv5_3',conv5_2,[3,3,512,512])\n pool5 = layer.pool_layer('pool5',conv5_3,ksize = [1,3,3,1],strides = [1,1,1,1])\n with tf.variable_scope('ssd'):\n conv6 = layer.atrous_conv('conv6',pool5,[3,3,512,1024],rate = 6,\n batch_normalization = norm,phase_train = phase_train)\n conv7 = layer.conv_layer('conv7',conv6,[1,1,1024,1024],\n batch_normalization = norm,phase_train = phase_train)\n with tf.variable_scope('conv8'):\n conv8_1 = layer.conv_layer('conv8_1',conv7,[1,1,1024,256],\n batch_normalization = 
norm,phase_train = phase_train)\n conv8_2 = layer.conv_layer('conv8_2',conv8_1,[3,3,256,512],\n stride = [1,2,2,1],batch_normalization = norm,phase_train = phase_train)\n with tf.variable_scope('conv9'):\n conv9_1 = layer.conv_layer('conv9_1',conv8_2,[1,1,512,128],\n batch_normalization = norm,phase_train = phase_train)\n conv9_2 = layer.conv_layer('conv9_2',conv9_1,[3,3,128,256],\n stride = [1,2,2,1],batch_normalization = norm,phase_train = phase_train)\n with tf.variable_scope('conv10'):\n conv10_1 = layer.conv_layer('conv10_1',conv9_2,[1,1,256,128],\n batch_normalization = norm,phase_train = phase_train)\n conv10_2 = layer.conv_layer('conv10_2',conv10_1,[3,3,128,256],\n padding = 'VALID',batch_normalization = norm,phase_train = phase_train)\n with tf.variable_scope('conv11'):\n conv11_1 = layer.conv_layer('conv11_1',conv10_2,[1,1,256,128],\n batch_normalization = norm,phase_train = phase_train)\n conv11_2 = layer.conv_layer('conv11_2',conv11_1,[3,3,128,256],\n padding = 'VALID',batch_normalization = norm,phase_train = phase_train)#vgg300\n with tf.variable_scope('multibox'):\n\n l2_conv4_3 = layer.l2_normalization('l2_normalization',conv4_3,scaling = True)\n cls4 = layer.conv_layer('cls4',l2_conv4_3,[3,3,512,84],activation = None)\n loc4 = layer.conv_layer('loc4',l2_conv4_3,[3,3,512,16],activation = None)\n\n cls4_reshape = tf.reshape(cls4,[batch_size,-1,21])\n loc4_reshape = tf.reshape(loc4,[batch_size,-1,4])\n\n\n cls7 = layer.conv_layer('cls7',conv7,[3,3,1024,126],activation = None)\n loc7 = layer.conv_layer('loc7',conv7,[3,3,1024,24],activation = None)\n\n cls7_reshape = tf.reshape(cls7,[batch_size,-1,21])\n loc7_reshape = tf.reshape(loc7,[batch_size,-1,4])\n\n cls8 = layer.conv_layer('cls8',conv8_2,[3,3,512,126],activation = None)\n loc8 = layer.conv_layer('loc8',conv8_2,[3,3,512,24],activation = None)\n\n cls8_reshape = tf.reshape(cls8,[batch_size,-1,21])\n loc8_reshape = tf.reshape(loc8,[batch_size,-1,4])\n\n cls9 = layer.conv_layer('cls9',conv9_2,[3,3,256,126],activation = None)\n loc9 = layer.conv_layer('loc9',conv9_2,[3,3,256,24],activation = None)\n\n cls9_reshape = tf.reshape(cls9,[batch_size,-1,21])\n loc9_reshape = tf.reshape(loc9,[batch_size,-1,4])\n\n cls10 = layer.conv_layer('cls10',conv10_2,[3,3,256,84],activation = None)\n loc10 = layer.conv_layer('loc10',conv10_2,[3,3,256,16],activation = None)\n\n cls10_reshape = tf.reshape(cls10,[batch_size,-1,21])\n loc10_reshape = tf.reshape(loc10,[batch_size,-1,4])\n\n cls11 = layer.conv_layer('cls11',conv11_2,[1,1,256,84],activation = None)\n loc11 = layer.conv_layer('loc11',conv11_2,[1,1,256,16],activation = None)\n\n cls11_reshape = tf.reshape(cls11,[batch_size,-1,21])\n loc11_reshape = tf.reshape(loc11,[batch_size,-1,4])\n\n cls_logit = tf.concat([\n cls4_reshape,\n cls7_reshape,\n cls8_reshape,\n cls9_reshape,\n cls10_reshape,\n cls11_reshape\n ],1)\n loc_logit = tf.concat([\n loc4_reshape,\n loc7_reshape,\n loc8_reshape,\n loc9_reshape,\n loc10_reshape,\n loc11_reshape\n ],1)\n \n return cls_logit,loc_logit" ]
[ "0.7020831", "0.6723679", "0.66918373", "0.6514261", "0.65074456", "0.6171934", "0.615975", "0.6125562", "0.61211646", "0.6104127", "0.60718155", "0.6071536", "0.6047656", "0.6044598", "0.60417295", "0.6018157", "0.6011776", "0.6005091", "0.5952455", "0.59439194", "0.5914834", "0.59140545", "0.5897333", "0.5863429", "0.5842409", "0.58219385", "0.58183706", "0.58145803", "0.580889", "0.5799146" ]
0.748433
0
Loads the existing OpenStack Keypair
def initialize(self): super(self.__class__, self).initialize() try: self.__keypair = nova_utils.get_keypair_by_name( self._nova, self.keypair_settings.name) return self.__keypair except Exception as e: logger.warn('Cannot load existing keypair - %s', e)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_key():", "def load_key():\n return open(\"Secret.key\",\"rb\").read()", "def create(self):\n self.initialize()\n\n if not self.__keypair:\n logger.info('Creating keypair %s...' % self.keypair_settings.name)\n\n if self.keypair_settings.public_filepath and os.path.isfile(\n self.keypair_settings.public_filepath):\n logger.info(\"Uploading existing keypair\")\n self.__keypair = nova_utils.upload_keypair_file(\n self._nova, self.keypair_settings.name,\n self.keypair_settings.public_filepath)\n\n if self.keypair_settings.delete_on_clean is not None:\n delete_on_clean = self.keypair_settings.delete_on_clean\n self.__delete_keys_on_clean = delete_on_clean\n else:\n self.__delete_keys_on_clean = False\n else:\n logger.info(\"Creating new keypair\")\n keys = nova_utils.create_keys(self.keypair_settings.key_size)\n self.__keypair = nova_utils.upload_keypair(\n self._nova, self.keypair_settings.name,\n nova_utils.public_key_openssh(keys))\n file_utils.save_keys_to_files(\n keys, self.keypair_settings.public_filepath,\n self.keypair_settings.private_filepath)\n\n if self.keypair_settings.delete_on_clean is not None:\n delete_on_clean = self.keypair_settings.delete_on_clean\n self.__delete_keys_on_clean = delete_on_clean\n else:\n self.__delete_keys_on_clean = True\n elif self.__keypair and not os.path.isfile(\n self.keypair_settings.private_filepath):\n logger.warn(\"The public key already exist in OpenStack \\\n but the private key file is not found ..\")\n\n return self.__keypair", "def load_key(self):\n\t return open(\"key.key\", \"rb\").read()", "def load_key():\n return open(\"secret.key\", \"rb\").read()", "def _load_key(self, path):\n with open(path, 'r') as f:\n self._key = f.readline().strip()\n self._secret = f.readline().strip()", "def load_key(key_name):\n if not p.exists(key_name):\n write_key(key_name)\n\n return open(key_name, \"rb\").read()", "def get(self, name):\n path = '/os-keypairs/%s' % name\n res = self.client.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('Get openstack key pair %s: %s' % (name, truncate(res)))\n return res[0]['keypair']", "def load_key():\r\n\r\n key_dir = os.path.join(os.path.dirname(__file__), \"resources/key\")\r\n\r\n try:\r\n return open(key_dir, \"rb\").read()\r\n except:\r\n return None", "def test_create_keypair_from_file(self):\n keys = RSA.generate(1024)\n nova_utils.save_keys_to_files(keys=keys, pub_file_path=pub_file_path)\n self.keypair_creator = create_keypairs.OpenStackKeypair(self.os_creds,\n create_keypairs.KeypairSettings(name=keypair_name,\n public_filepath=pub_file_path))\n self.keypair_creator.create()\n\n keypair = nova_utils.keypair_exists(self.keypair_creator.nova, self.keypair_creator.keypair)\n self.assertEquals(self.keypair_creator.keypair, keypair)\n\n file_key = open(os.path.expanduser(pub_file_path)).read()\n self.assertEquals(self.keypair_creator.keypair.public_key, file_key)", "def load_device_key(self, filename):\n pass", "def ex_import_keypair(self, name, keyfile):\n\n base64key = base64.b64encode(open(os.path.expanduser(keyfile)).read())\n\n params = {'Action': 'ImportKeyPair',\n 'KeyName': name,\n 'PublicKeyMaterial': base64key\n }\n\n response = self.connection.request(self.path, params=params).object\n key_name = self._findtext(response, 'keyName')\n key_fingerprint = self._findtext(response, 'keyFingerprint')\n return {\n 'keyName': key_name,\n 'keyFingerprint': key_fingerprint,\n }", "def load_key(self, key):\n self.key = key", "def load_key(self, type, keyid):\n pass", 
"def create(self, name, public_key=None):\n data = {\n \"keypair\": {\n \"name\": name\n }\n }\n if public_key is not None:\n data['keypair']['public_key'] = public_key\n \n path = '/os-keypairs'\n res = self.client.call(path, 'POST', data=json.dumps(data), \n token=self.manager.identity.token)\n self.logger.debug('Create/import openstack keypair: %s' % truncate(res))\n return res[0]['keypair']", "def load_key():\n return open(\"pass.key\", \"rb\").read()", "def test_aws_service_api_keypair_get(self):\n pass", "def load_key(self, pemfile_path_abs: str, set_priv=False) -> None:\n return None", "def load(self, key: str) -> str:\n pass", "def __loadKey(self, key_image_file_name: str):\n # get the key name from the file name e.g. ${key_name}.png\n key_name = key_image_file_name.split('.')[0]\n\n self.maple_logger.debug(\"Loading key: {0}\", key_name)\n\n self.key_locations[key_name] = self.__getKeyLocation(key_image_file_name)", "def create_keypair(econfig_file=None, region=None, keyname=\"bcbio\"):\n import boto\n import boto.ec2\n if econfig_file:\n keypair_dir = os.path.dirname(econfig_file).replace(\"elasticluster\", \"aws_keypairs\")\n else:\n keypair_dir = os.path.join(os.getcwd(), \"aws_keypairs\")\n if not os.path.exists(keypair_dir):\n os.makedirs(keypair_dir)\n private_key = os.path.join(os.path.join(keypair_dir, keyname))\n new_key = not os.path.exists(private_key)\n if new_key:\n cmd = [\"ssh-keygen\", \"-t\", \"rsa\", \"-N\", \"\", \"-f\", private_key, \"-C\", \"bcbio_aws_keypair\"]\n subprocess.check_call(cmd)\n public_key = private_key + \".pub\"\n if region:\n ec2 = boto.ec2.connect_to_region(region)\n else:\n ec2 = boto.connect_ec2()\n key = ec2.get_key_pair(keyname)\n if key and new_key:\n print(\"Non matching key %s found in AWS, removing.\" % keyname)\n ec2.delete_key_pair(keyname)\n key = None\n if not key:\n print(\"Key %s not found in AWS, importing created key\" % keyname)\n with open(public_key) as in_handle:\n body = in_handle.read()\n try:\n ec2.import_key_pair(keyname, body)\n except TypeError as e:\n body = body.encode('utf-8')\n ec2.import_key_pair(keyname, body)\n return {\"user_key_name\": keyname, \"user_key_private\": private_key,\n \"user_key_public\": public_key}", "def test_create_keypair_save_both(self):\n self.keypair_creator = create_keypairs.OpenStackKeypair(self.os_creds,\n create_keypairs.KeypairSettings(name=keypair_name,\n public_filepath=pub_file_path,\n private_filepath=priv_file_path))\n self.keypair_creator.create()\n\n keypair = nova_utils.keypair_exists(self.keypair_creator.nova, self.keypair_creator.keypair)\n self.assertEquals(self.keypair_creator.keypair, keypair)\n\n file_key = open(os.path.expanduser(pub_file_path)).read()\n self.assertEquals(self.keypair_creator.keypair.public_key, file_key)\n\n self.assertTrue(os.path.isfile(priv_file_path))", "def __set_or_create_key_if_not_exist(self):\n\n # instantiate PKI class:\n pki = PKI(username=self.username, password=self.password)\n\n # load private key into object. 
key is ready to be used to sign already imported\n privkey = pki.load_priv_key()\n\n # if it is an empty list then no key created and saved on username so generate new key\n if not privkey:\n pki.generate_pub_priv_key()\n privkey = pki.load_priv_key()\n\n # set self.privkey to privkey\n self.privkey = privkey", "def load_key(fn, psw=None):\n if not fn:\n die(\"Need private key\")\n if psw:\n psw = as_bytes(psw)\n data = load_gpg_file(fn)\n key = load_pem_private_key(data, password=psw, backend=get_backend())\n return key", "def _load_key(self):\n try:\n with open(self.gmaps_key_file) as fid:\n key = fid.read().strip()\n except FileNotFoundError:\n logging.warning(\"Failed to load Google Maps API key from '%s' - you \"\n \"will not be able to make new queries to the Google Maps API!\",\n self.gmaps_key_file)\n return None\n return key", "def get_keypair(ec2):\n # call the boto ec2 function to create a key pair\n key_pair = ec2.create_key_pair(KeyName=KEY_PAIR_NAME)\n print(\"\\n===Created a new key pair in AWS.\")\n\n # capture the key and store it in a file\n KeyPairOut = str(key_pair.key_material)\n\n # create a file to store the key locally\n print(\"Saving the keypair.\")\n key_pair_path = KEY_PAIR_NAME + \".pem\"\n with open(key_pair_path, \"w\") as f:\n f.write(KeyPairOut)\n os.chmod(key_pair_path, 0o600)\n print(\"===Changed access permission to read-only.\")", "def test_create_keypair_only(self):\n self.keypair_creator = create_keypairs.OpenStackKeypair(self.os_creds,\n create_keypairs.KeypairSettings(name=keypair_name))\n self.keypair_creator.create()\n\n keypair = nova_utils.keypair_exists(self.keypair_creator.nova, self.keypair_creator.keypair)\n self.assertEquals(self.keypair_creator.keypair, keypair)", "def new_key_pair(self):\n from plonevotecryptolib.KeyPair import KeyPair # avoids circular imports\n return KeyPair(self)", "def test_aws_service_api_keypairs_get(self):\n pass", "def load_keys(self, load_path=DEFAULT_KEY_PATH):\n try:\n with open(f'{load_path}/id_elgamal', 'r') as f:\n f.read(self.keys['private'])\n with open(f'{load_path}/id_elgamal.pub', 'r') as f:\n self.keys['public']['p'] = f.readline()\n self.keys['public']['g'] = f.readline()\n self.keys['public']['y'] = f.readline()\n debug_message('Loading successful!')\n return self.keys\n except FileNotFoundError:\n debug_message(f'Loading error! ({FileNotFoundError})')\n return 0" ]
[ "0.7189895", "0.6517558", "0.648187", "0.64195603", "0.64188176", "0.64004326", "0.6370742", "0.6292563", "0.62764764", "0.6230832", "0.6219282", "0.62125903", "0.6180529", "0.60919774", "0.6073888", "0.6063422", "0.60568666", "0.59565926", "0.5932089", "0.5882537", "0.58715916", "0.58264524", "0.582271", "0.58155394", "0.5789199", "0.57811254", "0.5780347", "0.57478714", "0.57129014", "0.56093425" ]
0.7845108
0
Responsible for creating the keypair object.
def create(self): self.initialize() if not self.__keypair: logger.info('Creating keypair %s...' % self.keypair_settings.name) if self.keypair_settings.public_filepath and os.path.isfile( self.keypair_settings.public_filepath): logger.info("Uploading existing keypair") self.__keypair = nova_utils.upload_keypair_file( self._nova, self.keypair_settings.name, self.keypair_settings.public_filepath) if self.keypair_settings.delete_on_clean is not None: delete_on_clean = self.keypair_settings.delete_on_clean self.__delete_keys_on_clean = delete_on_clean else: self.__delete_keys_on_clean = False else: logger.info("Creating new keypair") keys = nova_utils.create_keys(self.keypair_settings.key_size) self.__keypair = nova_utils.upload_keypair( self._nova, self.keypair_settings.name, nova_utils.public_key_openssh(keys)) file_utils.save_keys_to_files( keys, self.keypair_settings.public_filepath, self.keypair_settings.private_filepath) if self.keypair_settings.delete_on_clean is not None: delete_on_clean = self.keypair_settings.delete_on_clean self.__delete_keys_on_clean = delete_on_clean else: self.__delete_keys_on_clean = True elif self.__keypair and not os.path.isfile( self.keypair_settings.private_filepath): logger.warn("The public key already exist in OpenStack \ but the private key file is not found ..") return self.__keypair
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_keypair(self, username):\n msg = \"create_keypair not implemented\"\n raise NotImplementedError(msg)", "def create_key_pair(self) -> Keypair:\n res = self.context.post(\n \"/dsum/create_key_pair\", None, None, \"DSum: failed creating a Curve 25519 Keypair\")\n return Keypair(res['private_key_id'], res['public_key_id'])", "def create_key_pair(self, key_name):\r\n params = {'KeyName':key_name}\r\n return self.get_object('CreateKeyPair', params, KeyPair, verb='POST')", "def create_key ():", "def create(self, name, public_key=None):\n data = {\n \"keypair\": {\n \"name\": name\n }\n }\n if public_key is not None:\n data['keypair']['public_key'] = public_key\n \n path = '/os-keypairs'\n res = self.client.call(path, 'POST', data=json.dumps(data), \n token=self.manager.identity.token)\n self.logger.debug('Create/import openstack keypair: %s' % truncate(res))\n return res[0]['keypair']", "def create_key_pair(self, key_name):\n response = key_pair.create_key_pair(self.url, self.verb, self.headers,\n self.version, key_name)\n if response is not None :\n res = CreateKeyPairResponse.CreateKeyPairResponse()\n parseString(str(response.text), res)\n return res\n else :\n return None", "def create_keypair(self, name=None, public_key=None):\n body = {}\n if name is not None:\n body.update({\"name\": name})\n if public_key is not None:\n body.update({\"public_key\": public_key})\n return self._create(_keypair.Keypair, **body)", "def create_key_pair(self, keypair, **kwargs):\n\n if not isinstance(keypair, models.CreateKeyPairReq):\n raise HuaweiCloudSDKException(\n message=\"The datatype of parameter(keypair) \"\n \"is not CreateKeyPairReq\")\n body_params = keypair.serialize()\n\n header_params = {}\n header_params['Accept'] = util.select_header_accept(\n ['application/xml', 'application/json'])\n\n header_params['Content-Type'] = util.select_header_content_type(\n ['application/json', 'application/xml'])\n\n return_code, return_data, _ = self.api_client.handle_raw_request(\n 'compute', 'POST',\n '/os-keypairs',\n headers=header_params,\n body=body_params,\n timeout=kwargs.get('_request_timeout', None),\n _preload_content=kwargs.get('_preload_content', True))\n\n if return_data is not None:\n return_data = json.loads(return_data)\n else:\n return_data = {}\n if return_code not in [200, 201]:\n raise HuaweiCloudSDKException(\n return_code,\n \"Run create_key_pair failed, \"\n \"message=%s\" % return_data.get(\"message\"))\n return models.CreateKeyPairResp().deserialize(return_data)", "def create_keypair(self):\n # NOTE: currently we rely on zmq for convenience, but we may use libnacl directly\n # if we want to isolate this module from zmq dependency.\n public_key, private_key = zmq.curve_keypair()\n return public_key, private_key", "def new_key_pair(self):\n from plonevotecryptolib.KeyPair import KeyPair # avoids circular imports\n return KeyPair(self)", "def create_keypair(address_type, addresses_path, address_prefix, name):\n vkey_file = get_vkey_file(addresses_path, address_prefix, name)\n skey_file = get_skey_file(addresses_path, address_prefix, name)\n\n if(path.exists(vkey_file)) :\n print(address_prefix, \"key pair already exists for\", name)\n return\n \n makedirs(path.dirname(vkey_file), mode=0o777, exist_ok=True)\n\n run_params = ['cardano-cli', address_type, 'key-gen', '--verification-key-file', vkey_file, '--signing-key-file', skey_file]\n subprocess_run(run_params, capture_output=False, text=True)\n return", "def test_create_keypair_only(self):\n self.keypair_creator = 
create_keypairs.OpenStackKeypair(self.os_creds,\n create_keypairs.KeypairSettings(name=keypair_name))\n self.keypair_creator.create()\n\n keypair = nova_utils.keypair_exists(self.keypair_creator.nova, self.keypair_creator.keypair)\n self.assertEquals(self.keypair_creator.keypair, keypair)", "def ex_create_keypair(self, name):\n params = {\n 'Action': 'CreateKeyPair',\n 'KeyName': name,\n }\n response = self.connection.request(self.path, params=params).object\n key_material = self._findtext(response, 'keyMaterial')\n key_fingerprint = self._findtext(response, 'keyFingerprint')\n return {\n 'keyMaterial': key_material,\n 'keyFingerprint': key_fingerprint,\n }", "def test_aws_service_api_keypair_generate_post(self):\n pass", "def test_create_keypair_save_both(self):\n self.keypair_creator = create_keypairs.OpenStackKeypair(self.os_creds,\n create_keypairs.KeypairSettings(name=keypair_name,\n public_filepath=pub_file_path,\n private_filepath=priv_file_path))\n self.keypair_creator.create()\n\n keypair = nova_utils.keypair_exists(self.keypair_creator.nova, self.keypair_creator.keypair)\n self.assertEquals(self.keypair_creator.keypair, keypair)\n\n file_key = open(os.path.expanduser(pub_file_path)).read()\n self.assertEquals(self.keypair_creator.keypair.public_key, file_key)\n\n self.assertTrue(os.path.isfile(priv_file_path))", "def create_keypair(econfig_file=None, region=None, keyname=\"bcbio\"):\n import boto\n import boto.ec2\n if econfig_file:\n keypair_dir = os.path.dirname(econfig_file).replace(\"elasticluster\", \"aws_keypairs\")\n else:\n keypair_dir = os.path.join(os.getcwd(), \"aws_keypairs\")\n if not os.path.exists(keypair_dir):\n os.makedirs(keypair_dir)\n private_key = os.path.join(os.path.join(keypair_dir, keyname))\n new_key = not os.path.exists(private_key)\n if new_key:\n cmd = [\"ssh-keygen\", \"-t\", \"rsa\", \"-N\", \"\", \"-f\", private_key, \"-C\", \"bcbio_aws_keypair\"]\n subprocess.check_call(cmd)\n public_key = private_key + \".pub\"\n if region:\n ec2 = boto.ec2.connect_to_region(region)\n else:\n ec2 = boto.connect_ec2()\n key = ec2.get_key_pair(keyname)\n if key and new_key:\n print(\"Non matching key %s found in AWS, removing.\" % keyname)\n ec2.delete_key_pair(keyname)\n key = None\n if not key:\n print(\"Key %s not found in AWS, importing created key\" % keyname)\n with open(public_key) as in_handle:\n body = in_handle.read()\n try:\n ec2.import_key_pair(keyname, body)\n except TypeError as e:\n body = body.encode('utf-8')\n ec2.import_key_pair(keyname, body)\n return {\"user_key_name\": keyname, \"user_key_private\": private_key,\n \"user_key_public\": public_key}", "def __init__(self, os_creds, keypair_settings):\n super(self.__class__, self).__init__(os_creds)\n\n self.keypair_settings = keypair_settings\n self.__delete_keys_on_clean = True\n\n # Attributes instantiated on create()\n self.__keypair = None", "def initialize(self):\n super(self.__class__, self).initialize()\n\n try:\n self.__keypair = nova_utils.get_keypair_by_name(\n self._nova, self.keypair_settings.name)\n return self.__keypair\n except Exception as e:\n logger.warn('Cannot load existing keypair - %s', e)", "def get_keypair(ec2):\n # call the boto ec2 function to create a key pair\n key_pair = ec2.create_key_pair(KeyName=KEY_PAIR_NAME)\n print(\"\\n===Created a new key pair in AWS.\")\n\n # capture the key and store it in a file\n KeyPairOut = str(key_pair.key_material)\n\n # create a file to store the key locally\n print(\"Saving the keypair.\")\n key_pair_path = KEY_PAIR_NAME + \".pem\"\n 
with open(key_pair_path, \"w\") as f:\n f.write(KeyPairOut)\n os.chmod(key_pair_path, 0o600)\n print(\"===Changed access permission to read-only.\")", "def create_handshake_key_pair(cls) -> Tuple[bytes, bytes]:\n ...", "def new_key(self, key_name=None, key_type=Key.KEY_REGULAR_FILE):\r\n if key_name == '-':\r\n return Key(self.name, '-', key_type=Key.KEY_STREAM_WRITABLE)\r\n else:\r\n dir_name = os.path.dirname(key_name)\r\n if dir_name and not os.path.exists(dir_name):\r\n os.makedirs(dir_name)\r\n fp = open(key_name, 'wb')\r\n return Key(self.name, key_name, fp)", "def create_keys(self):\n crypto_tool = CryptoTools()\n # creating RSA keys for the signer user\n public_key, private_key = crypto_tool.create_key_with_entropy()\n self.priv_key = crypto_tool.get_pem_format(private_key).decode(\"utf-8\")\n self.pub_key = crypto_tool.get_pem_format(public_key).decode(\"utf-8\")", "def create_keypair(key_name):\n if os.path.isfile(SSH_FOLDER + key_name + \".pem\"):\n return # Key already created\n ec2 = boto.ec2.connect_to_region(AWS_REGION)\n key = ec2.create_key_pair(key_name)\n key.save(SSH_FOLDER)", "def test_create_keypair_save_pub_only(self):\n self.keypair_creator = create_keypairs.OpenStackKeypair(self.os_creds,\n create_keypairs.KeypairSettings(name=keypair_name,\n public_filepath=pub_file_path))\n self.keypair_creator.create()\n\n keypair = nova_utils.keypair_exists(self.keypair_creator.nova, self.keypair_creator.keypair)\n self.assertEquals(self.keypair_creator.keypair, keypair)\n\n file_key = open(os.path.expanduser(pub_file_path)).read()\n self.assertEquals(self.keypair_creator.keypair.public_key, file_key)", "def test_create_keypair_from_file(self):\n keys = RSA.generate(1024)\n nova_utils.save_keys_to_files(keys=keys, pub_file_path=pub_file_path)\n self.keypair_creator = create_keypairs.OpenStackKeypair(self.os_creds,\n create_keypairs.KeypairSettings(name=keypair_name,\n public_filepath=pub_file_path))\n self.keypair_creator.create()\n\n keypair = nova_utils.keypair_exists(self.keypair_creator.nova, self.keypair_creator.keypair)\n self.assertEquals(self.keypair_creator.keypair, keypair)\n\n file_key = open(os.path.expanduser(pub_file_path)).read()\n self.assertEquals(self.keypair_creator.keypair.public_key, file_key)", "def create(self, key, value):\n raise NotImplementedError", "def _init_keys(self):\n\n basic_constraints = crypto.X509Extension('basicConstraints'.encode('ascii'), True,\n 'CA:TRUE, pathlen:0'.encode('ascii'))\n serial = self._get_serial()\n pkey = self._create_pkey(self.commonname, serial)\n self._create_cert(pkey, self.commonname, serial, [basic_constraints], expire=30*365)", "def createAllKP():\n\tif not os.path.exists(keysDir):\n\t\tos.makedirs(keysDir)\n\tfor info in conf_HVM:\n\t\tkeyName = 'Key-'+info['region']+'-'+info['zone']\n\t\ttry:\n\t\t\tos.remove(keysDir+'/'+keyName+'.pem')\n\t\texcept OSError:\n\t\t\tpass\n\t\tprint \"Key creation :\",keyName\n\t\tec2 = boto.ec2.connect_to_region(info['region']+'-'+info['zone'])\n\t\t# check if the key pair exists\n\t\tkps = [kp for kp in ec2.get_all_key_pairs() if kp.name == keyName]\n\t\tif kps:\n\t\t\tec2.delete_key_pair(keyName)\t\n\t\tkey = ec2.create_key_pair(keyName)\n\t\tkey.save(keysDir)", "def createKeyPair(type, bits):\n pkey = crypto.PKey()\n pkey.generate_key(type, bits)\n return pkey", "def new_key(self, key_name=None):\r\n return self.key_class(self, key_name)" ]
[ "0.80888253", "0.78946847", "0.76902384", "0.7655257", "0.75913066", "0.7544867", "0.7514757", "0.747879", "0.74046516", "0.72713006", "0.7267716", "0.7267246", "0.72401583", "0.6886206", "0.685062", "0.6836581", "0.67714626", "0.666824", "0.6648626", "0.663982", "0.6611549", "0.6561172", "0.65186673", "0.6516455", "0.65121377", "0.6493865", "0.64760816", "0.6401291", "0.63594455", "0.6329672" ]
0.8068847
1
Returns the OpenStack keypair object
def get_keypair(self): return self.__keypair
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self, name):\n path = '/os-keypairs/%s' % name\n res = self.client.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('Get openstack key pair %s: %s' % (name, truncate(res)))\n return res[0]['keypair']", "def initialize(self):\n super(self.__class__, self).initialize()\n\n try:\n self.__keypair = nova_utils.get_keypair_by_name(\n self._nova, self.keypair_settings.name)\n return self.__keypair\n except Exception as e:\n logger.warn('Cannot load existing keypair - %s', e)", "def create_keypair(self):\n # NOTE: currently we rely on zmq for convenience, but we may use libnacl directly\n # if we want to isolate this module from zmq dependency.\n public_key, private_key = zmq.curve_keypair()\n return public_key, private_key", "def new_key_pair(self):\n from plonevotecryptolib.KeyPair import KeyPair # avoids circular imports\n return KeyPair(self)", "def create(self):\n self.initialize()\n\n if not self.__keypair:\n logger.info('Creating keypair %s...' % self.keypair_settings.name)\n\n if self.keypair_settings.public_filepath and os.path.isfile(\n self.keypair_settings.public_filepath):\n logger.info(\"Uploading existing keypair\")\n self.__keypair = nova_utils.upload_keypair_file(\n self._nova, self.keypair_settings.name,\n self.keypair_settings.public_filepath)\n\n if self.keypair_settings.delete_on_clean is not None:\n delete_on_clean = self.keypair_settings.delete_on_clean\n self.__delete_keys_on_clean = delete_on_clean\n else:\n self.__delete_keys_on_clean = False\n else:\n logger.info(\"Creating new keypair\")\n keys = nova_utils.create_keys(self.keypair_settings.key_size)\n self.__keypair = nova_utils.upload_keypair(\n self._nova, self.keypair_settings.name,\n nova_utils.public_key_openssh(keys))\n file_utils.save_keys_to_files(\n keys, self.keypair_settings.public_filepath,\n self.keypair_settings.private_filepath)\n\n if self.keypair_settings.delete_on_clean is not None:\n delete_on_clean = self.keypair_settings.delete_on_clean\n self.__delete_keys_on_clean = delete_on_clean\n else:\n self.__delete_keys_on_clean = True\n elif self.__keypair and not os.path.isfile(\n self.keypair_settings.private_filepath):\n logger.warn(\"The public key already exist in OpenStack \\\n but the private key file is not found ..\")\n\n return self.__keypair", "def get_keypair(self, keypair):\n return self._get(_keypair.Keypair, keypair)", "def get_key_object(self):\n key_type, data = self.key_data()\n data = base64.b64decode(data)\n\n if key_type == \"ssh-rsa\":\n key = rsakey.RSAKey(data=data)\n elif key_type == \"ssh-dss\":\n key = dsskey.DSSKey(data=data)\n else:\n raise Exception(\"Invalid key type\")\n\n return key", "def get_keypair(ec2):\n # call the boto ec2 function to create a key pair\n key_pair = ec2.create_key_pair(KeyName=KEY_PAIR_NAME)\n print(\"\\n===Created a new key pair in AWS.\")\n\n # capture the key and store it in a file\n KeyPairOut = str(key_pair.key_material)\n\n # create a file to store the key locally\n print(\"Saving the keypair.\")\n key_pair_path = KEY_PAIR_NAME + \".pem\"\n with open(key_pair_path, \"w\") as f:\n f.write(KeyPairOut)\n os.chmod(key_pair_path, 0o600)\n print(\"===Changed access permission to read-only.\")", "def get_key_pair() -> typing.Tuple[bytes, bytes]: \n return _get_key_pair_from_sk(ecdsa.SigningKey.generate(curve=CURVE))", "def create(self, name, public_key=None):\n data = {\n \"keypair\": {\n \"name\": name\n }\n }\n if public_key is not None:\n data['keypair']['public_key'] = public_key\n \n path = 
'/os-keypairs'\n res = self.client.call(path, 'POST', data=json.dumps(data), \n token=self.manager.identity.token)\n self.logger.debug('Create/import openstack keypair: %s' % truncate(res))\n return res[0]['keypair']", "def test_aws_service_api_keypair_get(self):\n pass", "def _get_key():\n conn = boto.connect_s3()\n bucket = conn.create_bucket(settings.MESSY_BUCKET)\n key = Key(bucket)\n key.key = settings.MESSY_KEY\n return key", "def get_key(self, key_id):\r\n return self.sshkey.getObject(id=key_id)", "def test_create_keypair_only(self):\n self.keypair_creator = create_keypairs.OpenStackKeypair(self.os_creds,\n create_keypairs.KeypairSettings(name=keypair_name))\n self.keypair_creator.create()\n\n keypair = nova_utils.keypair_exists(self.keypair_creator.nova, self.keypair_creator.keypair)\n self.assertEquals(self.keypair_creator.keypair, keypair)", "def create_key_pair(self) -> Keypair:\n res = self.context.post(\n \"/dsum/create_key_pair\", None, None, \"DSum: failed creating a Curve 25519 Keypair\")\n return Keypair(res['private_key_id'], res['public_key_id'])", "def key_pair_name(self) -> str:\n return pulumi.get(self, \"key_pair_name\")", "def keypair_lookup(session):\n if session is None:\n return None\n\n client = session.client('ec2')\n response = client.describe_key_pairs()\n\n # If SSH_KEY exists and points to a valid Key Pair, use it\n key = os.environ.get(\"SSH_KEY\", None) # reuse bastion.py env vars\n if key is not None:\n kp_name = os.path.basename(key)\n if kp_name.endswith(\".pem\"):\n kp_name = kp_name[:-4]\n for kp in response['KeyPairs']:\n if kp[\"KeyName\"] == kp_name:\n return kp_name\n\n print(\"Key Pairs\")\n for i in range(len(response['KeyPairs'])):\n print(\"{}: {}\".format(i, response['KeyPairs'][i]['KeyName']))\n if len(response['KeyPairs']) == 0:\n return None\n while True:\n try:\n idx = input(\"[0]: \")\n idx = int(idx if len(idx) > 0 else \"0\")\n return response['KeyPairs'][idx]['KeyName']\n except KeyboardInterrupt:\n sys.exit(1)\n except:\n print(\"Invalid Key Pair number, try again\")", "def keypair(self, i, keypair_class):\n\n # Make sure keypair_class is a valid cryptocurrency keypair\n if not is_cryptocurrency_keypair_class(keypair_class):\n raise Exception(_messages[\"INVALID_KEYPAIR_CLASS\"])\n\n currency_name = keypair_class.__name__.lower().replace('keypair', '')\n\n k = keypair_class.from_passphrase(\n self._passphrase + \" \" + currency_name + str(i))\n\n return k", "def unwrap(self):\n\n if self.algorithm == 'ec':\n return self.asn1['public_key']\n return self.asn1['public_key'].parsed", "def _get_key_pair_by_id(key_pair_id):\n\n ec2_client = connection.EC2ConnectionClient().client()\n\n try:\n key_pairs = ec2_client.get_all_key_pairs(keynames=key_pair_id)\n except (boto.exception.EC2ResponseError,\n boto.exception.BotoServerError) as e:\n raise NonRecoverableError('{0}'.format(str(e)))\n\n return key_pairs[0] if key_pairs else None", "def getAwsKeypair(directory=None):\n if directory is None:\n directory = './'\n with open(directory + 'access.key', 'r+') as fp:\n access_key = fp.read()\n with open(directory + 'secret.key', 'r+') as fp:\n secret_key = fp.read()\n return (access_key, secret_key)", "def encryption_key(self) -> typing.Optional[aws_cdk.aws_kms.IKey]:\n ...", "def create_key_pair(self, keypair, **kwargs):\n\n if not isinstance(keypair, models.CreateKeyPairReq):\n raise HuaweiCloudSDKException(\n message=\"The datatype of parameter(keypair) \"\n \"is not CreateKeyPairReq\")\n body_params = keypair.serialize()\n\n header_params = 
{}\n header_params['Accept'] = util.select_header_accept(\n ['application/xml', 'application/json'])\n\n header_params['Content-Type'] = util.select_header_content_type(\n ['application/json', 'application/xml'])\n\n return_code, return_data, _ = self.api_client.handle_raw_request(\n 'compute', 'POST',\n '/os-keypairs',\n headers=header_params,\n body=body_params,\n timeout=kwargs.get('_request_timeout', None),\n _preload_content=kwargs.get('_preload_content', True))\n\n if return_data is not None:\n return_data = json.loads(return_data)\n else:\n return_data = {}\n if return_code not in [200, 201]:\n raise HuaweiCloudSDKException(\n return_code,\n \"Run create_key_pair failed, \"\n \"message=%s\" % return_data.get(\"message\"))\n return models.CreateKeyPairResp().deserialize(return_data)", "def keyify(self):\n return keyify_obj(self)", "def create_key_pair(self, key_name):\n response = key_pair.create_key_pair(self.url, self.verb, self.headers,\n self.version, key_name)\n if response is not None :\n res = CreateKeyPairResponse.CreateKeyPairResponse()\n parseString(str(response.text), res)\n return res\n else :\n return None", "def key_pair_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"key_pair_name\")", "def root(self):\n pairs = self.__pairs\n if len(pairs) <= 1:\n return self\n return Key(pairs=pairs[:1], app=self.__app, namespace=self.__namespace)", "def private_key(self):", "def create_keypair(econfig_file=None, region=None, keyname=\"bcbio\"):\n import boto\n import boto.ec2\n if econfig_file:\n keypair_dir = os.path.dirname(econfig_file).replace(\"elasticluster\", \"aws_keypairs\")\n else:\n keypair_dir = os.path.join(os.getcwd(), \"aws_keypairs\")\n if not os.path.exists(keypair_dir):\n os.makedirs(keypair_dir)\n private_key = os.path.join(os.path.join(keypair_dir, keyname))\n new_key = not os.path.exists(private_key)\n if new_key:\n cmd = [\"ssh-keygen\", \"-t\", \"rsa\", \"-N\", \"\", \"-f\", private_key, \"-C\", \"bcbio_aws_keypair\"]\n subprocess.check_call(cmd)\n public_key = private_key + \".pub\"\n if region:\n ec2 = boto.ec2.connect_to_region(region)\n else:\n ec2 = boto.connect_ec2()\n key = ec2.get_key_pair(keyname)\n if key and new_key:\n print(\"Non matching key %s found in AWS, removing.\" % keyname)\n ec2.delete_key_pair(keyname)\n key = None\n if not key:\n print(\"Key %s not found in AWS, importing created key\" % keyname)\n with open(public_key) as in_handle:\n body = in_handle.read()\n try:\n ec2.import_key_pair(keyname, body)\n except TypeError as e:\n body = body.encode('utf-8')\n ec2.import_key_pair(keyname, body)\n return {\"user_key_name\": keyname, \"user_key_private\": private_key,\n \"user_key_public\": public_key}", "def test_create_keypair_from_file(self):\n keys = RSA.generate(1024)\n nova_utils.save_keys_to_files(keys=keys, pub_file_path=pub_file_path)\n self.keypair_creator = create_keypairs.OpenStackKeypair(self.os_creds,\n create_keypairs.KeypairSettings(name=keypair_name,\n public_filepath=pub_file_path))\n self.keypair_creator.create()\n\n keypair = nova_utils.keypair_exists(self.keypair_creator.nova, self.keypair_creator.keypair)\n self.assertEquals(self.keypair_creator.keypair, keypair)\n\n file_key = open(os.path.expanduser(pub_file_path)).read()\n self.assertEquals(self.keypair_creator.keypair.public_key, file_key)" ]
[ "0.74607193", "0.71862775", "0.71742755", "0.7043159", "0.6730009", "0.6715334", "0.6648596", "0.66434497", "0.6613817", "0.6516933", "0.6420968", "0.6287733", "0.6248867", "0.6180987", "0.612444", "0.6101417", "0.60537064", "0.60257906", "0.60006696", "0.5963234", "0.5896387", "0.588707", "0.5877574", "0.5877217", "0.5872522", "0.5871128", "0.5856074", "0.58478945", "0.58408207", "0.5826726" ]
0.7459602
1
Sets the last_modification of this AdditionalInfoResponseTimestamps.
def last_modification(self, last_modification): self._last_modification = last_modification
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def last_modified(self, last_modified):\n\n self._last_modified = last_modified", "def last_modified_dts(self, last_modified_dts):\n\n self._last_modified_dts = last_modified_dts", "def last_updated(self, last_updated):\n\n self._last_updated = last_updated", "def last_updated(self, last_updated):\n\n self._last_updated = last_updated", "def last_updated(self, last_updated):\n\n self._last_updated = last_updated", "def last_updated(self, last_updated):\n\n self._last_updated = last_updated", "def last_modified_on(self, last_modified_on):\n\n self._last_modified_on = last_modified_on", "def last_updated(self, last_updated: str):\n\n self._last_updated = last_updated", "def lastmod_time(self, lastmod_time):\n\n self._lastmod_time = lastmod_time", "def last_update_timestamp(self, last_update_timestamp):\n\n self._last_update_timestamp = last_update_timestamp", "def last_update(self, last_update):\n\n self._last_update = last_update", "def modified_at(self, modified_at):\n\n self._modified_at = modified_at", "def modified_at(self, modified_at):\n\n self._modified_at = modified_at", "def last_modified_by(self, last_modified_by):\n\n self._last_modified_by = last_modified_by", "def last_modified_by(self, last_modified_by):\n\n self._last_modified_by = last_modified_by", "def last_modified_by(self, last_modified_by):\n if last_modified_by is not None and len(last_modified_by) > 100:\n raise ValueError(\"Invalid value for `last_modified_by`, length must be less than or equal to `100`\")\n\n self._last_modified_by = last_modified_by", "def file_last_updated(self, file_last_updated):\n\n self._file_last_updated = file_last_updated", "def date_modified(self, date_modified):\n \n self._date_modified = date_modified", "def last_updated(self, value):\n self._last_updated = value", "def date_modified(self, date_modified):\n\n self._date_modified = date_modified", "def date_modified(self, date_modified):\n\n self._date_modified = date_modified", "def date_modified(self, date_modified):\n\n self._date_modified = date_modified", "def date_modified(self, date_modified):\n\n self._date_modified = date_modified", "def date_modified(self, date_modified):\n\n self._date_modified = date_modified", "def date_modified(self, date_modified):\n\n self._date_modified = date_modified", "def date_modified(self, date_modified):\n\n self._date_modified = date_modified", "def setLastModified(when):", "def modified(self, modified):\n\n self._modified = modified", "def modification_time(self) -> str:\n return pulumi.get(self, \"modification_time\")", "def set_LastUpdatedAfter(self, value):\n super(ListOrdersInputSet, self)._set_input('LastUpdatedAfter', value)" ]
[ "0.72340417", "0.68768615", "0.6674735", "0.6674735", "0.6674735", "0.6674735", "0.6600833", "0.6493505", "0.63677263", "0.63109577", "0.6299651", "0.6257775", "0.6257775", "0.6247048", "0.6247048", "0.6176834", "0.6173237", "0.6095075", "0.6063976", "0.5996479", "0.5996479", "0.5996479", "0.5996479", "0.5996479", "0.5996479", "0.5996479", "0.5966445", "0.59175444", "0.59064615", "0.58938223" ]
0.79767364
0
Sets the availability of this AdditionalInfoResponseTimestamps.
def availability(self, availability): self._availability = availability
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_available(self):\n self._available = True\n self._timeout_stamp = datetime.now()", "def set_availability(self, available: bool) -> None:\n self._attr_available = available\n self.schedule_update_ha_state()", "def set_availability(self, available: bool) -> None:\n self._attr_available = available\n self.schedule_update_ha_state()", "def setAvailability(self):\n self.available = not self.available", "def set_time_available(self, new_value):\n\n self.available_at = new_value\n self.save()", "def description_availability(self, description_availability):\n\n self._description_availability = description_availability", "def prices_available(self, prices_available):\n\n self._prices_available = prices_available", "def available(self):\r\n\r\n self._available = True\r\n self.owner.trigger(\"on_available\")", "def available(self):\n last_update_success = super().available\n if last_update_success and self.vin not in self.coordinator.data:\n return False\n return last_update_success", "def set_available(self, state, request_info=False):\n if state == True:\n if self._available == False:\n self._available = True\n self.stick.logger.debug(\n \"Mark node %s available\",\n self.get_mac(),\n )\n self.do_callback(SENSOR_AVAILABLE[\"id\"])\n if request_info:\n self._request_info()\n else:\n if self._available == True:\n self._available = False\n self.stick.logger.debug(\n \"Mark node %s unavailable\",\n self.get_mac(),\n )\n self.do_callback(SENSOR_AVAILABLE[\"id\"])", "def availability(self) -> list:\n availability = self._availability\n return availability", "def set_available_time_slot():\n if request.content_type != 'application/json':\n error = json.dumps({'error': 'Invalid Content Type'})\n return make_response(error, 400, InterviewCalendarApi.HEADERS)\n\n data = request.json\n # For Temporary purpose, stored in flat file database\n with open(InterviewCalendarApi.DB_FILE, \"a+\") as fd:\n record = \"%s|%s|%s|%s\\n\" %(data[\"Category\"], data[\"Name\"],\n data[\"Email\"], \",\".join(data[\"AvailablityDateTime\"]))\n fd.write(record)\n msg = json.dumps({\"Status\": \"Success\"})\n return make_response(msg, 200, InterviewCalendarApi.HEADERS)", "def land_line_availability(self, land_line_availability):\n\n self._land_line_availability = land_line_availability", "def meta_available(self, meta_available):\n self._meta_available = meta_available", "def set_unavailable(self):\n self[\"available\"] = False", "def update_available(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"update_available\")", "def available(self):\n return self[\"available\"]", "def availability(self):\n availability_value = self._get_col(10).strip()\n if availability_value == 'Duration':\n return ''\n else:\n return availability_value", "def available(self) -> bool:\n return self._coordinator.last_update_success", "def get_availability(self, field_name='AVAILABILITY'):\n return self.get_default(field_name)", "def available(self):\n return self._coordinator.last_update_success", "def async_mark_unavailable(self):\n self._available = False", "def available(self) -> bool:\n return pulumi.get(self, \"available\")", "def add_not_available(self, start: datetime.datetime=None, end: datetime.datetime=None):\n if not start and not end:\n return\n elif start and not end:\n end = self.conference.ends_at\n elif end and not start:\n start = self.conference.starts_at\n self.unavailable.append((start, end))", "def available(self):\n return self.coordinator.last_update_success", "def available(self):\n return 
self.coordinator.last_update_success", "def available(self):\n return self.coordinator.last_update_success", "def available(self):\n return self.coordinator.last_update_success", "def available(self):\n return self.coordinator.last_update_success", "def available(self):\n return self.coordinator.last_update_success" ]
[ "0.63755864", "0.6079537", "0.6079537", "0.59843475", "0.5740844", "0.5688291", "0.56257373", "0.5434451", "0.5280206", "0.52409625", "0.5186938", "0.51610863", "0.5156062", "0.51475394", "0.5111984", "0.50719017", "0.50218046", "0.4982686", "0.49308842", "0.49190474", "0.49162304", "0.4905628", "0.48930657", "0.48864335", "0.4857392", "0.4857392", "0.4857392", "0.4857392", "0.4857392", "0.4857392" ]
0.67319524
0
Sets the validity of this AdditionalInfoResponseTimestamps.
def validity(self, validity): self._validity = validity
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validity(self, validity):\n if self.local_vars_configuration.client_side_validation and validity is None: # noqa: E501\n raise ValueError(\"Invalid value for `validity`, must not be `None`\") # noqa: E501\n\n self._validity = validity", "def setValid(self):\n self.valid = True", "def date_validity(self, date_validity):\n self._date_validity = date_validity", "def SetValid(self, valid):\r\n\r\n self._valid = valid", "def SetValid(self, valid):\r\n\r\n self._valid = valid", "def update_rates_valid_data(self):\n date_cet = str(self.response_data.json()[self.strs[jpn.key_date]])\n fmt = self.strs[jpn.key_date_format]\n date_time = arrow.get(date_cet, fmt)\n self.valid_from_utc = self.__class__.stamp_time(date_time)\n self.valid_to_utc = self.__class__.stamp_valid_to(self.valid_from_utc)\n\n prinf('%s valid_from_utc', self.valid_from_utc)\n prinf('%s valid_to_utc', self.valid_to_utc)\n prinf('%s now', arrow.utcnow())", "def ValidLifeTime(self):\n if self.force_auto_sync:\n self.get('ValidLifeTime')\n return self._ValidLifeTime", "def date_validity(self):\n return self._date_validity", "def allow_version_invalid_attributes(self):\n return self._allow_version_invalid_attributes", "def is_valid(self):\n return self.startTime <= ApiKey.get_now() < self.endTime", "def test_update_invalid_dates(self):\n expiration_datetime = datetime.now(pytz.utc)\n verification_deadline = datetime(year=1915, month=5, day=7, tzinfo=pytz.utc)\n response, __ = self._get_update_response_and_expected_data(expiration_datetime, verification_deadline)\n assert response.status_code == 400\n\n # Verify the error message is correct\n actual = json.loads(response.content.decode('utf-8'))\n expected = {\n 'non_field_errors': ['Verification deadline must be after the course mode upgrade deadlines.']\n }\n assert actual == expected", "def validate_timestamps(self, format, attr='timestamp'):\n for signal in self.last_notified[DEFAULT_TERMINAL]:\n datetime.strptime(getattr(signal, attr), format)", "def is_valid(self):\n return self.is_signed and not self.is_expired", "def set_creation_info(self, creation_ts, creation_seq):\n if not (creation_ts and (creation_ts > 0) and\n creation_seq and (creation_seq > 0)):\n raise ValueError\n \n self.creation_ts = creation_ts\n self.creation_seq = creation_seq\n \n return", "def is_valid(self, is_valid):\n\n self._is_valid = is_valid", "def is_valid(self, is_valid):\n\n self._is_valid = is_valid", "def getValidityFlag(self):\n return _libsbml.SBMLLevelVersionConverter_getValidityFlag(self)", "def Validate(self):\n return _gmat_py.Attitude_Validate(self)", "def is_valid(self):\n if self.hour < 0 or self.minute < 0 or self.second < 0:\n return False\n if self.minute >= 60 or self.second >= 60:\n return False\n return True", "def try_valid(ctx, fields):\n if fields.get(\"valid\") is None:\n return\n # parse at least the YYYY-mm-ddTHH:MM\n ts = datetime.datetime.strptime(fields[\"valid\"][:16], \"%Y-%m-%dT%H:%M\")\n ctx[\"valid\"] = utc(ts.year, ts.month, ts.day, ts.hour, ts.minute)", "def validate_lifetime(self, for_policy, policy_info):\n units = policy_info['lifetime']['units']\n if units != 'seconds':\n raise CsrValidationFailure(resource=for_policy,\n key='lifetime:units',\n value=units)\n value = policy_info['lifetime']['value']\n if (value < LIFETIME_LIMITS[for_policy]['min'] or\n value > LIFETIME_LIMITS[for_policy]['max']):\n raise CsrValidationFailure(resource=for_policy,\n key='lifetime:value',\n value=value)", "def valid(self):\n return self.expiry > timezone.now()", "def 
valid(self):\n return (self.get(\"~#mtime\", 0) and\n self[\"~#mtime\"] == util.mtime(self[\"~filename\"]))", "def validate(self):\n if self._inc_begin is None:\n raise ValueError((\"TimeRange {self} missing begin point\")\n .format(self=self))\n if self._exc_end is None:\n raise ValueError((\"TimeRange {self} missing end point\")\n .format(self=self))", "def validation_time(self):\n return self._validation_time", "def validate(self, attrs):\n verification_deadline = attrs.get('verification_deadline', None)\n\n if verification_deadline:\n upgrade_deadline = None\n\n # Find the earliest upgrade deadline\n for mode in attrs['modes']:\n expires = mode.get(\"expiration_datetime\")\n if expires:\n # If we don't already have an upgrade_deadline value, use datetime.max so that we can actually\n # complete the comparison.\n upgrade_deadline = min(expires, upgrade_deadline or datetime.max.replace(tzinfo=pytz.utc))\n\n # In cases where upgrade_deadline is None (e.g. the verified professional mode), allow a verification\n # deadline to be set anyway.\n if upgrade_deadline is not None and verification_deadline < upgrade_deadline:\n raise serializers.ValidationError(\n 'Verification deadline must be after the course mode upgrade deadlines.')\n\n return attrs", "def timestamp(self, value):\n value = util.parse_valid_date(value)\n self._set_attr('timestamp', value)", "def is_invalid(self):\n self._is_valid = False", "def valid_days(self, valid_days):\n\n self._valid_days = valid_days", "def is_valid(self):\n\n return True" ]
[ "0.5790018", "0.5446713", "0.539757", "0.5382739", "0.5382739", "0.5377016", "0.5358312", "0.5197369", "0.50806177", "0.50763357", "0.4945376", "0.49413902", "0.49362656", "0.48737335", "0.4814998", "0.4814998", "0.48023972", "0.47987747", "0.4789244", "0.4780952", "0.47636205", "0.47594064", "0.4745335", "0.47107592", "0.46933582", "0.46771777", "0.46759373", "0.4665997", "0.46624413", "0.46523932" ]
0.6337611
0
Initialize LSH algorithm with a hashing functor, descriptor index and hash nearestneighbor index. In order to provide outofthebox neighbor querying ability, at least the ``descriptor_index`` and ``hash2uuids_kvstore`` must be provided. The UIDs of descriptors in the ``descriptor_index`` should be fully mapped by the keyvalue mapping (``hash2uuids_kvstore``). If not, not all descriptors will be accessible via the nearestneighbor query (not referenced in ``hash2uuids_kvstore`` map), or the requested number of neighbors might not be returned (descriptors hashed in ``hash_index`` disjoint from ``descriptor_index``). An ``LSHNearestNeighborIndex`` instance is effectively readonly if any of its input structures (`descriptor_index`, `hash2uuids_kvstore`, `hash_index`) are readonly.
def __init__(self, lsh_functor, descriptor_index, hash2uuids_kvstore, hash_index=None, distance_method='cosine', read_only=False): super(LSHNearestNeighborIndex, self).__init__() # TODO(paul.tunison): Add in-memory empty defaults for # descriptor_index/hash2uuids_kvstore attributes. self.lsh_functor = lsh_functor self.descriptor_index = descriptor_index self.hash_index = hash_index # Will use with int|long keys and set[collection.Hashable] values. self.hash2uuids_kvstore = hash2uuids_kvstore self.distance_method = distance_method self.read_only = read_only # Lock for model component access (combination of descriptor-set, # hash_index and kvstore). Multiprocessing because resources can be # potentially modified on other processes. self._model_lock = multiprocessing.RLock() self._distance_function = self._get_dist_func(self.distance_method)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _new_lsh_index(self):\n minhashes = {}\n lsh = MinHashLSH(self._config.threshold, self._config.num_perm)\n\n # Event generator for streaming Elasticsearch results.\n events = self._datastore.search_stream(\n query_string=self._config.query,\n query_filter={},\n indices=[self._config.index],\n return_fields=[self._config.field]\n )\n\n with lsh.insertion_session() as lsh_session:\n for event in events:\n event_id = event['_id']\n index_name = event['_index']\n event_type = event['_type']\n event_text = event['_source'][self._config.field]\n\n # Insert minhash in LSH index\n key = (event_id, event_type, index_name)\n minhash = self._minhash_from_text(event_text)\n minhashes[key] = minhash\n lsh_session.insert(key, minhash)\n\n return lsh, minhashes", "def initialize_(self):\n super(NeighborhoodHash, self).initialize_()\n\n if not self.initialized_[\"random_seed\"]:\n seed(self.random_seed)\n self.initialized_[\"random_seed\"] = True\n\n if not self.initialized_[\"R\"]:\n if type(self.R) is not int or self.R <= 0:\n raise TypeError('R must be an intger bigger than zero')\n self.initialized_[\"R\"] = True\n\n if not self.initialized_[\"nh_type\"]:\n if self.nh_type == 'simple':\n self._noc_f = False\n self._NH = lambda G: self.neighborhood_hash_simple(G)\n elif self.nh_type == 'count_sensitive':\n self._noc_f = True\n self._NH = lambda G: self.neighborhood_hash_count_sensitive(G)\n else:\n raise TypeError('unrecognised neighborhood hashing type')\n self.initialized_[\"nh_type\"] = True\n\n if not self.initialized_[\"bits\"]:\n if type(self.bits) is not int or self.bits <= 0:\n raise TypeError('illegal number of bits for hashing')\n\n self._max_number = 1 << self.bits\n self._mask = self._max_number-1\n self.initialized_[\"bits\"] = True", "def test_count_empty_hash2uid(self):\n descr_set = MemoryDescriptorIndex()\n hash_kvs = MemoryKeyValueStore()\n self.assertEqual(descr_set.count(), 0)\n self.assertEqual(hash_kvs.count(), 0)\n\n lsh = LSHNearestNeighborIndex(DummyHashFunctor(), descr_set, hash_kvs)\n self.assertEqual(lsh.count(), 0)\n\n # Additions to the descriptor-set should not impact LSH index \"size\"\n lsh.descriptor_index.add_descriptor(DescriptorMemoryElement('t', 0))\n self.assertEqual(lsh.descriptor_index.count(), 1)\n self.assertEqual(lsh.hash2uuids_kvstore.count(), 0)\n self.assertEqual(lsh.count(), 0)\n\n lsh.descriptor_index.add_descriptor(DescriptorMemoryElement('t', 1))\n self.assertEqual(lsh.descriptor_index.count(), 2)\n self.assertEqual(lsh.hash2uuids_kvstore.count(), 0)\n self.assertEqual(lsh.count(), 0)\n\n lsh.hash2uuids_kvstore.add(0, {0})\n self.assertEqual(lsh.descriptor_index.count(), 2)\n self.assertEqual(lsh.count(), 1)\n\n lsh.hash2uuids_kvstore.add(0, {0, 1})\n self.assertEqual(lsh.descriptor_index.count(), 2)\n self.assertEqual(lsh.count(), 2)\n\n lsh.hash2uuids_kvstore.add(0, {0, 1, 2})\n self.assertEqual(lsh.descriptor_index.count(), 2)\n self.assertEqual(lsh.count(), 3)", "def __init__(self,\n n_jobs=None,\n normalize=False,\n verbose=False,\n random_seed=42,\n R=3,\n nh_type='simple',\n bits=8):\n super(NeighborhoodHash, self).__init__(n_jobs=n_jobs,\n normalize=normalize,\n verbose=False)\n\n self.random_seed = random_seed\n self.R = R\n self.nh_type = nh_type\n self.bits = bits\n self.initialized_.update({\"random_seed\": False, \"R\": False, \"nh_type\": False,\n \"bits\": False})", "def lsh_search(self,query_index, num_neighbors = 10):\r\n def l1(u,v):\r\n return dt.norm(np.array(u)-np.array(v), ord=1)\r\n \r\n start_time = time.time()\r\n 
#print(start_time)\r\n buckets = self.get_candidates(query_index)\r\n distance1 = buckets.map(lambda p : p + (l1(p[0],query_index[0]),))\r\n distance_sort = distance1.map(lambda y : (y[3],y[1]))\r\n distance_sorted = distance_sort.sortByKey()\r\n lsh_End_time = time.time()- start_time\r\n return (distance_sorted.take(num_neighbors),lsh_End_time)\r\n raise NotImplementedError", "def _build_index(self, descriptors):\n with self._model_lock:\n if self.read_only:\n raise ReadOnlyError(\"Cannot modify container attributes due to \"\n \"being in read-only mode.\")\n\n self._log.debug(\"Clearing and adding new descriptor elements\")\n self.descriptor_index.clear()\n self.descriptor_index.add_many_descriptors(descriptors)\n\n self._log.debug(\"Generating hash codes\")\n #: :type: collections.deque[numpy.ndarray[bool]]\n hash_vectors = collections.deque()\n self.hash2uuids_kvstore.clear()\n prog_reporter = ProgressReporter(self._log.debug, 1.0).start()\n for d in self.descriptor_index:\n h_vec = self.lsh_functor.get_hash(d.vector())\n hash_vectors.append(h_vec)\n\n h_int = bit_vector_to_int_large(h_vec)\n\n # Get, update and reinsert hash UUID set object\n #: :type: set\n hash_uuid_set = self.hash2uuids_kvstore.get(h_int, set())\n hash_uuid_set.add(d.uuid())\n self.hash2uuids_kvstore.add(h_int, hash_uuid_set)\n\n prog_reporter.increment_report()\n prog_reporter.report()\n\n if self.hash_index is not None:\n self._log.debug(\"Clearing and building hash index of type %s\",\n type(self.hash_index))\n # a build is supposed to clear previous state.\n self.hash_index.build_index(hash_vectors)", "def from_config(cls, config_dict, merge_default=True):\n # Controlling merge here so we can control known comment stripping from\n # default config.\n if merge_default:\n merged = cls.get_default_config()\n merge_dict(merged, config_dict)\n else:\n merged = config_dict\n\n merged['lsh_functor'] = \\\n plugin.from_plugin_config(merged['lsh_functor'],\n get_lsh_functor_impls())\n merged['descriptor_index'] = \\\n plugin.from_plugin_config(merged['descriptor_index'],\n get_descriptor_index_impls())\n\n # Hash index may be None for a default at-query-time linear indexing\n if merged['hash_index'] and merged['hash_index']['type']:\n merged['hash_index'] = \\\n plugin.from_plugin_config(merged['hash_index'],\n get_hash_index_impls())\n else:\n cls.get_logger().debug(\"No HashIndex impl given. 
Passing ``None``.\")\n merged['hash_index'] = None\n\n # remove possible comment added by default generator\n if 'hash_index_comment' in merged:\n del merged['hash_index_comment']\n\n merged['hash2uuids_kvstore'] = \\\n plugin.from_plugin_config(merged['hash2uuids_kvstore'],\n get_key_value_store_impls())\n\n return super(LSHNearestNeighborIndex, cls).from_config(merged, False)", "def test_remove_from_index_shared_hashes_partial(self):\n # Simulate initial state with some descriptor hashed to one value and\n # other descriptors hashed to another.\n\n descriptors = [\n DescriptorMemoryElement('t', 0),\n DescriptorMemoryElement('t', 1),\n DescriptorMemoryElement('t', 2),\n DescriptorMemoryElement('t', 3),\n DescriptorMemoryElement('t', 4),\n ]\n # Vectors of length 1 for easy dummy hashing prediction.\n for d in descriptors:\n d.set_vector(np.ones(1, float) * d.uuid())\n\n # Dummy hash function to do the simulated thing\n hash_func = DummyHashFunctor()\n hash_func.get_hash = mock.Mock(\n # Vectors of even sum hash to 0, odd to 1.\n side_effect=lambda vec: [vec.sum() % 2]\n )\n\n d_set = MemoryDescriptorIndex()\n d_set._table = {\n 0: descriptors[0],\n 1: descriptors[1],\n 2: descriptors[2],\n 3: descriptors[3],\n 4: descriptors[4],\n }\n\n hash2uid_kvs = MemoryKeyValueStore()\n hash2uid_kvs._table = {\n 0: {0, 2, 4},\n 1: {1, 3},\n }\n\n idx = LSHNearestNeighborIndex(hash_func, d_set, hash2uid_kvs)\n idx.hash_index = mock.Mock(spec=HashIndex)\n\n idx.remove_from_index([1, 2, 3])\n # Check that only one hash vector was passed to hash_index's removal\n # method (deque of hash-code vectors).\n idx.hash_index.remove_from_index.assert_called_once_with(\n collections.deque([\n [1],\n ])\n )\n self.assertDictEqual(d_set._table, {\n 0: descriptors[0],\n 4: descriptors[4],\n })\n self.assertDictEqual(hash2uid_kvs._table, {0: {0, 4}})", "def __init__(self, nearest_neighbor_vals, gc_init, at_init, sym_correct):\n \n self.nearestNeighbors = dict(nearest_neighbor_vals)\n self.gcInit = float(gc_init)\n self.atInit = float(at_init)\n self.symCorrection = float(sym_correct)", "def build_knn_index(self, data, min_n_neighbors=MIN_N_NEIGHBORS, rho=RHO):\n # Add one extra neighbor because querying on the points that are part of the KNN index will result in\n # the neighbor set containing the queried point. 
This can be removed from the query result\n if self.shared_nearest_neighbors:\n k = max(1 + self.n_neighbors_snn, min_n_neighbors)\n else:\n k = max(1 + self.n_neighbors, min_n_neighbors)\n\n # KNN index based on the primary distance metric\n if self.approx_nearest_neighbors:\n params = {\n 'metric': self.metric,\n 'metric_kwds': self.metric_kwargs,\n 'n_neighbors': k,\n 'rho': rho,\n 'random_state': self.seed_rng,\n 'n_jobs': self.n_jobs,\n 'low_memory': self.low_memory\n }\n index_knn_primary = NNDescent(data, **params)\n\n self.nn_indices, self.nn_distances = remove_self_neighbors(index_knn_primary._neighbor_graph[0],\n index_knn_primary._neighbor_graph[1])\n else:\n # Exact KNN graph\n index_knn_primary = NearestNeighbors(\n n_neighbors=k,\n algorithm='brute',\n metric=self.metric,\n metric_params=self.metric_kwargs,\n n_jobs=self.n_jobs\n )\n index_knn_primary.fit(data)\n\n self.nn_indices, self.nn_distances = remove_self_neighbors(\n *self._query(data, index_knn_primary, k)\n )\n\n if self.shared_nearest_neighbors:\n # Construct a second KNN index that uses the shared nearest neighbor distance\n data_neighbors = self.nn_indices[:, 0:self.n_neighbors_snn]\n if self.approx_nearest_neighbors:\n params = {\n 'metric': distance_SNN,\n 'n_neighbors': max(1 + self.n_neighbors, min_n_neighbors),\n 'rho': rho,\n 'random_state': self.seed_rng,\n 'n_jobs': self.n_jobs,\n 'low_memory': self.low_memory\n }\n index_knn_secondary = NNDescent(data_neighbors, **params)\n\n # Save the nearest neighbor information of the data used to build the KNN index\n self.nn_indices, self.nn_distances = remove_self_neighbors(index_knn_secondary._neighbor_graph[0],\n index_knn_secondary._neighbor_graph[1])\n else:\n index_knn_secondary = NearestNeighbors(\n n_neighbors=(1 + self.n_neighbors),\n algorithm='brute',\n metric=distance_SNN,\n n_jobs=self.n_jobs\n )\n index_knn_secondary.fit(data_neighbors)\n\n # Save the nearest neighbor information of the data used to build the KNN index\n self.nn_indices, self.nn_distances = remove_self_neighbors(\n *self._query(data_neighbors, index_knn_secondary, 1 + self.n_neighbors)\n )\n\n index_knn = [index_knn_primary, index_knn_secondary]\n else:\n index_knn = [index_knn_primary]\n\n return index_knn", "def __init__(self, config, random_seed=None, hash_function=HashFunction):\n assert len(config.value_functions) == 1, 'Now we support one ValueFunction.'\n self.config = config\n self.random_seed = random_seed\n random_state = np.random.RandomState(random_seed)\n self.sketch = np.zeros(\n tuple(len(i.distribution) for i in config.index_specs),\n dtype=np.int32)\n # We create config.num_hashes * #indexes hashes. Idealy we would\n # only need one hash per index dimension, but multiple makes the\n # implementation easier. 
There is probably a better way that\n # allows hash bits to be consumed as we traverse the indexes.\n\n # This is a list of list of hash functions where the sublists\n # correspond to each \"hash function\" that is requested in the\n # config\n self.hash_functions = []\n for _ in range(config.num_hashes):\n self.hash_functions.append([\n hash_function(random_state.randint(sys.maxsize))\n for _ in range(len(config.index_specs))\n ])", "def __init__(self, hash_dict: Dict, distance_function: Callable) -> None:\n self.hash_dict = hash_dict # database\n self.distance_function = distance_function\n self.all_keys = list(self.hash_dict.keys())\n self.ROOT = self.all_keys[0]\n self.all_keys.remove(self.ROOT)\n self.dict_all = {self.ROOT: BkTreeNode(self.ROOT, self.hash_dict[self.ROOT])}\n self.candidates = [self.dict_all[self.ROOT].node_name] # Initial value is root\n self.construct_tree()", "def _nn(self, d, n=1):\n self._log.debug(\"generating hash for descriptor\")\n d_v = d.vector()\n d_h = self.lsh_functor.get_hash(d_v)\n\n def comp_descr_dist(d2_v):\n return self._distance_function(d_v, d2_v)\n\n with self._model_lock:\n self._log.debug(\"getting near hashes\")\n hi = self.hash_index\n if hi is None:\n # Make on-the-fly linear index\n hi = LinearHashIndex()\n # not calling ``build_index`` because we already have the int\n # hashes.\n hi.index = numpy.array(list(self.hash2uuids_kvstore.keys()))\n near_hashes, _ = hi.nn(d_h, n)\n\n self._log.debug(\"getting UUIDs of descriptors for nearby hashes\")\n neighbor_uuids = []\n for h_int in map(bit_vector_to_int_large, near_hashes):\n # If descriptor hash not in our map, we effectively skip it.\n # Get set of descriptor UUIDs for a hash code.\n #: :type: set[collections.Hashable]\n near_uuids = self.hash2uuids_kvstore.get(h_int, set())\n # Accumulate matching descriptor UUIDs to a list.\n neighbor_uuids.extend(near_uuids)\n self._log.debug(\"-- matched %d UUIDs\", len(neighbor_uuids))\n\n self._log.debug(\"getting descriptors for neighbor_uuids\")\n neighbors = \\\n list(self.descriptor_index.get_many_descriptors(neighbor_uuids))\n\n # Done with model parts at this point, so releasing lock.\n\n self._log.debug(\"ordering descriptors via distance method '%s'\",\n self.distance_method)\n self._log.debug('-- getting element vectors')\n neighbor_vectors = elements_to_matrix(neighbors,\n report_interval=1.0)\n self._log.debug('-- calculating distances')\n distances = list(map(comp_descr_dist, neighbor_vectors))\n self._log.debug('-- ordering')\n ordered = sorted(zip(neighbors, distances),\n key=lambda p: p[1])\n self._log.debug('-- slicing top n=%d', n)\n return list(zip(*(ordered[:n])))", "def __init__(self, index, previous_hash, timestamp, payload, nonce):\n\n self.index = index\n self.previous_hash = previous_hash\n self.timestamp = timestamp\n self.payload = payload\n self.nonce = nonce\n self.hash = None", "def init_hash_uuid_lut(session, hashes):\n # Note: unhexlify is necessary since the database stores\n # binary representations of the hashes\n bin_hashes = [binascii.unhexlify(ahash.encode('utf-8'))\n for ahash in hashes]\n # print(\"==> Query hashes: {}\".format(bin_hashes))\n links = session.query(LinkageEntity).filter(\n LinkageEntity.linkage_hash.in_(bin_hashes)).all()\n\n # lut = defaultdict(lambda: [])\n lut = {}\n\n for ahash in hashes:\n # instantiate every bucket even if the hash has no record in the db\n lut[ahash] = []\n\n for link in links:\n # collect every link in the corresponding bucket\n lut[link.friendly_hash()].append(link)\n\n 
return lut", "def test_remove_from_index_shared_hashes(self):\n # Simulate descriptors all hashing to the same hash value: 0\n hash_func = DummyHashFunctor()\n hash_func.get_hash = mock.Mock(return_value=np.asarray([0], bool))\n\n d_set = MemoryDescriptorIndex()\n hash2uids_kvs = MemoryKeyValueStore()\n idx = LSHNearestNeighborIndex(hash_func, d_set, hash2uids_kvs)\n\n # Descriptors are 1 dim, value == index.\n descriptors = [\n DescriptorMemoryElement('t', 0),\n DescriptorMemoryElement('t', 1),\n DescriptorMemoryElement('t', 2),\n DescriptorMemoryElement('t', 3),\n DescriptorMemoryElement('t', 4),\n ]\n # Vectors of length 1 for easy dummy hashing prediction.\n for d in descriptors:\n d.set_vector(np.ones(1, float) * d.uuid())\n idx.build_index(descriptors)\n # We expect the descriptor-set and kvs to look like the following now:\n self.assertDictEqual(d_set._table, {\n 0: descriptors[0],\n 1: descriptors[1],\n 2: descriptors[2],\n 3: descriptors[3],\n 4: descriptors[4],\n })\n self.assertDictEqual(hash2uids_kvs._table, {0: {0, 1, 2, 3, 4}})\n\n # Mock out hash index as if we had an implementation so we can check\n # call to its remove_from_index method.\n idx.hash_index = mock.Mock(spec=HashIndex)\n\n idx.remove_from_index([2, 4])\n\n # Only uid 2 and 4 descriptors should be gone from d-set, kvs should\n # still have the 0 key and its set value should only contain uids 0, 1\n # and 3. `hash_index.remove_from_index` should not be called because\n # no hashes should be marked for removal.\n self.assertDictEqual(d_set._table, {\n 0: descriptors[0],\n 1: descriptors[1],\n 3: descriptors[3],\n })\n self.assertDictEqual(hash2uids_kvs._table, {0: {0, 1, 3}})\n idx.hash_index.remove_from_index.assert_not_called()", "def get_num_slots(self):\n Return the load factor for this hash table.\n\n Implement this.\n \"\"\"\n return self.elements / self.capacity\n\n\n def fnv1(self, key):\n \"\"\"\n FNV-1 Hash, 64-bit\n\n Implement this, and/or DJB2.pyy\n \"\"\"\n\n # Your code here\n\n\n def djb2(self, key):\n \"\"\"\n DJB2 hash, 32-bit\n\n Implement this, and/or FNV-1.\n \"\"\"\n # Your code here\n\n\n def hash_index(self, key):\n \"\"\"\n Take an arbitrary key and return a valid integer index\n between within the storage capacity of the hash table.\n \"\"\"\n #return self.fnv1(key) % self.capacity\n<<<<<<< Updated upstream\n return self.djb2(key) % self.capacity\n=======\n return self.djb2(key) % len(self.storage)\n>>>>>>> Stashed changes\n\n def put(self, key, value):\n \"\"\"\n Store the value with the given key.\n\n Hash collisions should be handled with Linked List Chaining.\n\n Implement this.\n \"\"\"\n<<<<<<< Updated upstream\n # Your code here\n=======\n # need to account for if the key value is the same \n\n i = self.hash_index(key)\n if not self.storage[i]:\n hte = HashTableEntry(key, value)\n self.storage[i] = hte\n self.elements += 1\n hte.head = HashTableEntry(key, value)\n elif self.storage[i] and self.storage[i].key != key:\n self.storage[i].insert_at_head(HashTableEntry(key, value))\n>>>>>>> Stashed changes\n\n\n def delete(self, key):\n \"\"\"\n Remove the value stored with the given key.\n\n Print a warning if the key is not found.\n\n Implement this.\n \"\"\"\n<<<<<<< Updated upstream\n # Your code here\n=======\n i = self.hash_index(key)\n node = self.storage[i]\n prev = None\n if node.key == key:\n self.storage[i] = node.next\n return\n while node != None:\n if node.key == key:\n prev.next = node.next\n self.storage[i].next = None\n return\n prev = node\n node = node.next\n 
self.elements -= 1\n return\n>>>>>>> Stashed changes\n\n\n def get(self, key):\n \"\"\"\n Retrieve the value stored with the given key.\n\n Returns None if the key is not found.\n\n Implement this.\n \"\"\"\n<<<<<<< Updated upstream\n # Your code here\n=======\n # - find the index in the hash table for the key\n i = self.hash_index(key)\n # - search the list for that key\n if not self.storage[i]:\n return None\n else:\n if self.storage[i].find_key(key) == key:\n return self.storage[i].value\n>>>>>>> Stashed changes\n\n\n def resize(self, new_capacity):\n \"\"\"\n Changes the capacity of the hash table and\n rehashes all key/value pairs.\n\n Implement this.\n \"\"\"\n<<<<<<< Updated upstream\n # Your code here\n=======\n prev_storage = self.storage\n self.capacity = new_cap\n self.storage = [None] * new_cap\n for i in range(len(prev_storage)):\n prev = prev_storage[i]\n if prev:\n while prev:\n if prev.key:\n self.put(prev.key, prev.value)\n prev = prev.next\n\n>>>>>>> Stashed changes\n\n\n\nif __name__ == \"__main__\":\n ht = HashTable(8)\n\n ht.put(\"line_1\", \"'Twas brillig, and the slithy toves\")\n ht.put(\"line_2\", \"Did gyre and gimble in the wabe:\")\n ht.put(\"line_3\", \"All mimsy were the borogoves,\")\n ht.put(\"line_4\", \"And the mome raths outgrabe.\")\n ht.put(\"line_5\", '\"Beware the Jabberwock, my son!')\n ht.put(\"line_6\", \"The jaws that bite, the claws that catch!\")\n ht.put(\"line_7\", \"Beware the Jubjub bird, and shun\")\n ht.put(\"line_8\", 'The frumious Bandersnatch!\"')\n ht.put(\"line_9\", \"He took his vorpal sword in hand;\")\n ht.put(\"line_10\", \"Long time the manxome foe he sought--\")\n ht.put(\"line_11\", \"So rested he by the Tumtum tree\")\n ht.put(\"line_12\", \"And stood awhile in thought.\")\n\n print(\"\")\n\n # Test storing beyond capacity\n for i in range(1, 13):\n print(ht.get(f\"line_{i}\"))\n\n # Test resizing\n old_capacity = ht.get_num_slots()\n ht.resize(ht.capacity * 2)\n new_capacity = ht.get_num_slots()\n\n print(f\"\\nResized from {old_capacity} to {new_capacity}.\\n\")\n\n # Test if data intact after resizing\n for i in range(1, 13):\n print(ht.get(f\"line_{i}\"))\n\n print(\"\")", "def __init__(self):\n self.size = 1000\n self.hash_table = [None] * self.size", "def __init__(self):\n # Length of hash table.\n self.length = 10 # type: int\n self.hashTable = [HeadNode() for i in range(self.length)] # type: List[HeadNode]\n\n # The following variables are used for implementing iterator.\n self.iter_head_node_index = 0 # type: int\n self.iter_chain_node_index = -1 # type: int\n self.iter_value_index = -1 # type: int\n # To store values and key if there are multiple values.\n self.iter_values = [] # type:List[valueType]\n # The reason for the assignment is the same as the key assignment in ChainNode.\n self.iter_key = -1 # type: keyType", "def _update_index(self, descriptors):\n with self._model_lock:\n if self.read_only:\n raise ReadOnlyError(\"Cannot modify container attributes due \"\n \"to being in read-only mode.\")\n # tee out iterable for use in adding to index as well as hash code\n # generation.\n d_for_index, d_for_hashing = itertools.tee(descriptors, 2)\n\n self._log.debug(\"Updating descriptor index.\")\n self.descriptor_index.add_many_descriptors(d_for_index)\n\n self._log.debug(\"Generating hash codes for new descriptors\")\n prog_reporter = ProgressReporter(self._log.debug, 1.0).start()\n #: :type: collections.deque[numpy.ndarray[bool]]\n hash_vectors = collections.deque() # for updating hash_index\n for d in 
d_for_hashing:\n h_vec = self.lsh_functor.get_hash(d.vector())\n hash_vectors.append(h_vec)\n h_int = bit_vector_to_int_large(h_vec)\n # Get, update and reinsert hash UUID set object\n #: :type: set\n hash_uuid_set = self.hash2uuids_kvstore.get(h_int, set())\n hash_uuid_set.add(d.uuid())\n self.hash2uuids_kvstore.add(h_int, hash_uuid_set)\n prog_reporter.increment_report()\n prog_reporter.report()\n\n if self.hash_index is not None:\n self._log.debug(\"Updating hash index structure.\")\n self.hash_index.update_index(hash_vectors)", "def __init__(self):\n self.space = 1000\n self.hash_table = [Node(-1, -1)] * self.space", "def __init__(self, index_name, height, disk_mode=True, leaves_path=None, use_similarity=False):\n self.root = None\n self.index_name = index_name\n self.height = height #to review\n self.disk_mode = disk_mode\n self.leaves_path = leaves_path\n self.use_similarity=use_similarity\n self.distance_computed = 0\n self.file_accessed = 0\n self.file_created = 0", "def build_index(dataset, n_neighbors):\n# Initialize FLANN\n pyflann.set_distance_type(distance_type='euclidean')\n flann = pyflann.FLANN()\n params = flann.build_index(dataset,algorithm='kdtree',trees=4)\n #print params\n nearest_neighbors, dists = flann.nn_index(dataset, n_neighbors, checks=params['checks'])\n return nearest_neighbors, dists", "def _get_single_direction_neighbors(object_idx, ui_v_dist, ui_h_dist):\n neighbor_dict = {}\n vertical_dist = ui_v_dist[object_idx]\n horizontal_dist = ui_h_dist[object_idx]\n bottom_neighbors = np.array([\n idx for idx in range(len(vertical_dist)) if vertical_dist[idx] > 0 and\n abs(horizontal_dist[idx]) < config.NORM_HORIZONTAL_NEIGHBOR_MARGIN\n ])\n top_neighbors = np.array([\n idx for idx in range(len(vertical_dist)) if vertical_dist[idx] < 0 and\n abs(horizontal_dist[idx]) < config.NORM_HORIZONTAL_NEIGHBOR_MARGIN\n ])\n right_neighbors = np.array([\n idx for idx in range(len(horizontal_dist)) if horizontal_dist[idx] > 0 and\n abs(vertical_dist[idx]) < config.NORM_VERTICAL_NEIGHBOR_MARGIN\n ])\n left_neighbors = np.array([\n idx for idx in range(len(horizontal_dist)) if horizontal_dist[idx] < 0 and\n abs(vertical_dist[idx]) < config.NORM_VERTICAL_NEIGHBOR_MARGIN\n ])\n\n if bottom_neighbors.size:\n neighbor_dict[NeighborContextDesc.TOP] = bottom_neighbors[np.argmin(\n vertical_dist[bottom_neighbors])]\n if top_neighbors.size:\n neighbor_dict[NeighborContextDesc.BOTTOM] = top_neighbors[np.argmax(\n vertical_dist[top_neighbors])]\n if right_neighbors.size:\n neighbor_dict[NeighborContextDesc.LEFT] = right_neighbors[np.argmin(\n horizontal_dist[right_neighbors])]\n if left_neighbors.size:\n neighbor_dict[NeighborContextDesc.RIGHT] = left_neighbors[np.argmax(\n horizontal_dist[left_neighbors])]\n\n return neighbor_dict", "def init_hash_state(self) -> None:\n self.hash_states = [hashlib.sha1()]", "def __init__(self, num_words, num_hash_functions, num_buckets, embedding_size, agg_function, use_cuda):\n super(HashEmbedding, self).__init__()\n self.num_words = num_words # K\n self.num_hash_functions = num_hash_functions # k\n self.num_buckets = num_buckets # B\n self.embedding_size = embedding_size # d\n self.W = nn.Parameter(torch.FloatTensor(\n num_buckets, embedding_size)) # B x d\n self.agg_func = agg_function\n self.hash_table = torch.LongTensor(np.random.randint(0, 2**30,\n size=(num_words, num_hash_functions))) % num_buckets # K x k\n\n self.P = nn.Parameter(torch.FloatTensor(\n num_words, num_hash_functions)) # K x k\n self.hash_table = self.hash_table.cuda() if use_cuda 
else self.hash_table", "def __init__(self, *args):\n _snap.TIntFltHI_swiginit(self, _snap.new_TIntFltHI(*args))", "def __init__(\n self,\n unique_id: str,\n zha_device: ZHADevice,\n cluster_handlers: list[ClusterHandler],\n **kwargs,\n ) -> None:\n super().__init__(unique_id, zha_device, cluster_handlers, **kwargs)\n self._on_off_cluster_handler = self.cluster_handlers[CLUSTER_HANDLER_ON_OFF]\n self._level_cluster_handler = self.cluster_handlers[CLUSTER_HANDLER_LEVEL]\n self._position: int | None = None\n self._is_open: bool | None = None", "def __init__(self, *args):\n _snap.TIntFltVHI_swiginit(self, _snap.new_TIntFltVHI(*args))", "def _initialize_hash(self):\n\n # action\n if isinstance(self.env.action_space, gym.spaces.Discrete):\n self._hash_action = lambda x: x\n elif isinstance(self.env.action_space, gym.spaces.Box):\n if self.__class__.__name__ == \"MCTS\":\n raise Exception(\"Cannot run vanilla MCTS on continuous actions\")\n else:\n self._hash_action = lambda x: tuple(x)\n else:\n mex = \"Action space has to be Discrete or Box, instead is {}\".format(type(self.env.action_space))\n raise TypeError(mex)\n\n # observation\n if isinstance(self.env.observation_space, gym.spaces.Discrete):\n self._hash_space = lambda x: x\n elif isinstance(self.env.observation_space, gym.spaces.Box):\n self._hash_space = lambda x: tuple(x)\n else:\n mex = \"Action space has to be Discrete or Box, instead is {}\".format(type(self.env.observation_space))\n raise TypeError(mex)" ]
[ "0.6456378", "0.60631484", "0.5868888", "0.5725651", "0.5480085", "0.54548675", "0.5306687", "0.5269298", "0.5245594", "0.5240167", "0.5228141", "0.5226449", "0.52048737", "0.51841885", "0.51767963", "0.51213276", "0.5079123", "0.5058296", "0.5052293", "0.5040301", "0.50329524", "0.5032658", "0.5015626", "0.49741894", "0.49444106", "0.49389258", "0.49350932", "0.49101418", "0.49014416", "0.48874077" ]
0.8218425
0
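A minimal, self-contained sketch of the invariant the query above describes: every descriptor UID held in the descriptor index should be reachable through the hash-to-UUID-set mapping, otherwise it cannot be returned by a nearest-neighbor query. This is illustrative only (plain dicts and sets, not the SMQTK plugin classes); the function name and arguments are invented for the example.

def find_unmapped_uids(descriptor_uids, hash2uuids):
    # descriptor_uids: iterable of UIDs stored in the descriptor index.
    # hash2uuids: dict mapping integer hash codes to sets of descriptor UIDs.
    mapped = set()
    for uid_set in hash2uuids.values():
        mapped.update(uid_set)
    # UIDs indexed but absent from the mapping are invisible to queries.
    return set(descriptor_uids) - mapped

# UID 3 is in the descriptor index but was never hashed, so it is unreachable.
assert find_unmapped_uids([1, 2, 3], {0b01: {1}, 0b10: {2}}) == {3}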
Internal method to be implemented by subclasses to build the index with the given descriptor data elements. Subsequent calls to this method should rebuild the current index. This method shall not add to the existing index nor raise an exception, so as to protect the current index.
def _build_index(self, descriptors): with self._model_lock: if self.read_only: raise ReadOnlyError("Cannot modify container attributes due to " "being in read-only mode.") self._log.debug("Clearing and adding new descriptor elements") self.descriptor_index.clear() self.descriptor_index.add_many_descriptors(descriptors) self._log.debug("Generating hash codes") #: :type: collections.deque[numpy.ndarray[bool]] hash_vectors = collections.deque() self.hash2uuids_kvstore.clear() prog_reporter = ProgressReporter(self._log.debug, 1.0).start() for d in self.descriptor_index: h_vec = self.lsh_functor.get_hash(d.vector()) hash_vectors.append(h_vec) h_int = bit_vector_to_int_large(h_vec) # Get, update and reinsert hash UUID set object #: :type: set hash_uuid_set = self.hash2uuids_kvstore.get(h_int, set()) hash_uuid_set.add(d.uuid()) self.hash2uuids_kvstore.add(h_int, hash_uuid_set) prog_reporter.increment_report() prog_reporter.report() if self.hash_index is not None: self._log.debug("Clearing and building hash index of type %s", type(self.hash_index)) # a build is supposed to clear previous state. self.hash_index.build_index(hash_vectors)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_index(self):\n self.rebuild_index()", "def build_index():\n pass", "def _update_index(self, descriptors):\n with self._model_lock:\n if self.read_only:\n raise ReadOnlyError(\"Cannot modify container attributes due \"\n \"to being in read-only mode.\")\n # tee out iterable for use in adding to index as well as hash code\n # generation.\n d_for_index, d_for_hashing = itertools.tee(descriptors, 2)\n\n self._log.debug(\"Updating descriptor index.\")\n self.descriptor_index.add_many_descriptors(d_for_index)\n\n self._log.debug(\"Generating hash codes for new descriptors\")\n prog_reporter = ProgressReporter(self._log.debug, 1.0).start()\n #: :type: collections.deque[numpy.ndarray[bool]]\n hash_vectors = collections.deque() # for updating hash_index\n for d in d_for_hashing:\n h_vec = self.lsh_functor.get_hash(d.vector())\n hash_vectors.append(h_vec)\n h_int = bit_vector_to_int_large(h_vec)\n # Get, update and reinsert hash UUID set object\n #: :type: set\n hash_uuid_set = self.hash2uuids_kvstore.get(h_int, set())\n hash_uuid_set.add(d.uuid())\n self.hash2uuids_kvstore.add(h_int, hash_uuid_set)\n prog_reporter.increment_report()\n prog_reporter.report()\n\n if self.hash_index is not None:\n self._log.debug(\"Updating hash index structure.\")\n self.hash_index.update_index(hash_vectors)", "def build(self):\n\t\tself.documents = self.get_items_to_index()\n\t\tself.build_index()", "def build_index(self):\n \n \n geoids = self.partitions.find_or_new(table='facilities_geoids')\n addresses = self.partitions.find_or_new(table='facilities_addresses')\n facilities = self.partitions.find(table='facilities')\n \n facilities.attach(addresses,'addresses')\n facilities.attach(geoids,'geoids')\n \n q = \"\"\"\n SELECT year, type, oshpd_id, facility_name, dba_city, dba_zip_code, blockgroup_gvid, tract_gvid, county_gvid\n FROM facilities\n JOIN geoids.facilities_geoids AS geoids ON geoids.facilities_id = facilities.id\n JOIN addresses.facilities_addresses AS addresses ON addresses.facilities_id = facilities.id\n \"\"\"\n \n p = self.partitions.find_or_new(table='facilities_index')\n p.clean()\n lr = self.init_log_rate()\n \n with p.inserter() as ins:\n for row in facilities.query(q):\n ins.insert(row)\n lr(str(p.identity))", "def create_index():", "def build_index(self):\r\n date_time('Building indexes in citations table')\r\n self.cursor.execute('DROP INDEX IF EXISTS IDX_citations ;')\r\n self.cursor.execute('CREATE INDEX IDX_citations ON citations (citation);')\r\n self.conn.commit()\r\n gc.collect()", "def build_index(self):\n\t\tix = self.create_index()\n\t\twriter = AsyncWriter(ix)\n\n\t\tfor i, document in enumerate(self.documents):\n\t\t\tif document:\n\t\t\t\twriter.add_document(**document)\n\t\t\tupdate_progress_bar(\"Building Index\", i, len(self.documents))\n\n\t\twriter.commit(optimize=True)", "def build_index(self):\n self.create_index()\n logger.debug(f\"Building index with {self.n_trees} trees.\")\n\n for i in range(len(self.corpus_embeddings)):\n self.index.add_item(i, self.corpus_embeddings[i])\n self.index.build(self.n_trees)", "def build_index(self):\n # Init the HNSWLIB index\n self.create_index()\n logger.info(f\"Building HNSWLIB index, max_elements: {len(self.corpus)}\")\n logger.debug(f\"Parameters Required: M: {self.M}\")\n logger.debug(f\"Parameters Required: ef_construction: {self.ef_construction}\")\n logger.debug(f\"Parameters Required: ef(>topn): {self.ef}\")\n\n # Then we train the index to find a suitable clustering\n self.index.add_items(self.corpus_embeddings, 
list(range(len(self.corpus_embeddings))))", "def create_index(self):\n self.send_robust(self.es_index, data=self.es_meta)\n self.set_index_normal_settings()", "def create_index(self):\r\n\r\n #### Begin functionality here\r\n\r\n return()", "def build_index(self, dict_pg_info, list_insert):\n flag_exit = True\n if flag_exit is False:\n self.create_new_index(dict_pg_info)\n self.insert_index(dict_pg_info, list_insert)", "def indices(self):\n if self._indices is None:\n i = []\n\n # TODO: this is not right for multi-column keys\n # TODO: new style indexes\n\n global_name = '^DD(%s,0,\"IX\",\"0\")' % self.fileid\n prefix = '^DD(%s,0,\"IX\",' % self.fileid\n while 1:\n global_name = M.mexec('set s0=$query(%s)' % global_name, M.INOUT(\"\"))[0]\n if not global_name or not global_name.startswith(prefix):\n break\n suffix = global_name[len(prefix):-1]\n parts = suffix.split(\",\")\n idx_name = parts[0][1:-1]\n idx_table = parts[1]\n idx_columns = parts[2:]\n index = Index(idx_name, idx_table, idx_columns)\n i.append(index)\n\n # A second list, gives indices for a field\n columns = {}\n for idx in i:\n for c in idx.columns:\n columns[c] = 1\n\n # Now trawl the listed columns in the data dictionary, and load their\n # cross references.\n cr_names = {}\n for c in columns.keys():\n idx_root = M.Globals[\"^DD\"][self.fileid][c][1]\n if not idx_root[0].exists():\n continue\n for cr_id, val in idx_root.keys_with_decendants():\n if float(cr_id) > 0:\n cr_header = idx_root[cr_id][0].value\n parts = cr_header.split(\"^\")\n if len(parts) == 2 and parts[1]: # if more than 2 parts, assume MUMPs trigger\n f = cr_names.get(parts[1], list())\n f.append(c)\n cr_names[parts[1]] = f\n\n # Now, just delete items from the index list if they are not in cr_names\n self._indices = []\n for index in i:\n cr = cr_names.get(index.name)\n if cr:\n # verify columns - lots of errors in real systems\n if len(cr) == len(index.columns):\n invalid = False\n for c in cr:\n if c not in index.columns:\n invalid = True\n continue\n if not invalid:\n self._indices.append(index)\n\n return self._indices", "def _create_indices(cls):\r\n from thunderdome.connection import _hosts, _index_all_fields, create_key_index\r\n \r\n if not _hosts: return\r\n for column in cls._columns.values():\r\n if column.index or _index_all_fields:\r\n create_key_index(column.db_field_name)", "def _initIndexes(self):\n class Record:\n \"\"\" a moron simple object for carrying the 'extra'-payload to index\n constructors\n \"\"\"\n def __init__(self, **kw):\n self.__dict__.update(kw)\n\n addIndex = self.addIndex\n addColumn = self.addColumn\n\n # Content indexes\n self._catalog.indexes.clear()\n for (index_name, index_type, extra) in self.enumerateIndexes():\n if extra is None:\n addIndex( index_name, index_type)\n else:\n if isinstance(extra, StringTypes):\n p = Record(indexed_attrs=extra)\n elif isinstance(extra, DictType):\n p = Record(**extra)\n else:\n p = Record()\n addIndex( index_name, index_type, extra=p )\n\n # Cached metadata\n self._catalog.names = ()\n self._catalog.schema.clear()\n for column_name in self.enumerateColumns():\n addColumn( column_name )", "def rebuild_index():\n print('Building indexes...')\n print(data_fldr)\n ndx = []\n for root, _, files in os.walk(data_fldr):\n for f in files:\n if f[-3:].upper() in ['CSV','TXT']:\n ndx.extend(get_index_terms(root + os.sep + f))\n with open(ndx_file, 'w') as fio:\n for i in ndx:\n fio.write(i + '\\n')", "def build_index():\n print \"building index..\"\n\n index_dir = INDEX_DIR_CODE\n if 
TEST_COLLECTION:\n index_dir = INDEX_DIR_TEST\n CR_DOCS_DB.drop()\n CR_DOCS_DB.ensure_index(\"code_id\", unique=True)\n if os.path.exists(index_dir):\n shutil.rmtree(index_dir)\n os.mkdir(index_dir)\n schema = get_schema()\n storage = FileStorage(index_dir)\n ix = storage.create_index(schema)\n w = ix.writer()\n print \"finding posts..\"\n posts_with_code = POSTS_DB.find({\"answers.Body\": {\"$regex\": \"/.*<code>.*/\"}}, timeout=False)\n print \"adding files..\"\n q = add_from_file(w) if TEST_COLLECTION else 0\n for i, question in enumerate(posts_with_code):\n if TEST_COLLECTION:\n q += add_one_code(w, question, q)\n if q > 999:\n break\n else:\n q += add_doc(w, question)\n if i % 1000 == 0 and not i == 0:\n print \"commit number:\", str(i/1000), \"with\", q, \"codes\"\n w.commit()\n w = ix.writer()\n\n w.commit()\n posts_with_code.close()\n print \"the index was built!\"\n return ix", "def init_index(self):\n raise NotImplementedError", "def index(self, index):\n index.column_protein[self.column].add((self.protein,self.protein_res))\n index.protein_domain[(self.protein.id,self.protein_res)] = (self.domain,self.domain_res)\n index.domain_structure[(self.domain.id,self.domain_res)].add((self.structure,self.structure_res))\n index.structure[(self.structure.index, self.structure_res)] = self", "def create_index(self, builder: Builder, group_entries: bool = True,\n _fixre: re.Pattern = re.compile(r'(.*) ([(][^()]*[)])'),\n ) -> list[tuple[str, list[tuple[str, Any]]]]:\n new: dict[str, list] = {}\n\n rel_uri: str | Literal[False]\n index_domain = self.env.domains['index']\n for docname, entries in index_domain.entries.items():\n try:\n rel_uri = builder.get_relative_uri('genindex', docname)\n except NoUri:\n rel_uri = False\n\n # new entry types must be listed in directives/other.py!\n for entry_type, value, target_id, main, category_key in entries:\n uri = rel_uri is not False and f'{rel_uri}#{target_id}'\n try:\n if entry_type == 'single':\n try:\n entry, sub_entry = _split_into(2, 'single', value)\n except ValueError:\n entry, = _split_into(1, 'single', value)\n sub_entry = ''\n _add_entry(entry, sub_entry, main,\n dic=new, link=uri, key=category_key)\n elif entry_type == 'pair':\n first, second = _split_into(2, 'pair', value)\n _add_entry(first, second, main,\n dic=new, link=uri, key=category_key)\n _add_entry(second, first, main,\n dic=new, link=uri, key=category_key)\n elif entry_type == 'triple':\n first, second, third = _split_into(3, 'triple', value)\n _add_entry(first, second + ' ' + third, main,\n dic=new, link=uri, key=category_key)\n _add_entry(second, third + ', ' + first, main,\n dic=new, link=uri, key=category_key)\n _add_entry(third, first + ' ' + second, main,\n dic=new, link=uri, key=category_key)\n elif entry_type == 'see':\n first, second = _split_into(2, 'see', value)\n _add_entry(first, _('see %s') % second, None,\n dic=new, link=False, key=category_key)\n elif entry_type == 'seealso':\n first, second = _split_into(2, 'see', value)\n _add_entry(first, _('see also %s') % second, None,\n dic=new, link=False, key=category_key)\n else:\n logger.warning(__('unknown index entry type %r'), entry_type,\n location=docname)\n except ValueError as err:\n logger.warning(str(err), location=docname)\n\n for (targets, sub_items, _category_key) in new.values():\n targets.sort(key=_key_func_0)\n for (sub_targets, _0, _sub_category_key) in sub_items.values():\n sub_targets.sort(key=_key_func_0)\n\n new_list = sorted(new.items(), key=_key_func_1)\n\n if group_entries:\n # fixup entries: 
transform\n # func() (in module foo)\n # func() (in module bar)\n # into\n # func()\n # (in module foo)\n # (in module bar)\n old_key = ''\n old_sub_items: dict[str, list] = {}\n i = 0\n while i < len(new_list):\n key, (targets, sub_items, category_key) = new_list[i]\n # cannot move if it has sub_items; structure gets too complex\n if not sub_items:\n m = _fixre.match(key)\n if m:\n if old_key == m.group(1):\n # prefixes match: add entry as subitem of the\n # previous entry\n old_sub_items.setdefault(\n m.group(2), [[], {}, category_key])[0].extend(targets)\n del new_list[i]\n continue\n old_key = m.group(1)\n else:\n old_key = key\n old_sub_items = sub_items\n i += 1\n\n return [(key_, list(group))\n for (key_, group) in groupby(new_list, _key_func_3)]", "def build_indexer(self, with_distances = False):\n assert self.is_fitted_\n if self.missing_action_ == \"divide\":\n raise ValueError(\"Cannot build tree indexer when using missing_action='divide'.\")\n if self.new_categ_action_ == \"weighted\" and self.categ_split_type_ != \"single_categ\":\n if self._ncols_categ or self.cols_categ_.shape[0]:\n raise ValueError(\"Cannot build tree indexer when using new_categ_action='weighted'.\")\n self._cpp_obj.build_tree_indices(self._is_extended_, bool(with_distances), _process_nthreads(self.nthreads))\n return self", "async def build_secret_index(self):\n pass", "def build_index(self):\n url = self.start_url\n\n # Search from last available date if not rebuilding and index is not empty\n if not self.rebuild > 0:\n recent_filings = self.get_most_recent_filings()\n pdt = recent_filings[0].date_filing\n # Reformat date to SEC format MM/DD/YYYY\n formatted_date = f\"{pdt:02}/{pdt:02}/{pdt.year}\"\n url = self.url_str.format(domain=self.domain_name, start=formatted_date, end=defaults['end_date'])\n\n page_counter = 0\n entries_counter = 0\n\n print(f\"{ats()} Starting index build...\" if self.rebuild else f\"{ats()} Starting index update...\")\n # Iterate through search results pages until no Next button found\n while True:\n page = self.load_page(url)\n # Scrape, parse and record into database current search results page\n entries_counter += self.scrape_page(page)\n page_counter += 1\n print(f\"{ats()} Scraped results page {page_counter}, {entries_counter} entries...\")\n # Get url of next search results page\n url = self.get_next(page)\n if url is None:\n # Exit loop if no more search results\n break\n if self.n_limit and entries_counter >= self.n_limit:\n # Exit if reached user-specified limit\n break\n\n # Do some reporting\n if self.rebuild:\n print(f'{ats()} Index built! Total {page_counter} search result pages scraped. '\n f'{entries_counter} index entries created.')\n else:\n print(f'{ats()} Index updated! Total {page_counter} search result page(s) scraped. 
'\n f'{entries_counter} index entries (re)added.')", "def _es_push_indexes(self, content):\n for c in self.es_clients:\n c.create_index(content)", "def __reduce__(self):\n d = {\n \"levels\": list(self.levels),\n \"codes\": list(self.codes),\n \"sortorder\": self.sortorder,\n \"names\": list(self.names),\n }\n return ibase._new_Index, (type(self), d), None", "def build_coverage_index(\n self,\n data_pack: DataPack,\n outer_type: Type[Annotation],\n inner_type: Type[EntryType]):\n if not isinstance(inner_type, (Annotation, Link, Group)):\n raise ValueError(f\"Do not support coverage index for {inner_type}.\")\n\n if not self.coverage_index_is_valid:\n self._coverage_index = dict()\n\n # prevent the index from being used during construction\n self.deactivate_coverage_index()\n\n self._coverage_index[(outer_type, inner_type)] = dict()\n for range_annotation in data_pack.get_entries_by_type(outer_type):\n if isinstance(range_annotation, Annotation):\n entries = data_pack.get(inner_type, range_annotation)\n entry_ids = {e.tid for e in entries}\n self._coverage_index[\n (outer_type, inner_type)][range_annotation.tid] = entry_ids\n\n self.activate_coverage_index()", "def index_schema_builder(table):\n conn = table.parent.parent.connection\n\n idx = OrderedDict()\n indexes = conn.execute(\"SHOW INDEXES FROM `%s`.`%s`\" % (table.parent.name, table.name))\n\n if not indexes:\n return idx\n\n for index in indexes:\n n = index['Key_name']\n if n not in idx:\n indexitem = IndexSchema(name=n, parent=table)\n indexitem.non_unique = (bool(index['Non_unique'])) # == not unique\n indexitem.table_name = index['Table']\n\n key_type = index['Index_type'].upper()\n\n if index['Key_name'].upper() == \"PRIMARY\":\n indexitem.kind = \"PRIMARY\"\n elif not indexitem.non_unique:\n indexitem.kind = \"UNIQUE\"\n elif key_type in ('FULLTEXT', 'SPATIAL'):\n indexitem.kind = key_type\n else:\n indexitem.kind = \"INDEX\"\n\n if key_type in ('BTREE', 'HASH', 'RTREE'):\n indexitem.type = key_type\n\n indexitem.collation = index['Collation']\n indexitem.comment = index['Comment']\n\n idx[n] = indexitem\n\n if index['Column_name'] not in idx[n].fields:\n idx[n].fields.insert(index['Seq_in_index'], (index['Column_name'], index['Sub_part'] or 0))\n\n return idx", "def build_single_file_index(cls, index_path, d):\n index = json.load(open(index_path))\n info_list = cls.list_from_index_path(index_path)\n\n sub_d = d\n for entry in info_list:\n if entry[0] not in sub_d:\n sub_d[entry[0]] = {}\n if entry[1] not in sub_d[entry[0]]:\n sub_d[entry[0]][entry[1]] = {}\n sub_d = sub_d[entry[0]][entry[1]]\n\n current_dir = os.path.dirname(index_path)\n rel_dirname = os.path.relpath(current_dir, paths.db_root)\n if 'files' in index:\n for name, file in list(index['files'].items()):\n sub_d[name] = os.path.join(rel_dirname, file)\n if 'info' in index:\n sub_d.update(index['info'])", "def create_indices():\n destroy_indices()\n\n ActionDocument._index.create(ignore=[400, 404])\n ClassificationDocument._index.create(ignore=[400, 404])\n FunctionDocument._index.create(ignore=[400, 404])\n PhaseDocument._index.create(ignore=[400, 404])\n RecordDocument._index.create(ignore=[400, 404])\n\n yield\n\n destroy_indices()" ]
[ "0.72547895", "0.6932546", "0.6901113", "0.65515655", "0.6488637", "0.63804775", "0.63155717", "0.6282419", "0.6277858", "0.6277478", "0.61938465", "0.6155492", "0.6088519", "0.6073517", "0.60093737", "0.6001238", "0.59837264", "0.5975098", "0.5934633", "0.592091", "0.5897822", "0.5859265", "0.568866", "0.56558865", "0.5651716", "0.5644329", "0.5602532", "0.55776924", "0.55554134", "0.5542402" ]
0.76118064
0
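The build routine above reduces to: clear previous state, hash every descriptor vector to a bit vector, pack the bits into an integer key, and accumulate the descriptor's UID under that key before handing the hash vectors to the optional hash index. A stripped-down sketch of that loop, using a toy hash function and plain dicts in place of the key-value store (all names here are illustrative):

def bits_to_int(bits):
    # Pack a sequence of 0/1 bits (most significant bit first) into an int.
    value = 0
    for b in bits:
        value = (value << 1) | int(b)
    return value

def build_hash_map(descriptors, hash_fn):
    # descriptors: iterable of (uid, vector) pairs.
    # hash_fn: maps a vector to a sequence of 0/1 bits.
    hash2uuids = {}
    for uid, vec in descriptors:
        key = bits_to_int(hash_fn(vec))
        hash2uuids.setdefault(key, set()).add(uid)
    return hash2uuids

# Toy hash: one bit per component, set when the component is positive.
toy_hash = lambda vec: [1 if x > 0 else 0 for x in vec]
print(build_hash_map([(0, [0.2, -0.1]), (1, [0.3, -0.4])], toy_hash))
# {2: {0, 1}} -- both vectors land in the same bucket.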
Internal method to be implemented by subclasses to additively update the current index with the one or more descriptor elements given. If no index exists yet, a new one should be created using the given descriptors.
def _update_index(self, descriptors): with self._model_lock: if self.read_only: raise ReadOnlyError("Cannot modify container attributes due " "to being in read-only mode.") # tee out iterable for use in adding to index as well as hash code # generation. d_for_index, d_for_hashing = itertools.tee(descriptors, 2) self._log.debug("Updating descriptor index.") self.descriptor_index.add_many_descriptors(d_for_index) self._log.debug("Generating hash codes for new descriptors") prog_reporter = ProgressReporter(self._log.debug, 1.0).start() #: :type: collections.deque[numpy.ndarray[bool]] hash_vectors = collections.deque() # for updating hash_index for d in d_for_hashing: h_vec = self.lsh_functor.get_hash(d.vector()) hash_vectors.append(h_vec) h_int = bit_vector_to_int_large(h_vec) # Get, update and reinsert hash UUID set object #: :type: set hash_uuid_set = self.hash2uuids_kvstore.get(h_int, set()) hash_uuid_set.add(d.uuid()) self.hash2uuids_kvstore.add(h_int, hash_uuid_set) prog_reporter.increment_report() prog_reporter.report() if self.hash_index is not None: self._log.debug("Updating hash index structure.") self.hash_index.update_index(hash_vectors)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _build_index(self, descriptors):\n with self._model_lock:\n if self.read_only:\n raise ReadOnlyError(\"Cannot modify container attributes due to \"\n \"being in read-only mode.\")\n\n self._log.debug(\"Clearing and adding new descriptor elements\")\n self.descriptor_index.clear()\n self.descriptor_index.add_many_descriptors(descriptors)\n\n self._log.debug(\"Generating hash codes\")\n #: :type: collections.deque[numpy.ndarray[bool]]\n hash_vectors = collections.deque()\n self.hash2uuids_kvstore.clear()\n prog_reporter = ProgressReporter(self._log.debug, 1.0).start()\n for d in self.descriptor_index:\n h_vec = self.lsh_functor.get_hash(d.vector())\n hash_vectors.append(h_vec)\n\n h_int = bit_vector_to_int_large(h_vec)\n\n # Get, update and reinsert hash UUID set object\n #: :type: set\n hash_uuid_set = self.hash2uuids_kvstore.get(h_int, set())\n hash_uuid_set.add(d.uuid())\n self.hash2uuids_kvstore.add(h_int, hash_uuid_set)\n\n prog_reporter.increment_report()\n prog_reporter.report()\n\n if self.hash_index is not None:\n self._log.debug(\"Clearing and building hash index of type %s\",\n type(self.hash_index))\n # a build is supposed to clear previous state.\n self.hash_index.build_index(hash_vectors)", "def index_object(idxs=None):", "def add_many_descriptors(self, descriptors):", "def _create_update_index(self) -> Result[Ok, Err]:\n collection_status = self.collection\n if collection_status.is_err():\n return collection_status\n collection: MongoCollection = collection_status.ok()\n\n def check_index_keys(current_keys, new_index_keys):\n current_keys.sort()\n new_index_keys.sort()\n return current_keys == new_index_keys\n\n syft_obj = self.settings.object_type\n\n unique_attrs = getattr(syft_obj, \"__attr_unique__\", [])\n object_name = syft_obj.__canonical_name__\n\n new_index_keys = [(attr, ASCENDING) for attr in unique_attrs]\n\n try:\n current_indexes = collection.index_information()\n except BaseException as e:\n return Err(str(e))\n index_name = f\"{object_name}_index_name\"\n\n current_index_keys = current_indexes.get(index_name, None)\n\n if current_index_keys is not None:\n keys_same = check_index_keys(current_index_keys[\"key\"], new_index_keys)\n if keys_same:\n return Ok()\n\n # Drop current index, since incompatible with current object\n try:\n collection.drop_index(index_or_name=index_name)\n except Exception:\n return Err(\n f\"Failed to drop index for object: {object_name} with index keys: {current_index_keys}\"\n )\n\n # If no new indexes, then skip index creation\n if len(new_index_keys) == 0:\n return Ok()\n\n try:\n collection.create_index(new_index_keys, unique=True, name=index_name)\n except Exception:\n return Err(\n f\"Failed to create index for {object_name} with index keys: {new_index_keys}\"\n )\n\n return Ok()", "def _Dynamic_UpdateIndex(self, index, void, request_id=None):\n self._RemoteSend(index, void, \"UpdateIndex\", request_id)\n return", "def _recompute_indexes(self, first_index=0, free_index=None):\n if free_index is None:\n free_index = self.index + 1\n\n # Cleanup the linkable_vars for all the pulses which will be reindexed.\n linked_vars = self.root.linkable_vars\n for var in linked_vars[:]:\n if var[0].isdigit() and int(var[0]) >= free_index:\n linked_vars.remove(var)\n\n for item in self.items[first_index:]:\n\n item.index = free_index\n prefix = '{}_'.format(free_index)\n linkable_vars = [prefix + var for var in item.linkable_vars]\n linked_vars.extend(linkable_vars)\n\n if isinstance(item, Sequence):\n item.unobserve('_last_index', 
self._item_last_index_updated)\n item._recompute_indexes()\n item.observe('_last_index', self._item_last_index_updated)\n free_index = item._last_index + 1\n\n # We have a non indexed item (pulse or template).\n else:\n free_index += 1\n\n self._last_index = free_index - 1", "def update_idx(self):\n self.idx = (self.F * self.FMUL +\n self.E * self.EMUL +\n self.Z * self.ZMUL +\n self.A * self.AMUL +\n self.B * self.BMUL )", "def update_index(self, ref_gen):\n testing = True\n logging.warning('Updating index')\n es_insert.index(es, ref_gen, self.index_name, testing, action=\"update\")\n logging.warning('Finished updating')", "def _add_to_index_operations(self, which, reconstrained, what, warning):\n if warning and reconstrained.size > 0:\n # TODO: figure out which parameters have changed and only print those\n print(\"WARNING: reconstraining parameters {}\".format(self.hierarchy_name() or self.name))\n index = self._raveled_index()\n which.add(what, index)\n return index", "def _add_to_index_operations(self, which, reconstrained, what, warning):\n if warning and reconstrained.size > 0:\n # TODO: figure out which parameters have changed and only print those\n print(\"WARNING: reconstraining parameters {}\".format(self.hierarchy_name() or self.name))\n index = self._raveled_index()\n which.add(what, index)\n return index", "def set_index(self, idx, rel, attrs):\n\n query = 'CREATE INDEX {} ON {} ({})'.format(idx, rel, ','.join(attrs))\n\n with self.tpch_cxn.cursor() as curs:\n try:\n curs.execute(query)\n except pg.ProgrammingError as e:\n print(e)", "def index(self, index):\n index.column_protein[self.column].add((self.protein,self.protein_res))\n index.protein_domain[(self.protein.id,self.protein_res)] = (self.domain,self.domain_res)\n index.domain_structure[(self.domain.id,self.domain_res)].add((self.structure,self.structure_res))\n index.structure[(self.structure.index, self.structure_res)] = self", "def __generate_features_index__(self, feature_names, dictionaries):\n keys = []\n for name, dictionary in zip(feature_names, dictionaries):\n features = []\n for feature in dictionary.keys():\n if dictionary.get(feature) > self._cutoff:\n features.append((name, feature))\n self.feature_freq[name] += 1\n keys.extend(features)\n for i in range(len(keys)):\n self._features_index[keys[i]] = i\n self.features_list = tuple(keys)\n self._features_vector_length = len(keys)", "def exercise_indexes():\n print(exercise_indexes.__doc__)\n print(\"The indexes of 'data' are:\", data.index)\n print(data, \"\\n\")\n print(\"Changing the indexes of 'data'\")\n print(data.reindex([2, 0, 1]), \"\\n\")\n print(\"Changing the indexes of 'data' randomly\")\n print(data.reindex(np.random.permutation(data.index)))", "def reindex(self):\n self._index = {w: i for i, w in enumerate(self._words)}\n self.n, self.d = self._vecs.shape\n assert self.n == len(self._words) == len(self._index)\n self._neighbors = None", "def init_index(self):\n raise NotImplementedError", "def priority_update(self, indices, priorities):\n for i, p in zip(indices, priorities):\n self.tree.val_update(i, p**self.alpha)", "def add_many_descriptors(self, descriptors):\n documents = []\n for d in descriptors:\n doc = self._doc_for_code_descr(d)\n doc[self.descriptor_field] = cPickle.dumps(d, self.pickle_protocol)\n doc[self.timestamp_field] = time.time()\n documents.append(doc)\n self.solr.add_many(documents)\n if self.commit_on_add:\n self.solr.commit()", "def _setitem2d(self, index, value):\n ix = index[0]\n iz = index[2]\n\n lovects = 
self._getlovects()\n hivects = self._gethivects()\n fields = self._getfields()\n\n if len(fields[0].shape) > self.dim:\n ncomps = fields[0].shape[-1]\n else:\n ncomps = 1\n\n if len(index) > self.dim:\n if ncomps > 1:\n ic = index[2]\n else:\n raise Exception('Too many indices given')\n else:\n ic = None\n\n nx = hivects[0,:].max() - self.nghosts\n nz = hivects[2,:].max() - self.nghosts\n\n # --- Add extra dimensions so that the input has the same number of\n # --- dimensions as array.\n if isinstance(value, np.ndarray):\n value3d = np.array(value, copy=False)\n sss = list(value3d.shape)\n if not isinstance(ix, slice): sss[0:0] = [1]\n if not isinstance(iz, slice): sss[1:1] = [1]\n value3d.shape = sss\n\n if isinstance(ix, slice):\n ixstart = max(ix.start or -self.nghosts, -self.nghosts)\n ixstop = min(ix.stop or nx + 1 + self.nghosts, nx + self.overlaps[0] + self.nghosts)\n else:\n ixstart = ix\n ixstop = ix + 1\n if isinstance(iz, slice):\n izstart = max(iz.start or -self.nghosts, -self.nghosts)\n izstop = min(iz.stop or nz + 1 + self.nghosts, nz + self.overlaps[2] + self.nghosts)\n else:\n izstart = iz\n izstop = iz + 1\n\n for i in range(len(fields)):\n\n # --- The ix1, 2 etc are relative to global indexing\n ix1 = max(ixstart, lovects[0,i])\n ix2 = min(ixstop, lovects[0,i] + fields[i].shape[0])\n iz1 = max(izstart, lovects[2,i])\n iz2 = min(izstop, lovects[2,i] + fields[i].shape[2])\n\n if ix1 < ix2 and iz1 < iz2:\n\n sss = (slice(ix1 - lovects[0,i], ix2 - lovects[0,i]),\n slice(iz1 - lovects[2,i], iz2 - lovects[2,i]))\n if ic is not None:\n sss = tuple(list(sss) + [ic])\n\n if isinstance(value, np.ndarray):\n vslice = (slice(ix1 - ixstart, ix2 - ixstart),\n slice(iz1 - izstart, iz2 - izstart))\n fields[i][sss] = value3d[vslice]\n else:\n fields[i][sss] = value", "def priority_update(self, indices, priorities):\n for i, p in zip(indices, priorities):\n self.tree.val_update(i, p ** self.alpha)", "def priority_update(self, indices, priorities):\n for i, p in zip(indices, priorities):\n self.tree.val_update(i, p ** self.alpha)", "def build_index(self):\n self.rebuild_index()", "def setSpecificIndices(\n self,\n indicesOfFreeBetas,\n indicesOfFixedBetas,\n indicesOfRandomVariables,\n indicesOfDraws,\n ):\n for e in self.children:\n e.setSpecificIndices(\n indicesOfFreeBetas,\n indicesOfFixedBetas,\n indicesOfRandomVariables,\n indicesOfDraws,\n )", "def index_update(tensor, indices, values):\n tensor[indices] = values\n return tensor", "def __checkFeatureIndex__(self, index, indexes):\n if index is not False:\n indexes.append(index)", "def __index__(self, *args, **kwargs): # real signature unknown\n pass", "def __index__(self, *args, **kwargs): # real signature unknown\n pass", "def __index__(self, *args, **kwargs): # real signature unknown\n pass", "def __index__(self, *args, **kwargs): # real signature unknown\n pass", "def __index__(self, *args, **kwargs): # real signature unknown\n pass" ]
[ "0.7290595", "0.5932061", "0.58223623", "0.5640807", "0.56363523", "0.56255984", "0.55559283", "0.5552263", "0.55315506", "0.55315506", "0.5506679", "0.54696727", "0.5453186", "0.5447808", "0.54461026", "0.54410577", "0.54367346", "0.54315275", "0.54052234", "0.54015446", "0.54015446", "0.5400177", "0.5388684", "0.53796726", "0.53629935", "0.5331688", "0.5331688", "0.5331688", "0.5331688", "0.5331688" ]
0.83484805
0
Remove descriptors from this index associated with the given UIDs.
def _remove_from_index(self, uids): with self._model_lock: if self.read_only: raise ReadOnlyError("Cannot modify container attributes due " "to being in read-only mode.") uids = list(uids) # Remove UIDs from our hash2uid-kvs # - get the hash for each input UID's descriptor, remove UID from # recorded association set. # - `get_many_descriptors` fails when bad UIDs are provided # (KeyError). self._log.debug("Removing hash2uid entries for UID's descriptors") h_vectors = collections.deque() h_ints = collections.deque() for d in self.descriptor_index.get_many_descriptors(uids): h_vec = self.lsh_functor.get_hash(d.vector()) h_vectors.append(h_vec) h_int = bit_vector_to_int_large(h_vec) h_ints.append(h_int) # If we're here, then all given UIDs mapped to an indexed # descriptor. Proceed with removal from hash2uids kvs. If a hash # no longer maps anything, remove that hash from the hash index if # we have one. hashes_for_removal = collections.deque() for uid, h_int, h_vec in zip(uids, h_ints, h_vectors): # noinspection PyUnresolvedReferences new_uid_set = self.hash2uuids_kvstore.get(h_int) - {uid} # If the resolved UID set is not empty re-add it, otherwise # remove the if new_uid_set: self.hash2uuids_kvstore.add(h_int, new_uid_set) else: hashes_for_removal.append(h_vec) self.hash2uuids_kvstore.remove(h_int) # call remove-from-index on hash-index if we have one and there are # hashes to be removed. if self.hash_index and hashes_for_removal: self.hash_index.remove_from_index(hashes_for_removal) # Remove descriptors from our set matching the given UIDs. self.descriptor_index.remove_many_descriptors(uids)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_descriptor(self, uuid):\n self.remove_many_descriptors([uuid])", "def remove_many_descriptors(self, uuids):", "def remove_descriptor(self, uuid):", "def remove_many_descriptors(self, uuids):\n # Chunk up operation based on max clauses available to us\n\n def batch_op(_batch):\n \"\"\"\n :param _batch: UIDs to remove from index.\n :type _batch: collections.Iterable[collections.Hashable]\n \"\"\"\n uuid_query = ' OR '.join([self.d_uid_field + (':%s' % str(_uid))\n for _uid in _batch])\n self.solr.delete(\"%s:%s AND (%s)\"\n % (self.index_uuid_field, self.index_uuid,\n uuid_query))\n\n batch = collections.deque()\n for uid in uuids:\n batch.append(uid)\n\n # Will end up using max_clauses-1 OR statements, and one AND\n if len(batch) == self.max_boolean_clauses:\n batch_op(batch)\n batch.clear()\n\n # tail batch\n if batch:\n batch_op(batch)", "def remove(self, ids, delete_data=False):\n self._rpc_version_warning(3)\n self._request('torrent-remove',\n {'delete-local-data':rpc_bool(delete_data)}, ids, True)", "async def remove(self, container, uids):", "def test_remove_from_index_shared_hashes(self):\n # Simulate descriptors all hashing to the same hash value: 0\n hash_func = DummyHashFunctor()\n hash_func.get_hash = mock.Mock(return_value=np.asarray([0], bool))\n\n d_set = MemoryDescriptorIndex()\n hash2uids_kvs = MemoryKeyValueStore()\n idx = LSHNearestNeighborIndex(hash_func, d_set, hash2uids_kvs)\n\n # Descriptors are 1 dim, value == index.\n descriptors = [\n DescriptorMemoryElement('t', 0),\n DescriptorMemoryElement('t', 1),\n DescriptorMemoryElement('t', 2),\n DescriptorMemoryElement('t', 3),\n DescriptorMemoryElement('t', 4),\n ]\n # Vectors of length 1 for easy dummy hashing prediction.\n for d in descriptors:\n d.set_vector(np.ones(1, float) * d.uuid())\n idx.build_index(descriptors)\n # We expect the descriptor-set and kvs to look like the following now:\n self.assertDictEqual(d_set._table, {\n 0: descriptors[0],\n 1: descriptors[1],\n 2: descriptors[2],\n 3: descriptors[3],\n 4: descriptors[4],\n })\n self.assertDictEqual(hash2uids_kvs._table, {0: {0, 1, 2, 3, 4}})\n\n # Mock out hash index as if we had an implementation so we can check\n # call to its remove_from_index method.\n idx.hash_index = mock.Mock(spec=HashIndex)\n\n idx.remove_from_index([2, 4])\n\n # Only uid 2 and 4 descriptors should be gone from d-set, kvs should\n # still have the 0 key and its set value should only contain uids 0, 1\n # and 3. 
`hash_index.remove_from_index` should not be called because\n # no hashes should be marked for removal.\n self.assertDictEqual(d_set._table, {\n 0: descriptors[0],\n 1: descriptors[1],\n 3: descriptors[3],\n })\n self.assertDictEqual(hash2uids_kvs._table, {0: {0, 1, 3}})\n idx.hash_index.remove_from_index.assert_not_called()", "def _remove_by_rids(self, rids):\n self._database_writeable_check()\n self.graph.client.command(\"\"\"delete vertex {}\"\"\".format(\n ','.join(rids)))", "def removeKeys(self, attributeIndex, view) -> None:\n ...", "def _RemoveUsers(self, remove_users):\n for username in remove_users:\n self.utils.RemoveUser(username)\n self.user_ssh_keys.pop(username, None)\n self.invalid_users -= set(remove_users)", "def remove_feature_accessors(obj, feats: FeaturesTuple):\n for feat in feats:\n try:\n delattr(obj, feat.get_name())\n\n except AttributeError:\n pass", "def remove_users(self, *users):\r\n pass", "def distribution_removed(self, uid):\n with self.__lock:\n for store in self._stores:\n store.remove(uid)", "def remove(self, *args):\n return _libsbml.ListOfSpeciesTypeComponentIndexes_remove(self, *args)", "def unregister(self, rtypes=None, accessors=None):\n\n if rtypes is not None:\n for rtype in rtypes:\n del self[rtype]\n\n if accessors is not None:\n for accessor in accessors:\n for rtype in accessor.__rtypes__:\n if rtype in self:\n del self[rtype]", "def clear_indexes(self):\n for keypoints in self:\n keypoints.clear_index()", "def delete_many(self, keys):\n raise NotImplementedError()", "def drop_indices(self, df) -> None:\n assert self.is_appropriate_data_instance(df)\n # no operation needed", "def remove_descriptor(self, fileno):\n self.listeners[READ].pop(fileno, None)\n self.listeners[WRITE].pop(fileno, None)\n self.secondaries[READ].pop(fileno, None)\n self.secondaries[WRITE].pop(fileno, None)", "def remove(self, *args):\n return _libsbml.ListOfDeletions_remove(self, *args)", "def remove_users(self, *users):\r\n entries = CourseAccessRole.objects.filter(\r\n user__in=users, role=self._role_name, org=self.org, course_id=self.course_key\r\n )\r\n entries.delete()\r\n for user in users:\r\n if hasattr(user, '_roles'):\r\n del user._roles", "def remove():", "def remove(self, indices):\n if isinstance(indices, collections.Iterable):\n particles = [self.particles[i] for i in indices]\n else:\n particles = self.particles[indices]\n self.particles = np.delete(self.particles, indices)\n if self.ptype.uses_jit:\n self._particle_data = np.delete(self._particle_data, indices)\n # Update C-pointer on particles\n for p, pdata in zip(self.particles, self._particle_data):\n p._cptr = pdata\n return particles", "def remove(self, indices):\n if isinstance(indices, Iterable):\n particles = [self.particles[i] for i in indices]\n else:\n particles = self.particles[indices]\n self.particles = np.delete(self.particles, indices)\n if True: # self.ptype.uses_jit:\n self._particle_data = np.delete(self._particle_data, indices)\n # Update C-pointer on particles\n for p, pdata in zip(self.particles, self._particle_data):\n p._cptr = pdata\n return particles", "def deindex(self, values=None):\n if values is None:\n values = self.proxy_get()\n for value in values:\n self.deindex_value(value)", "def remove(self, *names):\n for name in names:\n self._storage.pop(name, None)", "def removeIrisToOcc(self):\n\t\tshas = self._getShapes()\n\t\tfor sha in shas:\n\t\t\tif sha.a.iris_Occ.exists:\n\t\t\t\tsha.a.iris_Occ.delete()", "def remove_indexes(self, indexes):\n # Create a set of the rows (as 
int) to delete\n selected_rows = set()\n for index in indexes:\n selected_rows.add(index.row())\n\n # Delete all of them one by one (easy but maybe not the best performance-wise)\n for index, row in enumerate(sorted(selected_rows)):\n self.removeRow(row - index) # The actual target row to be removed decreases by one when a previous is removed", "def delete_at_index(self, idx):\n del self.timeseries[idx]\n del self.freq[idx]\n del self.ch_name[idx]\n del self.units[idx]\n\n if self.trigger_idx == idx:\n LGR.warning(\"Removing trigger channel - are you sure you are doing\" \"the right thing?\")\n self.trigger_idx = 0", "def handle_remove(self):\r\n self.del_common()" ]
[ "0.70615095", "0.704063", "0.6577618", "0.63564056", "0.5699009", "0.56984663", "0.5652547", "0.5646998", "0.5586256", "0.5575667", "0.55703604", "0.55475426", "0.5403445", "0.53883266", "0.5363939", "0.5342305", "0.53421175", "0.533988", "0.5328503", "0.52953845", "0.5286483", "0.528458", "0.5278575", "0.5263941", "0.52536196", "0.5249393", "0.5209864", "0.5201535", "0.5196506", "0.5185156" ]
0.83948106
0
Internal method to be implemented by subclasses to return the nearest `N` neighbors to the given descriptor element. When this internal method is called, we have already checked that there is a vector in ``d`` and our index is not empty.
def _nn(self, d, n=1): self._log.debug("generating hash for descriptor") d_v = d.vector() d_h = self.lsh_functor.get_hash(d_v) def comp_descr_dist(d2_v): return self._distance_function(d_v, d2_v) with self._model_lock: self._log.debug("getting near hashes") hi = self.hash_index if hi is None: # Make on-the-fly linear index hi = LinearHashIndex() # not calling ``build_index`` because we already have the int # hashes. hi.index = numpy.array(list(self.hash2uuids_kvstore.keys())) near_hashes, _ = hi.nn(d_h, n) self._log.debug("getting UUIDs of descriptors for nearby hashes") neighbor_uuids = [] for h_int in map(bit_vector_to_int_large, near_hashes): # If descriptor hash not in our map, we effectively skip it. # Get set of descriptor UUIDs for a hash code. #: :type: set[collections.Hashable] near_uuids = self.hash2uuids_kvstore.get(h_int, set()) # Accumulate matching descriptor UUIDs to a list. neighbor_uuids.extend(near_uuids) self._log.debug("-- matched %d UUIDs", len(neighbor_uuids)) self._log.debug("getting descriptors for neighbor_uuids") neighbors = \ list(self.descriptor_index.get_many_descriptors(neighbor_uuids)) # Done with model parts at this point, so releasing lock. self._log.debug("ordering descriptors via distance method '%s'", self.distance_method) self._log.debug('-- getting element vectors') neighbor_vectors = elements_to_matrix(neighbors, report_interval=1.0) self._log.debug('-- calculating distances') distances = list(map(comp_descr_dist, neighbor_vectors)) self._log.debug('-- ordering') ordered = sorted(zip(neighbors, distances), key=lambda p: p[1]) self._log.debug('-- slicing top n=%d', n) return list(zip(*(ordered[:n])))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_nearest_neighbour_1d(self):\n x = np.array([2., 1., 4., 5., 3.])\n x_new = np.array([-3, 0, 1.2, 3, 3, 2.5, 4.7, 6])\n val, ind = _nearest_neighbour_1d(x, x_new)\n np.testing.assert_array_equal(val, [1., 1., 1., 3., 3., 2., 5., 5.])\n np.testing.assert_array_equal(ind, [1, 1, 1, 4, 4, 0, 3, 3])", "def get_n_nearest_neighbors(self, query, n_neighbors):\n if not isinstance(n_neighbors, int) or n_neighbors < 1:\n raise ValueError('n_neighbors must be strictly positive integer')\n neighbors = vptree._AutoSortingList(max_size=n_neighbors)\n nodes_to_visit = [(self, 0)]\n furthest_d = np.inf\n while len(nodes_to_visit) > 0:\n node, d0 = nodes_to_visit.pop(0)\n if node is None or d0 > furthest_d:\n continue\n d = self.dist_fn(query, node.vp)\n if d <= furthest_d: #Replaced < with <=\n neighbors.append((d, node.vp))\n furthest_d, _ = neighbors[-1]\n if node._is_leaf():\n continue\n if node.left_min <= d <= node.left_max:\n nodes_to_visit.insert(0, (node.left, 0))\n elif node.left_min - furthest_d <= d <= node.left_max + furthest_d:\n nodes_to_visit.append((node.left,\n node.left_min - d if d < node.left_min\n else d - node.left_max))\n if node.right_min <= d <= node.right_max:\n nodes_to_visit.insert(0, (node.right, 0))\n elif node.right_min - furthest_d <= d <= node.right_max + furthest_d:\n nodes_to_visit.append((node.right,\n node.right_min - d if d < node.right_min\n else d - node.right_max))\n if len(neighbors) == 0:\n neighbors = [(np.nan, point) for point in self.points[:n_neighbors]] #Return any point(s) if query contains np.nan\n return list(neighbors)", "def n_nearest_sparse(self, query, n=1):\n if n <= 1:\n return [self.nearest_sparse(query)]\n self.best_dist = float(\"inf\")\n self.best_elements = [(None, self.best_dist)] # guardian element\n self.n = n\n self._register_best_element = self._register_best_element_multi\n self._nearest_sparse_recursive(self._sparse2seq(query), self.root, 0.0)\n return self.best_elements", "def nearest_neigh(self, atom):\n atoms = self.hutch.get_atoms_in_same_hutch(atom)[:]\n if atom in atoms: atoms.remove(atom)\n\n # This generation of nearby hutches isn't perfect but it will work\n rots = [(1,0,0),(0,1,0),(0,0,1)]\n i = 0\n while len(atoms) == 0:\n hutch = ((hutch[0]+rots[i][0])%self.hutch.nhutchs,(hutch[1]+rots[i][1])%self.hutch.nhutchs,(hutch[2]+rots[i][2])%self.hutch.nhutchs)\n i = (i+1) % 3\n atoms = self.hutch.hutchs[hutch]\n if atom in atoms: atoms.remove(atom)\n start = atoms[0]\n\n atoms = self.get_atoms_in_cutoff(atom,self.dist(atom,start))\n #if atom in atoms: atoms.remove(atom)\n d = float(\"inf\")\n for atomi in atoms:\n dt = self.dist(atom,atomi)\n if dt < d:\n d = dt\n a = atomi\n return a", "def _get_single_direction_neighbors(object_idx, ui_v_dist, ui_h_dist):\n neighbor_dict = {}\n vertical_dist = ui_v_dist[object_idx]\n horizontal_dist = ui_h_dist[object_idx]\n bottom_neighbors = np.array([\n idx for idx in range(len(vertical_dist)) if vertical_dist[idx] > 0 and\n abs(horizontal_dist[idx]) < config.NORM_HORIZONTAL_NEIGHBOR_MARGIN\n ])\n top_neighbors = np.array([\n idx for idx in range(len(vertical_dist)) if vertical_dist[idx] < 0 and\n abs(horizontal_dist[idx]) < config.NORM_HORIZONTAL_NEIGHBOR_MARGIN\n ])\n right_neighbors = np.array([\n idx for idx in range(len(horizontal_dist)) if horizontal_dist[idx] > 0 and\n abs(vertical_dist[idx]) < config.NORM_VERTICAL_NEIGHBOR_MARGIN\n ])\n left_neighbors = np.array([\n idx for idx in range(len(horizontal_dist)) if horizontal_dist[idx] < 0 and\n abs(vertical_dist[idx]) < 
config.NORM_VERTICAL_NEIGHBOR_MARGIN\n ])\n\n if bottom_neighbors.size:\n neighbor_dict[NeighborContextDesc.TOP] = bottom_neighbors[np.argmin(\n vertical_dist[bottom_neighbors])]\n if top_neighbors.size:\n neighbor_dict[NeighborContextDesc.BOTTOM] = top_neighbors[np.argmax(\n vertical_dist[top_neighbors])]\n if right_neighbors.size:\n neighbor_dict[NeighborContextDesc.LEFT] = right_neighbors[np.argmin(\n horizontal_dist[right_neighbors])]\n if left_neighbors.size:\n neighbor_dict[NeighborContextDesc.RIGHT] = left_neighbors[np.argmax(\n horizontal_dist[left_neighbors])]\n\n return neighbor_dict", "def _compute_euclidean_neigh_matrix(src, d_matrix, radius):\n\n n_max = 100\n n_min = 3\n reached_points = np.array([0])\n counter = 0\n n_neigh = []\n list_neigh = []\n\n while counter < reached_points.shape[0] < src.shape[0]:\n P = reached_points[counter]\n aux = np.array(sorted(\n np.where(d_matrix[P] <= radius)[0],\n key=lambda k: d_matrix[P, k]))\n n_neigh.append(aux.shape[0])\n\n # Check the number of neighbours\n if n_neigh[-1] < n_min:\n raise ValueError('Computation of neighbours aborted since '\n 'their minimum number is too small.\\n'\n 'Please choose a higher radius.')\n elif n_neigh[-1] > n_max:\n raise ValueError('Computation of neighbours aborted since'\n 'their maximum number is too big.\\n'\n 'Please choose a lower radius.')\n list_neigh.append(aux)\n reached_points = np.append(reached_points,\n aux[~np.in1d(aux, reached_points)])\n counter += 1\n\n if counter >= reached_points.shape[0]:\n raise ValueError('Too small value of the radius:'\n 'the neighbour-matrix is not connected')\n elif src.shape[0] == reached_points.shape[0]:\n while counter < src.shape[0]:\n P = reached_points[counter]\n aux = np.array(sorted(\n np.where(d_matrix[P] <= radius)[0],\n key=lambda k: d_matrix[P, k]))\n n_neigh.append(aux.shape[0])\n\n if n_neigh[-1] < n_min:\n raise ValueError('Computation of neighbours aborted since '\n 'their minimum number is too small.\\n'\n 'Please choose a higher radius.')\n elif n_neigh[-1] > n_max:\n raise ValueError('Computation of neighbours aborted since'\n 'their maximum number is too big.\\n'\n 'Please choose a lower radius.')\n\n list_neigh.append(aux)\n counter += 1\n\n n_neigh_max = max(n_neigh)\n n_matrix = np.zeros([src.shape[0],\n n_neigh_max], dtype=int) - 1\n for i in range(src.shape[0]):\n n_matrix[i, 0:list_neigh[i].shape[0]] = list_neigh[i]\n index_ord = np.argsort(n_matrix[:, 0])\n n_matrix = n_matrix[index_ord]\n return n_matrix\n else:\n raise RuntimeError(\"Some problems during\"\n \"computation of neighbours.\")", "def get_nearest(self, vector, limit):\n raise NotImplementedError", "def nearest_neighbor(data):\n features = set([i for i, x in enumerate(data[0][1])])\n return leave_one_out_cross_validation(data, features)", "def get_neighbor(x, n, data):\n pad_width = np.ceil(n / 2).astype(np.int32)\n padded = np.pad(data, pad_width, mode='edge')\n x += pad_width\n\n idxes = get_neighbor_idxes(x, n, len(padded))\n ret = padded[idxes]\n return idxes - pad_width, ret", "def nearest_neighbor(self, xRand):\n # TODO: Make this more efficient?\n #within a neighborhood of XRand, determine the lowest cost to go\n minCost = np.inf\n minNode = None\n\n for node in self.Tree:\n\n cost = self.compute_dist(node.state_time[0:6], xRand)\n\n if cost < minCost:\n minNode = node\n minCost = cost\n\n return minNode", "def nearest_neighbor(self, xRand):\n # TODO: Make this more efficient?\n #within a neighborhood of XRand, determine the lowest cost to go\n minCost = np.inf\n 
minNode = None\n\n for node in self.Tree:\n\n cost = self.compute_dist(node.state_time[0:6], xRand)\n\n if cost < minCost:\n minNode = node\n minCost = cost\n\n return minNode", "def nearest(self, query):\n nearest_trees = list(map(lambda t: t.get_nearest_neighbor(query), self.trees))\n distances_pool = list(zip(map(lambda x: self.dist_fn(x, query), self.pool), self.pool))\n best = None\n best_cost = np.inf\n for cost, near in nearest_trees + distances_pool:\n if cost <= best_cost:\n best = near\n best_cost = cost\n return best", "def nearest_sparse(self, query):\n self.best_dist = float(\"inf\")\n self.best_element = None\n self._register_best_element = self._register_best_element_single \n self._nearest_sparse_recursive(self._sparse2seq(query), self.root, 0.0)\n return self.best_element,self.best_dist", "def get_neighb_dist(self, i, ci):\n ri = self.xyz[i]\n j = self.conn[i][ci]\n rj = self.xyz[j].copy()\n if self.periodic:\n if self.use_pconn:\n img = self.pconn[i][ci]\n rj += np.dot(img, self.cell)\n else:\n all_rj = rj + self.images_cellvec\n all_r = all_rj - self.xyz[i]\n all_d = np.sqrt(np.add.reduce(all_r*all_r,1))\n closest = np.argsort(all_d)[0]\n return all_rj[closest]\n dr = ri-rj\n d = np.sqrt(np.sum(dr*dr))\n return d", "def get_neighbours(self, x, k):\n k = min(k, self.n)\n nearest = {}\n for i in range(k):\n nearest[i] = self.euclidean_distance(x, self.train_x[i])\n for i in range(k, self.n):\n dist = self.euclidean_distance(x, self.train_x[i])\n if dist < max(nearest.values()):\n nearest.pop(max(nearest, key=nearest.get))\n nearest[i] = dist\n return nearest", "def nearest_obstacle_distance(self, state, *args, **kwargs):\n raise NotImplementedError", "def KDSearch(current, nearest, d_star):\n #Base case: dead end.\n if current is None:\n return nearest, d_star\n #set x to location of node we are examining\n x = current.value\n #set i to the pivot of node we are examining\n i = current.pivot\n #distance from x to z\n d_x_z = la.norm(x - z)\n #check if current is closer to z than nearest\n if d_x_z < d_star:\n nearest = current\n d_star = d_x_z\n #Search to the left\n if z[i] < x[i]:\n nearest, d_star = KDSearch(current.left, nearest, d_star)\n #Search to the right if needed\n if (z[i] + d_star) >= x[i]:\n nearest, d_star = KDSearch(current.right, nearest, d_star)\n #Search to the right\n else:\n nearest, d_star = KDSearch(current.right, nearest, d_star)\n #Search to the left if needed\n if (z[i] - d_star) <= x[i]:\n nearest, d_star = KDSearch(current.left, nearest, d_star)\n return nearest, d_star", "def nearest_neighbors(self):\n neighbor_distances_and_indices = []\n for idx, data_point in enumerate(self.data):\n distance = self.euclidean_dis(data_point[:-1], self.query) # Calculate the distance between the query\n # example and the current example from the data.\n\n neighbor_distances_and_indices.append((distance, idx)) # Add the distance and the index of the example\n # to an ordered collection\n\n sorted_neighbor_distances_and_indices = sorted(neighbor_distances_and_indices, key=lambda x: x[0]) #\n # Sort the ordered collection of distances and indices from smallest to largest (in ascending order) by\n # the distances\n\n k_nearest_distances_and_indices = sorted_neighbor_distances_and_indices[:self.k] # Pick the first K\n # entries from the sorted collection\n\n k_nearest_labels = [self.data[i][1] for distance, i in k_nearest_distances_and_indices] # Get the labels of\n # the selected K entries\n\n return k_nearest_labels, self.mode(k_nearest_labels)", "def 
find_nearest_neighbors(p, points, k=5):\n dist = np.zeros(points.shape[0])\n for i in range(len(dist)):\n dist[i] = distance(p, points[i])\n ind = np.argsort(dist)\n return ind[0:k]", "def get_neighbor(self, c, d):\n \n if d == NORTH: return (c[0]+1, c[1])\n elif d == EAST: return (c[0], c[1]+1)\n elif d == SOUTH: return (c[0]-1, c[1])\n elif d == WEST: return (c[0], c[1]-1)\n\n raise ValueError", "def _get_next_element(cls, d, idx):\n t = np.where(d[:, 2] > 0)[0]\n t = t[t > idx]\n if len(t):\n return d[t[0], 0], d[t[0], 1], t[0]\n return None, None, None", "def get_dist_from_nearest_ndd(digraph, ndds):\n \n # Get a set of donor-patient pairs who are the target of an edge from an NDD\n ndd_targets = set()\n for ndd in ndds:\n for edge in ndd.edges:\n ndd_targets.add(edge.tgt)\n\n # Breadth-first search\n q = deque(ndd_targets)\n distances = [999999999] * len(digraph.vs)\n for v in ndd_targets:\n distances[v.id] = 1\n\n while q:\n v = q.popleft()\n for e in v.edges:\n w = e.tgt\n if distances[w.id] == 999999999:\n distances[w.id] = distances[v.id] + 1\n q.append(w)\n\n return distances", "def get_nearest_neighbors(self, kdt, radius=8):\n neighbors = kdt.query_radius(np.array([self.position[:-1]]), r = radius)\n return neighbors[0][1:]", "def find_vertex_at_nearest_distance(DISTANCES, D):\n v = int(0) # All vertex IDs are integers\n iv = int(0) # Index of the vertex v in DISTANCES\n DISTANCES = np.asarray(DISTANCES)\n min_val = (np.abs(DISTANCES - D)).min()\n vertices = np.where(DISTANCES == min_val + D)\n iv = int(np.random.random() * (len(vertices[0]) - 1))\n v = vertices[0][iv]\n return v", "def findNearest(self, i):\n skel = self.skel[i, :]\n closest = self.nbrs.kneighbors(skel, return_distance=False)\n memberships = np.zeros(len(self.uniqueVor))\n for j, c in enumerate(closest):\n c = c[0]\n nearLabel = self.vorLabels[c]\n memberships[nearLabel] += 1\n if nearLabel == 0:\n self.isCorrect[i[j]] = 0\n return memberships", "def find_nearest_neighbor(src, dst):\n return sp.spatial.KDTree(dst).query(src)", "def get_density_from_neighbours(x: float, y: float, tree: KDTree, n: int = 10):\n\n dist, _ = tree.query([[x, y]], k=n)\n\n hsml = dist.max() / 2 # By definition!!!\n\n density = np.sum(kernel(dist, hsml))\n\n return density", "def nearestneighbors(X, n, metric='euclidean'):\n nn = NearestNeighbors(n_neighbors=n,\n metric=metric,\n n_jobs=-1)\n nbrs = nn.fit(X)\n dist, _ = nbrs.kneighbors(X)\n sort_dist = np.sort(dist, axis=0)[:, 1:]\n return sort_dist", "def _get_nearest_neighbor(self, sample):\n d_min=float('inf') #minimum distance\n node_neighbor=self.start\n\n for iter in self.start:\n d=0 #distance between sample and each node in the node tree\n for j in range(sample.size):\n d+=(iter.state[j]-sample[j])**2\n if(d<d_min):\n d_min=d\n node_neighbor=iter\n\n return node_neighbor", "def KDsearch(current, target, neighbor, distance):\n \n # Base case. 
Return the distance and the nearest neighbor.\n if current is None:\n return neighbor, distance\n index = current.axis\n d = target - current\n if d < distance:\n distance = d\n neighbor = current\n if target < current: # Recursively search 'left'\n neighbor, distance = KDsearch(\n current.left, target, neighbor, distance)\n # Back up if needed\n if target.data[index] + distance >= current.data[index]: # (?)\n neighbor, distance = KDsearch(\n current.right, target, neighbor, distance)\n else: # Recursively search 'right'\n neighbor, distance = KDsearch(\n current.right, target, neighbor, distance)\n # Back up if needed\n if target.data[index] - distance <= current.data[index]: # (?)\n neighbor, distance = KDsearch(\n current.left, target, neighbor, distance)\n \n return neighbor, distance" ]
[ "0.636273", "0.630254", "0.60281736", "0.5943924", "0.59283555", "0.5885495", "0.5877531", "0.58613336", "0.5830915", "0.57700646", "0.57700646", "0.56760716", "0.56691253", "0.5665002", "0.56431895", "0.563362", "0.5632298", "0.5618386", "0.5602656", "0.5597303", "0.55920225", "0.5590134", "0.5584587", "0.5567192", "0.55572164", "0.55564433", "0.5554555", "0.5530114", "0.55051595", "0.5499697" ]
0.6726871
0
Level as a string.
def level_name(self) -> str: return getLevelName(self.level)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_level(self, level):\n return", "def __str__(self):\n string = ''\n\n # gets the nodes at each level and puts the values into a string\n for i in range(self.get_height()+1):\n nodes = self.get_nodes_on_level(i)\n level = [str(node.value) if node else '-' for node in nodes]\n string += '{}\\n'.format(' '.join(level))\n\n return string", "def _level_info(entity):\n if entity.is_max_level():\n return 'Maxed'\n if entity.max_level is not None:\n return '{entity.level}/{entity.max_level}'.format(entity=entity)\n return entity.level", "def get_level_name(self, levelno):\n try:\n return self.levelnames[levelno]\n except KeyError:\n return 'LogLevel=%d' % levelno", "def test_infrastructure_usage_difficulty_level_display_string(self):\n self.assertEquals(str(self.level), \"Medium (Ecorp)\")", "def getLevel(self):\n return self._level", "def getLevel( self ):\n level = self.getEffectiveLevel()\n if level == logging.CRITICAL:\n return 'critical'\n elif level == logging.ERROR:\n return 'error'\n elif level == logging.WARNING:\n return 'warning'\n elif level == logging.INFO:\n return 'info'\n elif level == logging.DEBUG:\n return 'debug'\n elif level == logging.NOTSET:\n return 'notset'\n else:\n return 'unknown ({})'.format( level )", "def level(self):\n return self._level", "def level(self):\n return self._level", "def level(self):\n return self._level", "def level(self):\n return self._level", "def level(self):\n return self.__level", "def level(self):\n return self.__level", "def __str__(self):\r\n levels = tuple(self.generate_levels())\r\n self.compute_representation_positions()\r\n levels_to_strings = self.represent_tree_levels(levels)\r\n branches = self.represent_tree_branches(levels)\r\n\r\n return \"\".join(\"\".join((level, \"\\n\\n\", branch))\r\n for (level, branch) in zip(levels_to_strings, branches))", "def level(self) -> pulumi.Input[Union[str, 'Level']]:\n return pulumi.get(self, \"level\")", "def _level(self, level):\r\n\r\n level_t = type(level)\r\n if level_t == int: return level\r\n if level == None: return level\r\n if level == \"SILENT\": return log.SILENT\r\n if hasattr(logging, \"_checkLevel\"):\r\n return logging._checkLevel(level)\r\n return logging.getLevelName(level)", "def to_string(self):\n return self.dungeon_string", "def getLevel(self):\n return self.level", "def test_infrastructure_maintenance_difficulty_level_display_string(self):\n self.assertEquals(str(self.level), \"Medium (Ecorp)\")", "def format(self, record):\n # type: (LogRecord) -> str\n try:\n return str(getattr(self, record.levelname)(record))\n except AttributeError as err:\n raise RuntimeError('Unknown record level (name: %s)' % record.levelname) from err", "def __get_formatted(message, level):\r\n if USE_COLOR and LEVELS[level] > 0:\r\n return __termcode(LEVELS[level]) + \"[\" + level + \"] \" + message + __termcode(0)\r\n else:\r\n return \"[\" + level + \"] \" + message", "def getLevel(unique_name):", "def get_tag(level: int) -> str:\n return LEVEL_TAGS[level]", "def getLevel(self, level):\n mingroup = None\n groups = self.console.storage.getGroups()\n\n for x in groups:\n\n if x.level < level:\n continue\n\n if mingroup is None:\n mingroup = x\n continue\n\n if x.level < mingroup.level:\n mingroup = x\n\n return mingroup.name", "def getLevel(self):\n return _libsbml.SBase_getLevel(self)", "def get_log_string(self, level=None):\r\n\r\n log_lines = [f\"Name:{str(self.name)} Level:{str(level)}\"]\r\n for key, item in self.log.items():\r\n log_lines.append(f\"{key}:\")\r\n if item:\r\n for entry 
in item:\r\n if not level or (entry[\"level\"] == level):\r\n log_lines.append(f\"\\t[{entry['level']} {entry['msg']}]\")\r\n return \"\\n\".join(log_lines)", "def display_label(self) -> str:\n return f\"{self.name} ({len(self.level_doors)} levels)\"", "def format_loglevel(str_level):\n std_levels = {\n \"debug\": logging.DEBUG,\n \"info\": logging.INFO,\n \"warning\": logging.WARNING,\n \"error\": logging.ERROR,\n \"critical\": logging.CRITICAL,\n }\n\n level = str_level.lower().strip()\n\n return std_levels.get(level)", "async def level(self, ctx):\n\n level = await self.get_player_level(ctx.author)\n await ctx.send(f\"{ctx.author.mention}, your level is {level}. Use the `-info` command to learn more!\")", "def mlevelname(level: Union[int, str]) -> str:\n level = mlevel(level)\n return logging._levelToName[level]" ]
[ "0.70281094", "0.6984572", "0.6966007", "0.6825643", "0.6811092", "0.6721429", "0.6673605", "0.66604024", "0.66604024", "0.66604024", "0.66604024", "0.6650703", "0.6650703", "0.6631484", "0.6623055", "0.658296", "0.65787655", "0.65737617", "0.65655804", "0.6522336", "0.6521385", "0.64734465", "0.647155", "0.6415208", "0.6367323", "0.6362647", "0.6353014", "0.6349528", "0.6343372", "0.63389456" ]
0.77638847
0
Initialize an instance of the game visualizer. Store the referee to use to run the game. Additionally, the initial order of players is stored.
def __init__(self, referee): super(GameVisualizerWindow, self).__init__() self.referee = referee self.darea = None # set in init_ui() self.scores = None # set in init_ui() self.player_color_order = [player.get_color() for player in referee.get_current_state().get_players()] self.init_ui()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n self.screen = pg.display.get_surface()\n self.screen_rect = self.screen.get_rect()\n self.clock = pg.time.Clock()\n self.fps = 60\n self.keys = pg.key.get_pressed()\n self.done = False\n # ship = random.choice(list(prepare.GFX[\"ships\"].values()))\n ship = list(prepare.GFX[\"ships\"].values())[7] # pick first ship available\n self.player = actors.Player((0, 0), ship)\n self.level = level.Level(self.screen_rect.copy(), self.player)\n\n self.energyloss_counter = 0\n self.energygain_counter = 0", "def __init__(self, players):\n\n self._players = players\n self._game = None", "def setup_new_game(self):\r\n self._player = Player()\r\n self._stats = GameStats(self._bb_settings)\r\n self._scoreboard = Scoreboard(self._bb_settings, self._screen)", "def __init__(self, screen, win_size_x, win_size_y, player_num):\n self._player_list = list(Player(f\"Player {x}\") for x in range(player_num))\n self._player_num = player_num\n print(\"The number of players is: \", player_num)\n self._screen = screen\n self._player_turn = 0\n self._ui_player_turn = UI((1820, 10), (100, 50), f\"Player {self._player_turn}\")\n self._screen.blit(self._ui_player_turn.update(\n f\"Player {self._player_turn}\"), self._ui_player_turn._location)\n self._core_deck = Deck(\"Deck/test_deck.txt\")\n self._war_deck = []\n self._map = Map(self._player_num, win_size_x, win_size_y)\n self._clock = pygame.time.Clock()\n self._run = True\n self._fps = 30\n self.each_player_draws_hand(self._core_deck)", "def init_game(self):\n self.view.carregar_jogadores_possiveis(self._possible_players_list())\n self.view.put_view_in_main_loop()", "def __init__(self):\n self.played_pos = []\n self.grid = [['-', '-', '-'],\n ['-', '-', '-'],\n ['-', '-', '-']]\n self.player_played_pos = {'p1': set(), 'p2': set()}", "def initGameState(self):\n print(\"Setting game state: \")\n self.playGUI = GUI()\n self.playGUI.drawBoard(self.player)", "def __init__(self, ik_game):\r\n self.ik_game = ik_game\r\n self.screen = ik_game.screen\r\n self.screen_rect = self.screen.get_rect()\r\n self.settings = ik_game.configuracoes\r\n self.stats = ik_game.stats\r\n\r\n # Font para dispositive os dados\r\n self.text_color = (250, 250, 250)\r\n self.font = pygame.font.SysFont(None, 48)\r\n\r\n # Inicia o placar inicial\r\n self.prep_placar()\r\n self.prep_placar_score()\r\n self.prep_level()\r\n self.prep_naves()", "def __init__(self):\n\t\tpygame.init()\n\t\tself.settings = Settings()\n\n\t\tself.screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)\n\t\tself.settings.screen_width = self.screen.get_rect().width \n\t\tself.settings.screen_height = self.screen.get_rect().height\n\t\tpygame.display.set_caption(\"Pigeon Drop!\")\n\n\t\t# Create an instance to store game statistics,\n\t\t# and create a scoreboard.\n\t\tself.stats = GameStats(self)\n\t\tself.sb = Scoreboard(self)\n\n\t\tself.pigeon = Pigeon(self)\n\t\tself.droppings = pygame.sprite.Group()\n\t\tself.autos = pygame.sprite.Group()\n\n\t\tself._create_fleet()\n\n\t\t# Make the Play button.\n\t\tself.play_button = Button(self, \"Play\")", "def __init__(self, players):\n\n # Instantiate a Players object with the players queue\n self._players = Players(players)\n # Instantiate the Die to be used for the current game\n self._die = Die()\n # Track the game status\n self._active_turn = True\n self._end_game = False", "def __init__(self, ghost_players=[]):\n self.players = [Player(), Player(), Player(), Player()]\n self.hist = []\n self.round = 1\n self.current_player = 0\n 
self.first_winner_was = -1\n self.current_dice = -1\n self.observation_pending = False\n self.current_move_pieces = []\n self.current_enemys = []\n self.current_start_attempts = 0\n self.enemys_order = {\n 0: [1, 2, 3],\n 1: [2, 3, 0],\n 2: [3, 0, 1],\n 3: [0, 1, 2]\n }\n self.game_winners = []\n self.ghost_players = ghost_players", "def __init__(self):\n\n self.frameCount = 0\n self._initScreen()\n self._initObjects()\n self._initControls()\n self._initLevel()\n self._start()\n print \"DEBUG: Initializing Game\"\n pass", "def __init__(self):\n self.__grid = create_grid(\n Settings.SIZE_X, Settings.SIZE_Y, MarkerType.NONE)\n\n self.__turn = 0\n self.__state = GameState.PLAYING\n self.__winner = MarkerType.NONE\n self.__loser = MarkerType.NONE\n\n # Separate counter for turns, because __turn depends on starting player\n self.__turns_played = 0", "def start(self):\n self.__init__()\n self.set_n_players()\n self.init_players()\n self.init_territory_selection_phase()\n self.init_troop_deployment_phase()\n # self.game_phase()", "def setup_game(self):", "def __init__(self):\n self.players = {1: [\"Player_a\", \"\\u25CF\"], 2: [\"Player_b\", \"\\u25CB\"]}\n self.current_player = 1\n self.playing_player = self.players[1]\n self.grid = [[\" \"] * 6 for x in range(7)]", "def __init__(self, num_players):\n self.num_players = num_players\n self.firework = [[], [], [], [], []]\n self.nb_blue_stone = MAX_BLUE_STONE\n self.nb_red_stone = MAX_RED_STONE\n self.draw = None\n self.hands = None\n self.fill_draw()\n random.shuffle(self.draw)\n self.discard = []\n self.draw_initial_hands()", "def __init__(self, players):\n\n self._players = players\n self._current_player = players.get()", "def game_setup(self):\n self.deck = Shoe(6)\n self.initial_draw()\n self.pot = ask_for_bet(self.player.money)\n show_table(self.player, self.dealer, self.pot)\n self.surrender_and_insurance()", "def __init__(self, players):\n\n # Define the players\n self.players = players\n\n # Define who starts the game\n self.nplayer = 1 \n\n # Define the board\n self.board = [0] * 9", "def __init__(self, players=None):\n self.game = Game()\n if players:\n self.player1 = players[0]\n self.player2 = players[1]\n else:\n self.player1 = Player('X')\n self.player2 = Player('O')\n self.record = Record()\n self.winning_moves = []", "def __init__(self):\n pygame.init()\n self.settings = Settings()\n self.screen = pygame.display.set_mode(\n (self.settings.screen_width, self.settings.screen_height)\n )\n pygame.display.set_caption(\"Sideways Shooter\")\n self.stats = GameStats(self)\n self.sideways_ship = SidewaysShip(self)\n self.bullets = pygame.sprite.Group()\n self.aliens = pygame.sprite.Group()\n self._create_fleet()", "def __init__(self, width, height, title):\n ## INIT FUNCTION ##\n super().__init__(width, height, title)\n\n ## APPENDING THE SPRTIES ##\n self.shape_list = None\n self.num_key = 0\n\n self.win = arcade.load_texture(\"Numbers/won.png\")\n self.lost = arcade.load_texture(\"Numbers/lost.png\")\n\n # Define variables to check for completeness and accuracy\n self.done = False\n self.correct = False\n self.incorrect = False\n\n self.current_selected = None\n\n # If continuing saved game, convert strings from saved game file to lists and set equal to self.grid and self.fixed_answer\n if new == False:\n self.fixed_answer = Cameron.str_to_list(answer)\n self.grid = Cameron.str_to_list(progress)\n # If starting new game, generate unique board and save solution to text file\n elif new == True:\n self.board = SuDoku(SIZE, (DIV_ROW, 
DIV_COL), difficulty)\n self.answer = self.board.get_solution()\n self.grid = self.board.get_puzzle()\n self.fixed_answer = self.answer\n\n ## GENERATES BACKGROUND ##\n arcade.set_background_color(arcade.color.BLACK)\n self.recreate_grid()", "def __init__(self, playerColors : Dict[str, str]):\n initialGameState = {\n \"counter\" : {\"Team1\" : 0, \"Team2\" : 0},\n \"lastChanged\" : None,\n \"wonRounds\" : {\"Team1\" : 0, \"Team2\" : 0},\n \"wonGames\" : {\"Team1\" : 0, \"Team2\" : 0},\n \"currentMaxPoints\" : self.maxPointsWithoutOvertime,\n \"sidesChanged\" : False,\n \"playerPositions\" : {\"Team1\" : {\"Player1\" : 1, \"Player2\": 2}, \"Team2\" : {\"Player1\" : 3, \"Player2\": 4}},\n \"servePosition\" : 0,\n \"playerColors\" : playerColors,\n \"undoStack\" : [],\n \"redoStack\" : [],\n \"observers\" : []}\n self.setGameState(initialGameState)", "def run_visualizer(num_players):\n\n num_players = parse_num_players(num_players)\n if num_players is None:\n raise ValueError(\"Invalid player count given.\")\n\n players = get_player_list(num_players)\n ref = Referee(players, (5, 5), timeout=600)\n\n win = GameVisualizerWindow(ref)\n win.show_all()\n Gtk.main()", "def __init__(self):\n super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)\n self.player_count: int = None\n self.player_hand_0: arcade.SpriteList = None\n self.player_hand_1: arcade.SpriteList = None\n self.deck: arcade.SpriteList = None\n self.pile: arcade.SpriteList = None", "def __init__(self):\n self.game_screen = pygame.display.set_mode((GameData.screen_dim, GameData.screen_dim))\n self.game_screen.fill(GameData.background_color)\n self.player = 1\n self.game_over = False\n self.board = np.zeros((GameData.rows, GameData.columns))", "def init_new_game(self):\n self.game = get_new_game(self.game_config)", "def start(self):\n self.player = Player()\n self.dealer = Dealer()\n self.pot = 0\n self.side_bet = 0\n start_game()", "def __init__(self):\n self.games = {} # Dict from gameIDs to game objects. Initially empty.\n self.players = {} # Dict from playerID to player name\n self._version = __version__ # Used in version check during un-pickling\n\n # Log initialization\n TournamentSystem._logger.debug(\"Initialized\")" ]
[ "0.679442", "0.6789897", "0.66815865", "0.66547024", "0.66024655", "0.6598829", "0.65734255", "0.6536193", "0.65354216", "0.65309167", "0.65183204", "0.651029", "0.6440358", "0.64252824", "0.6383633", "0.63720644", "0.635871", "0.6323436", "0.630629", "0.6305288", "0.62966394", "0.62920743", "0.6284824", "0.6282714", "0.6276989", "0.627033", "0.62576526", "0.62434095", "0.623849", "0.6228678" ]
0.7888223
0
Initialize and configure the user interface. This involves setting up the drawing area, the button to do turns, and a text view to display scores, as well as setting up callbacks. Configurations such as widget sizes and labels are also set here.
def init_ui(self): self.set_title(TITLE) self.set_default_size(WINDOW_WIDTH, WINDOW_HEIGHT) self.set_resizable(False) fixed = Gtk.Fixed() self.add(fixed) darea = Gtk.DrawingArea() darea.connect(DRAW_EVENT, self.on_draw) darea.set_size_request(DRAWING_AREA_WIDTH, DRAWING_AREA_HEIGHT) self.darea = darea fixed.put(darea, DRAWING_AREA_X, DRAWING_AREA_Y) button = Gtk.Button.new_with_label(RUN_NEXT_TURN_MSG) button.connect(CLICKED_EVENT, self.on_next_turn_click) button.set_size_request(BUTTON_SIZE, BUTTON_SIZE) fixed.put(button, BUTTON_X, BUTTON_Y) scores = Gtk.TextView() scores.get_buffer().set_text(self.get_current_scores_buffer()) scores.set_size_request(TEXT_VIEW_WIDTH, TEXT_VIEW_HEIGHT) self.scores = scores fixed.put(scores, TEXT_VIEW_X, TEXT_VIEW_Y) self.connect(DESTROY_EVENT, Gtk.main_quit)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_ui(self):\n self.master.title(\"Backbone\")\n self.master.geometry(\"300x150\")\n\n self.pack(fill=BOTH, expand=1)\n\n self.btn_upload_file = Button(self, text=\"Upload file\", command=self.upload_file)\n self.btn_upload_file.place(x=90, y=10)\n\n self.btn_create_training_file = Button(self, text=\"Create & upload training file\",\n command=self.create_training_file)\n self.btn_create_training_file.place(x=30, y=40)\n\n self.btn_run_algorithm = Button(self, text=\"Run algorithm\", command=self.run_algorithm)\n self.btn_run_algorithm.place(x=80, y=70)\n\n self.btn_view_results = Button(self, text=\"View Results\", command=self.view_results)\n self.btn_view_results.place(x=85, y=100)", "def initializeUI(self):\n self.setStyleSheet(abstyle)\n self.setGeometry(140, 100, 860, 484)\n self.setWindowTitle('Emotions Data View')\n self.setupModelView()", "def initUI(self):\n startbtn = QPushButton(\"Start Recroding\", self)\n startbtn.move(30, 50)\n\n stopbtn = QPushButton(\"Stop Recording\", self)\n stopbtn.move(150, 50)\n\n initbtn = QPushButton(\"Initilize\", self)\n initbtn.move(30, 100)\n\n plotbtn = QPushButton(\"Plot\", self)\n plotbtn.move(150, 100)\n\n startbtn.clicked.connect(self.start_recording)\n stopbtn.clicked.connect(self.stop_recording)\n initbtn.clicked.connect(self.init_recording)\n plotbtn.clicked.connect(self.plot_signals)\n\n self.statusBar()\n self.statusBar().showMessage('Click Init')\n\n self.setGeometry(300, 300, 290, 150)\n self.setWindowTitle('Recorder 1.0')\n self.setWindowIcon(QIcon(\"./Static/Images/icon.jpg\"))\n self.show()", "def initializeUI(self):\n self.setGeometry(100, 100, 300, 200)\n self.setWindowTitle('Event Handling Example')\n\n self.show()", "def configure_widgets(self):\r\n\r\n # 'command' - callback function executed when button is pressed\r\n # since we can't pass it a function with arguments, we use the partial \r\n # function from the functools module\r\n self.btn_tl['command'] = partial(self.play, \"x\", (0,0))\r\n self.btn_tm['command'] = partial(self.play, \"x\", (0,1))\r\n self.btn_tr['command'] = partial(self.play, \"x\", (0,2))\r\n self.btn_ml['command'] = partial(self.play, \"x\", (1,0))\r\n self.btn_mm['command'] = partial(self.play, \"x\", (1,1))\r\n self.btn_mr['command'] = partial(self.play, \"x\", (1,2))\r\n self.btn_bl['command'] = partial(self.play, \"x\", (2,0))\r\n self.btn_bm['command'] = partial(self.play, \"x\", (2,1))\r\n self.btn_br['command'] = partial(self.play, \"x\", (2,2))\r\n\r\n self.btn_reset['text'] = \"Reset\"\r\n self.btn_reset['command'] = self.reset", "def setUI(self):\n self.parent.title(\"Handwritten digits classification\")\n self.pack(fill=BOTH, expand=1)\n self.columnconfigure(6,weight=1)\n self.rowconfigure(2, weight=1)\n self.canv = Canvas(self, bg=\"white\")\n self.canv.grid(row=2, column=0, columnspan=7,\n padx=5, pady=5,\n sticky=E + W + S + N)\n self.canv.bind(\"<B1-Motion>\",\n self.draw)\n\t\t\t\n\t\t\t\n #size_lab = Label(self, text=\"Classificator: \")\n #size_lab.grid(row=0, column=0, padx=5)\n predict_btn = Button(self, text=\"Predict\", width=10, command=lambda: self.predict())\n predict_btn.grid(row=0, column=0)\n delete_btn = Button(self, text=\"Clear\", width=10, command=lambda: self.canv.delete(\"all\"))\n delete_btn.grid(row=1, column=0, sticky=W)", "def initGUI(self):\r\n\r\n self.pack(fill=tk.BOTH, expand=True)\r\n\r\n # Figure out sizing.\r\n width = 200\r\n height = 200\r\n pad = 5\r\n fontWidth = 8\r\n bigWidth = int((width*3 + pad*6) / fontWidth)\r\n \r\n # Create option 
frames.\r\n self.frameOptions = tk.LabelFrame(self, text=\"Options:\",\r\n width=width, height=height)\r\n self.frameSegment = tk.LabelFrame(self, text=\"Segmentation Method:\",\r\n width=width, height=height)\r\n self.frameMeasure = tk.LabelFrame(self, text=\"Measurements:\",\r\n width=width, height=height)\r\n\r\n # Create text boxes and labels.\r\n self.labelStatus = tk.LabelFrame(self, text=\"Status:\", bd=0)\r\n self.labelResults = tk.LabelFrame(self, text=\"Results:\", bd=0)\r\n self.textStatus = ScrolledText(self.labelStatus, height=5,\r\n width=bigWidth)\r\n self.textResults = ScrolledText(self.labelResults, height=10,\r\n width=bigWidth)\r\n\r\n # Create buttons.\r\n self.buttonCalculate = tk.Button(self, text='Calculate',\r\n width=20, height=1, font=12, bd=3,\r\n command=lambda:self.prepare())\r\n self.buttonSaveAll = tk.Button(self, text='Save Session Summary',\r\n command=self.saveAll)\r\n self.buttonSelectOutFold = tk.Button(self, text='Set Output Folder',\r\n command=self.setOutputFolder)\r\n self.buttonAbout = tk.Button(self, text='About', command=self.about)\r\n\r\n # Arrange toplevel widgets.\r\n self.frameOptions.grid(row=0, column=2, padx=pad, pady=pad,\r\n sticky='NESW')\r\n self.frameSegment.grid(row=0, column=1, padx=pad, pady=pad,\r\n sticky='NESW')\r\n self.frameMeasure.grid(row=0, column=0, padx=pad, pady=pad,\r\n sticky='NESW')\r\n\r\n self.buttonCalculate.grid(row=1, column=1, \r\n padx=pad, pady=pad*3)\r\n self.buttonSelectOutFold.grid(row=1, column=0, \r\n padx=pad, pady=pad*3)\r\n self.buttonAbout.grid(row=6, column=2, sticky='e', padx=20, pady=10)\r\n\r\n self.labelStatus.grid(row=2, column=0, columnspan=3, sticky='w',\r\n padx=pad, pady=pad)\r\n self.textStatus.grid(row=3, column=0, columnspan=3)\r\n self.labelResults.grid(row=4, column=0, columnspan=3, sticky='w',\r\n padx=pad, pady=pad)\r\n self.textResults.grid(row=5, column=0, columnspan=3)\r\n self.buttonSaveAll.grid(row=6, column=1, padx=pad, pady=pad)\r\n\r\n # Variables\r\n self.outFold = None\r\n columns = [[\"\",\"\",\"\",\"\",\r\n \"Bright phase diameter\",\r\n \"\",\"\",\r\n \"Dark phase diameter\",\r\n \"\",\"\",\r\n \"Bright length\",\r\n \"\",\"\",\r\n \"Dark length\",\r\n \"\",\"\",\r\n \"Bright area\",\r\n \"\",\"\",\r\n \"Dark area\",\r\n \"\",\"\",\r\n \"Bright connected length\",\r\n \"\",\"\",\r\n \"Dark connected length\",\r\n \"\",\"\"], \r\n [\"image\",\r\n \"pixel size\",\r\n \"area frac\",\r\n \"est diam\",\r\n \"Average\",\r\n \"SD\",\r\n \"Measured\",\r\n \"Average\",\r\n \"SD\",\r\n \"Measured\",\r\n \"Average\",\r\n \"SD\",\r\n \"Measured\",\r\n \"Average\",\r\n \"SD\",\r\n \"Measured\",\r\n \"Average\",\r\n \"SD\",\r\n \"Measured\",\r\n \"Average\",\r\n \"SD\",\r\n \"Measured\",\r\n \"Average\",\r\n \"SD\",\r\n \"Measured\",\r\n \"Average\",\r\n \"SD\",\r\n \"Measured\"]]\r\n \r\n self.saveAll = np.array(columns)\r\n\r\n # Measurement options.\r\n # Variables.\r\n self.varDiameter = tk.BooleanVar()\r\n self.varLength = tk.BooleanVar()\r\n self.varArea = tk.BooleanVar()\r\n self.varSumConnectedLength = tk.BooleanVar()\r\n self.varAreaFraction = tk.BooleanVar()\r\n # Create widgets.\r\n self.checkDiameter = tk.Checkbutton(self.frameMeasure,\r\n text=\"Diameter\", variable=self.varDiameter)\r\n self.checkLength = tk.Checkbutton(self.frameMeasure,\r\n text=\"Length\", variable=self.varLength)\r\n self.checkArea = tk.Checkbutton(self.frameMeasure,\r\n text=\"Area\", variable=self.varArea)\r\n self.checkSumConnectedLength = tk.Checkbutton(self.frameMeasure,\r\n text=\"Connected 
length\", variable=self.varSumConnectedLength)\r\n self.checkAreaFraction = tk.Checkbutton(self.frameMeasure,\r\n text=\"Area fraction\", variable=self.varAreaFraction)\r\n # Pack widgets.\r\n self.checkDiameter.grid(row=0, column=0, sticky='w')\r\n self.checkLength.grid(row=1, column=0, sticky='w')\r\n self.checkArea.grid(row=2, column=0, sticky='w')\r\n self.checkSumConnectedLength.grid(row=3, column=0, sticky='w')\r\n self.checkAreaFraction.grid(row=4, column=0, sticky='w')\r\n # Check appropriate boxes.\r\n self.checkDiameter.select()\r\n self.checkLength.select()\r\n self.checkArea.select()\r\n self.checkSumConnectedLength.select()\r\n self.checkAreaFraction.select()\r\n \r\n # Segment options.\r\n # Variables.\r\n self.varSegment = tk.StringVar()\r\n # Create widgets.\r\n self.radAccurate = tk.Radiobutton(self.frameSegment,\r\n text=\"Accurate\", variable=self.varSegment, value=\"accurate\",\r\n command=self.updateOptions)\r\n self.radFast = tk.Radiobutton(self.frameSegment,\r\n text=\"Fast\", variable=self.varSegment, value=\"fast\",\r\n command=self.updateOptions)\r\n self.radManual= tk.Radiobutton(self.frameSegment,\r\n text=\"Manual\", variable=self.varSegment, value=\"manual\",\r\n command=self.updateOptions)\r\n self.radFromBinary = tk.Radiobutton(self.frameSegment,\r\n text=\"From binary\", variable=self.varSegment, value=\"binary\",\r\n command=self.updateOptions)\r\n # Pack widgets.\r\n self.radAccurate.grid(row=0, column=0, sticky='w')\r\n self.radFast.grid(row=1, column=0, sticky='w')\r\n self.radManual.grid(row=2, column=0, sticky='w')\r\n self.radFromBinary.grid(row=3, column=0, sticky='w')\r\n # Check appropriate boxes.\r\n self.radAccurate.select()\r\n\r\n # Option options.\r\n # Profiles\r\n profiles = autoSelect.profiles()\r\n # Variables.\r\n self.varShowSteps = tk.BooleanVar()\r\n self.varOutputExcel = tk.BooleanVar()\r\n self.varSavePDF = tk.BooleanVar()\r\n self.varSaveMovie = tk.BooleanVar()\r\n self.varSaveBinary = tk.BooleanVar()\r\n self.varAutoParse = tk.BooleanVar()\r\n self.varProfile = tk.StringVar()\r\n self.varProfile.set(profiles[0])\r\n # Create widgets.\r\n self.checkShowSteps = tk.Checkbutton(self.frameOptions,\r\n text=\"Show steps\", variable=self.varShowSteps)\r\n self.checkOutputExcel = tk.Checkbutton(self.frameOptions,\r\n text=\"Output to Excel\", variable=self.varOutputExcel)\r\n self.checkSavePDF = tk.Checkbutton(self.frameOptions,\r\n text=\"Save PDF\", variable=self.varSavePDF)\r\n self.checkSaveMovie = tk.Checkbutton(self.frameOptions,\r\n text=\"Save movie\", variable=self.varSaveMovie)\r\n self.checkSaveBinary = tk.Checkbutton(self.frameOptions,\r\n text=\"Save binary\", variable=self.varSaveBinary)\r\n self.checkAutoParse = tk.Checkbutton(self.frameOptions,\r\n text=\"Auto parse raw image\", variable=self.varAutoParse,\r\n command=self.updateAuto)\r\n self.optionProfile = tk.OptionMenu(self.frameOptions, self.varProfile,\r\n *profiles)\r\n self.optionProfile.config(state=tk.DISABLED)\r\n\r\n # Pack widgets.\r\n self.checkShowSteps.grid(row=0, column=0, sticky='w')\r\n self.checkOutputExcel.grid(row=1, column=0, sticky='w')\r\n self.checkSavePDF.grid(row=2, column=0, sticky='w')\r\n #self.checkSaveMovie.grid(row=3, column=0, sticky='w')\r\n self.checkSaveBinary.grid(row=4, column=0, sticky='w')\r\n self.checkAutoParse.grid(row=5, column=0, sticky='w')\r\n self.optionProfile.grid(row=6, column=0, sticky='w', padx=15)\r\n \r\n # Check appropriate boxes.\r\n self.checkOutputExcel.select()\r\n\r\n self.createToolTips()", "def 
_setup_ui(self):\n\n self.window = ui.Widget()\n self.window.dimensions = ui.normalize_dimension((\n 0, 0,\n self.normalized_screen_resolution[0],\n self.normalized_screen_resolution[1]\n ))\n self.window.background_color = ImageColor.getcolor('#000000', 'RGB')\n\n interface_frame = ui.Widget(parent=self.window)\n interface_frame.dimensions = ui.normalize_dimension((\n self.preview_renderer.window[2],\n 0,\n self.normalized_screen_resolution[0] - self.preview_renderer.window[2],\n self.normalized_screen_resolution[1]\n ))\n interface_frame.background_color = ImageColor.getcolor('#ffffff', 'RGB')\n\n number = ui.LabelWidget(\"\",\n name=NAME_GET_STARTED,\n parent=interface_frame,\n align=\"center\",\n font_color=(0, 0, 0, 255))\n number.dimensions = (\n 5, 5,\n interface_frame.width - 10,\n interface_frame.height - 10\n )", "def initializeUI(self):\n self.setGeometry(100, 100, 450, 300)\n self.setWindowTitle('Model and View Example')\n\n self.setupModelView()\n\n self.show()", "def _init_ui(self):\n self.setWindowTitle(\"HB Havens: resultaten\")\n self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint)\n\n self.setLayout(QtWidgets.QVBoxLayout())\n\n # Create figure\n self.figure = Figure(figsize=(4,4))\n self.ax = self.figure.add_subplot()\n\n self.ax.grid()\n self.ax.spines['right'].set_visible(False)\n self.ax.spines['top'].set_visible(False)\n self.ax.tick_params(axis='y', color='0.75')\n self.ax.tick_params(axis='x', color='0.75')\n self.ax.set_aspect(1)\n\n # Add canvas\n self.canvas = FigureCanvasQTAgg(self.figure)\n\n # this is the Navigation widget\n # it takes the Canvas widget and a parent\n self.layout().addWidget(self.canvas)\n\n # Add location selection\n hbox = QtWidgets.QHBoxLayout()\n label = QtWidgets.QLabel('Locatie:')\n label.setFixedWidth(80)\n hbox.addWidget(label)\n self.location_combobox = QtWidgets.QComboBox()\n self.location_combobox.addItems(self.result_locations)\n self.location_combobox.setCurrentIndex(self.locid)\n self.location_combobox.currentIndexChanged.connect(self._set_location)\n hbox.addWidget(self.location_combobox)\n self.layout().addLayout(hbox)\n\n # Add parameter selection\n hbox = QtWidgets.QHBoxLayout()\n label = QtWidgets.QLabel('Parameter:')\n label.setFixedWidth(80)\n hbox.addWidget(label)\n self.parameter_combobox = QtWidgets.QComboBox()\n self.input_parameters = self.modelunctab.mainmodel.hydraulic_loads.result_columns[:]\n self.parameter_combobox.addItems(self.input_parameters)\n self.parameter_combobox.currentIndexChanged.connect(self._set_parameter)\n self.parameter_combobox.setCurrentIndex(0)\n self._set_parameter()\n self.figure.tight_layout()\n hbox.addWidget(self.parameter_combobox)\n self.layout().addLayout(hbox)\n\n # Line\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n line.setFrameShadow(QtWidgets.QFrame.Sunken)\n\n self.layout().addWidget(line)\n\n # Add ok/close\n self.closebutton = QtWidgets.QPushButton('Sluiten')\n self.closebutton.clicked.connect(self.close)\n self.layout().addWidget(self.closebutton, 0, QtCore.Qt.AlignRight)\n\n self.layout().setSizeConstraint(QtWidgets.QLayout.SetFixedSize)", "def init_main(self):\n self.start_game = tk.Button(self.view.frame_2, text=\"Start Game\",\n command=lambda: self.draw_game_table())\n\n self.start_game.pack(side=tk.LEFT)\n self.see_log = tk.Button(self.view.frame_2, text=\"See Log\",\n command=lambda: self.look_log())\n self.see_log.pack(side=tk.LEFT)\n\n self.clean_log = tk.Button(self.view.frame_2, text=\"Clean Log\",\n 
command=lambda: self.model.clean_log_file())\n self.clean_log.pack(side=tk.LEFT)\n self.close_game = tk.Button(self.view.frame_2, text=\"Close Game\",\n command=lambda: self.view.root.destroy())\n self.close_game.pack(side=tk.LEFT)", "def init_ui(self):\n self.parent.title(\"Roku Player Controller\")\n self.style.theme_use(\"default\")", "def init_window(self, game, width, height, scale):\n self.controller = game\n self.window.geometry(\"{0}x{1}\".format((width * scale)+5, (height * scale)+5))\n self.window.resizable(False, False)\n\n self.canvas = tk.Canvas(self.window, width=width * scale, height=height * scale)\n self.canvas.grid(row=0, column=0, sticky=\"nesw\")\n\n self.draw_grid(width, height, scale)\n\n self.window.bind(\"<Button-1>\", lambda a: game.toggle_onclick(a))\n self.window.bind(\"<B1-Motion>\", lambda a: game.toggle_onclick(a))\n self.window.bind(\"<space>\", lambda a: game.toggle_pause())\n self.window.bind(\"<Return>\", lambda a: game.do_step())\n self.window.bind(\"<BackSpace>\", lambda a: game.reset())\n self.set_menu()", "def initUI(self):\n self.logger.debug('Setting up the Measurement GUI')\n self.setWindowTitle(self.title)\n\n self.show()\n\n self.make_combobox_scanner()\n self.make_combobox_movements()\n self.make_combobox_configurate()\n self.make_combobox_basic()", "def initGui(self):\n self.sketchButton = self.add_action(\n path.join(self.plugin_dir, 'icons', 'sketch.svg'),\n text=self.tr('Sketch on map'),\n callback=self.sketchAction,\n parent=self.iface.mainWindow(),\n object_name='mSketchAction')\n self.penButton = self.add_action(\n path.join(self.plugin_dir, 'icons', 'pen.svg'),\n text=self.tr('Draw line on map'),\n callback=self.penAction,\n parent=self.iface.mainWindow(),\n object_name='mPenAction')\n self.canvasButton = self.add_action(\n path.join(self.plugin_dir, 'icons', 'canvas.svg'),\n text=self.tr('Color and width canvas'),\n callback=None,\n parent=self.iface.mainWindow())\n self.eraseButton = self.add_action(\n path.join(self.plugin_dir, 'icons', 'erase.svg'),\n text=self.tr('Erase sketches'),\n callback=self.eraseAction,\n parent=self.iface.mainWindow(),\n object_name='mEraseAction')\n self.removeButton = self.add_action(\n path.join(self.plugin_dir, 'icons', 'remove.svg'),\n text=self.tr('Remove all sketches'),\n callback=self.removeSketchesAction,\n parent=self.iface.mainWindow(),\n object_name='mRemoveAllSketches')\n self.noteButton = self.add_action(\n path.join(self.plugin_dir, 'icons', 'note.svg'),\n text=self.tr('Add text annotations to sketches'),\n callback=None,\n parent=self.iface.mainWindow(),\n object_name='mAddTextAnnotations')\n self.convertButton = self.add_action(\n path.join(self.plugin_dir, 'icons', 'toLayer.svg'),\n text=self.tr('Convert annotations to Memory Layer'),\n callback=self.toMemoryLayerAction,\n parent=self.iface.mainWindow(),\n object_name='mConvertAnnotationsToMemoryLayer')\n self.saveButton = self.add_action(\n path.join(self.plugin_dir, 'icons', 'inbox.svg'),\n text=self.tr('Save sketches to file'),\n callback=self.saveAction,\n parent=self.iface.mainWindow(),\n object_name='mSaveSketchesToFile')\n self.loadButton = self.add_action(\n path.join(self.plugin_dir, 'icons', 'outbox.svg'),\n text=self.tr('Load sketches from file'),\n callback=self.loadAction,\n parent=self.iface.mainWindow(),\n object_name='mLoadSketchesFromFile')\n self.canvasButton.setMenu(self.canvasMenu())\n self.noteButton.setCheckable(True)\n self.penButton.setCheckable(True)\n self.sketchButton.setCheckable(True)\n 
self.eraseButton.setCheckable(True)\n self.geoSketches = []\n self.dumLayer = QgsVectorLayer(\"Point?crs=EPSG:4326\", \"temporary_points\", \"memory\")\n self.pressed = None\n self.previousPoint = None\n self.previousMoved = None\n self.gestures = 0\n self.points = 0\n self.currentColor = QColor(\"#aa0000\")\n self.currentWidth = 5\n self.annotation = sketchNoteDialog(self.iface)\n self.annotatatedSketch = None\n self.sketchEnabled(None)\n self.iface.projectRead.connect(self.projectReadAction)\n self.iface.newProjectCreated.connect(self.newProjectCreatedAction)\n QgsProject.instance().legendLayersAdded.connect(self.notSavedProjectAction)", "def initGUI(self):\n\n\t\t# Set main frame's location \n\t\tself.grid(row=0, column=0, sticky=\"nsew\")\n\n\t\t# Set path entry frame and its location\n\t\tself.entryFrame = Frame(self, relief = RAISED, borderwidth = 1)\n\t\tself.entryFrame.pack(fill = BOTH, expand = False)\n\t\t# Make label\n\t\tif self.message:\n\t\t\tmessageLabel = Label(self.entryFrame, text = self.message, font=(\"Bradley\", 10))\n\t\t\tmessageLabel.pack(anchor=W, padx=0, pady=0)\n\n\t\t# Set path entry and its location\n\t\tself.filePathEntry = Entry(self.entryFrame, bd = 4, width = 50)\n\t\tself.filePathEntry.pack(side = LEFT, padx=2, pady=1)", "def _initUI(self) -> None:\n self._createActions()\n self._addActionsToMoveButtons()\n self._createToolBar()\n self._createStatusBar()\n self._createMainContextMenu()", "def __init__widget(self):\n self.__introduction = QtWidgets.QPushButton('使用“WSAD”对应“上下左右”控制\\n贪吃蛇,点击开始游戏!')\n self.add_layout_widget(self.central_widget, self.__introduction)\n self.__introduction.clicked.connect(self.__run)\n self.__ticker.timeout.connect(self.__snake_move)", "def draw_app(self):\n self.num_points_slider = widgets.IntSlider(\n value=self.num_points,\n min=5,\n max=30,\n step=5,\n description='Number of points:',\n style = {'description_width': 'initial'}\n )\n self.num_points_slider.observe(self._on_num_points_change, ['value'])\n# self.slope_slider = widgets.FloatSlider(\n# value=self.slope,\n# min=-1,\n# max=5,\n# step=0.1,\n# description='Slope:'\n# )\n# self.slope_slider.observe(self._on_slope_change, ['value'])\n self.rand_slider = widgets.FloatSlider(\n value=self.rand,\n min=0,\n max=50,\n step=3,\n description='Randomness:', num_points=(10, 50, 5),\n style = {'description_width': 'initial'}\n )\n self.rand_slider.observe(self._on_rand_change, ['value'])\n self.container.children = [\n self.num_points_slider,\n# self.slope_slider,\n self.rand_slider ,\n self.output_widget\n ]", "def initUi(self):\n\n wndw_box = QtGui.QVBoxLayout()\n\n #Calendar\n wndw_box.addWidget(QtGui.QLabel(\"Enter the date of the Exam\"))\n self.cal = QtGui.QCalendarWidget()\n wndw_box.addWidget(self.cal)\n\n #Score Entry Box\n wndw_box.addWidget(QtGui.QLabel(\"Enter Scores Below\"))\n self.score_entry_box = QtGui.QTextEdit()\n wndw_box.addWidget(self.score_entry_box)\n\n #Buttons\n btn_box = QtGui.QHBoxLayout()\n btn_box.addStretch(1)\n\n self.sub_btn = QtGui.QPushButton('Submit')\n self.ccl_btn = QtGui.QPushButton('Cancel')\n self.rst_btn = QtGui.QPushButton('Reset')\n \n btn_box.addWidget(self.sub_btn)\n btn_box.addWidget(self.ccl_btn)\n btn_box.addWidget(self.rst_btn)\n wndw_box.addLayout(btn_box)\n \n self.setLayout(wndw_box)\n self.setGeometry(100, 100, 300, 550)\n self.setWindowTitle('Enter Scores')\n self.show()", "def init_gui(self):\n # This is the main layout.\n main_layout = QtGui.QVBoxLayout(self)\n\n # This is the start button.\n start_btn = 
QtGui.QPushButton('Start Turntable')\n start_btn.clicked.connect(self.init_turn)\n\n # This is the file browser button.\n brw_btn = QtGui.QPushButton('Browse')\n brw_btn.clicked.connect(self.select_dir)\n\n # This is the render settings drop down.\n self.setting_dropdown = QtGui.QComboBox()\n self.setting_dropdown.addItems(['Low','Medium','High','Show','Custom'])\n\n # These are the line edits.\n self.save_loc = QtGui.QLineEdit()\n self.start_frm_le = QtGui.QLineEdit()\n self.end_frm_le = QtGui.QLineEdit()\n\n # This is the checkbox for rendering wireframe.\n self.ren_cb = QtGui.QCheckBox('Wireframe')\n\n # This is the radio btn group.\n self.rad_grp = QtGui.QButtonGroup()\n rd_01 = QtGui.QRadioButton('Surface')\n rd_02 = QtGui.QRadioButton('Model')\n rd_01.setObjectName('surface')\n rd_02.setObjectName('model')\n self.rad_grp.addButton(rd_01)\n self.rad_grp.addButton(rd_02)\n\n discipline = tl.discipline_check()\n if discipline == 'surface':\n rd_01.toggle()\n else:\n rd_02.toggle()\n\n # These are labels.\n loc_lbl = QtGui.QLabel('Location:')\n start_frm_lbl = QtGui.QLabel('Start Frame:')\n end_frm_lbl = QtGui.QLabel('End Frame:')\n\n # These are the different layout variables\n h_box_01 = QtGui.QHBoxLayout()\n h_box_02 = QtGui.QHBoxLayout()\n h_box_03 = QtGui.QHBoxLayout()\n\n v_box_01 = QtGui.QVBoxLayout()\n\n # This adds the widgets to the layouts.\n v_box_01.addWidget(rd_01)\n v_box_01.addWidget(rd_02)\n\n h_box_01.addLayout(v_box_01)\n h_box_01.addWidget(self.ren_cb)\n h_box_01.addWidget(self.setting_dropdown)\n\n h_box_02.addWidget(loc_lbl)\n h_box_02.addWidget(self.save_loc)\n h_box_02.addWidget(brw_btn)\n\n h_box_03.addWidget(start_btn)\n h_box_03.addWidget(start_frm_lbl)\n h_box_03.addWidget(self.start_frm_le)\n h_box_03.addWidget(end_frm_lbl)\n h_box_03.addWidget(self.end_frm_le)\n\n # This adds the layouts to the window\n main_layout.addLayout(h_box_01)\n main_layout.addLayout(h_box_02)\n main_layout.addLayout(h_box_03)\n\n # This is the main window.\n self.setGeometry(300, 300, 250, 150)\n self.setWindowTitle('Turntable Tool')\n self.show()", "def set_ui(self):\r\n\r\n self.canvas = tk.Canvas(self)\r\n self.canvas.pack()\r\n\r\n self.entry = ttk.Entry(self.canvas, justify=\"center\", font=(\"Calibri\", 12))\r\n\r\n self.grid = Grid(self.canvas)", "def initGui(self):\n\n icon_path = ':/plugins/AreaPrinter/mountainIcon.png'\n self.add_action(\n icon_path,\n text=self.tr(u'AreaPrinter'),\n callback=self.run,\n parent=self.iface.mainWindow())\n\n\tself.add_action(\n icon_path,\n text=self.tr(u'Grid Convergence'),\n callback=self.runTools,\n parent=self.iface.mainWindow())", "def __init__(self):\n # Root window\n self.root = tk.Tk()\n self.root.title(\"Crossword\")\n # Padding frame\n self.frame = tk.Frame(self.root)\n self.frame.pack(fill=\"both\", padx=PAD, pady=PAD)\n # Initialize widget groups\n self.header = HeaderView(self)\n self.puzzle = PuzzleView(self)\n self.clues = CluesView(self)\n # Show widgets\n self.header.show()\n self.puzzle.show()\n self.clues.show()", "def initUI(self):\n \n self.setWindowTitle(\"Intecol Flir camera\")\n self.setGeometry(300, 100, 1012, 622)", "def initialize(self):\n self.setWindowTitle(\"Playlist Maker\")\n self.setGeometry(0,0, 800, 494)\n self.mbox = QVBoxLayout()\n self.hbox = QHBoxLayout()\n self.hbtnbox = QHBoxLayout()", "def setup(self):\n self.ui_manager.purge_ui_elements()\n y_slot = self.window.height // 12\n\n ui_input_box = arcade.gui.UIInputBox(\n center_x=self.window.width // 2,\n center_y=y_slot * 7,\n width=250\n )\n 
ui_input_box.set_style_attrs(\n bg_color=(66, 179, 208),\n bg_color_hover=(112, 212, 238),\n bg_color_focus=(255, 228, 14)\n )\n ui_input_box.text = self.name\n ui_input_box.cursor_index = len(ui_input_box.text)\n self.ui_manager.add_ui_element(ui_input_box)\n\n button = buttons.ExitButton(\n 'Exit',\n center_x=self.window.width // 2,\n center_y=y_slot * 1,\n width=250\n )\n button.set_style_attrs(\n bg_color=(51, 139, 57),\n bg_color_hover=(135, 21, 25),\n bg_color_press=(122, 21, 24),\n )\n self.ui_manager.add_ui_element(button)\n\n button = buttons.AuthorButton(\"Author\",\n center_x=self.window.width // 2,\n center_y=y_slot * 2,\n width=250,\n user=ui_input_box\n )\n button.set_style_attrs(\n bg_color=(51, 139, 57),\n bg_color_hover=(88, 196, 96),\n bg_color_press=(28, 71, 32),\n )\n self.ui_manager.add_ui_element(button)\n\n button = buttons.ResultButton(\"Results\",\n center_x=self.window.width // 2,\n center_y=y_slot * 3,\n width=250,\n user=ui_input_box,\n level = \"level1\"\n )\n button.set_style_attrs(\n bg_color=(51, 139, 57),\n bg_color_hover=(88, 196, 96),\n bg_color_press=(28, 71, 32),\n )\n self.ui_manager.add_ui_element(button)\n\n button = buttons.RulesButton(\"Rules\",\n center_x=self.window.width // 2,\n center_y=y_slot * 4,\n width=250,\n user=ui_input_box\n )\n button.set_style_attrs(\n bg_color=(51, 139, 57),\n bg_color_hover=(88, 196, 96),\n bg_color_press=(28, 71, 32),\n )\n self.ui_manager.add_ui_element(button)\n\n button = buttons.LevelButton(\"Play level 1\",\n center_x=self.window.width // 2,\n center_y=y_slot * 6,\n width=250,\n user=ui_input_box,\n level=\"level1\"\n )\n button.set_style_attrs(\n bg_color=(51, 139, 57),\n bg_color_hover=(88, 196, 96),\n bg_color_press=(28, 71, 32),\n )\n self.ui_manager.add_ui_element(button)\n\n button = buttons.LevelButton(\"Play level 2\",\n center_x=self.window.width // 2,\n center_y=y_slot * 5,\n width=250,\n user=ui_input_box,\n level = \"level2\"\n )\n button.set_style_attrs(\n bg_color=(51, 139, 57),\n bg_color_hover=(88, 196, 96),\n bg_color_press=(28, 71, 32),\n )\n self.ui_manager.add_ui_element(button)", "def initGui(self):\n from p4_view import Gui\n self.updateStatus(\"Launching GUI...\")\n self.gui = Gui(self, self.lmap)\n self.gui.setStart(self.cfg[\"START\"])\n self.gui.setGoal(self.cfg[\"GOAL\"])\n self.gui.setPossGoals(self.cfg[\"POSS_GOALS\"])\n #GHD\n self.gui.setMapName(self.cfg[\"MAP_FILE\"])\n self.updateStatus(\"OK\")\n self.gui.mainloop()", "def __init__(self):\n self.window = Tk() # The main window\n self.__initialize_variables__() # Initialize the variables\n self.__initialize_menu__() # Initialize the Menu\n self.__initialize_status_bar__()\n self.__initialize_gui__() # Initialize the GUI widgets", "def setUpGUI(self):\n WHITE = '#ffffff'\n # Set up the GUI so that we can paint the fractal image on the screen\n canvas = Canvas(self.window, width=self.width, height=self.height, bg=WHITE)\n canvas.pack()\n canvas.create_image((self.width/2, self.height/2), image=self.img, state=\"normal\")" ]
[ "0.66681504", "0.663576", "0.66196823", "0.6619261", "0.6588875", "0.65485215", "0.6535268", "0.65008545", "0.64888674", "0.64518774", "0.64388704", "0.64347047", "0.6428803", "0.64084625", "0.6373432", "0.63612574", "0.6330323", "0.63185287", "0.6310055", "0.63019323", "0.62841254", "0.625889", "0.6258703", "0.6231909", "0.62279606", "0.6225396", "0.62218755", "0.62119514", "0.6204848", "0.6199077" ]
0.74698806
0
Return a list of scores representing the scores of the players, in the order of player's turns.
def get_scores_in_order_of_players(self): players = self.referee.get_current_state().get_players() player_scores = [] for player_color in self.player_color_order: for player in players: if player_color == player.get_color(): player_scores.append(player.get_score()) break return player_scores
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_scores(self):\n return [(self.players[p.get_color()], p.get_score()) for p in self.state.get_players()]", "def calculate_scores(players):\n scores = {}\n for player in players.tuple_:\n scores[player.id_] = player.score()\n return scores", "def get_players_by_rank(self):\n return sorted(self.participants, key=lambda p: p.tournament_score, reverse=True)", "def get_scores(self, tournament: Tournament):\n self.model.eval()\n # collate_fn = lambda x: collate_teams(x, tournament.max_members)\n dl_rank = DataLoader(tournament.ranking, num_workers=self.jobs, batch_size=self.bs, shuffle=False)\n iterator = tqdm(dl_rank, position=0, desc=f'{tournament.tournament_id} ranking', disable=True)\n scores = []\n for i, team in enumerate(iterator):\n score = self.model.get_team_score(team.to(self.device))\n scores.append(score.cpu().numpy())\n\n scores = np.concatenate(scores)\n return scores.flatten()", "def calculate_scores(self):\n words = self.walk_board()\n player_scores = {}\n for word in words:\n player = word.get_owning_player()\n if player not in player_scores:\n player_scores[player] = 0\n player_scores[player] += word.get_score()\n return player_scores", "def get_list_team_scores(self):\n scores = defaultdict(lambda: {\n \"scored_xg\": [],\n \"conceded_xg\": [],\n \"home_adv\": 0,\n \"expected_points\": 0\n })\n\n for g in self.games:\n scores[g.HomeTeam][\"scored_xg\"].append(g.FTHG)\n scores[g.HomeTeam][\"conceded_xg\"].append(g.FTAG)\n scores[g.AwayTeam][\"scored_xg\"].append(g.FTAG)\n scores[g.AwayTeam][\"conceded_xg\"].append(g.FTHG)\n\n for team in scores.keys():\n scores[team][\"expected_points\"] = (self.get_table(metric='points')[team] /\n len(scores[team][\"scored_xg\"]))\n\n return scores", "def climbingLeaderboard(scores, alice):\n\n # unique scores\n scores = sorted(list(set(scores))) # asc\n player_ranks = []\n idx = 0\n n = len(scores)\n\n for alice_score in alice: # alice in asc order\n \n # Find the rank. 
For next alice score (which is not smaller), continue from the same index\n while (n > idx and alice_score >= scores[idx]):\n idx += 1\n\n player_ranks.append(n+1-idx)\n\n return player_ranks", "def calculate_scores():\n all_people = models.Leaderboard.query.order_by(\n models.Leaderboard.score.desc()).all()\n print(all_people)\n users = []\n scores = []\n for person in all_people:\n users.append(person.username)\n scores.append(person.score)\n return users, scores", "def scores_for(self, board):\r\n scores = [1]*board.width\r\n for i in range(board.width):\r\n if not board.can_add_to(i):\r\n scores[i] = -1\r\n elif board.is_win_for(self.checker):\r\n scores[i] = 100\r\n elif board.is_win_for(self.opponent_checker()):\r\n scores[i] = 0\r\n elif self.lookahead == 0:\r\n scores[i] = 50\r\n else:\r\n board.add_checker(self.checker, i)\r\n other = AIPlayer(self.opponent_checker(), self.tiebreak, self.lookahead-1)\r\n other_scores = other.scores_for(board)\r\n if max(other_scores) == 100:\r\n scores[i] = 0\r\n elif max(other_scores) == 50:\r\n scores[i] = 50\r\n elif max(other_scores) == 0:\r\n scores[i] = 100\r\n board.remove_checker(i)\r\n return scores", "def pairing_other_rounds(self, players_list: list[Player]) -> list[Player]:\n\n if self.check_same_tournaments_points(players_list):\n players_list = sorted(players_list, key=lambda player: player.tournament_score)\n else:\n players_list = sorted(players_list, key=lambda player: player.ranking)\n players_list.reverse()\n apairing_players = self.generating_pairs(players_list)\n\n return apairing_players", "def find_all_by_player(self, player):\n cursor = self._connection.cursor()\n command = 'SELECT * FROM scores WHERE player=? ORDER BY level'\n cursor.execute(command, [player])\n return cursor.fetchall()", "def get_scores(self):\n\n\t\tscores = np.dot(self.rankings, self.weights)\n\t\tranked_indices = np.argsort(scores)\n\t\tranked_sources = self.source_names[ranked_indices]\n\t\tranked_scores = sorted(scores)\n\t\tself.scores = {source:score for source, score in zip(ranked_sources, ranked_scores)}\n\n\t\treturn self.scores", "def all_scores(self):\r\n if not self.child_history:\r\n return None\r\n return [self.score_for_attempt(index) for index in xrange(0, len(self.child_history))]", "def climbingLeaderboard(scores, alice):\n unique_scores = list({score: None for score in scores}.keys())[::-1]\n ranks = []\n # last_score_index = 0\n for game_score in alice:\n for i, score in enumerate(unique_scores):\n if score > game_score:\n ranks.append(len(unique_scores) - i + 1)\n break\n elif score == game_score:\n ranks.append(len(unique_scores) - i)\n break\n elif i == len(unique_scores) - 1:\n ranks.append(1)\n else:\n continue\n\n return ranks", "def get_scores(self):\n return self.score", "def get_scores(self) -> tuple:\n return (self.get_score(), self.p2_score)", "def scores_for(self, board):\r\n scores = [50] * board.width\r\n\r\n for col in range(board.width):\r\n if not board.can_add_to(col):\r\n scores[col] = -1\r\n elif board.is_win_for(self.checker):\r\n scores[col] = 100\r\n elif board.is_win_for(self.opponent_checker()):\r\n scores[col] = 0\r\n elif self.lookahead == 0:\r\n scores[col] = 50\r\n else: \r\n board.add_checker(self.checker, col)\r\n opponent = AIPlayer(self.opponent_checker(), self.tiebreak, self.lookahead - 1)\r\n opp_scores = opponent.scores_for(board)\r\n if max(opp_scores) == 100:\r\n scores[col] = 0\r\n elif max(opp_scores) == 0:\r\n scores[col] = 100\r\n else:\r\n scores[col] = 50\r\n 
board.remove_checker(col)\r\n\r\n return scores", "def get_score_list(self) -> List[int]:\n\n result, values = [], []\n for i in range(len(self._cards)):\n for value in self._cards[i]:\n values.append(value[0])\n if sum(values) > 21:\n for num in range(len(values)):\n if values[num] == 11:\n values[num] = 1\n result.append(sum(values))\n values = []\n return result", "def scores(self) -> List[float]:\n if not self.prediction:\n return []\n return [sentence.score for sentence in self.prediction.sentences]", "def abilityScores():\n\n scores_list = []\n\n for i in range(6):\n temp_list = []\n for j in range(4):\n temp_list.append(r.choice([1,2,3,4,5,6]))\n temp_list.sort()\n scores_list.append(temp_list[1]+temp_list[2]+temp_list[3])\n scores_list.sort()\n return scores_list", "def all_scores(self):\r\n if not self.child_history:\r\n return None\r\n return [self.child_history[i].get('score') for i in xrange(0, len(self.child_history))]", "def childScores(self):\n return [x.score for x in self.children]", "def give_round_scores(list_of_players):\n print(\"\\nThe round has ended !\\nWe shall now unveil the cards and the scores!\")\n\n for player in list_of_players:\n cards = [card.name for card in player.cards]\n cards_string = \" \"\n for card in cards:\n cards_string += card + \", \"\n cards_string = cards_string[:-2]\n print(\"\\n{} has these cards: \".format(player.name), cards_string)\n print(\"{} has a score of {}\".format(player.name, player.score()))\n final_scores = [player.score() for player in list_of_players]\n min_score = min(final_scores)\n winners_index = [i for i, x in enumerate(final_scores) if x == min_score]\n if len(winners_index) == 1:\n index_winner = winners_index[0]\n winner = list_of_players[index_winner]\n print(winner.name, \"won the round with a score of {}\".format(winner.score()))\n if len(winners_index) > 1:\n print(\"It's a tie!\")\n winners_names = \"\"\n winners = [list_of_players[i] for i in winners_index]\n for winner in winners:\n winners_names += winner.name\n print(winners_names, \"won the round with a score of \", str(min_score))", "def find_all(self):\n cursor = self._connection.cursor()\n cursor.execute('SELECT * FROM scores ORDER BY level')\n all_scores = cursor.fetchall()\n return all_scores", "def play_game(self):\n # print(\"Playing a random game!\")\n for round_num in range(1, self.rounds_to_play + 1):\n # print(\"Play Round No. 
{}\".format(round_num))\n round = Round(round_num, self.players)\n score = round.play_round()\n # print(len(round.played_cards))\n for i in range(self.num_players):\n self.scores[i] += score[i]\n # print(\"Scores: {}\".format(self.scores))\n # print(\"Final scores: {}\".format(self.scores))\n for player in self.players:\n player.reset_score()\n return self.scores", "def _update_score(self) -> None:\n\n # setting new score by iterating over players\n self.score_play[self.n_play_turns, ] = [\n self._score_table[(\n self.contract.level,\n self.contract.suit,\n self.tricks[i],\n self.contract.player_vulnerability[i],\n int(self.contract.double + self.contract.redouble)\n )]\n for i in range(NUM_PLAYERS)\n ]", "def own_games(self):\r\n return sorted(self.games + self.finals, key=lambda g: (g.datetime, g.pitch.rank))", "def sorted_scores(scores):\n\treturn sorted(scores, key=lambda sailor: (total_score(sailor), sailor[1][0]))", "def getScore(self):\n\t\tself.scoreList = [submissionsss.score for submissionsss in self.subreddit.top(time_filter = 'day', limit = self.limits)]\n\t\treturn self.scoreList", "def ranks(cls):\n ranked = []\n for team in sorted(dbsession.query(cls).order_by(desc(cls.money)).all()):\n if not team.locked:\n ranked.append(team)\n return ranked" ]
[ "0.8231863", "0.708941", "0.70600694", "0.69003886", "0.68367106", "0.6832921", "0.67670155", "0.6752282", "0.6668831", "0.66560477", "0.656077", "0.65558183", "0.65367687", "0.6521682", "0.65157676", "0.6506245", "0.6499215", "0.64963645", "0.6492643", "0.6460885", "0.641571", "0.6378673", "0.63674086", "0.6321661", "0.6316806", "0.6272668", "0.6255391", "0.6206682", "0.62060803", "0.6203774" ]
0.8183596
1
Get a string representation of all the players current scores. The players are listed in turn order (meaning it does not start at the player whose turn it is currently, but starts at the first player who placed a penguin.)
def get_current_scores_buffer(self): player_scores = self.get_scores_in_order_of_players() score_string = "Scores:\n" for color, score in zip(self.player_color_order, player_scores): player_score = "{}: {}".format(color, score) score_string += player_score score_string += "\t" return score_string
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_scoreboard(self):\n output = ''\n # parallel dictionaries with innings and scores\n innings = []\n away = []\n home = []\n for x in self:\n innings.append(x['inning'])\n away.append(x['away'])\n home.append(x['home'])\n # go through all the information and make a nice output\n # that looks like a scoreboard\n output += 'Inning\\t'\n for x in innings:\n output += str(x) + ' '\n output += '\\n'\n for x in innings:\n output += '---'\n output += '\\nAway\\t' + self.__enumerate_scoreboard(away)\n output += '\\nHome\\t' + self.__enumerate_scoreboard(home)\n return output", "def print_current_scores(self, round_num, index):\n print(f'\\n{self._players_list[index].name.upper()} '\n f'YOUR TURN. ROUND: {round_num + 1}')\n\n print('-'*21)\n print('ROLL SCORES'.rjust(16))\n self._players_list[index].print_stacked_score_dict()\n\n print('-'*21)\n print('TOP SCORE BONUS'.rjust(19))\n print(f\"Top Score:\".ljust(16) +\n f\"{self._players_list[index].get_top_score()}\".rjust(3))\n print(f\"Top Bonus Score:\".ljust(16) +\n f\"{self._players_list[index].get_top_bonus_score()}\".rjust(3))\n\n print('-'*21)\n print('TOTAL SCORES'.rjust(19))\n print(f\"Total Top:\".ljust(16) +\n f\"{self._players_list[index].get_total_top_score()}\".rjust(3))\n print(f\"Total Bottom:\".ljust(16) +\n f\"{self._players_list[index].get_total_bottom_score()}\".rjust(3))\n\n print('-'*21)\n print(f\"GRAND TOTAL:\".ljust(16) +\n f\"{self._players_list[index].get_grand_total_score()}\".rjust(3))", "def __str__(self):\n return \"Name: \" + self.name + \"\\nScores: \" + \\\n \" \".join(map(str, self.scores))", "def __str__(self):\n return \"Name: \" + self._name + \"\\nScores: \" + \\\n \" \".join(map(str, self._scores))", "def get_scores(self):\n return [(self.players[p.get_color()], p.get_score()) for p in self.state.get_players()]", "def score_board():\r\n \r\n return str(score) + \"/\" + str(rounds)", "def get_scores_in_order_of_players(self):\n \n players = self.referee.get_current_state().get_players()\n\n player_scores = []\n for player_color in self.player_color_order:\n for player in players:\n if player_color == player.get_color():\n player_scores.append(player.get_score())\n break\n\n return player_scores", "def print_score(score):\n output_str = \"\"\n for team in score:\n output_str += f\"{team}: \" + '\\t' + f\"{score[team]}\\n\"\n print(output_str)", "def printPlayerStats(self):\n\t\tplayerStats = ['Name = ' + self.name, \n\t\t\t\t\t 'Agility = ' + str(self.agility), \n\t\t\t\t\t 'Personality = ' + str(self.personality), \n\t\t\t\t\t 'Sanity = ' + str(self.sanity), \n\t\t\t\t\t 'Strength = ' + str(self.strength), \n\t\t\t\t\t 'Progress = ' + str(self.progress)]\n\t\tprint playerStats", "def getScores():\r\n results = \"\"\r\n with sqlite3.connect(database_file) as conn:\r\n cursor = conn.cursor()\r\n team_scores = cursor.execute(\"\"\" SELECT * FROM scores;\"\"\")\r\n\r\n for row in team_scores.fetchall():\r\n teamname, auto, rc, spirit, video = row\r\n results += result_string.format(teamname, auto, rc, spirit, video) + \"\\n\"\r\n return results", "def opponents_score(self):\n if self.opponent_wickets == 10:\n var1 = \"All Out\"\n return str('{0} {1}').format(self.opponent_runs, var1)\n else:\n var1 = self.opponent_wickets\n return str('{0}-{1}').format(self.opponent_runs, var1)", "def showtopscores(self):\n top_scores = LeaderBoard.gettopscorerslist(CURRENT_GAME_LEVEL)\n level_string = \"\"\n if CURRENT_GAME_LEVEL == DifficultyLevel.ExpertLevel:\n level_string = \"Expert level\"\n elif CURRENT_GAME_LEVEL 
== DifficultyLevel.BeginnerLevel:\n level_string = \"Beginner level\"\n else:\n level_string = \"Intermediate level\"\n leaderboard = \"Rank\".ljust(10) + \"Player Name\".ljust(30) + \"Score\".ljust(10) + '\\n'\n print leaderboard,\n rank = 1\n for score in top_scores:\n score = str(rank).ljust(10) + score\n print score,\n leaderboard = leaderboard + score\n rank = rank + 1\n QtGui.QMessageBox.about(self, \"Leaderboard for \" + level_string, leaderboard)", "def print_scores(self):\n print(\"scores: \", self.get_scores())", "def __str__(self):\n result = \", \".join(map(str, self.hand))\n result += \"\\n \" + str(self.get_score()) + \" points\"\n return result", "def player_names(players):\r\n string = ''\r\n for p in players:\r\n string = string + p.name + ', '\r\n return string", "def give_round_scores(list_of_players):\n print(\"\\nThe round has ended !\\nWe shall now unveil the cards and the scores!\")\n\n for player in list_of_players:\n cards = [card.name for card in player.cards]\n cards_string = \" \"\n for card in cards:\n cards_string += card + \", \"\n cards_string = cards_string[:-2]\n print(\"\\n{} has these cards: \".format(player.name), cards_string)\n print(\"{} has a score of {}\".format(player.name, player.score()))\n final_scores = [player.score() for player in list_of_players]\n min_score = min(final_scores)\n winners_index = [i for i, x in enumerate(final_scores) if x == min_score]\n if len(winners_index) == 1:\n index_winner = winners_index[0]\n winner = list_of_players[index_winner]\n print(winner.name, \"won the round with a score of {}\".format(winner.score()))\n if len(winners_index) > 1:\n print(\"It's a tie!\")\n winners_names = \"\"\n winners = [list_of_players[i] for i in winners_index]\n for winner in winners:\n winners_names += winner.name\n print(winners_names, \"won the round with a score of \", str(min_score))", "def print_scores(self):\n ### FILL IN ###", "def print_player_rank_and_points(self):\r\n pass", "def score(self):\n score_message = {\n 'Onewins': \"\\nThe Winner is Player 1!\",\n 'Twowins': \"\\nThe Winner is Player 2!\",\n 'Tie': \"\\nTie! 
Looks like everyone's a winner!\",\n 'Nowinner': \"\\nYikes, neither of you win!\"\n }\n if self.pone_score > self.ptwo_score:\n print(score_message['Onewins'])\n elif self.pone_score < self.ptwo_score:\n print(score_message['Twowins'])\n elif self.pone_score == 0 and self.ptwo_score == 0:\n print(score_message['Nowinner'])\n else:\n print(score_message['Tie'])", "def __str__(self):\n games = [f'{game.date} - {game.opponent_abbr}'.strip()\n for game in self._games]\n return '\\n'.join(games)", "def nice_score(self):\n return ('{0.away_team} ({0.away_team_runs}) at '\n '{0.home_team} ({0.home_team_runs})').format(self)", "def print_scores(board: Connect4Board) -> None:\r\n print('')\r\n # print('\\n******************************')\r\n # print('************SCORES************')\r\n # print('******************************')\r\n # print(' BLACK = {} & WHITE = {} '.format(board.get_score('B'), board.get_score('W')))\r\n # print('******************************\\n')\r", "def get_scoreboard(self):\n cases = len(self.start_player_list)\n\n opponent03 = \"N/A\"\n opponent04 = \"N/A\"\n opponent05 = \"N/A\"\n opponent06 = \"N/A\"\n opponent07 = \"N/A\"\n opponent08 = \"N/A\"\n opponent09 = \"N/A\"\n opponent10 = \"N/A\"\n opponent11 = \"N/A\"\n opponent12 = \"N/A\"\n opponent13 = \"N/A\"\n opponent14 = \"N/A\"\n winner = \"N/A\"\n\n display = \"\\n\"\n\n # Game with 3 players\n if cases == 3:\n opponent01 = self.all_opponents[0][0][0]\n opponent02 = self.all_opponents[0][0][1]\n if len(self.all_opponents) == 2:\n opponent03 = self.all_opponents[1][0][0]\n opponent04 = self.all_opponents[1][0][1]\n if len(self.winner_list) == 2:\n winner = self.winner_list[1][0]\n\n first_game = [opponent01, opponent02]\n max_string = len(max(first_game, key=len))\n string_size1 = len(opponent01)\n padding1 = \"\".ljust((int(max_string-string_size1)), '-')\n string_size2 = len(max([opponent01, opponent02], key=len))\n padding2 = \"\".ljust(string_size2, ' ')\n string_size3 = len(opponent02)\n padding3 = \"\".ljust((int(max_string)-string_size3), '-')\n string_size4 = len(padding2+\"|---\")\n padding4 = \"\".ljust(string_size4, ' ')\n string_size6 = max(len(opponent03), len(opponent04)) - len(opponent03)\n padding6 = \"\".ljust(string_size6, '-')\n string_size7 = max(len(opponent03), len(opponent04)) - len(opponent04)\n padding7 = \"\".ljust(string_size7, '-')\n string_size5 = len(padding4+opponent04+padding7) - len(opponent02+padding3) - 1\n padding5 = \"\".ljust(string_size5, ' ')\n\n display += opponent01+padding1+\"|\"+\"\\n\"\n display += padding2+\"|---\"+opponent03+padding6+\"|\"+\"\\n\"\n display += opponent02+padding3+\"|\"+padding5+\"|---\"+winner+\"\\n\"\n display += padding4+opponent04+padding7+\"|\"+\"\\n\"\n\n # Game with 4 players\n if cases == 4:\n opponent01 = self.all_opponents[0][0][0]\n opponent02 = self.all_opponents[0][0][1]\n opponent03 = self.all_opponents[0][1][0]\n opponent04 = self.all_opponents[0][1][1]\n if len(self.winner_list_temp) == 1:\n opponent05 = self.winner_list_temp[0]\n if len(self.winner_list) >= 1:\n opponent05 = self.winner_list[0][0]\n opponent06 = self.winner_list[0][1]\n if self.winner_state == 1:\n winner = self.winner_list[1][0]\n\n first_game = [opponent01, opponent02, opponent03, opponent04]\n max_string = len(max(first_game, key=len))\n string_size1 = len(opponent01)\n padding1 = \"\".ljust(int(max_string-string_size1), '-')\n string_size2 = max_string\n padding2 = \"\".ljust(string_size2, ' ')\n string_size3 = len(opponent02)\n padding3 = 
\"\".ljust(int(max_string-string_size3), '-')\n string_size5 = len(opponent03)\n padding5 = \"\".ljust(int(max_string-string_size5), '-')\n string_size6 = max_string\n padding6 = \"\".ljust(string_size6, ' ')\n string_size7 = len(opponent04)\n padding7 = \"\".ljust(int(max_string-string_size7), '-')\n string_size8 = max(len(opponent05), len(opponent06)) - len(opponent05)\n padding8 = \"\".ljust(string_size8, '-')\n string_size9 = max(len(opponent05), len(opponent06)) - len(opponent06)\n padding9 = \"\".ljust(string_size9, '-')\n string_size4 = max(len(\"\"+padding2+\"|---\"+opponent05+padding8+\"|\"), (len(\"\"+padding6+\"|---\"+opponent06+padding9+\"|\"))) - 1\n padding4 = \"\".ljust(string_size4, ' ')\n string_size10 = len(padding4) - len(\"\"+opponent02+padding3+\"|\")\n padding10 = \"\".ljust(string_size10, ' ')\n string_size11 = len(padding4) - len(\"\"+opponent03+padding5+\"|\")\n padding11 = \"\".ljust(string_size11, ' ')\n\n display += opponent01+padding1+\"|\"+\"\\n\"\n display += padding2+\"|---\"+opponent05+padding8+\"|\"+\"\\n\"\n display += opponent02+padding3+\"|\"+padding10+\"|\"+\"\\n\"\n display += padding4+\"|---\"+winner+\"\\n\"\n display += opponent03+padding5+\"|\"+padding11+\"|\"+\"\\n\"\n display += padding6+\"|---\"+opponent06+padding9+\"|\"+\"\\n\"\n display += opponent04+padding7+\"|\"+\"\\n\"\n\n # Game with 5 players\n if cases == 5:\n opponent01 = self.all_opponents[0][0][0]\n opponent02 = self.all_opponents[0][0][1]\n if len(self.all_opponents) >= 2:\n opponent03 = self.all_opponents[1][0][0]\n opponent04 = self.all_opponents[1][0][1]\n opponent05 = self.all_opponents[1][1][0]\n opponent06 = self.all_opponents[1][1][1]\n if len(self.winner_list_temp) == 1:\n opponent07 = self.winner_list_temp[0]\n if len(self.winner_list) >= 2:\n opponent07 = self.winner_list[1][0]\n opponent08 = self.winner_list[1][1]\n if len(self.winner_list) == 3:\n winner = self.winner_list[2][0]\n\n first_game = [opponent01, opponent02]\n max_string = len(max(first_game, key=len))\n string_size1 = len(opponent01)\n padding1 = \"\".ljust(int(max_string-string_size1), '-')\n string_size2 = max_string\n padding2 = \"\".ljust(string_size2, ' ')\n string_size3 = len(opponent02)\n padding3 = \"\".ljust(int(max_string-string_size3), '-')\n string_size4 = max(len(\"\"+opponent01+padding1), len(\"\"+opponent01+padding1)) + 4\n padding4 = \"\".ljust(string_size4, ' ')\n string_size5 = max(len(opponent03), len(opponent04), len(opponent05), len(opponent06))\n padding5 = \"\".ljust((string_size5 - len(opponent03)), '-')\n string_size7 = max(len(opponent03), len(opponent04), len(opponent05), len(opponent06))\n padding7 = \"\".ljust((string_size7 - len(opponent04)), '-')\n string_size8 = len(\"\"+padding2+\"|---\")\n padding8 = \"\".ljust(string_size8, ' ')\n string_size6 = max(len(opponent03), len(opponent04), len(opponent05), len(opponent06)) + 3\n padding6 = \"\".ljust(string_size6, ' ')\n string_size9 = max(len(opponent03), len(opponent04), len(opponent05), len(opponent06)) - len(opponent05)\n padding9 = \"\".ljust(string_size9, '-')\n string_size10 = max(len(opponent03), len(opponent04), len(opponent05), len(opponent06)) - len(opponent06)\n padding10 = \"\".ljust(string_size10, '-')\n string_size11 = max(len(opponent03), len(opponent04), len(opponent05), len(opponent06)) + len(padding4)\n padding11 = \"\".ljust(string_size11, ' ')\n string_size12 = max(len(opponent07), len(opponent08)) - len(opponent07)\n padding12 = \"\".ljust(string_size12, '-')\n string_size13 = max(len(opponent07), 
len(opponent08)) - len(opponent08)\n padding13 = \"\".ljust(string_size13, '-')\n string_size14 = max(len(opponent07), len(opponent08)) + 3\n padding14 = \"\".ljust(string_size14, ' ')\n string_size15 = max(len(opponent07), len(opponent08)) + 3\n padding15 = \"\".ljust(string_size15, ' ')\n string_size16 = len(\"\"+padding11+\"|---\"+opponent08+padding13)\n padding16 = \"\".ljust(string_size16, ' ')\n\n display += opponent01+padding1+\"|\"+\"\\n\"\n display += padding2+\"|---\"+opponent03+padding5+\"|\"+\"\\n\"\n display += opponent02+padding3+\"|\"+padding6+\"|---\"+opponent07+padding12+\"|\"+\"\\n\"\n display += padding4+opponent04+padding7+\"|\"+padding14+\"|\"+\"\\n\"\n display += padding16+\"|---\"+winner+\"\\n\"\n display += padding8+opponent05+padding9+\"|\"+padding15+\"|\"+\"\\n\"\n display += padding11+\"|---\"+opponent08+padding13+\"|\"+\"\\n\"\n display += padding8+opponent06+padding10+\"|\"+\"\\n\"\n\n # Game with 6 players\n if cases == 6:\n opponent01 = self.all_opponents[0][0][0]\n opponent02 = self.all_opponents[0][0][1]\n opponent03 = self.all_opponents[0][1][0]\n opponent04 = self.all_opponents[0][1][1]\n opponent05 = self.all_opponents[0][2][0]\n opponent06 = self.all_opponents[0][2][1]\n if len(self.winner_list_temp) >= 1:\n opponent07 = self.winner_list_temp[0]\n if len(self.winner_list_temp) >= 2:\n opponent08 = self.winner_list_temp[1]\n if len(self.winner_list) >= 1:\n print(self.winner_list)\n opponent07 = self.winner_list[0][0]\n opponent08 = self.winner_list[0][1]\n opponent09 = self.winner_list[0][2]\n if len(self.all_opponents) >= 3:\n opponent10 = self.all_opponents[2][0][0]\n if len(self.winner_list) == 3:\n winner = self.winner_list[2][0]\n\n first_game = [opponent01, opponent02, opponent03, opponent04, opponent05, opponent06,]\n max_string = len(max(first_game, key=len))\n string_size1 = len(opponent01)\n padding1 = \"\".ljust(int(max_string-string_size1), '-')\n string_size2 = len(opponent02)\n padding2 = \"\".ljust(int(max_string-string_size2), '-')\n string_size3 = len(opponent03)\n padding3 = \"\".ljust(int(max_string-string_size3), '-')\n string_size4 = len(opponent04)\n padding4 = \"\".ljust(int(max_string-string_size4), '-')\n string_size5 = len(opponent05)\n padding5 = \"\".ljust(int(max_string-string_size5), '-')\n string_size6 = len(opponent06)\n padding6 = \"\".ljust(int(max_string-string_size6), '-')\n string_size7 = max_string\n padding7 = \"\".ljust(string_size7, ' ')\n string_size8 = max(len(opponent07), len(opponent08), len(opponent09)) - len(opponent07)\n padding8 = \"\".ljust(string_size8, '-')\n string_size9 = max(len(opponent07), len(opponent08), len(opponent09)) - len(opponent08)\n padding9 = \"\".ljust(string_size9, '-')\n string_size10 = max(len(opponent07), len(opponent08), len(opponent09)) - len(opponent09)\n padding10 = \"\".ljust(string_size10, '-')\n string_size12 = len(\"\"+padding7+\"|---\"+opponent09+padding10)\n padding12 = \"\".ljust(string_size12, ' ')\n string_size14 = max(len(opponent07), len(opponent08), len(opponent09)) + 3\n padding14 = \"\".ljust(string_size14, ' ')\n string_size15 = len(\"\"+padding12+\"|---\"+opponent10) - len(\"\"+padding7+\"|---\"+opponent09)\n padding15 = \"\".ljust(string_size15, '-')\n string_size16 = len(padding7+\"|---\"+opponent09+padding15) - len(opponent05+padding5+\"|\")\n padding16 = \"\".ljust(string_size16, ' ')\n string_size17 = len(padding7+\"|---\"+opponent09+padding15)\n padding17 = \"\".ljust(string_size17, ' ')\n string_size18 = len(opponent09 + padding15) + 3\n padding18 = 
\"\".ljust(string_size18, ' ')\n string_size19 = len(\"|---\"+opponent10) - 1\n padding19 = \"\".ljust(string_size19, ' ')\n\n display += opponent01+padding1+\"|\"+\"\\n\"\n display += padding7+\"|---\"+opponent07+padding8+\"|\"+\"\\n\"\n display += opponent02+padding2+\"|\"+padding14+\"|\"+\"\\n\"\n display += padding12+\"|---\"+opponent10+\"|\"+\"\\n\"\n display += opponent03+padding3+\"|\"+padding14+\"|\"+padding19+\"|\"+\"\\n\"\n display += padding7+\"|---\"+opponent08+padding9+\"|\"+padding19+\"|\"+\"\\n\"\n display += opponent04+padding4+\"|\"+padding18+\"|---\"+winner+\"\\n\"\n display += padding17+\"|\"+\"\\n\"\n display += opponent05+padding5+\"|\"+padding16+\"|\"+\"\\n\"\n display += padding7+\"|---\"+opponent09+padding15+\"|\"+\"\\n\"\n display += opponent06+padding6+\"|\"+\"\\n\"\n\n # Game with 7 players\n if cases == 7:\n opponent01 = self.all_opponents[0][0][0]\n opponent02 = self.all_opponents[0][0][1]\n if len(self.all_opponents) >= 2:\n\n opponent03 = self.all_opponents[1][0][0]\n opponent04 = self.all_opponents[1][0][1]\n opponent05 = self.all_opponents[1][1][0]\n opponent06 = self.all_opponents[1][1][1]\n opponent07 = self.all_opponents[1][2][0]\n opponent08 = self.all_opponents[1][2][1]\n if len(self.winner_list_temp) >= 1:\n opponent09 = self.winner_list_temp[0]\n if len(self.winner_list_temp) >= 2:\n opponent10 = self.winner_list_temp[1]\n if len(self.winner_list) >= 2:\n opponent09 = self.winner_list[1][0]\n opponent10 = self.winner_list[1][1]\n opponent11 = self.winner_list[1][2]\n if len(self.all_opponents) >= 4:\n opponent12 = self.all_opponents[3][0][0]\n if len(self.winner_list) == 4:\n winner = self.winner_list[3][0]\n\n string_size1 = max(len(opponent01), len(opponent02)) - len(opponent01)\n padding1 = \"\".ljust(string_size1, '-')\n string_size2 = max(len(opponent01), len(opponent02)) - len(opponent02)\n padding2 = \"\".ljust(string_size2, '-')\n string_size3 = len(opponent01+padding1)\n padding3 = \"\".ljust(string_size3, ' ')\n string_size4 = len(opponent01+padding1) + 4\n padding4 = \"\".ljust(string_size4, ' ')\n string_size5 = max(len(opponent03), len(opponent04), len(opponent05), len(opponent06), len(opponent07), len(opponent08)) - len(opponent03)\n padding5 = \"\".ljust(string_size5, '-')\n string_size6 = max(len(opponent03), len(opponent04), len(opponent05), len(opponent06), len(opponent07), len(opponent08)) - len(opponent04)\n padding6 = \"\".ljust(string_size6, '-')\n string_size7 = max(len(opponent03), len(opponent04), len(opponent05), len(opponent06), len(opponent07), len(opponent08)) - len(opponent05)\n padding7 = \"\".ljust(string_size7, '-')\n string_size8 = max(len(opponent03), len(opponent04), len(opponent05), len(opponent06), len(opponent07), len(opponent08)) - len(opponent06)\n padding8 = \"\".ljust(string_size8, '-')\n string_size9 = max(len(opponent03), len(opponent04), len(opponent05), len(opponent06), len(opponent07), len(opponent08)) - len(opponent07)\n padding9 = \"\".ljust(string_size9, '-')\n string_size10 = max(len(opponent03), len(opponent04), len(opponent05), len(opponent06), len(opponent07), len(opponent08)) - len(opponent08)\n padding10 = \"\".ljust(string_size10, '-')\n string_size11 = len(padding4+opponent08+padding10)\n padding11 = \"\".ljust(string_size11, ' ')\n string_size12 = len(padding4+opponent08+padding10) - len(opponent02+padding2) - 1\n padding12 = \"\".ljust(string_size12, ' ')\n string_size13 = max(len(opponent09), len(opponent10)) - len(opponent09)\n padding13 = \"\".ljust(string_size13, '-')\n string_size14 = 
max(len(opponent09), len(opponent10)) - len(opponent10)\n padding14 = \"\".ljust(string_size14, '-')\n string_size15 = len(opponent09+padding13) + 3\n padding15 = \"\".ljust(string_size15, ' ')\n string_size16 = len(opponent02+padding2+\"|\"+padding12+\"|---\"+opponent09+padding13)\n padding16 = \"\".ljust(string_size16, ' ')\n string_size17 = len(\"---\"+opponent12)\n padding17 = \"\".ljust(string_size17, ' ')\n string_size18 = len(\"---\"+opponent12) + len(\"---\"+opponent10+padding14) + 1\n padding18 = \"\".ljust(string_size18, ' ')\n string_size19 = len(padding4+opponent06+padding8+\"|\"+padding18)\n padding19 = \"\".ljust(string_size19, ' ')\n string_size20 = len(padding18) - len(opponent11) - 3\n padding20 = \"\".ljust(string_size20, '-')\n\n display += opponent01+padding1+\"|\"+\"\\n\"\n display += padding3+\"|---\"+opponent03+padding5+\"|\"+\"\\n\"\n display += opponent02+padding2+\"|\"+padding12+\"|---\"+opponent09+padding13+\"|\"+\"\\n\"\n display += padding4+opponent04+padding6+\"|\"+padding15+\"|\"+\"\\n\"\n display += padding16+\"|---\"+opponent12+\"|\"+\"\\n\"\n display += padding4+opponent05+padding7+\"|\"+padding15+\"|\"+padding17+\"|\"+\"\\n\"\n display += padding11+\"|---\"+opponent10+padding14+\"|\"+padding17+\"|\"+\"\\n\"\n display += padding4+opponent06+padding8+\"|\"+padding18+\"|---\"+winner+\"\\n\"\n display += padding19+\"|\"+\"\\n\"\n display += padding4+opponent07+padding9+\"|\"+padding18+\"|\"+\"\\n\"\n display += padding11+\"|---\"+opponent11+padding20+\"|\"+\"\\n\"\n display += padding4+opponent08+padding10+\"|\"+\"\\n\"\n\n # Game with 8 players\n if cases == 8:\n opponent01 = self.all_opponents[0][0][0]\n opponent02 = self.all_opponents[0][0][1]\n opponent03 = self.all_opponents[0][1][0]\n opponent04 = self.all_opponents[0][1][1]\n opponent05 = self.all_opponents[0][2][0]\n opponent06 = self.all_opponents[0][2][1]\n opponent07 = self.all_opponents[0][3][0]\n opponent08 = self.all_opponents[0][3][1]\n if len(self.all_opponents) == 1:\n if len(self.winner_list_temp) >= 1:\n opponent09 = self.winner_list_temp[0]\n if len(self.winner_list_temp) >= 2:\n opponent10 = self.winner_list_temp[1]\n if len(self.winner_list_temp) >= 3:\n opponent11 = self.winner_list_temp[2]\n if len(self.all_opponents) >= 2:\n opponent09 = self.all_opponents[1][0][0]\n opponent10 = self.all_opponents[1][0][1]\n opponent11 = self.all_opponents[1][1][0]\n opponent12 = self.all_opponents[1][1][1]\n if len(self.winner_list_temp) >= 1:\n opponent13 = self.winner_list_temp[0]\n if len(self.all_opponents) >= 3:\n opponent13 = self.all_opponents[2][0][0]\n opponent14 = self.all_opponents[2][0][1]\n if len(self.winner_list) == 3:\n winner = self.winner_list[2][0]\n\n first_game = [opponent01, opponent02, opponent03, opponent04, opponent05, opponent06, opponent07, opponent08]\n max_string = len(max(first_game, key=len))\n string_size1 = len(opponent01)\n padding1 = \"\".ljust(int(max_string-string_size1), '-')\n string_size2 = len(opponent02)\n padding2 = \"\".ljust(int(max_string-string_size2), '-')\n string_size3 = len(opponent03)\n padding3 = \"\".ljust(int(max_string-string_size3), '-')\n string_size4 = len(opponent04)\n padding4 = \"\".ljust(int(max_string-string_size4), '-')\n string_size5 = len(opponent05)\n padding5 = \"\".ljust(int(max_string-string_size5), '-')\n string_size6 = len(opponent06)\n padding6 = \"\".ljust(int(max_string-string_size6), '-')\n string_size7 = len(opponent07)\n padding7 = \"\".ljust(int(max_string-string_size7), '-')\n string_size8 = len(opponent08)\n padding8 = 
\"\".ljust(int(max_string-string_size8), '-')\n string_size9 = len(opponent01+padding1)\n padding9 = \"\".ljust(string_size9, ' ')\n string_size10 = max(len(opponent09), len(opponent10), len(opponent11), len(opponent12)) - len(opponent09)\n padding10 = \"\".ljust(string_size10, '-')\n string_size11 = max(len(opponent09), len(opponent10), len(opponent11), len(opponent12)) - len(opponent10)\n padding11 = \"\".ljust(string_size11, '-')\n string_size12 = max(len(opponent09), len(opponent10), len(opponent11), len(opponent12)) - len(opponent11)\n padding12 = \"\".ljust(string_size12, '-')\n string_size13 = max(len(opponent09), len(opponent10), len(opponent11), len(opponent12)) - len(opponent12)\n padding13 = \"\".ljust(string_size13, '-')\n string_size14 = len(padding9+\"|---\"+opponent09+padding10) - len(opponent02+padding2) - 1\n padding14 = \"\".ljust(string_size14, ' ')\n string_size15 = len(padding9+\"|---\"+opponent09+padding10)\n padding15 = \"\".ljust(string_size15, ' ')\n string_size16 = max(len(opponent13), len(opponent14)) - len(opponent13)\n padding16 = \"\".ljust(string_size16, '-')\n string_size17 = max(len(opponent13), len(opponent14)) - len(opponent14)\n padding17 = \"\".ljust(string_size17, '-')\n string_size18 = len(opponent14+padding17) + 3\n padding18 = \"\".ljust(string_size18, ' ')\n string_size19 = len(padding15+\"|---\"+opponent14+padding17) - len(opponent08+padding8) - 1\n padding19 = \"\".ljust(string_size19, ' ')\n string_size20 = len(padding15+\"|---\"+opponent14+padding17)\n padding20 = \"\".ljust(string_size20, ' ')\n\n display += opponent01+padding1+\"|\"+\"\\n\"\n display += padding9+\"|---\"+opponent09+padding10+\"|\"+\"\\n\"\n display += opponent02+padding2+\"|\"+padding14+\"|\"+\"\\n\"\n display += padding15+\"|---\"+opponent13+padding16+\"|\"+\"\\n\"\n display += opponent03+padding3+\"|\"+padding14+\"|\"+padding18+\"|\"+\"\\n\"\n display += padding9+\"|---\"+opponent10+padding11+\"|\"+padding18+\"|\"+\"\\n\"\n display += opponent04+padding4+\"|\"+padding19+\"|\"+\"\\n\"\n display += padding20+\"|---\"+winner+\"\\n\"\n display += opponent05+padding5+\"|\"+padding19+\"|\"+\"\\n\"\n display += padding9+\"|---\"+opponent11+padding12+\"|\"+padding18+\"|\"+\"\\n\"\n display += opponent06+padding6+\"|\"+padding14+\"|\"+padding18+\"|\"+\"\\n\"\n display += padding15+\"|---\"+opponent14+padding17+\"|\"+\"\\n\"\n display += opponent07+padding7+\"|\"+padding14+\"|\"+\"\\n\"\n display += padding9+\"|---\"+opponent12+padding13+\"|\"+\"\\n\"\n display += opponent08+padding8+\"|\"+\"\\n\"\n\n return display", "def get_score(self, player):\n\n df = pd.read_csv('RPSscores.csv')\n if not str(player) in df['Name'].to_dict().values():\n df.loc[len(df.index)] = [str(player),\n 0, 0, 0]\n player_index = int(df.loc[df['Name'] == str(player)].index[0])\n result = 'wins: ' + str(df.iloc[player_index, 1]) + '\\n' + \\\n 'draws: ' + str(df.iloc[player_index, 2]) + '\\n' + \\\n 'losses: ' + str(df.iloc[player_index, 3])\n return result", "def playerStandings():\n db_conn = connect()\n db_cursor = db_conn.cursor()\n db_cursor.execute(\"select player_id, player_name, wins, (wins + losses) as total_played from normalized_wins_and_losses order by wins desc, total_played desc;\")\n player_standings = db_cursor.fetchall()\n db_conn.commit()\n db_conn.close()\n return player_standings", "def score_display():\n data = score_reader()\n for index, datum in enumerate(data):\n score_format = \"%s ...... 
%s/%s\" % (datum[0].capitalize(), datum[1], datum[2])\n print(score_format)", "async def cringo_scoreboard(players: List[CringoPlayer], cursed: bool = False, game_finished: bool = False) -> str:\n\n scoreboard_rows = []\n for player in players:\n coin_display = 'zero' if cursed else player.winnings # y'all dumb motherfuckers want a rounding error?\n if game_finished:\n row = f'{player.user} · **{player.score}** points · **{coin_display}** coin'\n else:\n row = f'{player.user} · **{player.score}** points'\n\n scoreboard_rows.append(row)\n\n return '\\n'.join(scoreboard_rows)", "def print_leaderboard(self):\n \n leaderboard = pandas.DataFrame(self.history_score.items(), columns=[\"Name\", \"Score\"])\n leaderboard.index += 1\n \n print(leaderboard)", "def __show_scoreboard(self):\n self.clear_screen()\n\n print('\\n' * 2, end=\"\")\n for line in self.__fame:\n print((\" \" * 5) + line, end=\"\")\n print('\\n' * 2, end=\"\")\n\n with open(\"mastermind/assets/scores.json\", \"r\") as data:\n board = list(load(data).items())\n\n space = \" \" * 11\n print(f\"{space}RANK {'PLAYER':<30}\" +\n f\"{'TIME':>7} (seconds){'POINTS':>29}\\n\")\n\n lines_printed = 0\n for idx, entry in enumerate(board[:10]):\n lines_printed += 1\n space = \" \" * 10\n n = idx + 1\n year, month, day, time = entry[0].split(\" \")\n points = entry[1][\"points\"]\n playtime = entry[1][\"playtime\"]\n player = entry[1][\"player\"]\n\n print(f\"{space}{n:>4}. {player:<30}\" +\n f\"{playtime:>7,.2f}{points:>36}/15\")\n\n lines = \"\\n\" * (12 - lines_printed)\n print(f\"{lines}{space}\", end=\"\")\n sleep(.25)\n self.cool_print(\"Press ENTER to return to player menu.\",\n newline=False, margin=0)\n input()", "def _game_over(self):\n\n # Get the players and create the leaderboard tuple\n leaderboard = ((player.get_name(), player.get_total_score(), player.get_total_rolls())\n for player in self._players.get_players())\n\n print(\"\\nLEADERBOARD\\n\")\n # Print leaderboard header border\n print(\"+-{:<32}-+-{:>10}-+-{:>10}-+\".format(\"-\"*32, \"-\"*10, \"-\"*10))\n # Print the leaderboard header\n print(\"| {:<32} | {:>10} | {:>10} |\".format(\n 'Player', 'Score', '# of Rolls'))\n # Sort by highest scores first and print the details\n for player in sorted(leaderboard,\n key=lambda player: (player[1]),\n reverse=True):\n # Print the cell separators\n print(\"|-{:<32}-+-{:>10}-+-{:>10}-|\".format(\"-\"*32, \"-\"*10, \"-\"*10))\n # Print the player's details\n print(\"| {:<32} | {:>10} | {:>10} |\".format(\n player[0], player[1], player[2]))\n\n # Print leaderboard footer border\n print(\"+-{:<32}-+-{:>10}-+-{:>10}-+\".format(\"-\"*32, \"-\"*10, \"-\"*10))" ]
[ "0.72212094", "0.700178", "0.6971123", "0.69110477", "0.6735857", "0.6676208", "0.6612254", "0.659147", "0.65792733", "0.65697765", "0.6559172", "0.6509007", "0.6461611", "0.6422059", "0.6406507", "0.639278", "0.6347347", "0.63014215", "0.62833303", "0.6267375", "0.6261915", "0.61908567", "0.6174388", "0.61714166", "0.6160271", "0.6114266", "0.61118513", "0.60992235", "0.6096551", "0.6084356" ]
0.7885591
0
Helper function to perform a turn when a button is clicked. This function makes the referee run the next turn of the game. This function also updates the labels of the button that was pressed to tell the user if a round is currently being ran or not. This function also sets the new scores in the scores box after the turn is taken.
def do_next_turn(self, button): button.set_label(RUNNING_TURN_MSG) self.referee.run_turn() self.scores.get_buffer().set_text(self.get_current_scores_buffer()) button.set_label(RUN_NEXT_TURN_MSG) self.darea.queue_draw()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_next_turn_click(self, button):\n if self.referee.is_game_over():\n Gtk.main_quit()\n else:\n self.do_next_turn(button)\n # if the game is over after this turn, we will shutdown on the next click,\n # so visually alert the player with the button label\n if self.referee.is_game_over():\n button.set_label(GAME_OVER_MSG)", "def on_click(button):\n global ttt, choices, count, sym, result, x_pos, o_pos\n\n if count % 2 == 0:\n sym = \"X\"\n else:\n sym = \"O\"\n count += 1\n\n button.config(\n text=sym,\n state='disabled',\n disabledforeground=\"red\") # For cross\n\n x, y = get_coordinates(button)\n x += 1\n y += 1\n x_pos.append((x, y))\n state = gen_state(to_move='O', x_positions=x_pos,\n o_positions=o_pos)\n try:\n choice = choices.get()\n if \"Random\" in choice:\n a, b = random_player(ttt, state)\n elif \"Pro\" in choice:\n a, b = minimax_decision(state, ttt)\n else:\n a, b = alphabeta_player(ttt, state)\n except (ValueError, IndexError, TypeError) as e:\n disable_game()\n result.set(\"It's a draw :|\")\n return\n if 1 <= a <= 3 and 1 <= b <= 3:\n o_pos.append((a, b))\n button_to_change = get_button(a - 1, b - 1)\n if count % 2 == 0: # Used again, will become handy when user is given the choice of turn.\n sym = \"X\"\n else:\n sym = \"O\"\n count += 1\n\n if check_victory(button):\n result.set(\"You win :)\")\n disable_game()\n else:\n button_to_change.config(text=sym, state='disabled',\n disabledforeground=\"black\")\n if check_victory(button_to_change):\n result.set(\"You lose :(\")\n disable_game()", "def play_round(self):\r\n your_move = self.you.move()\r\n opposite_move = self.opposite.move()\r\n result = Game.what_move(your_move, opposite_move)\r\n\r\n self.you.learn(opposite_move)\r\n self.opposite.learn(your_move)\r\n\r\n print(\"you choose:\" + your_move + \" and the opposite player choose:\" +\r\n opposite_move)\r\n\r\n if result == 1:\r\n self.you.score += 1\r\n print('=> you won this round!\\n')\r\n elif result == 2:\r\n self.opposite.score += 1\r\n print('=> the opposite pleyer won this round!\\n')\r\n elif result == 0:\r\n print('=> it is Draw!\\n')", "def changeTurn(self):\r\n # Undoes clicked pieces and removes potential moves from the bar area\r\n for point in self.points:\r\n if point.isClicked():\r\n point.undoClick()\r\n if not self.bar[self.currentPlayer].isEmpty():\r\n self.undoPossibleBarMoves()\r\n \r\n # Changes by turn changing the currentPlayer index and updating all the \r\n # necessary board objects\r\n self.currentPlayer = (self.currentPlayer + 1) % 2\r\n self.dice.makeActive()\r\n for point in self.points:\r\n point.setActiveTurn()\r\n for bar in self.bar:\r\n bar.update()\r\n bar.setActiveTurn()\r\n self.message.resetText('It\\'s ' + self.getCurrentString() + '\\'s turn!', self.surface)\r\n self.turnchanger.setFillColor(self.getTurn())\r\n self.turnchanger.draw(self.surface)\r\n pygame.display.flip()", "def draw_round_result(result: str):\r\n if result == \"Win\":\r\n arcade.draw_text(\r\n \"YOU HAVE WON THIS ROUND!!!\", 10, WINDOW_HEIGHT / 2, arcade.color.WHITE, 30\r\n )\r\n arcade.draw_text(\r\n \"Pressing button 1, 2, or 3 will automatically start the next round\",\r\n 0,\r\n 3 / 8 * WINDOW_HEIGHT,\r\n arcade.color.WHITE,\r\n 15,\r\n )\r\n elif result == \"Did not win\":\r\n arcade.draw_text(\r\n \"You have not won this round. 
\\nPressing button 1, 2, or 3 will automatically start the next round\",\r\n 0,\r\n WINDOW_HEIGHT / 2,\r\n arcade.color.WHITE,\r\n 15,\r\n )", "def turn(self, player):\n player.turn_status = 1\n print 'It is {}\\'s turn.'.format(player.name)\n while player.turn_status == 1 and player.totscore < 100:\n roll = self.die.roll()\n if roll == 1:\n print ('Sorry {}! You rolled a 1 and forfeit all '\n 'points this turn. Your total score is {}. Pass die '\n 'to next player.').format(player.name, player.totscore)\n player.turnscore = 0\n self.next_player()\n else:\n print '{} rolled a {}.'.format(player.name, roll)\n player.turnscore += roll\n print ('Your current point total '\n 'for this turn is {}. Your total '\n 'score is {}').format(player.turnscore, player.totscore)\n self.turn_choice(player)\n print ('{} score is {} and'\n 'has won the game!').format(player.name, player.totscore)", "def button_press(self, value):\r\n\r\n self.reset_hands()\r\n\r\n self.increment_round(self.try_again)\r\n\r\n self.player_hand(value)\r\n\r\n self.opponent_hand()\r\n\r\n self.compare_hands()", "def switchTurn(self):\n\n # Widget for player 1\n if self.frame1.state() == 'normal':\n self.frame2.deiconify()\n self.frame1.withdraw()\n self.frame1.update()\n self.frame2.update()\n if self.message[0]:\n showDialogBox(self.message[0]) # announce\n self.message[0] = None\n game2.canvas.tag_bind('square', '<Button-1>', game2.fire)\n\n # Widget for player 2\n else:\n self.frame1.deiconify()\n self.frame2.withdraw()\n self.frame1.update()\n self.frame2.update()\n if game2.isComputer == 1:\n self.frame1.after(500)\n game1.computer_fire()\n else:\n if self.message[1]:\n showDialogBox(self.message[1]) # announce\n self.message[1] = None\n game1.canvas.tag_bind('square', '<Button-1>', game1.fire)", "def win(self):\n self.score += 1\n self.ids['score'].text = 'SCORE: ' + str(self.score)", "def run_game(self):\n game = Poker()\n AI_win = game.play_round(self.name)\n self.update_scores(AI_win)\n message = 'Would you like to play another round? 
Y(es) or N(o): '\n answer = InputHandler.input_bool(message)\n if answer:\n self.run_game()", "def run_turn(self):\n\n # Here is where you'll want to code your AI.\n\n # We've provided sample code that:\n # 1) prints the board to the console\n # 2) prints the opponent's last move to the console\n # 3) prints how much time remaining this AI has to calculate moves\n # 4) makes a random (and probably invalid) move.\n\n # 1) print the board to the console\n self.print_current_board()\n\n # 2) print the opponent's last move to the console\n if len(self.game.moves) > 0:\n print(\"Opponent's Last Move: '\" + self.game.moves[-1].san + \"'\")\n\n # 3) print how much time remaining this AI has to calculate moves\n print(\"Time Remaining: \" + str(self.player.time_remaining) + \" ns\")\n\n # 4) make a move\n (piece_index, move_index) = self.tlabiddl_minimax()\n\n # flip board indicies if playing from other side\n if self.player.color == \"Black\":\n piece_index = 119 - piece_index\n move_index = 119 - move_index\n\n # convert indices to SAN\n piece_pos = square_san(piece_index)\n move_pos = square_san(move_index)\n piece = self.get_game_piece(piece_pos.rank, piece_pos.file)\n piece.move(move_pos.file, move_pos.rank, promotionType=\"Queen\")\n\n return True # to signify we are done with our turn.", "def take_turn(self):\r\n self._choose_best_option()\r\n self._do_draw()", "def initialize_game(self):\r\n self.__turn = 0\r\n self.__Button_ls = []\r\n self.__koordinaatisto_ls = []\r\n for i in range(KOKO):\r\n self.__Button_ls.append([None] * KOKO)\r\n self.__koordinaatisto_ls.append([None] * KOKO)\r\n # Luodaan nappeja, näyttää napit GUI ikkunalle ja tallennetaan listaan.\r\n # Luodaan samalla koordinaatistot matriisina, myöhämmin tunnistamista varten\r\n # y=rivi, x=column\r\n for y in range(KOKO):\r\n for x in range(KOKO):\r\n new_button = Button(self.__mainw, text=\" \",height = 1, width = 3,\r\n command=lambda x=x,y=y : self.press_button(y,x)) # command\r\n\r\n self.__Button_ls[y][x] = new_button\r\n new_button.grid(row=y, column=x)\r\n\r\n self.__koordinaatisto_ls[y][x] = True\r\n\r\n #Yleinen huomautus näytölle\r\n self.__info=Label(self.__mainw,text=\"Replay can be pressed since somebody wins\")\r\n self.__info.grid(row=0, column=KOKO, sticky=EW)\r\n\r\n #Tämä Label käytetään pelin vuoroa ja ilmoitusta varten\r\n self.__info_Label = Label(self.__mainw)\r\n self.__info_Label.grid(row=1, column=KOKO, sticky=EW)\r\n\r\n #Tämä button käytetään pelin nollamista varten\r\n self.__initialize_button = Button(self.__mainw, relief=\"raised\", text=\"REPLAY\",\r\n command=self.initialize_game) # command\r\n self.__initialize_button.grid(row=KOKO - 1, column=KOKO, sticky=EW+S)\r\n self.__initialize_button[\"state\"]=DISABLED\r\n\r\n self.info_update()", "def update_turn(self):\n pass", "def button_clicked(self, button, button_idx):\n if self.turn % 2 == 0:\n if self.board[button_idx-1] == 0:\n self.place_move_x(button, button_idx-1)\n gameOver = self.check_x_won()\n if self.player_mode == 'pvc' and gameOver is None:\n self.play_cpu()\n else:\n if self.player_mode == 'pvp':\n if self.board[button_idx-1] == 0:\n self.place_move_o(button, button_idx-1)\n\n self.check_game()\n self.player_highlight()", "def play_turn(self, cur_board):\n pass", "def click(event):\r\n global score, targets_left, have_friend_param\r\n flag = 0\r\n mult = event.button\r\n for i in range(num_of_balls + 1):\r\n if balls_pool[i][6] > 0 and (event.pos[0] - balls_pool[i][0]) ** 2 + (event.pos[1] - balls_pool[i][1]) ** 2 <= \\\r\n 
balls_pool[i][2] ** 2:\r\n if i == 0:\r\n score += mult * max_rad * (1 + have_friend_param)\r\n screen.fill(YELLOW)\r\n else:\r\n score += mult * (max_rad + min_rad - balls_pool[i][2]) * (1 + have_friend_param)\r\n balls_pool[i][6] -= 1 * mult\r\n if balls_pool[i][6] <= 0:\r\n targets_left -= 1\r\n flag = 1\r\n\r\n if not flag:\r\n score -= mult * (max_rad + min_rad) // 10", "def play(self, event):\n if self.num_clicks == 1:\n self.clickable(event)\n if len(self.canvas.find_withtag(\"selected\")) == 2:\n self.num_of_tries += 1\n print(f'Number of tries {self.num_of_tries}')\n if self.num_of_tries > 13:\n self.score -= 10\n self.score_label.config(text=f'Score: {self.score}')\n self.check_match(self.click_tiles)\n self.canvas.after(self.delay, self.flip_back)\n self.click_tiles.clear()\n self.num_clicks = 0\n else:\n self.clickable(event)", "def main(self, win):\n\n # The rock, paper, scissor buttons\n rockButton = Button(white, 50, 400, 100, 50, 'ROCK')\n paperButton = Button(white, 200, 400, 100, 50, 'PAPER')\n scissorButton = Button(white, 350, 400, 100, 50, 'SCISSOR')\n\n # Player and computer scores\n player = 0\n computer = 0\n\n run = True\n while run:\n userChoice = 'none'\n compChoice = 'none'\n beginGame = False\n for event in pygame.event.get():\n pos = pygame.mouse.get_pos()\n if event.type == pygame.QUIT:\n run = False\n\n # Control mouse button events\n if event.type == pygame.MOUSEBUTTONDOWN:\n if rockButton.isOver(pos):\n userChoice = 'rock'\n compChoice = self.computer_generate()\n beginGame = True\n elif paperButton.isOver(pos):\n userChoice = 'paper'\n compChoice = self.computer_generate()\n beginGame = True\n elif scissorButton.isOver(pos):\n compChoice = self.computer_generate()\n userChoice = 'scissor'\n beginGame = True\n\n self.display_score(win, player, computer)\n self.display_playground(win, rockButton, paperButton, scissorButton)\n\n if beginGame:\n self.game_initiate(win)\n\n self.display_player(userChoice, win)\n self.display_computer(compChoice, win)\n\n if beginGame:\n scores = self.decide_winner(userChoice, compChoice)\n pygame.display.update()\n pygame.time.delay(1000)\n player += scores[0]\n computer += scores[1]\n\n pygame.display.update()\n pygame.time.delay(40)", "def on_turn(self, turn_state):\n game_state = gamelib.GameState(self.config, turn_state)\n #gamelib.debug_write('Performing turn {} of your custom algo strategy'.format(game_state.turn_number))\n #game_state.suppress_warnings(True) #Uncomment this line to suppress warnings.\n\n self.starter_strategy(game_state)\n\n game_state.submit_turn()", "def main() -> None:\n # the current game is initialized with 1, 3, 5, 7 matches on the 4 rows.\n game: List[int] = [1, 3, 5, 7]\n\n print(\"\\nGame of Nim\")\n print( \"===========\")\n display_game(game)\n start = input(\"Do you want to start? 
(y/n) \")\n print()\n if start==\"y\" or start==\"Y\":\n print(\"Your turn\")\n user_turn(game)\n display_game(game)\n while True:\n print(\"My turn\")\n computer_turn(game)\n display_game(game)\n if is_finished(game):\n print(\"I WON\\n\")\n break\n print(\"Your turn\")\n user_turn(game)\n display_game(game)\n if is_finished(game):\n print(\"YOU WON\\n\")\n break", "def play(self):\n \n while True:\n self.print_board()\n self.display_board()\n winner = self.is_game_won()\n if winner or self.is_filled():\n break\n \n if self.turn == _PLAYER:\n col = self.human_turn()\n else:\n col = self.ai_turn()\n\n row = self.get_row_for_col(col)\n self.board[7 * row + col] = self.turn\n self.last_play_rc = row, col\n\n if self.debug:\n print(\"position scores:\",\n \"player=\", score_position(self.board, _PLAYER),\n \"ai=\", score_position(self.board, _AI))\n \n self.turn = _AI if self.turn == _PLAYER else _PLAYER\n \n if winner == 0:\n msg = \"Tie!\"\n elif winner == 1:\n msg = \"You win!\"\n else:\n msg = \"I win!\"\n \n oled.text(msg, 64, 30)\n oled.show()\n print(\"\\n\" + msg + \"\\n\")\n \n if winner == 0 or winner == 1:\n if self.plies == 3:\n print(\"\"\"\n(Of course, you did set me to easy mode, which I feel compelled to mention.)\n\"\"\")\n print(\"\"\"\n\nThere are some interesting things to learn about ConnectFour:\n\n {url}\n\nTo move ahead:\n\n >>> import sensors\n >>> sensors.start()\n\n\"\"\".format(url=url(\"connectfour\")))\n\n else:\n print(\"\"\"\nWow. You were beat by a $4 computer--using only one of my processors (!!).\nTo get the code to move ahead, you'll need to at least tie me.\n\nTo play again, make a new instance of the ConnectFour class. You can choose\ndifferent options than the defaults:\n\n connectfour.ConnectFour(plies, start_player, serial_input, debug)\n - plies [5]: moves to look ahead (3-6, where 3 is easy and 6 is slow and hard\n - start_player [0]: 0 for random, 1 for you, 2 for me\n - serial_input [False]: Enter moves w/keyboard in terminal instead of knob\n - debug [False]: Show information about current AI evaluation scores\n\nFor example:\n\n >>> g = ConnectFour(plies=4, start_player=1)\n >>> g.play()\n\n\"\"\")", "def play_round(self):\r\n move1 = self.p1.move()\r\n move2 = self.p2.move()\r\n # Checks if User Wants to Quit Game:\r\n if move1 == \"quit\" or move2 == \"quit\":\r\n self.game_over(True)\r\n print(f\"Player One: {move1.upper()}\\nPlayer Two: {move2.upper()}\")\r\n self.keep_score(move1, move2)\r\n self.p1.learn(move1, move2)\r\n self.p2.learn(move2, move1)", "def button(self):\r\n t = turtle.Turtle()\r\n t.hideturtle()\r\n t.speed(20)\r\n t.penup()\r\n t.color(\"black\")\r\n # Draws one of the squares behind the \"scoreboard\"\r\n t.goto(70, 41)\r\n t.pendown()\r\n t.begin_fill()\r\n for i in range(4):\r\n t.forward(100)\r\n t.left(90)\r\n t.end_fill()\r\n t.penup()\r\n t.goto(70, 139)\r\n # Draws one of the squares over a button up arrow\r\n t.color(\"#20b2aa\") # Turns the color to teal\r\n t.pendown()\r\n t.begin_fill()\r\n for y in range(4):\r\n t.forward(100)\r\n t.left(90)\r\n t.end_fill()\r\n t.penup()\r\n\r\n t.goto(190, 40)\r\n # Draws another one of the square around the enter button\r\n t.color(\"#20b2aa\") # Turns the color to teal\r\n t.pendown()\r\n t.begin_fill()\r\n for y in range(4):\r\n t.forward(100)\r\n t.left(90)\r\n t.end_fill()\r\n\r\n t.penup()\r\n t.goto(70, -59)\r\n t.color(\"#20b2aa\") # Turns the color to teal\r\n t.pendown()\r\n # Draws the box around the down button\r\n t.begin_fill()\r\n for y in range(4):\r\n 
t.forward(100)\r\n t.left(90)\r\n t.end_fill()\r\n # Draws the up arrow of the button\r\n t.penup()\r\n t.goto(70,143)\r\n t.pendown()\r\n t.color(\"#8b8378\") # Turns the color a light grey\r\n t.begin_fill()\r\n for y in range(3):\r\n t.pendown()\r\n t.forward(100)\r\n t.left(120)\r\n t.end_fill()\r\n # Draws the down arrow of the button\r\n t.penup()\r\n t.goto(70, 40)\r\n t.pendown()\r\n t.begin_fill()\r\n for y in range(3):\r\n t.forward(100)\r\n t.right(120)\r\n t.end_fill()\r\n # Draws scoreboard\r\n t.penup()\r\n t.goto(75, 136)\r\n t.color(\"white\")\r\n t.pendown()\r\n t.begin_fill()\r\n for y in range(4):\r\n t.forward(90)\r\n t.right(90)\r\n t.end_fill()\r\n t.color(\"black\")\r\n t.penup()\r\n t.goto(90,35)\r\n t.pendown()\r\n t.write(\"1\", font=(\"Arial\", 75, \"normal\") )\r\n t.color(\"#8b8378\") # Turns the color a light grey\r\n t.penup()\r\n # Draws the circle for the enter button and writes \"Enter\" on the button\r\n t.goto(240,50)\r\n t.begin_fill()\r\n t.circle(40)\r\n t.end_fill()\r\n t.penup()\r\n t.color(\"white\")\r\n t.goto(210,75)\r\n t.write(\"Enter\", font= (\"Arial\", 20, \"normal\"))\r\n t.color(\"white\")\r\n # Writes \"The Game of Nim\" at the bottom of the screen\r\n t.penup()\r\n t.goto(30, -140)\r\n t.pendown()\r\n t.write(\"The Game \", font=(\"Arial\", 40, \"normal\"))\r\n t.penup()\r\n t.goto(110, -185)\r\n t.write(\"of\", font = (\"Arial\", 40, \"normal\"))\r\n t.goto(70, -245)\r\n t.write(\"Nim\", font = (\"Arial\", 50, \"normal\"))", "def update_game(game, episode, buttons,run_status):\n\n game_round = game.round\n if game_round == 'newround':\n newround(game)\n print(game_round)\n game.round = 'preflop'\n print(game.round)\n\n\n return 'go'\n elif game_round == 'preflop':\n check = preflop(game, episode, buttons)\n if check == True:\n game.round = 'flop'\n return 'go'\n elif check == False:\n game.round = 'showdown'\n print(game.round)\n return 'go'\n elif check == 'no input':\n game.round = 'preflop'\n return 'stop'\n return 'stop'\n elif game_round == 'flop':\n check = flop(game, episode, buttons, run_status)\n if check == True:\n game.round = 'turn'\n return 'go'\n elif check == False:\n game.round = 'showdown'\n return 'go'\n elif check == 'no input':\n game.round = 'flop'\n return 'stop'\n return 'stop'\n pass\n elif game_round == 'turn':\n check = turn(game, episode, buttons, run_status)\n if check == True:\n game.round = 'river'\n return 'go'\n elif check == False:\n game.round = 'showdown'\n return 'go'\n elif check == 'no input':\n game.round = 'turn'\n return 'stop'\n return 'stop'\n elif game_round == 'river':\n check = turn(game, episode, buttons, run_status)\n if check == True:\n game.round = 'showdown'\n return 'go'\n elif check == False:\n game.round = 'showdown'\n return 'go'\n elif check == 'no input':\n game.round = 'river'\n return 'stop'\n return 'stop'\n elif game_round == 'showdown':\n showdown(game, episode)\n #game.player1.wager = 100\n #game.player2.wager = 50\n # game.update_tablepot()\n game.round = 'newround'\n return 'go'\n pass", "def switch_turn(self):\n if self.completed is not None:\n self.current_turn = self.creator if self.current_turn != self.creator else self.opponent\n self.save()", "def play_round(self):\n move1 = self.p1.move()\n move2 = self.p2.move()\n print(f\"P1: {move1} P2: {move2}\")\n self.p1.learn(move1, move2)\n self.p2.learn(move2, move1)\n \"\"\"Proneround_score and ptworound_score resets\n to 0 at beginning of every round.\"\"\"\n poneround_score = 0\n ptworound_score = 0\n if 
self.beats(move1, move2):\n print(\"Player 1 Wins This Round\")\n poneround_score = 1\n self.pone_score += 1\n elif self.beats(move2, move1):\n print(\"Player 2 Wins This Round\")\n ptworound_score = 1\n self.ptwo_score += 1\n else:\n print(\"Tie! No Points.\")\n print(f\"Round Points - P1: {poneround_score} | P2: {ptworound_score}\")", "def do_turn(self, player):\n \n # if the player is out, skip the turn\n if player.account.balance == 0:\n pass\n else:\n # look up method based on dreidel spin;\n # pass player as argument to method;\n # print the result message along with the turn number\n print 'Turn #%d: %s' % (self._turn_count,\n self.actions[player.spin()](player),)\n \n # if the player ran out of items, the player is out;\n # decrement the number of active players.\n if player.account.balance == 0:\n self.total_in -= 1\n print OUT_TEMPLATE % (player,)\n \n # show updated status of everyone's balance\n self.show_balances()\n print '-'*SEPARATOR_LENGTH\n \n # increment the number of turns\n self._turn_count += 1", "def on_turn(self, turn_state):\n game_state = gamelib.GameState(self.config, turn_state)\n gamelib.debug_write('Performing turn {} of your custom algo strategy'.format(game_state.turn_number))\n #game_state.suppress_warnings(True) #Uncomment this line to suppress warnings.\n\n self.starter_strategy(game_state)\n\n game_state.submit_turn()", "def make_move( self, square, button ):\n # Use a try/except in case the user clicks on an already occupied square.\n try:\n # Get the current player before attempting to make the move\n # because a successful move changes the current player.\n player = self.ttt.current_player\n\n # Attempt to make the move.\n self.ttt.make_move( square )\n\n # If the move was successful, update the button and status label.\n button.setText( player )\n if self.ttt.winner() in \"XO\":\n self.gui.status_label.setText( \"Player {} wins!\".format( self.ttt.winner() ) )\n elif self.ttt.is_full():\n self.gui.status_label.setText( \"Tie game.\" )\n else:\n self.gui.status_label.setText( \"Player {}'s turn...\".format( self.ttt.current_player ) )\n\n except ( ValueError, RuntimeError ) as e:\n # The error is ignored and it will remain the same player's turn.\n pass" ]
[ "0.7048446", "0.6712457", "0.6562238", "0.6472978", "0.63358074", "0.62775713", "0.62745523", "0.62213975", "0.6115055", "0.61080164", "0.6100496", "0.6090055", "0.607604", "0.60638136", "0.60425466", "0.60403013", "0.60321194", "0.60312575", "0.6012323", "0.6010205", "0.5969966", "0.5966066", "0.5956443", "0.59526664", "0.5936538", "0.59333247", "0.592473", "0.592068", "0.59148157", "0.59115005" ]
0.823928
0
Callback function that runs when a button is clicked. This function will run the next turn of the game if the game is not over, or will shut down the program if the game is over. This function will also update the button label to display the game is over or not.
def on_next_turn_click(self, button): if self.referee.is_game_over(): Gtk.main_quit() else: self.do_next_turn(button) # if the game is over after this turn, we will shutdown on the next click, # so visually alert the player with the button label if self.referee.is_game_over(): button.set_label(GAME_OVER_MSG)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_next_turn(self, button):\n button.set_label(RUNNING_TURN_MSG)\n self.referee.run_turn()\n self.scores.get_buffer().set_text(self.get_current_scores_buffer())\n button.set_label(RUN_NEXT_TURN_MSG)\n self.darea.queue_draw()", "def on_click(button):\n global ttt, choices, count, sym, result, x_pos, o_pos\n\n if count % 2 == 0:\n sym = \"X\"\n else:\n sym = \"O\"\n count += 1\n\n button.config(\n text=sym,\n state='disabled',\n disabledforeground=\"red\") # For cross\n\n x, y = get_coordinates(button)\n x += 1\n y += 1\n x_pos.append((x, y))\n state = gen_state(to_move='O', x_positions=x_pos,\n o_positions=o_pos)\n try:\n choice = choices.get()\n if \"Random\" in choice:\n a, b = random_player(ttt, state)\n elif \"Pro\" in choice:\n a, b = minimax_decision(state, ttt)\n else:\n a, b = alphabeta_player(ttt, state)\n except (ValueError, IndexError, TypeError) as e:\n disable_game()\n result.set(\"It's a draw :|\")\n return\n if 1 <= a <= 3 and 1 <= b <= 3:\n o_pos.append((a, b))\n button_to_change = get_button(a - 1, b - 1)\n if count % 2 == 0: # Used again, will become handy when user is given the choice of turn.\n sym = \"X\"\n else:\n sym = \"O\"\n count += 1\n\n if check_victory(button):\n result.set(\"You win :)\")\n disable_game()\n else:\n button_to_change.config(text=sym, state='disabled',\n disabledforeground=\"black\")\n if check_victory(button_to_change):\n result.set(\"You lose :(\")\n disable_game()", "def on_click(self):\n arcade.play_sound(button, volume=constants.MUSIC_VOLUME / 40)\n\n global success\n global fails\n if success or fails == 20:\n reset_global_variables()\n self.minigame.window.show_view(self.minigame.main_view)\n else:\n self.minigame.window.show_view(self.minigame.main_view)\n print(f\"Exit Button.\")", "def start_game(cur_button):\r\n\r\n global time, game_on, wrd_lst\r\n initialize()\r\n time = 180\r\n game_on = True\r\n cmd_refresh()\r\n window.after(1000, minus_time(cur_button))\r\n cur_button.configure(text='stop')", "def GAMEOVER_LOOP():\n pass", "def again(self):\n pygame.display.update()\n clock.tick(15)\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n if event.type == pygame.MOUSEBUTTONDOWN:\n pygame.event.post(event)\n\n background()\n position = ((width / 2), (height / 3))\n text_display(\"Wygrana!!!\", 100, black, ((width / 2), (height / 5)))\n text_display(\"Czy chcesz zagrać ponownie?\", 70, black, position)\n mouse = pygame.mouse.get_pos()\n button_yes = Buttton(350, 250, 100, 50, green, \"tak\", 30)\n button_yes.show()\n button_yes.backlight(mouse)\n button_no = Buttton(350, 350, 100, 50, green, \"nie\", 30)\n button_no.show()\n button_no.backlight(mouse)\n\n if button_yes.is_clicked(mouse):\n self.board = Board()\n self.choose_level()\n\n pygame.display.update()\n clock.tick(15)\n\n if button_no.is_clicked(mouse):\n pygame.quit()\n quit()", "def car_go(argument):\n global buttonCanBePressed\n timer.reset()\n ledcargreen.value(1)\n ledpedred.value(1)\n buttonCanBePressed = True", "def button_clicked(self, button, button_idx):\n if self.turn % 2 == 0:\n if self.board[button_idx-1] == 0:\n self.place_move_x(button, button_idx-1)\n gameOver = self.check_x_won()\n if self.player_mode == 'pvc' and gameOver is None:\n self.play_cpu()\n else:\n if self.player_mode == 'pvp':\n if self.board[button_idx-1] == 0:\n self.place_move_o(button, button_idx-1)\n\n self.check_game()\n self.player_highlight()", "def buttonEventCallback(argument):\n global buttonCanBePressed\n if buttonCanBePressed 
is True:\n ledpedbutton.value(1)\n buttonCanBePressed = False\n start_new_thread(is_timer_4, tuple('0'))", "def cases_app_opening():\n run = True\n click = False\n clock = pygame.time.Clock()\n\n cooldown = 0\n\n # BUTTON INITIALIZATION\n B_INV_BACK = pygame.Rect(190, 205, 55, 55)\n B_INV_FORWARD = pygame.Rect(255, 205, 55, 55)\n B_TRIP_LABEL = pygame.Rect(110, 185, 280, 75)\n B_TRIP_CASE = pygame.Rect(155, 270, 190, 150)\n B_CLASS_LABEL = pygame.Rect(110, 445, 280, 75)\n B_CLASS_CASE = pygame.Rect(155, 530, 190, 150)\n B_BACK = pygame.Rect(20, 20, 60, 60)\n B_MENU = pygame.Rect(WIDTH - 65 - 20, 20, 60, 60)\n\n while run:\n pos_x, pos_y = pygame.mouse.get_pos()\n user_settings = return_user_settings()\n BG = (\n BG_CASEKY_VOLBA_L\n if user_settings[\"theme\"] == \"light\"\n else BG_CASEKY_VOLBA_D\n )\n FONT_COLOR = FONT_COLOR_L if user_settings[\"theme\"] == \"light\" else FONT_COLOR_D\n WIN.blit(BG, (0, 0))\n WIN.blit(BACK, (20, 20))\n WIN.blit(MENU, (WIDTH - 65 - 20, 20))\n\n # LABELS\n label_trip_col = MAIN_FONT.render(\"Trip Collection\", 1, FONT_COLOR)\n WIN.blit(label_trip_col, (WIDTH_H - 75, 205))\n label_class_col = MAIN_FONT.render(\"Class Collection\", 1, FONT_COLOR)\n WIN.blit(label_class_col, (WIDTH_H - 75, 465))\n\n # Zistovanie, ci nebolo kliknute na textove pole\n if click and cooldown == 0:\n if B_BACK.collidepoint(pos_x, pos_y):\n run = False\n elif B_MENU.collidepoint(pos_x, pos_y):\n settings_menu()\n elif B_TRIP_CASE.collidepoint(pos_x, pos_y) or B_TRIP_LABEL.collidepoint(\n pos_x, pos_y\n ):\n case_opening_app(case_type=\"trip\")\n elif B_CLASS_CASE.collidepoint(pos_x, pos_y) or B_CLASS_LABEL.collidepoint(\n pos_x, pos_y\n ):\n case_opening_app(case_type=\"class\")\n cooldown = FPS // 3\n\n # Event handling\n events = pygame.event.get()\n for event in events:\n if event.type == pygame.QUIT:\n run = False\n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 1:\n click = True\n if event.type == pygame.MOUSEBUTTONUP:\n if event.button == 1:\n click = False\n if cooldown != 0:\n cooldown -= 1\n\n pygame.display.update()\n clock.tick(FPS)", "def start_menu():\r\n score = 0\r\n\r\n while True:\r\n pg.display.set_caption(\"'METEOR BLASTER - 'The Dark Souls of Arcade Games'\")\r\n pg.key.set_repeat(1, 1)\r\n\r\n bg_image = pg.image.load(\"images/meteor shower load game.png\").convert()\r\n bg_image = pg.transform.scale(bg_image, screen.get_size())\r\n screen.blit((bg_image), (0, 0))\r\n\r\n # defines all the buttons on the screen\r\n play_button = sprites.button(\r\n s.play_pos, (200, 50), \"images/play_button.png\", \"images/hl_play_button.png\"\r\n )\r\n one_player_button = sprites.button(\r\n s.one_player_pos,\r\n (130, 50),\r\n \"images/one_player_button.png\",\r\n \"images/hl_op_button.png\",\r\n )\r\n two_player_button = sprites.button(\r\n s.two_player_pos,\r\n (130, 50),\r\n \"images/two_player_button.png\",\r\n \"images/hl_tp_button.png\",\r\n )\r\n load_game_button = sprites.button(\r\n s.load_game_pos,\r\n (130, 50),\r\n \"images/load_game_button.png\",\r\n \"images/hl_lg_button.png\",\r\n )\r\n\r\n # group to hold all the buttons\r\n button_group = pg.sprite.Group()\r\n button_group.add(\r\n play_button, one_player_button, two_player_button, load_game_button\r\n )\r\n\r\n pg.display.update()\r\n end = False\r\n game_loaded = False\r\n\r\n # loops until a mode is selected\r\n while not (end):\r\n\r\n # highlights buttons when the mouse hovers over them\r\n for button in button_group:\r\n button.highlight()\r\n\r\n for event in pg.event.get():\r\n\r\n # if 
player clicks\r\n if event.type == pg.MOUSEBUTTONDOWN:\r\n x, y = pg.mouse.get_pos()\r\n\r\n # if one player mode is clicked\r\n if one_player_button.rect.collidepoint(x, y):\r\n s.two_player = False\r\n one_player_button.highlight()\r\n end = True\r\n\r\n # if two player mode is clicked\r\n if two_player_button.rect.collidepoint(x, y):\r\n s.two_player = True\r\n two_player_button.highlight()\r\n end = True\r\n\r\n # if load game button is clicked\r\n if load_game_button.rect.collidepoint(x, y):\r\n helpers.enter_name()\r\n score = helpers.load_game()\r\n end = True\r\n game_loaded = True\r\n\r\n pg.display.update()\r\n\r\n # loops until play button is clicked and the game begins\r\n while end:\r\n if game_loaded:\r\n end = False\r\n for event in pg.event.get():\r\n if event.type == pg.MOUSEBUTTONDOWN:\r\n x, y = pg.mouse.get_pos()\r\n if play_button.rect.collidepoint(x, y):\r\n end = False\r\n\r\n return score", "def main(self, win):\n\n # The rock, paper, scissor buttons\n rockButton = Button(white, 50, 400, 100, 50, 'ROCK')\n paperButton = Button(white, 200, 400, 100, 50, 'PAPER')\n scissorButton = Button(white, 350, 400, 100, 50, 'SCISSOR')\n\n # Player and computer scores\n player = 0\n computer = 0\n\n run = True\n while run:\n userChoice = 'none'\n compChoice = 'none'\n beginGame = False\n for event in pygame.event.get():\n pos = pygame.mouse.get_pos()\n if event.type == pygame.QUIT:\n run = False\n\n # Control mouse button events\n if event.type == pygame.MOUSEBUTTONDOWN:\n if rockButton.isOver(pos):\n userChoice = 'rock'\n compChoice = self.computer_generate()\n beginGame = True\n elif paperButton.isOver(pos):\n userChoice = 'paper'\n compChoice = self.computer_generate()\n beginGame = True\n elif scissorButton.isOver(pos):\n compChoice = self.computer_generate()\n userChoice = 'scissor'\n beginGame = True\n\n self.display_score(win, player, computer)\n self.display_playground(win, rockButton, paperButton, scissorButton)\n\n if beginGame:\n self.game_initiate(win)\n\n self.display_player(userChoice, win)\n self.display_computer(compChoice, win)\n\n if beginGame:\n scores = self.decide_winner(userChoice, compChoice)\n pygame.display.update()\n pygame.time.delay(1000)\n player += scores[0]\n computer += scores[1]\n\n pygame.display.update()\n pygame.time.delay(40)", "def handle_button(self, button):\n last_run = self.last_seen[button] if button in self.last_seen else 0\n diff = time.time() - last_run\n\n if diff <= 1:\n logging.warning(\"duplicate: %s, %d, %d\", button, last_run, diff)\n return\n\n try:\n cmd = buttons.COMMANDS[button]\n except KeyError:\n logging.warning(\"No instructions found for button %s.\", button)\n return\n\n self.last_seen[button] = time.time()\n\n try:\n function, music, zone = cmd\n except ValueError, ex:\n logging.warning(\"Couldn't parse instructions from %s: %s\", cmd, ex)\n return\n\n device = self.player.zone(zone)\n if not device:\n logging.warning(\"Can't find a device called %s\", zone)\n return\n\n # If this is the same button we saw last, pause or unpause it.\n if button == self.last_button:\n device.toggle()\n return\n\n if function == \"play_local\":\n self.play_local(music, device)\n self.last_button = button\n else:\n logging.warning(\"Don't know how to %s.\", cmd)", "def start_game():\n logger.info(\"Clicking play button\")\n mouseclick(coords_play_final_button[0], coords_play_final_button[1])", "def check(self,event=None):\n if self.kenken.checkit(self.movelist, self.counter): #Calls the checkit method of the KenKen object\n #If user wins 
the game, display congratulatory message and instructions.\n self.lbl2[\"text\"] = \"Congratulations, you finished the puzzle!\\nSelect Next Puzzle to go to another puzzle\" + \\\n \"\\nor Exit to quit the game.\"\n elif event:\n #If user has not won the game yet, but checks with the \"Win?\" button in GUI - show message to continue playing.\n self.lbl2[\"text\"] = \"This puzzle is not done. Keep trying!\"", "def update(delta_time):\n for event in coda.event.listing():\n if coda.event.quit_game(event):\n coda.stop()\n if coda.event.mouse_l_button_down(event):\n if MY.restart_button.collides_with_point(coda.event.mouse_position()):\n coda.state.change(0)", "def introduction():\r\n button_width = 290\r\n button_height = 60\r\n play_button = Button(-400, height/(1.5), button_width, button_height, beer, orange, 32, black, white, \"PLAY\")\r\n instructions_button = Button(width+150, height/(1.5)+button_height+10, button_width,button_height, beer, orange, 32, black, white, \"INSTRUCTIONS\")\r\n quit_button = Button(-400, height/(1.5)+button_height*2+20, button_width,button_height, beer, orange, 32, black, white, \"QUIT\")\r\n \r\n\r\n # To centre the button in the middle of the screen -> width/2 - button_width/2\r\n # To draw the buttons in an animated way.\r\n while play_button.x < width/2-button_width/2 or instructions_button.x > width/2-button_width/2:\r\n DISPLAY_SCREEN.blit(intro_img, (0, 0))\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n \r\n if play_button.x < width/2-button_width/2:\r\n play_button.x += 3\r\n quit_button.x += 3\r\n if instructions_button.x > width/2-button_width/2 : \r\n instructions_button.x -= 3\r\n\r\n play_button.blit(DISPLAY_SCREEN)\r\n instructions_button.blit(DISPLAY_SCREEN)\r\n quit_button.blit(DISPLAY_SCREEN)\r\n pygame.display.update()\r\n\r\n # The introductory screen\r\n run = True\r\n while run:\r\n DISPLAY_SCREEN.blit(intro_img, (0, 0))\r\n\r\n mouse_position = pygame.mouse.get_pos() # to get the position of the mouse\r\n for event in pygame.event.get(): # to manage/handle the events e.g clicking on a button \r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n if play_button.is_clicked(mouse_position, event):\r\n from game_play_window import start_game\r\n start_game()\r\n run = False\r\n elif instructions_button.is_clicked(mouse_position, event):\r\n # instructions()\r\n webbrowser.open_new(\"https://www.ultraboardgames.com/boggle/game-rules.php\")\r\n elif quit_button.is_clicked(mouse_position, event):\r\n pygame.quit()\r\n sys.exit()\r\n \r\n if play_button.is_hovered_over(mouse_position): # change the color of the button when the player/user hovers over it\r\n play_button.blit_hovered_over(DISPLAY_SCREEN)\r\n else:\r\n play_button.blit(DISPLAY_SCREEN, gray)\r\n if instructions_button.is_hovered_over(mouse_position):\r\n instructions_button.blit_hovered_over(DISPLAY_SCREEN)\r\n else:\r\n instructions_button.blit(DISPLAY_SCREEN, gray)\r\n if quit_button.is_hovered_over(mouse_position):\r\n quit_button.blit_hovered_over(DISPLAY_SCREEN)\r\n else:\r\n quit_button.blit(DISPLAY_SCREEN, gray)\r\n\r\n clock.tick(60)\r\n pygame.display.update()", "def events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.running = False\n self.sleep_time = 0\n return\n\n if event.type == pygame.MOUSEBUTTONUP:\n pos = pygame.mouse.get_pos()\n\n if self.button.collidepoint(pos):\n if self.state == \"solving\":\n 
self.state = \"stopping\"\n\n if self.state == \"solved\":\n self.state = \"waiting\"\n self.puzzle_state = \"solving\"\n self.button_text = \"Solve!\"\n self.board = self.original_board.copy()\n\n elif self.state == \"waiting\":\n self.state = \"solving\"\n self.button_text = \"Stop!\"\n self.button_color = BUTTON_COLOR_STOP\n\n isSolved = self.solve()\n\n self.button_color = BUTTON_COLOR_SOLVE\n if isSolved:\n self.state = \"solved\"\n self.button_text = \"Clear\"\n self.puzzle_state = \"solved\"\n else:\n if self.state == \"stopping\":\n self.state = \"waiting\"\n self.button_text = \"Solve!\"\n self.puzzle_state = \"solving\"\n else:\n self.state = \"solved\"\n self.button_text = \"Clear\"\n self.puzzle_state = \"failed\"", "def winner():\n winning_lbl_zero.grid(row=0, column=LEFT_COL, rowspan=80, columnspan=2, sticky=N) # Placing the winning image\n messagebox.showinfo(title=\"**** WINNER! ****\", message=\"CONGRATS!!\\n\"\n \"You figured out the word/phrase\\n\"\n \"before it was too late, clearly your\\n\"\n \"guessing skills are unfathomable\")\n\n play_again() # Finds out if they'd like to play again", "def Gameloop():", "def switchTurn(self):\n\n # Widget for player 1\n if self.frame1.state() == 'normal':\n self.frame2.deiconify()\n self.frame1.withdraw()\n self.frame1.update()\n self.frame2.update()\n if self.message[0]:\n showDialogBox(self.message[0]) # announce\n self.message[0] = None\n game2.canvas.tag_bind('square', '<Button-1>', game2.fire)\n\n # Widget for player 2\n else:\n self.frame1.deiconify()\n self.frame2.withdraw()\n self.frame1.update()\n self.frame2.update()\n if game2.isComputer == 1:\n self.frame1.after(500)\n game1.computer_fire()\n else:\n if self.message[1]:\n showDialogBox(self.message[1]) # announce\n self.message[1] = None\n game1.canvas.tag_bind('square', '<Button-1>', game1.fire)", "def check_progress(self):\n if self.check_snake():\n but = tk.Button(root, text=\"Again?\")\n but.pack()\n but.bind(\"<Button-1>\", lambda event: [but.destroy(),self._canvas.destroy(), main()])\n messagebox.showinfo(\"GAME OVER!\", \"Your snake crashed\")\n if self.check_win():\n but = tk.Button(root, text=\"New level\")\n but.pack()\n but.bind(\"<Button-1>\", lambda event: [but.destroy(), self._canvas.destroy(), main()])\n messagebox.showinfo(\"YOU WIN!\", \"All apples are safely collected\")", "def button(self):\r\n t = turtle.Turtle()\r\n t.hideturtle()\r\n t.speed(20)\r\n t.penup()\r\n t.color(\"black\")\r\n # Draws one of the squares behind the \"scoreboard\"\r\n t.goto(70, 41)\r\n t.pendown()\r\n t.begin_fill()\r\n for i in range(4):\r\n t.forward(100)\r\n t.left(90)\r\n t.end_fill()\r\n t.penup()\r\n t.goto(70, 139)\r\n # Draws one of the squares over a button up arrow\r\n t.color(\"#20b2aa\") # Turns the color to teal\r\n t.pendown()\r\n t.begin_fill()\r\n for y in range(4):\r\n t.forward(100)\r\n t.left(90)\r\n t.end_fill()\r\n t.penup()\r\n\r\n t.goto(190, 40)\r\n # Draws another one of the square around the enter button\r\n t.color(\"#20b2aa\") # Turns the color to teal\r\n t.pendown()\r\n t.begin_fill()\r\n for y in range(4):\r\n t.forward(100)\r\n t.left(90)\r\n t.end_fill()\r\n\r\n t.penup()\r\n t.goto(70, -59)\r\n t.color(\"#20b2aa\") # Turns the color to teal\r\n t.pendown()\r\n # Draws the box around the down button\r\n t.begin_fill()\r\n for y in range(4):\r\n t.forward(100)\r\n t.left(90)\r\n t.end_fill()\r\n # Draws the up arrow of the button\r\n t.penup()\r\n t.goto(70,143)\r\n t.pendown()\r\n t.color(\"#8b8378\") # Turns the color a light grey\r\n 
t.begin_fill()\r\n for y in range(3):\r\n t.pendown()\r\n t.forward(100)\r\n t.left(120)\r\n t.end_fill()\r\n # Draws the down arrow of the button\r\n t.penup()\r\n t.goto(70, 40)\r\n t.pendown()\r\n t.begin_fill()\r\n for y in range(3):\r\n t.forward(100)\r\n t.right(120)\r\n t.end_fill()\r\n # Draws scoreboard\r\n t.penup()\r\n t.goto(75, 136)\r\n t.color(\"white\")\r\n t.pendown()\r\n t.begin_fill()\r\n for y in range(4):\r\n t.forward(90)\r\n t.right(90)\r\n t.end_fill()\r\n t.color(\"black\")\r\n t.penup()\r\n t.goto(90,35)\r\n t.pendown()\r\n t.write(\"1\", font=(\"Arial\", 75, \"normal\") )\r\n t.color(\"#8b8378\") # Turns the color a light grey\r\n t.penup()\r\n # Draws the circle for the enter button and writes \"Enter\" on the button\r\n t.goto(240,50)\r\n t.begin_fill()\r\n t.circle(40)\r\n t.end_fill()\r\n t.penup()\r\n t.color(\"white\")\r\n t.goto(210,75)\r\n t.write(\"Enter\", font= (\"Arial\", 20, \"normal\"))\r\n t.color(\"white\")\r\n # Writes \"The Game of Nim\" at the bottom of the screen\r\n t.penup()\r\n t.goto(30, -140)\r\n t.pendown()\r\n t.write(\"The Game \", font=(\"Arial\", 40, \"normal\"))\r\n t.penup()\r\n t.goto(110, -185)\r\n t.write(\"of\", font = (\"Arial\", 40, \"normal\"))\r\n t.goto(70, -245)\r\n t.write(\"Nim\", font = (\"Arial\", 50, \"normal\"))", "def again():\r\n\tif_again = ask_yes_no(\"\\nDo you want play again (press y or n) \")\r\n\tif if_again == \"y\":\r\n\t\tprint(\"\\n\\n\")\r\n\t\tmain()\r\n\telif if_again == \"n\":\r\n\t\tprint(\"\\nThank you for your time spared for this game. Good bye!\")", "def update(self):\n if self.game_over is False:\n self.player_turn = not self.player_turn\n self.turn_display_timer = self.TURN_TEXT_TIMER", "def initialize_game(self):\r\n self.__turn = 0\r\n self.__Button_ls = []\r\n self.__koordinaatisto_ls = []\r\n for i in range(KOKO):\r\n self.__Button_ls.append([None] * KOKO)\r\n self.__koordinaatisto_ls.append([None] * KOKO)\r\n # Luodaan nappeja, näyttää napit GUI ikkunalle ja tallennetaan listaan.\r\n # Luodaan samalla koordinaatistot matriisina, myöhämmin tunnistamista varten\r\n # y=rivi, x=column\r\n for y in range(KOKO):\r\n for x in range(KOKO):\r\n new_button = Button(self.__mainw, text=\" \",height = 1, width = 3,\r\n command=lambda x=x,y=y : self.press_button(y,x)) # command\r\n\r\n self.__Button_ls[y][x] = new_button\r\n new_button.grid(row=y, column=x)\r\n\r\n self.__koordinaatisto_ls[y][x] = True\r\n\r\n #Yleinen huomautus näytölle\r\n self.__info=Label(self.__mainw,text=\"Replay can be pressed since somebody wins\")\r\n self.__info.grid(row=0, column=KOKO, sticky=EW)\r\n\r\n #Tämä Label käytetään pelin vuoroa ja ilmoitusta varten\r\n self.__info_Label = Label(self.__mainw)\r\n self.__info_Label.grid(row=1, column=KOKO, sticky=EW)\r\n\r\n #Tämä button käytetään pelin nollamista varten\r\n self.__initialize_button = Button(self.__mainw, relief=\"raised\", text=\"REPLAY\",\r\n command=self.initialize_game) # command\r\n self.__initialize_button.grid(row=KOKO - 1, column=KOKO, sticky=EW+S)\r\n self.__initialize_button[\"state\"]=DISABLED\r\n\r\n self.info_update()", "def play_gui():\n global done\n GAME_OVER = False\n pygame.init()\n board = create_board()\n\n screen = pygame.display.set_mode(SIZE)\n draw_board(board, screen)\n pygame.display.update()\n\n myfont = pygame.font.SysFont(\"monospace\", 75)\n turn = np.random.randint(0, 2)\n\n while not GAME_OVER:\n g = Game()\n done = False\n transitions_agent = []\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n\n if 
event.type == pygame.MOUSEMOTION:\n pygame.draw.rect(screen, black, (0, 0, WIDTH, SQUARESIZE))\n posx = event.pos[0]\n if turn == PLAYER:\n pygame.draw.circle(screen, red, (posx, int(SQUARESIZE / 2)), RADIUS)\n pygame.display.update()\n\n if event.type == pygame.MOUSEBUTTONDOWN:\n pygame.draw.rect(screen, black, (0, 0, WIDTH, SQUARESIZE))\n\n if turn == PLAYER:\n posx = event.pos[0]\n col = int(math.floor(posx / SQUARESIZE))\n\n if is_valid_location(board, col):\n row = get_next_open_row(board, col)\n drop_piece(board, row, col, PLAYER_PIECE)\n\n if winning_move(board, PLAYER_PIECE):\n label = myfont.render(\"Player 1 wins!!\", 1, red)\n screen.blit(label, (40, 10))\n GAME_OVER = True\n\n turn = (turn + 1) % 2\n draw_board(board, screen)\n\n # # Ask for Player 2 Input\n if turn == AI and not GAME_OVER:\n observation = []\n #print(f\"BOARD: {board}\")\n temp_board = np.flipud(board)\n for col in range(COLUMN_COUNT):\n col_elements = temp_board[:,col]\n for element in col_elements:\n observation.append(element)\n\n #print(f\"OBS: {observation}\")\n observation = np.asarray(observation)\n col = agent.choose_action(observation)\n\n if is_valid_location(board, col):\n row = get_next_open_row(board, col)\n drop_piece(board, row, col, AI_PIECE)\n\n if winning_move(board, AI_PIECE):\n label = myfont.render(\"Player 2 wins!!\", 1, yellow)\n screen.blit(label, (40, 10))\n GAME_OVER = True\n\n draw_board(board, screen)\n turn = (turn + 1) % 2\n\n else:\n print(\"AI random choice\")\n col = np.random.randint(7)\n row = get_next_open_row(board, col)\n drop_piece(board, row, col, AI_PIECE)\n\n if winning_move(board, AI_PIECE):\n label = myfont.render(\"Player 2 wins!!\", 1, yellow)\n screen.blit(label, (40, 10))\n GAME_OVER = True\n\n draw_board(board, screen)\n turn = (turn + 1) % 2", "def you_won(self):\n self.end_of_level()\n self.message_holder.add_widget(self.you_win_label)\n Clock.schedule_once(self.goto_next_level, 5)", "def game_o_won(self, msg='YOU LOST', fr='#DC143C'):\n self.o_score += 1\n self.Score_Label.configure(text=msg, foreground=fr)\n self.Score_Label.grid(row=0,column=1, ipadx=12)\n self.change_button_state('disabled')", "def play_game():\n pass" ]
[ "0.7148402", "0.71446335", "0.7122622", "0.6774405", "0.6766827", "0.6748884", "0.672878", "0.6569718", "0.6546862", "0.65195405", "0.64864796", "0.6469882", "0.6464471", "0.6455949", "0.64405733", "0.6390221", "0.63795745", "0.63476944", "0.63342875", "0.6323387", "0.6317748", "0.6313038", "0.63084835", "0.6289814", "0.6274359", "0.627417", "0.625427", "0.6234467", "0.6231573", "0.62305814" ]
0.8086216
0
Create a list of players with size num_players. The colors of each player are in the order defined in BoardPlayer.POSSIBLE_COLORS. Each player has a depth of 1.
def get_player_list(num_players): colors = BoardPlayer.POSSIBLE_COLORS player_list = [] for i in range(num_players): player_list.append(Player(colors[i], DEPTH)) return player_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_player_list(num_players, game_grid):\n\n if num_players == 1:\n return [Cycle(\"Player 1\", 2, 2, \"R\", Color(\"Blue\"), Color(\"White\")),\n Cycle(\"AI\", len(game_grid) - 3, len(game_grid[0]) - 3, \"L\", Color(\"Red\"), Color(\"White\")),\n Cycle(\"AI\", len(game_grid) - 3, 2, \"D\", Color(\"Green\"), Color(\"White\")),\n Cycle(\"AI\", 2, len(game_grid) - 3, \"U\", Color(\"Yellow\"), Color(\"White\"))]\n elif num_players == 2:\n return [Cycle(\"Player 1\", 2, 2, \"R\", Color(\"Blue\"), Color(\"White\")),\n Cycle(\"Player 2\", len(game_grid) - 3, len(game_grid[0]) - 3, \"L\", Color(\"Red\"), Color(\"White\")),\n Cycle(\"AI\", len(game_grid) - 3, 2, \"D\", Color(\"Green\"), Color(\"White\")),\n Cycle(\"AI\", 2, len(game_grid) - 3, \"U\", Color(\"Yellow\"), Color(\"White\"))]\n elif num_players == 3:\n return [Cycle(\"Player 1\", 2, 2, \"R\", Color(\"Blue\"), Color(\"White\")),\n Cycle(\"Player 2\", len(game_grid) - 3, len(game_grid[0]) - 3, \"L\", Color(\"Red\"), Color(\"White\")),\n Cycle(\"Player 3\", len(game_grid) - 3, 2, \"D\", Color(\"Green\"), Color(\"White\")),\n Cycle(\"AI\", 2, len(game_grid) - 3, \"U\", Color(\"Yellow\"), Color(\"White\"))]", "def create_players(num_players):\n players = []\n for player_number in xrange(1,num_players+1):\n # create new players\n players.append(\n Player(player_number, get_new_deck(), deque()))\n\n return players", "def init_players(self):\n complain = \"\"\n players_turn = random.sample(range(self.n_players), self.n_players)\n players_created = {}\n picked_colors = []\n for x in range(self.n_players):\n while True:\n clear_output()\n try:\n color = input(\n f\"{complain}Player {x+1}, please type in one of the following colors: ({', '.join([x.capitalize() for x in self.world.player_colors if x not in picked_colors])}):\\n\").lower()\n if color in self.world.player_colors and color not in picked_colors:\n picked_colors.append(color)\n players_created[players_turn[x]] = Player(\n color.capitalize(), self.start_troops)\n break\n else:\n complain = \"Please enter a valid color\\n\"\n except:\n pass\n\n self.players = [players_created[y] for x in range(\n self.n_players) for y in players_created.keys() if int(y) == x]", "def split_into_players(self, team, num_players=5):\n height = team.shape[0] // num_players\n players = []\n\n for h in range(num_players):\n player = team[h * height : (h + 1) * height, :, :].copy()\n players.append(self.convert_to_pil_image(player))\n\n return players", "def create_players_list(self):\n for p in self.players_names:\n self._players_list.append(Player(p))", "def getPlayersForGame(self, board):\r\n players = []\r\n for preparer in self.playerPreparers:\r\n player = Player()\r\n preparer.prepare(player, board)\r\n players.append(player)\r\n \r\n return players", "def players(self) -> List[Player]:\n return [self.white_player, self.black_player]", "def __set_colors(self, players):\n\n colors = set()\n for p in players:\n if p.get_color() is None:\n continue\n colors.add(p.get_color())\n if len(colors) != 0 and len(colors) != len(players):\n raise ValueError(\"Each player does not have a unique assigned color.\")\n \n if len(colors) == 0:\n for i, p in enumerate(players):\n p.set_color(BoardPlayer.POSSIBLE_COLORS[i])", "def get_players(n_players):\n\n if n_players < 2 or 8 < n_players:\n raise ValueError('A game must have between 2 to 8 players. 
You input {} players.'.format(n_players))\n\n return {classes.Player(p) for p in range(n_players)}", "def __init__(self, number_players=1000):\n self.player_list = []\n for i in range(number_players):\n self.player_list.append(Player())", "def initialize_players():\n return [Player(name, Hand([]), 0) for name in PLAYER_NAMES]", "def make_players(player_names, seed):\n\n np.random.seed(seed)\n players = [Player(name) for name in player_names]\n\n for player in players:\n player.set_prefs(\n np.random.permutation([p for p in players if p != player]).tolist()\n )\n\n return players", "def get_player_colors() -> List[Tuple[float, float, float]]:\n return PLAYER_COLORS", "def print(self, player_positions: list = None) -> list:\n\n if player_positions is None:\n player_positions = []\n\n init(autoreset=True)\n element_to_color = {-1: Back.RED, 0: Back.WHITE, 1: Back.GREEN, 2: Back.BLUE}\n\n board_text = \"\"\n colored_grid = []\n for row in range(self.grid_height):\n row_text = \"\\n\"\n\n for col in range(self.grid_width):\n cell_id = row * self.grid_width + col\n if cell_id in player_positions:\n cell_color = Back.CYAN\n else:\n cell_value = self.grid[cell_id]\n cell_color = element_to_color[cell_value]\n\n colored_grid.append(cell_color)\n\n if col == self.grid_width - 1:\n row_text += f\" {cell_color} {Back.RESET}\"\n else:\n row_text += f\" {cell_color} {Back.RESET} |\"\n\n if row != self.grid_height - 1:\n divider = \"\\n\"\n for cell in range(self.grid_width):\n\n if cell != self.grid_width - 1:\n divider += \"------\"\n else:\n divider += \"-----\"\n\n row_text += divider\n\n board_text += row_text\n\n logger.info(board_text)\n return colored_grid", "def get_scores_in_order_of_players(self):\n \n players = self.referee.get_current_state().get_players()\n\n player_scores = []\n for player_color in self.player_color_order:\n for player in players:\n if player_color == player.get_color():\n player_scores.append(player.get_score())\n break\n\n return player_scores", "def __init__(self, players, board_size, board=None, timeout=10):\n if not 2 <= len(players) <= 4:\n raise ValueError(\"Invalid number of players provided.\")\n\n if board is None:\n board = Board(*board_size)\n\n self.__check_board_is_valid(board, players)\n\n self.board = board\n \n self.__set_colors(players)\n\n self.players = {p.get_color(): p for p in players}\n\n self.state = State([BoardPlayer(p.get_color()) for p in players], board)\n self.violators = []\n self.timeout = timeout", "def create_player_list(self, current_game):\n players = [Player(c['summonerId'], c['championId'], c['teamId']) for c in current_game['participants']]\n return players", "def make_opponents(player_list):\n player_list_copy = player_list.copy()\n opponents_list = []\n player_number = len(player_list_copy)\n if (player_number % 2 == 0) | (player_number == 3):\n for i in range(int(player_number / 2)):\n player1 = random.choice(player_list_copy)\n player_list_copy.remove(player1)\n player2 = random.choice(player_list_copy)\n player_list_copy.remove(player2)\n opponents = [player1, player2]\n opponents_list.append(opponents)\n else:\n player1 = random.choice(player_list_copy)\n player_list_copy.remove(player1)\n player2 = random.choice(player_list_copy)\n player_list_copy.remove(player2)\n opponents_list = [[player1, player2]]\n return opponents_list", "def get_player_squares(self, player: PlayerColor) -> List[Square]:\r\n return [square for square in self.squares.values() if\r\n square.state == SquareState.OCCUPIED\r\n and square.occupant.owner == 
player]", "def get_victors(self):\n if self.is_game_over():\n scores = [p.get_score() for p in self.state.get_players()]\n if len(scores) == 0:\n return []\n max_score = max(scores)\n victors = []\n for p in self.state.get_players():\n if p.get_color() not in self.violators and p.get_score() == max_score:\n victors.append(self.players[p.get_color()])\n return victors\n else:\n return None", "def blankTeam():\n temp = [Player(\"Some dude\", 0, 0, 0, 0, 0, 0) for x in range(2)]\n return temp", "def create_players(self):\n players = []\n count = self.query('player', 'count', '?')\n for index in range(0, int(count)):\n player_id = self.query('player', 'id', str(index), '?')\n player = SqueezeBoxDevice(self, player_id)\n players.append(player)\n return players", "def _initiate_board(self):\n grid = []\n for i in range(constant.BOARD_DIMENSION):\n # Starts each row\n current_row = []\n for j in range(constant.BOARD_DIMENSION):\n # Adds the pieces depending on the position\n if i < constant.ROWS_OF_PIECES:\n # Black pieces\n if (j + i) % 2 != 0:\n current_row.append(Piece(i, j, Player.black))\n self.num_black_pieces = self.num_black_pieces + 1\n else:\n current_row.append(None)\n\n elif i >= constant.BOARD_DIMENSION - constant.ROWS_OF_PIECES:\n # White pieces\n if (j + i) % 2 != 0:\n current_row.append(Piece(i, j, Player.white))\n self.num_white_pieces = self.num_white_pieces + 1\n else:\n current_row.append(None)\n\n else:\n current_row.append(None)\n\n grid.append(current_row)\n\n return grid", "def yatzy_card(players):\n return [[0 for x in range(0, 14)] for x in range(players)]", "def generating_pairs(self, players_list) -> list[tuple[Player]]:\n apairing_players = []\n already_paired = []\n id_number = 0\n times_number_loop = 0\n breaks_number = 0\n while len(apairing_players) != 4:\n\n times_number_loop += 1\n if id_number == 8:\n id_number = 0\n pair = self.create_pair(players_list, id_number, already_paired)\n if pair is None:\n id_number += 1\n else:\n already_paired.append(pair[0])\n already_paired.append(pair[1])\n apairing_players.append(pair)\n id_number += 1\n if times_number_loop == 50:\n already_paired, apairing_players = self.break_pair(already_paired, apairing_players, breaks_number)\n times_number_loop = 0\n breaks_number += 1\n\n return apairing_players", "def __init__(self, player_list):\n\n self.tournament_depth = 0\n self.winner_state = 0\n self.winner_list = []\n self.winner_list_temp = []\n\n if (len(player_list) < 3) | (len(player_list) > 8):\n raise Exception(\"Error: It has to be between 3 - 8 players!\")\n if not all(isinstance(s, str) for s in player_list):\n raise Exception(\"Error: The player names has to be in the form of a string!\")\n self.waiting_players = player_list.copy()\n self.start_player_list = player_list.copy()\n player_list_copy = player_list.copy()\n if ((len(player_list) % 2) == 0) | (len(player_list) == 3):\n self.opponents_queue = make_opponents(player_list_copy)\n self.all_opponents = [self.opponents_queue.copy()]\n self.opponents = self.opponents_queue.pop(0)\n self.waiting_players.remove(self.opponents[0])\n self.waiting_players.remove(self.opponents[1])\n for i in range(len(self.opponents_queue)):\n self.waiting_players.remove(self.opponents_queue[i][0])\n self.waiting_players.remove(self.opponents_queue[i][1])\n else:\n self.opponents_queue = make_opponents(player_list_copy)\n self.all_opponents = [self.opponents_queue.copy()]\n self.opponents = self.opponents_queue.pop(0)\n self.waiting_players.remove(self.opponents[0])\n 
self.waiting_players.remove(self.opponents[1])", "def get_all_pieces(self, player):\n pieces = []\n for row in range(constant.BOARD_DIMENSION):\n for col in range(constant.BOARD_DIMENSION):\n piece = self.get_piece((row, col))\n if piece is not None and piece.get_player() is player:\n pieces.append(piece)\n return pieces", "def create_board(size) -> list:\n return list(itertools.product([i for i in range(size)], repeat=2))", "def generate_random_colours_list(rng: random.Random, size: int) -> List[TupleInt3]:\n return [random_colour(rng) for _ in range(size)]", "def get_players(n, playerspace):\n ps = []\n for i in range(n):\n name = \"\"\n while name == \"\":\n name = input(\"What's the name of player @ index {} (can't be empty): \".format(i))\n p = Player(name, i)\n p.playerspace = playerspace()\n ps.append(p)\n return ps" ]
[ "0.74653816", "0.71190274", "0.69114", "0.6725265", "0.65766233", "0.63620275", "0.62997985", "0.6270952", "0.6266892", "0.6210848", "0.6206907", "0.61643654", "0.61266655", "0.5864176", "0.58553654", "0.5844819", "0.5837506", "0.5829588", "0.5822507", "0.5800725", "0.57976323", "0.57780206", "0.57753134", "0.5772371", "0.5747208", "0.5705166", "0.5699265", "0.5688749", "0.56703305", "0.5656079" ]
0.8641923
0
Parse a string representing the number of players and convert it to an integer representing the number of players to use. The string must be numeric and must be between 2 and 4, inclusive, otherwise None is returned
def parse_num_players(num_players): try: num_players = int(num_players) except ValueError: return None if num_players < 2 or num_players > 4: return None return num_players
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getNumFromString(self, string):\n \n m = re.search(r'\\d+$', string)\n if m is not None:\n return int(m.group())\n else:\n return 0", "def info_player_id(self, playername):\r\n number = 0\r\n name = playername.title().replace(\" \", \"+\")\r\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\", \"Accept\": \"text/plain\",\r\n 'Referer': 'http://' + self.domain + '/team_news.phtml', \"User-Agent\": user_agent}\r\n req = self.session.get('http://stats.comunio.es/search.php?name=' + playername, headers=headers).content\r\n soup = BeautifulSoup(req, \"html.parser\")\r\n for i in soup.find_all('a', {'class', 'nowrap'}):\r\n number = re.search(\"([0-9]+)-\", str(i)).group(1)\r\n break # Solo devuelve la primera coincidencia\r\n return number", "def string_u_broj(self):\n if self.player_input == \"rock\":\n self.player_number = 0\n elif self.player_input == \"spock\":\n self.player_number = 1\n elif self.player_input == \"paper\":\n self.player_number = 2\n elif self.player_input == \"lizard\":\n self.player_number = 3\n elif self.player_input == \"scissors\":\n self.player_number = 4\n else:\n self.player_number = -1\n raise RpslsError(102)\n return self.player_number", "def count_players(definition):\n _, player_definition = parse_player_definition(definition)\n return (int(player_definition['left_players']) +\n int(player_definition['right_players']))", "def create_number_of_players(self):\n self.number_of_players = pyip.inputInt(\n prompt='\\nEnter number of players (1 to 4):\\n', min=1, max=4)", "def __rank_from_str_to_int(rank: str) -> int:\n return int(rank) - 1", "def get_number_of_players():\n number_of_players = None\n while not(type(number_of_players)) == int:\n try:\n number_of_players = int(input(\"How many players are there? 
\"))\n if number_of_players == 0:\n raise zeroPlayersError\n elif number_of_players > 6:\n raise tooManyPlayersError\n except zeroPlayersError:\n print(\"The game needs at least 1 player\")\n number_of_players = None\n except tooManyPlayersError:\n print(\"Sorry you can't have more than 6 players\")\n number_of_players = None\n except:\n number_of_players = None\n return number_of_players", "def name_to_number(name):\n\n # A simple if/elif/else game...\n\n if name == \"rock\":\n number = 0\n elif name == \"Spock\":\n number = 1\n elif name == \"paper\":\n number = 2\n elif name == \"lizard\":\n number = 3\n else:\n number = 4\n return number", "def countPlayers():\n db, cursor = connect()\n cursor.execute( \" SELECT count(*) as num FROM players \")\n count = cursor.fetchone()[0]\n return int(count)", "def name_to_number(name):\r\n \r\n if name == \"rock\":\r\n return 0\r\n elif name == \"Spock\":\r\n return 1\r\n elif name == \"paper\":\r\n return 2\r\n elif name == \"lizard\":\r\n return 3\r\n elif name == \"scissors\":\r\n return 4\r\n else:\r\n return \"Invalid!Enter any one of the following: rock,Spock,paper,lizard,scissors\"", "def name_to_number(name):\n if name == \"rock\":\n number = 0\n elif name == \"Spock\":\n number = 1\n elif name == \"paper\":\n number = 2\n elif name == \"lizard\":\n number = 3\n elif name == \"scissors\":\n number = 4\n else:\n print \"Name is invalid!\"\n return 1\n return number", "def get_player_count(url):\r\n page = urlopen(url)\r\n\r\n soup = BeautifulSoup(page, features=\"html.parser\")\r\n\r\n table_divs = soup.findAll('td', attrs={'class': 'ranking-page-table__column ranking-page-table__column--dimmed'})\r\n\r\n sum = 0\r\n for i in range(len(table_divs)//5):\r\n sum += int((table_divs[i*5].text).strip().replace(',', ''))\r\n return sum", "def convert_to_num(version_str):\n if not version_str:\n return 0\n if str(version_str).isdigit():\n return version_str\n version_str = version_str.replace(\".\", \"\")\n return int(version_str) if version_str.isdigit() else 0", "def number_of_players(self) -> int:\n return self.param.number_of_players", "def countPlayers():\n conn = connect()\n cur = conn.cursor()\n cur.execute(\"SELECT COUNT(*) FROM players\")\n players = int(cur.fetchone()[0])\n conn.close()\n return players", "def _parse_scan_number(self, string):\n \n # match scan number pattern\n match = SCAN_NUMBER_PATTERN.search(string)\n if not match:\n return None\n \n # return as int\n return int(match.group(1))", "def AskHowManyPlayers():\n\n\t# Loop forever until the user enters an integer between 1 and 10, inclusive.\n\twhile True:\n\t\tprint \"How many players? 
Enter a number between 1 and 10, or press enter for default 2:\"\n\t\tnum_players = SolicitInteger( lobound=1, hibound=10, default_return=2 )\n\t\tif num_players != None:\n\t\t\tprint \"Ok, {} players.\".format( num_players )\n\t\t\treturn num_players", "def set_n_players(self):\n complain = \"\"\n while True:\n clear_output()\n try:\n self.n_players = int(\n input(f\"{complain}Please insert the number of players (between 2 to 6): \\n\"))\n if self.n_players >= 2 and self.n_players < 7:\n self.start_troops = 120 / self.n_players\n break\n elif self.n_players < 2:\n complain = \"Not enough players!\\n\"\n elif self.n_players >= 7:\n complain = \"Too many players!\\n\"\n except:\n complain = \"Not a valid number!\\n\"\n pass", "def name_to_number(name):\n if (name == 'rock' or name == 'Rock'):\n return 0\n elif (name == 'Spock' or name == 'spock'):\n return 1\n elif (name == 'paper' or name == 'Paper'):\n return 2\n elif (name == 'lizard' or name == 'Lizard'):\n return 3\n elif (name == 'scissors' or name == 'Scissors'):\n return 4\n else:\n return -1", "def countPlayers(t_name=''):\n conn, cur = connect()\n if t_name == '':\n cur.execute(\"SELECT COUNT(*) FROM PLAYERS;\")\n else:\n t_id = getTournamentID(t_name, False)\n if t_id == -1:\n return 0\n query = \"SELECT COUNT(*) FROM PLAYERS WHERE T_ID = %s\"\n param = (t_id, )\n cur.execute(query, param)\n row = cur.fetchone()\n conn.close()\n return row[0]", "def countPlayers():\n with _connect_db() as (conn, cur):\n cur.execute(\"\"\"SELECT COUNT(*) from players;\"\"\")\n ret = int(cur.fetchone()[0])\n return ret", "def countPlayers():\n db = connect()\n db_cursor = db.cursor()\n query = \"SELECT COUNT(id) AS total_players FROM players\"\n db_cursor.execute(query)\n results = db_cursor.fetchone()\n db.close()\n if results:\n return results[0]\n else:\n return '0'", "def noOfPlayers(self):\n\t\tnumber = 0\n\t\tfor n in range(6):\n\t\t\tif self.playerList[n] != None:\n\t\t\t\tnumber = number + 1\n\t\treturn number", "def netflix_read(string):\n val = -1\n ind = -1\n string = string.strip()\n if string.isdigit():\n val = int(string)\n ind = 0\n elif string:\n val = int(string.strip(':'))\n ind = 1\n return (val, ind)", "def countPlayers():\n conn, c = connect()\n\n q = \"select count(id) FROM PLAYERS;\"\n c.execute(q)\n res = c.fetchone()\n\n c.close()\n conn.commit()\n conn.close()\n return int(res[0])", "def parse(value):\n return int(value)", "def countPlayers():\n\n count = 0\n query = (\"SELECT COUNT(id) FROM players;\")\n results = executeQuery({'dbname': 'tournament', 'query' : query, 'type' : 'find'})\n for row in results:\n count = row[0]\n return count", "def countPlayers():\n conn, cur = connect()\n query = \"SELECT count(*) AS player_count FROM players;\"\n try:\n cur.execute(query)\n except:\n print(\"Error encountered when selecting player count from players table\")\n num_players = cur.fetchone()\n conn.close()\n return num_players['player_count']", "def countPlayers():\n dbconnection = connect()\n dbcursor = dbconnection.cursor()\n\n # Use of 'COALESCE' returns zero instead of 'None' when table is empty\n dbcursor.execute(\"SELECT COALESCE(COUNT(*), 0) FROM players\")\n\n # Assign only the first value in the first tuple to avoid error\n playerCount = dbcursor.fetchall()[0][0]\n\n dbconnection.close()\n return playerCount", "def get_num(self, data):\n data = NUM_PATTERN.findall(data)\n if data:\n return int(data[0])\n return 0" ]
[ "0.63758874", "0.6296648", "0.61954373", "0.6072014", "0.6059474", "0.60295767", "0.6026044", "0.6011106", "0.59960264", "0.5989781", "0.5965972", "0.5880547", "0.5842748", "0.58197284", "0.58116", "0.58019173", "0.5784927", "0.576755", "0.575936", "0.5723573", "0.57172006", "0.5716199", "0.57082176", "0.5677585", "0.5672561", "0.5655438", "0.5637927", "0.55442244", "0.5544026", "0.55329" ]
0.8473476
0
Run the visualizer with the given number of players.
def run_visualizer(num_players):
    num_players = parse_num_players(num_players)
    if num_players is None:
        raise ValueError("Invalid player count given.")
    players = get_player_list(num_players)
    ref = Referee(players, (5, 5), timeout=600)
    win = GameVisualizerWindow(ref)
    win.show_all()
    Gtk.main()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_simulation(self, num_games=10):\n for _ in range(num_games):\n self.result.append(self.single_game())", "def run_trials(self, num=0):\n if num == 'all':\n self.trials_to_run = len(self.trials)\n else:\n self.trials_to_run = num\n self.vision_egg.go()", "def run_simulation(num_players=5, strategies=None):\n players = Players(num_players)\n if strategies:\n set_strategies(players, strategies)\n table = Table()\n play_game(players, table)\n scores = calculate_scores(players)\n return scores", "def set_n_players(self):\n complain = \"\"\n while True:\n clear_output()\n try:\n self.n_players = int(\n input(f\"{complain}Please insert the number of players (between 2 to 6): \\n\"))\n if self.n_players >= 2 and self.n_players < 7:\n self.start_troops = 120 / self.n_players\n break\n elif self.n_players < 2:\n complain = \"Not enough players!\\n\"\n elif self.n_players >= 7:\n complain = \"Too many players!\\n\"\n except:\n complain = \"Not a valid number!\\n\"\n pass", "def _draw_players(self):\n for player in self.players:\n player.draw()", "def run(self, num_episodes=1):\n pygame.display.update()\n self.fps_clock = pygame.time.Clock()\n\n try:\n for episode in range(num_episodes):\n self.run_episode()\n self.env.new_episode()\n self.event = Event.next(self.event)\n except QuitRequestedError:\n print(\"Exit Program\")\n\n pygame.quit()", "def create_number_of_players(self):\n self.number_of_players = pyip.inputInt(\n prompt='\\nEnter number of players (1 to 4):\\n', min=1, max=4)", "def run(self, count=1, trace=False):\n for _ in range(count):\n self.game.reset()\n self.game.play([self.player1, self.player2], trace=trace)\n self.winning_moves += self.game.encode_win()\n\n if not self.game.winner:\n self.record.draw += 1\n elif self.game.winner.marker == 'X':\n self.record.player1 += 1\n elif self.game.winner.marker == 'O':\n self.record.player2 += 1\n print(self.record)", "def main() -> None:\n parser = argparse.ArgumentParser(\n description=\"Optimise your Sportpools player selection\"\n )\n parser.add_argument(\n \"-f\",\n \"--file\",\n help=\"Path to file to import\",\n type=str,\n default=\"./data/Tennis Abstract_ 2020 Australian Open Men's Draw Forecast Forecast.htm\",\n required=True,\n )\n parser.add_argument(\n \"-b\",\n \"--black-points\",\n \"--black\",\n help=\"Total number of black points to use\",\n type=int,\n default=20,\n )\n parser.add_argument(\n \"-c\",\n \"--count\",\n \"--player-count\",\n help=\"Number of players to select\",\n type=int,\n default=14,\n )\n parser.add_argument(\n \"-l\", \"--loser\", help=\"Selected loser\", type=str,\n )\n\n args, _ = parser.parse_known_args()\n\n pool = TennisPool(ROUNDS).load_data(args.file).apply_filters().add_features()\n\n emulator = TennisPoolEmulator(pool.get_results())\n\n pool_results = emulator.play_draw(ROUNDS).add_features(ROUNDS).get_results()\n\n selection_optimum = optimise_selection(\n pool_results,\n selection_limit=args.count,\n black_points_limit=args.black_points,\n rounds=ROUNDS,\n loser=args.loser,\n )\n\n LOGGER.info(\"Optimal set of players is as follows:\")\n LOGGER.info(\"\\r\\n%s\", selection_optimum[\"schedule\"].head(25))\n\n LOGGER.info(\n \"The selection of these players results in %d points with %d black points\",\n selection_optimum[\"schedule\"][\"potency\"].sum(),\n selection_optimum[\"schedule\"][\"black\"].sum(),\n )\n LOGGER.info(\"Select your joker in this order:\")\n LOGGER.info(\n \"\\r\\n%s\",\n str(\n selection_optimum[\"schedule\"][selection_optimum[\"schedule\"][\"rounds\"] >= 4]\n 
.sort_values(by=[\"black\"], ascending=True)\n .head(5)\n ),\n )", "def playGames(self, num, verbose=False):\n\n num = int(num / 2)\n prevWon = 0\n newWon = 0\n draws = 0\n for _ in tqdm(range(num), desc=\"Arena.playGames (1)\"):\n gameResult = self.playGame(verbose=verbose)\n if gameResult == 1:\n prevWon += 1\n elif gameResult == -1:\n newWon += 1\n else:\n draws += 1\n\n self.landlord, self.farmers = self.farmers, self.landlord\n\n for _ in tqdm(range(num), desc=\"Arena.playGames (2)\"):\n gameResult = self.playGame(verbose=verbose)\n if gameResult == -1:\n prevWon += 1\n elif gameResult == 1:\n newWon += 1\n else:\n draws += 1\n\n return prevWon, newWon, draws", "def playManyGames(number):\n wins = 0\n losses = 0\n winRolls = 0\n lossRolls = 0\n player = Player()\n for count in range(number):\n hasWon = player.rollDice()\n rolls = 0\n if player.winner:\n wins += 1\n winRolls += player.rollsCount\n elif player.loser:\n losses += 1\n lossRolls += player.rollsCount\n print(\"The total number of wins is\", wins)\n print(\"The total number of losses is\", losses)\n print(\"The average number of rolls per win is %0.2f\" % \\\n (winRolls / wins))\n print(\"The average number of rolls per loss is %0.2f\" % \\\n (lossRolls / losses))\n print(\"The winning percentage is %0.3f\" % (wins / number))", "def startPresentation(ntrials):\n\n for trialIdx in range(ntrials):\n\n # draw the fixcross followed by the star\n drawFixCross(1.0)\n drawStar(4.0)", "def visualization_experiments(\n num_experiments: Optional[int] = None\n) -> None:\n print(\"RUNNING `visualization_experiments`\")\n\n if num_experiments is None:\n num_experiments = NUM_VISUALIZATION_EXPERIMENTS\n\n for heuristic in hans.DEFAULT_HANS_EVAL_HEURISTICS:\n visualization.main(\n train_task_name=\"hans\",\n eval_task_name=\"hans\",\n num_eval_to_collect=num_experiments,\n use_parallel=USE_PARALLEL,\n hans_heuristic=heuristic,\n trained_on_task_name=\"hans\")\n\n visualization.main(\n train_task_name=\"hans\",\n eval_task_name=\"mnli-2\",\n num_eval_to_collect=num_experiments,\n use_parallel=USE_PARALLEL,\n hans_heuristic=None,\n trained_on_task_name=\"hans\")", "def start_tournament(self):\n for i in range(0, len(self.agents)):\n for j in range(i+1, len(self.agents)):\n p1, p2 = self.agents[i], self.agents[j]\n p1_total_win = 0\n p2_total_win = 0\n for game_num in range(self.G):\n p1_wins, p2_wins, actions = self.play_game(p1, p2)\n p1_total_win += p1_wins\n p2_total_win += p2_wins\n print(p1.name + \": \" + str(p1_total_win) + \" wins, \" + p2.name + \": \" + str(p2_total_win) + \" wins\")\n if self.topp_visualization:\n p1_num = p1.filename.split(\"ep_\")[1].split(\".h5\")[0]\n p2_num = p2.filename.split(\"ep_\")[1].split(\".h5\")[0]\n os.chdir(ROOT_DIR)\n self.visualizer.visualize(actions, p1_num + \"_\" + p2_num)\n self.print_result()", "def main():\n args = get_parser().parse_args()\n players = prepare_game(\n decks_count=args.decks,\n auto_mode=args.auto_mode,\n player_one_name=args.name_player,\n players_count=args.players,\n )\n game(players=players)", "def drawGame(gameWindow, figurelist):\n for i in range(15): #For loops to draw al figures to window\n for k in range(25):\n todraw = figurelist[i][k]\n todraw.draw(gameWindow)", "def play(self):\n self.populateGrid()\n self.displayGrid()", "def run(self):\n for frm_idx in range(self.frames_nbr):\n for player in self.players:\n assert isinstance(player, Player)\n player.play_frame(Frame(pins_nbr=self.pins_nbr))\n\n # Check the last frame for each player\n for player in 
self.players:\n if player.frames[self.frames_nbr-1].is_strike():\n for _ in range(2):\n player.play_frame(Ball(pins_nbr=self.pins_nbr))\n if player.frames[self.frames_nbr-1].is_spare():\n player.play_frame(Ball(pins_nbr=self.pins_nbr))", "def main():\n winning_score = 100\n counter = 1\n game_state_list = []\n\n # Enable command-line arguments\n parser = argparse.ArgumentParser()\n # Add command-line argmuemnt\n parser.add_argument('--numPlayers', type=int)\n args = parser.parse_args()\n\n # Get number of games from user input\n num_of_games = input_int(\"How many games do you want to play?: \")\n\n # Get number of players in each game\n for x in range(num_of_games):\n # Note. Use this commented code below if you want to also let the user define the number of players in each game\n #game_state_list.append((Game(6) ,(input_int(\"How many players in Game {}?: \".format((x + 1))))))\n\n # list of tuples (Game class instnace, num_of_plauyers)\n game_state_list.append((Game(6) ,args.numPlayers))\n\n # Play all games. Note that the games are not aware of each other\n for game_state, num_users in game_state_list:\n print \"\\nStarting Game\",counter\n game_loop(game_state,num_users,winning_score)\n counter += 1\n\n print \"Completed all the games!\"", "def split_into_players(self, team, num_players=5):\n height = team.shape[0] // num_players\n players = []\n\n for h in range(num_players):\n player = team[h * height : (h + 1) * height, :, :].copy()\n players.append(self.convert_to_pil_image(player))\n\n return players", "def play_multiple_games(players, num_games=10, seed=2):\n total_games_winners = {}\n for player in players:\n if player.name not in total_games_winners:\n total_games_winners[player.name] = 0\n random.seed(seed)\n for game in range(num_games):\n print('-------- Game', game, '--------')\n random.shuffle(players)\n print('Initial game positions: ', players, '\\n')\n if all(x > 1 for x in [p.amount for p in players]):\n rotation_winners = play_multiple_rotations(players)\n for player_name in total_games_winners:\n total_games_winners[player_name] += rotation_winners[player_name]\n print()\n # print('Final Win Count: ', total_games_winners)\n print(players)", "def train(\n self, num_episodes, max_episode_length, reward_network=None,\n ):\n\n for _ in range(num_episodes):\n self.train_episode(max_episode_length)\n\n if self.training_i % self.play_interval == 0:\n self.play(\n max_episode_length,\n self.render,\n reward_network=reward_network,\n )", "def show_players(self) -> None:\n players_list = []\n for player in PLAYERS:\n data_player = ((\n str(player.get(\"first_name\")) + \" \" +\n str(player.get(\"last_name\")) + \" | \" +\n str(player.get(\"birthday\")) + \" | \" +\n str(player.get(\"genre\")) + \" | \" +\n str(player.get(\"ranking\"))\n ))\n players_list.append(data_player)\n utils.clear_terminal()\n print(\n \"Do you want the list of players by alphabetical order or by ranking ? 
\\n\"\n \"1 - Ranking players list \\n\"\n \"2 - Alphabetical players list\"\n )\n choice = check.request_selection_with_number(\"ranking\", \"alphabetical\", \"None\")\n if choice == \"ranking\":\n player_id = 0\n players_list = sorted(players_list, key=lambda player: players_list[4])\n utils.clear_terminal()\n print(\"==========================================\")\n print(\"List of all Players in ranking order : \")\n print(\"==========================================\")\n for player in players_list:\n player_id += 1\n print(str(player_id) + \" : \" + player)\n elif choice == \"alphabetical\":\n player_id = 0\n players_list.sort()\n utils.clear_terminal()\n print(\"============================================\")\n print(\"List of all Players in alphabetical order : \")\n print(\"============================================\")\n for player in players_list:\n player_id += 1\n print(str(player_id) + \" : \" + player)", "def game(players: List[Player]):\n desk_pile = Deck()\n turn_number = 0\n\n while players[0].has_cards:\n turn_number += 1\n print(f\"Turn {turn_number}\")\n for player in players:\n played_card = player.turn()\n desk_pile.add_card(played_card)\n check_snap(desk_pile, players)\n sleep(DEFAULT_TURN_TIME_SECONDS)\n\n pile_sizes = [(player, player.pile_size) for player in players]\n # sort from maximum player pile size to minimum, first player in the list wins the round\n pile_sizes.sort(key=lambda x: x[1], reverse=True)\n game_winner: Player = pile_sizes[0][0]\n\n # print game results\n print(\"############################\")\n print(f\"Player {game_winner.name} WON!\")\n print(\"############################\")\n print(\n f\"Game results:\\n\"\n + \"\\n\".join(\n f\"{player.name}: {player.pile_size} cards\" for player in list(players)\n )\n )", "def __init__(self, number_players=1000):\n self.player_list = []\n for i in range(number_players):\n self.player_list.append(Player())", "def test(\n self, render=True, opponent=None, wormzero_player=None, num_tests=1, num_gpus=0\n ):\n opponent = opponent if opponent else self.config.opponent\n wormzero_player = wormzero_player if wormzero_player else self.config.wormzero_player\n self_play_worker = self_play.SelfPlay.options(\n num_cpus=0, num_gpus=num_gpus,\n ).remote(self.checkpoint, self.Game, self.config, np.random.randint(10000))\n results = []\n for i in range(num_tests):\n print(f\"Testing {i+1}/{num_tests}\")\n results.append(\n ray.get(\n self_play_worker.play_game.remote(\n 0, 0, render, opponent, wormzero_player,\n )\n )\n )\n self_play_worker.close_game.remote()\n\n if len(self.config.players) == 1:\n result = np.mean([sum(history.reward_history) for history in results])\n else:\n result = np.mean(\n [\n sum(\n reward\n for i, reward in enumerate(history.reward_history)\n if history.to_play_history[i - 1] == wormzero_player\n )\n for history in results\n ]\n )\n return result", "def main():\n play_game(progression)", "def self_play_visualisation(board_size=BOARD_SIZE):\n policy_value = SimpleCNN([board_size, board_size, 2])\n history, winner = play_game(policy_value=policy_value)\n print(\"Watching game replay\\nPress Return to advance board\")\n for state, board, hoice in history:\n print(state)\n input(\"\")\n\n if winner == 1:\n print(\"Black won\")\n else:\n print(\"White won\")", "def run(self):\n count = self.neuron_count\n for i in range(0, count):\n self.run(i)", "def select_player(n):\n pygame.display.set_caption(\"You selected: \" + PROF[n])" ]
[ "0.6365585", "0.6150837", "0.6030641", "0.59946847", "0.59803605", "0.5919918", "0.5898477", "0.58953476", "0.5692316", "0.56912553", "0.56769323", "0.56539273", "0.55989254", "0.5564556", "0.55590165", "0.5540571", "0.5535289", "0.5460685", "0.5444915", "0.54234105", "0.54198074", "0.5403095", "0.53955185", "0.53891677", "0.5384593", "0.5371368", "0.5364338", "0.5349206", "0.53361374", "0.5331152" ]
0.8455506
0
Force sorted responses by self.ipset._get_new/deleted_set_ips. _get_new/deleted_set_ips internally use sets and return randomly ordered responses. This method ensures sorted responses from them in order to guarantee the call order in self.ipset.set_members.
def force_sorted_get_set_ips(self):
    original_get_new_set_ips = self.ipset._get_new_set_ips
    original_get_deleted_set_ips = self.ipset._get_deleted_set_ips

    def sorted_get_new_set_ips(set_name, expected_ips):
        unsorted = original_get_new_set_ips(set_name, expected_ips)
        return sorted(unsorted)

    def sorted_get_deleted_set_ips(set_name, expected_ips):
        unsorted = original_get_deleted_set_ips(set_name, expected_ips)
        return sorted(unsorted)

    mock.patch.object(self.ipset, '_get_new_set_ips',
                      side_effect=sorted_get_new_set_ips).start()
    mock.patch.object(self.ipset, '_get_deleted_set_ips',
                      side_effect=sorted_get_deleted_set_ips).start()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lotteryPendingQueue(ipSet):\n\t# make list of possible predicates and remove duplicates\n\tpredicates = [ip.predicate for ip in ipSet]\n\tseen = set()\n\tseen_add = seen.add\n\tpredicates = [pred for pred in predicates if not (pred in seen or seen_add(pred))]\n\n\t#choose the predicate\n\tweightList = np.array([pred.num_tickets for pred in predicates])\n\ttotalTickets = np.sum(weightList)\n\tprobList = np.true_divide(weightList, totalTickets)\n\tchosenPred = np.random.choice(predicates, p=probList)\n\n\t#choose the item and then ip\n\tchosenPredSet = ipSet.filter(predicate=chosenPred)\n\titem = chooseItem(chosenPredSet)\n\tchosenIP = ipSet.get(predicate=chosenPred, item=item)\n\n\t# if this ip is not in the queue\n\tif not chosenIP.is_in_queue:\n\t\tchosenIP.add_to_queue()\n\n\tchosenIP.refresh_from_db()\n\t# if the queue is full, update the predicate\n\treturn chosenIP", "def useLottery(ipSet):\n\t# make list of possible predicates and remove duplicates\n\tpredicates = [ip.predicate for ip in ipSet]\n\tseen = set()\n\tseen_add = seen.add\n\tpredicates = [pred for pred in predicates if not (pred in seen or seen_add(pred))]\n\n\t#choose the predicate\n\tweightList = np.array([pred.num_tickets for pred in predicates])\n\ttotalTickets = np.sum(weightList)\n\tprobList = np.true_divide(weightList, totalTickets)\n\tchosenPred = np.random.choice(predicates, p=probList)\n\n\t#choose the item and then ip\n\tchosenPredSet = ipSet.filter(predicate=chosenPred)\n\titem = chooseItem(chosenPredSet)\n\tchosenIP = ipSet.get(predicate=chosenPred, item=item)\n\n\tchosenIP.predicate.award_ticket()\n\n\treturn chosenIP", "def ipset():\n return IPSet(x=np.linspace(0, 10, 11), y=np.random.randn(11), x_new=np.linspace(3, 9, 20))", "def test_list_vips_sort(self):\r\n resources = \"vips\"\r\n cmd = vip.ListVip(test_cli20.MyApp(sys.stdout), None)\r\n self._test_list_resources(resources, cmd,\r\n sort_key=[\"name\", \"id\"],\r\n sort_dir=[\"asc\", \"desc\"])", "def get_new_local_ips(self, count: int=1):\n\n if count <= 0:\n return []\n\n # add more unused local ips to the pool, if needed\n while len(self.unused_local_ips) < count and self.expand_unused_local_ips() == True:\n pass\n\n unused_local_ips = self.unused_local_ips\n uncertain_local_ips = self.uncertain_local_ips\n count_certain = min(count, len(unused_local_ips))\n retr_local_ips = []\n\n for _ in range(0, count_certain):\n random_local_ip = choice(sorted(unused_local_ips))\n retr_local_ips.append(str(random_local_ip))\n unused_local_ips.remove(random_local_ip)\n\n # retrieve uncertain local ips\n if count_certain < count:\n count_uncertain = count - count_certain\n\n # check if new uncertain IPs have to be created\n if len(uncertain_local_ips) < count_uncertain:\n ipspace_multiplier = self.UNCERTAIN_IPSPACE_MULTIPLIER\n\n max_new_ip = self.max_uncertain_local_ip.to_int() + ipspace_multiplier * count_uncertain\n\n count_new_ips = max_new_ip - self.max_uncertain_local_ip.to_int()\n\n # create ipspace_multiplier * count_uncertain new uncertain local IP addresses\n last_gen_ip = None\n for i in range(1, count_new_ips + 1):\n ip = IPAddress.from_int(self.max_uncertain_local_ip.to_int() + i)\n # exclude the definite broadcast address\n if self.priv_ip_segment:\n if ip.to_int() >= self.priv_ip_segment.last_address().to_int():\n break\n uncertain_local_ips.add(ip)\n last_gen_ip = ip\n self.max_uncertain_local_ip = last_gen_ip\n\n # choose the uncertain IPs to return\n total_uncertain = min(count_uncertain, len(uncertain_local_ips))\n for _ in range(0, 
total_uncertain):\n random_local_ip = choice(sorted(uncertain_local_ips))\n retr_local_ips.append(str(random_local_ip))\n uncertain_local_ips.remove(random_local_ip)\n \n return retr_local_ips", "def test_list_vips_sort(self):\n resources = \"vips\"\n cmd = vip.ListVip(test_cli20.MyApp(sys.stdout), None)\n self._test_list_resources(resources, cmd,\n sort_key=[\"name\", \"id\"],\n sort_dir=[\"asc\", \"desc\"])", "def lotteryPendingTickets(ipSet):\n\t# make list of possible preducates and remove duplicates\n\tpredicates = [ip.predicate for ip in ipSet]\n\tseen = set()\n\tseen_add = seen.add\n\tpredicates = [pred for pred in predicates if not (pred in seen or seen_add(pred))]\n\n\tweightList = np.array([(pred.num_tickets - pred.num_pending) for pred in predicates])\n\t# make everything positive\n\tweightList = weightList.clip(min=0)\n\ttotalTickets = np.sum(weightList)\n\n\t# if all the available questions are pending\n\tif totalTickets == 0:\n\t\tchosenPred = choice(predicates)\n\telse:\n\t\tprobList = [float(weight)/float(totalTickets) for weight in weightList]\n\t\tchosenPred = np.random.choice(predicates, p=probList)\n\n\t#choose the item and then ip\n\tchosenPredSet = ipSet.filter(predicate=chosenPred)\n\titem = chooseItem(chosenPredSet)\n\tchosenIP = ipSet.get(predicate=chosenPred, item=item)\n\n\t# deliever tickets to the predicate\n\tchosenIP.predicate.award_ticket()\n\treturn chosenIP", "def test_fixed_ip_associate_pool_order(self):\n\n instance_uuid = self._create_instance()\n network = db.network_create_safe(self.ctxt, {})\n self.addCleanup(timeutils.clear_time_override)\n start = timeutils.utcnow()\n for i in range(1, 4):\n now = start - datetime.timedelta(hours=i)\n timeutils.set_time_override(now)\n address = self.create_fixed_ip(\n updated_at=now,\n address='10.1.0.%d' % i,\n network_id=network['id'])\n db.fixed_ip_associate_pool(self.ctxt, network['id'], instance_uuid)\n fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)\n self.assertEqual(fixed_ip['instance_uuid'], instance_uuid)", "def ipset_below():\n return IPSet(x=np.linspace(0, 10, 11), y=np.linspace(-1, 1, 11), x_new=np.linspace(-2, 2, 5))", "def get_ip(self):\n json_scheme = self.gen_def_json_scheme('GetPurchasedIpAddresses')\n json_obj = self.call_method_post(method='GetPurchasedIpAddresses ', json_scheme=json_scheme)\n self.iplist = IpList()\n for ip in json_obj['Value']:\n r = Ip()\n r.ip_addr = ip['Value']\n r.resid = ip['ResourceId']\n r.serverid = ip['ServerId'] if 'None' not in str(ip['ServerId']) else None\n self.iplist.append(r)", "def ipset_above():\n return IPSet(x=np.linspace(0, 10, 11), y=np.linspace(-1, 1, 11), x_new=np.linspace(8, 12, 5))", "def get_used_ips():\n\n all_ips = list()\n\n topologies = Topology.objects.all()\n\n ip_pattern = '\\d+\\.\\d+\\.\\d+\\.\\d+'\n for topology in topologies:\n try:\n json_data = json.loads(topology.json)\n except ValueError as ve:\n logger.error(ve)\n logger.error(\"Could not parse saved topology with id: %s\" % topology.id)\n continue\n\n for json_object in json_data:\n\n if \"userData\" in json_object and json_object[\"userData\"] is not None and \"ip\" in json_object[\"userData\"]:\n ud = json_object[\"userData\"]\n ip = ud[\"ip\"]\n if re.match(ip_pattern, ip) is None:\n logger.info('Found an invalid IP on topology: %s' % topology.id)\n logger.info(\"Invalid IP is %s\" % ip)\n logger.info(type(ip))\n continue\n\n last_octet = ip.split('.')[-1]\n # logger.debug(topology.id)\n # logger.info(\"'%s'\" % ip)\n # logger.info(last_octet)\n 
all_ips.append(int(last_octet))\n\n dhcp_leases = get_consumed_management_ips()\n all_ips.extend(dhcp_leases)\n\n logger.debug(\"sorting and returning all_ips\")\n all_ips.sort()\n return all_ips", "def digest_ips(self):\n all_subnets = {}\n self.subnets = []\n self.single_ips = []\n # extract all subnets\n for ip in self.iplist:\n subnet = self.__get_sutnet(ip)\n if all_subnets.has_key(subnet):\n all_subnets[subnet].append(ip)\n else:\n new_list = [ip]\n all_subnets[subnet] = new_list\n\n for subnet, subnet_ips in all_subnets.items():\n if len(subnet_ips) > 1:\n self.subnets.append(subnet)\n else:\n self.single_ips.append(subnet_ips[0])\n\n self.subnets.sort()\n self.single_ips.sort()", "def test_get_sync_instances_4(self):\n templates = self.machine_template_1 | self.machine_template_2\n res = templates._get_sync_instances()\n self.assertEqual(\n self._sorted(res), self._sorted(self.mit_1_1 | self.mit_1_2))", "def test_get_sync_instances_1(self):\n res = self.machine_template_1._get_sync_instances()\n self.assertEqual(\n self._sorted(res), self._sorted(self.mit_1_1 | self.mit_1_2))", "def sort(self):\n\t\tself.servers = sorted(self.servers, key=lambda s: s.load)\n\t\tself.servers = sorted(self.servers, key=lambda s: s.distance_class)\n\t\tself.servers = sorted(self.servers, key=lambda s: s.country == self.locale_info.country, reverse=True)", "def force_no_ordering(self):\n return []", "def get_dns(self) -> Set:\n if self.dn_set.should_update():\n contacts_data = self.get_contacts_data()\n self.dn_set.update(set(contacts_data.get_dns()))\n return self.dn_set.data", "def testPartialAndIncorrectSetter(self):\n _1 = [ (self.kl[0], 1), \n (self.kl[1], 1), \n (Customer, 1), ] * 2\n self.o.population = _1\n _2 = self.o.population\n self.assertEqual(len(_2), 3, \"got {}\".format(_2))\n _expect = set([(self.kl[0], 2),\n (self.kl[1], 2),\n (getattr(tp, \"RandConso\"), 6)])\n self.assertEqual(_2, _expect, \"something odd\")", "def __init__(self, initial_set=None, verbose=False, output=sys.stdout):\n\n if initial_set is None:\n initial_set = []\n\n self.__responses = []\n self.clear_responses()\n self.verbose = verbose\n self.output = output\n\n # Round Trip Time\n self.__rtt_avg = 0\n self.__rtt_min = 0\n self.__rtt_max = 0\n self.__packets_lost = 0\n\n for response in initial_set:\n self.append(response)", "def _init_ipaddress_ops(self):\n\n # retrieve local and external IPs\n all_ips_str = set(self.statistics.process_db_query(\"all(ipAddress)\", print_results=False))\n # external_ips_str = set(self.statistics.process_db_query(\"ipAddress(macAddress=%s)\" % self.get_probable_router_mac(), print_results=False)) # including router\n # local_ips_str = all_ips_str - external_ips_str\n external_ips = set()\n local_ips = set()\n all_ips = set()\n\n self.contains_priv_ips = False\n self.priv_ip_segment = None\n\n # convert IP strings to IPv4.IPAddress representation\n for ip in all_ips_str:\n if is_ipv4(ip):\n ip = IPAddress.parse(ip)\n # exclude local broadcast address and other special addresses\n if (not str(ip) == \"255.255.255.255\") and (not ip.is_localhost()) and (not ip.is_multicast()) and (\n not ip.is_reserved()) and (not ip.is_zero_conf()):\n all_ips.add(ip)\n\n for ip in all_ips:\n if ip.is_private():\n local_ips.add(ip)\n\n external_ips = all_ips - local_ips\n\n # save the certain unused local IPs of the network\n # to do that, divide the unused local Addressspace into chunks of (chunks_size) Addresses\n # initally only the first chunk will be used, but more chunks can be added to the pool 
of unused_local_ips if needed\n self.min_local_ip, self.max_local_ip = min(local_ips), max(local_ips)\n local_ip_range = (self.max_local_ip.to_int()) - (self.min_local_ip.to_int() + 1)\n if local_ip_range < 0:\n # for min,max pairs like (1,1), (1,2) there is no free address in between, but for (1,1) local_ip_range may be -1, because 1-(1+1)=-1\n local_ip_range = 0\n\n # chunk size can be adjusted if needed\n self.chunk_size = 200\n\n self.current_chunk = 1\n if local_ip_range < self.chunk_size:\n # there are not more than chunk_size unused IP Addresses to begin with\n self.chunks = 0\n self.chunk_remainder = local_ip_range\n else:\n # determine how many chunks of (chunk_size) Addresses there are and the save the remainder\n self.chunks = local_ip_range // self.chunk_size\n self.chunk_remainder = local_ip_range % self.chunk_size\n\n # add the first chunk of IP Addresses\n self.unused_local_ips = set()\n self.expand_unused_local_ips()\n\n # save the gathered information for efficient later use\n self.external_ips = frozenset(external_ips)\n self.remaining_external_ips = external_ips\n self.max_uncertain_local_ip = self.max_local_ip\n self.local_ips = frozenset(local_ips)\n # print(\"External IPS: \" + str(external_ips))\n # print(\"LOCAL IPS: \" + str(local_ips))\n self.remaining_local_ips = local_ips\n self.uncertain_local_ips = set()", "def _reorder_collected(self, data):\n priority = {\n 'post': 1,\n 'get': 2,\n 'put': 2,\n 'patch': 2,\n 'head': 2,\n 'options': 2,\n 'delete': 3,\n }\n data = sorted(\n data,\n key=lambda x: priority.get(getattr(x, 'name', ''), 4))\n return data", "def sort_by_ip(unsorted):\n by_ip = {}\n\n for k, v in unsorted.items():\n for ip in v:\n if ip in by_ip and k not in by_ip[ip]:\n by_ip[ip].append(k)\n else:\n by_ip[ip] = [k]\n\n return OrderedDict(sorted(by_ip.items()))", "def test_collection_ordering(mocker, logged_in_apiclient, field):\n mocker.patch(\"ui.serializers.get_moira_client\")\n mocker.patch(\"ui.utils.get_moira_client\")\n CollectionSetPagination.page_size = 5\n client, user = logged_in_apiclient\n CollectionFactory.create_batch(10, owner=user)\n url = reverse(\"models-api:collection-list\")\n p1_response = client.get(\"{}?page=1&ordering={}\".format(url, field))\n assert len(p1_response.data[\"results\"]) == 5\n for i in range(4):\n assert (\n p1_response.data[\"results\"][i][field].lower()\n <= p1_response.data[\"results\"][i + 1][field].lower()\n )\n p2_response = client.get(\"{}?page=2&ordering={}\".format(url, field))\n assert (\n p1_response.data[\"results\"][-1][field].lower()\n <= p2_response.data[\"results\"][0][field].lower()\n )\n for i in range(4):\n assert (\n p2_response.data[\"results\"][i][field].lower()\n <= p2_response.data[\"results\"][i + 1][field].lower()\n )", "def set_blacklist(self):\n\n for name in self.__ipset:\n if self.verbose:\n print(\"Start create: \" + self.__ipset[name]['ipset-name'])\n\n # create ipset\n self.__process(name, self.__parser.create(name))\n\n if self.verbose:\n print('Done')", "def deletion_requests(_):\n return set()", "def fetch(self) -> None:\n self.__networks__.clear()\n networks = process_google_rr_ranges(self.__address_list_record__, self.loader_class)\n for network in networks:\n self.__networks__.append(network)\n self.updated = datetime.now()\n self.__networks__.sort(key=attrgetter('version', 'cidr'))", "def get_ordered_resources(self):\n \n return self.resources.visible().order_by('members__ordering')", "def test_get(self):\n\n # Grab the server's addresses...\n addrs = 
self.server.addresses\n\n # Make sure the public and private lists are present\n dtutil.assert_true('public' in addrs)\n dtutil.assert_true('private' in addrs)\n\n # Are IP addresses actually returned?", "def test_ipam_ip_addresses_partial_update(self):\n pass" ]
[ "0.5145373", "0.51405823", "0.5088774", "0.50321305", "0.50162804", "0.50126475", "0.49818024", "0.49794364", "0.48830393", "0.48404258", "0.4814375", "0.4799804", "0.47892106", "0.47680792", "0.47661233", "0.47639957", "0.47504014", "0.4743116", "0.468946", "0.46522427", "0.46331078", "0.45943975", "0.4589712", "0.45666662", "0.45515707", "0.45502076", "0.45461386", "0.45351556", "0.4527155", "0.4516468" ]
0.8169664
0
Generate a temporary standard edge file based on the given adjacency edge file.
def generateEdgeFile(adj_filename):
    # Parse the adjacency file
    edges = []
    edge_id = 0
    min_v = None
    max_v = None
    min_depth = 0
    depth = 0
    num_nodes = 0
    with open(adj_filename, 'r') as adj_f:
        r = csv.reader(adj_f, delimiter='\t')
        for row_idx, row in enumerate(r):
            if not num_nodes:
                num_nodes = len(row)
            else:
                assert len(row) == num_nodes
            for col_idx, v in enumerate(row):
                if v:
                    v = float(v)
                    if not min_v:
                        min_v = v
                        max_v = v
                    if (v < min_v):
                        min_v = v
                    if v > max_v:
                        max_v = v
                    start = 'Node' + str(row_idx + 1)
                    end = 'Node' + str(col_idx + 1)
                    depth += 1
                    edges.append([edge_id, start, end, v, v, depth, ''])
                    edge_id += 1
    max_depth = depth

    # Create temporary edge CSV file
    temp_edge_filename = 'temp_edges.csv'
    with open(temp_edge_filename, 'w') as edge_f:
        w = csv.writer(edge_f, delimiter='\t')
        header_row = ['Id', 'Node1', 'Node2', 'Property1', 'Property2', 'Property3', 'Property4']
        meta_row_min = ['MIN_VAL', 'NA', 'NA', str(min_v), str(min_v), str(min_depth), 'NA']
        meta_row_max = ['MAX_VAL', 'NA', 'NA', str(max_v), str(max_v), str(max_depth), 'NA']
        meta_row_use = ['USE_AS', 'S', 'E', 'C', 'W', 'D', 'L']
        w.writerow(header_row)
        w.writerow(meta_row_min)
        w.writerow(meta_row_max)
        w.writerow(meta_row_use)
        for edge in edges:
            w.writerow(edge)
    return temp_edge_filename
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_edge_features(edge_features, edge_file):\n dgl.data.utils.save_tensors(edge_file, edge_features)", "def export_graph(cls, graph, filename):\n edges = {}\n for node in graph.values():\n for neighbour, dist in node.distances.items():\n if (node.id, neighbour) in edges or (neighbour, node.id) in edges:\n continue\n edges[(node.id, neighbour)] = dist\n\n file_string = '{}\\n'.format(len(graph))\n for edge, dist in edges.items():\n file_string = file_string + '{} {} {}\\n'.format(edge[0], edge[1], dist)\n file_string = file_string[:-1] # Strip the last \\n\n\n with open(filename, 'w') as file:\n file.write(file_string)", "def writeEdges(self, fileName, format):\n edges = self.edgeIndex.values()\n if format == 'simple':\n f = open(fileName,'w')\n for edge in edges:\n f.write(\"%s -- %s\\n\" % (edge.startVertex.vertexNumber, edge.endVertex.vertexNumber))\n f.close()\n elif format == 'dot':\n f = open(fileName,'w')\n f.write(\"graph G { \\n\")\n for edge in edges:\n f.write(\"%s -- %s;\\n\" % (edge.startVertex.vertexNumber, edge.endVertex.vertexNumber))\n f.write(\"} \\n\")\n f.close()", "def write_edges(\n edges: Mapping[str, Any],\n filename: str,\n jsonlines: bool = False,\n gzipflag: bool = False,\n yaml: bool = False,\n):\n pass", "def _create_edge_skel(self, path):\n self.edges_path = os.path.join(path, \"edges\")\n os.makedirs(self.edges_path)", "def edge_table(self, G, myfile):\r\n layout = \"{0}{1:>6}{2:>6}{3:>6}\"\r\n header = layout.format(\"Neighbor\", \"\\t\", \"Edge Weight\", \"\\n\")\r\n myfile.write(header)\r\n for pre, node in list(G.edges):\r\n data = layout.format((pre, node), '\\t', \"{0:.2f}\".format(G.edges[pre, node]['weight']), \"\\n\")\r\n myfile.write(data)\r\n return myfile", "def generate_seed_file(kb_mapping, seed_file):\n r_file = open(kb_mapping, 'r')\n s_file = open(seed_file, 'w+')\n\n for line in r_file:\n values = line.strip().split(\"\\t\")\n relations = values[1].split(\" \")\n subsumptions = values[2].split(\" \")\n for subsumption in subsumptions:\n if subsumption == \"concept:relatedto\":\n continue\n for relation in relations:\n s_file.write(\"%s\\t%s\\t1.0\\n\" %(relation, subsumption))\n\n r_file.close()\n s_file.close()", "def writeEDGE(self):\n\t\tpass", "def _read_edge_file(self):\n self.edge_df = gt.remove_colons(pd.read_csv(self.edge_file, dtype=str))", "def write_graph(graph, file_name):\r\n all_vertices = graph.get_all_vertices()\r\n written_edges = []\r\n with open(file_name, 'w') as f:\r\n first_line = str(graph.get_no_vertices()) + ' ' + str(graph.get_no_edges()) + '\\n'\r\n f.write(first_line)\r\n for vertex in all_vertices:\r\n if graph.get_degree(vertex) == 0:\r\n line = str(vertex) + '\\n'\r\n f.write(line)\r\n else:\r\n for neighbour in graph.get_neighbours(vertex):\r\n if (vertex, neighbour) not in written_edges and (neighbour, vertex) not in written_edges:\r\n cost = graph.get_cost_of_edge(vertex, neighbour)\r\n written_edges.append((vertex, neighbour))\r\n line = str(vertex) + ' ' + str(neighbour) + ' ' + str(cost) + '\\n'\r\n f.write(line)", "def write_gexf_format(graph_file, adjacency, users_map, node_size=def_node_size,\n node_color=def_node_color, node_label=def_node_label,\n label_size_threshold=-1):\n graph = open(graph_file, mode='w')\n graph.write(\"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n <gexf xmlns=\"http://www.gexf.net/1.2draft\" xmlns:viz=\"http://www.gexf.net/1.1draft/viz\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://www.gexf.net/1.2draft 
http://www.gexf.net/1.2draft/gexf.xsd\" version=\"1.2\">\n <graph mode=\"static\" defaultedgetype=\"directed\">\n \"\"\")\n graph.write('<nodes>\\n')\n for user_name in users_map:\n id = users_map[user_name][SERIAL_IDX]\n size = node_size(user_name)\n r, g, b = node_color(user_name)\n label = '' if size < label_size_threshold else node_label(user_name)\n graph.write('<node id=\"{0}\" label=\"{1}\">\\n'.format(id, label))\n graph.write('<viz:size value=\"{0}\"></viz:size>\\n'.format(size))\n graph.write('<viz:color r=\"{0}\" g=\"{1}\" b=\"{2}\" a=\"1\"/>'.format(r, g, b))\n graph.write('</node>\\n')\n graph.write('</nodes>\\n')\n graph.write('<edges>\\n')\n # iterate over all non-zero elements in the adjacency matrix\n for i, j in zip(*adjacency.nonzero()):\n graph.write('<edge source=\"{0}\" target=\"{1}\" weight=\"{2}\"/>\\n'.format(i, j,\n adjacency[i, j]))\n graph.write('</edges>\\n')\n graph.write('</graph>\\n')\n graph.write('</gexf>\\n')\n graph.close()", "def read_edges_file(edge_file, edge_data_dict):\n if edge_file == \"\" or edge_file == None:\n return None\n\n # Read the file from here.\n # <global_src_id> <global_dst_id> <type_eid> <etype> <attributes>\n # global_src_id -- global idx for the source node ... line # in the graph_nodes.txt\n # global_dst_id -- global idx for the destination id node ... line # in the graph_nodes.txt\n\n edge_data_df = csv.read_csv(\n edge_file,\n read_options=pyarrow.csv.ReadOptions(autogenerate_column_names=True),\n parse_options=pyarrow.csv.ParseOptions(delimiter=\" \"),\n )\n edge_data_dict = {}\n edge_data_dict[constants.GLOBAL_SRC_ID] = edge_data_df[\"f0\"].to_numpy()\n edge_data_dict[constants.GLOBAL_DST_ID] = edge_data_df[\"f1\"].to_numpy()\n edge_data_dict[constants.GLOBAL_TYPE_EID] = edge_data_df[\"f2\"].to_numpy()\n edge_data_dict[constants.ETYPE_ID] = edge_data_df[\"f3\"].to_numpy()\n return edge_data_dict", "def save_edge(self, edge: Edge) -> Edge:", "def write_edges_shp(self,shpname,extra_fields=[]): \n base_dtype = [('edge_id1',np.int32),\n ('length',np.float64),\n ('depth_mean',np.float64)]\n \n side_depths_mean = self.edge_depths()\n \n try:\n side_depths_max = self.side_depths_max()\n extra_fields.append( ('depth_max',np.float64, lambda e: side_depths_max[e]) )\n except:\n pass\n \n \n for efi in range(len(extra_fields)):\n fname,ftype,ffunc = extra_fields[efi]\n if ftype == int:\n ftype = np.int32\n base_dtype.append( (fname,ftype) )\n \n edges = self.edges_as_nodes_cells_mark()\n vertices = self.nodes['x']\n \n edge_data = np.zeros(len(edges), dtype=base_dtype)\n edge_geoms = [None]*len(edges)\n \n for edge_id in range(edges.shape[0]):\n if edge_id % 500 == 0:\n print(\"%0.2g%%\"%(100.*edge_id/edges.shape[0]))\n \n nodes = vertices[edges[edge_id,:2]]\n g = geometry.LineString(nodes)\n edge_geoms[edge_id] = g\n edge_data[edge_id]['length'] = g.length\n edge_data[edge_id]['edge_id1'] = edge_id + 1\n edge_data[edge_id]['depth_mean'] = side_depths_mean[edge_id]\n\n for fname,ftype,ffunc in extra_fields:\n edge_data[edge_id][fname] = ffunc(edge_id)\n \n wkb2shp.wkb2shp(shpname,input_wkbs=edge_geoms,fields=edge_data,\n overwrite=True)", "def write_edgelist(H, path, delimiter=\" \", encoding=\"utf-8\"):\n with open(path, \"wb\") as file:\n for line in generate_edgelist(H, delimiter):\n line += \"\\n\"\n file.write(line.encode(encoding))", "def addEdge_file(self, path):\n with open(path, 'r') as File:\n for line in File.readlines():\n ints = list(map(int, line.strip().split())) \n u = ints[0]\n v = ints[1:]\n for i in v:\n 
self.addEdge(u, i)", "def read_graph():\n return nx.read_edgelist('edges_new.txt', delimiter='\\t')", "def write_edge(self, record) -> None:\n pass", "def write_graph(g, filename):\n with open(filename, 'w') as f:\n f.write(repr(g))", "def create_dot_file(edge_list, root_node=None):\n # Generate the dot language input - could have used a template language like\n # Cheetah but decided this could be an exercise for the user... using\n # Python's built-in template string handling\n edges = ' '.join(['%s -> %s;' % (src, tgt) for src, tgt in edge_list])\n if root_node:\n # Visually identify the important \"root\" node\n node_def = 'node [shape = doublecircle]; %s; node [shape = circle];'%root_node\n graph = 'digraph G { %s %s }' % (node_def, edges)\n else:\n graph = 'digraph G { %s }'%edges\n return graph", "def generate_file(file_name, node_keys):\n if file_name is None:\n raise ValueError(\"'file_name' is not present. This was created by @Edd1e234\")\n if node_keys is None or len(node_keys) is 0:\n raise ValueError(\"'node_keys' has no values. This was created by @Edd1e234\")\n\n file = open(file_name, \"w+\")\n for i in node_keys:\n file.write(i + \"\\n\")", "def generate_output(input_filename: str, output_filename: str, goal_node: Node,\n generated: set) -> None:\n\n input_stream = io.open(input_filename, 'r', encoding='utf-8', errors='ignore',\n newline='\\n')\n with open(output_filename, 'w') as out_file:\n for i in range(0, 10):\n out_file.write(input_stream.readline().rstrip())\n out_file.write('\\n')\n \"\"\" The first ten lines of the output file are identical to those in the \n input file. The tenth line should be skipped because it's blank.\"\"\"\n out_file.write(str(goal_node.path_cost) + '\\n')\n # Line 11 of the output, the depth level d\n out_file.write(str(len(generated)) + '\\n')\n # Line 12 of the output, the total number of nodes generated\n\n # Writing Line 13 of the output, the sequence of moves\n length = len(goal_node.path_history)\n for i in range(length - 1):\n out_file.write(goal_node.path_history[i] + ' ')\n out_file.write(goal_node.path_history[length - 1] + '\\n')\n\n # Writing Line 14 of the output, the f(n) values\n f_line = str(goal_node.f) + ' '\n parent = goal_node.parent\n while parent: # Loop stops when parent == None\n f_line += (str(parent.f) + ' ')\n parent = parent.parent\n f_list = f_line.split(' ')\n # Breaks down the string to the integers it contains\n reverse = ''\n for i in range(len(f_list) - 2, -1, -1):\n # f_line[len(f_line)-1] is an extra whitespace character and\n # thus shouldn't be copied\n reverse += str(f_list[i])\n if i != 0:\n reverse += ' '\n \"\"\" The order of the f(n) values in f_line is from goal node \n to root node. 
The four lines above reverse the order, which \n is what the output format expects.\"\"\"\n out_file.write(reverse)\n\n out_file.close()", "def write_graph(graph, filename):\n with open(filename, 'w') as file: # open the file\n # record the number of nodes and edges\n num_nodes, num_edges = GraphProcessing.compute_num_nodes(graph), GraphProcessing.compute_num_edges(graph)\n file.write(\" \".join(list([str(num_nodes), str(num_edges)])) + \"\\n\")\n for source_node in graph.keys(): # for every node in the graph\n # for every other node and corresponding weight\n for terminal_node, weight in graph[source_node].items():\n # write the edge to the file as the first node, second node and edge weight\n file.write(\" \".join(list([str(source_node), str(terminal_node), str(weight)])) + \"\\n\")", "def write_exact_graph_to_file(self, output_file):\n print(\"Writing output file.\")\n with open(output_file, 'w') as f:\n f.write(\"# graph number = 0 name = interval_graph\\n\")\n f.write(str(len(self.vertices)) + \"\\n\")\n for node in self.vertices:\n for arc in self.out_arcs_lists[node]:\n s = self.arc_info[arc]['start']\n t = self.arc_info[arc]['destin']\n w = self.arc_info[arc]['weight']\n f.write(\"{} {} {}\\n\".format(s, t, w))", "def create_filtered_network_file(network_file_prefix, filtered_network_file, ueids):\n network_file_method_attribute = network_file_prefix + \"_method_id.eda\"\n network_file_source_attribute = network_file_prefix + \"_source.eda\"\n #biana_output_converter.filter_network_by_interaction_type(network_attribute_file_name = network_file_method_attribute, network_out_file_name = network_file_prefix + \"_y2h.sif\", interaction_type=\"y2h\")\n #biana_output_converter.filter_network_by_interaction_type(network_attribute_file_name = network_file_method_attribute, network_out_file_name = network_file_prefix + \"_tap.sif\", interaction_type=\"tap\")\n #biana_output_converter.filter_network_by_interaction_type(network_attribute_file_name = network_file_method_attribute, network_out_file_name = network_file_prefix + \"_no_tap.sif\", interaction_type=\"tap\", reverse_selection=True)\n #biana_output_converter.filter_network_by_interaction_type(network_attribute_file_name = network_file_method_attribute, network_out_file_name = filtered_network_file + \".no_tap\", interaction_type=\"tap\", reverse_selection=True)\n valid_ids = set([0,4,96,676,729,19,6,7,858,59,109]) # TAP\n biana_output_converter.filter_network_by_interaction_attribute_value(network_attribute_file_name = network_file_method_attribute, network_out_file_name = filtered_network_file + \".no_tap\", accept_attribute_value = lambda x: int(x) not in valid_ids)\n\n #interaction_to_sources = get_interaction_sources(network_file_source_attribute)\n with open(filtered_network_file, 'w') as f:\n for line in open(filtered_network_file + \".no_tap\"):\n id1, dummy, id2 = line.split()\n # Filter self interactions\n if id1 == id2:\n continue\n # Remove singleton interacions (that has evidence only from one database)\n #id_pair = sorted([id1, id2])\n #if is_singleton(interaction_to_sources[(id_pair[0], id_pair[1])]):\n # continue\n # Do not include ambigous user entities\n if id1 in ueids and id2 in ueids:\n f.write(line)\n return", "def test_write_tsv2():\n graph = NxGraph()\n graph.add_node(\"A\", id=\"A\", **{\"name\": \"Node A\", \"category\": [\"biolink:NamedThing\", \"biolink:Gene\"]})\n graph.add_node(\"B\", id=\"B\", **{\"name\": \"Node B\"})\n graph.add_node(\"C\", id=\"C\", **{\"name\": \"Node C\"})\n graph.add_node(\"D\", 
id=\"D\", **{\"name\": \"Node D\"})\n graph.add_node(\"E\", id=\"E\", **{\"name\": \"Node E\"})\n graph.add_node(\"F\", id=\"F\", **{\"name\": \"Node F\"})\n graph.add_edge(\n \"B\", \"A\", **{\"subject\": \"B\", \"object\": \"A\", \"predicate\": \"biolink:sub_class_of\"}\n )\n graph.add_edge(\n \"C\", \"B\", **{\"subject\": \"C\", \"object\": \"B\", \"predicate\": \"biolink:sub_class_of\"}\n )\n graph.add_edge(\n \"D\", \"C\", **{\"subject\": \"D\", \"object\": \"C\", \"predicate\": \"biolink:sub_class_of\"}\n )\n graph.add_edge(\n \"D\", \"A\", **{\"subject\": \"D\", \"object\": \"A\", \"predicate\": \"biolink:related_to\"}\n )\n graph.add_edge(\n \"E\", \"D\", **{\"subject\": \"E\", \"object\": \"D\", \"predicate\": \"biolink:sub_class_of\"}\n )\n graph.add_edge(\n \"F\", \"D\", **{\"subject\": \"F\", \"object\": \"D\", \"predicate\": \"biolink:sub_class_of\"}\n )\n\n t = Transformer()\n s = TsvSink(\n owner=t,\n filename=os.path.join(TARGET_DIR, \"test_graph_archive\"),\n format=\"tsv\",\n compression=\"tar\",\n node_properties={\"id\", \"name\"},\n edge_properties={\"subject\", \"predicate\", \"object\", \"relation\"},\n )\n for n, data in graph.nodes(data=True):\n s.write_node(data)\n for u, v, k, data in graph.edges(data=True, keys=True):\n s.write_edge(data)\n s.finalize()\n\n assert os.path.exists(os.path.join(TARGET_DIR, \"test_graph_archive.tar\"))", "def make_kosaraju(filename, number_of_nodes, number_of_clusters, smallest_degree):\n\n file = open(filename, 'w')\n tmp = generate_graph(number_of_nodes, number_of_clusters, smallest_degree)\n for i in tmp:\n for j in tmp[i]:\n file.write(\"{} {}\\n\".format(i, j))", "def write_graph(edges, out):\n edges = set(imap(frozenset, edges))\n vertices = layout_graph(edges)\n out.write(\"begin vertices\\n\")\n for id_, x, y in vertices:\n out.write(\"%s %s %s\\n\" % (id_, x, y))\n out.write(\"end vertices\\n\\nbegin edges\\n\")\n for a, b in edges:\n out.write(\"%s %s\\n\" % (a, b))\n out.write(\"end edges\\n\")", "def transform_from_edgefile(filename, seed=None, dim=2):\n g = Graph.Read_Ncol(filename)\n\n layout = g.layout_drl(seed=seed,\n dim=dim,\n )\n\n xy = pd.DataFrame(vars(layout)[\"_coords\"], index=g.vs[\"name\"])\n\n return xy", "def generate_edge_tablename(src_label, label, dst_label):\n\n tablename = 'edge_{}{}{}'.format(\n src_label.replace('_', ''),\n label.replace('_', ''),\n dst_label.replace('_', ''),\n )\n\n # If the name is too long, prepend it with the first 8 hex of it's hash\n # truncate the each part of the name\n if len(tablename) > 40:\n oldname = tablename\n logger.debug('Edge tablename {} too long, shortening'.format(oldname))\n tablename = 'edge_{}_{}'.format(\n str(hashlib.md5(tablename.encode('utf-8')).hexdigest())[:8],\n \"{}{}{}\".format(\n ''.join([a[:2] for a in src_label.split('_')])[:10],\n ''.join([a[:2] for a in label.split('_')])[:7],\n ''.join([a[:2] for a in dst_label.split('_')])[:10],\n )\n )\n logger.debug('Shortening {} -> {}'.format(oldname, tablename))\n\n return tablename" ]
[ "0.6024162", "0.58897555", "0.5825257", "0.5786747", "0.5727056", "0.5714465", "0.5684711", "0.567218", "0.56542605", "0.5579996", "0.55602103", "0.5531733", "0.54472744", "0.5443352", "0.54141134", "0.53695107", "0.5321498", "0.5285575", "0.5255383", "0.5216882", "0.5208768", "0.5200115", "0.5189514", "0.51531833", "0.5133258", "0.5109307", "0.51087505", "0.50990134", "0.50830036", "0.5081278" ]
0.7351656
0
Return the given boundary as a Q object suitable for querysets.
def _boundary_filter(self, south, west, north, east):
    return Q(latitude__gt=south, longitude__gt=west, latitude__lt=north, longitude__lt=east)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def boundary(self): # -> BaseGeometry:\n ...", "def _create_filter_object(form_data: Dict) -> Q:\n filter_object = Q(title__icontains=form_data[\"title\"])\n filter_object &= Q(author__icontains=form_data[\"author\"])\n filter_object &= Q(\n publication_language__icontains=form_data[\"publication_language\"]\n )\n if form_data[\"publication_date_start\"]:\n filter_object &= Q(\n publication_date__gte=form_data[\"publication_date_start\"]\n )\n if form_data[\"publication_date_end\"]:\n filter_object &= Q(publication_date__lte=form_data[\"publication_date_end\"])\n return filter_object", "def boundary(*args, caching: bool=True, endPoint: bool=False, endPointTolerance: Union[float,\n bool]=0.1, nodeState: Union[int, bool]=0, order: bool=True, constructionHistory:\n bool=True, name: AnyStr=\"\", object: bool=True, polygon: int=0, range: bool=True,\n q=True, query=True, e=True, edit=True, **kwargs)->Union[List[AnyStr], Any]:\n pass", "def boundary(self):\n return self._boundary", "def get_query(self):\n q = db.Query(self.KIND,keys_only=self.KEYS_ONLY)\n for prop, value in self.FILTERS:\n q.filter(\"%s =\" % prop, value)\n if self.ancestor:\n q.ancestor(self.ancestor)\n q.order(self.ORDER_BY)\n return q", "def bw_query(self, **kwargs):\n raise NotImplementedError()", "def bound(self, lower: Bound, upper: Bound) -> BoundedType:\n return BoundedType(\n self, BoundedType.convert_bound(lower), BoundedType.convert_bound(upper)\n )", "def _boundary_value(self) -> str:\n ...", "def make_complex_query_set(self):\n\n query = self.request.GET.get(\"q\")\n category = self.request.GET.get(\"category\")\n title = self.request.GET.get(\"title\")\n\n q_object = Q()\n\n if query:\n q_object.add((\n Q(category__category_name__icontains=query) |\n Q(title__icontains=query) \n ), Q.OR)\n\n else:\n if category:\n q_object.add(\n Q(category=category),\n Q.AND)\n if title:\n q_object.add(\n Q(title__icontains=title),\n Q.AND)\n\n return q_object", "def __getQuerysetGivenInterval(model, start_date, end_date):\n cur_model = {\n 'donor': Donor,\n 'donation': Donation,\n 'item': Item\n }.get(model, Donor.objects.none())\n\n # might need following lines when changing back to created_at:\n # date_format = \"%Y-%m-%d\"\n # if start_date is not None:\n # timezone_unaware_start_date = datetime.strptime(start_date, date_format)\n # timezone_aware_start_date = pytz.utc.localize(timezone_unaware_start_date)\n #\n # if end_date is not None:\n # timezone_unaware_end_date = datetime.strptime(end_date, date_format)\n # timezone_aware_end_date = pytz.utc.localize(timezone_unaware_end_date).date()\n\n if start_date is not None and end_date is not None:\n return cur_model.objects.filter(documented_at__range=(start_date, end_date))\n elif start_date is not None and end_date is None:\n return cur_model.objects.filter(documented_at__gte=start_date)\n elif start_date is None and end_date is not None:\n return cur_model.objects.filter(documented_at__lte=end_date)\n else:\n return cur_model.objects.all()", "def ge(cls, __and=True, __key=None, **kwargs):\r\n return _queries(\">=\", __key, __and, kwargs.items())", "def for_boundary(self, south, west, north, east):\n south = float(south)\n west = float(west)\n north = float(north)\n east = float(east)\n if west * east < 0:\n # Boundary overlaps a hemisphere line, look up either side of it.\n if (180 - west) < west:\n # Closest to International Date Line.\n lookup = (self._boundary_filter(south, west, north, 180) | \n self._boundary_filter(south, -180, north, east))\n else:\n # Closest to 
Prime Meridian.\n lookup = (self._boundary_filter(south, west, north, 0) | \n self._boundary_filter(south, 0, north, east))\n else:\n lookup = self._boundary_filter(south, west, north, east)\n return self.filter(lookup)", "def test_trucks_api_bounds_query_string(self):\n # SF boundary coordinates - we want to test within these\n swBound = (37.708418, -122.500943)\n neBound = (37.812780, -122.383870)\n\n for i in range(0, 5):\n # generate some random boundaries to test within\n swLat = random.uniform(swBound[0], neBound[0])\n swLng = random.uniform(swBound[1], neBound[1])\n neLat = random.uniform(swBound[0], neBound[0])\n neLng = random.uniform(swBound[1], neBound[1])\n query_bounds = \",\".join([str(x) for x in [swLat, swLng, neLat, neLng]])\n\n # make the request\n resp = self.app.get('/trucks?bounds=%s' % query_bounds)\n self.assertEqual(resp.status_code, 200)\n\n # ensure the returned items fall in the correct bounds\n data = json.loads(resp.data)['resp']\n for item in data:\n lat = float(item['latitude'])\n lng = float(item['longitude'])\n assert lat >= swLat\n assert lat <= neLat\n assert lng >= swLng\n assert lng <= neLng", "def global_q(self):\n if not self._global_search_value:\n return Q()\n kw = \"{}__{}\".format(self.field_name, self.lookup_expr)\n return Q(**{kw: self._global_search_value})", "def build_range_clause(field, quantifier, string):\r\n range_str = {}\r\n if quantifier == '<':\r\n range_str['lt'] = string\r\n elif quantifier == '<=':\r\n range_str['lte'] = string\r\n elif quantifier == '>':\r\n range_str['gt'] = string\r\n elif quantifier == '>=':\r\n range_str['gte'] = string\r\n elif quantifier == '!=' or quantifier == '==':\r\n print('Wrong function called for ', quantifier)\r\n return\r\n else:\r\n print('bad quantifier input')\r\n return\r\n answer = {}\r\n tmp = {}\r\n tmp[field] = range_str\r\n answer['range'] = tmp\r\n return answer", "def getRecordFilter(self):\n begin, end = self.getBeginEnd()\n return (Q(**{\"report__date_range_begin__gte\" : begin})\n & Q(**{\"report__date_range_begin__lte\": end}))", "def query_str(self):\n return \"(seqid == %r) & (start >= %s) & (end <= %s)\" % (self.seqid, self.start, self.end)", "def make_complex_query_set(self):\n\n query = self.request.GET.get(\"q\")\n program_id = self.request.META.get('HTTP_X_SVMS_PROGRAM_ID')\n category = self.request.GET.get(\"category\")\n title = self.request.GET.get(\"title\")\n level = self.request.GET.get(\"level\")\n description = self.request.GET.get(\"description\")\n status = self.request.GET.get(\"status\")\n job_tag = self.request.GET.get(\"job_tag\")\n\n q_object = Q()\n\n if query:\n q_object.add((\n Q(program_id=query) |\n Q(category=query) |\n Q(title__icontains=query) |\n #Q(category__category_name__icontains=query) |\n Q(description__icontains=query) |\n Q(job_tag__tag__in=str(query).split(\",\"))\n ), Q.OR)\n\n if query.isnumeric():\n q_object.add(\n Q(level__icontains=int(query)), Q.OR)\n\n q_object.add(Q(status=strtobool(query)), Q.OR) if query in [\n \"true\", \"True\", \"False\", \"false\"] else None\n\n else:\n if program_id:\n q_object.add(\n Q(program_id=program_id),\n Q.AND)\n\n if category:\n q_object.add(\n Q(category=category),\n Q.AND)\n if title:\n q_object.add(\n Q(title__icontains=title),\n Q.AND)\n\n if description:\n q_object.add(\n Q(description__icontains=description), Q.AND)\n\n if job_tag:\n q_object.add(\n Q(job_tag__tag__in=str(job_tag).split(\",\")),\n Q.AND)\n\n if level:\n if level.isnumeric():\n q_object.add(\n Q(level__icontains=int(level)),\n 
Q.AND)\n else:\n raise Exception(\n ErrorMessage.WRONG_FIELD_TYPE.value.format(\"level\",\n \"numeric\"))\n\n q_object.add(Q(status=strtobool(status)), Q.AND) if status in [\n \"true\", \"True\", \"False\", \"false\"] else None\n\n return q_object", "def boundary_conditions(self):\n pass", "def generateDRQFor(self, domain):\n block = BasicRangeQuery.generateBaseDRQ(self, domain)\n query = set()\n for set_of_queries in block: # Put all Queries from all Blocks into one big block\n query.update(set_of_queries)\n return query", "def bounding_quadrant(self):\n quadrant = Quadrant.empty_quadrant(2)\n for point in self.endpoints:\n quadrant.add_point(point)\n return quadrant", "def build_query_structure(self):\n query_list = list()\n filter_list = list()\n for key, val in self.q_dict.items():\n if key in self.es_query_keys:\n query_list.append(\n {\"match\": {\".\".join(key.split(\"_\")): val[0]}})\n elif key in self.es_date_keys:\n filter_list.append(\n {\"range\": {\".\".join(key.split(\"_\")): val}})\n elif \":\" in val[0]:\n #for handling queries like dd_dct=gte:1\n range_val = val[0].split(\":\")\n filter_list.append({\"range\": {\".\".join(key.split(\"_\")): {\n range_val[0]: int(range_val[1])}}})\n else:\n filter_list.append(\n {\"terms\": {\".\".join(key.split(\"_\")): val}})\n return query_list, filter_list", "def q(self):\n return self.model.gmmobjective(self.params, self.weights)", "def get_exact(self):\n from sympy.polys.domains import QQ\n return QQ", "def query(self):\n q = dict(self._query)\n fworker_check = [{\"spec._fworker\": {\"$exists\": False}}, {\"spec._fworker\": None}, {\"spec._fworker\": self.name}]\n if \"$or\" in q:\n q[\"$and\"] = q.get(\"$and\", [])\n q[\"$and\"].extend([{\"$or\": q.pop(\"$or\")}, {\"$or\": fworker_check}])\n else:\n q[\"$or\"] = fworker_check\n if self.category and isinstance(self.category, str):\n if self.category == \"__none__\":\n q[\"spec._category\"] = {\"$exists\": False}\n else:\n q[\"spec._category\"] = self.category\n elif self.category: # category is list of str\n q[\"spec._category\"] = {\"$in\": self.category}\n\n return q", "def Q(self):\n return self._Q", "def generateDRQFor(self, domain):\n block = PatternRangeQuery.generateBaseDRQ(self, domain)\n query = set()\n for set_of_queries in block: # Put the contents of all blocks into one big block\n query.update(set_of_queries)\n return query", "def boundary(self):\n return self.substrates.boundary", "def getMapBoundedQuery(minLon, minLat, maxLon, maxLat, today=False):\n return None", "def query(self):\n return Query(from_=self)" ]
[ "0.57327753", "0.5393648", "0.5344611", "0.5293871", "0.52651924", "0.5209962", "0.5151361", "0.51221335", "0.509879", "0.5091258", "0.50874144", "0.5077453", "0.50735956", "0.50507224", "0.50312674", "0.4991418", "0.49901503", "0.49759102", "0.49629217", "0.49530393", "0.49454066", "0.49364653", "0.49224892", "0.48918697", "0.48806894", "0.4846356", "0.48461705", "0.48425937", "0.48138148", "0.48059633" ]
0.6247211
0
Return the airports in the given boundary, combining multiple checks if the boundary overlaps a hemisphere line.
def for_boundary(self, south, west, north, east):
    south = float(south)
    west = float(west)
    north = float(north)
    east = float(east)
    if west * east < 0:
        # Boundary overlaps a hemisphere line, look up either side of it.
        if (180 - west) < west:
            # Closest to International Date Line.
            lookup = (self._boundary_filter(south, west, north, 180) |
                      self._boundary_filter(south, -180, north, east))
        else:
            # Closest to Prime Meridian.
            lookup = (self._boundary_filter(south, west, north, 0) |
                      self._boundary_filter(south, 0, north, east))
    else:
        lookup = self._boundary_filter(south, west, north, east)
    return self.filter(lookup)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def detect_area_with_boundary(self, boundary): \r\n # create a polynome approximation with the boundary curve\r\n #update boundary\r\n self.set_boundary(boundary)\r\n \r\n # step 1: find the line connected head and tail\r\n head = boundary.get_head(boundary.mode)\r\n tail = boundary.get_tail(boundary.mode)\r\n \r\n major_axis_angle = self.compute_angle(head, tail) \r\n major_axis_length = math.sqrt( (head[1]- tail[1])**2 +(head[0]- tail[0])**2)\r\n # step 2: find cropping positions\r\n tail_dropoff_pos = self.coordinate_shift(tail, \\\r\n major_axis_length * self.tail_dropoff_threshold, \\\r\n major_axis_angle) \r\n \r\n head_dropoff_pos = self.coordinate_shift(head, \\\r\n - major_axis_length * self.head_dropoff_threshold,\\\r\n major_axis_angle)\r\n \r\n # find the intersection points with boundary_curve\r\n # minor_axis_angle is range from (-pi/2 , pi/2)\r\n if major_axis_angle < 0:\r\n minor_axis_angle = major_axis_angle + math.pi / 2\r\n else:\r\n minor_axis_angle = major_axis_angle - math.pi / 2\r\n \r\n if minor_axis_angle > 0:\r\n minor_axis_angle_prime = minor_axis_angle - math.pi\r\n else:\r\n minor_axis_angle_prime = minor_axis_angle + math.pi\r\n \r\n \r\n # find the two vetices of polygone which are close to tail \r\n min_diff_angle_0 = math.inf\r\n min_diff_angle_pi = math.inf\r\n \r\n curve = boundary.get_approx_curve(boundary.mode)\r\n \r\n for (r,c) in curve:\r\n # compute the angle for the line connecting (bd_x, bd_y) and tail_dropoff_pos\r\n bd_angle = self.compute_angle( (c,r), tail_dropoff_pos )\r\n \r\n diff_angle = np.abs( bd_angle - minor_axis_angle) \r\n if (diff_angle < min_diff_angle_0):\r\n min_diff_angle_0 = diff_angle\r\n pos_1 = (r,c)\r\n \r\n diff_angle = np.abs(bd_angle - minor_axis_angle_prime ) \r\n if (diff_angle < min_diff_angle_pi):\r\n min_diff_angle_pi = diff_angle\r\n pos_2 = (r,c)\r\n \r\n if pos_1[0] < pos_2[0]:\r\n self.__tail_poly_vertice_upper = pos_1\r\n self.__tail_poly_vertice_lower = pos_2\r\n else:\r\n self.__tail_poly_vertice_upper = pos_2\r\n self.__tail_poly_vertice_lower = pos_1\r\n \r\n # find the two vertices of polygone which are close to head\r\n min_diff_angle_0 = math.inf\r\n min_diff_angle_pi = math.inf\r\n for (r,c) in curve:\r\n # compute the angle for the line connecting (bd_x, bd_y) and head_dropoff_pos\r\n bd_angle = self.compute_angle( (c,r), head_dropoff_pos )\r\n # diff_angle is range from (0, pi)\r\n diff_angle = np.abs( bd_angle - minor_axis_angle) \r\n if (diff_angle < min_diff_angle_0):\r\n min_diff_angle_0 = diff_angle\r\n pos_1 = (r,c)\r\n diff_angle = np.abs(bd_angle - minor_axis_angle_prime ) \r\n if (diff_angle < min_diff_angle_pi):\r\n min_diff_angle_pi = diff_angle\r\n pos_2 = (r,c)\r\n \r\n if pos_1[0] < pos_2[0]:\r\n self.__head_poly_vertice_upper = pos_1\r\n self.__head_poly_vertice_lower = pos_2\r\n else:\r\n self.__head_poly_vertice_upper = pos_2\r\n self.__head_poly_vertice_lower = pos_1\r\n \r\n # construct the vertices of polygone\r\n self.vertices = np.array([self.__tail_poly_vertice_upper, \\\r\n self.__tail_poly_vertice_lower, \\\r\n self.__head_poly_vertice_lower, \\\r\n self.__head_poly_vertice_upper])\r\n \r\n self.vertices = np.round(self.vertices).astype(int)\r\n \r\n # creat area form vertices\r\n self.vertex2poly()", "def get_ac_within_bounds(airport: list, radius: int) -> list:\n # Convert from nautical miles to km\n radius_km = miles_to_km(radius)\n\n airport_lat = airport[\"latitude_deg\"]\n airport_long = airport[\"longitude_deg\"]\n\n # Obtain boundaries for api call\n lat_max, 
long_max = calculate_buffer_geoposition(\n airport_lat, airport_long, radius_km, 45\n )\n\n lat_min, long_min = calculate_buffer_geoposition(\n airport_lat, airport_long, radius_km, 45 + 180\n )\n\n api = OpenSkyApi()\n # bbox = (min latitude, max latitude, min longitude, max longitude)\n states = api.get_states(bbox=(lat_min, lat_max, long_min, long_max))\n\n return states", "def ground_trajectory(\n self, airport: Union[str, \"Airport\"]\n ) -> Iterator[\"Flight\"]:\n\n from traffic.data import airports\n\n self = cast(\"Flight\", self)\n airport_ = airports[airport] if isinstance(airport, str) else airport\n assert airport_ is not None\n\n has_onground = \"onground\" in self.data.columns\n criterion = \"altitude < 5000\"\n if has_onground:\n criterion += \" or onground\"\n\n low_altitude = self.query(criterion)\n if low_altitude is None:\n return\n if airport_.shape is None:\n raise ValueError(\"No shape available for the given airport\")\n for low_segment in low_altitude.split(\"10T\"):\n for airport_segment in low_segment.clip_iterate(\n airport_.shape.buffer(5e-3)\n ):\n if has_onground:\n onground = airport_segment.query(\"onground\")\n else:\n onground = airport_segment.query(\n \"altitude < 500 or altitude != altitude\"\n )\n if onground is not None:\n yield onground", "def boundary_conditions(particle_outer_radius, boundary_temp):\n\n boundary_condition = [particle_outer_radius, boundary_temp]\n\n return boundary_condition", "def find_airport_cylinder_intersection(flight_id, latitudes, longitudes,\n airport_id, radius, is_destination):\n log.debug(\"Finding intersection for flight %s, with airport %s at radius %s\",\n flight_id, airport_id, radius)\n\n radius_m = radius * NM_CONVERSION_TO_M\n\n # Get the connection string\n connection = get_geo_db_connection()\n\n # Find the airport\n found = airport_finder(\"icao_ap_code\", airport_id)\n if found[0]:\n airport = found[1][0]\n airport_lon = airport['longitude']\n airport_lat = airport['latitude']\n\n # Make a list of augmented points\n augmented_points = make_augmented_points_from_positions(\n latitudes, longitudes, flight_id, connection)\n\n # Convert the points to a geographic feature\n geographic_trajectory = make_geographic_trajectory(augmented_points, flight_id, connection)\n\n # Make the buffer\n buffer = create_buffer(airport_lon, airport_lat, radius_m, connection)\n\n # The intersections between path and buffer\n intersections = find_line_poly_intersection_without_boundary(\n geographic_trajectory, buffer, connection)\n\n # Organise the outputs\n intersection_wkts = extract_intersection_wkts(intersections)\n\n # The origin dict included here just tells the extract routine that\n # this is not an origin flight.\n intersection_details = extract_details_from_intersection(\"\", intersection_wkts,\n {'is_origin': False}, flight_id)\n\n if len(intersection_details) > 0:\n x_y_sector_ids = reduce(merge_l_t, [intersection_details], [[], [], []])\n\n return x_y_sector_ids[0], x_y_sector_ids[1]\n else:\n return [], []\n else:\n return [], []", "def aligned_on_runway(\n self, airport: Union[str, \"Airport\"]\n ) -> Iterator[\"Flight\"]:\n\n from ..data import airports\n\n # The following cast secures the typing\n self = cast(\"Flight\", self)\n\n _airport = airports[airport] if isinstance(airport, str) else airport\n if (\n _airport is None\n or _airport.runways is None\n or _airport.runways.shape.is_empty\n ):\n return None\n\n if isinstance(_airport.runways.shape, LineString):\n candidate_shapes = [\n 
LineString(list(self.xy_time)).intersection(\n _airport.runways.shape.buffer(5e-4)\n )\n ]\n else:\n candidate_shapes = [\n LineString(list(self.xy_time)).intersection(\n on_runway.buffer(5e-4)\n )\n for on_runway in _airport.runways.shape.geoms\n ]\n\n for intersection in candidate_shapes:\n if intersection.is_empty:\n continue\n if isinstance(intersection, LineString):\n (*_, start), *_, (*_, stop) = intersection.coords\n segment = self.between(start, stop, strict=False)\n if segment is not None:\n yield segment\n if isinstance(intersection, MultiLineString):\n (*_, start), *_, (*_, stop) = intersection.geoms[0].coords\n for chunk in intersection.geoms:\n (*_, start_bak), *_, (*_, stop) = chunk.coords\n if stop - start > 40: # crossing runways and back\n start = start_bak\n segment = self.between(start, stop, strict=False)\n if segment is not None:\n yield segment", "def get_airport_start_end(result, geo_airport_cities):\n crs={'init': 'epsg:4326'}\n geometry_st = [Point(xy) for xy in zip(result.start_lon, result.start_lat)]\n geometry_end = [Point(xy) for xy in zip(result.end_lon, result.end_lat)]\n geo_st = gpd.GeoDataFrame(geometry_st, crs=crs, geometry=geometry_st)[['geometry']]\n geo_end = gpd.GeoDataFrame(geometry_end, crs=crs, geometry=geometry_end)[['geometry']]\n geo_st.crs = crs\n geo_end.crs = crs\n st_airport = pd.DataFrame(geo_st.within(geo_airport_cities['geometry'].unary_union.buffer(0.1)))\n st_airport.index=result.index\n result['geometry_st'] = st_airport\n end_airport = pd.DataFrame(geo_end.within(geo_airport_cities['geometry'].unary_union.buffer(0.1)))\n end_airport.index=result.index\n result['geometry_end'] = end_airport\n st_florence = pd.DataFrame(geo_st.within(geo_airport_cities['geometry'].loc[1].buffer(0.1)))\n st_florence.index=result.index\n result['geometry_st_fl'] = st_florence\n end_florence = pd.DataFrame(geo_end.within(geo_airport_cities['geometry'].loc[1].buffer(0.1)))\n end_florence.index=result.index\n result['geometry_end_fl'] = end_florence\n st_pisa = pd.DataFrame(geo_st.within(geo_airport_cities['geometry'].loc[0].buffer(0.1)))\n st_pisa.index=result.index\n result['geometry_st_pisa'] = st_pisa\n end_pisa = pd.DataFrame(geo_end.within(geo_airport_cities['geometry'].loc[0].buffer(0.1)))\n end_pisa.index=result.index\n result['geometry_end_pisa'] = end_pisa\n return result", "def landing_airport(self, **kwargs: Any) -> \"Airport\":\n\n from ..core.distance import guess_airport\n\n # The following cast secures the typing\n self = cast(\"Flight\", self)\n\n data = self.data.sort_values(\"timestamp\")\n return guess_airport(data.iloc[-1], **kwargs)", "def add_search_bearings(search_bearing, perpendicular=True):\n\n search_list=[]\n if type(search_bearing) == list:\n for i in search_bearing:\n search_list.append(i)\n if perpendicular==True:\n for t in range(3):\n search_list.append((i+(90*(t+1)))%360)\n else:\n pass\n elif type(search_bearing) == int:\n search_list.append(search_bearing)\n if perpendicular==True:\n for t in range(3):\n search_list.append((search_bearing+(90*(t+1)))%360)\n else:\n pass\n else:\n print(\"Please make sure the bearing(s) you are searching for are either an integer or a list of integers\")\n return search_list", "def airports(osm_path): \n return (retrieve(osm_path,'multipolygons',['aeroway'],**{'aeroway':[\"='aerodrome'\"]})).rename(columns={'aeroway': 'asset'})", "def buildings_in_area(self, polygon):\n return [b for b in self.buildings if polygon.contains(b.geometry.convex_hull)]", "def mesh_boundary(mesh):\n adja = 
edges_to_adjacency_matrix(mesh)\n r = sparse.extract.find(adja)\n li = r[0][np.where(r[2] == 1)]\n lj = r[1][np.where(r[2] == 1)]\n edges_boundary = np.vstack([li, lj]).T\n \"\"\"\n # alternative implementation based on edges and grouping from trimesh\n # instead of adjacency matrix\n from trimesh import grouping\n groups = grouping.group_rows(mesh.edges_sorted, require_count=1)\n # vertex_boundary = np.unique(open_mesh.edges_sorted[groups])\n edges_boundary = mesh.edges_sorted[groups]\n \"\"\"\n if li.size == 0:\n print('No holes in the surface !!!!')\n return np.array()\n else:\n return edges_to_boundary(edges_boundary)", "def aligned_on_ils(\n self,\n airport: Union[None, str, \"Airport\"],\n angle_tolerance: float = 0.1,\n min_duration: deltalike = \"1T\",\n ) -> Iterator[\"Flight\"]:\n\n from ..data import airports\n\n # The following cast secures the typing\n self = cast(\"Flight\", self)\n\n if airport is None:\n airport = self.landing_airport()\n\n _airport = airports[airport] if isinstance(airport, str) else airport\n if (\n _airport is None\n or _airport.runways is None\n or _airport.runways.shape.is_empty\n ):\n return None\n\n rad = np.pi / 180\n\n chunks = list()\n for threshold in _airport.runways.list:\n tentative = (\n self.bearing(threshold)\n .distance(threshold)\n .assign(\n b_diff=lambda df: df.distance\n * np.radians(df.bearing - threshold.bearing).abs()\n )\n .query(\n f\"b_diff < {angle_tolerance} and \"\n f\"cos((bearing - track) * {rad}) > 0\"\n )\n )\n if tentative is not None:\n for chunk in tentative.split(\"20s\"):\n if (\n chunk.longer_than(min_duration)\n and chunk.altitude_min < 5000\n ):\n chunks.append(\n chunk.assign(\n ILS=threshold.name, airport=_airport.icao\n )\n )\n\n yield from sorted(chunks, key=attrgetter(\"start\"))", "def make_boundaries(self):\n p = self.project\n c = p[0]\n outlet = p.NewOutlet('GW', c.x, c.y, c.z - c.soildepth)\n cmf.FreeDrainagePercolation(c.layers[-1], outlet)\n rainfall = cmf.timeseries.from_sequence(self.starttime, cmf.day, [25, 0, 0, 0, 0, 0, 0] * 200)\n p.rainfall_stations.add('Heavy rain once a week', rainfall, (0, 0, 0))\n print(cmf.describe(p.rainfall_stations))\n p.use_nearest_rainfall()\n\n return outlet", "def mark_conditions(mesh, lst):\n fun = FacetFunction(\"int\", mesh, 0)\n for bc in lst:\n sub = OnBoundary()\n # overwrite inside function with the one from bc\n sub.inside = bc.getTest()\n sub.mark(fun, bc.value)\n return fun.array()", "def getPlane(entry):\n\n \n \n a,b,c = getNewLattice(entry,2)\n a_vector = np.linalg.solve(np.array(entry[0].lattice.as_dict()['matrix']).T,a)\n b_vector = np.linalg.solve(np.array(entry[0].lattice.as_dict()['matrix']).T,b)\n fracs = np.cross(a_vector,b_vector)\n fracs /= min([x for x in fracs if abs(x)>1E-4])\n \n return(fracs)", "def calcFaceAreas(x,y,z):\n (nLonP1, nLatP1) = x.shape\n (nLon, nLat) = (nLonP1-1, nLatP1-1)\n\n area = numpy.zeros((nLon, nLat))\n\n for i in range(nLon):\n for j in range(nLat):\n left = distance( (x[i,j], y[i,j], z[i,j]), (x[i,j+1], y[i,j+1], z[i,j+1]) )\n right = distance( (x[i+1,j], y[i+1,j], z[i+1,j]), (x[i+1,j+1], y[i+1,j+1], z[i+1,j+1]) )\n top = distance( (x[i,j+1], y[i,j+1], z[i,j+1]), (x[i+1,j+1], y[i+1,j+1], z[i+1,j+1]) )\n bot = distance( (x[i,j], y[i,j], z[i,j]), (x[i+1,j], y[i+1,j], z[i+1,j]) )\n \n area[i,j] = 0.5*(left+right) * 0.5*(top+bot)\n\n return area", "def boundary_polygon_by_edges(self):\n lines=self.boundary_linestrings()\n polys=join_features.lines_to_polygons(lines,close_arc=False)\n if len(polys)>1:\n raise 
GridException(\"somehow there are multiple boundary polygons\")\n return polys[0]", "def detector_outline( bottom_vec3d_list, top_vec3d_list ):\n # hardcoded angular offset for hexagon\n phi0 = -20.0 * I3Units.degree \n\n # hardcoded threshold for an edge\n cos_angle_threshold = math.cos( 7.0 * I3Units.degree ) \n\n bottom = Vec3dList()\n top = Vec3dList()\n\n string_coords = []\n for b, t in zip( bottom_vec3d_list, top_vec3d_list ):\n if t[2] < 450.0 * I3Units.meter: # ignore deep-core\n continue\n string_coords.append(( math.atan2(t[1], t[0]),\n t[0], t[1], b[2], t[2] ))\n\n # border detection:\n # check if there is a point in each angular segment of hexagon\n border = []\n for i, cur in enumerate( string_coords ):\n counts = [False, False, False, False, False , False]\n for j, other in enumerate( string_coords ):\n if i == j: continue\n dx = cur[1] - other[1]\n dy = cur[2] - other[2]\n phi = int((math.atan2( dy, dx ) - phi0) / I3Units.degree)\n if phi < 0:\n phi += 360\n counts[phi // 60] = True\n neighbor_count = sum( counts )\n # border points don't have a full hexagon of neighbors\n if neighbor_count < 6:\n border.append( cur )\n\n border.sort() # put in circular order\n\n # edge detection:\n # check if differential vectors of three consecutive points have an angle\n for i in xrange( len(border) ):\n ax = border[i - 1][1] - border[i - 2][1]\n ay = border[i - 1][2] - border[i - 2][2]\n bx = border[i][1] - border[i - 1][1]\n by = border[i][2] - border[i - 1][2]\n anorm = (ax ** 2 + ay ** 2) ** 0.5\n bnorm = (bx ** 2 + by ** 2) ** 0.5\n cos_angle = (bx * ax + by * ay) / (anorm * bnorm)\n if cos_angle < cos_angle_threshold:\n cur = border[i - 1]\n bottom.append( vec3d(cur[1], cur[2], cur[3]) )\n top.append( vec3d(cur[1], cur[2], cur[4]) )\n\n return bottom, top", "def find_area(self):\n min_lat_point = self.latitude_min\n max_lat_point = self.latitude_max\n min_lon_point = self.longitude_min\n max_lon_point = self.longitude_max\n self.rename_latitude()\n self.rename_longitude()\n all_lat_bounds = self.cube.coord('latitude').bounds\n all_lon_bounds = self.cube.coord('longitude').bounds\n # print(all_lat_bounds)\n # print(all_lon_bounds)\n for i, lat in enumerate(all_lat_bounds):\n for j, lon in enumerate(all_lon_bounds):\n lat_bounds = lat # 2D array of the lower and upper lat bounds\n lon_bounds = lon # 2D array of the lower and upper lon bounds\n if lat_bounds[0] <= min_lat_point < lat_bounds[1]:\n if lon_bounds[0] <= min_lon_point < lon_bounds[1]:\n nlat_min = i\n nlon_min = j\n else:\n pass\n else:\n pass\n\n for k, lat in enumerate(all_lat_bounds):\n for l, lon in enumerate(all_lon_bounds):\n lat_bounds = lat # 2D array of the lower and upper lat bounds\n lon_bounds = lon # 2D array of the lower and upper lon bounds\n if lat_bounds[0] <= max_lat_point < lat_bounds[1]:\n if lon_bounds[0] <= max_lon_point < lon_bounds[1]:\n nlat_max = k\n nlon_max = l\n else:\n pass\n else:\n pass\n\n area_subset = self.cube[:, nlat_min:nlat_max+1, nlon_min:nlon_max+1]\n # print(area_subset.coord('latitude').points)\n # print(area_subset.coord('longitude').points)\n area_mean = area_subset.collapsed(['latitude', 'longitude'],\n iris.analysis.MEAN)\n\n return area_mean", "def island_perimeter(grid):\n sum = 0\n\n for line in range(len(grid)):\n for column in range(len(grid[line])):\n value = grid[line][column]\n water_borders = 4\n if value == 1:\n if line != len(grid) - 1 and grid[line + 1][column] == 1:\n water_borders -= 1\n if line != 0 and grid[line - 1][column] == 1:\n water_borders -= 1\n if 
column != len(grid[0]) - 1 and grid[line][column + 1] == 1:\n water_borders -= 1\n if column != 0 and grid[line][column - 1] == 1:\n water_borders -= 1\n sum += water_borders\n return sum", "def __init__(self, start, end, hx, hy, hole_start, hole_end):\n mesh, boundary, internal, triangles = triangulate_hole(start, end, hx, hy, hole_start, hole_end)\n self.triangles = triangles\n self.mesh = mesh \n self.boundary = boundary\n self.internal = internal\n self.area = (hx*hy)/2.0\n self.hx, self.hy = hx,hy", "def find_horizontal_user_airspace_intersections(flight_id, latitudes, longitudes,\n min_altitude, max_altitude):\n log.debug(\"Finding user airspace intersections for flight %s, with min \"\n \"altitude %s and max altitude %s\",\n flight_id, min_altitude, max_altitude)\n\n # Get the connection string\n connection = get_geo_db_connection()\n\n # Make a list of augmented points\n augmented_points = make_augmented_points_from_positions(\n latitudes, longitudes, flight_id, connection)\n # Convert the points to a geographic feature\n geographic_trajectory = make_geographic_trajectory(augmented_points, flight_id, connection)\n\n # Make a trajectory that contains the geo line, the augmented points and\n # the 2D user defined intersected sectors\n augmented_trajectory = make_augmented_trajectory(\n augmented_points, geographic_trajectory, flight_id, min_altitude, max_altitude, connection, True)\n\n # Find the 2D intersections\n intersections = find_intersections(\n augmented_trajectory, min_altitude, max_altitude, flight_id, connection)\n # Organise the outputs\n intersection_data_structure = create_intersection_data_structure(intersections, flight_id)\n\n return intersection_data_structure", "def points_on_lines(hyperplanes):\n intersections = []\n for row in hyperplanes:\n intersections.append(an_intersection(row[:-1], -row[-1]))\n return np.array(intersections)", "def walls(self, x, y):\n return [direction for direction in Compass if self._grid[y][x] & direction]", "def filter_by_footprint(footprint, list_of_results, dataset_name):\n\n polygon_geom = ogr.CreateGeometryFromWkt(footprint)\n\n intersect_list = []\n for f in list_of_results:\n geom = ogr.CreateGeometryFromJson(json.dumps(f['spatialFootprint']))\n\n intersect_result = geom.Intersection(polygon_geom)\n\n if not intersect_result.IsEmpty():\n print(\"FOUND INTERSECT\")\n intersect_list.append(f)\n else:\n print(\"NO INTERSECT\")\n\n return intersect_list", "def landing_attempts(\n self, dataset: Optional[\"Airports\"] = None, **kwargs: Any\n ) -> Iterator[\"Flight\"]:\n # The following cast secures the typing\n self = cast(\"Flight\", self)\n\n candidate = self.query(\"altitude < 8000\")\n if candidate is not None:\n for chunk in candidate.split(\"10T\"):\n point = chunk.query(\"altitude == altitude.min()\")\n if point is None:\n return\n if dataset is None:\n cd = point.landing_airport()\n else:\n cd = point.landing_airport(dataset=dataset)\n if cd.runways is not None:\n yield from chunk.assign(airport=cd.icao).aligned_on_ils(\n cd, **kwargs\n )", "def areap(minRA, maxRA, minDec, maxDec):\n poly = [[minRA, minDec], [minRA, maxDec], [maxRA, maxDec], [maxRA, minDec]]\n\n return geometry.Polygon(poly)", "def island_perimeter(grid):\n total = 0\n for b in range(len(grid)):\n for a in range(len(grid[b])):\n # left corner\n if (a == 0) and (b == 0):\n if grid[b][a] == 1:\n total = total + 2\n if grid[b][a + 1] == 0:\n total = total + 1\n if grid[b + 1][a] == 0:\n total = total + 1\n # right corner\n elif (a == len(grid[b]) - 1) and b == 
0:\n if grid[b][a] == 1:\n total = total + 2\n if grid[b + 1][a] == 0:\n total = total + 1\n if grid[b][a - 1] == 0:\n total = total + 1\n # lower-left corner\n elif a == 0 and b == (len(grid) - 1):\n if grid[b][a] == 1:\n total = total + 2\n if grid[b][a + 1] == 0:\n total = total + 1\n if grid[b - 1][a] == 0:\n total = total + 1\n # lower-right corner\n elif b == (len(grid) - 1) and a == (len(grid[b]) - 1):\n if grid[b][a] == 1:\n total = total + 2\n if grid[b - 1][a] == 0:\n total = total + 1\n if grid[b][a - 1] == 0:\n total = total + 1\n # top edge\n elif (b == 0 and a > 0) and a < (len(grid[b]) - 1):\n if grid[b][a] == 1:\n total = total + 1\n if grid[b][a - 1] == 0:\n total = total + 1\n if grid[b + 1][a] == 0:\n total = total + 1\n if grid[b][a + 1] == 0:\n total = total + 1\n # left edge\n elif (b > 0 and b < (len(grid) - 1)) and ((a == 0) and a <\n len(grid[b]) - 1):\n if grid[b][a] == 1:\n total = total + 1\n if grid[b - 1][a] == 0:\n total = total + 1\n if grid[b][a + 1] == 0:\n total = total + 1\n if grid[b + 1][a] == 0:\n total = total + 1\n # right edge\n elif (b > 0 and (b < len(grid) - 1)) and (a == len(grid[b]) - 1):\n if grid[b][a] == 1:\n total = total + 1\n if grid[b - 1][a] == 0:\n total = total + 1\n if grid[b][a - 1] == 0:\n total = total + 1\n if grid[b + 1][a] == 0:\n total = total + 1\n # bottom edge\n elif (b == len(grid) - 1) and a > 0 and a < len(grid[b]) - 1:\n if grid[b][a] == 1:\n total = total + 1\n if grid[b][a - 1] == 0:\n total = total + 1\n if grid[b - 1][a] == 0:\n total = total + 1\n if grid[b][a + 1] == 0:\n total = total + 1\n # cases that are neither edges nor corners\n elif (b > 0 and b < len(grid) - 1) and (a > 0 and a <\n len(grid[b]) - 1):\n if grid[b][a] == 1:\n if grid[b][a - 1] == 0:\n total = total + 1\n if grid[b][a + 1] == 0:\n total = total + 1\n if grid[b - 1][a] == 0:\n total = total + 1\n if grid[b + 1][a] == 0:\n total = total + 1\n return total", "def buildAirports(listy):\n listx = listy[:-1]\n airport_list = [] # creates a new list\n\n with open('airport.csv', newline='', encoding=\"utf8\") as airport_file: # opens the csv file\n reader = csv.reader(airport_file) # reads the cotents to a variable\n next(reader, None) # returns none at the end of the file\n for airport in reader: # iterates through the reader\n if airport[4] in listx:\n airport_code = airport[4] # assigns variable\n country_name = airport[3] # assigns variable\n longitude = airport[6] # assigns variable\n latitude = airport[7] # assigns variable\n templist = [airport_code, country_name, longitude, latitude]\n airport_list.append(templist)\n \n country_currency_list = [] # creates a new list\n\n with open('countrycurrency.csv', newline='', encoding=\"utf8\") as countrycurrency_file: # opens the csv file\n reader = csv.reader(countrycurrency_file) # reads the cotents to a variable\n next(reader, None) # returns none at the end of the file\n for country in reader: # iterates through the reader\n temp_list = [country[0], country[14]] # temp list created\n country_currency_list.append(temp_list) # appends temp list to the main list\n\n currency_list = [] # creates a new list\n\n with open('currencyrates.csv', newline='', encoding=\"utf8\") as currencyrates_file: # opens the csv file\n reader = csv.reader(currencyrates_file) # reads the cotents to a variable\n next(reader, None) # returns none at the end of the file\n for currency in reader: # iterates through the reader\n temp_list = [currency[1],currency[2]] # temp list created\n currency_list.append(temp_list) # 
appends temp list to the main list\n\n #Outer for loops goes through list of countries and the currency they have. Inner loop will go through currency\n #and exchange rate list, matches currency to exchange rate, and creates a final list of lists with the\n #country and currency rate in each inner list.\n final_list = []\n for i in country_currency_list:\n for x in currency_list:\n if i[1] == x[0]:\n templist = [i[0], x[1]]\n final_list.append(templist)\n\n #Outer for loop will go through list of lists that contains airport, country, latitude, longitude,\n #and the inner loop will go through the list of airports and currency information and then match\n #them with the airport in the outer list. The inner loop will then extend\n x = 0\n i = 0\n while x < len(airport_list):\n while i < len(final_list):\n if airport_list[x][1] == final_list[i][0]:\n airport_list[x].extend(final_list[i])\n break\n i+=1\n x+=1\n # Make a dictionary with the Airport code as the key and the value being the airport object of that key\n finalAirports = {}\n for i in airport_list:\n finalAirports[i[0]] = Airport.Airport(i[0], i[2], i[3], i[5])\n\n return finalAirports" ]
[ "0.5394385", "0.53189814", "0.523158", "0.5114259", "0.50788957", "0.49425542", "0.4872695", "0.48638052", "0.48591077", "0.4844756", "0.48313314", "0.481995", "0.48155817", "0.48141488", "0.48097947", "0.47850233", "0.4776012", "0.47720993", "0.47235444", "0.4717238", "0.47164732", "0.47160786", "0.47147152", "0.47008464", "0.4697467", "0.46863785", "0.46794692", "0.467562", "0.46643218", "0.46609223" ]
0.5646328
0
Get the soft-argmax coordinate for the given score map.
def soft_argmax(self, score_map):
    # (bs, feat_size * feat_size)
    score_vec = score_map.view((-1, self.feat_size * self.feat_size))
    prob_vec = nn.functional.softmax(score_vec, dim=1)
    if not hasattr(self, 'coord_x'):
        # generate coordinates and indexes
        self.indice = torch.arange(
            0, self.feat_size, device=score_map.device).view(
                -1, 1) * self.stride
        # generate mesh-grid
        self.coord_x = self.indice.repeat((self.feat_size, 1)) \
            .view((self.feat_size * self.feat_size,)).float()
        self.coord_y = self.indice.repeat((1, self.feat_size)) \
            .view((self.feat_size * self.feat_size,)).float()
    soft_argmax_x = torch.sum((self.coord_x * prob_vec), dim=1)
    soft_argmax_y = torch.sum((self.coord_y * prob_vec), dim=1)
    return soft_argmax_x, soft_argmax_y
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_maximum_from_heatmap(self, heatmap):\n assert heatmap.size(0) == 1 and heatmap.size(1) == 1\n max_map = torch.eq(heatmap, self.pool(heatmap)).float()\n heatmap = heatmap * max_map\n score = heatmap.view(-1)\n score, pos_idx = score.topk(self.max_num_people)\n mask = score > self.keypoint_threshold\n score = score[mask]\n pos_idx = pos_idx[mask]\n return pos_idx, score", "def get_peak_inds(map_):\n return np.unravel_index(np.argmax(map_, axis=None), map_.shape)", "def find_max_score_location(grid, shape):", "def softmax(scores):\n exp_score = np.exp(scores)\n return exp_score / np.sum(exp_score)", "def argmax_feature_map_locations(feature_map):\n batch_size, _, width, num_channels = _get_shape(feature_map, 4)\n\n feature_map_flattened = tf.reshape(\n feature_map, [batch_size, -1, num_channels])\n peak_flat_indices = tf.math.argmax(\n feature_map_flattened, axis=1, output_type=tf.dtypes.int32)\n # Get x and y indices corresponding to the top indices in the flat array.\n y_indices, x_indices = (\n row_col_indices_from_flattened_indices(peak_flat_indices, width))\n channel_indices = tf.tile(\n tf.range(num_channels)[tf.newaxis, :], [batch_size, 1])\n return y_indices, x_indices, channel_indices", "def compute_spatial_soft_argmax(self, x):\n\n \n \n # unfortunately can't easily dynamically compute these sizes\n # inside a @tf.function, so just hard-coding them\n H, W, C = 149, 69, 16\n B = self.batch_size\n\n # see: https://github.com/tensorflow/tensorflow/issues/6271\n x = tf.reshape(tf.transpose(x, [0, 3, 1, 2]), [B * C, H * W])\n softmax = tf.nn.softmax(x)\n softmax = tf.transpose(tf.reshape(softmax, [B, C, H, W]), [0, 2, 3, 1])\n \n posx, posy = tf.meshgrid(tf.linspace(-1., 1., num=H), \n tf.linspace(-1., 1., num=W), \n indexing='ij')\n\n image_coords = tf.stack((posx, posy), axis=2) # (H, W, 2)\n # Convert softmax to shape [B, H, W, C, 1]\n softmax = tf.expand_dims(softmax, -1)\n # Convert image coords to shape [H, W, 1, 2]\n image_coords = tf.expand_dims(image_coords, 2)\n # Multiply (with broadcasting) and reduce over image dimensions to get the result\n # of shape [B, C, 2]\n spatial_soft_argmax = tf.reduce_sum(softmax * image_coords, axis=[1, 2])\n return spatial_soft_argmax", "def _compute_softmax(scores):\n if not scores:\n return []\n\n max_score = None\n for score in scores:\n if max_score is None or score > max_score:\n max_score = score\n\n exp_scores = []\n total_sum = 0.0\n for score in scores:\n x = np.exp(score - max_score)\n exp_scores.append(x)\n total_sum += x\n\n probs = []\n for score in exp_scores:\n probs.append(score / total_sum)\n return probs", "def _single_value_max(self, maps, threshold):\r\n max_vec = np.max(maps, axis=1)\r\n cmin = np.min(max_vec)\r\n cmax = np.max(max_vec)\r\n limit = cmax - (cmax - cmin) * threshold\r\n max_mask = max_vec > limit\r\n argmax = np.argmax(maps, axis=1)\r\n return (argmax + 1) * max_mask", "def softmax(self, scores):\n\n\n # for each sample, for each class ,caclulate\n # np.exp(scores) : still (n_samples, n_classes)\n\n # axis = 1\n # a00, a01, a02 as a sinlge one to perfrom np_sum\n # which is the same sample \n # sum_exp : still (n_samples, 1)\n\n # softmax = (n_samples, n_classes) / (n_samples, 1) = (n_samples, n_classes) \n\n sum_exp = np.sum(np.exp(scores), axis=1, keepdims=True)\n softmax = np.exp(scores) / sum_exp\n \n return softmax", "def masked_softmax(scores, mask):\r\n numerator = tf.exp(tf.subtract(scores, tf.reduce_max(scores, 1, keep_dims=True))) * mask\r\n denominator = tf.reduce_sum(numerator, 1, 
keep_dims=True)\r\n weights = tf.div(numerator, denominator)\r\n return weights", "def spatial_expval(map_):\n map_ = map_ / np.sum(map_)\n x, y = np.meshgrid(np.arange(map_.shape[1]), np.arange(map_.shape[0]))\n\n return np.sum(map_ * x), np.sum(map_ * y)", "def _compute_softmax(scores):\n if not scores:\n return []\n\n max_score = None\n for score in scores:\n if max_score is None or score > max_score:\n max_score = score\n\n exp_scores = []\n total_sum = 0.0\n for score in scores:\n x = math.exp(score - max_score)\n exp_scores.append(x)\n total_sum += x\n\n probs = []\n for score in exp_scores:\n probs.append(score / total_sum)\n return probs", "def _compute_softmax(scores):\n if not scores:\n return []\n\n max_score = None\n for score in scores:\n if max_score is None or score > max_score:\n max_score = score\n\n exp_scores = []\n total_sum = 0.0\n for score in scores:\n x = math.exp(score - max_score)\n exp_scores.append(x)\n total_sum += x\n\n probs = []\n for score in exp_scores:\n probs.append(score / total_sum)\n return probs", "def _compute_softmax(scores):\n if not scores:\n return []\n\n max_score = None\n for score in scores:\n if max_score is None or score > max_score:\n max_score = score\n\n exp_scores = []\n total_sum = 0.0\n for score in scores:\n x = math.exp(score - max_score)\n exp_scores.append(x)\n total_sum += x\n\n probs = []\n for score in exp_scores:\n probs.append(score / total_sum)\n return probs", "def _compute_softmax(scores):\n if not scores:\n return []\n\n max_score = None\n for score in scores:\n if max_score is None or score > max_score:\n max_score = score\n\n exp_scores = []\n total_sum = 0.0\n for score in scores:\n x = math.exp(score - max_score)\n exp_scores.append(x)\n total_sum += x\n\n probs = []\n for score in exp_scores:\n probs.append(score / total_sum)\n return probs", "def scores_to_labels(scores):\n device = sp.get_device(scores)\n xp = device.xp\n with device:\n return xp.argmax(scores, axis=1)", "def spatial_var(map_):\n expx, expy = spatial_expval(map_)\n map_ = map_ / np.sum(map_)\n x, y = np.meshgrid(np.arange(map_.shape[1]), np.arange(map_.shape[0]))\n\n return np.sum(map_ * ((x - expx) ** 2 + (y - expy) ** 2))", "def heatmaps_softmax(heatmaps):\n\n ret = np.zeros(heatmaps.shape, heatmaps.dtype)\n\n width, height, _ = heatmaps.shape\n\n for w in range(width):\n for h in range(height):\n x = heatmaps[w,h]\n e = np.exp(x - np.max(x))\n ret[w,h] = e / np.sum(e, axis=0)\n\n return ret", "def max_point(self):\n x = self.max(0).idxmax()\n y = self.loc[:, x].idxmax()\n return x, y", "def softmax(x):\n scoreMatExp = np.exp(np.asarray(x))\n return scoreMatExp / scoreMatExp.sum(0)", "def get_max(av_scores, pos, constr=(), neg_pos=(), min_seeds=2):\n def key(s):\n \"Give average score of considered positions, and only if constraints are satisfied\"\n if any(s[i] != v for i,v in constr):\n return 0\n elif av_scores[s][0] < min_seeds:\n return 0\n elif pos is None:\n return av_scores[s][1]\n elif isinstance(pos, (list, tuple)):\n return sum(av_scores[s][1][p] for p in pos) - sum(av_scores[s][1][p] for p in neg_pos)\n else:\n return av_scores[s][1][pos]\n # Get the best hyperparameters\n best = max(av_scores, key=key)\n if pos is None:\n return best, av_scores[best]\n else:\n b = av_scores[best]\n return best, (b[0], b[1][pos])", "def _get_optimum_location(dataset: Dataset) -> np.ndarray:\n\n # Retrieve the observations\n X, Y = dataset.inputs_array, dataset.output_array\n\n # Return the location of the maximum\n best_index = int(np.argmax(Y))\n\n 
return X[best_index, :]", "def softmax(x: np.array) -> Tuple[int, List[float]]:\n dist = np.exp(x) / np.sum(np.exp(x))\n y = np.argmax(dist)\n return int(y), dist", "def get_MAP(prior,likelihood):\n pr = np.array(prior)\n ll = np.array(likelihood)\n\n ps = np.dot(pr * ll)\n ps /= np.sum(ps)\n\n map_idx = np.argmax(ps)\n return (map_idx,ps)", "def point_maxq_position(self, point_distribution, mask):\n point_distribution_np = to_np(point_distribution) # batch x time\n mask_np = to_np(mask) # batch x time\n point_distribution_np = point_distribution_np - np.min(point_distribution_np) + 1e-2 # minus the min value, so that all values are non-negative\n point_distribution_np = point_distribution_np * np.expand_dims(mask_np, -1) # batch x time x 2\n indices = np.argmax(point_distribution_np, 1) # batch x 2\n indices = to_pt(np.array(indices), self.use_cuda) # batch x 2\n return indices", "def get_max_score(location_list, grid, shape):", "def infonce_lower_bound(scores):\n nll = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores) - tf.reduce_logsumexp(input_tensor=scores, axis=1))\n mi = tf.math.log(tf.cast(scores.shape[0], tf.float32)) + nll\n return mi", "def loss(self, y_true, score, pos_label=_NoValue):\n if pos_label is not _NoValue:\n raise ValueError(\"`pos_label` not supported\")\n\n score = score.atleast_2d() # Working with 2-D arrays only\n\n p = CSoftmax().softmax(score) # SoftMax function\n\n # find-like indexing (list of lists)\n return -CArray(p[[list(range(score.shape[0])), y_true.tolist()]]).log()", "def merge_maps(self, map_2d):\n x = map_2d.data.max(0, keepdim=True)[0]\n y = map_2d.data.max(1, keepdim=True)[0]\n return x, y", "def _score_to_distance_map(y_grid, x_grid, heatmap, points_y, points_x,\n score_distance_offset):\n y_diff = y_grid[:, :, tf.newaxis] - points_y\n x_diff = x_grid[:, :, tf.newaxis] - points_x\n distance = tf.math.sqrt(y_diff**2 + x_diff**2)\n return tf.math.divide(heatmap, distance + score_distance_offset)" ]
[ "0.65778434", "0.63588685", "0.627444", "0.6260095", "0.61241853", "0.6041717", "0.590227", "0.58632195", "0.5844217", "0.57985765", "0.57653546", "0.57485694", "0.57485694", "0.57485694", "0.57485694", "0.57430995", "0.57019997", "0.5674879", "0.5664324", "0.56596255", "0.56451917", "0.56219727", "0.5605476", "0.56051743", "0.55838156", "0.55710965", "0.5540615", "0.5526874", "0.5510021", "0.54890466" ]
0.8427702
0
Getting an attribute from the common configuration file
def __get_common(attribute):
    return common.API_COMMON_CONFIG[attribute]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_attr(self, server, attribute):\n\t\tattribute = str(attribute)\n\t\tcfg = self.get_cfg(server)\n\t\tif cfg:\n\t\t\treturn cfg.get(attribute)", "def _get_attribute(self, profile, attribute):\n self.validate_root()\n self.is_valid_profile(profile)\n if attribute not in self.config[self.ROOT][profile]:\n raise configerror.ConfigError(\n 'Profile %s does not have %s configured' % (attribute, profile))\n return self.config[self.ROOT][profile][attribute]", "def get_config(self, heading, attribute):\n if attribute in self._config[heading]:\n return self._config[heading][attribute]\n else:\n return None", "def __getattr__(self, name):\n return self._conf._get(name, self._group)", "def get_config(self, key):\n return getattr(self.args, 'conf.{}'.format(key))", "def __getattr__(self, key):\n return self._config[key]", "def config(attr):\n if not hasattr(config, 'config'):\n with open('config.json') as f:\n config.config = eval(f.read())\n node = config.config\n for part in attr.split('.'):\n node = node[part]\n return node", "def get(self, key, failonerror=True):\n parts = key.split('.')\n pointer = self\n try:\n for p in parts:\n pointer = getattr(pointer, p)\n return pointer\n except (KeyError, AttributeError):\n global_cfg_path = self._main_config and os.path.abspath(\n self._main_config.filename) or 'None'\n local_cfg_path = self._findConfigPath(self._recent_caller)\n m = ('Attribute \"%s\" not defined\\nglobal_config: %s\\n'\n 'local_config: %s' % (key, global_cfg_path, local_cfg_path))\n if failonerror:\n raise AttributeError(m)\n else:\n sys.stderr.write(m)", "def _get_attr(self, attr, root=None):\n with self._h5file('r') as h5file:\n if root is None:\n obj = h5file\n else:\n obj = h5file[root]\n return get_decoded(obj.attrs, attr)[attr]", "def get_attribute(self, name):\n\n pass", "def __getattribute__(self, key):\n value = super(Config, self).__getattribute__(key)\n\n if key == \"reserved\" or key in self.reserved:\n return value\n else:\n return self.format(value, key)", "def read_global_attribute(self, name):\n return self._attrs[name]", "def get_attr_from_cfgdbjson(dut, attr):\n cmd = \"sudo cat /etc/sonic/config_db.json | grep {}\".format(attr)\n return utils_obj.remove_last_line_from_string(st.config(dut, cmd))", "def attr_paths_from_config(attribute):\n cfg_path = config_path()\n return [os.path.abspath(\n os.path.join(os.path.dirname(cfg_path), attribute))]", "def __getitem__(self, name):\n return self.config[name]", "def get_attribute(self, key):\n return self.attributes[key]", "def getattribute(self, name):\n return self.attributes[name]", "def __getattr__(self, key):\n # logger.debug(\"Config.__getattr__(%s)\" % key)\n try:\n return self.get(key)\n except KeyError, msg:\n raise AttributeError(msg)", "def get_attr(self, location, attr, default=None):\r\n return self.get_attrs(location).get(attr, default)", "def get_config(self, key):\n return self.data[key]", "def get_attribute(self, attr):\n logger.debug(\"GET ATTRIBUTE {}\".format(attr))", "def get_attr(self, name: str):\n\n if name not in self.CUSTOM_ATTRIBUTES:\n raise ValueError(f\"Supported values are: {', '.join(self.CUSTOM_ATTRIBUTES)}\")\n\n try:\n return getattr(self, name)\n except AttributeError:\n raise AttributeError(f\"The Labourer is not yet registered in TaskManager, and doesn't have any custom \"\n f\"attributes. 
Use TaskManager.register_labourer() first.\")", "def get(self, key):\n return self.config.get(key)", "def __getattr__(self, key):\n try:\n return self._cache[key]\n except KeyError:\n raise AttributeError('Attribute \"%s\" not found in config.' % key)", "def getconfig(self, key):\n return self.config[key]", "def import_attr(path):\n module_path, attr_name = path.rsplit(\".\", 1)\n return getattr(import_module(module_path), attr_name)", "def __call__(self, name):\n return getattr(self.config, name, getattr(self, name, None))", "def get_attribute(self, attribute: str) -> str:\n pass", "def lookup(self, name):\n for config in self._config:\n if name in config:\n return config[name]\n raise AttributeError(\"%s not found\" % name)", "def _get_attrib(self, attrib_path: str, binfo: dict) -> str:\n apath = attrib_path.split('.')\n return self._get_attrib_by_path(apath, binfo)" ]
[ "0.7258007", "0.69346863", "0.69201845", "0.6858345", "0.6795432", "0.6713505", "0.6605009", "0.6463244", "0.63739073", "0.6330982", "0.62900496", "0.6279128", "0.62187576", "0.618842", "0.6186253", "0.61832786", "0.61632085", "0.61560833", "0.61529845", "0.6120516", "0.6114456", "0.6087707", "0.6077389", "0.60693115", "0.60663956", "0.60631526", "0.6025408", "0.60170156", "0.6011186", "0.60035425" ]
0.7257732
1
Getting the URL to the API based on the common configuration file and data code filters
def get_url(self, dataset_code):
    module = None
    for qol_param in common.QOL_PARAMS:
        if dataset_code in common.QOL_PARAMS[qol_param]:
            module = common.QOL_PARAMS[qol_param][dataset_code]
            break
    url = self.__get_host(dataset_code)
    url = self.__apply_filters(url, common)
    if module is not None:
        url = self.__apply_filters(url, module)
    return url
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def url(vmanage_host,vmanage_port,api):\r\n \"\"\" function to get the url provide api endpoint \"\"\"\r\n \r\n return f\"https://{vmanage_host}:{vmanage_port}{api}\"", "def appurl( instkey, name, **matchdict ) :", "def getURLs():", "def _get_api_url(self):\n return \"%s/%s/\" % (settings.API_URL, settings.API_VERSION)", "def _get_api_url_for (self, component):\n if self.api_data['API_ROOT'].find(self.api_data['API_BASE_URL']) > -1:\n return self.api_data['API_ROOT'] + '/' + self.api_data['BUILD_IDENTIFIER'] + self.urls[component]\n else:\n return self.api_data['API_ROOT'] + self.api_data['API_BASE_URL'] + '/' + self.api_data['BUILD_IDENTIFIER'] + self.urls[component]", "def inject_urls():\n return dict(company_name=config.company_name)", "def api_url(url_base):\n return f\"{url_base}/api/v2\"", "def _get_request_url(self, endpoint: str) -> str:\n return '{qcat_base_url}/en/api/v2/{endpoint}{filter_params}'.format(\n qcat_base_url=self.config.qcat_base_url, endpoint=endpoint,\n filter_params=self.config.api_filter_params)", "def __get_urls(self):\n self.__valid_servers = {\n \"qa\": {\n \"server_url\": \"https://qa.api.deepaffex.ai:9443\",\n \"websocket_url\": \"wss://qa.api.deepaffex.ai:9080\"\n },\n \"dev\": {\n \"server_url\": \"https://dev.api.deepaffex.ai:9443\",\n \"websocket_url\": \"wss://dev.api.deepaffex.ai:9080\"\n },\n \"demo\": {\n \"server_url\": \"https://demo.api.deepaffex.ai:9443\",\n \"websocket_url\": \"wss://demo.api.deepaffex.ai:9080\"\n },\n \"prod\": {\n \"server_url\": \"https://api.deepaffex.ai:9443\",\n \"websocket_url\": \"wss://api.deepaffex.ai:9080\"\n },\n \"prod-cn\": {\n \"server_url\": \"https://api.deepaffex.cn:9443\",\n \"websocket_url\": \"wss://api.deepaffex.cn:9080\"\n },\n \"demo-cn\": {\n \"server_url\": \"https://demo.api.deepaffex.cn:9443\",\n \"websocket_url\": \"wss://demo.api.deepaffex.cn:9080\"\n }\n }\n try:\n self.server_url = self.__valid_servers[self.server][\"server_url\"]\n self.websocket_url = self.__valid_servers[self.server][\"websocket_url\"]\n except KeyError:\n raise KeyError(\"Invalid server ID given\")", "def existing_url(module):\n # Build the format dictionary\n url_base = \"/axapi/v3/event-notification/kafka/server\"\n\n f_dict = {}\n\n return url_base.format(**f_dict)", "def normalize_api_url(self):\n def tester(self, api_url):\n \"\"\"\n Attempts to fetch general information about the MediaWiki instance\n in order to test whether *api_url* will return JSON.\n \"\"\"\n data = self._fetch_http(api_url, {'action': 'query',\n 'meta': 'siteinfo'})\n try:\n data_json = json.loads(data)\n return (data, data_json)\n except ValueError:\n return (data, None)\n\n data, data_json = tester(self, self._api_url)\n if data_json:\n return self._api_url\n else:\n # if there's an index.php in the URL, we might find the API\n if 'index.php' in self._api_url:\n test_api_url = self._api_url.split('index.php')[0] + 'api.php'\n print test_api_url\n test_data, test_data_json = tester(self, test_api_url)\n print (test_data, test_data_json)\n if test_data_json:\n self._api_url = test_api_url\n return self._api_url\n return None", "def test_generate_url_with_api_key():\n config = core.Config(api_key='FAKE')\n expected = \"{}?{}\".format(ENTREZ_URL, \"retmode=text&id=FAKE&db=nucleotide&api_key=FAKE&rettype=gbwithparts\")\n assert expected == core.generate_url(\"FAKE\", config)\n\n config.format = 'gff3'\n expected = \"{}?{}\".format(SVIEWER_URL, \"retmode=text&id=FAKE&db=nucleotide&api_key=FAKE&report=gff3\")\n assert expected == 
core.generate_url(\"FAKE\", config)", "def build_url(self, config, query):\n if(not os.environ['FLICKR_API_KEY']):\n raise ValueError('Environement variable \"FLICKR_API_KEY\" is empty')\n \n current_provider = [provider for provider in config['providers'] if provider['name'] == self.provider_name][0]\n current_provider['query']['text'] = str(query)\n current_provider['query']['api_key'] = os.environ['FLICKR_API_KEY']\n\n query_strings = helper.build_query_strings(current_provider['query'])\n\n return current_provider['base_url'] + query_strings", "def get_api_url(self, query_, api):\n api_url = \"%s%s%s\" % (api, query_, self.api_key)\n\n return api_url", "def test_get_datafile_url__url_and_template_provided(self, _):\n test_url_template = 'www.optimizelydatafiles.com/{sdk_key}.json'\n test_url = 'www.myoptimizelydatafiles.com/my_key.json'\n self.assertEqual(\n test_url, config_manager.PollingConfigManager.get_datafile_url(None, test_url, test_url_template),\n )", "def api_url(self):\n return self.get_api_url()", "def getAPI(self):\n return self.api_url", "def url(self, api_name):\n return \"https://%s/api/%s/%s/\" % (self.host, self.api_version, api_name)", "def _get_url(context, actual, attribute_name, port):\n return actual or _get_api_url(context, attribute_name, port)", "def test_get_datafile_url__sdk_key_and_url_and_template_provided(self, _):\n test_sdk_key = 'optly_key'\n test_url_template = 'www.optimizelydatafiles.com/{sdk_key}.json'\n test_url = 'www.myoptimizelydatafiles.com/my_key.json'\n\n # Assert that if url is provided, it is always returned\n self.assertEqual(\n test_url, config_manager.PollingConfigManager.get_datafile_url(test_sdk_key, test_url, test_url_template),\n )", "def api(self) -> str:", "def test_uses_configured_servername_when_no_apiurl_defined(self):\n arguments = {\n '--api-url': None,\n '--server': None}\n config = {\n 'api_url': None,\n 'server': 'configured_stuff'}\n result = get_api_url(arguments, config)\n self.assertEqual(result, 'https://configured_stuff/afp-api/latest')\n self.assertEqual(self.mock_sanitize_host.call_count, 0)", "def existing_url(module):\n # Build the format dictionary\n url_base = \"/axapi/v3/fw/template/logging/{name}\"\n f_dict = {}\n \n f_dict[\"name\"] = module.params[\"name\"]\n\n return url_base.format(**f_dict)", "def test_returns_configured_apiurl_over_default(self):\n arguments = {'--api-url': None}\n config = {'api_url': 'configured_stuff'}\n result = get_api_url(arguments, config)\n self.assertEqual(result, 'configured_stuff')\n self.mock_sanitize_host.assert_not_called()", "def base_url(self):\n return 'http://%s/api.php?token=%s&path_info=' % \\\n (self.ac_url, self.api_key)", "def _prepare_url(self):\n\n base_url = '{}://{}{}'.format(\n self.client.protocol, self.client.base_url, self.api_path\n )\n url_parts = '/'.join(\n [part for part in self.parameters[constants.RequestConst.PATH]]\n )\n\n if url_parts:\n final_url = '{}/{}'.format(base_url, url_parts)\n else:\n final_url = base_url\n\n if self.method == constants.RequestConst.GET:\n params = self.parameters[constants.RequestConst.QUERY]\n for param, value in params.items():\n if isinstance(value, list):\n params[param] = ','.join(value)\n elif isinstance(value, dict):\n params[param] = ','.join([f'{k}:{v}' for k, v in value])\n\n url_query = '?' 
+ '&'.join([f'{k}={v}' for k, v in params.items()])\n final_url = '{}{}'.format(final_url, url_query)\n\n self.debug.ok('final url', final_url)\n\n return final_url", "def base_url():\n return json.loads('{\"message\": \"Try with /data\", \"success\": false}')", "def url(request):\n return request.config.getoption(\"--url\")", "def get_api_url():\n return \"https://api.basespace.illumina.com/v1pre3\"", "def apipath(self, code) -> str:\n return f'{self.scriptpath(code)}/api.php'" ]
[ "0.6248061", "0.61557114", "0.6150488", "0.61260974", "0.61066705", "0.6093489", "0.60448945", "0.6019714", "0.6001403", "0.599803", "0.59788513", "0.59691656", "0.59664416", "0.5960868", "0.594734", "0.5928648", "0.5926427", "0.5920128", "0.5910003", "0.58931714", "0.5890854", "0.5874805", "0.5814051", "0.58106554", "0.5786174", "0.57745886", "0.5770508", "0.5769352", "0.57639307", "0.57576144" ]
0.6174639
1
Initialize your data structure here. Set the size of the deque to be k.
def __init__(self, k: int):
    self.capacity = k
    self.frontIndex = 0
    self.lastIndex = 1
    self.deque = [0] * self.capacity
    self.size = 0  # current size
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, k):\n self._data = []\n self._length = k", "def __init__(self, k):\n self.queue = []\n self.size = k\n self.front = 0\n self.rear = 0", "def __init__(self, k):\n self.queue = [0]*k\n self.headIndex = 0\n self.count = 0\n self.capacity = k", "def __init__(self, k: int):\n self.queue = [0]*k\n self.headIndex = 0\n self.count = 0\n self.capacity = k", "def __init__(self, k: int):\n self.q = [0] * (k + 1)\n self.len = k + 1\n self.rear = 0\n self.front = 0", "def __init__(self, k):\n self.queue = []\n self.size = k\n self.rear = 0", "def __init__(self, k: int):\n self.queue = [0]*k\n self.headIndex = 0\n self.count = 0\n self.capacity = k\n # the additional attribute to protect the access of our queue\n self.queueLock = Lock()", "def __init__(self, k):\n self.capacity = k\n self.head = None\n self.tail = None\n self.count = 0", "def __init__(self, k: int):\n self.front = 0\n self.rear = 0\n self.capacity = k + 1\n self.arr = [0 for _ in range(self.capacity)]", "def __init__(self, k):\n self.queue = []\n self.size = k", "def __init__(self, k: int):\n self.capacity = k\n self.head = None\n self.tail = None\n self.count = 0", "def __init__(self, k: int):\n self.maxlen = k\n # 在Python中这样的操作可以提高效率吗(空间或者时间)? leetcode 提交是一样的\n self.circular = [None] * k\n self.front = 0\n self.rear = -1\n self.size = 0", "def __init__(self, k):\n self.__start = 0\n self.__size = 0\n self.__buffer = [0] * k", "def __init__(self, k):\r\n self.maxlen = k\r\n self.queue = []", "def __init__(self, k):\n self.queue = [\"\"] * k\n self.max_length = k\n self.start = -1\n self.end = -1", "def __init__(self, k: int):\n self.k = k\n self.q = ['#'] * k\n self.front = 0\n self.rear = 0\n self.empty = True", "def __init__(self, size):\n self.q = deque( maxlen=size)", "def __init__(self, size):\n self.values = collections.deque(maxlen = size)", "def __init__(self, k, num_buckets, fp_size, bucket_size, max_iter):\n self.children: List[Node] = []\n self.parent: Optional[Node] = None\n self.filter = CuckooFilterBit(num_buckets, fp_size, bucket_size, max_iter)\n\n self.dataset_id: Optional[str] = None\n self.k = k", "def __init__(self, size):\n self.cache = deque()\n self.max_size = size", "def __init__(self):\n self._deque = []", "def __init__(self):\n #Data Members\n print(\"Hello I am a Circular Array Deque, What is it that you require?\")\n self.capacity_=4\n self.size_ =0\n self.data_ = [None]*self.capacity_#Arry\n self.front_ = -1\n self.back_ = 0", "def __init__(self):\n self.deque = []", "def __init__(self, size: int):\n self.q = deque()\n self.max_size = size\n self.sum = 0.0", "def __init__(self, size):\n self.size = size\n self.q = collections.deque()\n self.sum_ = 0", "def __init__(self):\n self._data = [None]*ArrayQueue.DEFAULT_CAPACITY\n self._size = 0\n self._front = 0", "def __init__(self, size):\n self.size = size\n self.current_size = 0\n self.values = collections.deque()", "def __init__(self):\r\n self.deque = deque()", "def __init__(self, size):\n self.queue = collections.deque(maxlen = size)", "def __init__(self):\n self.min = sys.maxsize\n self.stk = deque()" ]
[ "0.7902773", "0.7679447", "0.7633498", "0.7627674", "0.7582613", "0.7521146", "0.75211114", "0.7503723", "0.7489586", "0.7476659", "0.7475489", "0.7424546", "0.73562276", "0.7342713", "0.73352766", "0.7096012", "0.6864832", "0.66151357", "0.6569535", "0.6564774", "0.6529423", "0.6476536", "0.64691144", "0.64528775", "0.64516276", "0.6407589", "0.63692695", "0.6310223", "0.63048255", "0.62939125" ]
0.82578045
0
Adds an item at the front of the Deque. Return true if the operation is successful.
def insert_front(self, value: int) -> bool:
    if self.size != self.capacity:
        self.deque[self.frontIndex] = value
        self.size += 1
        if self.frontIndex == 0:
            self.frontIndex = self.capacity - 1
        else:
            self.frontIndex -= 1
        return True
    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def push_front(self, e):\n if(self.size_ >= self.capacity_):#If our Deque is full we need to resize it first\n self.resize_front()\n self.data_[self.front_]= e#New Front\n self.size_+=1\n # print(\"Case 1\")\n elif(self.front_ == -1 and self.size_ ==0) :#If the Deque is intially empty then when we add the first item that will be both the front and the back \n self.front_= 0\n self.back_ = 0\n self.data_[self.front_]= e #Inserting First element in deque either front end or rear end they both lead to the same result.\n self.size_+=1\n # print(\"Case 2\")\n elif (self.front_ ==0):#If the front is at the beginning of the Deque.This may happen after the first insertion.\n self.front_-=1\n self.data_[self.front_] = e\n self.size_+=1\n # print(\"Case 3\")\n else:\n self.front_ -=1 #We add normally \n self.data_[self.front_] = e\n self.size_+=1\n #print(\"Case 4\")", "def insertFront(self, value: int) -> bool:\n \n if not self.isFull():\n self._deque[self._front] = value\n self._front = (self._front + 1) % self._k\n self._elems += 1\n return True\n \n return False", "def insert_and_check(self, item) -> bool:\n with Monitor.acquire(self):\n if item in self:\n return False\n self.add(item)\n return True", "def insertFront(self, value: int) -> bool:\n if not self.isFull():\n # 前端插入始终是先插入后移动,self.front始终指向多出来的那个坑\n self.q[self.front] = value\n self.front = self.move_backward(self.front)\n return True\n else:\n return False", "def enQueue(self, value):\n if self.rear - self.front < self.size:\n self.queue.append(value)\n self.rear += 1\n return True\n else:\n return False", "def add_item(self, item, index):\n if index in self.d_buffer.keys():\n return True\n elif len(self) < self._size:\n self.d_buffer.update({index: item})\n return True\n else:\n return False", "def insertFront(self, value):\n if not self.isFull():\n self._data.insert(0,value)\n return True\n else:\n return False", "def enqueue_front(self, item):\n self._items.insert(0, item)", "def insertFront(self, value: int) -> bool:\n if self.isFull():\n return False\n \n self.front = (self.front - 1 + self.capacity) % self.capacity\n self.arr[self.front] = value\n return True", "def enQueue(self, value):\n if not self.isFull():\n self.queue.append(value)\n self.rear += 1\n return True\n else:\n return False", "def enqueue(self, data):\n # Checking to avoid duplicate entry (not mandatory)\n if data not in self.queue:\n self.queue.insert(0, data)\n return True\n return False", "def add_front(self, item):\n\n self.items.insert(0, item)", "def enQueue(self, value):\n if not self.isFull():\n self.queue.append(value)\n return True\n else:\n return False", "def enQueue(self, value):\r\n if (len(self.queue) >= self.maxlen):\r\n return False\r\n else:\r\n self.queue.append(value)\r\n return True", "def push(self, item):\n super().add_item_to_front(item)", "def test_append_to_empty_deque_check_head(empty_deque):\n empty_deque.append(\"Denny Way\")\n assert empty_deque._deque.head.value == \"Denny Way\"", "def push(self, element):\n if not self.full():\n heapq.heappush(self.queue, element)\n self.size += 1\n return True\n else:\n if element >= self.queue[0]:\n heapq.heapreplace(self.queue, element)\n return True\n else:\n return False", "def insertFront(self, item):\n self.sentinel.insertAfter(item)\n self.N += 1", "def __nonzero__(self):\n if self._pushed:\n return True\n try:\n self.push(self.next())\n except StopIteration:\n return False\n return True", "def test_append_to_full_deque_check_head(full_deque):\n full_deque.append(\"Denny Way\")\n assert 
full_deque._deque.head.value == 3", "def test_head_of_deque_when_using_append(val, result, filled_deque):\n filled_deque.append(val)\n assert filled_deque._container.head.val == val", "def push(self, item):\n self.list.prepend(item)", "def add_first(self, data):\n self.deque.insert(0, data)", "def add(self, item: T) -> None:\n self._queue.append(item)\n if not self.is_empty():\n self._queue.sort(reverse=True)", "def add(self, data):\n wasquiet = True if (self.tail == self.curr) else False\n\n # Assert the queue is clean\n qtail = self.base + \".\" + str(self.tail)\n print \"creating %s\" % qtail\n assert not os.path.exists(qtail)\n qt = open(qtail, \"w\")\n qt.write(data)\n qt.close()\n\n # Where does the next item go\n self.tail += 1\n self._settail(self.tail)\n\n return wasquiet", "def add(self, item):\n if not (item in self.set):\n self.set[item] = True\n heapq.heappush(self.heap, item)", "def push_back(self, e):\n if(self.size_ >= self.capacity_):#If our Deque is full we need to resize it first\n self.resize_back()\n self.back_+=1\n self.data_[self.back_]= e\n self.size_+=1\n #print(\"case 1\")\n elif (self.front_ == -1 and self.size_==0):#If the Deque is intially empty then when we add the first item that will be both the front and the back \n self.front_= 0\n self.back_=0\n self.data_[self.back_]= e\n self.size_+=1\n else:#The Back is not at the first index(possibly somewhere in between) and if we push back it we have to go up by one to move to the new back\n self.back_+=1\n self.data_[self.back_] =e \n self.size_+=1", "def enqueue(self, item):\n\t\tself.items.insert(0, item)", "def test_appendleft_to_empty_deque_check_head(empty_deque):\n empty_deque.appendleft(\"Denny Way\")\n assert empty_deque._deque.head.value == \"Denny Way\"", "def insert_last(self, value: int) -> bool:\r\n if self.size != self.capacity:\r\n self.deque[self.lastIndex] = value\r\n self.size += 1\r\n if self.lastIndex == self.capacity - 1:\r\n self.lastIndex = 0\r\n else:\r\n self.lastIndex += 1\r\n return True\r\n return False" ]
[ "0.6975975", "0.6975836", "0.69515646", "0.6903378", "0.68868315", "0.68819815", "0.68557245", "0.68127394", "0.678145", "0.6776072", "0.6745622", "0.6681152", "0.66517055", "0.66379315", "0.6589232", "0.65483433", "0.6511195", "0.6491245", "0.6474825", "0.6454851", "0.64367294", "0.63845927", "0.6377922", "0.63012326", "0.63010275", "0.6295451", "0.6294104", "0.6282221", "0.6276925", "0.6271732" ]
0.7306541
0
Take arbitrary number of str arguments (not list) and return expanded, absolute path to a user's (or user-defined) cltk_data dir.
def make_cltk_path(*fp_list): return os.path.join(CLTK_DATA_DIR, *fp_list)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def local_data_dir():\r\n assert sys.argv\r\n prefix_path = os.path.dirname(sys.argv[0])\r\n local_data = os.path.join(prefix_path, \"data\")\r\n return local_data", "def get_app_data_dir(appname, *args):\n import ubelt as ub\n ub.schedule_deprecation(\n modname='ubelt', name='get_app_data_dir and ensure_app_data_dir', type='function',\n migration='use ubelt.Path.appdir(type=\"data\") instead',\n deprecate='1.2.0', error='2.0.0', remove='2.1.0')\n dpath = join(platform_data_dir(), appname, *args)\n return dpath", "def datafilepath(*filename):\r\n import os\r\n return makepath(os.path.join(base_dir, *filename))", "def get_data(path=None):\n import os\n location = os.path.dirname(__file__).replace('/fun', '/ax')\n if path is None:\n print(\"Choose one: \")\n print(\"\\n\".join(os.listdir(os.path.abspath(location))))\n else:\n return os.path.join(os.path.abspath(location), path)", "def datapath(cls, *fname):\n return osp.join(cls.datadir, *fname)", "def get_data_path():\n return os.getcwd() + \"/data/\"", "def getDataPath():\n\treturn \"..\" + os.sep + \"data\" + os.sep", "def get_data_path(file_name=None):\n if file_name is None:\n file_name = \"\"\n return os.path.join(DATA_DIR, file_name)", "def construct_data_path (self, scene_rpath, category):\n dirname, basename = os.path.split (scene_rpath)\n baseprefix, basesuffix = os.path.splitext (basename)\n data_rpath = os.path.join ('data', category, baseprefix + '.dsf')\n return data_rpath", "def data_dir():\n #data_path = os.path.dirname(intervene.__file__)\n #data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'example_data')\n #print(data_path)\n return os.path.join(os.path.dirname(__file__), 'example_data')", "def locate_data():\n # Locate by using the environment variable\n if \"TESSDATA_PREFIX\" in os.environ:\n data_prefix = os.environ[\"TESSDATA_PREFIX\"]\n\n if os.path.isdir(data_prefix):\n return data_prefix\n\n # Locate by using the command directory\n cmd_path = os.path.dirname(_config.command)\n\n if cmd_path:\n cmd_data_path = os.path.join(cmd_path, \"tessdata\")\n\n if os.path.isdir(cmd_data_path):\n return cmd_data_path\n\n return None", "def path_finder(cls, *args):\n safe_test_data = os.path.join(\n os.path.dirname(__file__),\n '../tasks/tests/data')\n safe_test_data = os.path.abspath(safe_test_data)\n return os.path.join(safe_test_data, *args)", "def parse_data_dir(data_dir):\n from os import path, getcwd\n import sys\n\n try:\n assert isinstance(data_dir, str), \\\n \"Parameter type(data_dir) must be str\"\n except AssertionError, ae:\n log.err(ae)\n\n if data_dir.startswith('~'):\n data_dir = path.expanduser(data_dir)\n elif data_dir.startswith('/'):\n data_dir = path.join(getcwd(), data_dir)\n elif data_dir.startswith('./'):\n data_dir = path.abspath(data_dir)\n else:\n data_dir = path.join(getcwd(), data_dir)\n\n try:\n assert path.isdir(data_dir), \"Could not find %s\" % data_dir\n except AssertionError, ae:\n log.err(ae)\n sys.exit()\n else:\n return data_dir", "def dataset_path(dataset, work_dir=consts.WORK_DIR):\r\n return join_path(work_dir, consts.DATA_DIR, dataset)", "def get_absolute_path(*args):\n directory = os.path.dirname(os.path.abspath(__file__))\n return os.path.join(directory, *args)", "def read_path():\n global path\n if len(sys.argv) >= 2:\n path = sys.argv[1]\n else:\n path = \"train\"", "def get_data_file():\n this_directory = os.path.dirname(__file__)\n parent_directory = os.path.dirname(this_directory)\n return os.path.join(parent_directory, '_data/fortunes.txt')", "def 
data_dir(path=None, base=None, subdir=None, max_levels=100):\n path = path or _get_caller_path()\n return _data_science_dir(\n path=path, dirname='data', base=base,\n subdir=subdir, max_levels=max_levels)", "def example_data_path(filename=None):\n if filename is None:\n filename = ''\n return os.path.join(os.path.abspath(os.path.dirname(__file__)),\n 'example_data', filename)", "def get_full_path(path, *args):\n\n return os.path.join(_search_parent_dir(\".wit\"), *args, path)", "def parse_user_input():\n DISC = 'Generate dataset from input files to one csv frame.'\n parser = argparse.ArgumentParser(description=DISC)\n\n # USER ARGS\n parser.add_argument('-raw_dir',\n type=str,\n help='Path to the dir of raw data.',\n required=True\n )\n\n parser.add_argument('-csv_file',\n type=str,\n help='CSV file of the utterances to transform.',\n required=True\n )\n\n parser.add_argument('-feature_dir',\n type=str,\n help='Path to the dir of output feature representations.',\n required=True\n )\n\n parser.add_argument('-feature_type',\n type=str,\n help='Feature representation of the speech signal.',\n required=True\n )\n\n return parser.parse_args()", "def data_path(scope=\"session\"):\n return join(dirname(__file__), pardir, \"new_data\")", "def get_pdata_path(base_name, recurs):\n base_name = base_name.replace(os.sep, '_')\n return join(PYLINT_HOME, \"%s%s%s\"%(base_name, recurs, '.stats'))", "def get_path(data_path):\n\treturn os.path.dirname(os.path.realpath(__file__)) + os.sep + data_path", "def get_default_paths():\n DATA_ROOT = os.environ.get(\"DATA_ROOT\", \"data\")\n defaults = {\n \"TOKENIZE_DATA_DIR\": DATA_ROOT + \"/tokenize\",\n \"MWT_DATA_DIR\": DATA_ROOT + \"/mwt\",\n \"LEMMA_DATA_DIR\": DATA_ROOT + \"/lemma\",\n \"POS_DATA_DIR\": DATA_ROOT + \"/pos\",\n \"DEPPARSE_DATA_DIR\": DATA_ROOT + \"/depparse\",\n \"ETE_DATA_DIR\": DATA_ROOT + \"/ete\",\n \"NER_DATA_DIR\": DATA_ROOT + \"/ner\",\n \"CHARLM_DATA_DIR\": DATA_ROOT + \"/charlm\",\n \"SENTIMENT_DATA_DIR\": DATA_ROOT + \"/sentiment\",\n \"CONSTITUENCY_DATA_DIR\": DATA_ROOT + \"/constituency\",\n\n # Set directories to store external word vector data\n \"WORDVEC_DIR\": \"extern_data/wordvec\",\n\n # TODO: not sure what other people actually have\n # TODO: also, could make this automatically update to the latest\n \"UDBASE\": \"extern_data/ud2/ud-treebanks-v2.11\",\n \"UDBASE_GIT\": \"extern_data/ud2/git\",\n\n \"NERBASE\": \"extern_data/ner\",\n \"CONSTITUENCY_BASE\": \"extern_data/constituency\",\n \"SENTIMENT_BASE\": \"extern_data/sentiment\",\n\n # there's a stanford github, stanfordnlp/handparsed-treebank,\n # with some data for different languages\n \"HANDPARSED_DIR\": \"extern_data/handparsed-treebank\",\n\n # directory with the contents of https://nlp.stanford.edu/projects/stanza/bio/\n # on the cluster, for example, /u/nlp/software/stanza/bio_ud\n \"BIO_UD_DIR\": \"extern_data/bio\",\n\n # data root for other general input files, such as VI_VLSP\n \"EXTERN_DIR\": \"extern_data\",\n }\n\n paths = { \"DATA_ROOT\" : DATA_ROOT }\n for k, v in defaults.items():\n paths[k] = os.environ.get(k, v)\n\n return paths", "def data_dir():\n return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')", "def abspath(self, *args):\n return os.path.join(self._spool, *args)", "def root(*args):\n return join(abspath(dirname(__file__)), *args)", "def dirpath(self, *args, **kwargs):\n if not kwargs:\n path = object.__new__(self.__class__)\n path.strpath = dirname(self.strpath)\n if args:\n path = path.join(*args)\n return path\n return 
self.new(basename=\"\").join(*args, **kwargs)", "def _argListToDir(argNameList, valueCmb, baseDir, connectionSymbol=\"=\", seperationSymbol=\",\"):\n\t# make a list of type [[arg1=xx, arg2=xx],arg3=xx,...]:\n\ttmp = listR.mimic(argNameList, listR.strZip(listR.FL(argNameList), listR.FL(valueCmb), connectionSymbol))\n\t# join sublist using seperationSymbol:\n\ttmp = map(lambda x:seperationSymbol.join(listR.toList(x)), tmp)\n\t# join them with os.path.join\n\treturn os.path.join(baseDir, os.path.sep.join(tmp))" ]
[ "0.58661675", "0.5668878", "0.56555104", "0.5650495", "0.5595522", "0.5523274", "0.54594797", "0.5454015", "0.5431963", "0.540515", "0.5386639", "0.5351135", "0.5348916", "0.53021574", "0.5289212", "0.5272937", "0.52652055", "0.52642393", "0.5259409", "0.5253235", "0.5218821", "0.5211357", "0.5205563", "0.5193186", "0.51814675", "0.51701313", "0.51471394", "0.51461256", "0.5117076", "0.51168007" ]
0.6467885
0
rpc SetDebugLevel (DebugLevelRequest) returns (BaseResponse) { option (google.api.http) = {
def SetDebugLevel(self, request, context):
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_debuglevel(self, level):\n self.debugging = level", "def setDebugFLags(sessionId):\n try:\n if not authentication.checkSessionId(sessionId, ADMIN):\n abort(403)\n if request.data is None:\n abort(400)\n debugFlags = json.loads(request.data)\n DebugFlags.setDebugFlags(debugFlags)\n return jsonify({\"status\": \"OK\"})\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n print sys.exc_info()\n traceback.print_exc()\n util.logStackTrace(sys.exc_info())\n raise", "def SetLogLevel(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def set_level(self, debug_level, verbose=False):\n self.debug_level = debug_level\n self.verbosity = verbose\n level = logging.INFO\n if debug_level > 4:\n level = logging.DEBUG - 3\n elif debug_level > 0:\n level = logging.DEBUG - debug_level + 1\n elif verbose:\n level = logging.INFO - 1\n self.mylog.setLevel(level)\n self.handler.setLevel(level)", "def configure_logging(debugLevel):\n\n if(debugLevel == 1):\n logging.basicConfig(level=logging.INFO)\n elif(debugLevel >= 2):\n logging.basicConfig(level=logging.DEBUG)", "def catalogSetDebug(level):\n ret = libxml2mod.xmlCatalogSetDebug(level)\n return ret", "def debug(self, s, level=1):\n if self._debug >= level:\n print(s)", "def _set_debug_mode(self, **kwargs):\n self.debug_mode = myint(kwargs.get(\"value\"))\n self.parent.logger.info(\"CameraServer:set_debug_mode: %d\" % (self.debug_mode))\n self.set_driver('GV4', self.debug_mode, uom=25, report=True)\n self.logger.setLevel(self.debug_mode)\n return True", "def set_debug(self, value=True):\n self.debug = value", "def setLevel(newLevel):\n Verbose.__level = max(-1, newLevel)", "def _set_debug(debug):\n global _DEBUG\n _DEBUG = debug\n if debug:\n logging.disable(logging.NOTSET)\n else:\n logging.disable(logging.DEBUG)", "def set_debug_mode(self, value):\n self.debug = value", "def debug(self, level, *args):\n try:\n basestring\n except NameError:\n basestring = str\n if isinstance(level, basestring):\n args = (level,) + args\n level = 1\n self.mylog.log(logging.DEBUG - level + 1, *args)", "def set_debug(self, debug):\n self._debug = debug\n return self", "def set_debug(self, debug):\n self.debug = debug", "def set_level(self, device_id, new_level):\n\t\treturn self.post(self.value_url % (ART_SERVER_HOST, device_id), {'value':new_level })", "def set_debug(debug_val):\n global _DEBUG # noqa: PLW0603\n _DEBUG = debug_val", "def _trace(self):\n self.__aceQLHttpApi.trace()", "def SetTriacLevel(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def set_debug(flag):\n global debug\n debug = flag\n XLM.XLM_Object.debug = flag\n XLM.xlm_library.debug = flag\n XLM.ms_stack_transformer.debug = flag\n XLM.stack_transformer.debug = flag\n XLM.excel2007.debug = flag", "def debug(statement,level=0):\n if config['debug']:\n if level <= config['debug_level']:\n print(statement)", "def _set_debug_mode(self, value):\n self.debug_mode = value\n self.l_info(\"_set_debug_mode\",\"%d\" % (self.debug_mode))\n self.set_driver('GV4', self.debug_mode, uom=25, report=True)\n self.logger.setLevel(self.debug_mode)\n return True", "def setDebug():\n\tglobal debug\n\tdebug = True", "def setLogLevel(level):\n None", "def set_debug(self):\n self.logger.setLevel(5)\n if self.uses_adc:\n self.adc.logger.setLevel(5)", "def 
setLogLevel(self,value):\n self.PDFreactorConfiguration.in1[\"logLevel\"] = value", "def set_logging_level(self, level):\n if str(level) == '1':\n self.logging_level = logging.DEBUG\n elif str(level) == '2':\n self.logging_level = logging.INFO\n elif str(level) == '3':\n self.logging_level = logging.WARNING\n elif str(level) == '4':\n self.logging_level = logging.ERROR\n elif str(level) == '5':\n self.logging_level = logging.CRITICAL", "def get_debug_level(self):\n return self.debug_level", "def set_level(self, level: LogLevel):\n pass", "def SetDebugMode(enabled=True):\n global option\n option['debug_mode'] = enabled" ]
[ "0.6524345", "0.61447215", "0.6071056", "0.6021396", "0.56288624", "0.5567556", "0.55668926", "0.5558487", "0.55518526", "0.5518022", "0.55119455", "0.5408538", "0.53755057", "0.5363909", "0.5355801", "0.5307751", "0.53025997", "0.5299265", "0.52971476", "0.5263971", "0.5263712", "0.5262109", "0.5256931", "0.5253445", "0.52525884", "0.5245388", "0.523909", "0.5231722", "0.52263314", "0.5221142" ]
0.79708946
0
rpc UpdateNetworkID (UpdateNetworkIDRequest) returns (BaseResponse) { option (google.api.http) = {
def UpdateNetworkID(self, request, context):
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetNetworkID(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def put(self, id):\n context = request.environ.get('context')\n net_obj = dbapi.networks_update(context, id, request.json)\n return jsonutils.to_primitive(net_obj), 200, None", "def modify_network(self, username, machine_name, new_network, txn_id):\n logger = get_task_logger(txn_id=txn_id, task_id=self.request.id, loglevel=const.VLAB_ONEFS_LOG_LEVEL.upper())\n resp = {'content' : {}, 'error': None, 'params': {}}\n logger.info('Task starting')\n try:\n vmware.update_network(username, machine_name, new_network)\n except ValueError as doh:\n logger.error('Task failed: {}'.format(doh))\n resp['error'] = '{}'.format(doh)\n logger.info('Task complete')\n return resp", "def put(self, id):\n context = request.environ.get('context')\n obj = dbapi.networks_data_update(context, id, request.json)\n resp = {\"data\": jsonutils.to_primitive(obj.variables)}\n return resp, 200, None", "def update_network(self, context, net_id, network):\n\n LOG.debug(_(\"QuantumRestProxyV2.update_network() called\"))\n\n # Validate Args\n if network[\"network\"].get(\"admin_state_up\"):\n if network[\"network\"][\"admin_state_up\"] is False:\n LOG.warning(_(\"Network with admin_state_up=False are not yet \"\n \"supported by this plugin. Ignoring setting for \"\n \"network %s\", net_name))\n\n # update DB\n orig_net = super(QuantumRestProxyV2, self).get_network(context, net_id)\n tenant_id = orig_net[\"tenant_id\"]\n new_net = super(QuantumRestProxyV2, self).update_network(\n context, net_id, network)\n\n # update network on network controller\n if new_net[\"name\"] != orig_net[\"name\"]:\n try:\n resource = NETWORKS_PATH % (tenant_id, net_id)\n data = {\n \"network\": new_net,\n }\n ret = self.servers.put(resource, data)\n if not self.servers.action_success(ret):\n raise RemoteRestError(ret[2])\n except RemoteRestError as e:\n LOG.error(_(\"QuantumRestProxyV2: Unable to update remote \"\n \"network: %s\"), e.message)\n # reset network to original state\n super(QuantumRestProxyV2, self).update_network(\n context, id, orig_net)\n raise\n\n # return updated network\n return new_net", "def update_network(self, context, net_id, network):\n LOG.debug(_(\"NeutronRestProxyV2.update_network() called\"))\n\n self._warn_on_state_status(network['network'])\n\n session = context.session\n with session.begin(subtransactions=True):\n new_net = super(NeutronRestProxyV2, self).update_network(\n context, net_id, network)\n self._process_l3_update(context, new_net, network['network'])\n\n # update network on network controller\n self._send_update_network(new_net, context)\n return new_net", "def set_network_id(self, sNetworkId):\n\t\tcall_sdk_function('PrlVirtNet_SetNetworkId', self.handle, sNetworkId)", "def update_network(**kwargs):\n\n ip_addr = kwargs.get('ip_addr')\n is_private = kwargs.get('is_private')\n name = kwargs.get('name')\n dns_names = kwargs.get('dns_names')\n is_scanning = kwargs.get('is_scanning', False)\n network_id = make_shortuuid(name)\n\n network = {\n 'dns_names': dns_names,\n 'ip_addr': ip_addr,\n 'is_private' : is_private,\n 'name': name,\n 'id': network_id,\n 'is_scanning': is_scanning,\n 'updated_count': 0\n\n }\n\n network_exists = r.table(\"networks\").insert([network], conflict=\"update\")\n\n return network_exists.run(conn)", "def _get_network_nsx_id(self, context, neutron_id):\n pass", "def put(self, id):\n 
context = request.environ.get('context')\n net_obj = dbapi.net_interfaces_update(context, id, request.json)\n return jsonutils.to_primitive(net_obj), 200, None", "def get_network_id(self):\n\t\treturn call_sdk_function('PrlVirtNet_GetNetworkId', self.handle)", "def post_network_ipam_update(self, resource_id, resource_dict):\n pass", "def network_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"network_id\")", "def network_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"network_id\")", "def network_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"network_id\")", "def put(self, request, nnid):\n try:\n input_parm = request.data\n input_parm['nn_id'] = nnid\n if input_parm.get('automl_parms') == None:\n input_parm['automl_parms'] = {}\n if input_parm.get('automl_runtime') == None:\n input_parm['automl_runtime'] = {}\n if input_parm.get('automl_stat') == None:\n input_parm['automl_stat'] = {}\n return_data = NNCommonManager().update_nn_info(input_parm)\n return Response(json.dumps(return_data))\n except Exception as e:\n return_data = {\"status\": \"404\", \"result\": str(e)}\n return Response(json.dumps(return_data))", "def do_nic_update(cc, args):\n\n patch = utils.args_array_to_patch(args.attributes[0])\n result = cc.nic.update(args.uuid, patch)\n cliutils.print_dict(result)", "def network_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"network_id\")", "def _get_network_id(self):\n pubnet = self.conn.network.find_network('public')\n net = self.conn.network.find_network(self.net_conf['net_name'])\n subnet = self.conn.network.find_subnet(self.net_conf['subnet_name'])\n # TODO: Add support for security group\n\n self.network_id = {\n 'public': pubnet.id,\n 'net': net.id,\n 'subnet': subnet.id\n }", "def UpdateOIDCClient(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def restart(self, cleanup=False): \n params = {'command':'restartNetwork',\n 'id':self.id,\n 'cleanup':cleanup}\n\n name = self.name\n self.logger.debug('Restart network %s' % name)\n\n try:\n response = self.send_request(params)\n res = json.loads(response)\n clsk_job_id = res['restartnetworkresponse']['jobid']\n self.logger.debug('Start job over %s.%s - %s: %s' % (\n self._obj_type, self.name, \n 'restartNetwork', res))\n return clsk_job_id\n except KeyError as ex :\n self.logger.error('Error parsing json data: %s' % ex)\n raise ClskError('Error parsing json data: %s' % ex)\n except ApiError as ex:\n self.logger.error(ex)\n raise ClskError(ex)", "def fusion_api_edit_network_set(self, body=None, uri=None, api=None, headers=None):\n return self.network_set.update(body, uri, api, headers)", "def cni_network_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"cni_network_id\")", "def update_dme(username, password, dme_id, ip_address):\r\n dme_url = 'https://www.dnsmadeeasy.com/servlet/updateip'\r\n dme_url += '?username=%s&password=%s&id=%s&ip=%s'\r\n s = urllib2.urlopen(dme_url % (username, password, dme_id, ip_address))\r\n return s.read()", "def network_id(tenant_id, auth_token, network_name):\r\n content = common_utils.do_request(\r\n tenant_id, auth_token,\r\n method='GET',\r\n body='', service=\"network\",\r\n path='networks.json')\r\n for network in range(len(content[\"networks\"])):\r\n if content[\"networks\"][network][\"name\"] == network_name:\r\n network_id = content[\"networks\"][network][\"id\"]\r\n return network_id", "def 
ModifyNetwork(self, network, reason=None, **kwargs):\n query = []\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_PUT,\n (\"/%s/networks/%s/modify\" %\n (GANETI_RAPI_VERSION, network)), None, kwargs)", "def get_network_id(options, network):\n service_instance = get_vc_content(options)\n datacenter = get_datacenter(options)\n for item in datacenter.networkFolder.childEntity:\n if (item.name == network):\n return item._GetMoId()", "def update_status(request_id, status):\n pass", "def run(self, network_update_args, network_create_args=None):\n network = self.neutron.create_network(**(network_create_args or {}))\n self.neutron.update_network(network[\"id\"], **network_update_args)", "def put(self):\n if not request.is_json:\n parser = reqparse.RequestParser()\n parser.add_argument(constants.PID_KEY, help='Process id')\n parser.add_argument(constants.GID_KEY, help='Group id')\n data = parser.parse_args()\n else:\n data = request.json\n group_id = data[constants.GID_KEY]\n process_id = data[constants.PID_KEY]\n utils.check_process_id_in_req(process_id)\n utils.check_group_id_in_req(group_id)\n _check_group_exists(group_id)\n GID_COORD_DICT[group_id] = (process_id, request.remote_addr)\n response = {constants.COORD_PID_KEY: GID_COORD_DICT[group_id][0],\n constants.COORD_IP_KEY: GID_COORD_DICT[group_id][1]}\n return response" ]
[ "0.6377155", "0.597741", "0.56649214", "0.56214505", "0.5573502", "0.5530543", "0.55015075", "0.54945767", "0.53921235", "0.530519", "0.52978355", "0.52969354", "0.5288424", "0.5288424", "0.5288424", "0.52693397", "0.5260793", "0.5172831", "0.516545", "0.5126393", "0.5124526", "0.50992614", "0.5042299", "0.5013844", "0.5009878", "0.5001176", "0.49581987", "0.4897491", "0.48697874", "0.48162994" ]
0.83613276
0
rpc GetNetworkID (GetNetworkIDRequest) returns (GetNetworkIDResponse) { option (google.api.http) = {
def GetNetworkID(self, request, context):
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_network_id(self):\n\t\treturn call_sdk_function('PrlVirtNet_GetNetworkId', self.handle)", "def get_network_id(options, network):\n service_instance = get_vc_content(options)\n datacenter = get_datacenter(options)\n for item in datacenter.networkFolder.childEntity:\n if (item.name == network):\n return item._GetMoId()", "def network_id(tenant_id, auth_token, network_name):\r\n content = common_utils.do_request(\r\n tenant_id, auth_token,\r\n method='GET',\r\n body='', service=\"network\",\r\n path='networks.json')\r\n for network in range(len(content[\"networks\"])):\r\n if content[\"networks\"][network][\"name\"] == network_name:\r\n network_id = content[\"networks\"][network][\"id\"]\r\n return network_id", "def network_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"network_id\")", "def network_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"network_id\")", "def network_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"network_id\")", "def network_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"network_id\")", "def UpdateNetworkID(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_net_id(self, net_name):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/networks\"\n _headers = {'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n result = self.request(\"GET\", _url, _headers, _body)\n\n if result is None:\n LOG_OBJ.error(\n \"No response from Server while trying to\"\n \" get networks of tenant: %s\" %\n self.project_info[\"project_id\"])\n return result\n\n if result.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get network Failed with status %s \" % result.status)\n return result.status\n\n output = json.loads(result.data)\n LOG_OBJ.debug(\"Networks: %s\" % output['networks'])\n\n for nets in output['networks']:\n if nets['name'].lower() == net_name.lower() and \\\n net_name == config.extnet_name:\n LOG_OBJ.debug(\"Net ID : %s \" % nets['id'])\n return nets['id']\n if nets['name'].lower() == net_name.lower() and \\\n nets['tenant_id'] == self.project_info[\"project_id\"]:\n LOG_OBJ.debug(\"Net ID : %s \" % nets['id'])\n return nets['id']\n\n LOG_OBJ.debug(\"Net:%s Not Found\" % net_name)\n return", "def get_network(self, network_id):\n url = '%s/v2.0/networks/%s' % (self.catalog['network'], network_id)\n res = self.get(url)\n if res['status'] == 200:\n return json.loads(res['body'])['network']\n else:\n LOG.error('Get network failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)", "def _get_network_id(self):\n pubnet = self.conn.network.find_network('public')\n net = self.conn.network.find_network(self.net_conf['net_name'])\n subnet = self.conn.network.find_subnet(self.net_conf['subnet_name'])\n # TODO: Add support for security group\n\n self.network_id = {\n 'public': pubnet.id,\n 'net': net.id,\n 'subnet': subnet.id\n }", "def cni_network_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"cni_network_id\")", "def getNetworksDetails(network_id):\n logger.debug('Start.')\n code, res = rest_requests.get(networks_url + \"/\" + network_id)\n if code != requests.codes.ok:\n logger.error((code, res))\n return None\n return res[\"network\"]", "def _get_network_nsx_id(self, context, neutron_id):\n pass", "def get_virtual_network_id(self):\n\t\treturn call_sdk_function('PrlVmDevNet_GetVirtualNetworkId', 
self.handle)", "def get_NID():\n return NID", "def cloud_services_network_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"cloud_services_network_id\")", "def lookup_netid(self, netid):\n self.setQuery(\"\"\"Select ?uid where {\n ?who <http://vivo.dartmouth.edu/ontology/netId> \"%s\" .\n ?who <http://vivo.dartmouth.edu/ontology/geiselId> ?uid .\n }\"\"\" % (netid))\n\n try:\n rval = self.query()\n try:\n g = rval.convert()\n except:\n pass\n return g['results']['bindings'][0]['uid']['value']\n except:\n return None", "def virtual_network_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"virtual_network_id\")", "def get_network_interface(\n name=None,\n network_interface_id=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n r = {}\n result = _get_network_interface(conn, name, network_interface_id)\n if \"error\" in result:\n if result[\"error\"][\"message\"] == \"No ENIs found.\":\n r[\"result\"] = None\n return r\n return result\n eni = result[\"result\"]\n r[\"result\"] = _describe_network_interface(eni)\n return r", "def external_network_id(self) -> str:\n return pulumi.get(self, \"external_network_id\")", "async def retrieveId(self):\n url = self._buildUrl(rest_method=\"id\")\n try:\n status, text = await self.fetch(url)\n if status != 200:\n log.error(\"Unexpected status code retrieving ID: %r\",\n status)\n raise ConnectionError(f'Error. Status:{status}')\n log.debug(\"Retrieve ID response text: %r\", text)\n return text\n except Exception as error:\n log.exception(\"Error retrieving ID: %r\", error)\n pathError = \"\"\n if self._options.path == \"/\" and \\\n self._options.host != util.CLOUD_HOST:\n pathError = \\\n \" If you passed in a 'path' to your \" \\\n \" self-hosted PeerServer, \" \\\n \" you'll also need to pass in that \" \\\n \" same path when creating a new \" \\\n \" Peer.\"\n raise ConnectionError(\n \"Could not get an ID from the server.\" +\n pathError)", "def virtual_network_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"virtual_network_id\")", "def GetNetwork(self, network, reason=None):\n query = []\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_GET,\n \"/%s/networks/%s\" % (GANETI_RAPI_VERSION, network),\n query, None)", "def get_network_by_id(self, id):\n return self.network.get_network(id)", "def get_network(session, network):\n # type: (Session, str) -> Dict[str, Any]\n url_tail = f\"/{CoordConstsV2.RSC_NETWORKS}/{network}\"\n return _get_dict(session, url_tail)", "def network_interface_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"network_interface_id\")", "def get_network_info(self, network_id):\n return DictModel(self.call(self.context,\n self.make_msg('get_network_info',\n network_id=network_id,\n host=self.host),\n topic=self.topic))", "def get_network_url(project_id, network):\n assert is_valid_project_id(project_id), project_id\n assert is_valid_network(network), network\n return (\n 'https://www.googleapis.com/compute/v1/projects/%s/global/networks/%s' % (\n project_id, network))", "def get_network_interface_id(name, region=None, key=None, keyid=None, profile=None):\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n r = {}\n try:\n enis = conn.get_all_network_interfaces(filters={\"tag:Name\": name})\n if not enis:\n r[\"error\"] = {\"message\": \"No ENIs found.\"}\n elif len(enis) > 1:\n r[\"error\"] = {\"message\": \"Name specified is tagged on multiple 
ENIs.\"}\n else:\n eni = enis[0]\n r[\"result\"] = eni.id\n except boto.exception.EC2ResponseError as e:\n r[\"error\"] = __utils__[\"boto.get_error\"](e)\n return r" ]
[ "0.7171402", "0.6616125", "0.6607974", "0.6472358", "0.6472358", "0.6472358", "0.6451169", "0.6377823", "0.63719785", "0.63127875", "0.6158992", "0.61568797", "0.6053438", "0.60057765", "0.59049284", "0.5859211", "0.5817157", "0.5731048", "0.5705311", "0.5654971", "0.5641681", "0.5573612", "0.55470085", "0.5545637", "0.55378735", "0.5536892", "0.55365074", "0.5490189", "0.5489056", "0.5462492" ]
0.81038713
0
rpc AddNode (AddNodeRequest) returns (BaseResponse) { option (google.api.http) = {
def AddNode(self, request, context):
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_node(self, node):", "def addnode(self, ip_addr: str, cmd: str) -> None:\n assert type(ip_addr) == str\n assert cmd in COMMANDS\n return self.rpc_call(\"addnode\", ip_addr, cmd)", "def add_node (self, node):\n raise NotImplementedError", "def _add_node(self, node_name, node_type):\n q = 'MATCH (r:' + node_type + ') WHERE r.name=\"' \\\n + node_name + '\" RETURN r'\n results = self.db.query(q, returns=(client.Node, str, client.Node))\n res = self.db.labels.create(node_type)\n\n if (len(results) == 0):\n r = self.db.nodes.create(name=node_name)\n res.add(r)\n else:\n r = results[0][0]\n return r", "def post(self):\n node_id = blockchain.register_node(request.host)\n\n return {\n 'message': 'New node have been added.',\n 'node_id': node_id,\n 'nodes': list(blockchain.nodes)\n }, 201", "def view_addNode(self, user, cTag, nTag, pkg, exe, args='', name='',\r\n namespace=''):\r\n try:\r\n user.containers[cTag].addNode(nTag, pkg, exe, args, name, namespace)\r\n except KeyError:\r\n raise InvalidRequest('Can not add Node, because Container {0} '\r\n 'does not exist.'.format(cTag))\r\n\r\n # TODO: Return some info about success/failure of request\r", "def add_node (self, node):\n self.network.add_node(node.id)\n self.network.node[node.id] = node", "def register_with_existing_node():\n #print('********************')\n print(request.get_json())\n node_address = request.get_json()[\"node_address\"]\n if not node_address:\n return \"Invalid data\", 400\n\n data = {\"node_address\": request.host_url}\n headers = {'Content-Type': \"application/json\"}\n\n # Make a request to register with remote node and obtain information\n response = requests.post(node_address + \"/register_node\",\n data=json.dumps(data), headers=headers)\n\n if response.status_code == 200:\n global blockchain\n global peers\n # update chain and the peers\n chain_dump = response.json()['chain']\n blockchain = create_chain_from_dump(chain_dump)\n peers.update(response.json()['peers'])\n return \"Registration successful\", 200\n else:\n # if something goes wrong, pass it on to the API response\n #print(response.content)\n #print(response.status_code)\n return response.content, response.status_code", "def addNode( self, name, **opts ):\n self.g.add_node( name, **opts )\n return name", "def AddNode(self, node):\n self.nodes.append(node)\n return node", "def add_node(p, nodes, retval, size=0):\n if p.id not in nodes:\n nodes[p.id] = len(nodes)\n retval[\"nodes\"].append({\"id\": str(p.id), \"title\": p.title, \"size\": size})", "def register_with_existing_node():\n node_address = request.get_json()[\"node_address\"]\n if not node_address:\n return \"Invalid data\", 400\n\n data = {\"node_address\": request.host_url}\n headers = {'Content-Type': \"application/json\"}\n\n # Make a request to register with remote node and obtain information\n response = requests.post(node_address + \"/register_node\",\n data=json.dumps(data), headers=headers)\n\n if response.status_code == 200:\n global blockchain\n global peers\n # update chain and the peers\n chain_dump = response.json()['chain']\n blockchain = create_chain_from_dump(chain_dump)\n # peers.update(response.json()['peers'])\n peers.add(node_address+'/') #Add other node address to peers\n return \"Registration successful\", 200\n else:\n # if something goes wrong, pass it on to the API response\n return response.content, response.status_code", "def jsonrpc_add(self, a, b):\n # The doc string is part of the test.\n return a + b", "def test_add(self):\n query_string = [('x', 56),\n ('y', 
56)]\n response = self.client.open('/addition-api/1.0.0/add',\n method='GET',\n query_string=query_string)\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def addNode(self, appendIt=False, nodeId=None, childId=None, sublist=None, label=''):\n node = super().addNode(appendIt=appendIt, nodeId=nodeId, childId=childId,\n sublist=sublist, label=label)\n self.save(node)\n if self.atHead():\n self.saveHeadId(node.nodeId)\n return node", "def add_node():\n\ttry:\n\t\tif(check_entries()):\n\t\t\tserver_entry = [frame.entries[0].get(), str(frame.entries[1].get()+\".\"+frame.entries[2].get()+\".\"+frame.entries[3].get()+\".\"+frame.entries[4].get())]\n\t\t\tclient_entry = [frame.entries[5].get(), str(frame.entries[6].get()+\".\"+frame.entries[7].get()+\".\"+frame.entries[8].get()+\".\"+frame.entries[9].get())]\n\t\t\tconnection_lifetime = frame.entries[10].get()\n\t\t\tnetwork.add_connection(server_entry, client_entry, connection_lifetime);\n\n\texcept ValueError as err:\n\t\tfeedback.config(text=err)", "def addGenericNode():\n return render_template(\"addGenericNode.html\")", "def _add_node(self, node: int) -> None:\r\n self.nodes.add(node)", "def jsonrpc_add(self, a, b):\n return a + b", "def _add_node(self, input_tensors, output_tensors):\n raise NotImplementedError", "def add_node(parent_name, child_name, node):\n if node.name == parent_name:\n return node.add(Node(child_name))\n else:\n for child in node.children:\n add_node(parent_name, child_name, child)", "def add(self, x, y):\n req = JSONRPCRequest('add', [x, y])\n result = yield self._send(req)\n raise tornado.gen.Return(result)", "def add_node(self, node):\n\n # Add node only if it does not exist yet\n if node.id() in self.__nodes:\n return\n\n labels = node.labels()\n for label in labels:\n break\n\n if label not in self.__labels:\n self.__labels[label] = len(self.__labels)\n\n js = \"nodes.push({index: \" + str(node.id()) + \", \" +\\\n \"name: \\\"\" + str(node.id()) + \"\\\", \" +\\\n \"group: \" + str(self.__labels[label]) + \\\n \" });\"\n\n d3_node_id = self.frame.evaluateJavaScript(js) - 1\n self.__nodes[node.id()] = str(d3_node_id)\n logger.info(\"node id %s - > d3 id: %s\", node.id(), d3_node_id)", "def addRelatedNode():\n return render_template(\"addRelatedNode.html\")", "def add_node(self, node):\n self.nodes.append(node)", "def InvocationAddRequest(builder, request):\n return AddRequest(builder, request)", "def add_node(self, node):\n self.nodes.add(node)", "def addNode( self, n, **attr ):\n self._G.add_node(n, attr)", "def add_node(self, node_id):\n assert(node_id is not None)\n LOG.info(\"Try to add node=%s\" % node_id)\n\n try:\n enet = EnhNetNode(node_id)\n self.info.nodeAdd(enet.ident)\n # update net-params (enabled + up)\n self.info.netNodeUpdate(enet.nid, enet.net_params())\n LOG.debug(\"Successfully added node: %s\", str(enet))\n\n except TOPOLOGY.NodeAlreadyExists, exe:\n LOG.error(\"NodeAlreadyExists exception: %s\", str(exe))\n except TOPOLOGY.InternalProblems, exe:\n LOG.error(\"InternalProblems exception: %s\", str(exe))\n except TOPOLOGY.InvocationNotAllowed, exe:\n LOG.error(\"InvocationNotAllowed exception: %s\", str(exe))\n except Exception, exe:\n LOG.error(\"Generic exception: %s\", str(exe))", "def add():\n pass" ]
[ "0.6886949", "0.66823274", "0.6648524", "0.6632756", "0.65144575", "0.6461046", "0.63111734", "0.62823254", "0.6270688", "0.6242011", "0.61145186", "0.6106171", "0.61001", "0.6069039", "0.60320395", "0.60257554", "0.6008064", "0.5974223", "0.5972402", "0.5953031", "0.5950946", "0.59379315", "0.5920943", "0.5911886", "0.58926743", "0.58695024", "0.58671147", "0.58580023", "0.5841459", "0.58377826" ]
0.8054984
0
rpc GetBlockHeight (GetBlockHeightRequest) returns (GetBlockHeightResponse) { option (google.api.http) = {
def GetBlockHeight(self, request, context):
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fetch_block_height(self, blk_hash, cb):\r\n data = serialize.ser_hash(blk_hash)\r\n self.send_command('blockchain.fetch_block_height', data, cb)", "def get_block_hash(height):\n return requests.get(BASE+f'/api/block-index/{height}').json()['blockHash']", "def height(self):\n return self.client.call('GET', self.name + 'height')", "def get_block(self, crypto, block_height='', block_number='', latest=False):\n raise NotImplementedError(\n \"This service does not support getting getting block data. \"\n \"Or rather it has no defined 'get_block' method.\"\n )", "async def last_block_height(self, api_params: dict) -> Optional[int]:\n\n transform_id: str = api_params['transform_id']\n\n db = self.transform_storage_dbs[transform_id]\n value = db.get(Storage.LAST_BLOCK_HEIGHT_KEY)\n\n try:\n height = int(value)\n except Exception:\n height = None\n\n return height", "def get_block(blockhash):\n return requests.get(BASE+f'/api/block/{blockhash}').json()", "def get_block_at_height(height, headers):\n if height == 0:\n print('retrieving genesis block...')\n return GENESIS_DICTIONARY\n\n else:\n height_bottom = headers[0]['height'] # TODO can pass in height_bottom as an argument to save recompute\n result = headers[height_bottom + height - 2]\n assert height == result['height']\n return result", "def fetch_last_height(self, cb):\r\n self.send_command('blockchain.fetch_last_height', cb=cb)", "def getblockhash(self, blockheight):\n for block in self.blocks:\n if block[\"height\"] == int(blockheight):\n return block[\"hash\"]", "def get_first_block(blockchain):\n response = requests.get('https://api.blockcypher.com/v1/%s/main' % blockchain)\n if response.status_code == 200:\n return int(json.loads(response.content.decode('latin1'))['height'])\n elif response.status_code == 429:\n print('Too many requests')\n return -1", "def GetBlockHash(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def height(self) -> int:\n\t\treturn self._raw_result['data']['height']", "def GetBlock(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def block_size(self, block_id): # -> int:\n ...", "def height(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"height\")", "def height(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"height\")", "def height(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"height\")", "def get_new_height(self):\n return self.new_height", "def height(self):\n return self[\"height\"]", "def height(self):\n return self[\"height\"]", "async def get_block(self, api_params: dict) -> Optional[str]:\n\n height: int = api_params['height']\n transform_id: str = api_params['transform_id']\n\n db = self.transform_storage_dbs[transform_id]\n key = str(height).encode()\n value = db.get(key)\n value = value.decode() if value else value\n\n return value", "def get_blocks_by_height(self, height: int):\n try:\n return self.blocks[height]\n except:\n return None", "def height(self):\n self._updateExtents()\n return self._mHeight", "def rpc_getblockcount(self) -> int:\n return self._call_command([\"getblockcount\"])", "def height(rbt):\n try:\n return heightTree(rbt['root'])\n except Exception as exp:\n error.reraise(exp, 'RBT:height')", "def height(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"height\")", "def 
height(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"height\")", "def height(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"height\")", "def height(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"height\")", "def height(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"height\")" ]
[ "0.69185287", "0.6833825", "0.6609316", "0.6593401", "0.630603", "0.6188763", "0.6185624", "0.6182024", "0.6149188", "0.61161625", "0.60192716", "0.5953324", "0.58345455", "0.57578415", "0.5682839", "0.5682839", "0.5682839", "0.5676383", "0.56692904", "0.56692904", "0.5665602", "0.56540734", "0.56278354", "0.5619168", "0.5604993", "0.559785", "0.559785", "0.559785", "0.559785", "0.559785" ]
0.8232079
0
rpc GetBlockHash (GetBlockHashRequest) returns (GetBlockHashResponse) { option (google.api.http) = {
def GetBlockHash(self, request, context):
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_block(blockhash):\n return requests.get(BASE+f'/api/block/{blockhash}').json()", "def get_block_hash(height):\n return requests.get(BASE+f'/api/block-index/{height}').json()['blockHash']", "def get_blockHash(self, data):\n blockHash = data['blockHash']\n return blockHash", "def get_rawblock(blockhash):\n return requests.get(BASE+f'/api/rawblock/{blockhash}').json()['rawblock']", "def getblock(self, hash):\n return self.proxy.getblock(hash)", "def get_block(self, blockhash):\n\n return self._blocks.get(blockhash)", "def getblockhash(self, blockheight):\n for block in self.blocks:\n if block[\"height\"] == int(blockheight):\n return block[\"hash\"]", "def lookup_block_hash(db, block_number):\n validate_uint256(block_number)\n number_to_hash_key = make_block_number_to_hash_lookup_key(block_number)\n # TODO: can raise KeyError\n block_hash = rlp.decode(\n db.get(number_to_hash_key),\n sedes=rlp.sedes.binary,\n )\n return block_hash", "def lookup_block_hash(db, block_number):\n validate_uint256(block_number)\n number_to_hash_key = make_block_number_to_hash_lookup_key(block_number)\n # TODO: can raise KeyError\n block_hash = rlp.decode(\n db.get(number_to_hash_key),\n sedes=rlp.sedes.binary,\n )\n return block_hash", "def compute_hash(block):\n block_string = json.dumps(self.__dict__, sort_keys= True)\n return sha256(block_string.encode()).hexdigest()", "def calculate_hash(self, base_block_hash: bytes) -> bytes:\n from hathor.merged_mining.bitcoin import build_merkle_root_from_path, sha256d_hash\n coinbase_tx_hash = sha256d_hash(self.coinbase_head + base_block_hash + self.coinbase_tail)\n merkle_root = bytes(reversed(build_merkle_root_from_path([coinbase_tx_hash] + self.merkle_path)))\n return sha256d_hash(self.header_head + merkle_root + self.header_tail)", "def get_block_hash(header_bin):\n _hash = hashlib.sha256(hashlib.sha256(header_bin).digest()).digest()\n return reverse_hash(_hash.hex())", "def hash(block):\n # hashes a block\n #we must make sure that the dictionary is ordered, or we will have inconsistent hashes\n block_string = json.dumps(block, sort_keys = True).encode()\n return hashlib.sha256(block_string).hexdigest()\n #pass", "def hash(block):\r\n block_string = json.dumps(block, sort_keys=True).encode()\r\n return hashlib.sha256(block_string).hexdigest()", "def get_block_header_by_hash(db, block_hash):\n validate_word(block_hash)\n try:\n block = db.get(block_hash)\n except KeyError:\n raise BlockNotFound(\"No block with hash {0} found\".format(\n encode_hex(block_hash)))\n return rlp.decode(block, sedes=BlockHeader)", "def hash(self, block):\n block_string = json.dumps(block, sort_keys=True).encode()\n\n return hashlib.sha256(block_string).hexdigest()", "def hash(block):\n block_string = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(block_string).hexdigest()", "def hash(block):\n block_string = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(block_string).hexdigest()", "def hash(self, block):\r\n # Convert Dictionary To String\r\n\r\n encoded_block = json.dumps({'nonce': block['nonce'], # Create a string from the required fields\r\n 'transaction': block['transactions'],\r\n 'previous_hash': block['previous_hash']}, sort_keys=True).encode()\r\n\r\n # Hash The String And Return It\r\n return hashlib.sha256(encoded_block).hexdigest() # Return the hash\r", "def get_block_hash(index):\n # TODO: Require implementation\n pass", "def get_block(self, block_num):\n data = self._rpc_request(\"eth_getBlockByNumber\", [hex(block_num), True], 
\"result\")\n return GethBlock(data)", "def get_block_by_hash(self, block_hash):\n if block_hash in self.block_cache:\n return self.block_cache[block_hash]\n\n mongo_block = self.blocks.find_one({\"hash\": block_hash})\n\n if not mongo_block:\n raise KeyError(\"[get_block_by_hash] Block with hash %s not found.\" % block_hash)\n\n mongo_block_transactions = self.transactions.find({\"blockhash\": mongo_block['hash']})\n return MongoBlockFactory.from_mongo(mongo_block, mongo_block_transactions)", "def fetch_block_height(self, blk_hash, cb):\r\n data = serialize.ser_hash(blk_hash)\r\n self.send_command('blockchain.fetch_block_height', data, cb)", "def getBlock(self, hash):\n if hash in self.chain:\n return self.chain[hash]\n return None", "def get_block(self, crypto, block_height='', block_number='', latest=False):\n raise NotImplementedError(\n \"This service does not support getting getting block data. \"\n \"Or rather it has no defined 'get_block' method.\"\n )", "def compute_hash(self) -> str:\r\n #block_dict = self.__dict__.pop('hash', None) # Remove hash field value before calculating hash\r\n block_dict = self.__dict__.copy()\r\n block_dict.pop('hash', None) # Remove hash field value before calculating hash\r\n block_string = json.dumps(block_dict, sort_keys=True).encode('utf-8')\r\n return sha256(block_string).hexdigest()", "def hash(block):\n\n # Dictionary must be ordered, else hashes will be inconsistent\n block_string = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(block_string).hexdigest()", "def GetBlock(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def hash(block):\n # The dictionary MUST be ordered, or we can have inconsistent hashes\n block_string = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(block_string).hexdigest()", "def getblockhash(self, index):\n return self.proxy.getblockhash(index)" ]
[ "0.7595978", "0.7173805", "0.6853576", "0.6824187", "0.6586017", "0.6550719", "0.65430444", "0.6444644", "0.6444644", "0.63811195", "0.61842805", "0.6078641", "0.6065449", "0.606363", "0.6060592", "0.60471463", "0.6044907", "0.6044907", "0.60407466", "0.6018738", "0.59679776", "0.59664106", "0.5951817", "0.5905437", "0.59028596", "0.5876909", "0.5869821", "0.5848225", "0.58259547", "0.58215797" ]
0.78051454
0
rpc GetBlockHeader (GetBlockRequest) returns (GetBlockHeaderResponse) { option (google.api.http) = {
def GetBlockHeader(self, request, context):
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetBlock(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def GetBlockHash(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def fetch_block_header(self, index, cb):\r\n data = pack_block_index(index)\r\n self.send_command('blockchain.fetch_block_header', data, cb)", "def block_header(self):\n return self._current_block[0]", "def get_block(blockhash):\n return requests.get(BASE+f'/api/block/{blockhash}').json()", "def get_block_header_by_hash(db, block_hash):\n validate_word(block_hash)\n try:\n block = db.get(block_hash)\n except KeyError:\n raise BlockNotFound(\"No block with hash {0} found\".format(\n encode_hex(block_hash)))\n return rlp.decode(block, sedes=BlockHeader)", "async def refresh(self):\n block = await self.blockchain.rpc.get_block_header(self.identifier)\n if not block:\n raise BlockDoesNotExistsException\n await super(BlockHeader, self).__init__(\n block, blockchain_instance=self.blockchain, use_cache=self._use_cache\n )", "def GetBlockHeight(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_first_block(blockchain):\n response = requests.get('https://api.blockcypher.com/v1/%s/main' % blockchain)\n if response.status_code == 200:\n return int(json.loads(response.content.decode('latin1'))['height'])\n elif response.status_code == 429:\n print('Too many requests')\n return -1", "def get_block(self, crypto, block_height='', block_number='', latest=False):\n raise NotImplementedError(\n \"This service does not support getting getting block data. 
\"\n \"Or rather it has no defined 'get_block' method.\"\n )", "def block_info(self, block):\n # Allow for a list of blocks..\n block = utils.request_type(block)\n\n res = r.get(self.url + self.block + str(block))\n return self.execute(res)", "def getHeader():\n return _HEADER", "def get_source(block):\n raw_src = \"\"\n\n if (block.get(\"if\", None)):\n raw_src = block[\"if\"][\"condition\"][\"src\"]\n elif (block.get(\"return\", None)): \n raw_src = block[\"return\"][\"src\"]\n else:\n raise KeyError(\"Asking for the source of unknown contract block\")\n\n split_source = raw_src.split(\":\")\n \n return {\"offset\" : int(split_source[0]), \n \"length\" : int(split_source[1]) }", "def getBlock(self) -> ghidra.program.model.correlate.Block:\n ...", "def get_rawblock(blockhash):\n return requests.get(BASE+f'/api/rawblock/{blockhash}').json()['rawblock']", "def send_get_block_headers(\n self,\n block_number_or_hash: Union[BlockNumber, Hash32],\n max_headers: int,\n skip: int,\n reverse: bool) -> None:\n cmd = GetBlockHeaders(self.cmd_id_offset)\n data = {\n 'block_number_or_hash': block_number_or_hash,\n 'max_headers': max_headers,\n 'skip': skip,\n 'reverse': reverse\n }\n header, body = cmd.encode(data)\n self.send(header, body)", "def get_block_hash(header_bin):\n _hash = hashlib.sha256(hashlib.sha256(header_bin).digest()).digest()\n return reverse_hash(_hash.hex())", "def build_get_header_request(\n guid: str,\n **kwargs: Any\n) -> HttpRequest:\n accept = \"application/json\"\n\n # Construct URL\n url = kwargs.pop(\"template_url\", '/atlas/v2/entity/guid/{guid}/header')\n path_format_arguments = {\n 'guid': _SERIALIZER.url(\"guid\", guid, 'str', max_length=4096, min_length=1),\n }\n url = _format_url_section(url, **path_format_arguments)\n\n # Construct headers\n header_parameters = kwargs.pop(\"headers\", {}) # type: Dict[str, Any]\n header_parameters['Accept'] = _SERIALIZER.header(\"accept\", accept, 'str')\n\n return HttpRequest(\n method=\"GET\",\n url=url,\n headers=header_parameters,\n **kwargs\n )", "def get_block_names(self):\n return self._parse_response(self.client.service.GetBlockNames())", "def block_headers(self, block_headers: list):\n num_headers = len(block_headers)\n block_headers_size = num_headers * self._message_size['header']\n return {\n 'id': 'block_headers',\n 'block_headers': block_headers,\n 'size': kB_to_MB(block_headers_size)\n }", "def getBlock(self, blockName: unicode) -> ghidra.program.model.mem.MemoryBlock:\n ...", "def GetBlock(self):\n return self._block", "def get_block_info(pool_name='0-100-pool.burst.cryptoguru.org:8008'):\n channel = grpc.insecure_channel(pool_name)\n stub = api_pb2_grpc.ApiStub(channel)\n block_info = stub.GetBlockInfo(api_pb2.Void())\n return block_info", "def hash_block_header(header: Header) -> str:\n hashable_block_header = header.SerializeToString()\n return Verification.hash_bytes_256(hashable_block_header)", "def get_block_hash(height):\n return requests.get(BASE+f'/api/block-index/{height}').json()['blockHash']", "def DescribeBlackHeader(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DescribeBlackHeader\", params, headers=headers)\n response = json.loads(body)\n model = models.DescribeBlackHeaderResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def get_block_with_name(self, block_name: str) -> 
BasicBlock:\n block: BasicBlock\n all_blocks: Dict[str, BasicBlock] = self.get_all_blocks()\n if block_name in all_blocks:\n return all_blocks[block_name]\n raise Exception(ERROR_BLOCK_DOESNT_EXIST % block_name)", "def getheader(self, name, default=None):\n return self.urllib3_response.getheader(name, default)", "def getheader(self, name, default=None):\n return self.urllib3_response.getheader(name, default)", "def getheader(self, name, default=None):\n return self.urllib3_response.getheader(name, default)" ]
[ "0.67096466", "0.65781176", "0.6541774", "0.6459646", "0.6158835", "0.6119097", "0.594869", "0.58376133", "0.58140343", "0.5777425", "0.5696825", "0.56788206", "0.56240237", "0.56043345", "0.55059505", "0.54990935", "0.54933983", "0.5474018", "0.5440675", "0.54353565", "0.5392863", "0.5390409", "0.5367006", "0.53539306", "0.53518635", "0.532425", "0.5316323", "0.53123647", "0.53123647", "0.53123647" ]
0.8214981
0
rpc GetBlock (GetBlockRequest) returns (GetBlockResponse) { option (google.api.http) = {
def GetBlock(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_block(blockhash):\n return requests.get(BASE+f'/api/block/{blockhash}').json()", "def get_block(self, crypto, block_height='', block_number='', latest=False):\n raise NotImplementedError(\n \"This service does not support getting getting block data. \"\n \"Or rather it has no defined 'get_block' method.\"\n )", "def get_block(self, block_num):\n data = self._rpc_request(\"eth_getBlockByNumber\", [hex(block_num), True], \"result\")\n return GethBlock(data)", "def block_info(self, block):\n # Allow for a list of blocks..\n block = utils.request_type(block)\n\n res = r.get(self.url + self.block + str(block))\n return self.execute(res)", "def GetBlock(self):\n return self._block", "def getBlock(self) -> ghidra.program.model.correlate.Block:\n ...", "def getBlock(self, blockName: unicode) -> ghidra.program.model.mem.MemoryBlock:\n ...", "def get(block_id):\n _url = get_root_url()\n try:\n block = DB.get_block_details([block_id]).__next__()\n response = block\n\n response['links'] = {\n 'self': '{}'.format(request.url),\n 'list': '{}/processing-blocks'.format(_url),\n 'home': '{}'.format(_url)\n }\n return block\n except IndexError as error:\n response = dict(message='Unable to GET Processing Block',\n id='{}'.format(block_id),\n error=error.__str__())\n response['links'] = {\n 'list': '{}/processing-blocks'.format(_url),\n 'home': '{}'.format(_url)\n }\n return response, HTTPStatus.NOT_FOUND", "def GetBlockHash(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def getBlock(self):\n return self.block.getWorld().getWorld().getBlockAt(self.block.getChunkCoordinates().getX(), self.block.getChunkCoordinates().getY(), self.block.getChunkCoordinates().getZ())", "def get_block_info(pool_name='0-100-pool.burst.cryptoguru.org:8008'):\n channel = grpc.insecure_channel(pool_name)\n stub = api_pb2_grpc.ApiStub(channel)\n block_info = stub.GetBlockInfo(api_pb2.Void())\n return block_info", "def GetBlockHeader(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_block(block_id: Optional[bytes] = None) -> Optional[Block]:\n if block_id is not None and not isinstance(block_id, bytes):\n raise TypeError\n if block_id is None:\n logging.debug(\"Last block requested\")\n else:\n logging.debug(\"Block %s requested\", util.bintos(block_id))\n\n r = util.get_db()\n if block_id is None:\n block_id = r.get(\"blockchain:last_block\")\n if block_id is None:\n # The blockchain is empty\n logging.debug(\"Blockchain is empty\")\n return None\n\n blockb = r.hget(\"blockchain:blocks\", block_id)\n if blockb is None:\n # Requested block not found\n logging.debug(\"Block %s not found\", util.bintos(block_id))\n return None\n\n logging.debug(\"Block %s retrieved\", util.bintos(block_id))\n return Block.loadb(blockb)", "async def new_block(request: Request) -> dict:\n block: dict = await request.json()\n block = await chain.add_block(block)\n response_block = Block(**block).to_dict()\n\n miner_ip = f\"{request.client.host}:{request.client.port}\"\n for node in chain.peers:\n async with httpx.AsyncClient() as client:\n _ = await client.get(f\"{node}/\")\n temp_chain = {f\"Block-{height}\": data.to_dict()\n for height, data in enumerate(chain.serialized)}\n return {\"miner_address\": miner_ip,\n \"latest_block\": response_block.dict(),\n \"new_chain\": 
temp_chain, }", "def GetBlockHeight(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_rawblock(blockhash):\n return requests.get(BASE+f'/api/rawblock/{blockhash}').json()['rawblock']", "def getBlocks(request):\n if request.method == 'GET':\n blockName = request.GET.get('block', '')\n bid = request.GET.get('bid', '')\n districtName=request.GET.get('district', '')\n stateName=request.GET.get('state', '')\n limit=request.GET.get('limit', '')\n if limit == '':\n limit=50\n else:\n limit=int(limit)\n if bid=='':\n blocks = Block.objects.filter(name__icontains=blockName, district__name__icontains = districtName, district__state__name__icontains=stateName)\n else:\n blocks = Block.objects.filter(id = bid)\n\n blocks = blocks[:limit]\n serializer = SelectBlockSerializer(blocks, many=True)\n return JsonResponse(serializer.data, safe=False)", "def getBlocks(request):\n if request.method == 'GET':\n blockName = request.GET.get('block', '')\n bid = request.GET.get('bid', '')\n districtName=request.GET.get('district', '')\n stateName=request.GET.get('state', '')\n limit=request.GET.get('limit', '')\n if limit == '':\n limit=50\n else:\n limit=int(limit)\n if bid=='':\n blocks = Block.objects.filter(name__icontains=blockName, district__name__icontains = districtName, district__state__name__icontains=stateName)\n else:\n blocks = Block.objects.filter(id = bid)\n\n blocks = blocks[:limit]\n serializer = SelectBlockSerializer(blocks, many=True)\n return JsonResponse(serializer.data, safe=False)", "def getBlockByName(self, name):\n try:\n return self.blocksByName[name]\n except AttributeError:\n self._genBlocksByName()\n return self.blocksByName[name]", "def block_transaction_raw(self, block):\n # Allow for a list of blocks..\n block = utils.request_type(block)\n\n res = r.get(self.url + self.block_raw + str(block))\n return self.execute(res)", "def _get_block(self, pos):\n return _get_mc().getBlock(pos)", "def get_first_block(blockchain):\n response = requests.get('https://api.blockcypher.com/v1/%s/main' % blockchain)\n if response.status_code == 200:\n return int(json.loads(response.content.decode('latin1'))['height'])\n elif response.status_code == 429:\n print('Too many requests')\n return -1", "def get_block(blk):\n global active_block_queries\n #This one is for processing the results from get_block\n def process_block(event, client):\n \"\"\"Process the result from block getting request.\"\"\"\n global active_block_queries\n global nextblock\n global sync_block\n active_block_queries = active_block_queries - 1\n if event != None:\n if sync_block != None and blk >= sync_block:\n sync_block = None\n #Itterate over all operations in the block.\n for t in event[\"transactions\"]:\n for o in t[\"operations\"]:\n #We are only interested in downvotes\n if o[0] == \"vote\" and o[1][\"weight\"] < 0:\n #Call process_vote for each downvote\n process_vote(o[1],client)\n #fetching network clients alive.\n get_block(nextblock)\n nextblock = nextblock + 1\n if active_block_queries < 8:\n treshold = active_block_queries * 20\n behind = (dt.utcnow() - dateutil.parser.parse(event[\"timestamp\"])).seconds\n if behind >= treshold:\n print(\"Behind\",behind,\"seconds while\",active_block_queries,\"queries active. 
Treshold =\",treshold)\n print(\"Spinning up an extra parallel query loop.\")\n get_block(nextblock)\n nextblock = nextblock + 1\n else:\n if sync_block == None or blk <= sync_block:\n sync_block = blk\n get_block(blk)\n else:\n print(\"Overshot sync_block\")\n if active_block_queries == 0:\n print(\"Keeping one loop alive\")\n get_block(blk)\n else:\n print(\"Scaling down paralel HTTPS queries\",active_block_queries)\n #Create a new JSON-RPC entry on the queue to fetch a block.\n opp = rpcclient.condenser_api.get_block(blk)\n active_block_queries = active_block_queries + 1\n #Bind the above closure to the result of get_block\n opp.on_result(process_block)", "def block_transaction(self, block):\n # Allow for a list of blocks..\n block = utils.request_type(block)\n\n res = r.get(self.url + self.block_tx + str(block))\n return self.execute(res)", "def get_block(self, chunk, coords):\n\n return chunk.get_block(coords)", "def get_ipblock(self, ipblock_id):\n response = self._perform_request('/ipblocks/%s' % ipblock_id)\n return response", "def get_block_with_name(self, block_name: str) -> BasicBlock:\n block: BasicBlock\n all_blocks: Dict[str, BasicBlock] = self.get_all_blocks()\n if block_name in all_blocks:\n return all_blocks[block_name]\n raise Exception(ERROR_BLOCK_DOESNT_EXIST % block_name)", "def sync_get_block(self, chunk, coords):\n\n return chunk.get_block(coords)", "def getBlock(self, addr: ghidra.program.model.address.Address) -> ghidra.program.model.mem.MemoryBlock:\n ...", "def getBlocks(self):\n blocks = self.getBlocksMsg(b'\\x00')\n last_locator = self.largeMessageControl(blocks, 'inv', 0)\n\n while last_locator[1] < TARGET_BLOCK:\n blocks = self.getBlocksMsg(bytearray.fromhex(convertLittleBig(last_locator[0])))\n last_locator = self.largeMessageControl(blocks, 'inv', last_locator[1])\n\n print('\\nSuccessfully found the Block #{}: {}'.format(TARGET_BLOCK, last_locator[0]))\n return last_locator[0]" ]
[ "0.7107428", "0.68164647", "0.6625506", "0.65607804", "0.6505889", "0.64306474", "0.63874197", "0.63551414", "0.6283359", "0.62779987", "0.62558174", "0.619906", "0.6185053", "0.6125997", "0.61109483", "0.6100958", "0.61004436", "0.61004436", "0.6088709", "0.60805076", "0.6058329", "0.6023772", "0.6021982", "0.59328955", "0.59324944", "0.5876844", "0.5871138", "0.58679646", "0.586012", "0.5846233" ]
0.7806856
0
rpc GetNodeInfo (GetNodeInfoRequest) returns (GetNodeInfoResponse) { option (google.api.http) = {
def GetNodeInfo(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetNodeInfo(self, hvparams=None):\n return self.GetLinuxNodeInfo()", "def get(self, request, nnid, wfver, desc):\n try:\n return_data = NNCommonManager().get_nn_node_info(nnid, wfver, desc)\n return Response(json.dumps(return_data))\n except Exception as e:\n return_data = {\"status\": \"404\", \"result\": str(e)}\n return Response(json.dumps(return_data))", "def get_nodes_info(self, ctxt):\n cctxt = self.client.prepare(server=DEFAULT_SERVER, timeout=RPC_TIMEOUT)\n return cctxt.call(ctxt, \"get_nodes_info\")", "def rpc_info():", "def get_nodes(self):\n return requests.get(self.__url + 'nodes').json()", "def get(self, request, nnid):\n try:\n condition = {}\n condition['nn_id'] = nnid\n if str(nnid).lower() == 'all':\n condition['nn_id'] = '%'\n elif str(nnid).lower() == 'seq':\n condition['nn_id'] = 'seq'\n return_data = NNCommonManager().get_nn_info(condition)\n logging.info(return_data)\n # Data node name\n graph = NNCommonManager.get_nn_node_name(None, nnid)\n\n return_param = {}\n return_param['fields'] = return_data\n return_param['graph'] = graph\n return Response(json.dumps(return_param, cls=DjangoJSONEncoder))\n except Exception as e:\n return_data = {\"status\": \"404\", \"result\": str(e)}\n return Response(json.dumps(return_param, cls=DjangoJSONEncoder))", "async def retrieve_node(request: web.Request) -> web.Response:\n path_params = parse_request_path_parameters_as(_NodePathParams, request)\n\n try:\n data = await request.json()\n port_keys = data.get(\"port_keys\", [])\n except json.JSONDecodeError as exc:\n raise web.HTTPBadRequest(reason=f\"Invalid request body: {exc}\") from exc\n\n return web.json_response(\n await director_v2_api.retrieve(\n request.app, f\"{path_params.node_id}\", port_keys\n ),\n dumps=json_dumps,\n )", "def getInfo():", "def get_host_info(self):\n\n if len(self.index) == 0:\n # Need to load index from cache\n self.load_index_from_cache()\n\n if not self.args.host in self.index:\n # try updating the cache\n self.do_api_calls_update_cache()\n if not self.args.host in self.index:\n # host might not exist anymore\n return self.json_format_dict({}, True)\n\n node_id = self.index[self.args.host]\n print \"NODE ID %s\" % node_id\n print \"INDEX: %s\" % self.index\n\n node = self.get_node(node_id)\n node_vars = {}\n for direct_attr in [\n \"api_id\",\n \"datacenter_id\",\n \"label\",\n \"display_group\",\n \"create_dt\",\n \"total_hd\",\n \"total_xfer\",\n \"total_ram\",\n \"status\",\n \"alert_cpu_enabled\",\n \"alert_cpu_threshold\",\n \"alert_diskio_enabled\",\n \"alert_diskio_threshold\",\n \"alert_bwin_enabled\",\n \"alert_bwin_threshold\",\n \"alert_bwout_enabled\",\n \"alert_bwout_threshold\",\n \"alert_bwquota_enabled\",\n \"alert_bwquota_threshold\",\n \"backup_weekly_daily\",\n \"backup_window\",\n \"watchdog\"\n ]:\n node_vars[direct_attr] = getattr(node, direct_attr)\n\n node_vars[\"datacenter_city\"] = self.get_datacenter_city(node)\n node_vars[\"public_ip\"] = [addr.address for addr in node.ipaddresses if addr.is_public][0]\n\n return self.json_format_dict(node_vars, True)", "def getInfo(self, id):\n facade = self._getFacade()\n monitor = facade.get(id)\n data = Zuul.marshal(ITreeNode(monitor))\n return DirectResponse.succeed(data=data)", "def rpc_getblockchaininfo(self) -> dict:\n return self._call_command([\"getblockchaininfo\"])", "def info(self):\n return self.client.call('GET', self.name + 'info')", "def info_request():\n return SentmanRequest(SentmanRequest.GET_INFO)", "def _nodeinfo_endpoint(host):\n zkclient = 
context.GLOBAL.zk.conn\n nodeinfo_zk_path = '{}/{}'.format(z.ENDPOINTS, 'root')\n for node in zkclient.get_children(nodeinfo_zk_path):\n if 'nodeinfo' in node and host in node:\n data, _metadata = zkclient.get(\n '{}/{}'.format(nodeinfo_zk_path, node)\n )\n return data.decode().split(':')", "def cluster_node_get(self, node_name, desired_attributes=None):\n return self.request( \"cluster-node-get\", {\n 'node_name': [ node_name, 'node-name', [ basestring, 'node-name' ], False ],\n 'desired_attributes': [ desired_attributes, 'desired-attributes', [ ClusterNodeInfo, 'None' ], False ],\n }, {\n 'attributes': [ ClusterNodeInfo, False ],\n } )", "def getNodes(self):\n data = self.connect('get','nodes',None)\n return data", "def get_node(conn, name):\n datacenter_id = get_datacenter_id()\n\n for item in conn.list_servers(datacenter_id)[\"items\"]:\n if item[\"properties\"][\"name\"] == name:\n node = {\"id\": item[\"id\"]}\n node.update(item[\"properties\"])\n return node", "def get_info_by_node(conn, node): \n cur = conn.cursor()\n cur.execute(\"SELECT * FROM Info WHERE NodeID=?\", (node))", "def get(self) -> Info:\n return InfoService.get()", "def get_info(self):\n pass", "def get_info(self):\n pass", "def get(node_instance_id, logger, client, tenant_name):\n if tenant_name:\n logger.info('Explicitly using tenant `{0}`'.format(tenant_name))\n logger.info('Retrieving node instance {0}'.format(node_instance_id))\n try:\n node_instance = client.node_instances.get(node_instance_id)\n except CloudifyClientError as e:\n if e.status_code != 404:\n raise\n raise CloudifyCliError('Node instance {0} not found'.format(\n node_instance_id))\n\n print_data(NODE_INSTANCE_COLUMNS, node_instance, 'Node-instance:', 50)\n\n # print node instance runtime properties\n logger.info('Instance runtime properties:')\n for prop_name, prop_value in utils.decode_dict(\n node_instance.runtime_properties).iteritems():\n logger.info('\\t{0}: {1}'.format(prop_name, prop_value))\n logger.info('')", "def get_info():\n message = \"GET information about glancesync server\"\n\n logger_api.info(message)\n\n message = '''\n {\n \"id\": \"%s\",\n \"owner\": \"%s\",\n \"status\": \"%s\",\n \"version\": \"%s\",\n \"updated\": \"%s\",\n \"runningfrom\": \"%s\",\n \"href\": \"%s\"\n }\n ''' % (ID, OWNER, STATUS, VERSION, UPDATED, RUNNINGFROM, API_INFO_URL)\n\n resp = make_response(message, httplib.OK)\n resp.headers[SERVER_HEADER] = SERVER\n resp.headers[CONTENT_TYPE] = JSON_TYPE\n\n logger_api.info('Return result: %s', message)\n\n return resp", "def getNodeStatus(self,node):\n data = self.connect('get','nodes/%s/status' % (node),None)\n return data", "def get_info(self):\n url = self._url_for_op('info')\n data= None # This will be a GET request since data is None\n response = self._get_raw_response(self._get_json_headers,\n self._get_json_response, url, data)\n response = json.loads(response)\n self.api_info = response['results']\n return self.api_info", "def get_info(self, info):\r\n pass", "def get_node(self, _id):\n return self.make_request(\"GET\", \"nodes/\"+_id, {})", "def get_nodes(self):\n self.get_status()\n old_api = self.version[0] <= '3'\n if old_api:\n certs_path = \"%s/certificate_statuses/*\" % (self.environment)\n nodeinfo_path_tpl = \"{env}/node/{node}\"\n else:\n certs_path = \"puppet-ca/v1/certificate_statuses/no_key?environment=%s\" % (self.environment)\n nodeinfo_path_tpl = \"puppet/v3/node/{node}?environment={env}\"\n\n csts = self._send('GET', certs_path)\n nodes_names = []\n for cst in csts:\n 
nodes_names.append(cst['name'])\n\n all_nodes = []\n for nname in nodes_names:\n path = nodeinfo_path_tpl.format(node=nname, env=self.environment)\n nodeinfo = self._send('GET', path)\n if old_api:\n nodeinfo = self._from_pson(nodeinfo['data'])\n else:\n nodeinfo = self._from_pson(nodeinfo)\n if 'parameters' in nodeinfo:\n node = nodeinfo['parameters']\n if self.onlynodes:\n if not (node.get('hostname') in self.onlynodes or\n node.get('ipaddress') in self.onlynodes or\n node.get('fqdn') in self.onlynodes or\n node.get('uuid') in self.onlynodes):\n continue\n all_nodes.append(node)\n\n return all_nodes", "def get(self):\n\n self.response.headers['Access-Control-Allow-Origin'] = '*' # Required until JSONP is supported\n self.response.headers['Content-Type'] = 'application/json' # All responses are json, so JSONP to come\n\n if(self.request.get(\"cid\")):\n cid = self.request.get(\"cid\")\n node = memcache.get(cid)\n\n if node is not None:\n # if it is a valid cid then check to see if there are any commands\n # in the nodes command queue\n # and update some fields on the node\n\n command = node.getNextCommand()\n if command is not None:\n self.write(JSONResponse.execCommand(command))\n if command.autoComplete is True:\n logging.info(\"auto completing\")\n node.completeCommand(command.id)\n else:\n self.write(json.dumps({\"type\":\"noCommand\"}))\n else:\n # if it turned out to be a non valid cid, just make a new node\n node = createNode(self.request)\n self.write(JSONResponse.assignment(node.cid))\n \n\n else: # if the request had no cid attached\n node = createNode(self.request)\n self.write(JSONResponse.assignment(node.cid))", "def get(self, id, timeout=None):\n req = NodeGetRequest()\n\n req.id = (id)\n tries = 0\n plumbing_response = None\n while True:\n try:\n plumbing_response = self.stub.Get(\n req,\n metadata=self.parent.get_metadata('Nodes.Get', req),\n timeout=timeout)\n except Exception as e:\n if self.parent.shouldRetry(tries, e):\n tries += 1\n self.parent.jitterSleep(tries)\n continue\n raise plumbing.convert_error_to_porcelain(e) from e\n break\n\n resp = models.NodeGetResponse()\n resp.meta = plumbing.convert_get_response_metadata_to_porcelain(\n plumbing_response.meta)\n resp.node = plumbing.convert_node_to_porcelain(plumbing_response.node)\n resp.rate_limit = plumbing.convert_rate_limit_metadata_to_porcelain(\n plumbing_response.rate_limit)\n return resp" ]
[ "0.7439849", "0.6852461", "0.65889823", "0.6323488", "0.62261367", "0.6143817", "0.60689044", "0.6056143", "0.5977873", "0.58589566", "0.58075047", "0.5785552", "0.5767497", "0.57391393", "0.57375586", "0.5720717", "0.57124937", "0.5708837", "0.5707627", "0.5707309", "0.5707309", "0.5657674", "0.56567395", "0.5643742", "0.5620816", "0.5609476", "0.56093097", "0.55977416", "0.5593453", "0.5578206" ]
0.8246034
0
Normalizes a given hospital name. 1. Converts all words to lower case. 2. Removes all stopwords.
def normalize_hospital_name(name): normalized_name = name.lower() stopword_list = stopwords.words('english') filtered_words = [word for word in wordpunct_tokenize(normalized_name) if word not in stopword_list] slug = slugify(' '.join(filtered_words)) return slug
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _normalize_name(self, name, aggressive=False):\n stopwords = 'the', 'a'\n if aggressive:\n # Remove anything in brackets.\n name = re.sub(r'\\([^)]+\\)', '', name)\n # Some shows have a \"with Firstname Lastname\" suffix, like \"The Daily Show\n # with Jon Stewart\". Strip this out.\n # FIXME: hardcoded English\n name = re.sub(r'with +\\w+ +\\w+\\b', '', name)\n\n # Replace & with 'and' and remove other non-word characters\n name = re.sub(r'\\W', ' ', name.replace('&', 'and').replace('.', '').lower())\n # Remove stop words and remove whitespace.\n return remove_stop_words(name).replace(' ', '')", "def normalize(s):\n s = replace_whitespace(s)\n s = remove_dashes(s)\n s = s.lower()\n return s", "def normalize(s):\n def remove_articles(text):\n return re.sub(r'\\b(a|an|the)\\b', ' ', text)\n\n def white_space_fix(text):\n return ' '.join(text.split())\n\n# def remove_punc(text):\n# exclude = set(string.punctuation)\n# return ''.join(ch for ch in text if ch not in exclude)\n\n def lower(text):\n return text.lower()\n\n return white_space_fix(remove_articles(lower(s)))", "def normalize(s):\r\n def remove_articles(text):\r\n return re.sub(r'\\b(a|an|the)\\b', ' ', text)\r\n\r\n def white_space_fix(text):\r\n return ' '.join(text.split())\r\n\r\n def remove_punc(text):\r\n exclude = set(string.punctuation)\r\n return ''.join(ch for ch in text if ch not in exclude)\r\n\r\n def lower(text):\r\n return text.lower()\r\n\r\n return white_space_fix(remove_articles(remove_punc(lower(s))))", "def normalize(s):\r\n def remove_articles(text):\r\n return re.sub(r'\\b(a|an|the)\\b', ' ', text)\r\n\r\n def white_space_fix(text):\r\n return ' '.join(text.split())\r\n\r\n def remove_punc(text):\r\n exclude = set(string.punctuation)\r\n return ''.join(ch for ch in text if ch not in exclude)\r\n\r\n def lower(text):\r\n return text.lower()\r\n\r\n return white_space_fix(remove_articles(remove_punc(lower(s))))", "def normalize(word):\n word = word.lower()\n # removing plural, it facilitates the matching\n if len(word)>0 and word[-1] == 's':\n return word[0:-1]\n return word", "def normalize_word(word):\n\n return word.lower()", "def get_normalised_phrase(self, sentence):\n return re.sub(r'[\\W_ ]+', ' ', sentence).lower()", "def normalize(name):\n name = name.lower()\n name = name.replace('-', '')\n name = name.replace(' ', '')\n return name", "def normalise_title(title):\n normalised = title.lower()\n if normalised.startswith('the '):\n normalised = normalised[4:]\n normalised = re.sub('[^a-z ]', '', normalised)\n normalised = re.sub(' +', ' ', normalised)\n normalised = normalised.replace(' the ', ' ')\n return normalised", "def preprocess(self, s):\n stripped = re.sub(\"[^\\w\\s]\", \"\", s)\n stripped = re.sub(\"_\", \"\", stripped)\n\n stripped = re.sub(\"\\s+\", \" \", stripped)\n\n stripped = stripped.strip()\n\n return stripped.lower()", "def normalize_word(text):\n if text:\n stripped = \"\".join(unicode(text).translate(tbl).lower().split())\n return stripped", "def normalize(data):\n data = lowercase(data)\n data = remove_punct(data)\n data = remove_apostrophes(data)\n data = remove_stopwords(data)\n data = num_to_words(data)\n data = lemmatize(data)\n data = stemming(data)\n data = remove_punct(data)\n data = num_to_words(data)\n data = lemmatize(data)\n data = stemming(data)\n data = remove_punct(data) #done again to remove hyphens produced by num2words\n data = remove_stopwords(data) #done agan to remove stopwords produced by num2words\n return data", "def normalise(word):\n\t\tword = 
word.lower()\n\t\tword = lemmatizer.lemmatize(word)\n\t\t# word = stemmer.stem_word(word)\n\t\treturn word", "def _normalize_text(s: str) ->str:\n\n def remove_articles(text: str) ->str:\n return re.sub('\\\\b(a|an|the)\\\\b', ' ', text)\n\n def white_space_fix(text: str) ->str:\n return ' '.join(text.split())\n\n def remove_punc(text: str) ->str:\n exclude = set(string.punctuation)\n return ''.join(ch for ch in text if ch not in exclude)\n\n def lower(text: str) ->str:\n return text.lower()\n return white_space_fix(remove_articles(remove_punc(lower(s))))", "def normalize_text(s):\n import string, re\n\n def remove_articles(text):\n regex = re.compile(r\"\\b(a|an|the)\\b\", re.UNICODE)\n return re.sub(regex, \" \", text)\n\n def white_space_fix(text):\n return \" \".join(text.split())\n\n def remove_punc(text):\n exclude = set(string.punctuation)\n return \"\".join(ch for ch in text if ch not in exclude)\n\n def lower(text):\n return text.lower()\n\n return white_space_fix(remove_articles(remove_punc(lower(s))))", "def _normalize(self, word):\n return self.lemmatize(word.lower())", "def normalize_name(cls, name):\n\t\treturn ' '.join(name.lower().strip().split())", "def normalize_name(cls, name):\n\t\treturn ' '.join(name.lower().strip().split())", "def normalize_name(cls, name):\n\t\treturn ' '.join(name.lower().strip().split())", "def normalize_word (word):\n return st.stem(word.strip ().lower ())", "def standardize_name_for_look_up(name: Any) -> str:\n if not isinstance(name, str):\n return name\n\n name = name.lower().strip()\n name = \" \".join(name.split(\"_\"))\n name = name.translate(\n str.maketrans(\"\", \"\", string.punctuation)\n ) # remove punctuation\n name = \" \".join(\n [part for part in name.split(\" \") if part]\n ) # ensure there is only a single space between words\n return name", "def clean_text(s,stem=False):\n\tret = s.lower()\n\tret = re.sub(r'[^a-z ]',' ',ret)\n\tret = re.sub(r' +',' ',ret).strip()\n\tret = re.sub(r'see more occupations related to this (activity|skill|task)','',ret)\n\tif stem:\n\t\tret = ' '.join( stemmer.stem(word) for word in ret.split(' ') )\n\treturn ret", "def normalize_title(self, title):\n return \" \".join(w[0].capitalize() + w[1:] for w in title.split())", "def normalize(self, string):\n\n # If the string is empty, just return it\n if len(string) is 0:\n return string\n\n # Setting all words to lowercase\n string = string.lower()\n\n # Removing punctuation\n if not string[-1].isalnum():\n string = string[:-1]\n\n # Removing words\n string = self.substitute_words(string)\n\n # Returning normalized text\n return string", "def normalise(word):\n word = word.lower()\n word = stemmer.stem_word(word)\n word = lemmatizer.lemmatize(word)\n return word", "def normalize(word):\n word = word.strip().lower()\n return ''.join(sorted(word))", "def normalize_name(word):\n return word.strip(\"0123456789!@#$%^&*_() +=\\/?<>,.`~;:\").lower().replace(\" \",\"_\")", "def clean_name(s):\n return re.sub('[\\W_]+', '', s).lower()", "def _normalize_answer(s):\n\n def remove_articles(text):\n return re.sub(r'\\b(a|an|the)\\b', ' ', text)\n\n def white_space_fix(text):\n return ' '.join(text.split())\n\n def remove_punc(text):\n exclude = set(string.punctuation)\n return ''.join(ch for ch in text if ch not in exclude)\n\n def lower(text):\n return text.lower()\n\n return white_space_fix(remove_articles(remove_punc(lower(s))))" ]
[ "0.72365785", "0.68701774", "0.6862431", "0.6830881", "0.6830881", "0.6761262", "0.6750685", "0.6738248", "0.6719288", "0.6670043", "0.6665048", "0.66137934", "0.6578984", "0.6569107", "0.65648854", "0.6541455", "0.65140444", "0.64899707", "0.64899707", "0.64899707", "0.64876103", "0.6413003", "0.6359083", "0.6357951", "0.6355984", "0.6351049", "0.6349069", "0.6345816", "0.63396364", "0.63307476" ]
0.79034716
0
does a git reset hard to whatever remote the branch is assigned to
def reset_branch_to_remote(repo, branch, hard=True): remote = repo.get_branch_remote(branch) kw = dict(remote=remote, branch=branch) if hard: kw['flags'] = '--hard' repo.issue('git reset {flags} {remote}/{branch}'.format(**kw))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hard_reset_branches(args):\n checkout_branches(args)\n man = load_manifest()\n for (name, project) in man.projects.iteritems():\n print >>sys.stderr, \"Hard resetting tracking branch in project: %s\" % name\n repo = GitRepo(workdir_for_project(project))\n repo.check_command([\"reset\", \"--hard\", project.remote_refspec])", "def test_reset_to_remote_after_rebase(self) -> None:\n (\n self.repo_sandbox\n .new_branch(\"branch-0\")\n .commit()\n .push()\n .new_branch(\"branch-1\")\n .commit()\n .push()\n .check_out(\"branch-0\")\n .commit()\n )\n rewrite_branch_layout_file(\"branch-0\\n\\tbranch-1\")\n\n with fixed_author_and_committer_date_in_past():\n assert_success(\n [\"traverse\", \"-y\"],\n \"\"\"\n Pushing branch-0 to origin...\n\n Checking out branch-1\n\n branch-0\n |\n x-branch-1 *\n\n Rebasing branch-1 onto branch-0...\n\n Branch branch-1 diverged from (and has older commits than) its remote counterpart origin/branch-1.\n Resetting branch branch-1 to the commit pointed by origin/branch-1...\n\n branch-0\n |\n x-branch-1 *\n\n Reached branch branch-1 which has no successor; nothing left to update\n \"\"\"\n )", "def reset_branch(ctx, name, sha, hard):\n\n try:\n\n gh = ctx.obj.github\n\n log.echo(\"Updating {} branch...\".format(name), break_line=False)\n gh.reset_branch(name=name, sha=sha, hard=hard)\n log.echo('Branch {} is now at {} '.format(name, sha), break_line=False)\n log.checkmark()\n except BaseException as _:\n log.xmark()\n raise", "def reset(self, depth=0):\n\n if self.ref_type(self.default_ref) == 'branch':\n branch = self.truncate_ref(self.default_ref)\n branch_output = fmt.ref_string(branch)\n if not self.existing_local_branch(branch):\n return_code = self._create_branch_local_tracking(branch, self.remote, depth=depth, fetch=True)\n if return_code != 0:\n message = colored(' - Failed to create tracking branch ', 'red') + branch_output\n self._print(message)\n self._exit(message)\n return\n elif self._is_branch_checked_out(branch):\n self._print(' - Branch ' + branch_output + ' already checked out')\n else:\n self._checkout_branch_local(branch)\n remote_output = fmt.remote_string(self.remote)\n if not self.existing_remote_branch(branch, self.remote):\n message = colored(' - No existing remote branch ', 'red') + remote_output + ' ' + branch_output\n self._print(message)\n self._exit(message)\n self.fetch(self.remote, ref=self.default_ref, depth=depth)\n self._print(' - Reset branch ' + branch_output + ' to ' + remote_output + ' ' + branch_output)\n remote_branch = self.remote + '/' + branch\n self._reset_head(branch=remote_branch)\n elif self.ref_type(self.default_ref) == 'tag':\n self.fetch(self.remote, ref=self.default_ref, depth=depth)\n self._checkout_tag(self.truncate_ref(self.default_ref))\n elif self.ref_type(self.default_ref) == 'sha':\n self.fetch(self.remote, ref=self.default_ref, depth=depth)\n self._checkout_sha(self.default_ref)", "def git_pull():\n\n puts(yellow(\"Pull master from GitHub\"))\n with cd(env.source_dir):\n run('git reset --hard HEAD')\n run('git pull')", "def pull(ctx, path_base):\n with ctx.cd(path_base):\n ctx.run('git reset --hard')\n ctx.run('git pull origin master')", "def delete_remote():\n branch = git.current_branch().name\n shell.run('git push -u origin {}'.format(branch))", "def _mocked_repo_reset(self, project):\n repo.git.reset(\"--hard\", current_head)", "def git_reset(commit_id):\n env.commit_id = commit_id\n run(\"cd %(repo_path)s; git reset --hard %(commit_id)s\" % env)", "def revert(self, ref):\n self._git.head.commit 
= ref\n self._git.head.reset(index=True, working_tree=True)", "def switch_branch(branch, rdir):\r\n # Get the latest remote\r\n try:\r\n cmd_log(['git', 'fetch', ], rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Unable to fetch remote: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)\r\n\r\n # Check if the branch is available from the remote.\r\n cmd = ['git', 'ls-remote', 'origin', '-h', 'refs/heads/{0}'.format(branch), ]\r\n try:\r\n output = cmd_log(cmd, rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Getting a list of remote branches failed: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)\r\n if not branch in output:\r\n raise GitImportError(GitImportError.REMOTE_BRANCH_MISSING)\r\n # Check it the remote branch has already been made locally\r\n cmd = ['git', 'branch', '-a', ]\r\n try:\r\n output = cmd_log(cmd, rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Getting a list of local branches failed: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)\r\n branches = []\r\n for line in output.split('\\n'):\r\n branches.append(line.replace('*', '').strip())\r\n\r\n if branch not in branches:\r\n # Checkout with -b since it is remote only\r\n cmd = ['git', 'checkout', '--force', '--track',\r\n '-b', branch, 'origin/{0}'.format(branch), ]\r\n try:\r\n cmd_log(cmd, rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Unable to checkout remote branch: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)\r\n # Go ahead and reset hard to the newest version of the branch now that we know\r\n # it is local.\r\n try:\r\n cmd_log(['git', 'reset', '--hard', 'origin/{0}'.format(branch), ], rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Unable to reset to branch: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)", "def clean(self):\n self.run(['git', 'reset', '--hard', 'HEAD'])\n self.run(['git', 'clean', '-fdx'])\n self.run(['git', 'checkout', 'origin/master'])", "def prune_branch_local(self, branch, force):\n\n branch_output = fmt.ref_string(branch)\n if branch not in self.repo.heads:\n self._print(' - Local branch ' + branch_output + \" doesn't exist\")\n return\n prune_branch = self.repo.heads[branch]\n if self.repo.head.ref == prune_branch:\n ref_output = fmt.ref_string(self.truncate_ref(self.default_ref))\n try:\n self._print(' - Checkout ref ' + ref_output)\n self.repo.git.checkout(self.truncate_ref(self.default_ref))\n except GitError as err:\n message = colored(' - Failed to checkout ref', 'red') + ref_output\n self._print(message)\n self._print(fmt.error(err))\n self._exit(message)\n except (KeyboardInterrupt, SystemExit):\n self._exit()\n try:\n self._print(' - Delete local branch ' + branch_output)\n self.repo.delete_head(branch, force=force)\n return\n except GitError as err:\n message = colored(' - Failed to delete local branch ', 'red') + branch_output\n self._print(message)\n self._print(fmt.error(err))\n self._exit(message)\n except (KeyboardInterrupt, SystemExit):\n self._exit()", "def flush_repo():\n server = get_server()\n run(\"rm -rf %(project_name)s\" % env)\n git.clone()\n server.setup()", "def deploy_pull_master(self, restart=True):\n self.ops.local(\"cd \"+self.local_path+\"/src && git reset --hard HEAD && git pull origin master && git submodule update\")\n PiService.deploy(self, restart)", "def _git_pull_ff(path, branch, service_name):\n 
slab_logger.log(15, 'Fast forward only pull of %s branch %s' % (service_name, branch))\n # Note: Branch defaults to master in the click application\n service_path = os.path.join(path, \"services\", service_name)\n\n # Before doing git checkout, check if the remote ref exists\n # if it does not then take some steps to get it and run checks\n slab_logger.debug(\"Checking for remote references in %s \" % (service_path))\n returncode, output = run_this('git show-ref %s' % (branch), cwd=service_path)\n if not returncode == 0:\n return(returncode, output)\n slab_logger.error('\"git show-ref %s\" returned an error for %s\\ncmd output: %s'\n % (branch, service_name, output))\n if branch not in output:\n slab_logger.log(25, \"Remote git branch not found : %s \" % (branch))\n slab_logger.log(25, \"Setting remote origin in .git/config to :\"\n \" +refs/heads/*:refs/remotes/origin/*\")\n command_to_run = 'git config --replace-all remote.origin.fetch'\\\n ' \"+refs/heads/*:refs/remotes/origin/*\"'\n returncode, output = run_this(command_to_run, cwd=service_path)\n if not returncode == 0:\n return(returncode, output)\n slab_logger.debug(\"Fetching all remote branches. It might take a few minutes. %s\"\n % (service_path))\n returncode, output = run_this('git fetch --unshallow', cwd=service_path)\n if not returncode == 0:\n return(returncode, output)\n slab_logger.debug(\"Done Fetching all remote branches. Updating remotes.\")\n returncode, output = run_this('git remote update', cwd=service_path)\n if not returncode == 0:\n return(returncode, output)\n slab_logger.debug(\"Remote updates completed. \")\n command_to_run = \"git show-ref %s\" % (branch)\n returncode, output = run_this(command_to_run, cwd=service_path)\n if not returncode == 0:\n return(returncode, output)\n if branch not in output:\n slab_logger.error(\"Remote branch %s not found.\" % (branch))\n returncode, output = run_this(\"git show-ref\", cwd=service_path)\n if not returncode == 0:\n return(returncode, output)\n ref_info = output.communicate()[0]\n slab_logger.log(25, \"The following branches were found : %s \" % ref_info)\n slab_logger.log(25, \"Branch not found. Please, check branch name. 
Exiting.\")\n return(1, 'Unable to find remote branch')\n # TODO: Do more error checking here --> after debugging, definitely\n # TODO: checkout a branch ifexists in origin only--> not replacing git\n # or setup a tracking branch if there's nothing local or fail.\n returncode, output = run_this('git checkout %s' % (branch), cwd=service_path)\n if not returncode == 0:\n return(returncode, output)\n returncode, myinfo = run_this('git pull --ff-only origin %s' % (branch), service_path)\n return(returncode, myinfo)", "def prune_branch_remote(self, branch, remote):\n\n branch_output = fmt.ref_string(branch)\n if not self.existing_remote_branch(branch, remote):\n self._print(' - Remote branch ' + branch_output + \" doesn't exist\")\n return\n try:\n self._print(' - Delete remote branch ' + branch_output)\n self.repo.git.push(remote, '--delete', branch)\n except GitError as err:\n message = colored(' - Failed to delete remote branch ', 'red') + branch_output\n self._print(message)\n self._print(fmt.error(err))\n self._exit(message)\n except (KeyboardInterrupt, SystemExit):\n self._exit()", "def git_branch(self, app, branch):\n if app == self.PROJECT_NAME:\n app_path = self.PROJECT_DIR\n else:\n raise ValueError('Unknown app')\n\n with lcd(app_path):\n self.local('git pull && git checkout %s' % branch)\n\n self.display('%s has been successfully switched to tag/branch %s.' % (app, branch), color='green')", "def reset_all(self, top_repo_path):\n my_output = subprocess.check_output([\"git\", \"reset\"], cwd=top_repo_path)\n return my_output", "def update_from_remote(remote, force=False):\n git_cmd('fetch', [remote])\n for pkg in TEST_PKGS:\n update_pkg_branches(pkg, remote, force=force)", "def __gitDeleteBranch(self):\n self.vcs.gitDeleteRemoteBranch(self.project.getProjectPath())", "def clean_repo(c):\n c.run('git clean -ffdx')\n c.run('git reset --hard')", "def pull():\n _with_deploy_env(['git pull'])", "def reset(self, filename, top_repo_path):\n my_output = subprocess.check_output(\n [\"git\", \"reset\", \"--\", filename], cwd=top_repo_path\n )\n return my_output", "def _set_tracking_branch_commit(self, branch, remote, depth):\n\n branch_output = fmt.ref_string(branch)\n origin = self._remote(remote)\n return_code = self.fetch(remote, depth=depth, ref=branch)\n if return_code != 0:\n raise ClowderGitError(msg=colored(' - Failed to fech', 'red'))\n if not self.existing_local_branch(branch):\n message = colored(' - No local branch ', 'red') + branch_output + '\\n'\n self._print(message)\n self._exit(message)\n if not self.existing_remote_branch(branch, remote):\n message = colored(' - No remote branch ', 'red') + branch_output + '\\n'\n self._print(message)\n self._exit(message)\n local_branch = self.repo.heads[branch]\n remote_branch = origin.refs[branch]\n if local_branch.commit != remote_branch.commit:\n message_1 = colored(' - Existing remote branch ', 'red')\n message_2 = colored(' on different commit', 'red')\n message = message_1 + branch_output + message_2 + '\\n'\n self._print(message)\n self._exit(message_1)\n return_code = self._set_tracking_branch(remote, branch)\n if return_code != 0:\n self._exit(colored(' - Failed to set tracking branch', 'red'))", "def reset_to_commit(self, commit_id, top_repo_path):\n my_output = subprocess.check_output(\n [\"git\", \"reset\", \"--hard\", commit_id], cwd=top_repo_path\n )\n return my_output", "def checkout(branch=\"lf-dev\"):\n with cd(FOLDER):\n sudo('git fetch', user='tomcat')\n sudo('git checkout %s' % branch, user='tomcat')\n status()", "def 
check_out_topic_branch_from_remote(self):\n self.git.checkout('-b', self.topic_branch, '{}/{}'.format(self.base_branch_remote(), self.topic_branch))", "def master():\n env.branch = 'master'", "def master():\n env.branch = 'master'" ]
[ "0.7487938", "0.7343279", "0.72039735", "0.680071", "0.6762364", "0.6681312", "0.66094434", "0.65522313", "0.64959395", "0.6403131", "0.6313687", "0.619142", "0.61898285", "0.6114071", "0.601012", "0.59937227", "0.5980438", "0.5962808", "0.59619427", "0.5953685", "0.59454954", "0.5942215", "0.593983", "0.5934509", "0.5926325", "0.5912196", "0.58797467", "0.5857429", "0.5850037", "0.5850037" ]
0.8294332
0
Changes the url format for committing
def change_url_format(repo, out_type='ssh'): url = repo.url url_parts = re.split('[/:]', url) in_type = url_parts[0] url_fmts = { 'https': ('.com/', 'https://'), 'ssh': ('.com:', 'git@'), } url_fmts['git'] = url_fmts['ssh'] new_repo_url = url for old, new in zip(url_fmts[in_type], url_fmts[out_type]): new_repo_url = new_repo_url.replace(old, new) # Inplace change repo.url = new_repo_url print('new format repo.url = {!r}'.format(repo.url))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def format_url(self, command):\n\n return '{}{}'.format(self.url,command)", "def _format_api_url(self, url):\n user_name = self._get_user_name()\n # format and return url\n return url.format(\n user_name = user_name,\n element = urllib.quote(self.qnet_element.encode('utf-8'), safe=''),\n token = self._md5(\"%s:%s:%s\" % (user_name, self.iteration_id, self._secret_key))\n )", "def _make_url(self):\n ...", "def _format_url(s):\n return u'%s%s\\n' % (BASE_URL, s.get_absolute_url())", "def format_url(endpoint, cmd):\n url = base_url + endpoint + cmd + '&key=' + bart_api_key + json\n return url", "def format_url(self, data):\n git_url = urlparse(data[\"git_url\"])\n\n url = \"oauth2:{0}@{1}\".format(data[\"token\"], git_url.netloc)\n return git_url._replace(netloc=url).geturl()", "def _proper_url(self, url):\n if self.base_url not in url:\n url = self.base_url + url\n url = re.sub(r'(?<!https:)//', '/', url)\n if not url.endswith('/') and '?' not in url:\n url = url + '/'\n if url.endswith('?'):\n url = url[:-1]\n return url", "def Url(self) -> str:", "def fix_url(cls, url: str):\r\n ...", "def _transform_github_url(self):\n self.url = (\n self.url\n .replace('/blob/', '/')\n .replace(self.GITHUB_NETLOC, self.GITHUB_RAW_NETLOC)\n )", "def new_url(**kwargs):\n # To create the URL, we need to take the format string and return it with no params\n url_base = \"/axapi/v3/export\"\n f_dict = {}\n\n return url_base.format(**f_dict)", "def format_url(url: str) -> str:\n return urljoin(url.replace('https://app', 'https://api'), '')", "def make_url_safe(self, url):\n return url.replace(' ', '%20')\\\n .replace('(', '%28')\\\n .replace(')', '%29')\\\n .replace('\"', '%22')", "def update_url(self, path):\n self._url += \"/%s\" % path.lstrip(\"/\")\n return self", "def format_output_url(cls, url, **kw):\r\n u = UrlParser(url)\r\n\r\n if u.is_reddit_url():\r\n # make sure to pass the port along if not 80\r\n if not kw.has_key('port'):\r\n kw['port'] = request.port\r\n \r\n # disentagle the cname (for urls that would have cnameframe=1 in them)\r\n u.mk_cname(**kw)\r\n \r\n # make sure the extensions agree with the current page\r\n if c.extension:\r\n u.set_extension(c.extension)\r\n\r\n # unparse and encode it un utf8\r\n return _force_unicode(u.unparse()).encode('utf8')", "def urlify(board):\n return(board.replace(\" \",\"%20\"))", "def format_url(url):\n no_scheme = url.split('://', 1)[-1]\n return '[{0}]({1})'.format(no_scheme, url)", "def format_url(url, msg):\n return url+\"?str={}\".format(urllib.parse.quote(msg))", "def existing_url(**kwargs):\n # Build the format dictionary\n url_base = \"/axapi/v3/export\"\n f_dict = {}\n\n return url_base.format(**f_dict)", "def _override_tourl(self):\n base_url = urlparse.urlparse(self.url)\n try:\n query = base_url.query\n except AttributeError:\n # must be python <2.5\n query = base_url[4]\n query = parse_qs(query)\n for k, v in self.items():\n query.setdefault(k, []).append(v)\n\n try:\n scheme = base_url.scheme\n netloc = base_url.netloc\n path = base_url.path\n params = base_url.params\n fragment = base_url.fragment\n except AttributeError:\n # must be python <2.5\n scheme = base_url[0]\n netloc = base_url[1]\n path = base_url[2]\n params = base_url[3]\n fragment = base_url[5]\n\n url = (scheme, netloc, path, params,\n urllib.urlencode(query, True), fragment)\n return urlparse.urlunparse(url)", "def _formulate_change(self, path, params, as_json=False):\n url = self._make_url(path)\n # the following code is removed from our method\n # if 'op' in params:\n # 
params = dict(params)\n # op = params.pop('op')\n # url += '?' + urlencode([('op', op)])\n if as_json:\n body, headers = maas_client.encode_json_data(params)\n else:\n body, headers = maas_client.encode_multipart_data(params, {})\n self.auth.sign_request(url, headers)\n return url, headers, body", "def barbican_url(self):", "def normalize_url(self, url):\n pass", "def _format_to_link(self, commit):\n return os.path.join(self.mount, \"commits-by-hash\", self._hash_updir(commit), commit) + \"/\"", "def edit_url(self, inplace=False,show_url = 0, **kwargs):\n\n if len(kwargs) > 0 :\n other_args = [\"{}={}\".format(k,str(v).replace(\" \",\"+\")) for k,v in kwargs.items()]\n new_url = self.url + \"&\" + \"&\".join(other_args)\n if show_url: print(new_url) \n\n if \"maxresults\" not in kwargs : \n print(\"Be careful : This request will only display the first 100 results.\")\n\n if inplace:\n self.url = new_url", "def _encode_url(full_url):\n return urllib.parse.quote(full_url, safe=\"%/:=&?~#+!$,;'@()*[]|\")", "def create_url(self):\n self.base_url = self.base + self.strs[jpn.path_latest]", "def url(self):\n ...", "def form_url_str(self):\n self.form_com_data_stock_url_str()\n \n self.com_data_full_url = self.com_data_start_url + self.com_data_stock_portion_url +\\\n self.com_data_end_url", "def form_url_str(self, type = 'cur_quotes'):\n if type == 'cur_quotes':\n self.form_cur_quotes_stock_url_str()\n \n # form the property. 2 methods enabled.\n if self.enable_form_properties_fr_exceltable:\n self.form_cur_quotes_property_url_str_fr_excel()\n else:\n self.form_cur_quotes_property_url_str()\n \n self.cur_quotes_full_url = self.cur_quotes_start_url + self.cur_quotes_stock_portion_url +\\\n self.cur_quotes_property_portion_url + self.cur_quotes_end_url" ]
[ "0.65926516", "0.6366216", "0.614659", "0.6131997", "0.6079068", "0.5984888", "0.5974174", "0.5911585", "0.5877944", "0.58651567", "0.5848259", "0.5804585", "0.5783219", "0.5775814", "0.57553697", "0.5724391", "0.57028", "0.56964356", "0.56613433", "0.5646112", "0.563889", "0.561248", "0.5612172", "0.55980307", "0.55948097", "0.55854666", "0.5584128", "0.55559087", "0.55537826", "0.55364317" ]
0.69633836
0
Checkout `branch` and automatically overwrites conflict files.
def checkout2(repo, branch, overwrite=True): cmd = 'git checkout %s' % (branch,) out = repo.issue(cmd, error='return') if overwrite and out is not None: repo._handle_overwrite_error(out) repo._handle_abort_merge_rebase(out) # Retry repo.issue(cmd)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def checkout(branch=\"lf-dev\"):\n with cd(FOLDER):\n sudo('git fetch', user='tomcat')\n sudo('git checkout %s' % branch, user='tomcat')\n status()", "def gitCheckoutBranch(self, path, branch):\r\n\r\n with workInDirectory(path):\r\n fetch_cmd = [\"git\", \"fetch\"]\r\n if self.verbose:\r\n print(\"Runing Command : {}\".format(\" \".join(fetch_cmd)))\r\n\r\n SubProcessUtility.runCommand(fetch_cmd)\r\n\r\n checkout_branch_command = [\"git\", \"checkout\", branch]\r\n if self.verbose:\r\n print(\"Running Command : {}\".format(\" \".join(checkout_branch_command)))\r\n SubProcessUtility.runCommand(checkout_branch_command)", "def git_checkout_branch(name):\n\n if subprocess.call([\"git\", \"diff\", \"--quiet\", \"HEAD\"]) != 0:\n raise Exception(\"Dirty working tree; not checking out %s\" % name)\n\n if subprocess.call([\"git\", \"checkout\", name]) != 0:\n raise Exception(\"Could not checkout %s\" % name)", "def checkout_branches(args):\n\n ensure_tracking_branches([])\n if check_dirty([]) and '-f' not in args:\n raise Exception(\"Cannot checkout new branches with dirty projects.\")\n \n man = load_manifest()\n for (name, project) in man.projects.iteritems():\n print >>sys.stderr, \"Checking out tracking branch in project: %s\" % name\n repo = GitRepo(workdir_for_project(project))\n # Check that sucker out\n repo.check_command([\"checkout\", project.tracking_branch])", "def test_cherrypick_conflict_edit(repository: Repository, path: Path) -> None:\n main = repository.head\n branch = repository.heads.create(\"branch\")\n\n repository.checkout(branch)\n updatefile(path, \"a\")\n\n repository.checkout(main)\n updatefile(path, \"b\")\n\n with pytest.raises(MergeConflictError, match=path.name):\n repository.cherrypick(branch.commit)", "def __gitStashBranch(self):\n pfile = self.project.getProjectFile()\n lastModified = QFileInfo(pfile).lastModified().toString()\n shouldReopen = (\n self.vcs.gitStashBranch(self.project.getProjectPath()) or\n QFileInfo(pfile).lastModified().toString() != lastModified\n )\n if shouldReopen:\n res = E5MessageBox.yesNo(\n self.parent(),\n self.tr(\"Create Branch\"),\n self.tr(\"\"\"The project should be reread. Do this now?\"\"\"),\n yesDefault=True)\n if res:\n self.project.reopenProject()", "def _stash_and_checkout(repo, version):\n repo.git.stash()\n repo.git.checkout(version)\n repo.git.clean(\"-df\")", "def git_dir_checkout_branch(c, org_name, repo_name, remote, branch):\n print('Fetching updates from Git repository')\n c.run('git remote add {remote} [email protected]:{org_name}/{repo_name}.git'.format(remote=remote, org_name=org_name, repo_name=repo_name),\n warn=True)\n c.run('git fetch --all')\n\n print('Checking out {}/{}'.format(remote, branch))\n try:\n c.run('git checkout {}/{}'.format(remote, branch))\n except Failure:\n # probably branch is tag name\n print('Checking out failed. 
Assuming this is a tag, attempting to checkout without stating remote')\n c.run('git checkout {}'.format(branch))", "def create_branch(self):\n os.chdir(str(self.repository_path))\n sh.git.checkout('master')\n sh.git.checkout('-b', self.branch)\n logger.debug('Branch {} created', self.branch)", "def checkout(connection, branch, rid=None, repo=None):\n\n if repo is None:\n repo = Repository(connection, rid)\n\n return repo.checkout(branch)", "def switch_branch(branch, rdir):\r\n # Get the latest remote\r\n try:\r\n cmd_log(['git', 'fetch', ], rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Unable to fetch remote: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)\r\n\r\n # Check if the branch is available from the remote.\r\n cmd = ['git', 'ls-remote', 'origin', '-h', 'refs/heads/{0}'.format(branch), ]\r\n try:\r\n output = cmd_log(cmd, rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Getting a list of remote branches failed: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)\r\n if not branch in output:\r\n raise GitImportError(GitImportError.REMOTE_BRANCH_MISSING)\r\n # Check it the remote branch has already been made locally\r\n cmd = ['git', 'branch', '-a', ]\r\n try:\r\n output = cmd_log(cmd, rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Getting a list of local branches failed: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)\r\n branches = []\r\n for line in output.split('\\n'):\r\n branches.append(line.replace('*', '').strip())\r\n\r\n if branch not in branches:\r\n # Checkout with -b since it is remote only\r\n cmd = ['git', 'checkout', '--force', '--track',\r\n '-b', branch, 'origin/{0}'.format(branch), ]\r\n try:\r\n cmd_log(cmd, rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Unable to checkout remote branch: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)\r\n # Go ahead and reset hard to the newest version of the branch now that we know\r\n # it is local.\r\n try:\r\n cmd_log(['git', 'reset', '--hard', 'origin/{0}'.format(branch), ], rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Unable to reset to branch: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)", "def ensure_sync_master_branch(self):\n # TODO(robertocn): Investigate what causes the states mentioned in the\n # docstring in the first place.\n self.api.m.git('update-ref', 'refs/heads/master',\n 'refs/remotes/origin/master')\n self.api.m.git('checkout', 'master', cwd=self.api.m.path['checkout'])", "def main(branch):\n try:\n # Ensure that we're in a git repository. 
This command is silent unless\n # you're not actually in a git repository, in which case, you receive a\n # \"Not a git repository\" error message.\n output = subprocess.check_output(['git', 'rev-parse']).decode('utf-8')\n sys.stdout.write(output)\n except subprocess.CalledProcessError:\n # Bail if we're not in a git repository.\n return\n\n # This behavior ensures a better user experience for those that aren't\n # intimately familiar with git.\n ensure_remote_branch_is_tracked(branch)\n\n # Switch to the specified branch and update it.\n subprocess.check_call(['git', 'checkout', '--quiet', branch])\n\n # Pulling is always safe here, because we never commit to this branch.\n subprocess.check_call(['git', 'pull', '--quiet'])\n\n # Checkout the top commit in the branch, effectively going \"untracked.\"\n subprocess.check_call(['git', 'checkout', '--quiet', '%s~0' % branch])\n\n # Clean up the repository of Python cruft. Because we've just switched\n # branches and compiled Python files should not be version controlled,\n # there are likely leftover compiled Python files sitting on disk which may\n # confuse some tools, such as sqlalchemy-migrate.\n subprocess.check_call(['find', '.', '-name', '\"*.pyc\"', '-delete'])\n\n # For the sake of user experience, give some familiar output.\n print('Your branch is up to date with branch \\'origin/%s\\'.' % branch)", "def test_cherrypick_conflict_deletion(repository: Repository, path: Path) -> None:\n updatefile(path, \"a\")\n\n main = repository.head\n branch = repository.heads.create(\"branch\")\n\n repository.checkout(branch)\n updatefile(path, \"b\")\n\n repository.checkout(main)\n removefile(path)\n\n with pytest.raises(MergeConflictError, match=path.name):\n repository.cherrypick(branch.commit)", "def checkout_latest():\n with cd(env.repo_path):\n run('git checkout %(branch)s;' % env)\n run('git pull origin %(branch)s' % env)", "def checkout_new_branch(self, branchname, current_path):\n p = Popen(\n [\"git\", \"checkout\", \"-b\", branchname],\n stdout=PIPE,\n stderr=PIPE,\n cwd=os.path.join(self.root_dir, current_path),\n )\n my_output, my_error = p.communicate()\n if p.returncode == 0:\n return {\"code\": p.returncode, \"message\": my_output.decode(\"utf-8\")}\n else:\n return {\n \"code\": p.returncode,\n \"command\": \"git checkout \" + \"-b\" + branchname,\n \"message\": my_error.decode(\"utf-8\"),\n }", "def git_branch(self, app, branch):\n if app == self.PROJECT_NAME:\n app_path = self.PROJECT_DIR\n else:\n raise ValueError('Unknown app')\n\n with lcd(app_path):\n self.local('git pull && git checkout %s' % branch)\n\n self.display('%s has been successfully switched to tag/branch %s.' 
% (app, branch), color='green')", "def ensure_remote_branch_is_tracked(branch):\n if branch == MASTER_BRANCH:\n # We don't need to explicitly track the master branch, so we're done.\n return\n\n # Ensure the specified branch is in the local branch list.\n output = subprocess.check_output(['git', 'branch', '--list'])\n for line in output.split('\\n'):\n if line.strip() == branch:\n # We are already tracking the remote branch\n break\n else:\n # We are not tracking the remote branch, so track it.\n try:\n sys.stdout.write(subprocess.check_output(\n ['git', 'checkout', '--track', 'origin/%s' % branch]))\n except subprocess.CalledProcessError:\n # Bail gracefully.\n raise SystemExit(1)", "def update_branch(branch, repo, options):\n update = None\n\n remote = repo.get_merge_branch(branch)\n if not remote:\n gbp.log.warn(\"No branch tracking '%s' found - skipping.\" % branch)\n return False\n\n can_fast_forward, up_to_date = repo.is_fast_forward(branch, remote)\n\n if up_to_date: # Great, we're done\n gbp.log.info(\"Branch '%s' is already up to date.\" % branch)\n return True\n\n if can_fast_forward:\n update = 'merge'\n else:\n if options.force == 'merge':\n gbp.log.info(\"Non-fast forwarding '%s' due to --force=merge\" % branch)\n update = 'merge'\n elif options.force == 'clean':\n gbp.log.info(\"Checking out clean copy of '%s' due to --force=clean\" % branch)\n update = 'clean'\n else:\n gbp.log.warn(\"Skipping non-fast forward of '%s' - use --force or \"\n \"update manually\" % branch)\n\n if update:\n gbp.log.info(\"Updating '%s'\" % branch)\n if repo.branch == branch:\n if update == 'merge':\n repo.merge(remote)\n elif update == 'clean':\n # Have to drop our current branch\n tmpbranch = \"_gbptmp-\"+branch\n gbp.log.debug(\"Checking out '%s' to '%s'\" % (remote, tmpbranch))\n repo.create_branch(tmpbranch, remote)\n gbp.log.debug(\"Switching current branch to '%s'\" % (tmpbranch))\n repo.set_branch(tmpbranch)\n gbp.log.debug(\"Dropping branch '%s'\" % branch)\n repo.delete_branch(branch)\n gbp.log.info(\"Renaming branch '%s' to '%s'\" % (tmpbranch, branch))\n repo.rename_branch(tmpbranch, branch)\n else:\n if can_fast_forward or (update == 'clean'):\n sha1 = repo.rev_parse(remote)\n repo.update_ref(\"refs/heads/%s\" % branch, sha1,\n msg=\"gbp: forward %s to %s\" % (branch, remote))\n elif update == 'merge':\n # Merge other branch, if it cannot be fast-forwarded\n current_branch=repo.branch\n repo.set_branch(branch)\n repo.merge(remote)\n repo.set_branch(current_branch)\n\n return (update != None)", "def install_branch(branch):\n\n # if it's already in the virtualenv, remove it\n ver = '.'.join(map(str,(sys.version_info.major,sys.version_info.minor)))\n sitepack = os.path.join(virtual_dir, 'lib','python'+ver, 'site-packages')\n if os.path.exists(sitepack):\n dir_list = os.listdir(sitepack)\n else:\n dir_list = []\n for f in dir_list:\n if 'statsmodels' in f:\n shutil.rmtree(os.path.join(sitepack, f))\n\n # checkout the branch\n os.chdir(gitdname)\n retcode = subprocess.call('git checkout ' + branch, shell=True)\n if retcode != 0:\n msg = \"\"\"Could not checkout out branch %s\"\"\" % branch\n raise Exception(msg)\n\n # build and install\n retcode = subprocess.call(\" \".join([virtual_python, 'setup.py', 'build']),\n shell=True)\n if retcode != 0:\n msg = \"\"\" Could not build branch %s\"\"\" % branch\n raise Exception(msg)\n retcode = subprocess.call(\" \".join([virtual_python, os.path.join(gitdname,\n 'setup.py'), 'install']), shell=True)\n if retcode != 0:\n os.chdir(dname)\n msg = 
\"\"\"Could not install branch %s\"\"\" % branch\n raise Exception(msg)\n os.chdir(dname)", "def checkout_branch(self, branchname, current_path):\n p = Popen(\n [\"git\", \"checkout\", branchname],\n stdout=PIPE,\n stderr=PIPE,\n cwd=os.path.join(self.root_dir, current_path),\n )\n my_output, my_error = p.communicate()\n if p.returncode == 0:\n return {\"code\": p.returncode, \"message\": my_output.decode(\"utf-8\")}\n else:\n return {\n \"code\": p.returncode,\n \"command\": \"git checkout \" + branchname,\n \"message\": my_error.decode(\"utf-8\"),\n }", "def reset_branch(ctx, name, sha, hard):\n\n try:\n\n gh = ctx.obj.github\n\n log.echo(\"Updating {} branch...\".format(name), break_line=False)\n gh.reset_branch(name=name, sha=sha, hard=hard)\n log.echo('Branch {} is now at {} '.format(name, sha), break_line=False)\n log.checkmark()\n except BaseException as _:\n log.xmark()\n raise", "def verify_up_to_date(path, branch=\"master\"):\n\n sys.stdout.write(\" - Verifying your branch up to date:\")\n run_in_component(path, ['git', 'remote', 'update'])\n\n result = run_in_component(path, ['git', 'rev-list', 'HEAD...origin/%s' % branch, '--count'])\n count = int(result.strip())\n\n if count == 0:\n print(\" OKAY\")\n return\n\n print(\" FAILED\")\n\n raise GenericError(\"You branch is not up-to-date with remote branch: %d different commits\" % count)", "def branch(branch_name):\n env.branch = branch_name", "def branch(branch_name):\n env.branch = branch_name", "def _check_branch(opt, params):\n\n # Check the current branch and hash\n _get_branch(opt)\n\n if params.git_branch != opt.git_branch or params.git_hash != opt.git_hash:\n msg = 'You are not on the right branch or commit. Please run the following in the repository: \\n'\n msg += f'git checkout {params.git_branch}\\n'\n msg += f'git revert {params.git_hash}'\n sys.exit(msg)", "def ensure_tracking_branches(args):\n man = load_manifest()\n for (name, project) in man.projects.iteritems():\n repo = GitRepo(workdir_for_project(project))\n branch_missing = repo.command(\n [\"rev-parse\", \"--verify\", \"-q\", project.refspec],\n capture_stdout=True)\n \n if branch_missing:\n logging.warn(\"Branch %s does not exist in project %s. 
checking out.\" %\n (project.refspec, name))\n repo.command([\"branch\", \"--track\",\n project.tracking_branch, project.remote_refspec])", "def merge(self, branch):\n\n if branch.username != self.username or branch.reponame != self.reponame:\n raise BranchError(\"Branch to merge must be in the same repository\")\n\n context = {\n \"username\": self.username,\n \"reponame\": self.reponame,\n \"name\": self.name\n }\n LOG.debug(\"Merging from %r to %r\" % (branch, self))\n self._client.postjson(path=\"/users/%(username)s/repos/%(reponame)s/\"\n \"branches/%(name)s/merge\" % context,\n payload={\"from_branch\": branch.name})", "def _set_tracking_branch_commit(self, branch, remote, depth):\n\n branch_output = fmt.ref_string(branch)\n origin = self._remote(remote)\n return_code = self.fetch(remote, depth=depth, ref=branch)\n if return_code != 0:\n raise ClowderGitError(msg=colored(' - Failed to fech', 'red'))\n if not self.existing_local_branch(branch):\n message = colored(' - No local branch ', 'red') + branch_output + '\\n'\n self._print(message)\n self._exit(message)\n if not self.existing_remote_branch(branch, remote):\n message = colored(' - No remote branch ', 'red') + branch_output + '\\n'\n self._print(message)\n self._exit(message)\n local_branch = self.repo.heads[branch]\n remote_branch = origin.refs[branch]\n if local_branch.commit != remote_branch.commit:\n message_1 = colored(' - Existing remote branch ', 'red')\n message_2 = colored(' on different commit', 'red')\n message = message_1 + branch_output + message_2 + '\\n'\n self._print(message)\n self._exit(message_1)\n return_code = self._set_tracking_branch(remote, branch)\n if return_code != 0:\n self._exit(colored(' - Failed to set tracking branch', 'red'))", "def __gitBranch(self):\n pfile = self.project.getProjectFile()\n lastModified = QFileInfo(pfile).lastModified().toString()\n shouldReopen = (\n self.vcs.gitBranch(self.project.getProjectPath())[1] or\n QFileInfo(pfile).lastModified().toString() != lastModified\n )\n if shouldReopen:\n res = E5MessageBox.yesNo(\n self.parent(),\n self.tr(\"Branch\"),\n self.tr(\"\"\"The project should be reread. Do this now?\"\"\"),\n yesDefault=True)\n if res:\n self.project.reopenProject()" ]
[ "0.65946996", "0.6574886", "0.6410081", "0.6246568", "0.6213896", "0.6204029", "0.61986405", "0.6193927", "0.6152797", "0.61391264", "0.6123888", "0.61154884", "0.6081411", "0.6080463", "0.60580766", "0.60198873", "0.6007687", "0.5984934", "0.5980404", "0.5944135", "0.59402764", "0.5909256", "0.5903495", "0.5899725", "0.5899725", "0.58995396", "0.5883408", "0.5856517", "0.5855238", "0.5833517" ]
0.68562466
0
DEPRECATE My standard build script names. Calls mingw_build.bat on Windows and unix_build.sh on Unix
def std_build_command(repo='.'): import utool as ut print('+**** stdbuild *******') print('repo = %r' % (repo,)) if sys.platform.startswith('win32'): # vtool --rebuild-sver didnt work with this line #scriptname = './mingw_build.bat' scriptname = 'mingw_build.bat' else: scriptname = './unix_build.sh' if repo == '': # default to cwd repo = '.' else: os.chdir(repo) ut.assert_exists(scriptname) normbuild_flag = '--no-rmbuild' if ut.get_argflag(normbuild_flag): scriptname += ' ' + normbuild_flag # Execute build ut.cmd(scriptname) #os.system(scriptname) print('L**** stdbuild *******')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build():\n local('wintersmith build')", "def compile_build_files(ctx):\n project_dir = Path(__file__).parent\n\n build_dir = project_dir / \"build\"\n ninja_dir = project_dir / \"build/ninja\"\n artifacts_dir = project_dir / \"build/artifacts\"\n\n if artifacts_dir.exists():\n shutil.rmtree(artifacts_dir)\n if ninja_dir.exists():\n shutil.rmtree(ninja_dir)\n\n os.makedirs(artifacts_dir)\n os.makedirs(ninja_dir)\n\n call_cmake = (\n f\"cmake \"\n f\"-DCMAKE_BUILD_TYPE=Release \"\n f'-G Ninja \"{build_dir}\" '\n f\"-DPYTHON_EXECUTABLE={sys.executable} \"\n )\n call_ninja = \"ninja -j 8\"\n call_install = \"ninja install\"\n\n with ctx.cd(str(project_dir / \"build/ninja\")):\n if sys.platform == \"win32\":\n paths = (\n os.path.expandvars(\n r\"${PROGRAMFILES(X86)}\\Microsoft Visual Studio\\2017\\Community\\VC\\Auxiliary\\Build\\vcvarsall.bat\"\n ),\n os.path.expandvars(\n r\"${PROGRAMFILES(X86)}\\Microsoft Visual Studio\\2017\\BuildTools\\VC\\Auxiliary\\Build\\vcvarsall.bat\"\n ),\n os.path.expandvars(\n r\"${PROGRAMFILES(X86)}\\Microsoft Visual Studio\\2017\\Professional\\VC\\Auxiliary\\Build\\vcvarsall.bat\"\n ),\n os.path.expandvars(\n r\"${PROGRAMFILES(X86)}\\Microsoft Visual Studio\\2017\\WDExpress\\VC\\Auxiliary\\Build\\vcvarsall.bat\"\n ),\n # Path for vcvars on GithubAction\n r\"C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\Enterprise\\VC\\Auxiliary\\Build\\vcvars64.bat\",\n )\n for msvc_path in paths:\n if os.path.isfile(msvc_path):\n break\n else:\n raise RuntimeError(\n \"Couldn't find MSVC compiler in any of:\\n{}\".format(\"- \" + \"\\n- \".join(paths))\n )\n\n call_cmd = f'call \"{msvc_path}\" amd64'\n ctx.run(command=call_cmd + \"&\" + call_cmake + \"&&\" + call_ninja + \"&&\" + call_install)\n\n else:\n ctx.run(command=call_cmake + \"&&\" + call_ninja + \"&&\" + call_install)", "def build():\n local('python' + python_version + ' setup.py bdist_egg')", "def setup_quiet_build(env):\r\n # colors\r\n c = dict()\r\n c['cyan'] = '\\033[96m'\r\n c['purple'] = '\\033[95m'\r\n c['blue'] = '\\033[94m'\r\n c['bold_blue'] = '\\033[94;1m'\r\n c['green'] = '\\033[92m'\r\n c['yellow'] = '\\033[93m'\r\n c['red'] = '\\033[91m'\r\n c['magenta']= '\\033[35m'\r\n c['bold_magenta']= '\\033[35;1m'\r\n c['inverse']= '\\033[7m'\r\n c['bold'] = '\\033[1m'\r\n c['rst'] = '\\033[0m'\r\n\r\n # if the output is not a terminal, remove the c\r\n # also windows console doesn't know about ansi c seems\r\n if not sys.stdout.isatty() or re.match('^win.*', plat_id()):\r\n for key, value in c.iteritems():\r\n c[key] = ''\r\n\r\n compile_cxx_msg = '%s[CXX]%s %s$SOURCE%s' % \\\r\n (c['blue'], c['rst'], c['yellow'], c['rst'])\r\n\r\n compile_c_msg = '%s[CC]%s %s$SOURCE%s' % \\\r\n (c['cyan'], c['rst'], c['yellow'], c['rst'])\r\n\r\n compile_shared_msg = '%s[SHR]%s %s$SOURCE%s' % \\\r\n (c['bold_blue'], c['rst'], c['yellow'], c['rst'])\r\n\r\n link_program_msg = '%s[LNK exe]%s %s$TARGET%s' % \\\r\n (c['bold_magenta'], c['rst'], c['bold'] + c['yellow'] + c['inverse'], c['rst'])\r\n\r\n link_lib_msg = '%s[LIB st]%s %s$TARGET%s' % \\\r\n ('', c['rst'], c['cyan'], c['rst'])\r\n\r\n ranlib_library_msg = '%s[RANLIB]%s %s$TARGET%s' % \\\r\n ('', c['rst'], c['cyan'], c['rst'])\r\n\r\n link_shared_library_msg = '%s[LNK shr]%s %s$TARGET%s' % \\\r\n (c['bold_magenta'], c['rst'], c['bold'], c['rst'])\r\n\r\n env['CXXCOMSTR'] = compile_cxx_msg\r\n env['SHCXXCOMSTR'] = compile_shared_msg\r\n env['CCCOMSTR'] = compile_c_msg\r\n env['SHCCCOMSTR'] = compile_shared_msg\r\n env['ARCOMSTR'] = link_lib_msg\r\n 
env['SHLINKCOMSTR'] = link_shared_library_msg\r\n env['LINKCOMSTR'] = link_program_msg\r\n env['RANLIBCOMSTR']= ranlib_library_msg", "def script_build(repo_root, spec, build_args, verbose):\n env = os.environ\n env.update(build_args)\n for s in spec.build_script():\n if verbose:\n click.echo(str(s))\n ret = subprocess.call(str(s), shell=True, cwd=repo_root, env=env)\n if ret:\n raise click.ClickException(\"{} exited with code {}\".format(str(s), ret))", "def task_build(argv):\n pytaskmaster.generator(\"setup.py.in\", \"setup.py\", config)\n pytaskmaster.generator(\"pytaskmaster/version.py.in\", \"pytaskmaster/version.py\", config)\n shell(\"python setup.py bdist_wheel\")\n if \"--sign\" in argv:\n for file in os.listdir(\"dist\"):\n asc_file = \"dist/\" + file + \".asc\"\n if file.endswith(\".whl\") and not os.path.isfile(asc_file):\n shell(\"gpg --detach-sign -a dist/{}\".format(file))", "def common_configure(conf):\n \n conf.env['MSVC_VERSIONS'] = ['msvc 9.0', 'msvc 8.0']\n conf.env['MSVC_TARGETS'] = ['x86']\n \n if sys.platform.startswith('cygwin'):\n print \"ERROR: You must use the Win32 Python from python.org, not Cygwin Python, when building on Windows.\"\n sys.exit(1)\n \n if sys.platform.startswith('darwin') and build_port == 'wx':\n import platform\n if platform.release().startswith('10'): # Snow Leopard\n # wx currently only supports 32-bit compilation, so we want gcc-4.0 instead of 4.2 on Snow Leopard\n # unless the user has explicitly set a different compiler.\n if not \"CC\" in os.environ:\n conf.env['CC'] = 'gcc-4.0'\n if not \"CXX\" in os.environ:\n conf.env['CXX'] = 'g++-4.0'\n conf.check_tool('compiler_cxx')\n conf.check_tool('compiler_cc')\n if Options.options.wxpython:\n conf.check_tool('python')\n conf.check_python_headers()\n \n if sys.platform.startswith('darwin'):\n conf.check_tool('osx')\n \n global msvc_version\n global msvclibs_dir\n \n if building_on_win32:\n found_versions = conf.get_msvc_versions()\n if found_versions[0][0] == 'msvc 9.0':\n msvc_version = 'msvc2008'\n elif found_versions[0][0] == 'msvc 8.0':\n msvc_version = 'msvc2005'\n \n msvclibs_dir = os.path.join(wklibs_dir, msvc_version, 'win')\n conf.env.append_value('CXXFLAGS', ['/wd4291','/wd4344','/wd4396','/wd4800'])\n \n for use in port_uses[build_port]:\n conf.env.append_value('CXXDEFINES', ['WTF_USE_%s' % use])\n\n if build_port == \"wx\":\n update_wx_deps(conf, wk_root, msvc_version)\n \n conf.env.append_value('CXXDEFINES', ['BUILDING_WX__=1'])\n\n if building_on_win32:\n conf.env.append_value('LIBPATH', os.path.join(msvclibs_dir, 'lib'))\n # wx settings\n global config\n is_debug = (config == 'Debug')\n wxdefines, wxincludes, wxlibs, wxlibpaths = get_wxmsw_settings(wx_root, shared=True, unicode=True, debug=is_debug, wxPython=Options.options.wxpython)\n conf.env['CXXDEFINES_WX'] = wxdefines\n conf.env['CPPPATH_WX'] = wxincludes\n conf.env['LIB_WX'] = wxlibs\n conf.env['LIBPATH_WX'] = wxlibpaths\n\n if sys.platform.startswith('darwin'):\n conf.env['LIB_ICU'] = ['icucore']\n # Apple does not ship the ICU headers with Mac OS X, so WebKit includes a copy of 3.2 headers\n conf.env['CPPPATH_ICU'] = [os.path.join(jscore_dir, 'icu'), os.path.join(webcore_dir, 'icu')]\n \n conf.env.append_value('CPPPATH', wklibs_dir)\n conf.env.append_value('LIBPATH', wklibs_dir)\n \n min_version = None\n \n mac_target = 'MACOSX_DEPLOYMENT_TARGET'\n if Options.options.macosx_version != '':\n min_version = Options.options.macosx_version\n \n # WebKit only supports 10.4+, but ppc systems often set this to earlier systems\n 
if not min_version:\n min_version = commands.getoutput('sw_vers -productVersion')[:4]\n if min_version in ['10.1','10.2','10.3']:\n min_version = '10.4'\n\n os.environ[mac_target] = conf.env[mac_target] = min_version \n\n sdk_version = min_version\n if min_version == \"10.4\":\n sdk_version += \"u\"\n \n sdkroot = '/Developer/SDKs/MacOSX%s.sdk' % sdk_version\n sdkflags = ['-arch', 'i386', '-isysroot', sdkroot]\n \n conf.env.append_value('CPPFLAGS', sdkflags)\n conf.env.append_value('LINKFLAGS', sdkflags)\n \n conf.env.append_value('CPPPATH_SQLITE3', [os.path.join(wklibs_dir, 'WebCoreSQLite3')])\n conf.env.append_value('LIB_SQLITE3', ['WebCoreSQLite3'])\n \n libprefix = ''\n if building_on_win32:\n libprefix = 'lib'\n \n conf.env['LIB_JSCORE'] = [libprefix + 'jscore']\n conf.env['LIB_WEBCORE'] = [libprefix + 'webcore']\n conf.env['LIB_WXWEBKIT'] = ['wxwebkit']\n conf.env['CXXDEFINES_WXWEBKIT'] = ['WXUSINGDLL_WEBKIT']\n \n conf.env.append_value('CXXDEFINES', feature_defines)\n if config == 'Release':\n conf.env.append_value('CPPDEFINES', 'NDEBUG')\n \n if building_on_win32:\n conf.env.append_value('CPPPATH', [\n os.path.join(jscore_dir, 'os-win32'),\n os.path.join(msvclibs_dir, 'include'),\n os.path.join(msvclibs_dir, 'include', 'pthreads'),\n os.path.join(msvclibs_dir, 'lib'),\n ])\n \n conf.env.append_value('LIB', ['libpng', 'libjpeg', 'pthreadVC2'])\n # common win libs\n conf.env.append_value('LIB', [\n 'kernel32', 'user32','gdi32','comdlg32','winspool','winmm',\n 'shell32', 'comctl32', 'ole32', 'oleaut32', 'uuid', 'advapi32', \n 'wsock32', 'gdiplus', 'version'])\n\n conf.env['LIB_ICU'] = ['icudt', 'icule', 'iculx', 'icuuc', 'icuin', 'icuio', 'icutu']\n \n #curl\n conf.env['LIB_CURL'] = ['libcurl']\n \n #sqlite3\n conf.env['CPPPATH_SQLITE3'] = [os.path.join(msvclibs_dir, 'include', 'SQLite')]\n conf.env['LIB_SQLITE3'] = ['sqlite3']\n \n #libxml2\n conf.env['LIB_XML'] = ['libxml2']\n \n #libxslt\n conf.env['LIB_XSLT'] = ['libxslt']\n else: \n if build_port == 'wx':\n conf.env.append_value('LIB', ['jpeg', 'png', 'pthread'])\n conf.env.append_value('LIBPATH', os.path.join(wklibs_dir, 'unix', 'lib'))\n conf.env.append_value('CPPPATH', os.path.join(wklibs_dir, 'unix', 'include'))\n conf.env.append_value('CXXFLAGS', ['-fPIC', '-DPIC'])\n \n conf.check_cfg(path=get_path_to_wxconfig(), args='--cxxflags --libs', package='', uselib_store='WX', mandatory=True)\n \n conf.check_cfg(msg='Checking for libxslt', path='xslt-config', args='--cflags --libs', package='', uselib_store='XSLT', mandatory=True)\n conf.check_cfg(path='xml2-config', args='--cflags --libs', package='', uselib_store='XML', mandatory=True)\n conf.check_cfg(path='curl-config', args='--cflags --libs', package='', uselib_store='CURL', mandatory=True)\n \n if not sys.platform.startswith('darwin'):\n conf.check_cfg(package='cairo', args='--cflags --libs', uselib_store='WX', mandatory=True)\n conf.check_cfg(package='pango', args='--cflags --libs', uselib_store='WX', mandatory=True)\n conf.check_cfg(package='gtk+-2.0', args='--cflags --libs', uselib_store='WX', mandatory=True)\n conf.check_cfg(package='sqlite3', args='--cflags --libs', uselib_store='SQLITE3', mandatory=True)\n conf.check_cfg(path='icu-config', args='--cflags --ldflags', package='', uselib_store='ICU', mandatory=True)", "def build_step(self):\n run_cmd('./compile.sh', log_all=True, simple=True, log_ok=True)", "def test_platform():\n build()\n sh(\"%s psutil\\\\tests\\\\test_windows.py\" % PYTHON)", "def build():\n os.makedirs(DIST_DIR, exist_ok=True)\n\n if \"WHEEL\" in 
os.environ:\n whl = build_wheel()\n else:\n click.echo(\"Not building wheels.\")\n\n if \"WHEEL\" in os.environ and \"DOCKER\" in os.environ:\n # Docker image requires wheels\n build_docker_image(whl)\n else:\n click.echo(\"Not building Docker image.\")\n\n if \"PYINSTALLER\" in os.environ:\n build_pyinstaller()\n else:\n click.echo(\"Not building PyInstaller packages.\")", "def _generateWindowsBuildArgs(\n self, logger, basetagOverride=None, isolationOverride=None\n ):\n\n # Determine the appropriate container image base tag for the host system release unless the user specified a base tag\n buildArgs = []\n hostBaseTag = WindowsUtils.getHostBaseTag()\n baseTag = basetagOverride if basetagOverride is not None else hostBaseTag\n\n if baseTag is None:\n raise RuntimeError(\n \"unable to determine Windows Server Core base image tag from host system. Specify it explicitly using -basetag command-line flag\"\n )\n\n buildArgs = [\"--build-arg\", \"BASETAG={}\".format(baseTag)]\n\n # Use the default isolation mode unless requested otherwise\n dockerInfo = DockerUtils.info()\n isolation = (\n isolationOverride\n if isolationOverride is not None\n else dockerInfo[\"Isolation\"]\n )\n buildArgs += [\"--isolation={}\".format(isolation)]\n\n # If the user specified process isolation mode and a different base tag to the host system then warn them\n prefix = self.getPrefix()\n if isolation == \"process\" and baseTag != hostBaseTag:\n logger.info(\n \"[{}] Warning: attempting to use different Windows container/host versions\".format(\n prefix\n ),\n False,\n )\n logger.info(\n \"[{}] when running in process isolation mode, this will usually break!\".format(\n prefix\n ),\n False,\n )\n\n # Set a sensible memory limit when using Hyper-V isolation mode\n if isolation == \"hyperv\":\n buildArgs += [\"-m\", \"4GiB\"]\n\n return buildArgs", "def build_msms():\r\n\r\n # Prepare include file with dynamic data\r\n f = open(os.path.join(GME_ROOT, \"Install\", \"GME_dyn.wxi\"), 'w')\r\n print >> f, \"<!-- DO NOT EDIT THIS FILE. WILL BE REGENERATED BY THE BUILD SCRIPTS -->\"\r\n print >> f, \"<Include>\"\r\n print >> f, \" <?define GUIDSTRMETAGME='%s' ?>\" % (tools.query_GUID(mta_for_xmp(METAGME_XMP)))\r\n print >> f, \" <?define GUIDSTRHFSM='%s' ?>\" % (tools.query_GUID(mta_for_xmp(HFSM_XMP)))\r\n print >> f, \" <?define GUIDSTRSF='%s' ?>\" % (tools.query_GUID(mta_for_xmp(SF_XMP)))\r\n print >> f, \" <?define GUIDSTRUML='%s' ?>\" % (tools.query_GUID(mta_for_xmp(UML_XMP)))\r\n print >> f, \"</Include>\"\r\n f.close()\r\n\r\n import glob\r\n sources = [f for f in glob.glob(os.path.join(GME_ROOT, \"Install\", \"*.wxs\")) if os.path.basename(f) not in ('GME.wxs', 'GME_bundle.wxs')]\r\n if prefs['arch'] == 'x64':\r\n sources.remove(os.path.join(GME_ROOT, \"Install\", \"GME_paradigms.wxs\"))\r\n for file_ in sources:\r\n extras = []\r\n if os.path.basename(file_) == 'GME_paradigms.wxs':\r\n extras = glob.glob(os.path.join(GME_ROOT, \"Install\", \"PIA*/*.wxi\"))\r\n tools.build_WiX([file_] + extras)", "def SlaveBuildName(chrome_dir):\n return os.path.basename(SlaveBaseDir(chrome_dir))", "def wsBuildExe(self, nj=0):\n\n txt = '\\n#Written by cms_cmssw::wsBuildExe\\n'\n txt += 'echo \">>> moving CMSSW software directories in `pwd`\" \\n'\n \n txt += 'rm -rf lib/ module/ \\n'\n txt += 'mv $RUNTIME_AREA/lib/ . \\n'\n txt += 'mv $RUNTIME_AREA/module/ . \\n'\n if self.dataExist == True:\n txt += 'rm -rf src/ \\n'\n txt += 'mv $RUNTIME_AREA/src/ . 
\\n'\n if len(self.additional_inbox_files)>0:\n #files used by Watchdog must not be moved\n watchdogFiles=['rssLimit','vszLimit','diskLimit','cpuLimit','wallLimit']\n for file in self.additional_inbox_files:\n if file in watchdogFiles :\n pass\n else:\n txt += 'mv $RUNTIME_AREA/'+os.path.basename(file)+' . \\n'\n\n txt += 'echo \">>> Include $RUNTIME_AREA in PYTHONPATH:\"\\n'\n txt += 'if [ -z \"$PYTHONPATH\" ]; then\\n'\n txt += ' export PYTHONPATH=$RUNTIME_AREA/\\n'\n txt += 'else\\n'\n txt += ' export PYTHONPATH=$RUNTIME_AREA/:${PYTHONPATH}\\n'\n txt += 'echo \"PYTHONPATH=$PYTHONPATH\"\\n'\n txt += 'fi\\n'\n txt += '\\n'\n\n if self.pset != None:\n psetName = 'pset.py'\n\n txt += '\\n'\n if self.debug_wrapper == 1:\n txt += 'echo \"***** cat ' + psetName + ' *********\"\\n'\n txt += 'cat ' + psetName + '\\n'\n txt += 'echo \"****** end ' + psetName + ' ********\"\\n'\n txt += '\\n'\n txt += 'echo \"***********************\" \\n'\n txt += 'which edmConfigHash \\n'\n txt += 'echo \"***********************\" \\n'\n txt += 'edmConfigHash ' + psetName + ' \\n'\n txt += 'PSETHASH=`edmConfigHash ' + psetName + '` \\n'\n txt += 'echo \"PSETHASH = $PSETHASH\" \\n'\n #### temporary fix for noEdm files #####\n txt += 'if [ -z \"$PSETHASH\" ]; then \\n'\n txt += ' export PSETHASH=null\\n'\n txt += 'fi \\n'\n #############################################\n txt += '\\n'\n return txt", "def build_linux(self, **kargs):\n self.linux_files = [\"%s/%s/wombat/vmlinux\" % (self.builddir, self.name)]\n LIB_DEPENDS = [self.libs[\"mutex\"][1]]\n LIB_DEPENDS += [self.libs[\"iguana\"][1]]\n LIB_DEPENDS += [self.libs[\"l4\"][1]]\n LIB_DEPENDS += [self.libs[\"timer\"][1]]\n LIB_DEPENDS += [self.libs[\"l4e\"][1]]\n LIB_DEPENDS += [self.libs[\"c\"][1]]\n LIB_DEPENDS += [self.libs[\"circular_buffer\"][1]]\n LIB_DEPENDS += [self.libs[\"ll\"][1]]\n LIB_DEPENDS += [self.libs[\"range_fl\"][1]]\n LIB_DEPENDS += [self.libs[\"naming\"][1]]\n\n if \"pxa\" in self.machine.drivers:\n LIB_DEPENDS += [self.libs[\"pxa\"][1]]\n \n l4linux = self.Command(self.linux_files, LIB_DEPENDS, buildlinux)\n l4linux = Flatten([l4linux])[0]\n Precious(self.linux_files)\n\n\twombat_cflags = \"-DENDIAN_%s \" % self.machine.endian.upper()\n\n\tif machine.pidreloc == True:\n wombat_cflags += \" -DARM_PID_RELOC \"\n\n if restrict_vm == True:\n wombat_cflags += \" -DCONFIG_RESTRICTED_VM=1 \"\n\n\tif (hasattr(machine, \"c_flags\")):\n\t wombat_cflags += ' '.join(machine.c_flags)\n\n # This is horrible :(\n mutex_include = os.getcwd() + os.sep + self.libs[\"mutex\"][0][0][1:]\n ig_include = os.getcwd() + os.sep + self.libs[\"iguana\"][0][0][1:]\n ig_idl4_include = self.libs[\"iguana\"][0][-1]\n l4_include = os.getcwd() + os.sep + self.libs[\"l4\"][0][0][1:]\n timer_include = os.getcwd() + os.sep + self.libs[\"timer\"][0][0][1:]\n cb_include = os.getcwd() + os.sep + self.libs[\"circular_buffer\"][0][0][1:]\n idl4_include = os.getcwd() + os.sep + self.libs[\"idl4\"][0][1:] + os.sep\n naming_include = os.getcwd() + os.sep + self.libs[\"naming\"][0][0][1:] + os.sep\n \n mutex_lib = os.getcwd() + os.sep + self.libs[\"mutex\"][2][1:] + os.sep\n ig_lib = os.getcwd() + os.sep + self.libs[\"iguana\"][2][1:] + os.sep\n l4_lib = os.getcwd() + os.sep + self.libs[\"l4\"][2][1:] + os.sep\n timer_lib = os.getcwd() + os.sep + self.libs[\"timer\"][2][1:] + os.sep\n l4e_lib = os.getcwd() + os.sep + self.libs[\"l4e\"][2][1:] + os.sep\n c_lib = os.getcwd() + os.sep + self.libs[\"c\"][2][1:] + os.sep\n cb_lib = os.getcwd() + os.sep + self.libs[\"circular_buffer\"][2][1:] 
+ os.sep\n ll_lib = os.getcwd() + os.sep + self.libs[\"ll\"][2][1:] + os.sep\n rfl_lib = os.getcwd() + os.sep + self.libs[\"range_fl\"][2][1:] + os.sep\n naming_lib = os.getcwd() + os.sep + self.libs[\"naming\"][2][1:] + os.sep\n\n LIB_ARGS = \"\"\n LIB_ARGS += \" LIBL4_INCLUDE=%s\" % l4_include\n LIB_ARGS += \" LIBTIMER_INCLUDE=%s\" % timer_include\n LIB_ARGS += \" LIBCB_INCLUDE=%s\" % cb_include\n LIB_ARGS += \" IGUANA_INCLUDE=%s\" % ig_include\n LIB_ARGS += \" IGUANA_IDL_INCLUDE=%s\" % ig_idl4_include\n LIB_ARGS += \" IDL4_INCLUDE=%s\" % idl4_include\n LIB_ARGS += \" NAMING_INCLUDE=%s\" % naming_include\n LIB_ARGS += \" MUTEX_INCLUDE=%s\" % mutex_include\n if \"pxa\" in self.machine.drivers:\n pxa_include = os.getcwd() + os.sep + self.libs[\"pxa\"][0][0][1:] + os.sep\n LIB_ARGS += \" LIBPXA_INCLUDE=%s\" % pxa_include\n\n LIB_ARGS += \" LIBCDIR=%s\" % c_lib\n LIB_ARGS += \" LIBIGUANADIR=%s\" % ig_lib\n LIB_ARGS += \" LIBL4DIR=%s\" % l4_lib\n LIB_ARGS += \" LIBTIMERDIR=%s\" % timer_lib\n LIB_ARGS += \" LIBL4EDIR=%s\" % l4e_lib\n LIB_ARGS += \" LIBCBDIR=%s\" % cb_lib\n LIB_ARGS += \" LIBLLDIR=%s\" % ll_lib\n LIB_ARGS += \" LIBRANGE_FLDIR=%s\" % rfl_lib\n LIB_ARGS += \" LIBNAMINGDIR=%s\" % naming_lib\n LIB_ARGS += \" LIBMUTEXDIR=%s\" % mutex_lib\n if \"pxa\" in self.machine.drivers:\n pxa_lib = os.getcwd() + os.sep + self.libs[\"pxa\"][2][1:] + os.sep\n LIB_ARGS += \" LIBPXADIR=%s\" % pxa_lib\n\n l4linux.linux_build_cmd = \"make -C wombat O=%s/%s/wombat WOMBAT_CFLAGS=\\'%s\\' V=0 %s \" \\\n \"CROSS_COMPILE=%s \" % \\\n (self.builddir, self.name, wombat_cflags, LIB_ARGS, self.toolchain)\n\n if cleaning and os.path.exists(\"%s/%s/wombat\" % (self.builddir, self.name)):\n shutil.rmtree(\"%s/%s/wombat\" % (self.builddir, self.name))\n\n # As for pistachio we don't track the L4Linux dependencies so the\n # use needs to explicitly specify scons build_linux= to get L4Linux\n # rebuilt\n add_arg(\"build_linux\", \"Set this option if you want to rebuild Wombat on this build\", 0)\n if build_linux != 0:\n AlwaysBuild(l4linux)\n\n\tenv['EXPECT_TEST_DATA'] = [(\"Iguana init starting\", None),\n (\"Loading linux\", None),\n (\"Memory: \\d+k/\\d+k available\", None),\n (\"Please press Enter to activate this console.\", None)]\n\n return l4linux", "def built_file_basename(self, name, type=None, **kw):\n if not kw.get('bare'):\n if type == self.EXECUTABLE:\n name = name + self._exe\n elif type == self.STATIC_LIB:\n name = self.lib_ + name + self._lib\n elif type == self.SHARED_LIB:\n name = self.dll_ + name + self._dll\n return name", "def GetBuildFormat(self):\n # The comma means that ninja and qtcreator_ninja will be chained and use the\n # same input information so that .gyp files will only have to be parsed\n # once.\n return 'ninja,qtcreator_ninja'", "def genCMakeCmd(self):\n \n cmd = \"cmake -C \" + self.parent.env[\"ILCSOFT_CMAKE\"] + \" \"\n for k, v in self.envcmake.iteritems():\n cmd = cmd + \"-D\" + k + \"=\\\"\" + str(v).strip() + \"\\\" \"\n\n cmd += self.installPath\n\n return cmd.strip()", "def _get_build_os_name():\n system = platform.system()\n if 'Darwin' in system or 'Macintosh' in system:\n return 'darwin-x86'\n\n # TODO: Add more values if needed.\n return 'linux-x86'", "def buildname(self, env_prop=None):\n if self._buildname is not None:\n return self._buildname\n try:\n platform, build = env_prop['chipName'], env_prop['switchppVersion']\n except (KeyError, TypeError):\n message = 'Cannot determine build name'\n self.class_logger.warning(message)\n self._buildname = 
self.UNDEFINED_BUILD\n else:\n self.platform = platform\n self.build = build\n name_iter = (MODULES[_var].ReportingServerConfig._get_build_name(self._opts) for _var in # pylint: disable=protected-access\n MODULES if 'reports_conf.' in _var)\n with suppress(StopIteration): # retain build name from env_prop\n build = next(name for name in name_iter if name is not None)\n self._buildname = '{0}-{1}'.format(build, platform)\n\n # WORKAROUND to add 'sanity' suffix to buildname\n if 'sanity' in self._opts.markexpr and self._buildname is not None:\n self._buildname += \"-sanity\"\n # WORKAROUND END\n return self._buildname", "def _generate_build_name(build_slug, ref, prefix=\"\", limit=63, ref_length=6):\n # escape parts that came from providers (build slug, ref)\n # build names are case-insensitive `.lower()` is called at the end\n build_slug = _safe_build_slug(\n build_slug, limit=limit - len(prefix) - ref_length - 1\n )\n ref = _safe_build_slug(ref, limit=ref_length, hash_length=2)\n\n return \"{prefix}{safe_slug}-{ref}\".format(\n prefix=prefix,\n safe_slug=build_slug,\n ref=ref[:ref_length],\n ).lower()", "def _add_scripts(prefix):\n mapping = {\"MAST_HOME\": prefix}\n if \"Windows\" in platform.system():\n script_dir = os.path.join(INSTALL_DIR, \"files\", \"windows\")\n files = [\n \"mast.bat\",\n \"mast-system.bat\",\n \"mast-accounts.bat\",\n \"mast-backups.bat\",\n \"mast-crypto.bat\",\n \"mast-deployment.bat\",\n \"mast-developer.bat\",\n \"mast-network.bat\",\n \"test-mast.bat\",\n \"mast-version.bat\",\n \"mast-web.bat\",\n \"mastd.bat\",\n \"mast-ssh.bat\",\n \"set-env.bat\",\n ]\n elif \"Linux\" in platform.system():\n script_dir = os.path.join(INSTALL_DIR, \"files\", \"linux\")\n files = [\n \"mast\",\n \"mast-system\",\n \"mast-accounts\",\n \"mast-backups\",\n \"mast-crypto\",\n \"mast-deployment\",\n \"mast-developer\",\n \"mast-network\",\n \"test-mast\",\n \"mast-version\",\n \"mast-web\",\n \"mast-ssh\",\n \"mastd\",\n \"set-env\",\n ]\n\n for f in files:\n dst = os.path.join(prefix, f)\n src = os.path.join(script_dir, f)\n print(\"{} -> {}\".format(src, dst))\n content = render_template_file(src, mapping)\n write_file(dst, content)\n if \"Linux\" in platform.system():\n os.chmod(dst, 0o755)\n\n if \"Windows\" in platform.system():\n # copy python27.dll to site-packages/win32 directory to get around\n # issue when starting mastd\n src = os.path.join(prefix, \"miniconda\", \"python27.dll\")\n dst = os.path.join(\n prefix,\n \"miniconda\",\n \"Lib\",\n \"site-packages\",\n \"win32\",\n \"python27.dll\"\n )\n copyfile(src, dst)\n for filename in [\"pythoncom27.dll\", \"pythoncomloader27.dll\", \"pywintypes27.dll\"]:\n src = os.path.join(\n prefix,\n \"miniconda\",\n \"Lib\",\n \"site-packages\",\n \"pywin32_system32\",\n filename,\n )\n dst = os.path.join(\n prefix,\n \"miniconda\",\n \"Lib\",\n \"site-packages\",\n \"win32\",\n filename,\n )\n copyfile(src, dst)\n copytree(\n os.path.join(INSTALL_DIR, \"files\", \"bin\"),\n os.path.join(prefix, \"bin\")\n )\n copytree(\n os.path.join(INSTALL_DIR, \"files\", \"etc\"),\n os.path.join(prefix, \"etc\")\n )\n copytree(\n os.path.join(INSTALL_DIR, \"files\", \"var\"),\n os.path.join(prefix, \"var\")\n )\n copytree(\n os.path.join(INSTALL_DIR, \"files\", \"usrbin\"),\n os.path.join(prefix, \"usrbin\")\n )\n copytree(\n os.path.join(INSTALL_DIR, \"files\", \"tmp\"),\n os.path.join(prefix, \"tmp\")\n )\n copytree(\n os.path.join(INSTALL_DIR, \"files\", \"doc\"),\n os.path.join(prefix, \"doc\")\n )\n copytree(\n os.path.join(INSTALL_DIR, 
\"files\", \"contrib\"),\n os.path.join(prefix, \"contrib\")\n )", "def build_all(c, name, path, force=False):\n print(40*\"-\")\n print(\" starting the build and check...\")\n print(40*\"-\")\n # genate the app\n if force:\n r=c.run(\"python generate_app.py -n {} -p {} -f\".format(name, path))\n else:\n r=c.run(\"python generate_app.py -n {} -p {}\".format(name, path))\n \n print(\" .. generated the PythonOnWheels app.\")\n # switch the current dir for invoke. every c.run starts from that dir.\n app_path=os.path.abspath(os.path.join(path, name))\n # create a venv\n if os.name == \"nt\":\n with c.cd(app_path):\n print(\" .. creating a virtualenv\")\n c.run(\"python -m venv ./venv\")\n print(\" .. Installing the PoW requirements\")\n c.run(\"cd ./venv/Scripts && pip.exe install -r {}\".format(\n os.path.normpath(os.path.join(\"..\\..\", \"requirements.txt\"))))\n elif os.name == \"posix\":\n with c.cd(app_path):\n print(\" .. creating a virtualenv\")\n c.run(\"python -m venv ./venv\")\n with c.cd(os.path.join(app_path, \"venv/bin\")):\n print(\" .. Installing the PoW requirements\")\n print(\"cwd: \" + c.cwd)\n #pipath= os.path.abspath(os.path.join(app_path, \"./venv/bin/pip\"))\n #print(\"venv pip path: {}\".format( pipath ))\n reqpath = os.path.normpath(os.path.join( app_path, \"requirements.txt\"))\n print(\"requirements.txt: {}\".format(reqpath))\n c.run(\"./pip install -r {}\".format( reqpath ))\n c.run(\"./pip freeze\")\n else:\n print(\"only posix and windows compatible OS are supported, sorry!\")\n sys.exit()\n test(c,path,name)\n runserver(c,path,name)", "def main():\n for key in UNSAFE_FLAGS:\n if key in os.environ:\n del os.environ[key]\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--fcompiler', choices=['pgi', 'intel', 'gfortran'],\n help='Fortran compiler to use')\n parser.add_argument('--usecuda', help='whether to also compile CUDA implementation',\n action='store_true')\n parser.add_argument('--debug', help='use debug configuration when compiling fortran code',\n action='store_true')\n\n cmd_args = parser.parse_args()\n args = ['python', 'setup.py', 'build_ext', '--fcompiler=' + FCOMPILER_MAP[cmd_args.fcompiler]]\n if cmd_args.debug:\n args += ['--debug']\n if cmd_args.usecuda:\n args += ['--usecuda']\n args += ['build', 'bdist_wheel']\n\n proc = subprocess.Popen(['rm', '-rf', 'dist', 'build'])\n proc.wait()\n\n proc = subprocess.Popen(args)\n proc.wait()\n if proc.returncode != 0:\n print('ERROR! build process failed.')\n exit(1)\n\n wheels = glob.glob('./dist/*.whl')\n latest_wheel = max(wheels, key=os.path.getctime)\n\n proc = subprocess.Popen(['pip', 'install', latest_wheel, '--upgrade'])\n proc.wait()", "def GetBuildFormat(self):\n # The comma means that ninja and qtcreator_ninja will be chained and use the\n # same input information so that .gyp files will only have to be parsed\n # once.\n return 'ninja'", "def pkgbuildContentBuild( self, pars, directory ):\n\n return \"\"\"\\\n # If your package requires compilation, insert your build code here\n cd \"${srcdir}/${pkgname}-${pkgver}\"\n echo Building ...\\\n\"\"\"", "def get_env_name(tool_name, python, requirements, tagged_env_vars, build=False):\n if tool_name:\n name = [tool_name]\n else:\n # Backward compatibility vs. 
result file names\n name = []\n\n name.append(f\"py{python}\")\n reqs = list(requirements.items())\n reqs.sort()\n for key, val in reqs:\n if val:\n name.append(''.join([key, val]))\n else:\n name.append(key)\n\n env_vars = _untag_env_vars(tagged_env_vars, build=build)\n\n for env_var, value in sorted(env_vars.items()):\n name.append(''.join([env_var, value]))\n\n return util.sanitize_filename('-'.join(name))", "def build(self):\n env = ConfigureEnvironment(self.deps_cpp_info, self.settings)\n\n set_path_command = \"\"\n # Download nasm as build tool. This should go to source()\n if self.options.SSE == True:\n if self.settings.os == \"Linux\":\n # TODO: We should build nasm from source then.\n self.options.SSE = False # Or is removing here better? I'm not familiar with python..\n else:\n nasm_version = \"2.12.02\"\n nasm_os_url_id = \"\" #nasm url identifier\n if self.settings.os == \"Windows\":\n if self.settings.arch == \"x86\":\n nasm_os_url_id = \"win32\"\n else:\n nasm_os_url_id = \"win64\" \n elif self.settings.os == \"Macos\":\n nasm_os_url_id = \"macosx\"\n nasm_folder_name = \"nasm-%s-%s\" % (nasm_version, nasm_os_url_id)\n nasm_zip_name = \"%s.zip\" % nasm_folder_name\n download(\"http://www.nasm.us/pub/nasm/releasebuilds/%s/%s/%s\" % (nasm_version, nasm_os_url_id, nasm_zip_name), nasm_zip_name)\n self.output.warn(\"Downloading nasm: http://www.nasm.us/pub/nasm/releasebuilds/%s/%s/%s\" % (nasm_version, nasm_os_url_id, nasm_zip_name))\n unzip(nasm_zip_name)\n os.unlink(nasm_zip_name)\n nasm_path = os.path.join(os.getcwd(), nasm_folder_name)\n\n #env.environ[\"PATH\"] += os.pathsep + nasm_path #its probably as easy as this, but i cant append to the path self.run operates in.\n if self.settings.os == \"Windows\":\n set_path_command = \"set \\\"PATH=%s\\\" &&\" % os.environ[\"PATH\"]\n else:\n set_path_command = \"PATH=\\\"%s\\\" &&\" % os.environ[\"PATH\"]\n\n if self.settings.os == \"Linux\" or self.settings.os == \"Macos\":\n if self.options.fPIC:\n env_line = env.command_line.replace('CFLAGS=\"', 'CFLAGS=\"-fPIC ')\n else:\n env_line = env.command_line\n self.run(\"cd %s && autoreconf -fiv\" % self.ZIP_FOLDER_NAME)\n config_options = \"\"\n if self.settings.arch == \"x86\":\n if self.settings.os == \"Linux\":\n config_options = \"--host i686-pc-linux-gnu CFLAGS='-O3 -m32' LDFLAGS=-m32\"\n else:\n config_options = \"--host i686-apple-darwin CFLAGS='-O3 -m32' LDFLAGS=-m32\"\n\n if self.settings.os == \"Macos\":\n old_str = '-install_name \\$rpath/\\$soname'\n new_str = '-install_name \\$soname'\n replace_in_file(\"./%s/configure\" % self.ZIP_FOLDER_NAME, old_str, new_str)\n\n self.run(\"cd %s && %s ./configure %s\" % (self.ZIP_FOLDER_NAME, env_line, config_options))\n self.run(\"cd %s && %s make\" % (self.ZIP_FOLDER_NAME, env_line))\n else: # We should (for simplicity) always use cmake shouldnt we?\n conan_magic_lines = '''project(libjpeg-turbo)\n cmake_minimum_required(VERSION 3.0)\n include(../conanbuildinfo.cmake)\n CONAN_BASIC_SETUP()\n '''\n replace_in_file(\"%s/CMakeLists.txt\" % self.ZIP_FOLDER_NAME, \"cmake_minimum_required(VERSION 2.8.8)\", conan_magic_lines)\n replace_in_file(\"%s/CMakeLists.txt\" % self.ZIP_FOLDER_NAME, \"project(libjpeg-turbo C)\", \"\")\n \n cmake = CMake(self.settings)\n builddir = os.path.join(self.ZIP_FOLDER_NAME, \"_build\")\n\n if os.path.exists(builddir):\n shutil.rmtree(builddir) # We need to remove this folder first for windows\n os.makedirs(builddir)\n\n cmake_options = []\n if self.options.shared == True:\n cmake_options += 
[\"-DENABLE_STATIC=0\"]\n else:\n cmake_options = [\"-DENABLE_SHARED=0\"]\n cmake_options += [\"-DWITH_SIMD=%s\" % \"1\" if self.options.SSE else \"0\"]\n\n # why this comment: \"Don't change runtime, conan will take care of\"? conan_basic_setup() runs before this cmake option replaces MT with MD again\n cmake_options += [\"-DWITH_CRT_DLL=%s\" % \"1\" if self.settings.compiler.runtime == \"MD\" or self.settings.compiler.runtime == \"MDd\" else \"0\"]\n\n self.run('%s cd %s && cmake .. %s %s' % (set_path_command, builddir, cmake.command_line, \" \".join(cmake_options)))\n self.run(\"%s cd %s && cmake --build . %s\" % (set_path_command, builddir, cmake.build_config))", "def esp32_app_build(ctx):\n _run_idf_script(ctx, \"build\")", "def on_windows ():\n if bjam.variable(\"NT\"):\n return True\n\n elif bjam.variable(\"UNIX\"):\n\n uname = bjam.variable(\"JAMUNAME\")\n if uname and uname[0].startswith(\"CYGWIN\"):\n return True\n\n return False" ]
[ "0.62677795", "0.6129463", "0.6032943", "0.5977447", "0.577154", "0.5720386", "0.56446457", "0.5628962", "0.5623403", "0.550566", "0.5472778", "0.54249173", "0.5416323", "0.53943837", "0.53937995", "0.53872776", "0.536856", "0.53682464", "0.5364527", "0.53571385", "0.53334236", "0.5329062", "0.53271425", "0.5321418", "0.5318968", "0.5299371", "0.528379", "0.5254594", "0.52465224", "0.52420634" ]
0.6428409
0
A view that renders the bag contents page
def view_bag(request): return render(request, 'bag/bag.html')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def view_bag(request):\n return render(request, 'bag/bag.html')", "def view_bag(request):\n template = 'bag/bag.html'\n return render(request, template)", "def view_shoppingbag(request):\n\n return render(request, 'shoppingbag/shoppingbag.html')", "def view_basket(request):\n\n return render(request, 'basket/basket.html')", "def get(self):\n self.render('view.html')", "def view(self):", "def test_show_bag(self):\n response = self.client.get('/shopping_bag/')\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'shopping_bag/bag.html')", "def index(self):\n return self.html", "def index(self) -> HTMLBody:\n\t\treturn render_template(\"index.jinja2\")", "def index(self):\n return render(\"/derived/rock/index.mako\")", "def get(self, request ):\n return render(request, \"main_display_cards.html\")", "def get(self):\n WriteTemplate(self.response, 'tips.html', {})", "def index(self):\n\t\treturn render_template('index.html')", "def get(self):\n self.response.write(view_utils.render('base.html', {}))", "def index() -> object:\n return render_template('ue_bootstrap.j2', title='UENERGO TAGS')", "def index(self):\n return self.load_view('index.html')", "def data_page():\n return render_template(\"data.html\")", "def get(self, request, **kwargs):\n elementos_list= Elementos.objects.all()\n return render(request, 'alchemy/index.html', {})", "def my_box(request):\n calls_detail = CallDetail.objects.all()\n if len(calls_detail) > 5:\n calls_detail = calls_detail[:5]\n template = loader.get_template('box.html')\n context = {\n 'calls_detail': calls_detail\n }\n return HttpResponse(template.render(context, request))", "def view_html_page():\n\n return render_template(\"moby.html\")", "def view(self, viewname, **data):\n view = self.mylookup.get_template(viewname + '.mako').render(**data)\n \n self.res.status = 202\n self.res.content_type = 'text/html'\n self.res.content_length = len(view)\n \n self.start_response(self.res.status, self.res.headerlist)\n return view", "def machinelearn2():\n return render_template('frontml.html')", "def render_content(self, request, tag):\n if self.showreel_document is not None:\n graph_row_xml = XMLString(self.get_content_xml())\n return graph_row_xml.load()\n else:\n oops_container = self.get_bad_container()\n return oops_container.load()", "def data_page():\n\n return render_template('Data_Page.html')", "def get(self):\n\n upload_info = self.request.GET.get('upload_info')\n if upload_info:\n self.response.headers['content-type'] = 'text/plain'\n self.response.out.write(upload_info)\n return\n\n blobs = reversed(blobstore.BlobInfo.all().fetch(10))\n output = template.render('index.html', {'blobs': blobs})\n self.response.out.write(output)", "def get(self):\n self.render('index.html')\n return", "def index(request):\n return render(request, 'items/index.html', {\n 'globalvars': globalvars,\n 'fart': 'fart'\n })", "def get(self):\n self.render(\"index.html\")", "def view_basket(request):\n\n context = {\n 'discount_percentage': settings.DISCOUNT_PERCENTAGE,\n }\n return render(request, 'basket/basket.html', context)", "def index(self):\n return render_template('main/index.html')" ]
[ "0.81315035", "0.7977562", "0.6872735", "0.68546337", "0.6620919", "0.6348403", "0.6221381", "0.6121466", "0.6058291", "0.6022294", "0.60026085", "0.59646916", "0.5949683", "0.5934281", "0.5898349", "0.58762145", "0.58650833", "0.5857844", "0.5836029", "0.5827804", "0.5826162", "0.58243877", "0.58224225", "0.58123755", "0.58030176", "0.57988465", "0.5769149", "0.57536566", "0.5753263", "0.57491356" ]
0.81268024
1
busy wait for robot completion
def waitrobot(robot): while not robot.GetController().IsDone(): time.sleep(0.01)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def busyWait(self):\n time.sleep(0.0)", "def waitForCompletion(self):\n\n while(json.loads(self.robot.device())['state']!=0):\n time.sleep(0.1)\n continue\n\n return", "def wait(self):\n time.sleep(0.010)", "def wait(self):\n pass", "def wait(self):\n pass", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def wait():\n pass", "def do_wait(self):\n pass", "def wait():\n time.sleep(1)", "def wait(self):\n\t\traise NotImplementedError(\"must be redeclared\")", "def check_completion(self):\n\n time.sleep(3)\n while self.status == 0:\n pass", "def wait(self):\n\t\twhile True:\n\t\t\tr1 = self.zaberSend(self.translation[\"hor\"], self.cmd[\"returnStatus\"], data=0)\n\t\t\tr2 = self.zaberSend(self.translation[\"ver\"], self.cmd[\"returnStatus\"], data=0)\n\t\t\tif r1[2] == 0 and r2[2] == 0:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\ttime.sleep(.01)", "def run_and_wait():\n self.busy.put(True)\n action()\n self.busy.put(False)\n status._finished(success=True)", "def wait(self):\n time.sleep(self.next())", "def wait_progress(self):\n pass", "def wait_progress(self):\n pass", "def _busy_wait(self):\n wait_for = GPIO.HIGH\n if self.inky_version == 2:\n wait_for = GPIO.LOW\n\n while(GPIO.input(self.busy_pin) != wait_for):\n pass", "def waitfor(self):\r\n finished = False\r\n while finished == False:\r\n time.sleep(5)\r\n finished = self.isFinished()", "def wait(self, cycles):\n\t\tpass", "def block_waiting( self ):\n while self.num_waiting > 0:\n time.sleep( 1 )", "def wait(self, timeoout=None, state=\"C-completed\"):", "def _wait_for_completion(self):\n if self.do_timing:\n self.timer.start(\"Running.\")\n\n while self.state != State.COMPLETED:\n self._update_state()\n\n if self.do_timing:\n self.timer.stop()", "def wait(self):\n self.event.wait()", "async def wait_until_done(self) -> None:\n ...", "def wait(self):\n while not self.done:\n self.device._handle_events(1000)", "def wait(self, ms=None):\r\n util.raiseNotDefined()", "def wait_complete(self):\n self.join()" ]
[ "0.7729015", "0.7684526", "0.76390046", "0.7526746", "0.7526746", "0.7480899", "0.7480899", "0.7480899", "0.7480899", "0.7460632", "0.7429611", "0.7353933", "0.73518384", "0.7336683", "0.7308061", "0.7302214", "0.72688043", "0.7262008", "0.7262008", "0.72150105", "0.7173207", "0.7164182", "0.716261", "0.7159663", "0.7087422", "0.7077602", "0.7065622", "0.7051149", "0.7040757", "0.70143384" ]
0.8239064
0
Reads the input file and runs a Monte Carlo simulation to convert Raman tensors from molecular to laboratory coordinates and prints the matrix in the Mueller formalism to a file
def main(cliArgs):
    log.info("START RAMAN TENSOR CONVERSION")

    # Read tensor file as matrices
    tensorlist = util.readFileAsMatrices(cliArgs.tensorfile, (3,3))

    # PREPARE SIMULATION
    log.info("Prepare simulation")

    # Copy the structure of tensorlist with empty arrays. This copy will be filled with the result of the simulation
    convertedTensorlist = [{"head": tensor["head"],
                            "muellerMatrix": np.diag([0, 0, 0, 0]).astype(float),
                            "ramanTensor": np.diag([0, 0, 0]).astype(float)
                           } for tensor in tensorlist]

    # Set a flag to signal the while loop below whether or not to rerun the simulation if validation fails
    runMonteCarlo = True

    # Total number of iterations
    # This number will increase if the simulation is not validated and run again
    totalIterations = cliArgs.iterationLimit

    # RUN MONTE-CARLO SIMULATION
    # The steps 1. and 2. will be performed by the function __monteCarlo(). Step 3. will be performed by this function.
    # Calculation: 1. Rotate all raman tensors randomly via matrix multiplication
    #                 Uniformly distributed random rotations are generated with James Arvo's Algorithm "Fast Random Rotation Matrices". See pdf file jamesArvoAlgorithm.pdf for the math.
    #              2. Compute the mueller matrix of the rotated raman tensor. For the math, see pdf file ramanMuellerMatrix.pdf.
    #              3. Compute the mean of all rotated mueller matrices and raman tensors. The mean will be computed by the main function.
    # The while loop gives the opportunity to run the simulation again, if the validation of the simulation fails.
    while runMonteCarlo == True:

        log.info("START MONTE CARLO SIMULATION")
        # !!!!! LOGGING IS OMITTED DURING THE SIMULATION DUE TO SEVERE PERFORMANCE ISSUES !!!!!!

        # Build a generator that returns the tensorlist that will be passed to every iteration of the monte-carlo-simulation
        processArgs = ( tensorlist for i in range(cliArgs.iterationLimit) )

        # Create a pool of workers sharing the computation task
        with multiprocessing.Pool(processes = cliArgs.processCount) as pool:

            # Start child processes which run __monteCarlo()
            # Each subprocess will be given a list of size chunksize. Each element of the list contains the list of all raman tensors.
            # Each subprocess will therefore run the function __monteCarlo() chunksize times and passes the tensorlist to every function call.
            # The computation will be slow if the chunksize is too big or too small
            process = pool.imap_unordered(__monteCarlo, processArgs, chunksize = cliArgs.chunksize)

            # Loop over all ready results, while the processes are still running
            # process contains all rotated matrices
            # tqdm prints a lovely progress bar
            for result in tqdm( process, total = cliArgs.iterationLimit, desc = "Processes " + str(cliArgs.processCount) ):
                # Tally the results of all processes up and divide by the iteration limit to get the mean of all computations
                convertedTensorlist = [ {"head"         : tensor["head"],
                                         "muellerMatrix": np.add(convertedTensorlist[index]["muellerMatrix"], tensor["muellerMatrix"]/totalIterations),
                                         "ramanTensor"  : np.add(convertedTensorlist[index]["ramanTensor"]  , tensor["ramanTensor"] /totalIterations)
                                        } for (index, tensor) in enumerate(result) ]

        log.info("STOPPED MONTE CARLO SIMULATION SUCCESSFULLY")

        #
        # VALIDATE THE SIMULATION
        # by comparing the depolarisation ratio of the molecular tensor and the laboratory matrix
        # Source: Richard N. Zare: Angular Momentum, p.129
        #
        log.info("Validating monte-carlo-simulation via the depolarisation ratio.")

        # Check every matrix
        for initial, final in zip(tensorlist, convertedTensorlist):

            log.debug("Check matrix '" + initial["head"] + "'.")

            # Check if loop is comparing the right matrices
            if initial["head"] != final["head"]:
                log.critical("INTERNAL ERROR: The header of input and output matrices don't match! Error in input tensor '" + initial["head"] + "' and output matrix '" + final["head"] + "'." )
                log.critical("TERMINATE EXECUTION.")
                sys.exit(-1)

            # Compute eigenvalues of molecular tensor
            try:
                eigenvalues = np.linalg.eigvals(initial["matrix"])

            except LinAlgError as e:
                # Eigenvalues do not converge. Log this issue and exit execution.
                log.critical("The eigenvalue computation of the input raman tensor '" + initial["head"] + "' does not converge. Unable to validate monte-carlo-simulation!")
                log.critical("TERMINATE EXECUTION.")
                sys.exit(-1)

            # Compute depolarisation ratio of the initial tensor via the eigenvalues. See Richard N. Zare: "Angular Momentum", p.129.
            isotropicPolarisability = sum(eigenvalues)/3
            anisotropicPolarisability_squared = ( (eigenvalues[0]-eigenvalues[1])**2 + (eigenvalues[1]-eigenvalues[2])**2 + (eigenvalues[2]-eigenvalues[0])**2 )/2
            initialDepolarisationRatio = 3*anisotropicPolarisability_squared / ( 45*isotropicPolarisability**2 + 4*anisotropicPolarisability_squared )

            log.debug("Initial Depolarisation Ratio: " + str(initialDepolarisationRatio))

            # Compute the depolarisation ratio of the final mueller matrix via raman scattering in Mueller-Formalism. See Richard N. Zare: "Angular Momentum", p.129.
            # Compute light intensities along x- and y-axis via stokes parameter:
            # I_x = S_0 + S_1
            # I_y = S_0 - S_1
            # depolarisationRatio = I_y / I_x ; if the incoming light is polarised along the x-axis.
            incomingLight = np.array([1,1,0,0])
            scatteredLight = final["muellerMatrix"] @ incomingLight
            finalDepolarisationRatio = (scatteredLight[0]-scatteredLight[1])/(scatteredLight[0]+scatteredLight[1])

            log.debug("Final Depolarisation Ratio: " + str(finalDepolarisationRatio))

            #
            # CHECK RESULTS
            #
            # Give the user the opportunity to run the simulation
            # again and use the computation time that's been spent so far
            #
            if round(initialDepolarisationRatio, cliArgs.threshold) != round(finalDepolarisationRatio, cliArgs.threshold):
                success = False
                break
            else:
                success = True

        #
        # DECIDE TO CONTINUE OR END THE PROGRAM
        #
        if success == True:
            # Simulation is valid, exit while loop
            runMonteCarlo = False
            log.info("Validation done.")

        else:
            # The validation failed
            log.critical("Validation failed for matrix '" + final["head"] + "'!")
            log.critical("Input: " + str(round(initialDepolarisationRatio, cliArgs.threshold)) + " Simulation: " + str(round(finalDepolarisationRatio, cliArgs.threshold)))
            log.critical("Ask for user input. Should the simulation run again?")

            # Ask user if he/she wants to run more iterations and try the validation again
            response = input("The simulation did " + str(totalIterations) + " iterations. Do you wish to compute another " + str(cliArgs.iterationLimit) + " iterations and try the validation again? [Y/n] ").lower()
            log.critical("Users response: " + response)

            if response == "n":
                # User wants to exit
                log.critical("The user does not want to continue the computation.")
                log.critical("TERMINATE EXECUTION.")
                sys.exit(-1)

            else:
                # User wants to continue
                runMonteCarlo = True
                log.info("Run Monte-Carlo-Simulation again.")

                # Save the number of computed iterations done so far
                iterationsSoFar = totalIterations
                # Compute new number of total iterations
                totalIterations = iterationsSoFar + cliArgs.iterationLimit

                # Rescale the calculated matrices.
                # There is following problem: The program does not save a list of all computed matrices.
                # It only saves the mean value. In order to use the current mean
                # value of the matrices to compute the mean you get when doing more
                # iterations, you have to multiply the matrices by the number of
                # iterations done so far and divide it by the total number of
                # iterations that will be done after rerunning the simulation.
                log.info("Prepare rerun of simulation by rescaling the mueller matrices mean.")
                scalingFactor = iterationsSoFar / totalIterations
                convertedTensorlist = [ {"head"         : entry["head"],
                                         "muellerMatrix": entry["muellerMatrix"] * scalingFactor,
                                         "ramanTensor"  : entry["ramanTensor"]   * scalingFactor
                                        } for entry in convertedTensorlist ]

    ##### END OF MONTE-CARLO-SIMULATIONS WHILE LOOP

    # CONVERT RESULTS TO TEXT

    # Write the commandline parameters and the execution time in a string
    output_text  = "# polaram convert " + str(cliArgs.tensorfile.resolve())
    output_text += " --output " + str(cliArgs.outputfile.resolve())
    output_text += " --log " + str(cliArgs.logfile.resolve())
    output_text += " --iterations " + str(totalIterations)
    output_text += " --threshold " + str(cliArgs.threshold)
    output_text += "\n# Execution time: " + str(datetime.now())

    # Add user comment to string
    # Given via command line interface
    if cliArgs.comment != "":
        output_text += "\n\n# " + str(cliArgs.comment)

    # Add the calculated matrices to the string. The matrices are formatted like the tensor input file
    for dict in convertedTensorlist:
        # Print mean of mueller matrices
        output_text += "\n\n! " + dict["head"] + "\n" + np.array2string(dict["muellerMatrix"], sign = None).replace("[[", "").replace(" [", "").replace("]", "")
        # Print mean of raman tensors as comments
        output_text += "\n\n#! " + dict["head"] + " (Mean Of Rotated Raman Tensors)\n" + np.array2string(dict["ramanTensor"], sign = None).replace("[[", "#").replace(" [", "#").replace("]", "")

    # Log and write text to file
    log.debug("Writing results to '" + str(cliArgs.outputfile.resolve()) + "':\n\n" + output_text + "\n")
    print(output_text)
    cliArgs.outputfile.write_text(output_text)

    log.info("STOPPED RAMAN TENSOR CONVERSION SUCCESSFULLY")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n\tif len(sys.argv) < 12 or len(sys.argv) > 13:\n\t\tprint(\"Input parameters must be: 'filename lambda mu C c0 Q theta L H simulation_time is_debug repeats(optionally)'\")\n\telse:\n\t\tstart_time = time.time()\n\n\t\tfile_name = sys.argv[1]\n\t\tlambd = float(sys.argv[2])\n\t\tmu = float(sys.argv[3])\n\t\tC = int(sys.argv[4])\n\t\tc0 = int(sys.argv[5])\n\t\tQ = int(sys.argv[6])\n\t\ttheta = float(sys.argv[7])\n\t\tL = int(sys.argv[8])\n\t\tH = int(sys.argv[9])\n\t\tsimulation_time = float(sys.argv[10]);\n\t\tis_debug = True if sys.argv[11] == \"True\" else False;\n\t\trepeats = int(sys.argv[12]) if len(sys.argv) == 13 else 1;\n\n\t\tprint(\"Simulation started for params: lambda =\", lambd,\n\t\t\t \", mu =\", mu,\n\t\t\t \", C =\", C,\n\t\t\t \", c0 =\", c0,\n\t\t\t \", Q =\", Q,\n\t\t\t \", theta =\", theta,\n\t\t\t \", L =\", L,\n\t\t\t \", H =\", H,\n\t\t\t \", repeats =\", repeats)\n\n\t\tblocked = 0\n\t\tserved = 0\n\t\tgenerated = 0\n\t\tB = 0\n\t\tN = 0\n\n\t\tsimulation = Simulation(\"m/m/c[c0]/r[l,h]\", lambd, mu, theta, C, c0, L, H, simulation_time, Q, is_debug)\n\t\tfor i in range(0, repeats):\n\t\t\tsimulation = Simulation(\"m/m/c[c0]/r[l,h]\", lambd, mu, theta, C, c0, L, H, simulation_time, Q, is_debug)\n\t\t\tsimulation.start()\n\t\t\tblocked += simulation.queue.blocked\n\t\t\tserved += simulation.served_count\n\t\t\tgenerated += simulation.flow.generated_count\n\t\t\tB += simulation.queue.blocked/(simulation.served_count+simulation.queue.blocked)\n\t\t\tN += simulation.served_count/simulation_time\n\t\tend_time = time.time()\n\n\t\tblocked = blocked/repeats\n\t\tserved = served/repeats\n\t\tgenerated = generated/repeats\n\t\tB = B/repeats\n\t\tN = N/repeats\n\n\t\tprint( \"\")\n\t\tprint( \"Summary results:\")\n\t\tprint( \"blocked=\", blocked, \" served=\", served, \", generated=\", generated)\n\t\tprint(\"B = \", B)\n\t\tprint(\"N = \", N)\n\t\tprint(\"Execution time = %s seconds\" % (end_time - start_time))\n\t\tprint( \"... 
to be implemented more summary ...\")\n\n\t\t# write stats to file\n\t\tabs_path = os.path.abspath(__file__)\n\t\tpath = os.path.relpath('stats', abs_path)\n\t\tpath = os.path.join(path, file_name + '-(%s,%s,%s,%s,%s,%s,%s,%s).csv' % (lambd,mu,theta,C,c0,L,H,simulation_time))\n\n\t\toutfile=open(path,'w')\n\t\toutput = csv.writer(outfile, delimiter=';')\n\t\toutput.writerow(['Request ID','Queue', 'Arrival_Time','Queue_Arrival_time','Server_Arrival_time','alpha','beta'])\n\n\t\ti=0\n\t\tfor request in simulation.served_requests:\n\t\t\ti=i+1\n\t\t\toutrow=[]\n\t\t\toutrow.append(request.ID)\n\t\t\toutrow.append(request.queue_size_at_serving)\n\t\t\toutrow.append(request.arrival_time)\n\t\t\toutrow.append(request.queue_arrival_time)\n\t\t\toutrow.append(request.server_arrival_time)\n\t\t\toutrow.append(request.alpha)\n\t\t\toutrow.append(request.beta)\n\t\t\toutput.writerow(outrow)\n\t\toutfile.close()\n\n\t\treturn simulation", "def main():\n\t#Necessary Parameters for Simulation\n\tAmplitudes = ['230','260','290']\n\tConditions = ['No EES','EES','EES+A08','EES+A08+ProIncrease']\n\n\n\n\t#eesAmplitude = \"230\"\n\teesAmplitudeName = \"230\"\n\tdelay = \"2\"\n\ttoAddname = \"\"\n\tspecies = \"rat\"\n\t#Paramters initialization\n\ttotSimTime = rp.get_tot_sim_time()\n\tgaitCyclesFileName = rp.get_gait_cycles_file()\n\tmuscles = rp.get_muscles()\n\ttemplateFile = \"templateFrwSimRORaReal.txt\"\n\tw1 = 0.011\n\tw2 = -0.005\n\n\ttemplateFile = \"A08.txt\"\n\n\ttls.modify_network_structure(templateFile,templateFile,delay,[w1,w2])\n\n\teesFrequencies = range(0,41,40)\n\tnProc = 4\n\tseed = \"1\"\n\n\tnSim = len(eesFrequencies)\n\tcount=0.\n\tpercLastPrint=0.\n\tprintPeriod = 0.05\n\n\t# run simulations\n\tfor j,eesAmplitude in enumerate(Amplitudes):\n\t\tfor i,eesFrequency in enumerate(eesFrequencies):\n\t\t\tfor condition in Conditions:\n\t\t\t\t#name = \"Tonic_FFS_\"+inputFileName+\"_freq_\"+str(eesFrequency)\n\t\t\t\tinputFileName = condition\n\t\t\t\tinputFile = \"generatedStructures/\"+inputFileName+\".txt\"\n\t\t\t\tname = \"Tonic_FFS_\"+condition+\"_freq_\"+str(eesFrequency)\n\t\t\t\tresultFile = gt.find(\"*\"+name+\".p\",pathToResults)\n\t\t\t\tif not resultFile:\n\t\t\t\t\tprogram = ['python','./scripts/runForSimMuscleSpindles_RORa.py',\\\n\t\t\t\t\t\tstr(eesFrequency),eesAmplitude,inputFile,name,\"--simTime\",str(totSimTime),\"--seed\",seed,\"--noPlot\"]\n\n\t\t\t\tif not resultFile: gt.run_subprocess(program)\n\n\t\t\t\tcount+=1\n\t\t\t\tif count/nSim-percLastPrint>=printPeriod:\n\t\t\t\t\tpercLastPrint=count/nSim\n\t\t\t\t\tprint str(round(count/nSim*100))+\"% of simulations performed...\"\n\n\n\n\t\"\"\" create plots \"\"\"\n\terrParams = dict(lw=0.5, capsize=1, capthick=0.5)\n\twith open(gaitCyclesFileName, 'r') as pickle_file:\n\t\theelStrikes = pickle.load(pickle_file)\n\t\tfootOffs = pickle.load(pickle_file)\n\n\n\t# Figure 5 plot all gait cycles- afferent and efferents\n\t#if not phasicStim:\n\tfigName = time.strftime(\"/%Y_%m_%d_Tonic_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_firingRates.pdf\")\n\t#else: figName = time.strftime(\"/%Y_%m_%d_Phasic_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_firingRates.pdf\")\n\tfig, ax = plt.subplots(2, 4,figsize=(16,9))\n\tcmap = plt.get_cmap('winter')\n\tcolors = cmap(np.linspace(0.1,0.9,len(eesFrequencies)))\n\n\tfor i,eesFrequency in 
enumerate(eesFrequencies):\n\t\t#if not phasicStim:\n\t\tname = \"FS_EES_230uA_\"+str(eesFrequency)+\"Hz_Delay_2ms_Tonic_FFS_Control_freq_\"+str(eesFrequency)\n\t\t#name = \"Tonic_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_freq_\"+str(eesFrequency)\n\t\t#else: name = \"Phasic_\"+emgVsKinMod+\"_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_freq_\"+str(eesFrequency)\n\t\tif species == \"human\":name += hp.get_dataset()\n\n\t\t# get data\n\t\tprint name\n\t\tresultFile = gt.find(\"*\"+name+\".p\",pathToResults)\n\t\tprint resultFile\n\t\tif len(resultFile)>1: print \"Warning: multiple result files found!!!\"\n\t\twith open(resultFile[0], 'r') as pickle_file:\n\t\t\testimatedEmg = pickle.load(pickle_file)\n\t\t\tmeanFr = pickle.load(pickle_file)\n\n\t\t# get gait cycles\n\t\tif not 'heelStrikeSamples' in locals():\n\t\t\tnSamples = len(meanFr[muscles[0]][\"Mn\"])\n\t\t\tdtMeanFr = float(totSimTime)/nSamples\n\t\t\theelStrikeSamples = [int(x) for x in heelStrikes*1000./dtMeanFr]\n\t\t\tfootOffSamples = [int(x) for x in footOffs*1000./dtMeanFr]\n\t\t\tsamples = range(nSamples)\n\t\t\tstance = np.zeros(nSamples).astype(bool)\n\t\t\tfor strike,off in zip(heelStrikeSamples,footOffSamples):\n\t\t\t\tif strike>nSamples: break\n\t\t\t\tstance[strike:off]=True\n\n\t\tfor j,muscle in enumerate(muscles):\n\t\t\tax[j,0].plot(meanFr[muscle]['Iaf'],color=colors[i])\n\t\t\tax[j,0].fill_between(samples, 0, 200, where=stance, facecolor='#b0abab', alpha=0.25)\n\t\t\tax[j,1].plot(meanFr[muscle]['IaInt'],color=colors[i])\n\t\t\tax[j,1].fill_between(samples, 0, 200, where=stance, facecolor='#b0abab', alpha=0.25)\n\t\t\tax[j,2].plot(meanFr[muscle]['Mn'],color=colors[i])\n\t\t\tax[j,2].fill_between(samples, 0, 200, where=stance, facecolor='#b0abab', alpha=0.25)\n\t\t\tax[j,3].plot(estimatedEmg[muscle]['Mn'],color=colors[i])\n\t\t\tax[j,3].fill_between(samples, 0, 200, where=stance, facecolor='#b0abab', alpha=0.25)\n\n\n\tfor j,muscle in enumerate(muscles):\n\t\tax[j,0].set_ylim([0,200])\n\t\tax[j,0].set_title(\"Ia fibers firing rate - \"+muscle)\n\t\tax[j,0].set_xlabel(\"Time (ms)\")\n\t\tax[j,0].set_ylabel(\"Firing rate (Imp/s)\")\n\t\tax[j,1].set_ylim([0,200])\n\t\tax[j,1].set_title(\"IaInt firing rate - \"+muscle)\n\t\tax[j,1].set_xlabel(\"Time (ms)\")\n\t\tax[j,1].set_ylabel(\"Firing rate (Imp/s)\")\n\t\tax[j,2].set_ylim([0,200])\n\t\tax[j,2].set_title(\"Mn firing rate - \"+muscle)\n\t\tax[j,2].set_xlabel(\"Time (ms)\")\n\t\tax[j,2].set_ylabel(\"Firing rate (Imp/s)\")\n\t\tax[j,3].set_ylim([0,200])\n\t\tax[j,3].set_title(\"EMG - \"+muscle)\n\t\tax[j,3].set_xlabel(\"Time (ms)\")\n\t\tax[j,3].set_ylabel(\"Emg amplitude (a.u.)\")\n\tplt.savefig(pathToResults+figName, format=\"pdf\",transparent=True)\n\n\n# FIgure 5 plot 2 single gait cycles- afferent and efferents + mn phasicity score\n\tif species == \"rat\":\n\t\tstartGaitCycleN = 3\n\t\tnCycles = 1\n\telif species == \"human\":\n\t\tstartGaitCycleN = 3\n\t\tnCycles = 1\n\n\tfigName = time.strftime(\"/%Y_%m_%d_Tonic_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_single_firingRates.pdf\")\n\t#else: figName = 
time.strftime(\"/%Y_%m_%d_Phasic_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_single_firingRates.pdf\")\n\tfig, ax = plt.subplots(2, 6,figsize=(16,9))\n\tcmap = plt.get_cmap('winter')\n\tcolors = cmap(np.linspace(0.1,0.9,len(eesFrequencies)))\n\tbar_width = 5\n\n\tfor i,eesFrequency in enumerate(eesFrequencies):\n\t\t#if not phasicStim:\n\n\t\tname = \"FS_EES_230uA_\"+str(eesFrequency)+\"Hz_Delay_2ms_Tonic_FFS_Control_freq_\"+str(eesFrequency)\t\t#else: name = \"Phasic_\"+emgVsKinMod+\"_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_freq_\"+str(eesFrequency)\n\t\tif species == \"human\":name += hp.get_dataset()\n\n\t\t# get data\n\t\tresultFile = gt.find(\"*\"+name+\".p\",pathToResults)\n\t\tif len(resultFile)>1: print \"Warning: multiple result files found!!!\"\n\t\twith open(resultFile[0], 'r') as pickle_file:\n\t\t\testimatedEmg = pickle.load(pickle_file)\n\t\t\tmeanFr = pickle.load(pickle_file)\n\n\t\t# compute stats\n\t\tiaIntModDepth = {}\n\t\tactiveMnFr={}\n\t\tfor muscle in muscles:\n\t\t\tiaIntModDepth[muscle]=[]\n\t\t\tactiveMnFr[muscle]=[]\n\t\tfor j in xrange(len(heelStrikeSamples)-1):\n\t\t\tif heelStrikeSamples[j+1]>nSamples-50: break\n\t\t\tif heelStrikeSamples[j]<50:continue # to skip artefacts\n\t\t\tfor muscle in muscles:\n\t\t\t\tiaIntModDepth[muscle].append(\\\n\t\t\t\t\tmeanFr[muscle]['IaInt'][heelStrikeSamples[j]:heelStrikeSamples[j+1]].max()-meanFr[muscle]['IaInt'][heelStrikeSamples[j]:heelStrikeSamples[j+1]].min())\n\t\t\t\tmnActivityDuringCycle = meanFr[muscle]['Mn'][heelStrikeSamples[j]:heelStrikeSamples[j+1]]\n\t\t\t\tactiveMnFr[muscle].append(\\\n\t\t\t\t\tmnActivityDuringCycle[mnActivityDuringCycle>=0.8*mnActivityDuringCycle.max()].mean())\n\t\t\t\t\t# mnActivityDuringCycle[mnActivityDuringCycle>=1.5*mnActivityDuringCycle.std()].mean())\n\t\t\t\t\t# mnActivityDuringCycle[mnActivityDuringCycle>=np.percentile(mnActivityDuringCycle,90)].mean())\n\t\tiaIntModDepthStats = {}\n\t\tactiveMnFrStats = {}\n\t\tfor muscle in muscles:\n\t\t\tiaIntModDepthStats[muscle] = {\"mean\":np.mean(iaIntModDepth[muscle]),\n\t\t\t\t\"sem\":np.std(iaIntModDepth[muscle])/(np.sqrt(len(iaIntModDepth[muscle])-1))}\n\t\t\tactiveMnFrStats[muscle] = {\"mean\":np.mean(activeMnFr[muscle]),\n\t\t\t\t\"sem\":np.std(activeMnFr[muscle])/(np.sqrt(len(activeMnFr[muscle])-1))}\n\n\t\t# get gait cycles to plot\n\t\tif not 'startPlot' in locals():\n\t\t\tstartPlot = heelStrikeSamples[startGaitCycleN-1]\n\t\t\tstopPlot = heelStrikeSamples[startGaitCycleN+nCycles-1]\n\t\t\tif stopPlot>nSamples: stopPlot=nSamples\n\t\t\treducedSamples = range(stopPlot-startPlot)\n\t\t\treducedStance = stance[startPlot:stopPlot]\n\n\t\tfor j,muscle in enumerate(muscles):\n\t\t\tax[j,0].plot(meanFr[muscle]['Iaf'][startPlot:stopPlot],color=colors[i])\n\t\t\tax[j,0].fill_between(reducedSamples, 0, 200, where=reducedStance, facecolor='#b0abab', alpha=0.25)\n\t\t\tax[j,1].plot(meanFr[muscle]['IaInt'][startPlot:stopPlot],color=colors[i])\n\t\t\tax[j,1].fill_between(reducedSamples, 0, 250, where=reducedStance, facecolor='#b0abab', alpha=0.25)\n\t\t\tax[j,2].bar(eesFrequency,iaIntModDepthStats[muscle][\"mean\"],bar_width,yerr=iaIntModDepthStats[muscle][\"sem\"],\\\n\t\t\t\tcolor=colors[i],error_kw=errParams)\n\t\t\txValsScatter = 
np.linspace(0,bar_width*0.9,len(iaIntModDepth[muscle]))+eesFrequency-bar_width*0.45\n\t\t\tax[j,2].scatter(xValsScatter,iaIntModDepth[muscle], marker='o',edgecolor='black', linewidth='0.1', color=\"#dddde3\", s=7, zorder=3, alpha=0.7)\n\n\t\t\tax[j,3].plot(meanFr[muscle]['Mn'][startPlot:stopPlot],color=colors[i])\n\t\t\tax[j,3].fill_between(reducedSamples, 0, 40, where=reducedStance, facecolor='#b0abab', alpha=0.25)\n\t\t\tax[j,4].bar(eesFrequency,activeMnFrStats[muscle][\"mean\"],bar_width,yerr=activeMnFrStats[muscle][\"sem\"],\\\n\t\t\t\tcolor=colors[i],error_kw=errParams)\n\t\t\tax[j,4].scatter(xValsScatter,activeMnFr[muscle], marker='o',edgecolor='black', linewidth='0.1', color=\"#dddde3\", s=7, zorder=3, alpha=0.7)\n\t\t\tax[j,5].plot(estimatedEmg[muscle]['Mn'][startPlot:stopPlot],color=colors[i])\n\t\t\tax[j,5].fill_between(reducedSamples, -50, 50, where=reducedStance, facecolor='#b0abab', alpha=0.25)\n\n\tfor j,muscle in enumerate(muscles):\n\t\tax[j,0].set_ylim([0,200])\n\t\tax[j,0].set_title(\"Ia fibers firing rate - \"+muscle)\n\t\tax[j,0].set_xlabel(\"Time (ms)\")\n\t\tax[j,0].set_ylabel(\"Firing rate (Imp/s)\")\n\t\tax[j,1].set_ylim([0,250])\n\t\tax[j,1].set_title(\"IaInt firing rate - \"+muscle)\n\t\tax[j,1].set_xlabel(\"Time (ms)\")\n\t\tax[j,1].set_ylabel(\"Firing rate (Imp/s)\")\n\t\tax[j,2].set_ylim([0,250])\n\t\tax[j,2].set_title(\"Mean IaInr Fr while active\")\n\t\tax[j,2].set_xlabel(\"Stimulation amplitude (uA)\")\n\t\tax[j,2].set_ylabel(\"Firing rate (Imp/s)\")\n\t\tax[j,3].set_ylim([0,40])\n\t\tax[j,3].set_title(\"Mn firing rate - \"+muscle)\n\t\tax[j,3].set_xlabel(\"Time (ms)\")\n\t\tax[j,3].set_ylabel(\"Firing rate (Imp/s)\")\n\t\tax[j,4].set_ylim([0,40])\n\t\tax[j,4].set_title(\"Mean Mn Fr while active\")\n\t\tax[j,4].set_xlabel(\"Stimulation amplitude (uA)\")\n\t\tax[j,4].set_ylabel(\"Firing rate (Imp/s)\")\n\t\tax[j,5].set_ylim([-50,50])\n\t\tax[j,5].set_title(\"EMG - \"+muscle)\n\t\tax[j,5].set_xlabel(\"Time (ms)\")\n\t\tax[j,5].set_ylabel(\"Emg amplitude (a.u.)\")\n\tplt.savefig(pathToResults+figName, format=\"pdf\",transparent=True)\n\n\n\n\n\n\t# FIgure 2-7 plot\n\tif species == \"rat\":\n\t\tstartGaitCycleN = 3\n\t\tnCycles = 1\n\telif species == \"human\":\n\t\tstartGaitCycleN = 3\n\t\tnCycles = 1\n\n\t#if not phasicStim:\n\tfigName = time.strftime(\"/%Y_%m_%d_Tonic_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_afferentStats.pdf\")\n\t#else: figName = time.strftime(\"/%Y_%m_%d_Phasic_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_afferentStats.pdf\")\n\tfig, ax = plt.subplots(2, 4,figsize=(16,9))\n\tcmap = plt.get_cmap('winter')\n\tcolors = cmap(np.linspace(0.1,0.9,len(eesFrequencies)))\n\tbar_width = 5\n\n\tmeanPerEraserApIaf = []\n\toffsetMeanFr = 0\n\toffsetMeanModDepth = 0\n\n\tfor i,eesFrequency in enumerate(eesFrequencies):\n\t\t#if not phasicStim:\n\t\tname = \"FS_EES_230uA_\"+str(eesFrequency)+\"Hz_Delay_2ms_Tonic_FFS_Control_freq_\"+str(eesFrequency)\n\t\t#name = \"Tonic_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_freq_\"+str(eesFrequency)\n\t\t#else: name = \"Phasic_\"+emgVsKinMod+\"_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_freq_\"+str(eesFrequency)\n\t\tif species == 
\"human\":name += hp.get_dataset()\n\n\t\tresultFile = gt.find(\"*\"+name+\".p\",pathToResults)\n\t\tif len(resultFile)>1: print \"Warning: multiple result files found!!!\"\n\t\twith open(resultFile[0], 'r') as pickle_file:\n\t\t\testimatedEmg = pickle.load(pickle_file)\n\t\t\tmeanFr = pickle.load(pickle_file)\n\t\t\tmeanPerEraserApIaf.append(pickle.load(pickle_file))\n\n\t\t# compute stats\n\t\tiaModDepth = {}\n\t\tiaMeanFr={}\n\t\tfor muscle in muscles:\n\t\t\tiaModDepth[muscle]=[]\n\t\t\tiaMeanFr[muscle]=[]\n\t\tfor j in xrange(len(heelStrikeSamples)-1):\n\t\t\tif heelStrikeSamples[j+1]>nSamples-50: break\n\t\t\tif heelStrikeSamples[j]<50:continue # to skip artefacts\n\t\t\tfor muscle in muscles:\n\t\t\t\tiaModDepth[muscle].append(\\\n\t\t\t\t\tmeanFr[muscle]['Iaf'][heelStrikeSamples[j]:heelStrikeSamples[j+1]].max()-meanFr[muscle]['Iaf'][heelStrikeSamples[j]:heelStrikeSamples[j+1]].min())\n\t\t\t\tiaMeanFr[muscle].append(\\\n\t\t\t\t\tmeanFr[muscle]['Iaf'][heelStrikeSamples[j]:heelStrikeSamples[j+1]].mean())\n\t\tiaModDepthStats = {}\n\t\tiaMeanFrStats = {}\n\t\tfor muscle in muscles:\n\t\t\tiaModDepthStats[muscle] = {\"mean\":np.mean(iaModDepth[muscle]),\n\t\t\t\t\"sem\":np.std(iaModDepth[muscle])/(np.sqrt(len(iaModDepth[muscle])-1))}\n\t\t\tiaMeanFrStats[muscle] = {\"mean\":np.mean(iaMeanFr[muscle]),\n\t\t\t\t\"sem\":np.std(iaMeanFr[muscle])/(np.sqrt(len(iaMeanFr[muscle])-1))}\n\n\t\t# get gait cycles to plot\n\t\tif not 'startPlot' in locals():\n\t\t\tstartPlot = heelStrikeSamples[startGaitCycleN-1]\n\t\t\tstopPlot = heelStrikeSamples[startGaitCycleN+nCycles-1]\n\t\t\tif stopPlot>nSamples: stopPlot=nSamples\n\t\t\treducedSamples = range(stopPlot-startPlot)\n\t\t\treducedStance = stance[startPlot:stopPlot]\n\n\t\tfor j,muscle in enumerate(muscles):\n\n\t\t\tax[j,0].plot(meanFr[muscle]['Iaf'][startPlot:stopPlot],color=colors[i])\n\t\t\tax[j,0].fill_between(reducedSamples, 0, 125, where=reducedStance, facecolor='#b0abab', alpha=0.25)\n\t\t\tax[j,1].bar(eesFrequency,iaMeanFrStats[muscle][\"mean\"],bar_width,yerr=iaMeanFrStats[muscle][\"sem\"],\\\n\t\t\t\tcolor=colors[i],error_kw=errParams)\n\t\t\txValsScatter = np.linspace(0,bar_width*0.9,len(iaMeanFr[muscle]))+eesFrequency-bar_width*0.45\n\t\t\tax[j,1].scatter(xValsScatter,iaMeanFr[muscle], marker='o',edgecolor='black', linewidth='0.1', color=\"#dddde3\", s=7, zorder=3, alpha=0.7)\n\n\t\t\tax[j,2].bar(eesFrequency,iaModDepthStats[muscle][\"mean\"],bar_width,yerr=iaModDepthStats[muscle][\"sem\"],\\\n\t\t\t\tcolor=colors[i],error_kw=errParams)\n\t\t\tax[j,2].scatter(xValsScatter,iaModDepth[muscle], marker='o',edgecolor='black', linewidth='0.1', color=\"#dddde3\", s=7, zorder=3, alpha=0.7)\n\t\t\tax[j,3].bar(eesFrequency,meanPerEraserApIaf[-1],5,color=colors[i])\n\n\t\t\tax[j,0].set_ylim([0,125])\n\t\t\tax[j,0].set_title(\"Ia fibers firing rate - \"+muscle)\n\t\t\tax[j,0].set_xlabel(\"Time (ms)\")\n\t\t\tax[j,0].set_ylabel(\"Firing rate (Imp/s)\")\n\t\t\tax[j,1].set_ylim([0,125])\n\t\t\tax[j,1].set_title(\"Mean Ia firing rate \")\n\t\t\tax[j,1].set_xlabel(\"Stimulation amplitude (uA)\")\n\t\t\tax[j,1].set_ylabel(\"(imp/s)\")\n\t\t\tax[j,2].set_ylim([0,80])\n\t\t\tax[j,2].set_title(\"modulation depth\")\n\t\t\tax[j,2].set_xlabel(\"Stimulation amplitude (uA)\")\n\t\t\tax[j,2].set_ylabel(\"(imp/s)\")\n\t\t\tax[j,3].set_ylim([0,100])\n\t\t\tax[j,3].set_title(\"Percentage erased APs\")\n\t\t\tax[j,3].set_xlabel(\"Stimulation frequency (Hz)\")\n\t\t\tax[j,3].set_ylabel(\"Percentage\")\n\tplt.savefig(pathToResults+figName, 
format=\"pdf\",transparent=True)", "def main(mzml_file):\n run = pymzml.run.Reader(mzml_file)\n print(\n \"\"\"\nSummary for mzML file:\n {file_name}\nRun was measured on {start_time} using obo version {obo_version}\nFile contains {spectrum_count} spectra\n \"\"\".format(\n **run.info\n )\n )", "def main(um_file, ptl_file, wl_min_r=0.08, wl_max_r=50.0, wl_n_bins=22, verbose=True):\n # Read in the UM mock catalog\n um_mock = Table(np.load(um_file))\n if verbose:\n print(\"# Load in UM mock catalog: {}\".format(um_file))\n print(\"# Dealing with {} galaxies\".format(len(um_mock)))\n # Read in the particle table\n sim_particles = Table(np.load(ptl_file))\n if verbose:\n print(\"# Load in particle table: {}\".format(ptl_file))\n print(\"# Dealing with {} particles\".format(len(sim_particles)))\n\n # Output file name\n um_pre, _ = os.path.splitext(um_file)\n ptl_pre, _ = os.path.splitext(ptl_file)\n n_ptl = ptl_pre.split('_')[-1]\n precompute_out = \"{}_{}_r_{:4.2f}_{:4.1f}_{:2d}bins.npy\".format(\n um_pre, n_ptl, wl_min_r, wl_max_r, wl_n_bins\n )\n if verbose:\n print(\"# Output file name : {}\".format(precompute_out))\n\n # Run precompute\n if 'smdpl' in ptl_file:\n mass_encl = vagc.precompute_wl_smdpl(\n um_mock, sim_particles, wl_min_r=wl_min_r, wl_max_r=wl_max_r,\n wl_n_bins=wl_n_bins)\n elif 'mdpl2' in ptl_file:\n mass_encl = vagc.precompute_wl_mdpl2(\n um_mock, sim_particles, wl_min_r=wl_min_r, wl_max_r=wl_max_r,\n wl_n_bins=wl_n_bins)\n else:\n raise NameError(\"# Wrong simulation: [smdpl/mdpl2]\")\n\n np.save(precompute_out, mass_encl)", "def main(infile):\n for i, frame in enumerate(read_lammpstrj(infile)):\n data = frame_to_dict(frame)\n print(i, data['timestep'], data['number of atoms'])", "def simul_and_export(file, config, i):\n\n simulate_UVSPEC(file, config)\n\n load_skymap(config)\n\n sim = files_sim(config)[i]\n export_sim_rad(sim, config)", "def main():\n\n\t# Parse the file\n\tmem_file = advanced_analysis('../data_1/mempages.dat.out')", "def readMAFOTLaminarOutput(self,PFC,file):\n print('Reading laminar output')\n log.info('Reading laminar output')\n data = np.genfromtxt(file,comments='#')\n use = np.where(PFC.shadowed_mask != 1)[0]\n xyz = PFC.centers[use]\n r,z,phi = tools.xyz2cyl(xyz[:,0],xyz[:,1],xyz[:,2])\n r = np.round(r,10) #number of decimal places in MAFOT output file\n\n #Sometimes the MAFOT calculation returns an error and it discards the\n #launch point instead of writing to the output file. This results\n #in less points coming out of MAFOT than we put in. 
So we amend this\n #by checking to make sure the points coming out are assigned to the correct\n #location in the mesh centers.\n #Could be parallelized in future\n\n #Create shared variables so that all multiprocessing cores can access\n #same variables without needless data transmission\n tools.lamData = data\n tools.lamR = r\n\n #Prepare intersectionTest across multiple cores\n t0 = time.time()\n Ncores = multiprocessing.cpu_count() -2 #reserve 2 cores for overhead\n print('Initializing parallel MAFOT laminar check across {:d} cores'.format(Ncores))\n log.info('Initializing parallel MAFOT laminar check across {:d} cores'.format(Ncores))\n print('Spawning tasks to workers')\n log.info('Spawning tasks to workers')\n pool = multiprocessing.Pool(Ncores)\n indexes = np.asarray(pool.map(tools.readLaminarParallel, np.arange(len(r))))\n pool.close()\n\n PFC.psimin = data[indexes,4]\n PFC.conLength = data[indexes,3]\n\n #R = data[:,0]\n #Z = data[:,1]\n #phi = np.radians(data[:,9])\n #x,y,z = tools.cyl2xyz(R,Z,phi)\n #PFC.centersLam = np.concatenate((x,y,z)).reshape((-1, 3), order='F')\n print('Laminar output read')\n print('Requested {:d} traces from MAFOT'.format(len(r)))\n print('MAFOT returned {:d} traces'.format(len(data[:,0])))\n log.info('Laminar output read')\n log.info('Requested {:d} traces from MAFOT'.format(len(r)))\n log.info('MAFOT returned {:d} traces'.format(len(data[:,0])))\n\n return", "def readOutputfile(filename, verbose=False):\n\n # -----------------------------------------------------------------------------\n # Defining the classes for data structure\n T_Simulation = namedtuple('Simulation', ['step'])\n T_Step = namedtuple('Step', ['element', 'node'])\n\n T_Displacement = namedtuple('Displacement', ['ux', 'uy'])\n\n T_Element = namedtuple('Element', ['gp', 'avstrain', 'avstress', 'eqstrain'])\n T_GP = namedtuple('GP', ['stress', 'strain'])\n T_Stresses = namedtuple('Stresses', ['xx', 'yy', 'zz', 'yz', 'zx', 'xy'])\n T_Strains = namedtuple('Strains', ['xx', 'yy', 'zz', 'yz', 'zx', 'xy'])\n # -----------------------------------------------------------------------------\n\n nSteps = 0 # Simulation step counter\n\n SimData = T_Simulation(list())\n\n with open(filename) as f:\n line = f.readline() # Read in the first line of the input file\n while True: # Loop over all lines of the input file\n # Read the nodes displacements\n #line = f.readline()\n #print(line)\n if line == 'DofManager output:\\n': # String starts a list of nodes displacement information\n nSteps += 1 # The above string starts a new simulation step\n line = f.readline() # Cancel ---------- seperator\n line = f.readline()\n Nodes = list() # Initialize/clear list of nodes\n\n while line != '\\n' and line != 'Element output:\\n': # Strings that finish the list\n #\t\t\t\tnNode = int(line.strip().split()[1]) # Node id\n line = f.readline()\n dim1 = float(line.strip().split()[3]) # Displacement dim1\n line = f.readline()\n dim2 = float(line.strip().split()[3]) # Displacement dim2\n Nodes.append(\n T_Displacement(dim1, dim2)) # Append displacements of the current node to the node list\n line = f.readline()\n\n\n if verbose:\n print('Step {}: Dofs completed.\\n'.format(nSteps))\n print('---------------------------------\\n')\n\n # Read the stresses an strains at Gauss points\n elif line == 'Element output:\\n': # String starts a list elements, GPs, strains and stresses\n line = f.readline() # Cancel ---------- seperator\n line = f.readline()\n Elements = list() # Initialize/clear list of elements\n\n while line != 
'\\n' and line != '\\tR E A C T I O N S O U T P U T:\\n': # Strings that finish the list\n #\t\t\t\t\tnElement = line.strip().split()[2] # Element id\n line = f.readline()\n GPs = T_Element(list(), 0, 0, 0) # List of Gauss points\n\n while line != '\\n' and line.strip().split()[0] == 'GP': # String that starts a new GP\n #\t\t\t\t\t\tnGP = int(line.strip().split()[1].split('.')[1]) # GP id\n tmp = [float(i) for i in line.strip().split()[4:10]] # Read the strains\n strain = T_Strains(tmp[0], tmp[1], tmp[2], tmp[3], tmp[4], tmp[5])\n line = f.readline()\n tmp = [float(i) for i in line.strip().split()[1:7]] # Read the stresses\n stress = T_Stresses(tmp[0], tmp[1], tmp[2], tmp[3], tmp[4], tmp[5])\n GPs.gp.append(\n T_GP(stress, strain)) # Append stresses and strains of the current GP to the GP list\n line = f.readline()\n\n\n Elements.append(GPs) # Append GP list of the current element to the element list\n\n if verbose:\n print('Step {}: GPs completed.\\n'.format(nSteps))\n print('---------------------------------\\n')\n\n SimData.step.append(T_Step(Elements, Nodes)) # Append element and node list of the current step to the step list\n #print('the file input ends')\n #print(nSteps)\n # only needed with a while loop\n # Jump over the lines until we reach the next time step (Caught by if-clause)\n try:\n line = f.readline() # Will generate an error if files end is reached\n if line == \"\":\n raise EOFError\n except:\n if verbose: print(\"End of file reached.\\n\")\n break # Break the 'while True' loop\n\n # -----------------------------------------------------------------------------\n\n\n print('averaging the stress')\n # Averaging of strains and stress of GPs of each element\n for istep in range(len(SimData.step)):\n\n for ielement in range(len(SimData.step[istep].element)):\n print(len)\n # Initialization before each element\n stresses = np.array([0., 0., 0., 0., 0., 0.])\n strains = np.array([0., 0., 0., 0., 0., 0.])\n\n for igp in range(len(SimData.step[istep].element[ielement])):\n print(igp)\n # Add up all data of all GPs\n #stresses[:] += SimData.step[istep].element[ielement].gp[igp].stress[:]\n strains[:] += SimData.step[istep].element[ielement].gp[igp].strain[:]\n\n # Divide GP sum by number of GPs\n stresses /= len(SimData.step[istep].element[ielement])\n strains /= len(SimData.step[istep].element[ielement])\n # Replace the field (initialized with 0) with new information\n SimData.step[istep].element[ielement] = SimData.step[istep].element[ielement]._replace(\n avstress=T_Stresses(stresses[0], stresses[1], stresses[2], stresses[3], stresses[4], stresses[5]))\n SimData.step[istep].element[ielement] = SimData.step[istep].element[ielement]._replace(\n avstrain=T_Strains(strains[0], strains[1], strains[2], strains[3], strains[4], strains[5]))\n print('Analysis finished')\n return SimData", "def run(self) :\n# print \"evaluating with laban\"\n # currently, labanx reads from a preset file\n os.system('labanx '+str(self.rank)+\" \"+self.input+\" \"+self.output)", "def read_file_sim(file_name, tree):\n try:\n file = open(file_name, \"r\")\n except FileNotFoundError:\n print(\"File not found\")\n raise FileNotFoundError(\"File not found, Created by @Edd1e234\")\n print(\"Printing Words along with their similarities.\\n\")\n for line in file:\n words = line.split(\" \")\n value = sim(words[0], words[1][:len(words[1]) - 1], tree)\n print(words[0], words[1][:len(words[1]) - 1], value)\n print(\"\\nFinished Part 2.\")\n file.close()", "def __main__():\n try:\n gff_file = sys.argv[1]\n mat_file = 
sys.argv[2]\n except:\n print __doc__\n sys.exit(-1)\n\n genes, transcripts, exons, utr3, utr5, cds = GFFParse(gff_file) \n gene_models = CreateGeneModels(genes, transcripts, exons, utr3, utr5, cds)\n # TODO Write to matlab/octave struct instead of cell arrays.\n sio.savemat(mat_file, \n mdict=dict(genes=gene_models), \n format='5', \n oned_as='row')", "def main():\n parser = argparse.ArgumentParser(description=\"Align ORB-SLAM results with ground truth according to camera orientation in AirSim.\")\n parser.add_argument(\"filename\", help = \"Trajectory in TUM format.\")\n parser.add_argument(\"output\", help = \"Output file.\")\n \n parser.add_argument(\"roll\", help=\"Camera Roll.\")\n parser.add_argument(\"pitch\", help=\"Camera Pitch.\")\n parser.add_argument(\"yaw\", help=\"Camera Yaw.\")\n\n args = parser.parse_args()\n\n roll = float(args.roll)*m.pi/180\n pitch = float(args.pitch)*m.pi/180\n yaw = float(args.yaw)*m.pi/180\n\n file = open(args.filename, \"r\")\n newFile = open(args.output, \"w\")\n \n for line in file:\n values = line.split()\n x = float(values[3])\n y = float(values[1])\n z = float(values[2])\n position = np.array([[x],[y],[z]])\n position = Rx(roll) @ Ry(pitch) @ Rz(yaw) @ position\n\n newFile.write(\"%s %s %s %s %s %s %s %s\\n\" %(values[0], position[0,0], position[1,0], position[2,0], values[4], values[5], values[6], values[7]))\n\n file.close\n newFile.close\n print(\"Saved as \" + args.output)\n\n return", "def read_simulation_results(self,\n fname_sims, \n fname_pareto = None, \n fname_cull = None):\n\n \n self.fname_sims = fname_sims\n self.__read_file(fname_sims, 'sim_results')\n \n if fname_pareto is not None:\n self.fname_pareto = fname_pareto\n self.__read_file(fname_in, 'pareto')\n \n if fname_cull is not None:\n self.fname_cull = fname_cull\n self.__read_file(fname_in, 'cull')", "def main(argv):\n parser = argparse.ArgumentParser()\n \n parser.add_argument(\"sim_name\", type=str, help=\"name of simulation folder\")\n args = parser.parse_args() \n \n sim_path = '/'.join([current_dir, args.sim_name]) \n \n sim_input(sim_path) # write the fortran input files\n runmodel(sim_path) # compile and run fortran code\n sim_read(sim_path)", "def read_input():\n \n argv = sys.argv\n\n # Read file names from sd input\n f_dy = argv[1] # matdyn.modes\n f_pat = argv[2] # path.out (should be in crystal coords)\n f_ph = argv[3] # ph.x output (Gamma point)\n\n # Read input card\n f_inp = open(\"input.dat\",'r')\n l1 = f_inp.readline()\n l2 = f_inp.readline()\n l3 = f_inp.readline().split()\n f_inp.close()\n\n # Open files\n\n f = open(f_dy,'r') # matdyn.modes \n f_dyn = f.readlines()\n f.close()\n\n f = open(f_pat,'r') # path.out\n f_path = f.readlines()\n f.close()\n\n f = open(f_ph,'r') # ph.x output\n f_zs = f.readlines()\n f.close()\n\n # Assign values to a0, nat, M, nqp\n a0, vol = float(l1.split()[0]), float(l1.split()[1])\n nat = int(l2) \n mass = np.zeros(nat)\n for iat in range(nat):\n mass[iat] = float(l3[iat])\n\n # Assign values to G (reciprocal lattice vec)\n ig = 0 ; i = 0\n for line in f_zs:\n if \"reciprocal axes:\" in line:\n ig = i + 1 \n break\n i += 1 \n\n rG = np.zeros((3,3))\n for ic in range(3):\n rGtext = f_zs[ig+ic][23:48].split()\n rG[ic,:] = np.array([float(rGtext[0]), float(rGtext[1]), float(rGtext[2])])\n\n # Read Z* tensor from f_zs\n i = 0\n iz = 0\n zstart = []\n for line in f_zs:\n if \"(d P / du)\" in line:\n iz = i + 3\n if \"Px\" in line:\n zstart.append(i)\n\n i += 1\n\n # Read the dielectric tensor from f_zs\n i = 0\n ie = 0\n for line 
in f_zs:\n if \"Dielectric constant in cartesian axis\" in line:\n ie = i + 2\n break\n\n i += 1\n\n # Assign Z* values\n zs = np.zeros((nat,3,3)) # initialize Z*\n\n for iat in range(nat):\n for ic in range(3):\n ztext = f_zs[zstart[iat]+ic][19:56].split()\n for jc in range(3):\n zs[iat][ic][jc] = float(ztext[jc])\n\n # Assing the dielectric tensor\n eps = np.zeros((3,3))\n\n for ic in range(3):\n epstext = f_zs[ie+ic][16:66].split()\n for jc in range(3):\n eps[ic][jc] = float(epstext[jc])\n\n # Number of modes and q-points\n nmodes = 3 * nat\n nqpt = int(f_path[0].split()[0])\n\n # Read the q-points\n q = np.zeros((nqpt,4)) # 4th dimension is lenght for q-points on a line, weights for q-points on a grid \n for iq in range(1,nqpt+1):\n q[iq-1,] = np.array([float(f_path[iq].split()[0]),float(f_path[iq].split()[1]), \\\n float(f_path[iq].split()[2]),float(f_path[iq].split()[3])])\n\n # Read the eigenvalues(om) and eigenvectors(eig) \n # Initiate first\n om = np.zeros((nmodes,nqpt))\n eig = np.zeros((nmodes,nqpt,nat,3), dtype=complex) \n\n # Get the starting lines for each q-pt\n i = 0\n i_q = []\n for line in f_dyn:\n if \"q =\" in line:\n i_q.append(i+2)\n i += 1\n\n #Assign values to om and eig\n for iq in range(nqpt):\n for imod in range(nmodes):\n omtext = f_dyn[i_q[iq]+imod*(nat+1)][43:55]\n om[imod][iq] = float(omtext)\n for iat in range(nat):\n etext = f_dyn[i_q[iq]+imod*(nat+1)+iat+1][2:72].split()\n for ic in range(3):\n eig.real[imod][iq][iat][ic]=float(etext[2*ic])*np.sqrt(mass[iat])\n eig.imag[imod][iq][iat][ic]=float(etext[2*ic+1])*np.sqrt(mass[iat])\n\n #Normalize the eigenvectors\n t1 = eig[imod,iq,:,:]\n t_nu = np.sum(np.sum(np.conjugate(t1)*t1,axis=0))\n eig[imod,iq,:,:] = eig[imod,iq,:,:]/np.sqrt(np.abs(t_nu))\n\n # Check normalization\n delta = np.zeros((nmodes,nmodes), dtype=complex)\n for iat in range(nat):\n for ic in range(3):\n t2 = eig[:,iq,iat,ic]\n delta += np.outer(np.conjugate(t2),t2)\n\n unit = np.diag(np.diag(np.ones((nmodes,nmodes)))) # Unit vector\n test = np.abs( (delta-unit) )\n if ( np.max(test) > 1e-3):\n print \"Non-orthonormal eigenvector at iq=\", q[iq,:]\n\n return om, eig, q, zs, eps, mass, a0, vol, rG, nmodes, nqpt, nat", "def main():\n\n\t# eesAmplitudes = range(200,321,10)\n\teesAmplitudes = [\"%\"+\"%.2f_0_0\"%(i) for i in np.arange(0,1.01,.05)]\n\t# eesFrequencies = range(10,1001,20)\n\teesFrequencies = np.logspace(1,3,50)\n\t# nrnStructureFile = \"fsSFrFfMnArtMod.txt\"\n\t# nrnStructureFile = \"fsSFrFfMnArtModHuman.txt\"\n\tnrnStructureFile = \"fsMnArtModHuman.txt\"\n\t# name = \"FreqAmpModHuman_0367S\"\n\tname = \"FreqAmpModHuman_ArtmodHuman_10msBurst\"\n\n\tnSim = len(eesFrequencies)*len(eesAmplitudes)\n\tcount=0.\n\tpercLastPrint=0.\n\tprintPeriod = 0.05\n\t# simTime = 250\n\tsimTime = 15\n\tspecies = \"human\"\n\n\tfor eesAmplitude in eesAmplitudes:\n\t\tfor eesFrequency in eesFrequencies:\n\t\t\tfilName = name+\"_amp_\"+str(eesAmplitude)+\"_freq_\"+str(eesFrequency)\n\t\t\tresultFile = gt.find(\"*\"+filName+\".p\",pathToResults)\n\t\t\tif not resultFile:\n\t\t\t\treturnCode = None\n\t\t\t\twhile not returnCode==0:\n\t\t\t\t\tprogram = ['python','scripts/computeAfferentsEfferentsModulation.py',\n\t\t\t\t\t\tstr(eesFrequency),str(eesAmplitude),species,nrnStructureFile,name,\"--simTime\",str(simTime)]\n\t\t\t\t\tprint \" \".join(program)\n\t\t\t\t\tforwardSimulation = subprocess.Popen(program, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t\t\t\t\treturnCode = None\n\t\t\t\t\twhile returnCode is 
None:\n\t\t\t\t\t\tmessage = forwardSimulation.stdout.readline().rstrip(\"\\n\").split()\n\t\t\t\t\t\tif message != None:print \"\\t\\t\"+\" \".join(message)+\"\\t\\t\"\n\t\t\t\t\t\treturnCode = forwardSimulation.poll()\n\t\t\t\t\tif returnCode != 0: print \"\\t\\t\\t\\t Error n: \",forwardSimulation.poll(),\" resetting simulation...\"\n\t\t\tcount+=1\n\t\t\tif count/nSim-percLastPrint>=printPeriod:\n\t\t\t\tpercLastPrint=count/nSim\n\t\t\t\tprint str(round(count/nSim*100))+\"% of simulations performed...\"\n\tplot_stats(eesAmplitudes,eesFrequencies,simTime,name)", "def run_from_file(f):\n #set defaults\n x_loops=1;max_steps=0;display_on=True;max_fps=10;garden_size=13;tako_number=20\n pop_max=40;max_width=1800;max_height=900;collect_data=True;export_all=False\n rand_nets=False;max_gen=0;genetic_mode=\"Plain\";learning_on=False\n seeds=None;garden_mode=\"Diverse Static\";family_detection=None;family_mod=0\n record_inbreeding=True;inbreed_lim=1.1;filename=\"default file\"\n hla_genes=0;binary_health=0;carrier_percentage=40;two_envs=False\n diff_envs=False;migration_rate=0;phen_pref=False\n\n \n atr_dict = {\"x_loops\": x_loops, \"max_steps\": max_steps,\n \"display_on\": display_on, \"max_fps\": max_fps,\n \"garden_size\": garden_size,\n \"tako_number\": tako_number, \"pop_max\": pop_max,\n \"max_width\": max_width, \"max_height\": max_height,\n \"collect_data\": collect_data, \"export_all\": export_all,\n \"rand_nets\": rand_nets, \"max_gen\": max_gen,\n \"genetic_mode\": genetic_mode, \"learning_on\": learning_on,\n \"seeds\": seeds, \"garden_mode\": garden_mode,\n \"family_detection\": family_detection, \"family_mod\": family_mod,\n \"record_inbreeding\": record_inbreeding,\n \"inbreed_lim\": inbreed_lim, \"filename\": filename,\n \"hla_genes\": hla_genes, \"binary_health\": binary_health,\n \"carrier_percentage\": carrier_percentage,\n \"two_envs\": two_envs, \"diff_envs\": diff_envs,\n \"migration_rate\": migration_rate, \"phen_pref\": phen_pref}\n \n ints = [\"x_loops\", \"max_steps\", \"garden_size\", \"tako_number\", \"pop_max\",\n \"max_width\", \"max_height\", \"max_gen\", \"hla_genes\",\n \"binary_health\", \"carrier_percentage\", \"max_fps\"]\n floats = [\"family_mod\", \"inbreed_lim\", \"migration_rate\"]\n strs = [\"genetic_mode\", \"garden_mode\", \"filename\"]\n bools = [\"display_on\", \"collect_data\", \"export_all\", \"rand_nets\",\n \"learning_on\", \"record_inbreeding\", \"two_envs\", \"diff_envs\",\n \"phen_pref\"]\n\n #then sets all user-defined settings from the file f\n with open(f) as exp_file:\n for line in exp_file:\n #comments\n if line[0] == \"#\":\n pass\n #blank line = run what we have, then continue\n #to read the file for a new set of parameters\n elif line == \"\\n\":\n run_experiment(atr_dict[\"x_loops\"], atr_dict[\"max_steps\"],\n atr_dict[\"display_on\"], atr_dict[\"max_fps\"],\n atr_dict[\"garden_size\"],\n atr_dict[\"tako_number\"], atr_dict[\"pop_max\"],\n atr_dict[\"max_width\"], atr_dict[\"max_height\"],\n atr_dict[\"collect_data\"], atr_dict[\"export_all\"],\n atr_dict[\"rand_nets\"], atr_dict[\"max_gen\"],\n atr_dict[\"genetic_mode\"],\n atr_dict[\"learning_on\"],\n atr_dict[\"seeds\"], atr_dict[\"garden_mode\"],\n atr_dict[\"family_detection\"],\n atr_dict[\"family_mod\"],\n atr_dict[\"record_inbreeding\"],\n atr_dict[\"inbreed_lim\"],\n atr_dict[\"hla_genes\"], atr_dict[\"binary_health\"],\n atr_dict[\"carrier_percentage\"],\n atr_dict[\"filename\"],\n atr_dict[\"two_envs\"],\n atr_dict[\"diff_envs\"],\n atr_dict[\"migration_rate\"],\n 
atr_dict[\"phen_pref\"])\n #reset defaults\n atr_dict = {\"x_loops\": x_loops, \"max_steps\": max_steps,\n \"display_on\": display_on, \"max_fps\": max_fps,\n \"garden_size\": garden_size,\n \"tako_number\": tako_number, \"pop_max\": pop_max,\n \"max_width\": max_width, \"max_height\": max_height,\n \"collect_data\": collect_data, \"export_all\": export_all,\n \"rand_nets\": rand_nets, \"max_gen\": max_gen,\n \"genetic_mode\": genetic_mode, \"learning_on\": learning_on,\n \"seeds\": seeds, \"garden_mode\": garden_mode,\n \"family_detection\": family_detection,\n \"family_mod\": family_mod,\n \"record_inbreeding\": record_inbreeding,\n \"inbreed_lim\": inbreed_lim, \"filename\": filename,\n \"hla_genes\": hla_genes, \"binary_health\": binary_health,\n \"carrier_percentage\": carrier_percentage,\n \"two_envs\": two_envs, \"diff_envs\": diff_envs,\n \"migration_rate\": migration_rate, \"phen_pref\": phen_pref}\n else:\n #get rid of newline character\n line = line[:-1]\n line = line.split(\": \")\n if line[0] in ints:\n val = int(line[1])\n elif line[0] in floats:\n val = float(line[1])\n elif line[0] in bools:\n val = True if line[1] == \"True\" else False\n elif line[0] in strs:\n val = line[1]\n elif line[0] == \"family_detection\":\n if line[1] == \"None\":\n val = None\n else:\n val = line[1]\n elif line[0] == \"seeds\":\n val = line[1].split(\" \")\n atr_dict[line[0]] = val\n #run the last one in the file\n run_experiment(atr_dict[\"x_loops\"], atr_dict[\"max_steps\"],\n atr_dict[\"display_on\"], atr_dict[\"max_fps\"],\n atr_dict[\"garden_size\"],\n atr_dict[\"tako_number\"], atr_dict[\"pop_max\"],\n atr_dict[\"max_width\"], atr_dict[\"max_height\"],\n atr_dict[\"collect_data\"], atr_dict[\"export_all\"],\n atr_dict[\"rand_nets\"], atr_dict[\"max_gen\"],\n atr_dict[\"genetic_mode\"],\n atr_dict[\"learning_on\"],\n atr_dict[\"seeds\"], atr_dict[\"garden_mode\"],\n atr_dict[\"family_detection\"],\n atr_dict[\"family_mod\"],\n atr_dict[\"record_inbreeding\"],\n atr_dict[\"inbreed_lim\"], atr_dict[\"hla_genes\"],\n atr_dict[\"binary_health\"], atr_dict[\"carrier_percentage\"],\n atr_dict[\"two_envs\"], atr_dict[\"diff_envs\"],\n atr_dict[\"migration_rate\"], atr_dict[\"phen_pref\"],\n atr_dict[\"filename\"])", "def simulate(): \n \n # Create tmpdir to hold all steerfiles and log files \n SimObj = Simulation(steerfiles=steerfiles, name=os.path.splitext(os.path.basename(rawfile_alu))[0] + '-sim' )\n\n # Set Beam energy\n SimObj.set_beam_momentum(beamenergy)\n\n # Create steerfiles for processing\n simpath = create_sim_path_air(SimObj)\n\n # Get gearfile\n localgearfile = SimObj.get_filename('gear.xml')\n\n # Misalign gear file\n randomize_telescope(gearfile=localgearfile, mean_list=mean_list, sigma_list=sigma_list, sensorexception_list=sensorexception_list, modeexception_list=modeexception_list)\n\n localtruthdb_filename=SimObj.create_dbfilename(truthdb_filename)\n\n # Convert gear file to alignmentDB root file, which will be stored in the sim folder\n Create_AlignmentDBFile_From_Gear(gearfile=SimObj.get_filename('gear.xml'), truthdbfilename=localtruthdb_filename)\n\n # Copy gearfile\n SimObj.copy_file('gear.xml','gear_air.xml')\n\n # Get air gearfile\n gearfile_air = SimObj.get_filename('gear_air.xml')\n\n # Change DUT in copied gearfile\n set_parameter(gearfile=gearfile_air, sensorID=11, parametername='thickness', value=0.0001)\n set_parameter(gearfile=gearfile_air, sensorID=11, parametername='radLength', value=304000.0)\n\n\n # Create caltag for the truthdb\n localcaltag = 
os.path.splitext(os.path.basename(rawfile_air))[0] + '-test'\n simcaltag=localcaltag+ '-truthdb'\n\n # Run simulation to create rawfile with simulated digits \n SimObj.simulate(path=simpath,caltag=simcaltag)", "def main(matrix,model,processors,algorithm):\n if algorithm == \"raxml-ng\":\n ab = subprocess.call(['which', 'raxml-ng'])\n if ab == 0:\n pass\n else:\n print(\"RAxML must be in your path as raxml-ng\")\n sys.exit()\n elif algorithm == \"raxml-HPC\":\n ab = subprocess.call(['which', 'raxmlHPC-PTHREADS-SSE3'])\n if ab == 0:\n pass\n else:\n print(\"RAxML must be in your path as raxmlHPC-PTHREADS-SSE3\")\n sys.exit()\n last=get_field_index(matrix)\n matrix_to_fasta(matrix, last)\n #Prep the creation of the FASTA file, removing odd characters\n os.system(\"sed 's/://g' all.fasta | sed 's/,//g' > out.fasta\")\n if model == \"ASC_GTRGAMMA\":\n subprocess.check_call(\"raxmlHPC-SSE3 -f d -p 12345 -m %s -s out.fasta -n nasp --asc-corr=lewis --no-bfgs > /dev/null 2>&1\" % model, stdout=open(os.devnull, 'wb'),stderr=open(os.devnull, 'wb'),shell=True)\n subprocess.check_call(\"raxmlHPC-SSE3 -f e -m %s -s out.fasta -t RAxML_bestTree.nasp -n PARAMS --asc-corr=lewis --no-bfgs > /dev/null 2>&1\" % model, stdout=open(os.devnull, 'wb'),stderr=open(os.devnull, 'wb'),shell=True)\n else:\n if algorithm == \"raxml-HPC\":\n subprocess.check_call(\"raxmlHPC-PTHREADS-SSE3 -T %s -f d -p 12345 -m %s -s out.fasta -n nasp --no-bfgs > /dev/null 2>&1\" % (processors,model), stdout=open(os.devnull, 'wb'),stderr=open(os.devnull, 'wb'),shell=True)\n subprocess.check_call(\"raxmlHPC-PTHREADS-SSE3 -T %s -f e -m %s -s out.fasta -t RAxML_bestTree.nasp -n PARAMS --no-bfgs > /dev/null 2>&1\" % (processors,model), stdout=open(os.devnull, 'wb'),stderr=open(os.devnull, 'wb'),shell=True)\n elif algorithm == \"raxml-ng\":\n subprocess.check_call(\"raxml-ng --msa out.fasta --model GTR+G --threads %s --prefix nasp\" % processors,stdout=open(os.devnull, 'wb'),stderr=open(os.devnull, 'wb'),shell=True)\n if algorithm == \"raxml-HPC\":\n subprocess.check_call(\"mv RAxML_bestTree.nasp nasp_raxml.tree\", shell=True)\n subprocess.check_call(\"mv RAxML_binaryModelParameters.PARAMS nasp.PARAMS\", shell=True)\n subprocess.check_call(\"rm RAxML_* out.fasta all.fasta\", shell=True)\n else:\n subprocess.check_call(\"mv nasp.raxml.bestTree nasp_raxml.tree\", stdout=open(os.devnull, 'wb'),stderr=open(os.devnull, 'wb'),shell=True)\n subprocess.check_call(\"rm nasp.raxml.startTree out.fasta all.fasta\", stdout=open(os.devnull, 'wb'),stderr=open(os.devnull, 'wb'),shell=True)\n print(\"Model used: %s\" % model)", "def main(workdir):\n dir = os.path.expanduser(workdir)\n \n #read the .dat file\n f = open('{}smi.dat'.format(dir))\n par = imp.load_source('par', '', f)\n \n #make a sdf file for visualization\n output = pybel.Outputfile(\"sdf\", dir + \"species.sdf\",overwrite=True)\n for name in par.smiles:\n smi = par.smiles[name]\n obmol = pybel.readstring(\"smi\",smi)\n output.write(obmol)\n output.close()\n \n #list with the jobs that need to be done\n jobs = []\n \n #iterate the input files\n for name in par.smiles:\n #name = input_file.replace('.inp','') #name of the calculation\n test_dir = dir + name #location where the calculations will be done\n if not os.path.exists(test_dir):\n os.mkdir(test_dir)\n \n #copy the input file to the working directory\n write_input_file(par,name,par.smiles[name],test_dir + '/input.inp')\n job = workdir + name + '/'\n jobs.append(job)\n \n run_threads(jobs, 'eric', max_running = 3)", "def 
mutatraj(self,line):\n line=line.strip().split()\n no=line[1];mutares=line[2]\n typelines=open(self.mainpath+'/A-R.dat','r').readlines() \n mutapath=os.path.join(self.mainpath,no+mutares)\n if not os.path.exists(mutapath):os.makedirs(mutapath)\n rosettapdb=os.path.join(mutapath,'rospdbs')\n rosettamuta=os.path.join(mutapath,'rosmutapdbs')\n amberpdb=os.path.join(mutapath,'ambpdbs')\n minpdb=os.path.join(mutapath,'minpdbs')\n if not os.path.exists(rosettapdb):os.makedirs(rosettapdb)\n if not os.path.exists(rosettamuta):os.makedirs(rosettamuta)\n if not os.path.exists(amberpdb):os.makedirs(amberpdb)\n if not os.path.exists(minpdb):os.makedirs(minpdb)\n res=''\n res+='parm '+self.mainpath+'/'+str(self.top)+'\\n';res+='trajin '+self.mainpath+'/'+self.crd+' 1 '+str(self.mutaframes)+'\\n'\n res+='trajout snap.pdb multi chainid A\\n'\n f=open(mutapath+'/traj.in','w')\n f.write(res)\n f.close()\n os.system('cpptraj < '+no+mutares+'/traj.in')\n res=''\n res+='NATRO'+'\\n';res+='start'+'\\n'\n res+=' '+str(no)+' A '+' PIKAA '+str(mutares)+'\\n'\n resfile=mutapath+'/resfile.in'\n f=open(resfile,'w')\n f.write(res)\n f.close()\n inputpdbs=self._findfile('./','snap.pdb')\n print(inputpdbs)\n for i in range(len(inputpdbs)):\n AtoR(self.mainpath,rosettapdb,inputpdbs[i],typelines)\n inputf=os.path.join(rosettapdb,inputpdbs[i])\n fileno=re.sub(\"\\D\",\"\",inputpdbs[i])\n cmd='fixbb.default.linuxgccrelease -in:file:s '+inputf+' -out:path:all '+rosettamuta+' -resfile '+resfile+' -out:suffix .'+str(fileno)+' -overwrite -ex1:level 2 -ex2:level 2 -ex3:level 0 -ex4:level 0' \n os.system(cmd)\n RtoA(rosettamuta,amberpdb,inputpdbs[i],typelines)\n self.minimize(amberpdb,inputpdbs[i],minpdb)\n self.calint(no,mutares,'wild')\n self.calint(no,mutares,'muta')", "def run():\n motion_extraction()\n file_buff = open(\"right_arm.txt\", \"w\")\n for frame_coord in limb_coords[0]:\n file_buff.write(\"%d %d\\n\" % (frame_coord[1], frame_coord[0]))\n file_buff.close()\n file_buff2 = open(\"left_arm.txt\", \"w\")\n for frame_coord in limb_coords[1]:\n file_buff2.write(\"%d %d\\n\" % (frame_coord[1], frame_coord[0]))\n file_buff2.close()\n file_buff3 = open(\"body.txt\", \"w\")\n for frame_coord in limb_coords[2]:\n file_buff3.write(\"%d %d\\n\" % (frame_coord[1], frame_coord[0]))\n file_buff3.close()\n file_buff4 = open(\"right_leg.txt\", \"w\")\n for frame_coord in limb_coords[3]:\n file_buff4.write(\"%d %d\\n\" % (frame_coord[1], frame_coord[0]))\n file_buff4.close()\n file_buff5 = open(\"left_leg.txt\", \"w\")\n for frame_coord in limb_coords[4]:\n file_buff5.write(\"%d %d\\n\" % (frame_coord[1], frame_coord[0]))\n file_buff5.close()", "def test_random_M(m, k, n, T, fpr, fnr, num_random_matrices, COVID_dir, generate_matrix, print_every=5, verbose=False):\n folder_names = {'generate_const_row_weight': 'const-row-weight', 'generate_doubly_regular': 'doubly-regular'}\n folder_name = folder_names[generate_matrix.__name__]\n\n if not os.path.exists(COVID_dir + \"/tests/results/\"):\n os.makedirs(COVID_dir + \"/tests/results/\")\n if not os.path.exists(COVID_dir + \"/tests/results/%s/\" % folder_name):\n os.makedirs(COVID_dir + \"/tests/results/%s/\" % folder_name)\n\n f = k / n\n test_file = COVID_dir + '/tests/data/x-f-%s-384.csv' % k\n results_async = [None]*num_random_matrices\n\n outfile_name = COVID_dir + \"/tests/results/%s/m%s-k%s-n%s-T%s-numM%s.txt\" % (folder_name, m, k, n, T, num_random_matrices)\n\n multiproc_pool = mp.Pool(mp.cpu_count()) # Or put mp.Pool(3) here if you want to use 3 CPU cores.\n for i in 
range(num_random_matrices):\n if i % print_every == 0:\n print(\"Starting matrix %s\" % i)\n results_async[i] = multiproc_pool.apply_async(test_random_matrix, args=(T, f, fnr, fpr, generate_matrix, m, n, test_file))\n\n results = [r.get() for r in results_async]\n multiproc_pool.close()\n multiproc_pool.join()\n\n with open(outfile_name, 'w') as outfile:\n json.dump(results, outfile)\n\n if not verbose:\n num_errors = []\n for result in results:\n num_errors.append(result['num_errors'])\n average_errors = np.average(num_errors)\n print(\"======================\")\n print(\"Test result for constant row weight = %s, infection rate %s/%s:\" % (m, k, n))\n print(\"(based on %s membership matrices)\" % num_random_matrices)\n print(\"Average Accuracy: %.2f \" % (1 - average_errors / (n * 100)))\n print(\"======================\")", "def run_script(input_dir, output_dir):\n\n \"\"\"+++++++++++++++++++++++++++++++++++\"\"\"\n \"\"\"Step 1. load dataset \"\"\"\n print(\"loading data ......\")\n print(\"+++++++Read the surface shape data+++++++\")\n shape_file_name = input_dir + \"aligned_shapes.mat\"\n mat = loadmat(shape_file_name)\n y_design = mat['aligned_shape']\n n, l, m = y_design.shape\n print(\"The dimension of shape matrix is \" + str(y_design.shape))\n print(\"+++++++Read the sphere coordinate data+++++++\")\n template_file_name = input_dir + \"template.mat\"\n mat = loadmat(template_file_name)\n coord_mat = mat['template']\n # d = coord_mat.shape[1]\n print(\"+++++++Read the design matrix+++++++\")\n design_data_file_name = input_dir + \"design_data.txt\"\n design_data = np.loadtxt(design_data_file_name)\n # read the covariate type\n var_type_file_name = input_dir + \"var_type.txt\"\n var_type = np.loadtxt(var_type_file_name)\n print(\"+++++++Construct the design matrix: normalization+++++++\")\n x_design = read_x(design_data, var_type)\n p = x_design.shape[1]\n print(\"The dimension of design matrix is \" + str(x_design.shape))\n\n \"\"\"+++++++++++++++++++++++++++++++++++\"\"\"\n \"\"\"Step 2. Statistical analysis: including (1) smoothing and (2) hypothesis testing\"\"\"\n gpvals, lpvals_fdr, clu_pvals, efit_beta, efity_design, efit_eta = mfsda.run_stats(y_design, coord_mat, design_data, var_type)\n\n \"\"\"+++++++++++++++++++++++++++++++++++\"\"\"\n \"\"\"Step3. 
Save all the results\"\"\"\n gpvals_file_name = output_dir + \"global_pvalue.txt\"\n np.savetxt(gpvals_file_name, gpvals)\n lpvals_fdr_file_name = output_dir + \"local_pvalue_fdr.txt\"\n np.savetxt(lpvals_fdr_file_name, lpvals_fdr)\n clu_pvals_file_name = output_dir + \"cluster_pvalue.txt\"\n np.savetxt(clu_pvals_file_name, clu_pvals)", "def main():\n _input = read_lines_to_list(r'input.txt')\n dimensions = get_dimensions(_input)\n plotter = plot_claims_on_matrix(dimensions)\n answer_part_1 = calculate_claims_within_two_or_more_claims(plotter)\n print(answer_part_1)", "def load_sim(filename):\n return pybamm.load(filename)", "def load_simulation_mass(self, file_name):\n \n data = np.genfromtxt(file_name, names=True)\n \n if not hasattr(self, 'simulation_data'):\n self.simulation_data = {}\n \n \n self.simulation_data['mass'] = data['m'] # assuming in solar masses\n self.simulation_data['time'] = data['t'] # assuming in millions of years", "def main():\n parser = ArgumentParser(description=\"write to a file\")\n\n parser.add_argument(\"-i\",\"--input\", type=setup.is_valid_h5_file, required=True, nargs='+',\n help=\"path(s) of HDF5 master file(s)\")\n\n parser.add_argument(\"-b\",\"--beamcenter\", nargs=2, required=True,\n help=\"beam center in X and Y (two arguments)\")\n\n parser.add_argument(\"-r\",\"--oscillation\", type=float, default=1,\n help=\"oscillation angle per well, default = 1\")\n\n parser.add_argument(\"-d\",\"--distance\", type=float, default=100,\n help=\"detector distance in mm\")\n\n parser.add_argument(\"-w\",\"--wavelength\", type=float, default=1.216,\n help=\"Wavelength in Angstrom, default is 1.216\")\n\n parser.add_argument(\"-f\",\"--framesperdegree\", type=int, default=5,\n help=\"Number of frames per degree, default is 5\")\n\n parser.add_argument(\"-t\",\"--totalframes\", type=int, default=0,\n help=\"Total number of frames to be processed, default all\")\n\n parser.add_argument(\"--output\", default=os.getcwd(),\n help=\"Use this option to change output directory, default pwd\")\n\n parser.add_argument(\"-sg\",\"--spacegroup\", type=int, default=0,\n help=\"Space group\")\n\n parser.add_argument(\"-u\",\"--unitcell\", type=str, default=\"50 50 50 90 90 90\",\n help=\"unit cell\")\n\n argslist = parser.parse_args()\n for masterfile in argslist.input:\n master1= Master(argslist,masterfile)\n master1.printDataWells()", "def change_input(filename, frac_l=0.019994, force=25.0, time=200_000, seed=None):\r\n\r\n frac_sl = round(1.5 /(32**3 *3), 6)\r\n frac_l = round(frac_l -frac_sl, 6)\r\n frac_w = round(1 -frac_l -frac_sl, 6)\r\n \r\n params = {'Box': \"32 32 32\\t1 1 1\", 'RNGSeed': seed if seed is not None else -4073, 'Step': 0.02, 'Time': time, \r\n 'SamplePeriod': 100, 'AnalysisPeriod': 1000, 'DensityPeriod': time, 'DisplayPeriod': time //10, 'RestartPeriod': time,\r\n }\r\n\r\n with open(filename, 'rt') as rf:\r\n with open(filename+'_sim', 'wt') as wf:\r\n\r\n for line in rf:\r\n \r\n if line.startswith('Polymer\\tWater') or line.startswith('Polymer Water'):\r\n line = line.strip().split()\r\n line[2] = f\"{frac_w:.6f}\"\r\n \r\n # Converts list to list[str]\r\n line = list(map(str, line))\r\n wf.write('\\t'.join(line) + '\\n')\r\n\r\n elif line.startswith('Polymer\\tLipid') or line.startswith('Polymer Lipid'):\r\n line = line.strip().split()\r\n line[2] = f\"{frac_l:.6f}\"\r\n\r\n # Converts list to list[str]\r\n line = list(map(str, line))\r\n wf.write('\\t'.join(line) + '\\n')\r\n \r\n elif line.startswith('Polymer\\tSingleLipid') or 
line.startswith('Polymer SingleLipid'):\r\n line = line.strip().split()\r\n line[2] = f\"{frac_sl:.6f}\"\r\n\r\n # Converts list to list[str]\r\n line = list(map(str, line))\r\n wf.write('\\t'.join(line) + '\\n')\r\n\r\n elif line.startswith('Command\\tConstantForceOnTarget') or line.startswith('Command ConstantForceOnTarget'):\r\n line = line.strip().split()\r\n\r\n if line[3] == \"singleLipidHead\":\r\n line[-1] = f\"{force /3:.6f}\" \r\n\r\n elif line[3] == \"lowerHeads\":\r\n line[-1] = f\"{force /3 /(1636):.6f}\"\r\n\r\n # Converts list to list[str]\r\n line = list(map(str, line))\r\n wf.write('\\t'.join(line) + '\\n')\r\n \r\n # if line.startswith('\tTimes\t0 1000'):\r\n # line = line.strip().split()\r\n # line[2] = f\"{time}\"\r\n \r\n # # Converts list to list[str]\r\n # line = list(map(str, line))\r\n # wf.write('\\t'.join(line) + '\\n') \r\n # line = next(rf) \r\n \r\n # if line.strip().split() and line.strip().split()[0] in params.keys():\r\n # key = line.strip().split()[0]\r\n # wf.write(f\"{key:<12}\\t{str(params[key])}\\n\")\r\n\r\n else:\r\n wf.write(line)" ]
[ "0.63589895", "0.6317133", "0.62950945", "0.6243753", "0.6216487", "0.6118958", "0.60965335", "0.60688287", "0.60328394", "0.5999172", "0.59918565", "0.59618837", "0.5955662", "0.59437644", "0.59124404", "0.5902302", "0.58720607", "0.5864495", "0.58569854", "0.5856138", "0.58284146", "0.5828209", "0.58241034", "0.5816234", "0.581581", "0.5811193", "0.57881755", "0.57845116", "0.5767427", "0.5765671" ]
0.66973764
0
add post filter object as a report entry
def add_post_filter_object(self, data_obj): self._description = data_obj.description self._filter = data_obj.filter self._method = data_obj.method self._operator = data_obj.operator self._type = 'pfo'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self, event):\n post_event(event, self.baseUrl, self.filterName)", "def addAutoSaveFilter(filter):", "def post_filter(self, qs):\n return qs", "def _filter_post(post):\n\n return True", "def add_filter_entry(entry_message, data=''):\n return partial(__add_entry,\n event_type='FILTER',\n entry_message=entry_message,\n data=data)", "def report_callback(self, object, report, request):\n ...", "def add_filter(self, filter):\n self._filters.append(filter.as_dict())", "def _add_filters(self, filters):\n self._env.filters['dateformat'] = dateformat\n self._env.filters.update(filters or {})", "def _add_filter(self, type, args):\r\n if isinstance(args, pylastica.filter.abstractfilter.AbstractFilter):\r\n args = args.to_dict()\r\n assert isinstance(args, dict), \"Invalid parameter. Must be a dict or instance of implementation of AbstractFilter.\"\r\n var_name = '_' + type\r\n self.__dict__[var_name].append(args)\r\n return self", "def add_to_pr_export(self, exp_template):", "def report(self, trade, is_entry):\n pass", "def process_post_events(self):\n self.portfolio_handler.to_database_portfolio()", "def with_post_criteria(self, fn):\n return self._using_post_criteria([fn])", "def add_filter(self, filter):\n ooi.logging.log._add_filter(filter)", "async def add(self, ctx, *, link):\r\n try: # compatability with older versions\r\n self.adkillr[ctx.message.server.id]['filters'].append(link)\r\n except KeyError:\r\n self.adkillr[ctx.message.server.id]['filters'] = [link]\r\n dataIO.save_json(\"data/adkillr/adkillr.json\", self.adkillr)\r\n await self.bot.say(\"Filter added.\")", "def stash_filter(\n self, filter_obj, institute_obj, case_obj, user_obj, category=\"snv\", link=None\n ):\n\n LOG.info(\n \"Stashing filter for user '%s' and institute %s.\",\n user_obj.get(\"email\"),\n institute_obj.get(\"display_name\"),\n )\n\n LOG.info(\"Filter object {}\".format(filter_obj))\n\n institute_id = institute_obj.get(\"_id\")\n filter_dict = {\"institute_id\": institute_id, \"category\": category}\n\n # make up a default display name\n filter_dict[\"display_name\"] = (\n institute_obj.get(\"display_name\") + \"-\" + case_obj.get(\"display_name\")\n )\n\n for (element, value) in filter_obj.lists():\n if value == [\"\"]:\n continue\n if element in [\"save_filter\", \"filters\", \"csrf_token\"]:\n continue\n if element == \"filter_display_name\":\n # filter display_name if given\n # will appear as the only element in an array\n filter_dict[\"display_name\"] = value[0]\n continue\n filter_dict[element] = value\n\n result = self.filter_collection.insert_one(filter_dict)\n\n filter_id = result.inserted_id\n\n # log event\n subject = institute_obj[\"display_name\"]\n\n # link e.g. 
to the variants view where filter was created\n if link is None:\n variants_target_from_category = {\n \"sv\": \"variants.sv_variants\",\n \"cancer\": \"variants.cancer_variants\",\n \"snv\": \"variants.variants\",\n }\n target = variants_target_from_category.get(category)\n\n case_name = case_obj.get(\"display_name\")\n # filter dict already contains institute_id=institute_id,\n link = url_for(target, case_name=case_name, **filter_dict)\n\n self.create_event(\n institute=institute_obj,\n case=case_obj,\n user=user_obj,\n link=link,\n category=\"case\",\n verb=\"filter_stash\",\n subject=subject,\n level=\"global\",\n )\n\n return filter_id", "def append_crds_filter(self, filter):\n if filter not in self.filters:\n self.filters = self.filters + [filter]", "def _setpostdata(report_type, start_date, end_date, customer):\n if report_type is 'summary':\n post_data = {\n 'download_output_type': 'PDF', \n 'id': 'customer_summary', \n 'ContainerWidget_TrafficWidget_Container_889b1a3e07b07d5ec96c0978ae18d3a2_active_tab': '0',\n 'ControlWidgetTimeframe_1b22834a75e912b90453a5ef163437c9_time_period': 'other',\n 'ControlWidgetTimeframe_1b22834a75e912b90453a5ef163437c9_time_start' : start_date, \n 'ControlWidgetTimeframe_1b22834a75e912b90453a5ef163437c9_time_end' : end_date, \n 'ControlWidgetUnit_3f24a644dea974fac65aac8a216ef833_unit': 'bps',\n 'ControlWidgetGraphType_ec873e91b2317542483fdfe3bf923464_graph_type_select': 'Detail',\n 'ControlWidgetObjectSelect_e0114bd4a614338bf811cbefdf58d0b7_name': customer[1], \n 'ControlWidgetObjectSelect_e0114bd4a614338bf811cbefdf58d0b7_gid': customer[0], \n 'selected_gid': '',\n 'selected_gid_changed': '0',\n 'TrafficQueryWidget_ea22dec57073eec32be17532cd88f38e_query_md5': 'f2acc4685dcc92191871de3c0a05543d',\n 'TrafficQueryWidget_ea22dec57073eec32be17532cd88f38e_results_filename': 'uberfetch.LOxoCV3876',\n 'ClassTableWidget_9f541ed7163793ad8d45f291f43891cc_sort_column_id': 'total',\n 'ResourceSelect_d41be746af56b50aa32ad05785d21d2f_filtergroupby': 'all',\n 'ResourceSelect_d41be746af56b50aa32ad05785d21d2f_search_text': 'cloud',\n 'ResourceSelect_d41be746af56b50aa32ad05785d21d2f_resource': customer[0]\n }\n elif report_type is 'applications':\n post_data = {\n 'download_output_type': 'PDF',\n 'id': 'customer_application',\n 'ContainerWidget_TrafficWidget_Container_964cdf33e0915d29bb987c831c29d8d0_active_tab': '0',\n 'ControlWidgetTimeframe_0aff26a635998c30f2ff1c264ca4cfff_time_period': 'other',\n 'ControlWidgetTimeframe_0aff26a635998c30f2ff1c264ca4cfff_time_start' : start_date, \n 'ControlWidgetTimeframe_0aff26a635998c30f2ff1c264ca4cfff_time_end' : end_date, \n 'ControlWidgetUnit_f853c1c8b8819f089a8c2ab391ff5283_unit': 'bps',\n 'ControlWidgetGraphType_aa6d8d424a0f0132503fba1b32f82105_graph_type_select': 'Stacked',\n 'ControlWidgetGraphType_aa6d8d424a0f0132503fba1b32f82105_graph_class_select': 'In',\n 'ControlWidgetObjectSelect_d6e585c3ac27a1ed9de7a07f1b100565_name': customer[1],\n 'ControlWidgetObjectSelect_d6e585c3ac27a1ed9de7a07f1b100565_gid': customer[0],\n 'selected_gid': '',\n 'selected_gid_changed': '0',\n 'TrafficQueryWidget_315bf3e9bf3e986576840af184d3fce4_query_md5': 'c74f51a07d5553e0913388fd8e5f29de',\n 'TrafficQueryWidget_315bf3e9bf3e986576840af184d3fce4_results_filename': 'uberfetch.DwyPP62471',\n 'TrafficTableWidget_f4d7b827b26ab87cf56a33ab68703d0e_sort_column_id': 'SumTotal',\n 'ResourceSelect_c61fcd7d877ca2d0ae04e1952c21c40c_filtergroupby': 'all',\n 'ResourceSelect_c61fcd7d877ca2d0ae04e1952c21c40c_search_text': '',\n 
'ResourceSelect_c61fcd7d877ca2d0ae04e1952c21c40c_resource': customer[0]\n } \n elif report_type is 'alerts':\n post_data = {\n 'download_output_type': 'PDF',\n 'id': 'customer_alerts',\n 'ContainerWidget_c_934eb365a6f3d58d18959d295aa7c97e_active_tab': '0',\n 'ControlWidgetTimeframe_2e68cdf9a00018d1599760e16a6b7dba_time_period': 'other',\n 'ControlWidgetTimeframe_2e68cdf9a00018d1599760e16a6b7dba_time_start': start_date, \n 'ControlWidgetTimeframe_2e68cdf9a00018d1599760e16a6b7dba_time_end': end_date,\n 'ControlWidgetObjectSelect_71c2cde0c20efc862ca6a9b260861db3_name': customer[1],\n 'ControlWidgetObjectSelect_71c2cde0c20efc862ca6a9b260861db3_gid': customer[0],\n 'selected_gid': '',\n 'selected_gid_changed': '0',\n 'AlertSearchPopIn_52100d1694ec0516d15f1344bcf6e441_alert_class_saved': 'all',\n 'AlertSearchPopIn_52100d1694ec0516d15f1344bcf6e441_alert_type_saved': 'all_types',\n 'AlertSearchPopIn_52100d1694ec0516d15f1344bcf6e441_start_wiz_dir_saved': 'between',\n 'AlertSearchPopIn_52100d1694ec0516d15f1344bcf6e441_stop_wiz_dir_saved': '',\n 'AlertSearchPopIn_52100d1694ec0516d15f1344bcf6e441_importance_high': 'on',\n 'AlertSearchPopIn_52100d1694ec0516d15f1344bcf6e441_importance_medium': 'on',\n 'AlertSearchPopIn_52100d1694ec0516d15f1344bcf6e441_importance_low': 'on',\n 'AlertSearchPopIn_52100d1694ec0516d15f1344bcf6e441_alert_class': 'all',\n 'AlertSearchPopIn_52100d1694ec0516d15f1344bcf6e441_alert_type': 'all_types',\n 'AlertSearchPopIn_52100d1694ec0516d15f1344bcf6e441_alert_classification': 'all',\n 'AlertSearchPopIn_52100d1694ec0516d15f1344bcf6e441_search_limit': '100',\n 'AlertSearchPopIn_52100d1694ec0516d15f1344bcf6e441_page_size':'100',\n 'AlertSearchPopIn_52100d1694ec0516d15f1344bcf6e441_ongoing': 'on',\n 'AlertSearchPopIn_52100d1694ec0516d15f1344bcf6e441_recent': 'on',\n 'AlertSearchPopIn_52100d1694ec0516d15f1344bcf6e441_use_start_wiz': 'on',\n 'AlertSearchPopIn_52100d1694ec0516d15f1344bcf6e441_start_wiz_dir': 'between',\n 'AlertSearchPopIn_52100d1694ec0516d15f1344bcf6e441_start_wiz_month1': '12',\n 'AlertSearchPopIn_52100d1694ec0516d15f1344bcf6e441_start_wiz_day1': '1',\n 'AlertSearchPopIn_52100d1694ec0516d15f1344bcf6e441_start_wiz_year1': '2017',\n 'AlertSearchPopIn_52100d1694ec0516d15f1344bcf6e441_start_wiz_hour1': '00:00',\n 'AlertSearchPopIn_52100d1694ec0516d15f1344bcf6e441_start_wiz_month2': '1',\n 'AlertSearchPopIn_52100d1694ec0516d15f1344bcf6e441_start_wiz_day2': '1',\n 'AlertSearchPopIn_52100d1694ec0516d15f1344bcf6e441_start_wiz_year2': '2018',\n 'AlertSearchPopIn_52100d1694ec0516d15f1344bcf6e441_start_wiz_hour2': '00:00',\n 'AlertSearchPopIn_52100d1694ec0516d15f1344bcf6e441_stop_wiz_dir': 'before',\n 'AlertSearchPopIn_52100d1694ec0516d15f1344bcf6e441_stop_wiz_month1': '1',\n 'AlertSearchPopIn_52100d1694ec0516d15f1344bcf6e441_stop_wiz_day1': '',\n 'AlertSearchPopIn_52100d1694ec0516d15f1344bcf6e441_stop_wiz_year1': '2018',\n 'AlertSearchPopIn_52100d1694ec0516d15f1344bcf6e441_stop_wiz_hour1': '00:00',\n 'AlertSearchPopIn_52100d1694ec0516d15f1344bcf6e441_lo_bps_wiz_base': '',\n 'AlertSearchPopIn_52100d1694ec0516d15f1344bcf6e441_lo_bps_wiz_scale': 'u',\n 'AlertSearchPopIn_52100d1694ec0516d15f1344bcf6e441_hi_bps_wiz_base': '',\n 'AlertSearchPopIn_52100d1694ec0516d15f1344bcf6e441_hi_bps_wiz_scale': 'u',\n 'AlertSearchPopIn_52100d1694ec0516d15f1344bcf6e441_lo_pps_wiz_base': '',\n 'AlertSearchPopIn_52100d1694ec0516d15f1344bcf6e441_lo_pps_wiz_scale': 'u',\n 'AlertSearchPopIn_52100d1694ec0516d15f1344bcf6e441_hi_pps_wiz_base': '',\n 
'AlertSearchPopIn_52100d1694ec0516d15f1344bcf6e441_hi_pps_wiz_scale': 'u',\n 'AlertSearchPopIn_52100d1694ec0516d15f1344bcf6e441_low_sev_wiz': '',\n 'AlertSearchPopIn_52100d1694ec0516d15f1344bcf6e441_high_sev_wiz': '',\n 'ResourceSelect_44123392654d8d76d8f7b70ad83d7801_filtergroupby': 'all',\n 'ResourceSelect_44123392654d8d76d8f7b70ad83d7801_search_text': '',\n 'ResourceSelect_44123392654d8d76d8f7b70ad83d7801_resource': customer[0]\n } \n elif report_type is 'toptalkers':\n post_data = {\n 'download_output_type': 'PDF',\n 'id': 'customer_toptalkers_external',\n 'ContainerWidget_TrafficWidget_Container_ad3ec1cf1c07cad4162e3fcbc43ea958_active_tab': '0',\n 'ControlWidgetTimeframe_69df78a48f4409b568a21bca903cea67_time_period': 'month',\n 'ControlWidgetTimeframe_69df78a48f4409b568a21bca903cea67_time_start': '28+days+ago',\n 'ControlWidgetTimeframe_69df78a48f4409b568a21bca903cea67_time_end': 'now',\n 'ControlWidgetUnit_45302aa65733e4733dcc633212655f1a_unit': 'bps',\n 'ControlWidgetGraphType_6255f5246a0ee3ce4c0f8ae53339ea02_graph_type_select': 'Bar',\n 'ControlWidgetObjectSelect_cdf5728cbfccd6ca358afeeb305c8602_name': customer[1],\n 'ControlWidgetObjectSelect_cdf5728cbfccd6ca358afeeb305c8602_gid': customer[0],\n 'selected_gid': '',\n 'selected_gid_changed': '0',\n 'TrafficQueryWidget_d59d50899f6dbcc240c32f7c7a741532_query_md5': 'fec0c00a8f9bc842316aa66f06ccb5ab',\n 'TrafficQueryWidget_d59d50899f6dbcc240c32f7c7a741532_results_filename': 'uberfetch.fFhot30911',\n 'HostName_5e0c6cae3e5a4e8f1f510e4b40272d9e': 'desc',\n 'Peak_893cc9c6f75deb698c4dda2790c86f78': 'desc',\n 'Proportion_1c9d7962ef85f58409619385e086df97': 'desc',\n 'Time_48194f7711fc2b6c0fe1b840f3d02aa5': 'desc',\n 'TrafficTableWidget_4735cae5e8309e405643bcb4573109a5_sort_column_id': 'total',\n 'ResourceSelect_473bfbca9e3041f7953b6fe15602a833_filtergroupby0': 'all',\n 'ResourceSelect_473bfbca9e3041f7953b6fe15602a833_search_text': '',\n 'ResourceSelect_473bfbca9e3041f7953b6fe15602a833_resource': customer[0]\n }\n else:\n pass \n post_data_encoded = urllib.urlencode(post_data)\n return post_data_encoded", "def add_filter(self, label):\n if label not in self.FILTER:\n if \"PASS\" in self.FILTER:\n self.FILTER = [f for f in self.FILTER if f != \"PASS\"]\n self.FILTER.append(label)", "def post_processor(self):", "def add_filter_entry(self, filter_column=None, filter_entry=None):\n new_filter_label = tkinter.Label(self.rightmostframe, text='Custom Column Filter:')\n new_filter_label.pack(pady=4)\n\n my_str = tkinter.StringVar()\n\n new_filter_columns = tkinter.OptionMenu(self.rightmostframe, my_str, *self.columns_list)\n if filter_column != None:\n my_str.set(filter_column)\n new_filter_columns.pack(pady=4)\n\n new_filter_entry = tkinter.Entry(self.rightmostframe)\n if filter_entry != None:\n new_filter_entry.insert(0, filter_entry)\n new_filter_entry.pack(pady=4)\n \n self.filter_entries_list.append((new_filter_entry, my_str))", "def collection_post(self):\n return super(TenderAwardDocumentResource, self).collection_post()", "def response_add(self, request, obj):\n if '_custom_action' in request.POST:\n pass\n return super().response_add(request, obj)", "def addAutoSaveDeleteFilter(filter):", "def handle_post(self, request, user, *args, **kwargs):\n\n try:\n\n self.log.info('Add Filter')\n # Commons Validations\n\n # User permission\n if not has_perm(user, AdminPermission.ENVIRONMENT_MANAGEMENT, AdminPermission.WRITE_OPERATION):\n self.log.error(\n u'User does not have permission to perform the operation.')\n return self.not_authorized()\n\n # 
Load XML data\n xml_map, attrs_map = loads(request.raw_post_data)\n\n # XML data format\n networkapi_map = xml_map.get('networkapi')\n if networkapi_map is None:\n return self.response_error(3, u'There is no value to the networkapi tag of XML request.')\n\n filter_map = networkapi_map.get('filter')\n if filter_map is None:\n return self.response_error(3, u'There is no value to the filter tag of XML request.')\n\n # New Filter\n filter_ = Filter()\n\n # Validates\n filter_.validate_filter(filter_map)\n\n try:\n # Save filter\n filter_.save()\n except Exception, e:\n self.log.error(u'Failed to save the filter.')\n raise FilterError(e, u'Failed to save the filter')\n\n filter_map = dict()\n filter_map['id'] = filter_.id\n\n return self.response(dumps_networkapi({'filter': filter_map}))\n\n except InvalidValueError, e:\n return self.response_error(269, e.param, e.value)\n except FilterDuplicateError, e:\n return self.response_error(344, e.message)\n except FilterError, e:\n return self.response_error(338)\n except BaseException, e:\n return self.response_error(1)", "def add_report_raw(self, report_data):\n self._reports.append(report_data)", "def add_filter(self, f):\n raise NotImplementedError", "def add(self, new_filter: Filter) -> None:\r\n self.filters.append(new_filter)", "def addPostRunAction ( action ) :\n global __Bender_PostRun_Actions\n if action : __Bender_PostRun_Actions.append ( action )\n return tuple(__Bender_PostRun_Actions)", "def collection_post(self):\n return super(TenderAwardContractDocumentResource, self).collection_post()" ]
[ "0.5869994", "0.5798248", "0.5682353", "0.56647676", "0.5580719", "0.5488624", "0.54865915", "0.5483772", "0.5360345", "0.532434", "0.5279582", "0.5261436", "0.52171093", "0.51795906", "0.5126167", "0.51139915", "0.51130533", "0.5110276", "0.51059115", "0.51031727", "0.5094383", "0.50874835", "0.5076272", "0.50536156", "0.5048631", "0.503866", "0.50055397", "0.49952698", "0.4988301", "0.4929675" ]
0.76323026
0
Inverse of builtin zip(), returns iterators of each tuple item. Returns tuple of iterators based upon an iterable, each item of which is assumed to be a tuple of the same number of items (i.e., of the sort created by zip() builtin). Assumes that all items of the iterable are a tuple with same length as the initial item. The iterable can be infinite (i.e., a zip() of infinite iterators). In that case, each returned iterator will also be infinite.
def unzip(iterable: Iterable[Tuple[Any, ...]]) -> Tuple[Iterator[Any], ...]: first, iterator = _common.peek(iter(iterable)) if first is None: return () tees = itertools.tee(iterator, len(first)) return (map(operator.itemgetter(i), tee) for i, tee in enumerate(tees))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unzip_finite(\n iterable: Iterable[Tuple[Any, ...]],\n) -> Tuple[Iterator[Any], ...]:\n for zipped in zip(*iterable):\n yield zipped", "def pairwise(iterable):\r\n a = iter(iterable)\r\n return izip(a, a)", "def unzip(seq: Iterable) -> tuple:\n seq = iter(seq)\n # check how many iterators we need\n try:\n first = tuple(next(seq))\n except StopIteration:\n return tuple()\n # and create them\n niters = len(first)\n seqs = itertools.tee(itertoolz.cons(first, seq), niters)\n return tuple(itertools.starmap(itertoolz.pluck, enumerate(seqs)))", "def pairwise(iterable):\n a = iter(iterable)\n return zip(a, a)", "def pairwise(iterable):\n a, b = tee(iterable)\n next(b, None)\n return izip(a, b)", "def pairwise(iterable: Iterable[Any]) -> Iterable[Any]:\n a, b = tee(iterable)\n next(b, None)\n return zip(a, b)", "def pairwise(iterable):\n a, b = itertools.tee(iterable)\n next(b, None)\n return zip(a, b)", "def pairwise(iterable):\n a, b = itertools.tee(iterable)\n next(b, None)\n return zip(a, b)", "def pairwise(iterable):\n a, b = itertools.tee(iterable)\n next(b, None)\n return zip(a, b)", "def pairwise(iterable):\n a = iter(iterable)\n return zip(a, a, a)", "def pairwise(iterable: Iterable):\n a, b = tee(iterable)\n next(b, None)\n return zip(a, b)", "def pairwise(iterable):\r\n a, b = itertools.tee(iterable)\r\n next(b, None)\r\n return itertools.izip(a, b)", "def pairwise(iterable):\n # copied from itertools docs\n from itertools import tee\n a, b = tee(iterable)\n next(b, None)\n return zip(a, b)", "def pairwise(iterable):\n a, b = itertools.tee(iterable)\n next(b, None)\n return list(zip(a, b))", "def pairwise(iterable):\n a, b = itertools.tee(iterable)\n next(b, None)\n return itertools.zip_longest(a, b)", "def pairwise(iterable):\n a, b = tee(iterable)\n next(b, None)\n\n return zip(a, b)", "def pairwise(iterable):\n a, b = tee(iterable)\n next(b, None)\n\n return zip(a, b)", "def pairwise(iterable):\n a, b = tee(iterable)\n next(b, None)\n return zip(a, b)", "def pairwise(iterable):\n a, b = tee(iterable)\n next(b, None)\n return zip(a, b)", "def pairwise(iterable):\n a, b = tee(iterable)\n next(b, None)\n return zip(a, b)", "def pairwise(iterable):\n a, b = tee(iterable)\n next(b, None)\n return zip(a, b)", "def unzip(seq):\n seq = iter(seq)\n # check how many iterators we need\n try:\n first = tuple(next(seq))\n except StopIteration:\n return tuple()\n # and create them\n niters = len(first)\n seqs = tee(cons(first, seq), niters)\n return tuple(starmap(pluck, enumerate(seqs)))", "def pairwise(iterable: Iterable,\n tuple_size: int):\n return zip_longest(*(islice(it, pos, None) for pos, it in enumerate(tee(iterable, tuple_size))))", "def pairwise(iterable, include_tail=False):\n left, right = itertools.tee(iterable)\n next(right, None)\n if include_tail:\n right = itertools.chain(right, [None])\n\n return zip(left, right)", "def pairwise(iterable, fillvalue=None):\n a, b = it.tee(iterable)\n next(b, fillvalue)\n return it.izip(a, b)", "def pairwise(iter):\n from itertools import tee, izip\n it, it_next = tee(iter)\n next(it_next)\n for first, second in izip(it, it_next):\n yield first, second", "def unzip(i, iterable):\n return [x[i] for x in iterable]", "def split_in_pairs(arg: Iterable) -> Iterable[Tuple]:\n # We are using zip_longest with one clever hack:\n # https://docs.python.org/3/library/itertools.html#itertools.zip_longest\n # We create an iterator out of the list and then pass the same iterator to\n # the function two times. 
Thus the function consumes a different element\n # from the iterator each time and produces the desired result.\n iterator = iter(arg)\n return zip_longest(iterator, iterator)", "def batch_iter(iterable: Iterable[T], size: int) -> Iterator[Tuple[T]]:\n # make sure we can deal with iterables like lists too\n sourceiter = iter(iterable)\n # call islice until it returns an empty tuple\n return iter(lambda: tuple(islice(sourceiter, size)), ())", "def pairwise(iterable: Iterable[Any]) -> Sequence[Any]:\n a, b = tee(iterable)\n next(b, None)\n return [\"\".join(t) for t in zip(a, b)]" ]
[ "0.7884123", "0.73951375", "0.73822427", "0.7329859", "0.72852314", "0.7253318", "0.72527623", "0.72527623", "0.72527623", "0.72360116", "0.7232941", "0.71585727", "0.7145812", "0.71393615", "0.71206856", "0.7116042", "0.7116042", "0.7113765", "0.7113765", "0.7113765", "0.7113765", "0.7015295", "0.6857419", "0.676724", "0.6685703", "0.6578283", "0.6511718", "0.65046394", "0.6431568", "0.6419559" ]
0.7844818
1
Similar to unzip(), but limited to finite iterables. This function does not distinguish sentinel values of the sort used by itertools.zip_longest(). Such sentinels will be included in the returned item iterators. See unzip_longest_finite() in this module to efficiently unzip an iterable created using itertools.zip_longest().
def unzip_finite( iterable: Iterable[Tuple[Any, ...]], ) -> Tuple[Iterator[Any], ...]: for zipped in zip(*iterable): yield zipped
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unzip_longest_finite(\n iterable: Iterable[Tuple[Any, ...]],\n *,\n fillvalue: Optional[Any] = None,\n) -> Tuple[Iterator[Any], ...]:\n for zipped in zip(*iterable):\n yield tuple(item for item in zipped if item != fillvalue)", "def zip_discard_compr(*iterables: Iterable, sentinel: Any = object()) -> Any:\r\n return [[entry for entry in iterable if entry is not sentinel]\r\n for iterable in zip_longest(*iterables, fillvalue=sentinel)]", "def zip_strict(*iterables) -> Iterator[Tuple[Any, ...]]:\n for values in itertools.zip_longest(*iterables, fillvalue=_NO_VALUE):\n if any(value is _NO_VALUE for value in values):\n msg = f'all iterables must have the same length'\n raise ValueError(msg)\n yield values", "def longzip(a, b):\n aiter = iter(a)\n biter = iter(b)\n try:\n for item1 in aiter:\n yield item1, next(biter)\n except StopIteration:\n for item1 in aiter:\n yield item1, None\n else:\n for item2 in biter:\n yield None, item2", "def unzip(seq):\n return zip(*seq)", "def _create_zip(*iterables, fillvalue=None, type_longest=False):\n\n if type_longest:\n from itertools import zip_longest\n\n zipped = zip_longest(*iterables, fillvalue=fillvalue)\n else:\n zipped = zip(*iterables)\n return zipped", "def zip2(*iterables, default=None):\n pack = [default] * len(iterables)\n # benchmarck this fonction using (unordered) set() or (ordered) dict() : seem that using dict() is a little bit quicker than set(), which is a bit quicker than list().\n #iterators = [(i, iter(it)) for i, it in enumerate(iterables)] # list()\n #iterators = {(i, iter(it)) for i, it in enumerate(iterables)} # unordered set() : https://docs.python.org/3/tutorial/datastructures.html#sets\n iterators = {i: iter(it) for i, it in enumerate(iterables)} # ordered dict() : https://docs.python.org/3/tutorial/datastructures.html#dictionaries\n todel = [] # using set() or dict()\n while True:\n #oi = 0 # using list()\n #for ii, (i, it) in enumerate(iterators): # using list()\n #for i, it in iterators: # using set()\n for i, it in iterators.items(): # using dict()\n for v in it:\n pack[i] = v\n break\n else:\n pack[i] = default\n #iterators.pop(ii - oi) ; oi += 1 # using list()\n #todel.append((i, it)) # using set()\n todel.append(i) # using dict()\n if todel:\n #for i in todel: iterators.remove(i) # using set()\n for i in todel: del iterators[i] # using dict()\n todel[:] = ()\n if iterators: yield tuple(pack)\n else: break", "def unzip(iterable: Iterable[Tuple[Any, ...]]) -> Tuple[Iterator[Any], ...]:\n first, iterator = _common.peek(iter(iterable))\n if first is None:\n return ()\n tees = itertools.tee(iterator, len(first))\n return (map(operator.itemgetter(i), tee) for i, tee in enumerate(tees))", "def unzip(seq):\n seq = iter(seq)\n # check how many iterators we need\n try:\n first = tuple(next(seq))\n except StopIteration:\n return tuple()\n # and create them\n niters = len(first)\n seqs = tee(cons(first, seq), niters)\n return tuple(starmap(pluck, enumerate(seqs)))", "def unzip(i, iterable):\n return [x[i] for x in iterable]", "def unzip(seq: Iterable) -> tuple:\n seq = iter(seq)\n # check how many iterators we need\n try:\n first = tuple(next(seq))\n except StopIteration:\n return tuple()\n # and create them\n niters = len(first)\n seqs = itertools.tee(itertoolz.cons(first, seq), niters)\n return tuple(itertools.starmap(itertoolz.pluck, enumerate(seqs)))", "def pairwise(iterable):\n a, b = itertools.tee(iterable)\n next(b, None)\n return itertools.zip_longest(a, b)", "def unzip(zipped):\n return zip(*zipped)", "def 
pairwise(iterable, fillvalue=None):\n a, b = it.tee(iterable)\n next(b, fillvalue)\n return it.izip(a, b)", "def exactZip(*args):\n lengths = [len(a) for a in args]\n if len(set(lengths)) <= 1:\n return zip(*args)\n else:\n raise ValueError(\"Iterables were of different lengths; {0}\".format(args))", "def unzip(ls, nout):\n out = list(zip(*ls))\n if not out:\n out = [()] * nout\n return out", "def zip_longest(list1, list2):\n zipped = zip(list1, list2)\n if len(list1) < len(list2):\n zipped += [(None, item) for item in list2[len(list1):]]\n elif len(list1) > len(list2):\n zipped += [(item, None) for item in list1[len(list2):]]\n return zipped", "def zip(*args, **kwargs):\n args = [list(iterable) for iterable in args]\n n = max(map(len, args))\n v = kwargs.get(\"default\", None)\n return _zip(*[i + [v] * (n - len(i)) for i in args])", "def dewindowify(iterable):\n for _, current, _ in iterable:\n yield current", "def grouper(size, iterable, fillvalue=None):\n args = [iter(iterable)] * size\n return zip_longest(fillvalue=fillvalue, *args)", "def zip_longest_ffill(*args):\n lists = [x if isinstance(x, list) else list(x) for x in args]\n max_total_len = max([len(l) for l in lists])\n\n ffill_lists = []\n\n for sub_list in lists:\n max_list_len = len(sub_list)\n max_diff = max_total_len - max_list_len\n\n # empty lists with no -1 index\n try:\n last_val = sub_list[-1]\n complete_list = sub_list + [last_val] * max_diff\n except:\n complete_list = [[] for _ in range(max_diff)]\n ffill_lists.append(complete_list)\n\n return zip(*ffill_lists)", "def grouper(iterable, n, fillvalue=None):\n args = [iter(iterable)] * n\n return itertools.zip_longest(*args, fillvalue=fillvalue)", "def grouper(iterable, n, fillvalue=None):\n args = [iter(iterable)] * n\n return itertools.zip_longest(*args, fillvalue=fillvalue)", "def unzip(self, x):\n if (len(x)>0):\n return list(zip(*x))\n else:\n return x, list()", "def grouper(iterable, block_size, fillvalue=None) -> list:\n args = [iter(iterable)] * block_size\n return zip_longest(*args, fillvalue=fillvalue)", "def grouper(iterable, n, fillvalue=None):\r\n args = [iter(iterable)] * n\r\n return zip_longest(*args, fillvalue=fillvalue)", "def zipGens(inputGens):\n gensRunning = [True for i in range(len(inputGens))] #this map prevents needing to catch the same StopIteration many times for each generator that stops sooner than the last one to stop.\n workingGenArr = [makeGen(item) for item in inputGens] #in case the inputGens contains things that aren't generators _or_ inputGens itself is a generator, this fixes that.\n while not all(not genIsRunning for genIsRunning in gensRunning):\n for genIndex in range(len(workingGenArr)):\n if gensRunning[genIndex]:\n try:\n yield next(workingGenArr[genIndex])\n except StopIteration:\n gensRunning[genIndex] = False #don't check this generator for items again.", "def alternate_iter(array):\n result = []\n array = sorted(array)\n for n in range(len(array)):\n if n >= len(array):\n break\n result.append(array.pop())\n try:\n result.append(array[n])\n except IndexError:\n break\n\n return result", "def grouper(iterable, n, fillvalue=None):\n args = [iter(iterable)] * n\n return itertools.zip_longest(fillvalue=fillvalue, *args)", "def grouper(iterable, n, fillvalue=None):\n args = [iter(iterable)] * n\n return zip_longest(*args, fillvalue=fillvalue)" ]
[ "0.7599641", "0.7005301", "0.64382815", "0.62493163", "0.6243333", "0.6125157", "0.61182976", "0.6105243", "0.5990228", "0.59512585", "0.5897735", "0.5838505", "0.57998437", "0.5778073", "0.57158124", "0.5677239", "0.5629741", "0.56005573", "0.5599043", "0.5512083", "0.546287", "0.53889626", "0.53889626", "0.53759533", "0.5353273", "0.5342557", "0.53189254", "0.5311378", "0.53111833", "0.5303489" ]
0.7502314
1
Similar to unzip(), but limited to finite iterables. This function distinguishes sentinel values of the sort used by itertools.zip_longest(). Such sentinels will be stripped from the returned item iterators. Use unzip_finite() in this module to efficiently unzip iterables created using builtin zip().
def unzip_longest_finite( iterable: Iterable[Tuple[Any, ...]], *, fillvalue: Optional[Any] = None, ) -> Tuple[Iterator[Any], ...]: for zipped in zip(*iterable): yield tuple(item for item in zipped if item != fillvalue)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def zip_discard_compr(*iterables: Iterable, sentinel: Any = object()) -> Any:\r\n return [[entry for entry in iterable if entry is not sentinel]\r\n for iterable in zip_longest(*iterables, fillvalue=sentinel)]", "def unzip_finite(\n iterable: Iterable[Tuple[Any, ...]],\n) -> Tuple[Iterator[Any], ...]:\n for zipped in zip(*iterable):\n yield zipped", "def zip_strict(*iterables) -> Iterator[Tuple[Any, ...]]:\n for values in itertools.zip_longest(*iterables, fillvalue=_NO_VALUE):\n if any(value is _NO_VALUE for value in values):\n msg = f'all iterables must have the same length'\n raise ValueError(msg)\n yield values", "def longzip(a, b):\n aiter = iter(a)\n biter = iter(b)\n try:\n for item1 in aiter:\n yield item1, next(biter)\n except StopIteration:\n for item1 in aiter:\n yield item1, None\n else:\n for item2 in biter:\n yield None, item2", "def unzip(seq):\n return zip(*seq)", "def zip2(*iterables, default=None):\n pack = [default] * len(iterables)\n # benchmarck this fonction using (unordered) set() or (ordered) dict() : seem that using dict() is a little bit quicker than set(), which is a bit quicker than list().\n #iterators = [(i, iter(it)) for i, it in enumerate(iterables)] # list()\n #iterators = {(i, iter(it)) for i, it in enumerate(iterables)} # unordered set() : https://docs.python.org/3/tutorial/datastructures.html#sets\n iterators = {i: iter(it) for i, it in enumerate(iterables)} # ordered dict() : https://docs.python.org/3/tutorial/datastructures.html#dictionaries\n todel = [] # using set() or dict()\n while True:\n #oi = 0 # using list()\n #for ii, (i, it) in enumerate(iterators): # using list()\n #for i, it in iterators: # using set()\n for i, it in iterators.items(): # using dict()\n for v in it:\n pack[i] = v\n break\n else:\n pack[i] = default\n #iterators.pop(ii - oi) ; oi += 1 # using list()\n #todel.append((i, it)) # using set()\n todel.append(i) # using dict()\n if todel:\n #for i in todel: iterators.remove(i) # using set()\n for i in todel: del iterators[i] # using dict()\n todel[:] = ()\n if iterators: yield tuple(pack)\n else: break", "def unzip(zipped):\n return zip(*zipped)", "def unzip(iterable: Iterable[Tuple[Any, ...]]) -> Tuple[Iterator[Any], ...]:\n first, iterator = _common.peek(iter(iterable))\n if first is None:\n return ()\n tees = itertools.tee(iterator, len(first))\n return (map(operator.itemgetter(i), tee) for i, tee in enumerate(tees))", "def unzip(seq):\n seq = iter(seq)\n # check how many iterators we need\n try:\n first = tuple(next(seq))\n except StopIteration:\n return tuple()\n # and create them\n niters = len(first)\n seqs = tee(cons(first, seq), niters)\n return tuple(starmap(pluck, enumerate(seqs)))", "def _create_zip(*iterables, fillvalue=None, type_longest=False):\n\n if type_longest:\n from itertools import zip_longest\n\n zipped = zip_longest(*iterables, fillvalue=fillvalue)\n else:\n zipped = zip(*iterables)\n return zipped", "def unzip(seq: Iterable) -> tuple:\n seq = iter(seq)\n # check how many iterators we need\n try:\n first = tuple(next(seq))\n except StopIteration:\n return tuple()\n # and create them\n niters = len(first)\n seqs = itertools.tee(itertoolz.cons(first, seq), niters)\n return tuple(itertools.starmap(itertoolz.pluck, enumerate(seqs)))", "def unzip(ls, nout):\n out = list(zip(*ls))\n if not out:\n out = [()] * nout\n return out", "def unzip(i, iterable):\n return [x[i] for x in iterable]", "def zip(*args, **kwargs):\n args = [list(iterable) for iterable in args]\n n = max(map(len, args))\n v = 
kwargs.get(\"default\", None)\n return _zip(*[i + [v] * (n - len(i)) for i in args])", "def exactZip(*args):\n lengths = [len(a) for a in args]\n if len(set(lengths)) <= 1:\n return zip(*args)\n else:\n raise ValueError(\"Iterables were of different lengths; {0}\".format(args))", "def pairwise(iterable, fillvalue=None):\n a, b = it.tee(iterable)\n next(b, fillvalue)\n return it.izip(a, b)", "def compact(seq):\n for item in seq:\n if item:\n yield item", "def unzip(self, x):\n if (len(x)>0):\n return list(zip(*x))\n else:\n return x, list()", "def pairwise(iterable):\n a, b = itertools.tee(iterable)\n next(b, None)\n return itertools.zip_longest(a, b)", "def alternate_iter(array):\n result = []\n array = sorted(array)\n for n in range(len(array)):\n if n >= len(array):\n break\n result.append(array.pop())\n try:\n result.append(array[n])\n except IndexError:\n break\n\n return result", "def dewindowify(iterable):\n for _, current, _ in iterable:\n yield current", "def zip_longest(list1, list2):\n zipped = zip(list1, list2)\n if len(list1) < len(list2):\n zipped += [(None, item) for item in list2[len(list1):]]\n elif len(list1) > len(list2):\n zipped += [(item, None) for item in list1[len(list2):]]\n return zipped", "def zipGens(inputGens):\n gensRunning = [True for i in range(len(inputGens))] #this map prevents needing to catch the same StopIteration many times for each generator that stops sooner than the last one to stop.\n workingGenArr = [makeGen(item) for item in inputGens] #in case the inputGens contains things that aren't generators _or_ inputGens itself is a generator, this fixes that.\n while not all(not genIsRunning for genIsRunning in gensRunning):\n for genIndex in range(len(workingGenArr)):\n if gensRunning[genIndex]:\n try:\n yield next(workingGenArr[genIndex])\n except StopIteration:\n gensRunning[genIndex] = False #don't check this generator for items again.", "def partition(zipped, num_steps, allow_overflow=True):\n size = len(zipped)\n parts = []\n\n for i in range(0, size, num_steps):\n end = i + num_steps\n\n if end >= size:\n parts.append(zip(*zipped[i:]))\n break\n elif allow_overflow:\n parts.append(zip(*zipped[i:end]))\n return parts", "def zip() -> List:\n pass", "def zip_longest_ffill(*args):\n lists = [x if isinstance(x, list) else list(x) for x in args]\n max_total_len = max([len(l) for l in lists])\n\n ffill_lists = []\n\n for sub_list in lists:\n max_list_len = len(sub_list)\n max_diff = max_total_len - max_list_len\n\n # empty lists with no -1 index\n try:\n last_val = sub_list[-1]\n complete_list = sub_list + [last_val] * max_diff\n except:\n complete_list = [[] for _ in range(max_diff)]\n ffill_lists.append(complete_list)\n\n return zip(*ffill_lists)", "def izip_fill(*iterables, **kw):\n iterables = map(iter, iterables)\n default = kw.pop('default', None)\n if kw:\n raise TypeError(\"unrecognized keyword arguments\")\n columns = len(iterables)\n columns_range = range(columns)\n while True:\n found_data = False\n row = [None] * columns\n for i in columns_range:\n try:\n row[i] = iterables[i].next()\n found_data = True\n except StopIteration:\n row[i] = default\n if not found_data:\n break\n yield tuple(row)", "def pairwise(iterable):\n a, b = itertools.tee(iterable)\n next(b, None)\n return zip(a, b)", "def pairwise(iterable):\n a, b = itertools.tee(iterable)\n next(b, None)\n return zip(a, b)", "def pairwise(iterable):\n a, b = itertools.tee(iterable)\n next(b, None)\n return zip(a, b)" ]
[ "0.71071625", "0.70818937", "0.6151461", "0.61206645", "0.61140555", "0.6043417", "0.5882001", "0.58613545", "0.5844499", "0.5748359", "0.57428557", "0.5737231", "0.5674092", "0.55678666", "0.5558231", "0.55369616", "0.5512515", "0.5436522", "0.5401102", "0.53359205", "0.533476", "0.53262705", "0.52165866", "0.5123413", "0.50232524", "0.5005101", "0.50028795", "0.49965718", "0.49965718", "0.49965718" ]
0.71139956
0
Returns the ith (0-indexed) binary array from the enumeration of all 2^n binary arrays with n elements, in order [0,0,...,0], [1,0,...,0], etc.
def ith_binary_array(i,n=9): return numpy.array([(i//2**j) % 2 for j in xrange(n)])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_naive_array(n):\n result = list()\n for i in range(1, n+1):\n result.append(i)\n return result", "def create_array( n ):", "def binary_compositions(n):\n return productrange(*[2]*(n-1))", "def create_n(N):\n\n all_n = np.array([])\n max_bin_len = len(bin(2 ** N - 1)[2:]) # need this for 2 -> 010 instead of 10\n for i in range(2**N):\n all_n = np.append(all_n, bin(i)[2:].zfill(max_bin_len))\n\n return all_n", "def bin_states(n, sym=False):\n\n if n<0:\n raise Exception(\"n cannot be <0\")\n if n>30:\n raise Exception(\"n is too large to enumerate all states.\")\n \n v = np.array([list(np.binary_repr(i,width=n)) for i in range(2**n)]).astype(int)\n\n if sym is False:\n return v\n return v*2-1", "def convert_to_bits(n):\n result = []\n if n == 0:\n return [0]\n while n > 0:\n result = [int(n % 2)] + result\n n = n / 2\n return result", "def xbin_states(n, sym=False):\n\n assert n>0, \"n cannot be <0\"\n \n def v():\n for i in range(2**n):\n if sym is False:\n yield np.array(list(np.binary_repr(i,width=n))).astype('int')\n else:\n yield np.array(list(np.binary_repr(i,width=n))).astype('int')*2-1\n\n return v()", "def tobinary_multiples(arr):\n return [np.array(arr_i).tobytes() for arr_i in arr]", "def generate_array_uints(n: int = 1024, max_int: int = 256, random_seed: int = None) -> TYPE_ARRAY:\n return _RNG.randint(0, max_int, n).astype(numpy.uint8)", "def subsets(n):\n binary = lambda x: x>0 and binary(x>>1) + [x&1] or []\n pad = lambda l: [0]*(n-len(l)) + l #Always returns a list of length 'n'\n return [pad(binary(i)) for i in range(1, 2**n)]", "def int2bin(k, n):\r\n binary_expansion = np.zeros(n, dtype=int)\r\n position = n-1 \r\n while k > 0:\r\n if k % 2 == 1: \r\n binary_expansion[position] = 1\r\n k = int(k/2)\r\n position -=1\r\n return binary_expansion", "def enumerate_combinations(n):\n combos = []\n for i in range(2, n): # 1 to n - 1\n _combos = list(combinations(range(n), i))\n combos += _combos\n\n combos_np = np.zeros((len(combos), n))\n for i in range(len(combos)):\n for idx in combos[i]:\n combos_np[i][idx] = 1\n\n combos_np = combos_np.astype(np.bool)\n return combos_np", "def convert_to_binary(N):\n if N == 0:\n return [0]\n if N == 1:\n return [1]\n if N == 2:\n return [1, 0]\n n = math.floor(math.log(N, 2))\n bin = [0 for i in range(n+1)]\n bin[0] = 1\n print(\"bin is {}\".format(bin))\n rem = N - 2 ** n\n print(\"rem is {}\".format(rem))\n bin_rem = convert_to_binary(rem)\n for i in range(-1, (len(bin_rem) * - 1) - 1, -1):\n bin[i] = bin_rem[i]\n return bin", "def indices(self):\n\n # We used lookup tables here. 
Read more about other methods here:\n # https://chessprogramming.wikispaces.com/Bitboard+Serialization\n\n if self.num == 0:\n return []\n\n bits = []\n\n for i in [0, 1, 2, 3, 4, 5, 6, 7]:\n row = (self.num >> UINT64_PADDING[i]) & EIGHT_ONES\n indices = row_to_indices[row]\n for index in indices:\n bits.append(index + i*8)\n\n return bits", "def bitlist(n):\n return [n >> i & 1 for i in range(7,-1,-1)]", "def bits(n):\n b = []\n while n:\n b = [n & 1] + b\n n >>= 1\n # add heading 0\n head = len(b) % 8\n if head != 0:\n b = [0] * (8 - head) + b\n return b", "def binomial_1D_array_kernel(i, n=4):\n\n # Below 1 is irrelevant.\n if i < 1:\n i = 1\n\n # Get the binomial coefficients.\n cs = list(binomial_coefficients(n))\n\n # Reuse the correction to `n` found by `binomial_coefficients`.\n n = len(cs) - 1\n\n # Get the right number of zeros to fill in\n zs = list(numpy.zeros(2 ** (i - 1) - 1, dtype=int))\n\n # Create the contents of the 1D kernel before normalization\n r = []\n if len(cs) > 1:\n for _ in cs[:-1]:\n r.append(_)\n r.extend(zs)\n\n r.append(cs[-1])\n else:\n r.extend(cs)\n\n r = numpy.array(r)\n r = r.astype(float)\n\n # Normalization on the L_1 norm.\n r /= 2 ** n\n\n return(r)", "def conv_array(n):\n\n\t# Allocate kernel\n\tconv = np.zeros(n)\n\n\t# Give values to elements\n\tfor i in range(0,n-1):\n\t\tconv[i] = 1/n\n\n\treturn conv", "def PLCTYPE_ARR_USINT(n: int) -> Type[Array]:\n return c_uint8 * n", "def arrayManipulation_brute(n, queries):\n arr = [0] * n\n\n for i, row in enumerate(queries):\n a, b, k = row[0], row[1], row[2]\n for j in range(a - 1, b):\n arr[j] = arr[j] + k\n print(f'array size {arr.__sizeof__()/1000000}')\n return max(arr)", "def generate_array_ints(n: int = 1024, max_int: int = 256, random_seed: int = None) -> TYPE_ARRAY:\n return _RNG.randint(0, max_int, n).astype(int)", "def orderByIncreasingBitCount(n):\n res = [0] # freebie\n biggest = 2**n - 1\n for i in range(1, n):\n for j in range(1, biggest):\n if hamming_weight(j) == i:\n res.append(j)\n res.append(biggest) # another freebie\n return res", "def bits(n):\n\n # Create a list of the first 1,000 binary numbers\n binary_list = reverse_binary_list()\n\n # Start by calculating number of 1's for n\n n_ones = num_of_ones(n, binary_list)\n\n # Calculate number of 1's for next value\n next_ones = 0\n while n_ones != next_ones:\n n = n + 1\n next_ones = num_of_ones(n, binary_list)\n\n return(n)", "def grayCode(self, n):\n res = [0]\n for i in range(0, n):\n res += [(1 << i) + x for x in reversed(res)]\n return res", "def make_b_array(n):\n array = np.linspace(-3, 3, n)\n for i, x in enumerate(array[1:-1], start=1):\n if abs(x) < 1:\n array[i] = 2\n else:\n array[i] = 0\n array[0] = 0\n array[n-1] = 0\n\n return array", "def n(l):\n return np.array(l,dtype=object)", "def bin_array(num, m):\n return np.array(list(np.binary_repr(num).zfill(m))).astype(np.int8)", "def identity(n,dtype=None):\n a = array([1]+n*[0],dtype=dtype)\n b = empty((n,n),dtype=dtype)\n b.flat = a\n return b", "def to_array(X, n=2):\n return np.array([np.eye(n)[x] for x in X])", "def PLCTYPE_ARR_SINT(n: int) -> Type[Array]:\n return c_int8 * n" ]
[ "0.70245016", "0.6837331", "0.65712476", "0.6568325", "0.6497333", "0.6471789", "0.6447051", "0.6431503", "0.6422565", "0.6368894", "0.6367631", "0.63026714", "0.62990963", "0.6229507", "0.620456", "0.61642796", "0.61545974", "0.6154187", "0.6134292", "0.60946274", "0.60587806", "0.6036479", "0.60330236", "0.5993042", "0.5992298", "0.59625304", "0.59459615", "0.5919867", "0.59080344", "0.589573" ]
0.8176775
0
Gets concept target values for an array, i.e. next life value, current life value, and number of live neighbors
def get_concept_target(a): curr_life = a[4] num_live = numpy.sum(a)-curr_life nl_g1 = 0 nl_g2 = 0 nl_g3 = 0 if (num_live > 1): nl_g1 = 1 if (num_live > 2): nl_g2 = 1 if (num_live > 3): nl_g3 = 1 if (num_live > 3 or num_live < 2): next_life = 0 elif num_live == 3: next_life = 1 else: next_life = curr_life return numpy.array([next_life,curr_life,nl_g1,nl_g2,nl_g3])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def narration_target(self):", "def get_targets(self, states, j):\n a = self.get_optimal_action(states, j)\n a = np.expand_dims(a, axis=1)*1\n return {'gt_action': a}", "def get_targets(self):\n\t\treturn self.prDoc['inputs']['data'][0]['targets']", "def make_target(self, state_index, traj):\n\n # The value target is the discounted root value of the search tree N steps\n # into the future, plus the discounted sum of all rewards until then.\n targets = []\n root_values = traj[\"root_value\"]\n rewards = traj[\"reward\"]\n child_visits = traj[\"child_visits\"]\n target_value = traj[\"target_value\"]\n obs = traj[\"cur_state\"]\n\n for current_index in range(state_index, state_index + self.unroll_step + 1):\n\n if current_index < len(root_values):\n targets.append((target_value[current_index], rewards[current_index], child_visits[current_index]))\n else:\n # States past the end of games are treated as absorbing states.\n targets.append((0, 0, []))\n return targets", "def targets(self) -> List[List[float]]:\n return [d.targets for d in self.data]", "def target(self) -> np.ndarray:\n return self._dist['target']", "def _target(self, data):\n relative_values = abs(data - data.mean())\n index = relative_values.idxmax()\n value = relative_values[index]\n return index, value", "def compute_targets(rollout, action_space, last_r=0.0, gamma=0.9, lambda_=1.0):\n\n rollout = compute_advantages(rollout, last_r, gamma=gamma, lambda_=lambda_)\n rollout[\"adv_targets\"] = np.zeros((rollout.count, action_space.n))\n rollout[\"adv_targets\"][np.arange(rollout.count), rollout[\"actions\"]] = \\\n rollout[\"advantages\"]\n rollout[\"value_targets\"] = rollout[\"rewards\"].copy()\n rollout[\"value_targets\"][:-1] += gamma * rollout[\"vf_preds\"][1:]\n return rollout", "def get_target(self, batch):\n # initialise array to store yj values\n target = np.zeros((len(batch[0]), self.num_actions))\n\n # loop over samples in the minibatch\n for j in range(len(batch[0])):\n\n a0_i = self.action_str2idx(batch[1][j])\n r0 = batch[2][j]\n done = batch[3][j]\n s1 = batch[4][j]\n\n # if terminating state\n if done:\n target[j, a0_i] = r0\n else:\n qs_target = self.target_Qmodel.predict(s1)\n target[j, a0_i] = r0 + self.gamma * np.max(qs_target)\n\n return target", "def get_next_target_addresses(self) -> List[str]:\n targets = []\n for edge in self._get_out_edges(self.active_pod):\n targets.append(self._get_target_pod(edge.pod).full_address)\n return targets", "def life(arr):\n\tres_arr = arr\n\tmax_x = len(arr[0]) - 1\n\tmax_y = len(arr) - 1\n\n\tfor y, y_value in enumerate(arr):\n\t\tfor x, x_value in enumerate(y_value):\n\t\t\tneighb_count = get_count_life_neighbor(arr, x, y, max_x, max_y)\n\t\t\tif x_value:\n\t\t\t\tif neighb_count < 2 or neighb_count > 3:\n\t\t\t\t\tres_arr[y][x] = False\n\t\t\telse:\n\t\t\t\tif neighb_count == 3:\n\t\t\t\t\tres_arr[y][x] = True\n\treturn res_arr", "def __getitem__(self, index):\r\n\r\n #current input in the sequence at t\r\n x = self.yInput[:, index]\r\n #input = x[t] not like that to create a matrix and not a vector\r\n \r\n #current target value at t\r\n target = self.yTarget[:, index]\r\n\r\n return (x, target)", "def __getitem__(self, index):\r\n\r\n #current input in the sequence at t\r\n x = self.yInput[:, index]\r\n #input = x[t] not like that to create a matrix and not a vector\r\n \r\n #current target value at t\r\n target = self.yTarget[:, index]\r\n\r\n return (x, target)", "def targets(self) -> Optional[jnp.ndarray]:\n pass", "def detERT(self, targets):\n res = []\n for f in 
targets:\n idx = (self.target<=f)\n try:\n res.append(self.ert[idx][0])\n except IndexError:\n res.append(np.inf)\n return res", "def _state(self):\n state = [] \n for _temp in self.config[\"performance_targets\"]:\n ID = _temp[0]\n attribute = _temp[1]\n state.append(self.methods[attribute](ID))\n \n for _temp in self.config[\"states\"]:\n ID = _temp[0]\n attribute = _temp[1]\n state.append(self.methods[attribute](ID))\n\n state = np.asarray(state)\n \n return state", "def get_active_target(self, inp_hist):\n go = inp_hist[:, 0]\n curr_targ = inp_hist[:, 3:5]\n next_targ = inp_hist[:, 5:7]\n return curr_targ * (1 - go[:, None]) + next_targ * go[:, None]", "def targets(self) -> List[List[float]]:\n if self.preload:\n return [[j[QM9.U0].item()] for j in self.data_ram] # if data_ram is a array of dicts\n # return self.data_ram[QM9.U0].numpy() # if data_ram is a dict of arrays\n else:\n return [[j[QM9.U0].item()] for j in self.data]", "def evaluate(self, triples):\n all_result = self.forward_eval(triples) # b x num_entities\n # uses tail as index to get target score\n target_result = all_result.gather(dim=-1, index=triples[:, 2].unsqueeze(-1))\n return all_result, target_result", "def predict_next_state_gt(self, states, actions):\n # TODO: write your code here\n\n # return [self.env.get_nxt_state(states[i], actions) for i in range(self.num_particles)]\n return np.array([[self.env.get_nxt_state(states[j][i], actions[j]) for i in range(self.num_particles)] for j in range(self.popsize)])", "def get_targets(self, sample, net_output):\n return sample[\"target\"]", "def step(self, actions):\r\n # Run actions\r\n actions = [np.argmax((action_scores+.0001) * mask) for action_scores, mask in zip(actions, self.get_avail_actions())]\r\n reward, terminated, info = self.env.step(actions)\r\n\r\n # Get updated state\r\n self.state = self.env.get_state()\r\n\r\n # Return arrays for each agent\r\n reward_n = [reward / self.n for _ in range(self.n)]\r\n terminated_n = [terminated for _ in range(self.n)]\r\n info_n = [info for _ in range(self.n)]\r\n observation_n = self.env.get_obs()\r\n\r\n return observation_n, reward_n, terminated_n, info_n", "def get_data(self, action):\n n, _ = self.contexts.shape\n ind = np.array([i for i in range(n) if self.actions[i] == action])\n return self.contexts[ind, :], self.rewards[ind, action]", "def get_targets(self, df):\n return df.iloc[:, self.target_col]", "def get_reward(self, actions, next_states):\n r = []\n for state in next_states:\n ended, winner = self.judge_terminal(state)\n if ended:\n r.append(winner)\n else:\n r.append(0)\n return np.array(r)", "def getstate(self):\r\n return [self.tied_indices,\r\n self.fixed_indices,\r\n self.fixed_values,\r\n self.constrained_indices,\r\n self.constraints]", "def _compute_targets(ex_rois, gt_rois):\n\n assert ex_rois.shape[0] == gt_rois.shape[0]\n assert ex_rois.shape[1] == 4\n assert gt_rois.shape[1] == 5\n\n return bbox_transform(ex_rois, gt_rois[:, :4]).astype(np.float32, copy=False)", "def _target(self, data):\n index = self._get_index(data)\n value = data[index]\n return index, abs(value - data.mean())", "def estimate(self, xs, max_n_speakers=15):\n\n xp = cuda.get_array_module(xs[0])\n zeros = [xp.zeros((max_n_speakers, self.n_units), dtype=xp.float32) for _ in xs]\n attractors = self.forward(xs, zeros)\n probs = [F.sigmoid(F.flatten(self.counter(att))) for att in attractors]\n return attractors, probs", "def _compute_targets(ex_rois, labels, gt_rois):\n\n assert ex_rois.shape[0] == gt_rois.shape[0]\n assert 
ex_rois.shape[1] == 4\n assert gt_rois.shape[1] == 8\n assert len(labels) == ex_rois.shape[0]\n\n # bbox_transform函数的输入是anchors, 和GT的坐标部分\n # 输出是一个N×2的矩阵,每行表示一个anchor与对应的IOU最大的GT的y,h回归,\n return bbox_transform(ex_rois, labels, gt_rois).astype(np.float32, copy=False)" ]
[ "0.59355456", "0.58669674", "0.5836947", "0.5732367", "0.566638", "0.5532835", "0.5528128", "0.550536", "0.5441657", "0.5401287", "0.5375301", "0.5360174", "0.5360174", "0.5349574", "0.52805483", "0.523364", "0.5230511", "0.5207085", "0.51966673", "0.5144492", "0.5144058", "0.5121771", "0.50999117", "0.5095465", "0.5088146", "0.50790787", "0.5072215", "0.5066599", "0.5054795", "0.5015782" ]
0.7112624
0
Attempts to retrieve the crash_id from a data dictionary
def get_crash_id(data: dict) -> int: try: return data["event"]["data"]["new"]["crash_id"] except (TypeError, KeyError): raise_critical_error( message="Unable to parse request body to identify a crash_id", data=data )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_city_id_from_db(crash_id: int) -> Optional[int]:\n if not str(crash_id).isdigit():\n print(\"It's bad\")\n return None\n\n get_city_id_query = \"\"\"\n query getCityId($crash_id:Int!){\n atd_txdot_crashes(where: {\n crash_id: {_eq:$crash_id}\n }){\n city_id\n }\n }\n \"\"\"\n\n try:\n response = requests.post(\n HASURA_ENDPOINT,\n data=json.dumps(\n {\n \"query\": get_city_id_query,\n \"variables\": {\n \"crash_id\": crash_id\n }\n }\n ),\n headers=HEADERS\n )\n return response.json()[\"data\"][\"atd_txdot_crashes\"][0][\"city_id\"]\n except Exception as e:\n print(f\"LIttle error: {str(e)}\")\n return None", "def process_unreal_crash(data):\n return Unreal4Crash.from_bytes(data)", "def _get_id(key):\n id = request.args.get(key)\n if not id:\n raise NoIDError()\n else:\n return id", "def getid(data):\n return int(data.split('/')[-1])", "def getRetKey(dictionary):\n retKey = \"\"\n try:\n if dictionary:\n retKey = dictionary.values()[0].keys()[0]\n except TypeError:\n logging.debug(\"type error\")\n\n return retKey", "def get_key_from_data_dict(data: dict, key: str):\n retrieved_key = data.get(key, None)\n if not retrieved_key:\n LOG.info(\n f\"Could not get key {key} from request to the API. Data received: {data}\"\n )\n return retrieved_key", "def retrieve_id(self, data_id):\n return self.database[data_id]", "def get_city_id(data: dict) -> Optional[int]:\n try:\n return data[\"event\"][\"data\"][\"new\"][\"city_id\"]\n except (TypeError, KeyError):\n return None", "def get_incident_id(row):\n additional_fields = row.get('additional_fields')\n generated = row.get('generatedTime') or row.get('generated_time') or row.get('GeneratedTime')\n event_id = row.get('event_id') or row.get('EventId') or row.get('eventId')\n instance_id = row.get('instance_id') or row.get('InstanceId') or row.get('instanceId')\n agent_id = row.get('agent_id') or row.get('AgentId') or row.get('agentId')\n data = [additional_fields, generated, event_id, instance_id, agent_id]\n row_data_string = ''\n for data_field in data:\n row_data_string += f'{data_field}_'\n row_id = hashlib.md5(row_data_string.encode('utf-8')).hexdigest() # nosec\n return row_id", "def get_original_city_id(data: dict) -> Optional[int]:\n try:\n return data[\"event\"][\"data\"][\"new\"][\"original_city_id\"]\n except (TypeError, KeyError):\n return None", "def __get_fault(self, mps_db_session, fault_id):\n fault = mps_db_session.query(models.Fault).filter(models.Fault.id==fault_id).all()\n\n if len(fault) == 1:\n return fault[0]\n elif len(fault) == 0:\n raise ValueError(\"Function \\\"__get_fault(fault_id={}). Not fault was found.\\\"\"\n .format(fault_id))\n else:\n raise ValueError(\"Function \\\"__get_fault(fault_id={}). 
More than one fault matches\\\"\"\n .format(fault_id))", "def find_log_id(xcresult_path):\n parsed = xcresulttool_json('get', '--path', xcresult_path)\n actions = parsed['actions']['_values']\n action = actions[-1]\n\n result = action['actionResult']['logRef']['id']['_value']\n _logger.debug('Using log id %s', result)\n return result", "def find_issue_id(self):", "def get_run_id(self):\n\t\tif self.have_metadata is False:\n\t\t\tself._get_metadata()\n\t\t\tself.have_metadata = True\n\n\t\ttry:\n\t\t\treturn self.keyinfo['tracking_id'].attrs['run_id']\n\t\texcept:\n\t\t\treturn None", "def _retrieve_task_id(job_name, res_id, job_dict):\n if job_dict:\n workers = list(job_dict.keys())\n for worker in workers:\n for job in job_dict[worker]:\n if 'name' in job:\n if job['name'] == job_name:\n if res_id in job['args']:\n return job['id']\n elif 'request' in job:\n scheduled_job = job['request']\n if 'name' in scheduled_job:\n if scheduled_job['name'] == job_name:\n if res_id in scheduled_job['args']:\n return scheduled_job['id']\n\n return None", "def is_crash_in_jurisdiction(crash_id: int) -> str:\n if not str(crash_id).isdigit():\n return \"N\"\n\n find_jurisdiction_query = \"\"\"\n query($crash_id:Int) {\n find_crash_in_jurisdiction(args: {jurisdiction_id: 5, given_crash_id: $crash_id}) {\n crash_id\n austin_full_purpose\n }\n }\n \"\"\"\n\n try:\n response = requests.post(\n HASURA_ENDPOINT,\n data=json.dumps(\n {\n \"query\": find_jurisdiction_query,\n \"variables\": {\n \"crash_id\": crash_id\n }\n }\n ),\n headers=HEADERS\n )\n within_jurisdiction = len(response.json()[\"data\"][\"find_crash_in_jurisdiction\"]) > 0\n return \"Y\" if within_jurisdiction else \"N\"\n except:\n return \"N\"", "def _get_vendor_id(device_dict):\n return device_dict['vendor_id'].split()[0].split('x')[-1]", "def _get_product_id(device_dict):\n return device_dict['product_id'].split('x')[-1]", "def get_pydantic_error_value(data: dict, loc: tuple):\n try:\n obj = data\n for item in loc:\n obj = obj[item]\n except KeyError:\n return None\n else:\n return obj", "def extract_uid(self, data):\n member = data.get('member', {})\n return str(member.get('id'))", "def getIdOrThrow(con, key, value, table):\n\n # create cache directory if not present\n Path(ID_CACHE_FILE.parent).mkdir(parents=True, exist_ok=True)\n \n path_exists = Path(ID_CACHE_FILE).exists()\n\n cache = None\n if (path_exists):\n cache = read_JSON(ID_CACHE_FILE)\n if key in cache:\n return cache[key]\n\n query = \"SELECT Id FROM {} WHERE {} = \\\"{}\\\"\".format(table, key, value)\n\n result = con.execute_query(query).fetchone()\n\n if (result is None):\n raise LookupError(\"Could not find {} in table {}\".format(value, table))\n\n if (path_exists == False):\n cache = {}\n\n cache[value] = result[0]\n\n write_JSON(cache, ID_CACHE_FILE)\n\n return str(cache[value])", "def __getIDFromCID(self, cid):\n if cid == \"daemon\": return self._did\n \n if cid in self._attachments or cid == self._did:\n return cid\n \n for k,v in self._attachments.items():\n if cid == v.cmd: return k\n \n return None", "def _get_bookmark_id(self, bookmark: str) -> str:\n try:\n UUID(bookmark)\n return bookmark\n except ValueError as bkmark_name:\n bookmarks = self.list_bookmarks()\n filtered_bookmarks = bookmarks[\n bookmarks[\"properties.displayName\"].str.contains(bookmark)\n ]\n if len(filtered_bookmarks) > 1:\n display(filtered_bookmarks[[\"name\", \"properties.displayName\"]])\n raise MsticpyUserError(\n \"More than one incident found, please specify by GUID\"\n ) from 
bkmark_name\n if (\n not isinstance(filtered_bookmarks, pd.DataFrame)\n or filtered_bookmarks.empty\n ):\n raise MsticpyUserError(\n f\"Incident {bookmark} not found\"\n ) from bkmark_name\n return filtered_bookmarks[\"name\"].iloc[0]", "def get(self, error_id):\n error = self.db.session.query(models.Error).get(error_id)\n if not error: # If invalid error id, bail out\n raise InvalidErrorReference(\"No error with id %s\" % str(error_id))\n return self.derive_error_dict(error)", "def get_fault_info(filenames=['disk_sample_fault_tag.csv', 'disk_sample_fault_tag_201808.csv']):\n fault_df1 = pd.read_csv(os.path.join(conf.DATA_DIR, filenames[0]))\n fault_df2 = pd.read_csv(os.path.join(conf.DATA_DIR, filenames[1]))\n fault_df2.drop(['key'], axis=1,inplace=True)\n fault_tag_df = pd.concat([fault_df1, fault_df2], ignore_index=True)\n fault_dic = {}\n \n for _, row in fault_tag_df.iterrows():\n f_time = row[\"fault_time\"]\n tag = row[\"tag\"]\n key = tuple([row[\"manufacturer\"], row[\"model\"], row[\"serial_number\"]])\n if key not in fault_dic.keys():\n sub_dic = {}\n sub_dic[\"date\"] = f_time\n sub_dic[\"tag\"] = tag\n fault_dic[key] = sub_dic\n return fault_dic", "def get_id():\n try:\n regd_no = request.form['regd_no']\n query_society_id = queries['get_society_id']\n query = query_society_id.format(regd_no)\n \n with dbm.dbManager() as manager:\n result = manager.getDataFrame(query)\n\n return jsonify(result.to_dict(orient='records'))\n except psycopg2.DatabaseError as error:\n errors = {'registeration': False, 'error': (error) }\n return str(errors)", "def get_valid_call_payload(event: event_models.Event) -> Dict[str, Any]:\n return {\"event_id\": event.id}", "def GetEventDataIdentifier(self):\n return self._event_data_identifier", "def _get_id(results, index):\n return results[index]['_id']", "def get_label_id(label_name):\n if label_to_int == {}:\n print(\"ERROR\")\n print(\"Need to import data first\")\n else:\n label_id = self._label_to_int[label_name]\n\n return label_id" ]
[ "0.6272437", "0.59513015", "0.55179", "0.54812336", "0.53696215", "0.5354128", "0.53469145", "0.52892274", "0.525855", "0.5231635", "0.5222822", "0.5222562", "0.5200039", "0.5149051", "0.5127858", "0.5088431", "0.50883", "0.50799894", "0.5075199", "0.50506073", "0.5049268", "0.5033375", "0.5026021", "0.50186175", "0.50181437", "0.5013356", "0.5008907", "0.50029314", "0.49983743", "0.4997059" ]
0.8355854
0
Returns the city id of the record in question
def get_city_id(data: dict) -> Optional[int]: try: return data["event"]["data"]["new"]["city_id"] except (TypeError, KeyError): return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_city_id(cls, city, state):\n city_instance = cls.query.filter_by(name=city, state=state).first()\n if city_instance:\n return city_instance.id\n\n city_instance = cls(name=city, state=state)\n try:\n db.session.add(city_instance)\n db.session.commit()\n except:\n city_instance = None\n db.session.rollback()\n finally:\n city_id = city_instance.id if city_instance else None\n\n return city_id", "def cityId(city_id):\n yy = storage.get(\"City\", str(city_id))\n if yy is None:\n abort(404)\n return jsonify(yy.to_dict())", "def cities_id(ident):\n cities = storage.all(\"City\").values()\n for c in cities:\n if c.id == ident:\n if request.method == 'GET':\n return getcity(c)\n elif request.method == 'PUT':\n return putcity(c)\n elif request.method == 'DELETE':\n return deletecity(c)\n abort(404, 'Not found')", "def get_city(self, territory_id: str = \"\"):", "def get_city(self, territory_id: str = \"\"):", "def get_original_city_id(data: dict) -> Optional[int]:\n try:\n return data[\"event\"][\"data\"][\"new\"][\"original_city_id\"]\n except (TypeError, KeyError):\n return None", "def city(self) -> str:\n return pulumi.get(self, \"city\")", "def city(self):\n return self._city", "def city(self):\n return self._city", "def city(self):\n return self._city", "def city(self):\n return self._city", "def city(self):\n return self._city", "def city_by_id(city_id):\n\n fetched_obj = storage.get(\"City\", str(city_id))\n\n if fetched_obj is None:\n abort(404)\n\n return jsonify(fetched_obj.to_json())", "def city(city_id):\n\n if storage.get(\"City\", city_id) is not None:\n return jsonify(storage.get(\"City\", city_id).to_dict())\n else:\n abort(404)", "def city():\r\n cursor.execute('SELECT city FROM american_cities \\\r\n order by RANDOM() limit 1;')\r\n return cursor.fetchone()[0]", "def city(self) -> Optional[str]:\n return pulumi.get(self, \"city\")", "def city(self):\n if \"city\" in self._prop_dict:\n return self._prop_dict[\"city\"]\n else:\n return None", "def city(self):\n if \"city\" in self._prop_dict:\n return self._prop_dict[\"city\"]\n else:\n return None", "def get_city(city_id):\n city = storage.get(\"City\", city_id)\n if city is None:\n abort(404)\n return jsonify(city.to_json())", "def city(self, instance):\r\n return instance.user.profile.city", "def cities_id(city_id):\n city = storage.get(City, city_id)\n if city:\n return jsonify(city.to_dict())\n else:\n abort(404)", "def get_city(city_id):\n city = storage.get(\"City\", city_id)\n if city is None:\n abort(404)\n return jsonify(city.to_dict())", "def GetCityFromAirportId(self, airprot_id):\n return self.airports.set_index('airport_id').loc[airprot_id]['city']", "def retrieve_city(city_id):\n city = storage.get('City', city_id)\n if city:\n return city.to_dict()\n abort(404)", "def city_by_id(city_id):\n cities_values = storage.all(\"City\").values()\n for obj in cities_values:\n if obj.id == city_id:\n return jsonify(obj.to_dict())\n abort(404)", "def find_city(city, dbsession):\n\n\t# Since we're creating the FK relation based on ID, and hence the casing has no bearing on \n\t# whether the city record associates with the address, I'm upcasing the city to prevent dupes.\n\tcity = str(city)\n\tcity = city.upper()\n\n\tresult = dbsession.query(db.City).filter_by(city_name=city).first()\n\n\tif result is None:\n\t\t# Create a new instance of city\n\t\tcity_object = db.City(city)\n\t\t# I'm adding the city without committing the transaction since it would also\n\t\t# commit the address insert transaction that's still open 
in routes.py.\n\t\tdbsession.add(city_object)\n\t\treturn city_object\n\telse:\n\t\t# Assign the existing user object to the variable\n\t\treturn result", "def city(self):\n # type: () -> string_types\n return self._city", "def retrieve_city(city_id):\n obj = models.storage.get(\"City\", city_id)\n if obj is not None:\n return jsonify(obj.to_dict())\n else:\n abort(404)", "def city():\r\n _cursor.execute('SELECT DISTINCT(name) FROM ca_cities where name is not null order by random() limit 1;')\r\n return _cursor.fetchone()[0].decode(\"utf-8\")", "def get_city_details(self, location_id):\n sql = \"SELECT * FROM [location] WHERE [id] = %d\"%(location_id)\n self.cursor.execute(sql)\n row = self.cursor.fetchone()\n city = row['city']\n state = row['region']\n zip_code = row['postal_code']\n provider = row['provider']\n ip_address_int = random.randint(3221225729, 3758096126) # Class C\n #ip_address = socket.inet_ntop(socket.AF_INET6, struct.pack('L', int(socket.htonl(ip_address_int))))\n ip_address = socket.inet_ntoa(hex(ip_address_int)[2:].zfill(8).decode('hex')) \n return [city, state, zip_code, provider, ip_address]" ]
[ "0.75250643", "0.70947176", "0.70381284", "0.6898604", "0.6898604", "0.6812364", "0.6685977", "0.6669821", "0.6669821", "0.6669821", "0.6669821", "0.6669821", "0.66373014", "0.6633529", "0.66121835", "0.6608844", "0.656693", "0.656693", "0.6549308", "0.6533387", "0.6527506", "0.65236247", "0.6519683", "0.6507999", "0.6500562", "0.640583", "0.64009154", "0.6393915", "0.63801426", "0.63275796" ]
0.7371334
1
Returns the original city id of the record in question
def get_original_city_id(data: dict) -> Optional[int]: try: return data["event"]["data"]["new"]["original_city_id"] except (TypeError, KeyError): return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_city_id(cls, city, state):\n city_instance = cls.query.filter_by(name=city, state=state).first()\n if city_instance:\n return city_instance.id\n\n city_instance = cls(name=city, state=state)\n try:\n db.session.add(city_instance)\n db.session.commit()\n except:\n city_instance = None\n db.session.rollback()\n finally:\n city_id = city_instance.id if city_instance else None\n\n return city_id", "def get_city_id(data: dict) -> Optional[int]:\n try:\n return data[\"event\"][\"data\"][\"new\"][\"city_id\"]\n except (TypeError, KeyError):\n return None", "def cityId(city_id):\n yy = storage.get(\"City\", str(city_id))\n if yy is None:\n abort(404)\n return jsonify(yy.to_dict())", "def cities_id(ident):\n cities = storage.all(\"City\").values()\n for c in cities:\n if c.id == ident:\n if request.method == 'GET':\n return getcity(c)\n elif request.method == 'PUT':\n return putcity(c)\n elif request.method == 'DELETE':\n return deletecity(c)\n abort(404, 'Not found')", "def city():\r\n _cursor.execute('SELECT DISTINCT(name) FROM ca_cities where name is not null order by random() limit 1;')\r\n return _cursor.fetchone()[0].decode(\"utf-8\")", "def city():\r\n cursor.execute('SELECT city FROM american_cities \\\r\n order by RANDOM() limit 1;')\r\n return cursor.fetchone()[0]", "def get_city(self, territory_id: str = \"\"):", "def get_city(self, territory_id: str = \"\"):", "def city(self):\n return self._city", "def city(self):\n return self._city", "def city(self):\n return self._city", "def city(self):\n return self._city", "def city(self):\n return self._city", "def GetCityFromAirportId(self, airprot_id):\n return self.airports.set_index('airport_id').loc[airprot_id]['city']", "def city(self, instance):\r\n return instance.user.profile.city", "def city(self) -> str:\n return pulumi.get(self, \"city\")", "def city(self):\n # type: () -> string_types\n return self._city", "def derive_id(row):\n fips = row.get('fips')\n if len(fips) == 2:\n # if row has fips length 2, then it is a state, and the id is state_abbr\n fid = row['state_abbr']\n elif re.match(r'\\d{5}', fips):\n # if row belongs to a county, then id is fips\n fid = fips\n elif fips == \"\":\n # if no fips, then we make up an id\n fid = f'99999-{row[\"state_abbr\"]}-{row[\"county\"]}'\n else: # this shouldn't happen\n import pdb; pdb.set_trace(); raise\n return fid", "def find_city(city, dbsession):\n\n\t# Since we're creating the FK relation based on ID, and hence the casing has no bearing on \n\t# whether the city record associates with the address, I'm upcasing the city to prevent dupes.\n\tcity = str(city)\n\tcity = city.upper()\n\n\tresult = dbsession.query(db.City).filter_by(city_name=city).first()\n\n\tif result is None:\n\t\t# Create a new instance of city\n\t\tcity_object = db.City(city)\n\t\t# I'm adding the city without committing the transaction since it would also\n\t\t# commit the address insert transaction that's still open in routes.py.\n\t\tdbsession.add(city_object)\n\t\treturn city_object\n\telse:\n\t\t# Assign the existing user object to the variable\n\t\treturn result", "def city_by_id(city_id):\n\n fetched_obj = storage.get(\"City\", str(city_id))\n\n if fetched_obj is None:\n abort(404)\n\n return jsonify(fetched_obj.to_json())", "def get_city_details(self, location_id):\n sql = \"SELECT * FROM [location] WHERE [id] = %d\"%(location_id)\n self.cursor.execute(sql)\n row = self.cursor.fetchone()\n city = row['city']\n state = row['region']\n zip_code = row['postal_code']\n provider = row['provider']\n 
ip_address_int = random.randint(3221225729, 3758096126) # Class C\n #ip_address = socket.inet_ntop(socket.AF_INET6, struct.pack('L', int(socket.htonl(ip_address_int))))\n ip_address = socket.inet_ntoa(hex(ip_address_int)[2:].zfill(8).decode('hex')) \n return [city, state, zip_code, provider, ip_address]", "def city_current(self, instance):\r\n return instance.user.profile.city_current", "def city(self) -> Optional[str]:\n return pulumi.get(self, \"city\")", "def case_id():\n return 3000", "def get_location_id(self):\n return self.cleaned_data['location_id']", "def find_ID(table):\n if field_exists(table, \"orig_ID\"):\n return \"orig_ID\"\n elif field_exists(table, \"ORIG_FID\"):\n return \"ORIG_FID\"\n else:\n return arcpy.Describe(table).OIDFieldName", "def city(city_id):\n\n if storage.get(\"City\", city_id) is not None:\n return jsonify(storage.get(\"City\", city_id).to_dict())\n else:\n abort(404)", "def city(self):\n if \"city\" in self._prop_dict:\n return self._prop_dict[\"city\"]\n else:\n return None", "def city(self):\n if \"city\" in self._prop_dict:\n return self._prop_dict[\"city\"]\n else:\n return None", "def get_actual_id(translated):" ]
[ "0.6964615", "0.66592705", "0.6169417", "0.6157738", "0.60501194", "0.6029745", "0.60122174", "0.60122174", "0.5994775", "0.5994775", "0.5994775", "0.5994775", "0.5994775", "0.594946", "0.5866604", "0.5851254", "0.57407475", "0.5719268", "0.5703641", "0.57028884", "0.56992984", "0.5674521", "0.56658185", "0.5654411", "0.56486815", "0.5586299", "0.5570531", "0.55701137", "0.55701137", "0.5560856" ]
0.75089175
0
Attempts to find the jurisdiction of a crash by its ID
def is_crash_in_jurisdictions(crash_id: int) -> bool: if not str(crash_id).isdigit(): return False find_crash_jurisdictions = """ query findCrashJurisdictions($crash_id:Int!) { find_crash_jurisdictions(args: {given_crash_id: $crash_id}) { id } } """ try: response = requests.post( HASURA_ENDPOINT, data=json.dumps( { "query": find_crash_jurisdictions, "variables": { "crash_id": crash_id } } ), headers=HEADERS ) valid_jurisdictions = [5, 3, 7, 8, 10] output_jurisdictions = list(map(lambda x: x['id'], response.json()["data"]["find_crash_jurisdictions"])) return get_jurisdiction_common_members(valid_jurisdictions, output_jurisdictions) != set() except Exception as e: print(f"Error {str(e)}") return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_crash_in_jurisdiction(crash_id: int) -> str:\n if not str(crash_id).isdigit():\n return \"N\"\n\n find_jurisdiction_query = \"\"\"\n query($crash_id:Int) {\n find_crash_in_jurisdiction(args: {jurisdiction_id: 5, given_crash_id: $crash_id}) {\n crash_id\n austin_full_purpose\n }\n }\n \"\"\"\n\n try:\n response = requests.post(\n HASURA_ENDPOINT,\n data=json.dumps(\n {\n \"query\": find_jurisdiction_query,\n \"variables\": {\n \"crash_id\": crash_id\n }\n }\n ),\n headers=HEADERS\n )\n within_jurisdiction = len(response.json()[\"data\"][\"find_crash_in_jurisdiction\"]) > 0\n return \"Y\" if within_jurisdiction else \"N\"\n except:\n return \"N\"", "def find_issue_id(self):", "def get_city_id_from_db(crash_id: int) -> Optional[int]:\n if not str(crash_id).isdigit():\n print(\"It's bad\")\n return None\n\n get_city_id_query = \"\"\"\n query getCityId($crash_id:Int!){\n atd_txdot_crashes(where: {\n crash_id: {_eq:$crash_id}\n }){\n city_id\n }\n }\n \"\"\"\n\n try:\n response = requests.post(\n HASURA_ENDPOINT,\n data=json.dumps(\n {\n \"query\": get_city_id_query,\n \"variables\": {\n \"crash_id\": crash_id\n }\n }\n ),\n headers=HEADERS\n )\n return response.json()[\"data\"][\"atd_txdot_crashes\"][0][\"city_id\"]\n except Exception as e:\n print(f\"LIttle error: {str(e)}\")\n return None", "def get_bug(self, id, year=None):\n year = self.get_year(id, switch='bugs') if year is None else year\n directory = self.get_bugs_path(year)\n for path in self._get_files(directory, pattern='bugs.*.json'):\n bugs = helpers.load_json(path)\n for bug in bugs:\n if id == bug['id']:\n return bug\n raise Exception('No bug identified by {}'.format(id))", "def get_crash_id(data: dict) -> int:\n try:\n return data[\"event\"][\"data\"][\"new\"][\"crash_id\"]\n except (TypeError, KeyError):\n raise_critical_error(\n message=\"Unable to parse request body to identify a crash_id\",\n data=data\n )", "def lookup_by_id(i_d):\n imdb_id = 0\n str_id = str(i_d)\n if str_id[0].isdigit():\n #contact the moviedb api for inmdb id\n res = requests.get(\n f\"https://api.themoviedb.org/3/movie/{i_d}/external_ids?api_key=28dda9f76d76f128b47831768bc9a103\")\n res.raise_for_status()\n mov = res.json()\n imdb_id = mov[\"imdb_id\"]\n else:\n imdb_id = i_d\n # Contact API\n try:\n response = requests.get(\n f\"http://www.omdbapi.com/?i={imdb_id}&apikey=ced7be9a\")\n response.raise_for_status()\n except requests.RequestException:\n return None\n\n # parse response\n try:\n movie = response.json()\n return {\n \"title\":movie[\"Title\"],\n \"id\":movie[\"imdbID\"],\n \"plot\":movie[\"Plot\"],\n \"year\":movie[\"Year\"],\n \"poster\":movie[\"Poster\"],\n \"gross\":movie[\"BoxOffice\"],\n \"rating\":movie[\"imdbRating\"],\n \"website\":movie[\"Website\"],\n \"director\":movie[\"Director\"],\n \"writer\":movie[\"Writer\"],\n \"genre\":movie[\"Genre\"],\n \"actors\":movie[\"Actors\"]\n }\n\n except (KeyError, TypeError, ValueError):\n return None", "def find(key):\n return ItopapiPrototype.find(ItopapiIncident, key)", "def _resolve_id(self, id):\n if len(id) == 40:\n if os.path.exists(self.tracker.get_issue_path(id)):\n return id\n else:\n raise BadReference('No matching issue on disk: %s' % id)\n # glob the path returned by the tracker helper method\n matches = glob.glob(self.tracker.get_issue_path(id + '*'))\n # no matches, raise bad ref:\n if not matches:\n raise BadReference('No matching issue on disk: %s' % id)\n # multiple matches, raise ambiguous ref:\n if len(matches) > 1:\n raise AmbiguousReference('Multiple issues 
matched that id fragment')\n # one match, return the match\n head = os.path.split(matches[0])[0]\n match_id = os.path.split(head)[1]\n return match_id", "def get_problem(id):\n return query(WEB_EXAMPLE_BASE + f\"/classical/problem/{id}\")", "def lookup():", "def find_log_id(xcresult_path):\n parsed = xcresulttool_json('get', '--path', xcresult_path)\n actions = parsed['actions']['_values']\n action = actions[-1]\n\n result = action['actionResult']['logRef']['id']['_value']\n _logger.debug('Using log id %s', result)\n return result", "def find_one_issue(connection, jsql):\n result = connection.retrieve_search(jsql, 0, 1)\n\n if result is None:\n raise ConnectionError(\"Fail to check if issue exist. Http status %d, %s\" %\n (connection.http_status, connection.value(\"errorMessages/0\")))\n\n cnt = int(connection.value(\"total\"))\n\n if cnt == 0:\n return None\n elif cnt == 1:\n return connection.get_issue(0)\n else:\n raise LookupError(\"Many issues are found with '%s'\" % jsql)", "def ReporterReference(pidofreporter):\n try:\n pid_list = []\n Mcafee_Reporter_pid = getpid(pidofreporter)\n print \"Now\",Mcafee_Reporter_pid\n listofpid = list(Mcafee_Reporter_pid)\n pid_list.append(listofpid[1])\n split_pids_by_space = [words for segments in pid_list for words in segments.split()]\n print \"split_pids_by_space\", split_pids_by_space\n reporter_current_pid = int(''.join(map(str,split_pids_by_space[1])))\n print \"reporter_current_pid\", reporter_current_pid\n Mcafee_Reporter_Reference = getAppRefByPidofapp(reporter_current_pid)\n #print \"Mcafee_Reporter_Reference\", Mcafee_Reporter_Reference\n except Exception as er:\n return False\n print \"Not able to get Reporter details\"\n print Mcafee_Reporter_Reference\n return Mcafee_Reporter_Reference", "def Get(id):\n try:\n bug = Bug.get_by_id(id)\n if not bug:\n raise InvalidIdError\n except (db.Error, InvalidIdError), e:\n logging.error('bug.Get: Exception while retrieving bug (%s): %s', id, e)\n raise InvalidIdError('Bug not found [id=%s].%s' % (id, e))\n return bug", "def identify_disease(*arguments):\n\tsymptom_list = []\n\tfor symptom in arguments:\n\t\tsymptom_list.append(symptom)\n\t# Handle key error\n\treturn symptom_map[str(symptom_list)]", "def get_species_id( species ):\n\n species = species.strip( ).lower( )\n result = 1 # (non-sensical) fail-safe if there is no match in the loop\n for species_key in Species_Dict:\n if species in Species_Dict[ species_key ]:\n result = Species_Dict[ species_key ][ 0 ] # change assignment if you want to return another list element\n break\n return result", "def get_problem(problem_id):\n Firebase = firebase.FirebaseApplication('https://team1robotsim.firebaseio.com/', None)\n result = Firebase.get('/problems', 'id_' + str(problem_id))\n if result is None:\n return jsonify(Error(404, \"Problem not found\")), status.HTTP_404_NOT_FOUND\n else:\n return jsonify(result)", "def problem(self, identifier):\n return self._get(\"problems/%d\" % identifier).json()", "def lookup_concept(wikidataId):\n\n if cfg.USE_CACHE:\n if (wikidataId in LOOKUP_DICT):\n return LOOKUP_DICT[wikidataId]\n \n response_json = wa.request_entity_fishing_concept_lookup(wikidataId)\n \n if not response_json:\n return nan\n\n if 'statements' not in response_json.keys():\n return nan\n\n for statement in response_json['statements']:\n if statement['propertyId'] == 'P1566':\n logger.debug('GeoNamesID found for %s: %s', wikidataId, statement['value'])\n if cfg.USE_CACHE:\n LOOKUP_DICT[wikidataId] = statement['value']\n return 
statement['value']\n\n \n return nan", "def __get_fault(self, mps_db_session, fault_id):\n fault = mps_db_session.query(models.Fault).filter(models.Fault.id==fault_id).all()\n\n if len(fault) == 1:\n return fault[0]\n elif len(fault) == 0:\n raise ValueError(\"Function \\\"__get_fault(fault_id={}). Not fault was found.\\\"\"\n .format(fault_id))\n else:\n raise ValueError(\"Function \\\"__get_fault(fault_id={}). More than one fault matches\\\"\"\n .format(fault_id))", "def filter_bridge_domain_dump_by_id(data, bd_id):\n\n for item in data:\n if str(item[\"bd_id\"]) == str(bd_id):\n return item\n else:\n raise RuntimeError(\"Bridge domain not found by id {id}.\".format(id=bd_id))", "def find_index(segmentation, stroke_id):\n for i, symbol in enumerate(segmentation):\n for sid in symbol:\n if sid == stroke_id:\n return i\n return -1", "def getErrorByCode(SID, errorid, langId):\n return call(\"getErrorByCode\", SID, errorid, langId)", "def find_label_by_id(self, _id):\n search = True\n i = 0\n while search:\n if i == len(self.labels):\n break;\n\n if self.labels[i].id == _id:\n return self.labels[i]\n search = False\n #print self.labels[i].id\n i += 1\n if search:\n return None", "def rvid_lookup(onid):\n rvid = id_mapping.loc[id_mapping['open_neuro_id'] == onid, 'SUBJECT_NUMBER'].values[0]\n return rvid", "def find_id_categorie_in_database(self, db):\n\n try:\n select_query = \"SELECT id_categorie FROM categorie WHERE categorie_name='\"+self.categorie_name+\"';\"\n result = db.query(select_query)\n self.id_categorie = result[0][\"id_categorie\"]\n\n\n except IntegrityError as int_err:\n print(\"There was an integrity error while selecting id categorie\")\n print(int_err)\n\n except ProgrammingError as prg_err:\n print(\"There was a programming error while selecting id categorie\")\n print(prg_err)", "def get_project(db, id):\n \n for element in db:\n if element['project_no'] == id:\n return element\n return None", "def get_code_from_registry( id ):\n #print \"Trying to get code with key %s\" % id\n return _theRegistry.get_code( id )", "def get_scanid_from_lookup_table(archivepath, lookup):\n basename = os.path.basename(os.path.normpath(archivepath))\n source_name = basename[:-len(datman.utils.get_extension(basename))]\n lookupinfo = lookup[ lookup['source_name'] == source_name ]\n\n if len(lookupinfo) == 0:\n debug(\"{} not found in source_name column.\".format(source_name))\n return (None, None)\n else: \n scanid = lookupinfo['target_name'].tolist()[0]\n return (scanid, lookupinfo)", "def rclass_id_lookup(cur):\n if 'rclass_id' not in _tables:\n cur.execute(\"SELECT name, rclass_id FROM rclass\")\n _tables['rclass_id'] = dict(cur)\n return _tables['rclass_id']" ]
[ "0.6507495", "0.55955935", "0.55866814", "0.54629683", "0.5459501", "0.5459319", "0.51034653", "0.510335", "0.5093428", "0.50861627", "0.50335515", "0.50307775", "0.5009739", "0.50014627", "0.4972644", "0.495175", "0.49425152", "0.49007264", "0.48689076", "0.4845339", "0.48214388", "0.47727045", "0.47657573", "0.4748428", "0.4739666", "0.47322074", "0.47272387", "0.47242266", "0.47184268", "0.46982926" ]
0.6285635
1
Attempts to find the jurisdiction of a crash by its ID
def is_crash_in_jurisdiction(crash_id: int) -> str: if not str(crash_id).isdigit(): return "N" find_jurisdiction_query = """ query($crash_id:Int) { find_crash_in_jurisdiction(args: {jurisdiction_id: 5, given_crash_id: $crash_id}) { crash_id austin_full_purpose } } """ try: response = requests.post( HASURA_ENDPOINT, data=json.dumps( { "query": find_jurisdiction_query, "variables": { "crash_id": crash_id } } ), headers=HEADERS ) within_jurisdiction = len(response.json()["data"]["find_crash_in_jurisdiction"]) > 0 return "Y" if within_jurisdiction else "N" except: return "N"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_crash_in_jurisdictions(crash_id: int) -> bool:\n if not str(crash_id).isdigit():\n return False\n\n find_crash_jurisdictions = \"\"\"\n query findCrashJurisdictions($crash_id:Int!) {\n find_crash_jurisdictions(args: {given_crash_id: $crash_id}) {\n id\n }\n }\n \"\"\"\n\n try:\n response = requests.post(\n HASURA_ENDPOINT,\n data=json.dumps(\n {\n \"query\": find_crash_jurisdictions,\n \"variables\": {\n \"crash_id\": crash_id\n }\n }\n ),\n headers=HEADERS\n )\n valid_jurisdictions = [5, 3, 7, 8, 10]\n output_jurisdictions = list(map(lambda x: x['id'], response.json()[\"data\"][\"find_crash_jurisdictions\"]))\n return get_jurisdiction_common_members(valid_jurisdictions, output_jurisdictions) != set()\n\n except Exception as e:\n print(f\"Error {str(e)}\")\n return False", "def find_issue_id(self):", "def get_city_id_from_db(crash_id: int) -> Optional[int]:\n if not str(crash_id).isdigit():\n print(\"It's bad\")\n return None\n\n get_city_id_query = \"\"\"\n query getCityId($crash_id:Int!){\n atd_txdot_crashes(where: {\n crash_id: {_eq:$crash_id}\n }){\n city_id\n }\n }\n \"\"\"\n\n try:\n response = requests.post(\n HASURA_ENDPOINT,\n data=json.dumps(\n {\n \"query\": get_city_id_query,\n \"variables\": {\n \"crash_id\": crash_id\n }\n }\n ),\n headers=HEADERS\n )\n return response.json()[\"data\"][\"atd_txdot_crashes\"][0][\"city_id\"]\n except Exception as e:\n print(f\"LIttle error: {str(e)}\")\n return None", "def get_bug(self, id, year=None):\n year = self.get_year(id, switch='bugs') if year is None else year\n directory = self.get_bugs_path(year)\n for path in self._get_files(directory, pattern='bugs.*.json'):\n bugs = helpers.load_json(path)\n for bug in bugs:\n if id == bug['id']:\n return bug\n raise Exception('No bug identified by {}'.format(id))", "def get_crash_id(data: dict) -> int:\n try:\n return data[\"event\"][\"data\"][\"new\"][\"crash_id\"]\n except (TypeError, KeyError):\n raise_critical_error(\n message=\"Unable to parse request body to identify a crash_id\",\n data=data\n )", "def lookup_by_id(i_d):\n imdb_id = 0\n str_id = str(i_d)\n if str_id[0].isdigit():\n #contact the moviedb api for inmdb id\n res = requests.get(\n f\"https://api.themoviedb.org/3/movie/{i_d}/external_ids?api_key=28dda9f76d76f128b47831768bc9a103\")\n res.raise_for_status()\n mov = res.json()\n imdb_id = mov[\"imdb_id\"]\n else:\n imdb_id = i_d\n # Contact API\n try:\n response = requests.get(\n f\"http://www.omdbapi.com/?i={imdb_id}&apikey=ced7be9a\")\n response.raise_for_status()\n except requests.RequestException:\n return None\n\n # parse response\n try:\n movie = response.json()\n return {\n \"title\":movie[\"Title\"],\n \"id\":movie[\"imdbID\"],\n \"plot\":movie[\"Plot\"],\n \"year\":movie[\"Year\"],\n \"poster\":movie[\"Poster\"],\n \"gross\":movie[\"BoxOffice\"],\n \"rating\":movie[\"imdbRating\"],\n \"website\":movie[\"Website\"],\n \"director\":movie[\"Director\"],\n \"writer\":movie[\"Writer\"],\n \"genre\":movie[\"Genre\"],\n \"actors\":movie[\"Actors\"]\n }\n\n except (KeyError, TypeError, ValueError):\n return None", "def find(key):\n return ItopapiPrototype.find(ItopapiIncident, key)", "def _resolve_id(self, id):\n if len(id) == 40:\n if os.path.exists(self.tracker.get_issue_path(id)):\n return id\n else:\n raise BadReference('No matching issue on disk: %s' % id)\n # glob the path returned by the tracker helper method\n matches = glob.glob(self.tracker.get_issue_path(id + '*'))\n # no matches, raise bad ref:\n if not matches:\n raise BadReference('No matching issue on 
disk: %s' % id)\n # multiple matches, raise ambiguous ref:\n if len(matches) > 1:\n raise AmbiguousReference('Multiple issues matched that id fragment')\n # one match, return the match\n head = os.path.split(matches[0])[0]\n match_id = os.path.split(head)[1]\n return match_id", "def get_problem(id):\n return query(WEB_EXAMPLE_BASE + f\"/classical/problem/{id}\")", "def lookup():", "def find_one_issue(connection, jsql):\n result = connection.retrieve_search(jsql, 0, 1)\n\n if result is None:\n raise ConnectionError(\"Fail to check if issue exist. Http status %d, %s\" %\n (connection.http_status, connection.value(\"errorMessages/0\")))\n\n cnt = int(connection.value(\"total\"))\n\n if cnt == 0:\n return None\n elif cnt == 1:\n return connection.get_issue(0)\n else:\n raise LookupError(\"Many issues are found with '%s'\" % jsql)", "def find_log_id(xcresult_path):\n parsed = xcresulttool_json('get', '--path', xcresult_path)\n actions = parsed['actions']['_values']\n action = actions[-1]\n\n result = action['actionResult']['logRef']['id']['_value']\n _logger.debug('Using log id %s', result)\n return result", "def ReporterReference(pidofreporter):\n try:\n pid_list = []\n Mcafee_Reporter_pid = getpid(pidofreporter)\n print \"Now\",Mcafee_Reporter_pid\n listofpid = list(Mcafee_Reporter_pid)\n pid_list.append(listofpid[1])\n split_pids_by_space = [words for segments in pid_list for words in segments.split()]\n print \"split_pids_by_space\", split_pids_by_space\n reporter_current_pid = int(''.join(map(str,split_pids_by_space[1])))\n print \"reporter_current_pid\", reporter_current_pid\n Mcafee_Reporter_Reference = getAppRefByPidofapp(reporter_current_pid)\n #print \"Mcafee_Reporter_Reference\", Mcafee_Reporter_Reference\n except Exception as er:\n return False\n print \"Not able to get Reporter details\"\n print Mcafee_Reporter_Reference\n return Mcafee_Reporter_Reference", "def Get(id):\n try:\n bug = Bug.get_by_id(id)\n if not bug:\n raise InvalidIdError\n except (db.Error, InvalidIdError), e:\n logging.error('bug.Get: Exception while retrieving bug (%s): %s', id, e)\n raise InvalidIdError('Bug not found [id=%s].%s' % (id, e))\n return bug", "def identify_disease(*arguments):\n\tsymptom_list = []\n\tfor symptom in arguments:\n\t\tsymptom_list.append(symptom)\n\t# Handle key error\n\treturn symptom_map[str(symptom_list)]", "def get_species_id( species ):\n\n species = species.strip( ).lower( )\n result = 1 # (non-sensical) fail-safe if there is no match in the loop\n for species_key in Species_Dict:\n if species in Species_Dict[ species_key ]:\n result = Species_Dict[ species_key ][ 0 ] # change assignment if you want to return another list element\n break\n return result", "def get_problem(problem_id):\n Firebase = firebase.FirebaseApplication('https://team1robotsim.firebaseio.com/', None)\n result = Firebase.get('/problems', 'id_' + str(problem_id))\n if result is None:\n return jsonify(Error(404, \"Problem not found\")), status.HTTP_404_NOT_FOUND\n else:\n return jsonify(result)", "def problem(self, identifier):\n return self._get(\"problems/%d\" % identifier).json()", "def lookup_concept(wikidataId):\n\n if cfg.USE_CACHE:\n if (wikidataId in LOOKUP_DICT):\n return LOOKUP_DICT[wikidataId]\n \n response_json = wa.request_entity_fishing_concept_lookup(wikidataId)\n \n if not response_json:\n return nan\n\n if 'statements' not in response_json.keys():\n return nan\n\n for statement in response_json['statements']:\n if statement['propertyId'] == 'P1566':\n logger.debug('GeoNamesID found for %s: 
%s', wikidataId, statement['value'])\n if cfg.USE_CACHE:\n LOOKUP_DICT[wikidataId] = statement['value']\n return statement['value']\n\n \n return nan", "def __get_fault(self, mps_db_session, fault_id):\n fault = mps_db_session.query(models.Fault).filter(models.Fault.id==fault_id).all()\n\n if len(fault) == 1:\n return fault[0]\n elif len(fault) == 0:\n raise ValueError(\"Function \\\"__get_fault(fault_id={}). Not fault was found.\\\"\"\n .format(fault_id))\n else:\n raise ValueError(\"Function \\\"__get_fault(fault_id={}). More than one fault matches\\\"\"\n .format(fault_id))", "def filter_bridge_domain_dump_by_id(data, bd_id):\n\n for item in data:\n if str(item[\"bd_id\"]) == str(bd_id):\n return item\n else:\n raise RuntimeError(\"Bridge domain not found by id {id}.\".format(id=bd_id))", "def find_index(segmentation, stroke_id):\n for i, symbol in enumerate(segmentation):\n for sid in symbol:\n if sid == stroke_id:\n return i\n return -1", "def getErrorByCode(SID, errorid, langId):\n return call(\"getErrorByCode\", SID, errorid, langId)", "def find_label_by_id(self, _id):\n search = True\n i = 0\n while search:\n if i == len(self.labels):\n break;\n\n if self.labels[i].id == _id:\n return self.labels[i]\n search = False\n #print self.labels[i].id\n i += 1\n if search:\n return None", "def rvid_lookup(onid):\n rvid = id_mapping.loc[id_mapping['open_neuro_id'] == onid, 'SUBJECT_NUMBER'].values[0]\n return rvid", "def find_id_categorie_in_database(self, db):\n\n try:\n select_query = \"SELECT id_categorie FROM categorie WHERE categorie_name='\"+self.categorie_name+\"';\"\n result = db.query(select_query)\n self.id_categorie = result[0][\"id_categorie\"]\n\n\n except IntegrityError as int_err:\n print(\"There was an integrity error while selecting id categorie\")\n print(int_err)\n\n except ProgrammingError as prg_err:\n print(\"There was a programming error while selecting id categorie\")\n print(prg_err)", "def get_project(db, id):\n \n for element in db:\n if element['project_no'] == id:\n return element\n return None", "def get_code_from_registry( id ):\n #print \"Trying to get code with key %s\" % id\n return _theRegistry.get_code( id )", "def get_scanid_from_lookup_table(archivepath, lookup):\n basename = os.path.basename(os.path.normpath(archivepath))\n source_name = basename[:-len(datman.utils.get_extension(basename))]\n lookupinfo = lookup[ lookup['source_name'] == source_name ]\n\n if len(lookupinfo) == 0:\n debug(\"{} not found in source_name column.\".format(source_name))\n return (None, None)\n else: \n scanid = lookupinfo['target_name'].tolist()[0]\n return (scanid, lookupinfo)", "def rclass_id_lookup(cur):\n if 'rclass_id' not in _tables:\n cur.execute(\"SELECT name, rclass_id FROM rclass\")\n _tables['rclass_id'] = dict(cur)\n return _tables['rclass_id']" ]
[ "0.62852407", "0.5597144", "0.55872697", "0.5461747", "0.5458556", "0.5457154", "0.5104035", "0.5101891", "0.5091695", "0.50856304", "0.50348383", "0.5034417", "0.5008962", "0.50002253", "0.4971581", "0.49519747", "0.49415794", "0.48994198", "0.48683766", "0.48447788", "0.48195377", "0.47711495", "0.47652084", "0.47454184", "0.47391453", "0.4735793", "0.4727949", "0.4720922", "0.47177556", "0.4697941" ]
0.65068835
0
Concatenate the labels and predictions. If normalization was performed on the labels, undo the normalization.
def _finalize_labels_and_prediction(self): y_pred = torch.cat(self.y_pred, dim=0) y_true = torch.cat(self.y_true, dim=0) if (self.mean is not None) and (self.std is not None): # To compensate for the imbalance between labels during training, # we normalize the ground truth labels with training mean and std. # We need to undo that for evaluation. y_pred = y_pred * self.std + self.mean return y_pred, y_true
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def consolidate_predictions(self, examples_raw, examples, preds):\n assert len(examples_raw) == len(examples)\n assert len(examples_raw) == len(preds)\n\n ret = []\n for i, (sentence, labels) in enumerate(examples_raw):\n _, _, mask,_ = examples[i]\n labels_ = [l for l, m in zip(preds[i], mask) if m] # only select elements of mask.\n #print(len(labels_),len(labels))\n assert len(labels_) == len(labels)\n ret.append([sentence, labels, labels_])\n return ret", "def consolidate_predictions(self, examples_raw, examples, preds):\n assert len(examples_raw) == len(examples)\n assert len(examples_raw) == len(preds)\n\n ret = []\n for i, (sentence, labels, attributes) in enumerate(examples_raw):\n _, _, mask, _ = examples[i]\n labels_ = None\n #print(\"labels:\", labels)\n #print(\"preds unmasked:\", preds[i])\n labels_gt = labels[:]\n labels_ = [l for l, m in zip(preds[i], mask) if m] # only select elements of mask.\n #print(\"preds:\", labels_)\n assert len(labels_) == len(labels_gt)\n #print(\"labels np:\", np.array(labels))\n #print(\" \")\n ret.append([sentence, labels_gt, labels_, attributes])\n #print(\"Predictions (sent, true, pred): \", ret)\n return ret", "def consolidate_labels(labels):\n return map(RNN_model.consolidate_label , labels)", "def normalize_labels(self):\n self.y_mean, self.y_std = du.get_mean_std(self.y_train)\n self.y_train = du.normalize(self.y_train, self.y_mean, self.y_std)\n if self.x_test is not None and self.y_test is not None:\n self.y_test = du.normalize(self.y_test, self.y_mean, self.y_std)", "def post_process_predictions(self, labels: Labels, scene: Scene) -> Labels:\n return labels", "def after_pred(self):\n # If training then skip\n if self.training:\n return\n\n # Get ground truths in epoch 0 i.e. start of training\n if self.epoch == 0:\n self.y_true.extend(self.y.cpu().flatten().numpy())\n\n # Get predictions from each batch and add them to prediction container\n y_pred = self.pred.detach().cpu()\n \n self.y_pred.extend(y_pred.flatten().numpy())", "def prepare_for_predict(self) -> None:\n _, self.all_labels_embed = self._create_all_labels_embed()", "def predict(self, inputs):\n if self.use_logistic:\n return self.predict_labels_logistic(self.w, inputs)\n return predict_labels(self.w, inputs)", "def _reformat_predictions(self,\n y_true: List[List[int]],\n y_pred: List[List[int]],\n input_ids: List[List[str]]\n ) -> Tuple[List[List[str]],\n List[List[str]],\n List[List[str]]]:\n # Map indexes to labels and remove ignored indexes\n true_list = [[] for _ in range(len(y_true))]\n pred_list = [[] for _ in range(len(y_pred))]\n input_list = [[] for _ in range(len(input_ids))]\n\n for i in range(len(y_true)):\n for j in range(len(y_true[0])):\n if y_true[i][j] != CrossEntropyLoss().ignore_index:\n true_list[i].append(self.inds2labels[y_true[i][j]])\n pred_list[i].append(self.inds2labels[y_pred[i][j]])\n input_list[i].append(input_ids[i][j])\n\n return true_list, pred_list, input_list", "def merge(self, other):\r\n self._train_datas = np.concatenate(\r\n [self._train_datas, other._train_datas], 0)\r\n self._train_labels = np.concatenate(\r\n [self._train_labels, other._train_labels], 0)", "def _construct_clf_opt_X(predictions):\n\n return np.concatenate([predictions['yinf'], predictions['ymatch'], predictions['ynet']], axis=1)", "def post_process_predictions(self, labels, scene):\n pass", "def encode_predictions(y_test: NpArray) -> NpArray:\n print(\"y_test.shape after merge\", y_test.shape)\n\n # extract top K values\n y_test = np.argsort(y_test, axis=1)[:, 
-TOPK:]\n y_test = np.flip(y_test, axis=1)\n print(\"y_test\", y_test.shape)\n\n n_test = y_test.shape[0]\n pred = np.zeros((n_test, TOPK), dtype=object)\n\n for col in range(TOPK):\n answers = keras.utils.to_categorical(y_test[:, col])\n pred[:, col] = label_binarizer.inverse_transform(answers)\n\n joined_pred = np.zeros(n_test, dtype=object)\n for row in range(n_test):\n joined_pred[row] = \" \".join(pred[row, :])\n\n return joined_pred", "def combined_predictions(classifiers, test_attributes):\n all_predictions = list()\n for model in classifiers:\n predictions = model.predict(test_attributes)\n all_predictions.append(predictions)\n comb_predictions = all_predictions[0]\n for i in range(len(all_predictions[0])):\n predicted_labels = list()\n for fold in all_predictions:\n predicted_labels.append(fold[i])\n predicted_labels = np.array(predicted_labels)\n labels, freq = np.unique(predicted_labels, return_counts=True)\n comb_predictions[i] = labels[np.argmax(freq)]\n return comb_predictions", "def predict(self, texts: List[ParsedText]) -> List[ParsedText]:\n self.model.eval()\n texts = copy.deepcopy(texts)\n\n batches = DataLoader(texts=texts,\n batch_size=self.batch_size,\n vocabulary=self.vocabulary,\n aspect_labels=self.aspect_labels,\n device=self.device)\n for batch_index, batch in enumerate(batches):\n logits = self.model(embed_ids=batch.embed_ids, sentence_len=batch.sentence_len)\n pred_labels = th.argmax(logits.to('cpu'), dim=1)\n pred_sentences_opinions = self._get_opinions(\n labels_indexes=pred_labels,\n sentence_len=[x.item() for x in batch.sentence_len.to('cpu')])\n for internal_index, opinions in enumerate(pred_sentences_opinions):\n text_index = batch.text_index[internal_index]\n sentence_index = batch.sentence_index[internal_index]\n sentence_nodes = texts[text_index].sentences[\n sentence_index].nodes_sentence_order()\n\n explicit_opinions = []\n explicit_categories = set()\n for opinion in opinions:\n opinion.nodes = [sentence_nodes[x] for x in opinion.nodes]\n explicit_opinions.append(opinion)\n explicit_categories.add(opinion.category)\n for opinion in texts[text_index].sentences[sentence_index].opinions:\n if (not opinion.nodes) and (opinion.category in explicit_categories):\n texts[text_index].sentences[sentence_index].opinions.remove(opinion)\n texts[text_index].sentences[sentence_index].opinions.extend(explicit_opinions)\n return texts", "def final_predictions(x, y, x_tk, y_tk):\n # TODO: Train neural network using model_final\n model = model_final(x.shape,y.shape[1],\n len(x_tk.word_index)+1,\n len(y_tk.word_index)+1)\n model.summary()\n model.fit(x, y, batch_size=1024, epochs=25, validation_split=0.2)\n\n \n ## DON'T EDIT ANYTHING BELOW THIS LINE\n y_id_to_word = {value: key for key, value in y_tk.word_index.items()}\n y_id_to_word[0] = '<PAD>'\n\n sentence = 'he saw a old yellow truck'\n sentence = [x_tk.word_index[word] for word in sentence.split()]\n sentence = pad_sequences([sentence], maxlen=x.shape[-1], padding='post')\n sentences = np.array([sentence[0], x[0]])\n predictions = model.predict(sentences, len(sentences))\n\n print('Sample 1:')\n print(' '.join([y_id_to_word[np.argmax(x)] for x in predictions[0]]))\n print('Il a vu un vieux camion jaune')\n print('Sample 2:')\n print(' '.join([y_id_to_word[np.argmax(x)] for x in predictions[1]]))\n print(' '.join([y_id_to_word[np.max(x)] for x in y[0]]))", "def update_predictions(self, context):\n x, y, o = context.get_predictions()\n self.x_eval += x\n self.y_eval += y\n self.o_eval += o\n 
self.write_predictions(o)", "def update(self, loss, pred_classes, gold_classes, pred_probas=None,\n gold_probas=None):\n self.loss += loss\n self.nb_batches += 1\n # unmask & flatten predictions and gold labels before storing them\n mask = gold_classes != constants.TARGET_PAD_ID\n self.pred_classes.extend(unroll(unmask(pred_classes, mask)))\n self.gold_classes.extend(unroll(unmask(gold_classes, mask)))\n if pred_probas is not None and gold_probas is not None:\n self.pred_probas.extend(unroll(unmask(pred_probas, mask)))\n self.gold_probas.extend(unroll(unmask(gold_probas, mask)))", "def predict(self, phrases):\n Z = self.pipeline.transform(phrases)\n labels = self.classifier.predict(Z)\n if self.duplicates:\n for i, phrase in enumerate(phrases):\n label = self.dupes.get(phrase)\n if label is not None:\n labels[i] = label\n return labels", "def _post_process_output(self,predictions,convert_to_string):\n normalized = [(p[0],REVERSE_ARROWS.get(p[1],p[1]),p[2]) for p in predictions]\n if convert_to_string:\n return ' '.join([\"%s%s\" % (p[0],p[1]) for p in normalized])\n return normalized", "def postprocess_model_outputs(self, predictions, expected):\n\n for key, val in predictions.items():\n predictions[key] = val.numpy()\n\n for key, val in expected.items():\n expected[key] = val.numpy()\n\n return predictions, expected", "def postprocess_model_outputs(self, predictions, expected):\n\n predictions = {k: t.numpy() for k, t in predictions.items()}\n\n return predictions, expected", "def forget_labels(labels_to_forget=\"none\"):\n\t\t\tassert labels_to_forget in {\"none\",\"originally unlabelled\",\"all\"}\n\t\t\tif labels_to_forget != \"none\":\n\t\t\t\tif labels_to_forget == \"originally unlabelled\":\n\t\t\t\t\tself.train_labels___0_unlab__neg1_exclud=self.train_orig_labels.copy()\n\t\t\t\telif labels_to_forget == \"all\":\n\t\t\t\t\tself.train_labels___0_unlab__neg1_exclud=np.zeros(self.num_train)\n\t\t\t\telse:\n\t\t\t\t\tassert False\n\t\t\t\tself.bool_train_labelled=(self.train_labels___0_unlab__neg1_exclud>0)\n\t\t\t\tself.bool_train_unlabelled=(self.train_labels___0_unlab__neg1_exclud==0)\n\t\t\t\tself.bool_train_excluded=(self.train_labels___0_unlab__neg1_exclud<0)\n\t\t\t\tself.num_train_labelled=sum(self.bool_train_labelled)\n\t\t\t\tself.num_train_unlabelled=sum(self.bool_train_unlabelled)\n\t\t\t\tself.num_train_excluded=sum(self.bool_train_excluded)", "def append(self, images, labels):\n\n self.train_data = np.concatenate((self.train_data, images), axis=0)\n self.train_labels = self.train_labels + labels", "def update(self, y_pred, y_true):\n self.y_pred.append(y_pred.detach().cpu())\n self.y_true.append(y_true.detach().cpu())", "def update(self, y_pred, y_true):\n self.y_pred.append(y_pred.detach().cpu())\n self.y_true.append(y_true.detach().cpu())", "def update(self, y_preds, labels):\r\n predicted_labels = torch.argmax(y_preds, dim=1)\r\n batch_confusion_matrix = self._fast_hist(labels.numpy().flatten(), predicted_labels.numpy().flatten())\r\n self.confusion_matrix += batch_confusion_matrix", "def _build_predictions(self, results, features, labels):\n predictions = flatten_dict({'results': results})\n # Add features and, if available, labels to predictions\n predictions.update(flatten_dict({'features': features}))\n if labels is not None:\n predictions.update(flatten_dict({'labels': labels}))\n\n if self._losses is not None:\n predictions['losses'] = self._losses\n\n return predictions", "def prediction_collate(cls, batch):\n return default_prediction_collate(batch)", "def 
_predict(self, X):\n predictions = np.asarray([clf.predict(X) for clf in self.clfs_]).T\n predicted_labels = self.combiner.combine(predictions)\n return predicted_labels" ]
[ "0.6221581", "0.6194209", "0.60707176", "0.60045135", "0.59993315", "0.5892911", "0.5884708", "0.5791044", "0.5764986", "0.57386476", "0.56580555", "0.56315595", "0.55635166", "0.5531881", "0.55164355", "0.55140215", "0.55046463", "0.5495391", "0.5449793", "0.54393893", "0.54250854", "0.5417234", "0.5413769", "0.54125875", "0.54073906", "0.54073906", "0.538915", "0.5386187", "0.5328229", "0.5327418" ]
0.7226459
0
Proxy function for a3.str5_cmyk
def str5_cmyk(cmyk): result = a3.str5_cmyk(cmyk) if result is None: return '' return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def intern(string): # real signature unknown; restored from __doc__\n return \"\"", "def c(k):\n if isinstance(k, str):\n return k.lower() if ord(k) % 2 == 0 else k.upper()\n return k", "def _GetKeyString(self):", "def _GetKeyString(self):", "def __le__(self, *args):\n return _libsbml.string___le__(self, *args)", "def crackSafe(self, n: int, k: int) -> str:", "def func0(s):\n\n return s+\"tsy\"", "def fips(self, cname: str)->str:\n return self.__call__(cname)", "def string_cache_key_adapter(obj):\n return obj", "def gluechops(string, key, n, funcref):\n\n messageparts = []\n chops = decode64chops(string) #Decode base64 strings into integer chops\n \n for chop in chops:\n value = funcref(chop, key, n) #Decrypt each chop\n block = transform.int2bytes(value)\n messageparts.append(block)\n\n # Combine decrypted strings into a msg\n return ''.join(messageparts)", "def test_strings_common_symbols():\n\n common_result = strings_ops.strings_common_symbols(\"hi\", \"hello\")\n assert common_result == \"h\"", "def __getitem__(self, *args):\n return _libsbml.string___getitem__(self, *args)", "def test():\n LowerCaseStr().from_python('ABC')", "def encodeString(*args, **kwargs)->AnyStr:\n pass", "def get_string(self, **kwargs):\n ...", "def ccut(value,arg):\n return value.replace(arg, '')", "def get_string2(self):\n pass", "def bytes_and_strings_are_cool(func):\n def inner(*args, **kwargs):\n nargs = tuple(map(lambda arg: e(arg) if isinstance(arg, str) else arg, args))\n nkwargs = dict(map(lambda k, v: (k, e(v)) if isinstance(v, str) else (k, v), kwargs))\n return func(*nargs, **nkwargs)\n return inner", "def c_chars(x):\r\n return (c_char * len(x))(*x)", "def _abc(i):\n if i < 26:\n return ABC_STRING[i]\n else:\n return _abc(i - 26) + ABC_STRING[i % 26] # sexy sexy recursion", "def test_string():", "def cisfun(text):\n text = text.replace(\"_\", \" \")\n return \"C {}\".format(text)", "def XorChiff(str:str, key:int)->str:\r\n output = \"\"\r\n for x in range(0, len(str)):\r\n output += chr(key ^ ord(str[x]))\r\n return output", "def string_from_invokedymanic(ins, cf):\n info = InvokeDynamicInfo.create(ins, cf)\n if not isinstance(info, StringConcatInvokeDynamicInfo):\n return\n \n return info.recipe", "def lcs(s, t):\n return 42", "def test_str(self):\n dummy = DummyCryptographicObject()\n str(dummy)", "def test_args_kwargs_properly_convert_to_string(self):\n self.apple.take_then_give_back(self.cherry)\n apple_take_cherry_key = 'tests.Fruit.take_then_give_back;MyNameIsApple,MyNameIsCherry;'\n self.assertExpectedKeyInCache(apple_take_cherry_key)", "def MakeKey(self, string, string_1, string_2):\n ...", "def stringfilter(func):\n @wraps(func)\n def _dec(*args, **kwargs):\n if args:\n args = list(args)\n args[0] = str(args[0])\n return func(*args, **kwargs)\n\n return _dec", "def stringcr(runtime_addr, exclude_terminator=False):\n\n runtime_addr = memorymanager.RuntimeAddr(runtime_addr)\n return stringterm(runtime_addr, 13, exclude_terminator)" ]
[ "0.56533873", "0.5508936", "0.5494697", "0.5494697", "0.5325098", "0.52597857", "0.52154547", "0.5180345", "0.5079391", "0.5076965", "0.50710034", "0.5046079", "0.50234175", "0.50144464", "0.5010689", "0.49747807", "0.4974715", "0.49191678", "0.48957407", "0.4875642", "0.48653305", "0.4861148", "0.48557544", "0.48447508", "0.48132643", "0.4811015", "0.4787513", "0.47842643", "0.47785988", "0.47784314" ]
0.7502337
0
Proxy function for a3.str5_hsv
def str5_hsv(hsv): result = a3.str5_hsv(hsv) if result is None: return '' return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applyHSV(img):\n\treturn applyColorMap(img, \"hsv\")", "def hsv(img):\n\tif img is None:\n\t\tprint \"Img is None\"\n\t\tsys.exit()\n\tif len(img.shape) > 2:\n\t\treturn cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n\treturn None", "def rgb2hsv(t):\n r,g,b = t\n r /= 255.0\n g /= 255.0\n b /= 255.0\n return rgb_to_hsv(r,g,b)", "def getHSV((r,g,b)):\n return rgb_to_hsv(r/255., g/255., b/255.)", "def rgb2hsv(red, green, blue):\n return colorsys.rgb_to_hsv(red, green, blue)", "def as_hsv(self):\n return rgb_to_hsv(*self.normalise().as_tuple())", "def _colorstr(self, args):", "def test_rgb_tuple_to_hsv(self):\r\n rth = rgb_tuple_to_hsv # for convenience\r\n self.assertEqual(rth((0, 0, 0)), (0, 0, 0))\r\n self.assertEqual(rth((255, 0, 0)), (0, 100, 100))\r\n self.assertEqual(rth((0, 255, 0)), (120, 100, 100))\r\n self.assertEqual(rth((0, 0, 255)), (240, 100, 100))\r\n assert_almost_equal(rth((127, 127, 127)), (0, 0, 49.803921568627452))", "def hsv(self):\n return colorsys.rgb_to_hsv(self.red, self.green, self.blue)", "def test_rgb_to_hsv():\n #Test rgb_to_hsv when MAX = MIN and MAX = 0:\n rgb = colormodel.RGB(0, 0, 0)\n hsv = a3.rgb_to_hsv(rgb)\n cunittest.assert_equals(\"0.000\", a3.round5(hsv.hue))\n cunittest.assert_equals(\"0.000\", a3.round5(hsv.saturation))\n cunittest.assert_equals(\"0.000\", a3.round5(hsv.value))\n \n #Test rgb_to_hsv when MAX = MIN and MAX <> 0:\n rgb = colormodel.RGB(100, 100, 100)\n hsv = a3.rgb_to_hsv(rgb)\n cunittest.assert_equals(\"0.000\", a3.round5(hsv.hue))\n cunittest.assert_equals(\"0.000\", a3.round5(hsv.saturation))\n cunittest.assert_equals(\"0.392\", a3.round5(hsv.value))\n \n #Test rgb_to_hsv when MAX = R and G >=B:\n rgb = colormodel.RGB(161, 42, 42)\n hsv = a3.rgb_to_hsv(rgb)\n cunittest.assert_equals(\"0.000\", a3.round5(hsv.hue))\n cunittest.assert_equals(\"0.739\", a3.round5(hsv.saturation))\n cunittest.assert_equals(\"0.631\", a3.round5(hsv.value))\n \n rgb = colormodel.RGB(161, 72, 42)\n hsv = a3.rgb_to_hsv(rgb)\n cunittest.assert_equals(\"15.13\", a3.round5(hsv.hue))\n cunittest.assert_equals(\"0.739\", a3.round5(hsv.saturation))\n cunittest.assert_equals(\"0.631\", a3.round5(hsv.value))\n \n #Test rgb_to_hsv when MAX = R and G < B:\n rgb = colormodel.RGB(161, 42, 72)\n hsv = a3.rgb_to_hsv(rgb)\n cunittest.assert_equals(\"344.9\", a3.round5(hsv.hue))\n cunittest.assert_equals(\"0.739\", a3.round5(hsv.saturation))\n cunittest.assert_equals(\"0.631\", a3.round5(hsv.value))\n \n #Test rgb_to_hsv when MAX = G:\n rgb = colormodel.RGB(17, 101, 19)\n hsv = a3.rgb_to_hsv(rgb)\n cunittest.assert_equals(\"121.4\", a3.round5(hsv.hue))\n cunittest.assert_equals(\"0.832\", a3.round5(hsv.saturation))\n cunittest.assert_equals(\"0.396\", a3.round5(hsv.value))\n \n #Test rgb_to_hsv when MAX = B:\n rgb = colormodel.RGB(21, 100, 255)\n hsv = a3.rgb_to_hsv(rgb)\n cunittest.assert_equals(\"219.7\", a3.round5(hsv.hue))\n cunittest.assert_equals(\"0.918\", a3.round5(hsv.saturation))\n cunittest.assert_equals(\"1.000\", a3.round5(hsv.value))", "def view_hsv(img):\n _, pixels = get_hsv_hist(img)\n\n view = np.zeros(img.data.shape)\n for hsv_col in pixels.keys():\n points = pixels[hsv_col]\n for p in points:\n #black case\n if hsv_col == -1:\n view[p[0]][p[1]] = [0, 255, 255]\n else:\n view[p[0]][p[1]] = [hsv_col, 128, 128]\n view = view.astype(np.uint8)\n col = cv2.cvtColor(view, cv2.COLOR_HSV2BGR)\n return ColorImage(col)", "def hsvHue(rgb):\n return rgbToHsv(rgb)[0]", "def getHSV(self):\n\t\tself.colour = [self.getH(), self.getS(),1]\n\t\treturn self.colour", "def 
toMage(self):\r\n h, s, v = self.Coords\r\n return '@hsvcolor {%s} %3.1f %3.1f %3.1f' % (self.Name, h, s, v)", "def rgb_to_hsv(x):\n # separating channels\n R = x[:,:,0]\n G = x[:,:,1]\n B = x[:,:,2]\n \n \n # h, s, v = hue, saturation, value \n # initial arrays for h, s and v filled with 0.0\n # we take R array just as 2D sample for copying the shape\n H = np.full_like(R, 0.0, dtype=np.double)\n S = np.full_like(R, 0.0, dtype=np.double)\n V = np.full_like(R, 0.0, dtype=np.double)\n \n HSV = np.full_like(x, 0.0, dtype=np.double)\n \n # np.max/min and axis=2 creates a 2D matrix\n C_max = np.max(x, axis=2) # maximum of r, g, b \n C_min = np.min(x, axis=2) # minimum of r, g, b \n Diff = C_max - C_min # diff of cmax and cmin. \n \n # Formula:\n # https://www.geeksforgeeks.org/program-change-rgb-color-model-hsv-color-model/\n \n # if cmax and cmax are equal (R=G=B) then h = 0 \n H[np.isclose(C_max, R, 0.0001)] = 0 \n \n # if cmax equal r \n m = np.isclose(C_max, R, 0.0001)&(Diff!=0)\n H[m] = (60 * ((G[m] - B[m]) / Diff[m]) + 360) % 360\n \n\n # if cmax equal g \n m = np.isclose(C_max, G, 0.0001)&(Diff!=0)\n H[m] = (60 * ((B[m] - R[m]) / Diff[m]) + 120) % 360\n \n # if cmax equal b \n m = np.isclose(C_max, B, 0.0001)&(Diff!=0)\n H[m] = (60 * ((R[m] - G[m]) / Diff[m]) + 240) % 360\n \n # if cmax equal zero \n S[C_max == 0] = 0\n \n # else\n m = (C_max != 0)\n S[m] = (Diff[m] / C_max[m])\n \n # compute v \n V = C_max\n \n # building new 3D picture\n HSV[:,:,0] = H\n HSV[:,:,1] = S\n HSV[:,:,2] = V\n \n return HSV", "def test_hsv_to_rgb():\n #Test hsv_to_rgb() when H is in the interval [0,60):\n hsv = colormodel.HSV(42, .6, .7)\n rgb = a3.hsv_to_rgb(hsv)\n cunittest.assert_equals(179, rgb.red)\n cunittest.assert_equals(146, rgb.green)\n cunittest.assert_equals(71, rgb.blue)\n \n #Test hsv_to_rgb() when H is in the interval [60,120):\n hsv = colormodel.HSV(94, .5, .5)\n rgb = a3.hsv_to_rgb(hsv)\n cunittest.assert_equals(91, rgb.red)\n cunittest.assert_equals(128, rgb.green)\n cunittest.assert_equals(64, rgb.blue)\n \n #Test hsv_to_rgb() when H is in the interval [120,180):\n #Also tests that hsv_to_rgb() properly handles values at the borders of\n #each interval of H\n hsv = colormodel.HSV(120, .5, .5)\n rgb = a3.hsv_to_rgb(hsv)\n cunittest.assert_equals(64, rgb.red)\n cunittest.assert_equals(128, rgb.green)\n cunittest.assert_equals(64, rgb.blue)\n \n #Test hsv_to_rgb() when H is in the interval [180,240):\n hsv = colormodel.HSV(216, .6, .3)\n rgb = a3.hsv_to_rgb(hsv)\n cunittest.assert_equals(31, rgb.red)\n cunittest.assert_equals(49, rgb.green)\n cunittest.assert_equals(77, rgb.blue)\n \n #Test hsv_to_rgb() when H is in the interval [240,300):\n hsv = colormodel.HSV(256, .2, .8)\n rgb = a3.hsv_to_rgb(hsv)\n cunittest.assert_equals(174, rgb.red)\n cunittest.assert_equals(163, rgb.green)\n cunittest.assert_equals(204, rgb.blue)\n \n #Test hsv_to_rgb() when H is in the interval [300,360):\n hsv = colormodel.HSV(343, .7, .3)\n rgb = a3.hsv_to_rgb(hsv)\n cunittest.assert_equals(77, rgb.red)\n cunittest.assert_equals(23, rgb.green)\n cunittest.assert_equals(38, rgb.blue)", "def setHsv ( self, h, s = 0.0, v = 0.0 ):\n #self.reset()\n\n # Check if first argument is list\n if isinstance(h, list):\n s = h[1]\n v = h[2]\n h = h[0]\n\n rgb = Colz.hsvToRgb( h, s, v )\n self.setRgba( rgb[0], rgb[1], rgb[2] )", "def get_hsv(rgb):\n rgb = rgb.lstrip(\"#\") # in case you have Web color specs\n r, g, b = (int(rgb[i : i + 2], 16) / 255 for i in range(0, 5, 2))\n return colorsys.rgb_to_hsv(r, g, b)", "def 
rgb_to_hsv(x):\n hsv = th.zeros(*x.size())\n c_min = x.min(0)\n c_max = x.max(0)\n\n delta = c_max[0] - c_min[0]\n\n # set H\n r_idx = c_max[1].eq(0)\n hsv[0][r_idx] = ((x[1][r_idx] - x[2][r_idx]) / delta[r_idx]) % 6\n g_idx = c_max[1].eq(1)\n hsv[0][g_idx] = 2 + ((x[2][g_idx] - x[0][g_idx]) / delta[g_idx])\n b_idx = c_max[1].eq(2)\n hsv[0][b_idx] = 4 + ((x[0][b_idx] - x[1][b_idx]) / delta[b_idx])\n hsv[0] = hsv[0].mul(60)\n\n # set S\n hsv[1] = delta / c_max[0]\n\n # set V - good\n hsv[2] = c_max[0]\n\n return hsv", "def np_hsv_saturation_histogram(s):\n title = \"HSV Saturation Histogram, mean=%.2f, std=%.2f\" % (np.mean(s), np.std(s))\n return np_histogram(s, title)", "def test_hsv2rgb(self):\n # Black\n assert hsv2rgb(0, 0, 0) == (0.0, 0.0, 0.0)\n # Cyan\n assert hsv2rgb(180, 1, 1) == (0.0, 1.0, 1.0)\n # Red\n assert hsv2rgb(0, 1, 1) == (1.0, 0.0, 0.0)\n # Lime\n assert hsv2rgb(120, 1, 1) == (0.0, 1.0, 0.0)\n # Blue\n assert hsv2rgb(240, 1, 1) == (0.0, 0.0, 1.0)", "def hsv_to_cvhsv(self, h, s, v):\r\n cv_h = int(179 * h / 360)\r\n cv_s = int(255 * s / 100)\r\n cv_v = int(255 * v / 100)\r\n colour = np.array([cv_h, cv_s, cv_v])\r\n return colour", "def _hsv_to_rgb(img):\n h, s, v = img.unbind(axis=-3)\n f = h * 6.0\n i = paddle.floor(f)\n f = f - i\n i = i.astype(paddle.int32) % 6\n\n p = paddle.clip(v * (1.0 - s), 0.0, 1.0)\n q = paddle.clip(v * (1.0 - s * f), 0.0, 1.0)\n t = paddle.clip(v * (1.0 - s * (1.0 - f)), 0.0, 1.0)\n\n mask = paddle.equal(\n i.unsqueeze(axis=-3),\n paddle.arange(6, dtype=i.dtype).reshape((-1, 1, 1)),\n ).astype(img.dtype)\n matrix = paddle.stack(\n [\n paddle.stack([v, q, p, p, t, v], axis=-3),\n paddle.stack([t, v, v, q, p, p], axis=-3),\n paddle.stack([p, p, t, v, v, q], axis=-3),\n ],\n axis=-4,\n )\n return paddle.einsum(\"...ijk, ...xijk -> ...xjk\", mask, matrix)", "def hsvToHsl ( h, s = 0.0, v = 0.0 ):\n # Check if first argument is list\n if isinstance(h, list):\n s = h[1]\n v = h[2]\n h = h[0]\n return Colz.rgbToHsl( Colz.hsbToRgb( h, s, v ) )", "def compute_new_hsv(im):\n eps = 1e-10\n r,g,b = np.array(cv2.split(im)) + eps\n traditional_hsv = cv2.cvtColor(im, cv2.COLOR_RGB2HSV)\n numerator = np.log(r) - np.log(g)\n denominator = np.log(r) + np.log(g) - 2*np.log(b) + eps\n new_hue = np.clip(np.round(numerator/denominator).astype(np.uint8), 0, 180)\n new_hsv = np.zeros_like(traditional_hsv).astype(np.uint8)\n new_hsv[:, :, 0] = new_hue\n new_hsv[:, :, 1] = traditional_hsv[:, :, 1]\n new_hsv[:, :, 2] = traditional_hsv[:, :, 2]\n return new_hsv", "def from_hsv(*hsv):\n return ScreenColor(*hsv_to_rgb(*hsv)).denormalise()", "def convert_rgb_hsv(rcol, gcol, bcol):\n\n mxi = max(rcol, gcol, bcol)\n mni = min(rcol, gcol, bcol)\n\n d_f = mxi-mni\n if mxi == mni:\n hcol = 0\n elif mxi == rcol:\n hcol = (60 * ((gcol-bcol)/d_f) + 360) % 360\n elif mxi == gcol:\n hcol = (60 * ((bcol-rcol)/d_f) + 120) % 360\n elif mxi == bcol:\n hcol = (60 * ((rcol-gcol)/d_f) + 240) % 360\n if mxi == 0:\n scol = 0\n else:\n scol = d_f/mxi\n vcol = mxi\n return hcol, scol, vcol", "def test_mage_hsv_tuple_to_rgb(self):\r\n htr = mage_hsv_tuple_to_rgb # for convenience\r\n self.assertEqual(htr((0, 0, 0)), (0, 0, 0))\r\n self.assertEqual(htr((0, 100, 100)), (255, 0, 0))\r\n self.assertEqual(htr((120, 100, 100)), (0, 255, 0))\r\n self.assertEqual(htr((240, 100, 100)), (0, 0, 255))\r\n assert_almost_equal(htr((0, 0, 49.803921568627452)), (127, 127, 127))", "def _color(self, args):", "def on_hsv_slide(self,h,s,v):\n if not self.active:\n return\n hue = h / 100.0\n sat = s / 100.0\n val = 
v / 100.0\n self.hsv = colormodel.HSV(hue, sat, val)\n temp = a3.hsv_to_rgb(self.hsv)\n assert (temp == None or type(temp) == colormodel.RGB), 'hsv_to_rgb does not return a RGB object'\n self.rgb = self.rgb if temp is None else temp\n self.cmyk = a3.rgb_to_cmyk(self.rgb);\n assert (self.cmyk == None or type(self.cmyk) == colormodel.CMYK), 'rgb_to_cmyk does not return a CMYK object'\n self.update()" ]
[ "0.68916756", "0.64323556", "0.6351842", "0.6342069", "0.6278692", "0.6187963", "0.6183965", "0.61620563", "0.61368906", "0.6113743", "0.6085936", "0.6053684", "0.602697", "0.5963562", "0.59115314", "0.58982813", "0.5862156", "0.5861341", "0.5852343", "0.58294475", "0.58177567", "0.58045584", "0.5798798", "0.57870394", "0.57723874", "0.5709417", "0.5694399", "0.5681544", "0.56738156", "0.5653627" ]
0.80301946
0
Register the slider with this parent widget
def on_slider(self, instance, value): self.slider.bind(value=self.update_proxy) self.bind(pos=self.hack_position) self.slider.bind(pos=self.hack_position)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def slider(self, parent, variable, low, high, label):\n widget = Scale(parent, orient='horizontal',\n from_=low, to=high, # range of slider\n # tickmarks on the slider \"axis\":\n tickinterval=(high-low)/5.0,\n # the steps of the counter above the slider:\n resolution=(high-low)/100.0,\n label=label, # label printed above the slider\n length=300, # length of slider in pixels\n variable=variable) # slider value is tied to variable\n widget.pack(side='top')\n return widget", "def __init__(self,name,value,*args,**kargs):\n InputInteger.__init__(self,name,value,*args,**kargs)\n self.slider = QtGui.QSlider(QtCore.Qt.Horizontal)\n self.slider.setTickPosition(QtGui.QSlider.TicksBelow)\n vmin = kargs.get('min',0)\n vmax = kargs.get('max',100)\n \n ticks = kargs.get('ticks',(vmax-vmin)/10)\n self.slider.setTickInterval(ticks)\n self.slider.setMinimum(vmin)\n self.slider.setMaximum(vmax)\n self.slider.setValue(value)\n self.slider.setSingleStep(1)\n #self.slider.setPageStep(5)\n self.slider.setTracking(1)\n self.connect(self.slider,QtCore.SIGNAL(\"valueChanged(int)\"),self.set_value)\n if kargs.has_key('func'):\n self.connect(self.slider,QtCore.SIGNAL(\"valueChanged(int)\"),kargs['func']) \n self.layout().addWidget(self.slider)", "def setParentItem(self, slider):\r\n slider.add_playhead(self)\r\n super(PlayHead, self).setParentItem(slider)", "def add_Slider(self, slider_rating):\n for item in slider_rating:\n slider = MySlider(item)\n self.layout.add_widget(slider)\n self.list_sliders.append(slider)", "def __create_slider(\n self,\n master: Misc,\n width: int,\n name: str,\n value_callback: Callable[[int], None],\n ) -> tuple[Frame, Spinbox, Scale]:\n slider_frame = Frame(master, width=width, bg=\"\")\n slider_frame.rowconfigure(2, weight=1)\n\n int_value = IntVar(slider_frame, 0)\n int_value.trace_add(\n \"write\", lambda var, index, mode: value_callback(int_value.get())\n )\n\n label = Label(slider_frame, text=name, font=Font(size=width // 5))\n label.grid(row=0, column=0)\n\n spinbox = Spinbox(\n slider_frame,\n from_=-self.__max_value,\n to=self.__max_value,\n textvariable=int_value,\n width=5,\n font=Font(size=width // 5),\n )\n spinbox.grid(row=1, column=0)\n\n scale = Scale(\n slider_frame,\n from_=-self.__max_value,\n to=self.__max_value,\n showvalue=False,\n width=width,\n variable=int_value,\n )\n\n scale.bind(\"<Double-Button-1>\", lambda _: int_value.set(0))\n scale.grid(row=2, column=0, sticky=\"ns\")\n\n if (increase := SLIDERS_BUTTONS.get(f\"{name}_increase\")) is not None:\n master.bind_all(\n f\"<KeyPress-{increase}>\",\n lambda _: int_value.set(min(2000, int_value.get() + SLIDERS_SPEED)),\n )\n if JOYSTICK_MODE:\n master.bind_all(f\"<KeyRelease-{increase}>\", lambda _: int_value.set(0))\n\n if (decrease := SLIDERS_BUTTONS.get(f\"{name}_decrease\")) is not None:\n master.bind_all(\n f\"<KeyPress-{decrease}>\",\n lambda _: int_value.set(max(-2000, int_value.get() - SLIDERS_SPEED)),\n )\n if JOYSTICK_MODE:\n master.bind_all(f\"<KeyRelease-{decrease}>\", lambda _: int_value.set(0))\n\n return slider_frame, spinbox, scale", "def __add_sliders_frame(self, width: int) -> None:\n sliders_frame = Frame(self)\n sliders_frame.place()\n\n self.add(sliders_frame, text=\"Sliders\")\n\n slider_width = round(width * 0.2)\n sliders_frame.rowconfigure(0, weight=1)\n sliders_frame.columnconfigure(0, pad=slider_width // 2)\n sliders_frame.columnconfigure(1, pad=slider_width // 2)\n sliders_frame.columnconfigure(2, pad=slider_width // 2)\n\n x_frame, self.__x_spinbox, self.__x_scale = 
self.__create_slider(\n master=sliders_frame,\n width=slider_width,\n name=\"X\",\n value_callback=lambda value: self.set_x(value),\n )\n y_frame, self.__y_spinbox, self.__y_scale = self.__create_slider(\n master=sliders_frame,\n width=slider_width,\n name=\"Y\",\n value_callback=lambda value: self.set_y(value),\n )\n z_frame, self.__z_spinbox, self.__z_scale = self.__create_slider(\n master=sliders_frame,\n width=slider_width,\n name=\"Z\",\n value_callback=lambda value: self.set_z(value),\n )\n\n self.__set_max_value(2)\n\n x_frame.grid(row=0, column=0, sticky=\"ns\")\n y_frame.grid(row=0, column=1, sticky=\"ns\")\n z_frame.grid(row=0, column=2, sticky=\"ns\")", "def slider_dragged(self):\n pass", "def _create_number_widget(self,frame,name,widget_options):\n w = TaggedSlider(frame,variable=self._tkvars[name],**widget_options)\n param = self.get_parameter_object(name)\n\n lower_bound,upper_bound = param.get_soft_bounds()\n\n if upper_bound is not None and lower_bound is not None:\n # TaggedSlider needs BOTH bounds (neither can be None)\n w.set_bounds(lower_bound,upper_bound,inclusive_bounds=param.inclusive_bounds)\n\n\n # have to do the lookup because subclass might override default\n if not lookup_by_class(self.param_immediately_apply_change,type(param)):\n w.bind('<<TagReturn>>', lambda e=None,x=name: self._handle_gui_set(x,force=True))\n w.bind('<<TagFocusOut>>', lambda e=None,x=name: self._handle_gui_set(x,force=True))\n w.bind('<<SliderSet>>', lambda e=None,x=name: self._handle_gui_set(x,force=True))\n\n return w", "def create_sliders(self):\n self.create_contrast_slider()\n self.create_crop_sliders()", "def add_sliders(self):\n\n\t\tself.scale_dict = {}\n\t\tinnov_str = simpledialog.askstring(\"Get Innovations.\", \n\t\t\t\"What innovation numbers do you want sliders for (separate with commas)?\")\n\t\tinnov_nums = innov_str.split(\",\")\n\t\tfor innov in innov_nums:\n\t\t\tself.scale_dict[innov] = tk.Scale(self, from_=-20, \n\t\t\t\tto=20, orient=tk.HORIZONTAL)\n\t\t\ttk.Label(self, text=\"Slider for #{0}\".format(innov)).pack()\n\t\t\tself.scale_dict[innov].pack(pady=10)\n\t\ttk.Button(self, text=\"Apply\", command=lambda: self.slider_update_CPPN()).pack()\n\n\n\t\ttk.Button(self, text=\"Exit\", command=self.quit).pack(pady=(20,10))", "def register_plugin(self):\n self.create_toggle_view_action()\n\n self.main.add_dockwidget(self)", "def makeSliders(self):\n #Builds the frame for the sliders\n self.stockFrame = Frame(height=400, width=400, bd=10)\n self.stockFrame.grid(row=1, column=1)\n\n #Adds labels to the frame\n self.lab1= Label(self.stockFrame, text=\"Asset 1\")\n self.lab1.grid(row=0, column=0, sticky=W)\n self.lab2= Label(self.stockFrame, text=\" Return: \")\n self.lab2.grid(row=1, column=0, sticky=E)\n self.lab3= Label(self.stockFrame, text=\" Risk: \")\n self.lab3.grid(row=2, column=0, sticky=E)\n self.lab4= Label(self.stockFrame, text=\"Asset 2\")\n self.lab4.grid(row=3, column=0, sticky=W)\n self.lab5= Label(self.stockFrame, text=\" Return: \")\n self.lab5.grid(row=4, column=0, sticky=E)\n self.lab6= Label(self.stockFrame, text=\" Risk: \")\n self.lab6.grid(row=5, column=0, sticky=E)\n self.lab7= Label(self.stockFrame, text=\"Asset 1, Asset 2\")\n self.lab7.grid(row=6, column=0, sticky=W, columnspan=2)\n self.lab8= Label(self.stockFrame, text=\"Covariance:\")\n self.lab8.grid(row=7, column=0, sticky=E)\n\n #Adds the sliding bars to the frame\n self.r1 = Scale(self.stockFrame, from_=0, to=0.2, \\\n resolution=0.01, orient=HORIZONTAL, length=200, \\\n 
command=self.update, showvalue=0)\n self.s1 = Scale(self.stockFrame, from_=0, to=0.2, \\\n resolution=0.01, orient=HORIZONTAL, length=200, \\\n command=self.update, showvalue=0)\n self.r2 = Scale(self.stockFrame, from_=0, to=0.2, \\\n resolution=0.01, orient=HORIZONTAL, length=200, \\\n command=self.update, showvalue=0)\n self.s2 = Scale(self.stockFrame, from_=0, to=0.2, \\\n resolution=0.01, orient=HORIZONTAL, length=200, \\\n command=self.update, showvalue=0)\n self.p = Scale(self.stockFrame, from_=-1, to=1, \\\n resolution=0.05, orient=HORIZONTAL, length=200, \\\n command=self.update, showvalue=0)\n\n #Organizes all the sliders \n self.r1.grid(row=1, column=2)\n self.s1.grid(row=2, column=2)\n self.r2.grid(row=4, column=2)\n self.s2.grid(row=5, column=2)\n self.p.grid(row=7, column=2)\n self.r1.set(0.12)\n self.s1.set(0.15)\n self.r2.set(0.07)\n self.s2.set(0.08)\n\n #Provides interactivity between sliders and graph\n self.r1_string = Label(self.stockFrame, text=self.r1.get())\n self.r1_string.grid(row=1, column=1)\n self.s1_string = Label(self.stockFrame, text=self.r1.get())\n self.s1_string.grid(row=2, column=1)\n self.r2_string = Label(self.stockFrame, text=self.r1.get())\n self.r2_string.grid(row=4, column=1)\n self.s2_string = Label(self.stockFrame, text=self.r1.get())\n self.s2_string.grid(row=5, column=1)\n self.p_string = Label(self.stockFrame, text=self.r1.get())\n self.p_string.grid(row=7, column=1)", "def create(self, parent):\n self.widget = QImageView(parent)", "def init_widget(self):", "def int_slider(init: int = 0, range: Tuple[int, int] = (0, 10), descr: str = '', data_type: type[Data] = Data):\n\n class StdInpWidget_IntSlider(StdInputWidgetBase, QSlider):\n def __init__(self, params):\n StdInputWidgetBase.__init__(self, params)\n QSlider.__init__(self, Qt.Horizontal)\n\n # tooltip\n self.setToolTip(self.__doc__)\n\n self.valueChanged.connect(self.value_changed)\n\n # initial value and rage\n with self._prevent_update:\n self.setRange(*range)\n self.setValue(init)\n\n @property\n def val(self) -> data_type:\n return data_type(self.value())\n\n def load_from(self, val: Data):\n with self._prevent_update:\n self.setValue(val.payload)\n\n def value_changed(self, _):\n self.on_widget_val_changed(self.val)\n\n def val_update_event(self, val: Data):\n if isinstance(val.payload, int):\n self.setValue(val.payload)\n\n StdInpWidget_IntSlider.__doc__ = descr\n\n return StdInpWidget_IntSlider", "def __init__(self,name,value,*args,**kargs):\n InputFloat.__init__(self,name,value,*args,**kargs)\n self.slider = QtGui.QSlider(QtCore.Qt.Horizontal)\n self.slider.setTickPosition(QtGui.QSlider.TicksBelow)\n self.scale = kargs.get('scale',1.0)\n self.func = kargs.get('func',None)\n\n vmin = kargs.get('min',0)\n vmax = kargs.get('max',100)\n ticks = kargs.get('ticks',(vmax-vmin)/10)\n self.slider.setTickInterval(ticks)\n self.slider.setMinimum(vmin)\n self.slider.setMaximum(vmax)\n self.slider.setValue(value/self.scale)\n self.slider.setSingleStep(1)\n #self.slider.setPageStep(5)\n self.slider.setTracking(1)\n self.connect(self.slider,QtCore.SIGNAL(\"valueChanged(int)\"),self.set_value)\n self.layout().addWidget(self.slider)", "def tag_set(self):\n # Set slider resolution. 
This is important because\n # we want the slider to be positioned at the exact\n # tag value.\n self._try_to_set_slider_resolution()\n self._try_to_set_slider()", "def add_slider(\n self,\n name: str,\n slider_min: Union[int, float] = 0,\n slider_max: Union[int, float] = 100,\n thumb_text=\"{value}\",\n steps: Optional[int] = None,\n default: Optional[Union[int, float]] = None,\n decimals: Optional[int] = 1,\n ):\n if default:\n default = float(default)\n\n # Is this even necessary?\n if default.is_integer():\n default = int(default)\n\n if slider_min > default or slider_max < default:\n raise ValueError(f\"Slider {name} had an out of bounds default value.\")\n self._client.results[name] = default\n\n slider = Slider(\n min=slider_min,\n max=slider_max,\n divisions=steps,\n label=thumb_text,\n value=default,\n round=decimals,\n )\n self._client.add_element(name=name, element=slider)", "def __init__(self, context):\n super(AnnotationPlugin, self).__init__(context)\n\n # Widget setup\n self.setObjectName('Label Plugin')\n\n self._widget = QWidget()\n context.add_widget(self._widget)\n self._widget.resize(800,1000)\n # Layout and attach to widget\n layout = QVBoxLayout() \n self._widget.setLayout(layout)\n\n self._image_widget = ImageWidget(self._widget, self._image_callback)\n layout.addWidget(self._image_widget)\n\n # Input field\n grid_layout = QGridLayout()\n layout.addLayout(grid_layout)\n\n grid_layout.addWidget(QLabel(\"Dilation size\"), 1, 1)\n\n self._sliderDil = QSlider(Qt.Horizontal)\n self._sliderDil.setMinimum(1)\n self._sliderDil.setMaximum(15)\n self._sliderDil.setValue(5)\n self._sliderDil.setTickPosition(QSlider.TicksBelow)\n self._sliderDil.setTickInterval(1)\n\n grid_layout.addWidget(self._sliderDil, 1, 2)\n\n grid_layout.addWidget(QLabel(\"Erosion size\"), 1, 3)\n\n self._sliderEros = QSlider(Qt.Horizontal)\n self._sliderEros.setMinimum(1)\n self._sliderEros.setMaximum(15)\n self._sliderEros.setValue(5)\n self._sliderEros.setTickPosition(QSlider.TicksBelow)\n self._sliderEros.setTickInterval(1)\n\n grid_layout.addWidget(self._sliderEros, 1, 4)\n\n self._edit_path_button = QPushButton(\"Edit path\")\n self._edit_path_button.clicked.connect(self._get_output_directory)\n grid_layout.addWidget(self._edit_path_button, 2, 1)\n\n self._output_path_edit = QLineEdit()\n self._output_path_edit.setDisabled(True)\n grid_layout.addWidget(self._output_path_edit, 2, 2)\n\n\n self.labels = []\n self._option_selector = QComboBox()\n self._option_selector.currentIndexChanged.connect(self.classChange)\n grid_layout.addWidget(self._option_selector, 2, 3)\n\n self.classImgs = []\n self._imgNum_label = QLabel(str(0))\n grid_layout.addWidget(self._imgNum_label, 2, 4)\n\n self._label_edit = QLineEdit()\n grid_layout.addWidget(self._label_edit, 3, 2)\n\n self._edit_labels_button = QPushButton(\"Add Label\")\n self._edit_labels_button.clicked.connect(self._add_label)\n grid_layout.addWidget(self._edit_labels_button, 3, 1)\n\n self._save_button = QPushButton(\"START\")\n self._save_button.clicked.connect(self.create_dataset_clicked)\n grid_layout.addWidget(self._save_button, 3, 3)\n\n self._test_button = QRadioButton(\"TestSet\")\n self._test_button.setChecked(False)\n grid_layout.addWidget(self._test_button, 3, 4)\n\n # Bridge for opencv conversion\n self.bridge = CvBridge()\n\n # Set subscriber to None\n self._sub = None\n self._srv = None\n\n self.interval = 3\n self.numImg = 0\n\tself.counter = 0\n self.save = False\n self.label = \"\"\n self.output_directory = \"\"\n self.cls_id = None", "def 
add_grid_slider(self, w):\n self.event_emitters[(w.__class__.__name__, w.type)].append(w)\n self.grid[(w.x, w.y)] = w\n self.control_sliders[w.control] = w", "def __init__(self, name, value=None, orientation=None, max=None, desc=None, prop=None, style=None, attr=None,\n disabled=False, onclick_callback=None, slider_changed_callback=None, app=None, css_cls=None):\n Widget.__init__(self, name, desc=desc, prop=prop, style=style, attr=attr,\n css_cls=css_cls)\n if value is None:\n self._value = 0\n else:\n self._value = value\n self._app = app\n self._slider_changed_callback = slider_changed_callback\n self._onclick_callback = onclick_callback\n self._disabled = disabled\n if orientation is None:\n self._orientation = \"horizontal\"\n else:\n self._orientation = orientation\n if max is None:\n self._max = 100\n else:\n self._max = max\n self.add_property('onclick', self._attach_onclick())", "def render(self):\n content = self._attach_css() + \"\\n\"\n content += self._render_pre_content('div')\n content += \"<div id='\" + self._name + \"_handle' class='ui-slider-handle'></div>\"\n content += self._render_post_content('div')\n self._widget_content = content + \"\\n\" + self._attach_script() + \"\\n\" + self._attach_polling()\n return self._widget_content", "def synchronize_slider(self):\n\n # Block slider signals to avoid a shortcut.\n self.slider_frames.blockSignals(True)\n\n if self.frame_ordering == \"quality\":\n self.quality_index = self.listWidget.currentRow()\n self.frame_index = self.quality_sorted_indices[self.quality_index]\n self.slider_frames.setValue(self.quality_index + 1)\n else:\n self.frame_index = self.listWidget.currentRow()\n self.quality_index = self.rank_indices[self.frame_index]\n self.slider_frames.setValue(self.frame_index + 1)\n\n # Unblock the slider signals again.\n self.slider_frames.blockSignals(False)\n\n # Update the image in the viewer.\n self.frame_selector.setPhoto(self.frame_index)", "def draw_app(self):\n self.num_points_slider = widgets.IntSlider(\n value=self.num_points,\n min=5,\n max=30,\n step=5,\n description='Number of points:',\n style = {'description_width': 'initial'}\n )\n self.num_points_slider.observe(self._on_num_points_change, ['value'])\n# self.slope_slider = widgets.FloatSlider(\n# value=self.slope,\n# min=-1,\n# max=5,\n# step=0.1,\n# description='Slope:'\n# )\n# self.slope_slider.observe(self._on_slope_change, ['value'])\n self.rand_slider = widgets.FloatSlider(\n value=self.rand,\n min=0,\n max=50,\n step=3,\n description='Randomness:', num_points=(10, 50, 5),\n style = {'description_width': 'initial'}\n )\n self.rand_slider.observe(self._on_rand_change, ['value'])\n self.container.children = [\n self.num_points_slider,\n# self.slope_slider,\n self.rand_slider ,\n self.output_widget\n ]", "def intSlider(*args, annotation: Union[AnyStr, bool]=\"\", backgroundColor: Union[List[float,\n float, float], bool]=None, changeCommand: Script=None, defineTemplate: AnyStr=\"\",\n docTag: Union[AnyStr, bool]=\"\", dragCallback: Script=None, dragCommand:\n Script=None, dropCallback: Script=None, enable: bool=True, enableBackground:\n bool=True, enableKeyboardFocus: bool=True, exists: bool=True, fullPathName:\n bool=True, height: Union[int, bool]=0, highlightColor: Union[List[float, float,\n float], bool]=None, horizontal: bool=True, isObscured: bool=True, manage:\n bool=True, maxValue: Union[int, bool]=0, minValue: Union[int, bool]=0,\n noBackground: bool=True, numberOfPopupMenus: bool=True, parent: Union[AnyStr,\n bool]=\"\", popupMenuArray: 
bool=True, preventOverride: bool=True,\n statusBarMessage: AnyStr=\"\", step: Union[int, bool]=0, useTemplate: AnyStr=\"\",\n value: Union[int, bool]=0, visible: bool=True, visibleChangeCommand: Union[Script,\n bool]=None, width: Union[int, bool]=0, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[AnyStr, Any]:\n pass", "def __init__(self, parent=None):\n super(Representative, self).__init__(parent)\n self.setupUi(self)", "def create_widgets(self):", "def load_new_sliders_(self):\n self.sliders = list()\n for joint in self.joints:\n slider = None\n slider_ui_file = os.path.join(\n rospkg.RosPack().get_path('gui_synergy_slider'), 'uis', 'Slider.ui')\n try:\n if joint.controller.controller_category == \"position_trajectory\":\n slider = EtherCATHandTrajectorySlider(\n joint, slider_ui_file, self, self._widget.scrollAreaWidgetContents)\n else:\n slider = EtherCATHandSlider(\n joint, slider_ui_file, self, self._widget.scrollAreaWidgetContents)\n except Exception, e:\n rospy.loginfo(e)\n\n if slider is not None:\n slider.setMaximumWidth(100)\n # Load the new slider\n self._widget.horizontalLayout.addWidget(slider)\n # Put the slider in the list\n self.sliders.append(slider)\n\n # Create the slider to move all the selected joint sliders\n selection_slider_ui_file = os.path.join(\n rospkg.RosPack().get_path('gui_synergy_slider'), 'uis', 'SelectionSlider.ui')\n self.selection_slider = EtherCATSelectionSlider(\n \"Change sel.\", 0, 100, selection_slider_ui_file, self, self._widget.scrollAreaWidgetContents)\n\n self.selection_slider.setMaximumWidth(100)\n self._widget.horizontalLayout.addWidget(self.selection_slider)", "def getWidget(self):", "def __init__(self, parent):\n\n super().__init__()\n\n self.color_depth = parent.color_depth\n self.original_hist = parent.calc_histogram()['b']\n self.img_data = parent.data.copy()\n self.current_img_data = None\n\n self.init_ui(self, [self.img_data.min(), self.img_data.max()])\n self.label_txt.setText(\"Choose the range for normalization:\")\n self.setWindowTitle(\"Normalize\")\n\n self.range_slider.left_value_changed.connect(self.update_left_value)\n self.range_slider.right_value_changed.connect(self.update_right_value)\n self.range_slider.range_chagned.connect(self.update_plot_preview)\n\n self.update_left_value()\n self.update_right_value()\n self.update_plot_preview()" ]
[ "0.6726716", "0.6464478", "0.6321544", "0.620911", "0.6139864", "0.61175656", "0.6081543", "0.6059349", "0.5923887", "0.58818233", "0.5880173", "0.584921", "0.5826456", "0.58105385", "0.5807407", "0.5802929", "0.57598364", "0.5745984", "0.5733525", "0.5701902", "0.5675679", "0.56668913", "0.56548285", "0.5650958", "0.55963105", "0.5557329", "0.5541406", "0.5509763", "0.54744214", "0.544491" ]
0.64797753
1
Call back to hsv sliders
def on_hsv_slide(self,h,s,v): if not self.active: return hue = h / 100.0 sat = s / 100.0 val = v / 100.0 self.hsv = colormodel.HSV(hue, sat, val) temp = a3.hsv_to_rgb(self.hsv) assert (temp == None or type(temp) == colormodel.RGB), 'hsv_to_rgb does not return a RGB object' self.rgb = self.rgb if temp is None else temp self.cmyk = a3.rgb_to_cmyk(self.rgb); assert (self.cmyk == None or type(self.cmyk) == colormodel.CMYK), 'rgb_to_cmyk does not return a CMYK object' self.update()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def slider_action(self, sender):\n self.r = self.rslider.value\n self.g = self.gslider.value\n self.b = self.bslider.value\n self.preview.background_color = self.rgb\n self.colorlabel.text = self.hexcode", "def ct_slider_value_changed(self):\n for (x, slider) in enumerate(self.sliders):\n # for x in range(0, len(self.sliders)):\n # slider = self.sliders[x]\n slider_value = float(slider.value()) / float(slider.maximum())\n # Use an square function for easier opacity adjustments\n converted_value = slider_value * slider_value * slider_value\n self.render_widget.sectionsOpacity[x] = converted_value\n\n self.render_widget.update()", "def makeSliders(self):\n #Builds the frame for the sliders\n self.stockFrame = Frame(height=400, width=400, bd=10)\n self.stockFrame.grid(row=1, column=1)\n\n #Adds labels to the frame\n self.lab1= Label(self.stockFrame, text=\"Asset 1\")\n self.lab1.grid(row=0, column=0, sticky=W)\n self.lab2= Label(self.stockFrame, text=\" Return: \")\n self.lab2.grid(row=1, column=0, sticky=E)\n self.lab3= Label(self.stockFrame, text=\" Risk: \")\n self.lab3.grid(row=2, column=0, sticky=E)\n self.lab4= Label(self.stockFrame, text=\"Asset 2\")\n self.lab4.grid(row=3, column=0, sticky=W)\n self.lab5= Label(self.stockFrame, text=\" Return: \")\n self.lab5.grid(row=4, column=0, sticky=E)\n self.lab6= Label(self.stockFrame, text=\" Risk: \")\n self.lab6.grid(row=5, column=0, sticky=E)\n self.lab7= Label(self.stockFrame, text=\"Asset 1, Asset 2\")\n self.lab7.grid(row=6, column=0, sticky=W, columnspan=2)\n self.lab8= Label(self.stockFrame, text=\"Covariance:\")\n self.lab8.grid(row=7, column=0, sticky=E)\n\n #Adds the sliding bars to the frame\n self.r1 = Scale(self.stockFrame, from_=0, to=0.2, \\\n resolution=0.01, orient=HORIZONTAL, length=200, \\\n command=self.update, showvalue=0)\n self.s1 = Scale(self.stockFrame, from_=0, to=0.2, \\\n resolution=0.01, orient=HORIZONTAL, length=200, \\\n command=self.update, showvalue=0)\n self.r2 = Scale(self.stockFrame, from_=0, to=0.2, \\\n resolution=0.01, orient=HORIZONTAL, length=200, \\\n command=self.update, showvalue=0)\n self.s2 = Scale(self.stockFrame, from_=0, to=0.2, \\\n resolution=0.01, orient=HORIZONTAL, length=200, \\\n command=self.update, showvalue=0)\n self.p = Scale(self.stockFrame, from_=-1, to=1, \\\n resolution=0.05, orient=HORIZONTAL, length=200, \\\n command=self.update, showvalue=0)\n\n #Organizes all the sliders \n self.r1.grid(row=1, column=2)\n self.s1.grid(row=2, column=2)\n self.r2.grid(row=4, column=2)\n self.s2.grid(row=5, column=2)\n self.p.grid(row=7, column=2)\n self.r1.set(0.12)\n self.s1.set(0.15)\n self.r2.set(0.07)\n self.s2.set(0.08)\n\n #Provides interactivity between sliders and graph\n self.r1_string = Label(self.stockFrame, text=self.r1.get())\n self.r1_string.grid(row=1, column=1)\n self.s1_string = Label(self.stockFrame, text=self.r1.get())\n self.s1_string.grid(row=2, column=1)\n self.r2_string = Label(self.stockFrame, text=self.r1.get())\n self.r2_string.grid(row=4, column=1)\n self.s2_string = Label(self.stockFrame, text=self.r1.get())\n self.s2_string.grid(row=5, column=1)\n self.p_string = Label(self.stockFrame, text=self.r1.get())\n self.p_string.grid(row=7, column=1)", "def update(self, rgb, cmyk, hsv):\n compRGB = a3.complement_rgb(rgb)\n if (compRGB is None):\n compRGB = rgb\n \n rgb_str = rgb_to_str(rgb)\n cmyk_str = '' if cmyk is None else str5_cmyk(cmyk) \n hsv_str = '' if hsv is None else str5_hsv(hsv)\n \n self.main.text = (\"Color\\nRGB: \" + rgb_str +\n \"\\nCMYK: \" + cmyk_str 
+\n \"\\nHSV: \" + hsv_str + \"\\n \\n\" +\n \"R,G,B sliders in: 0..255\\n\" +\n \"C,M,Y,K sliders: 0 to 100%\\n\" +\n \"H slider: 0 <= H < 360 degrees\\n\" +\n \"S,V sliders: 0 <= S,V <= 1\")\n self.main.background = rgb.glColor()\n self.main.foreground = compRGB.glColor()\n self.comp.text = (\"Color\\nRGB: \" + rgb_str +\n \"\\nCMYK: \" + cmyk_str +\n \"\\nHSV: \" + hsv_str + \"\\n \\n\" +\n \"R,G,B sliders in: 0..255\\n\" +\n \"C,M,Y,K sliders: 0 to 100%\\n\" +\n \"H slider: 0 <= H < 360 degrees\\n\" +\n \"S,V sliders: 0 <= S,V <= 1\" )\n self.comp.background = compRGB.glColor()\n self.comp.foreground = rgb.glColor()\n \n # set the sliders\n self.rSlider.value = rgb.red*100\n self.gSlider.value = rgb.green*100\n self.bSlider.value = rgb.blue*100\n self.cSlider.value = 0 if cmyk is None else cmyk.cyan*100 \n self.mSlider.value = 0 if cmyk is None else cmyk.magenta*100\n self.ySlider.value = 0 if cmyk is None else cmyk.yellow*100\n self.kSlider.value = 0 if cmyk is None else cmyk.black*100\n self.hSlider.value = 0 if hsv is None else hsv.hue*100\n self.sSlider.value = 0 if hsv is None else hsv.saturation*100\n self.vSlider.value = 0 if hsv is None else hsv.value*100", "def on_hsv_press(self,h,s,v):\n self.hsv = colormodel.HSV(h, s, v)\n temp = a3.hsv_to_rgb(self.hsv)\n assert (temp == None or type(temp) == colormodel.RGB), 'hsv_to_rgb does not return a RGB object'\n self.rgb = self.rgb if temp is None else temp\n self.cmyk = a3.rgb_to_cmyk(self.rgb);\n assert (self.cmyk == None or type(self.cmyk) == colormodel.CMYK), 'rgb_to_cmyk does not return a CMYK object'\n self.update()", "def __init__(self,name,value,*args,**kargs):\n InputFloat.__init__(self,name,value,*args,**kargs)\n self.slider = QtGui.QSlider(QtCore.Qt.Horizontal)\n self.slider.setTickPosition(QtGui.QSlider.TicksBelow)\n self.scale = kargs.get('scale',1.0)\n self.func = kargs.get('func',None)\n\n vmin = kargs.get('min',0)\n vmax = kargs.get('max',100)\n ticks = kargs.get('ticks',(vmax-vmin)/10)\n self.slider.setTickInterval(ticks)\n self.slider.setMinimum(vmin)\n self.slider.setMaximum(vmax)\n self.slider.setValue(value/self.scale)\n self.slider.setSingleStep(1)\n #self.slider.setPageStep(5)\n self.slider.setTracking(1)\n self.connect(self.slider,QtCore.SIGNAL(\"valueChanged(int)\"),self.set_value)\n self.layout().addWidget(self.slider)", "def draw_spiders(self, spiders, graph, positions, draw_box_labels=True):", "def slider_dragged(self):\n pass", "def __init__(self,name,value,*args,**kargs):\n InputInteger.__init__(self,name,value,*args,**kargs)\n self.slider = QtGui.QSlider(QtCore.Qt.Horizontal)\n self.slider.setTickPosition(QtGui.QSlider.TicksBelow)\n vmin = kargs.get('min',0)\n vmax = kargs.get('max',100)\n \n ticks = kargs.get('ticks',(vmax-vmin)/10)\n self.slider.setTickInterval(ticks)\n self.slider.setMinimum(vmin)\n self.slider.setMaximum(vmax)\n self.slider.setValue(value)\n self.slider.setSingleStep(1)\n #self.slider.setPageStep(5)\n self.slider.setTracking(1)\n self.connect(self.slider,QtCore.SIGNAL(\"valueChanged(int)\"),self.set_value)\n if kargs.has_key('func'):\n self.connect(self.slider,QtCore.SIGNAL(\"valueChanged(int)\"),kargs['func']) \n self.layout().addWidget(self.slider)", "def gui_choose_hsv(self, img):\n \n cv2.namedWindow('result')\n\n # Starting with 100's to prevent error while masking\n hl,sl,vl = 100,100,100\n hh,sh,vh = 179, 255, 255\n\n # Creating track bar\n cv2.createTrackbar('hl', 'result',0,179,self.nothing)\n cv2.createTrackbar('sl', 'result',0,255,self.nothing)\n cv2.createTrackbar('vl', 
'result',0,255,self.nothing)\n cv2.createTrackbar('hh', 'result',0,179,self.nothing)\n cv2.createTrackbar('sh', 'result',0,255,self.nothing)\n cv2.createTrackbar('vh', 'result',0,255,self.nothing)\n\n cv2.setTrackbarPos('hh','result',hh)\n cv2.setTrackbarPos('sh','result',sh)\n cv2.setTrackbarPos('vh','result',vh)\n\n\n while(1):\n\n frame = img.copy()\n #converting to HSV\n hsv = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)\n\n # get info from track bar and appy to result\n hl = cv2.getTrackbarPos('hl','result')\n sl = cv2.getTrackbarPos('sl','result')\n vl = cv2.getTrackbarPos('vl','result')\n\n hh = cv2.getTrackbarPos('hh','result')\n sh = cv2.getTrackbarPos('sh','result')\n vh = cv2.getTrackbarPos('vh','result')\n\n # Normal masking algorithm\n lower_green = np.array([hl,sl,vl])\n upper_green = np.array([hh, sh, vh])\n # upper_green = np.array([180,255,255])\n\n mask = cv2.inRange(hsv,lower_green, upper_green)\n\n result = cv2.bitwise_and(frame,frame,mask = mask)\n\n cv2.imshow('result',result)\n k = cv2.waitKey(5) & 0xFF\n if k == 27:\n break", "def add_sliders(self):\n\n\t\tself.scale_dict = {}\n\t\tinnov_str = simpledialog.askstring(\"Get Innovations.\", \n\t\t\t\"What innovation numbers do you want sliders for (separate with commas)?\")\n\t\tinnov_nums = innov_str.split(\",\")\n\t\tfor innov in innov_nums:\n\t\t\tself.scale_dict[innov] = tk.Scale(self, from_=-20, \n\t\t\t\tto=20, orient=tk.HORIZONTAL)\n\t\t\ttk.Label(self, text=\"Slider for #{0}\".format(innov)).pack()\n\t\t\tself.scale_dict[innov].pack(pady=10)\n\t\ttk.Button(self, text=\"Apply\", command=lambda: self.slider_update_CPPN()).pack()\n\n\n\t\ttk.Button(self, text=\"Exit\", command=self.quit).pack(pady=(20,10))", "def create_sliders(self):\n self.create_contrast_slider()\n self.create_crop_sliders()", "def update_H(self):", "def isoslider(surface_dic, surface_value_dic, min_value=0):\r\n return \\\r\nf\"\"\"\r\n\\n\\nclass IsoLevel(tk.Variable):\r\n def __init__(self, master, name, level):\r\n tk.Variable.__init__(self, master, value=level)\r\n self.name = name\r\n self.trace('w', self.callback)\r\n\r\n def callback(self, *args):\r\n cmd.isolevel(self.name, self.get())\r\n\r\n def increment(self, event=None, delta=0.1):\r\n self.set(round(float(self.get()) + delta, 2))\r\n\r\n def decrement(self, event=None):\r\n self.increment(None, -0.1)\r\n\r\n\r\nsurface_list = {surface_dic}\r\nsurface_max_list = {surface_value_dic}\r\n\r\ntop = tk.Toplevel(plugins.get_tk_root())\r\n\r\nmaster = tk.Frame(top, padx=10, pady=10)\r\nmaster.pack(fill=\"both\", expand=1)\r\n\r\nfor child in list(master.children.values()):\r\n child.destroy()\r\n\r\n\r\nrow_counter = 0\r\nfor identifier, component_dic in surface_list.items():\r\n # add calculation identifier\r\n tk.Label(master, text=identifier).grid(row=row_counter, column=0, sticky=\"w\")\r\n row_counter += 1\r\n \r\n for component_id, surfaces in component_dic.items():\r\n # add collection label, e.g. 
superstar or hotspot etc.\r\n tk.Label(master, text=component_id).grid(row=row_counter, column=1, sticky='w')\r\n row_counter += 1\r\n \r\n for i, surface in enumerate(surfaces):\r\n # add grid type label\r\n probe = surface.split(\"_\")[-2]\r\n tk.Label(master, text=probe).grid(row=row_counter, column=2, sticky=\"w\")\r\n \r\n # slider code \r\n v = IsoLevel(master, surface, 5)\r\n e = tk.Scale(master, orient=tk.HORIZONTAL, from_={min_value}, to=surface_max_list[identifier][component_id],\r\n resolution=0.1, showvalue=0, variable=v)\r\n e.grid(row=row_counter, column=3, sticky=\"ew\")\r\n\r\n e = tk.Entry(master, textvariable=v, width=4)\r\n e.grid(row=row_counter, column=4, sticky=\"e\")\r\n master.columnconfigure(3, weight=1)\r\n row_counter += 1\r\n\\n\\n\r\n\"\"\"", "def simple_slider_value_changed(self):\n slider_value = float(self.sliders_simple_widget.value()) \\\n / float(self.sliders_simple_widget.maximum())\n self.render_widget.lowerBound = self.render_widget.minimum \\\n + (self.render_widget.maximum - self.render_widget.minimum) * slider_value\n self.render_widget.update()", "def on_slider(self, instance, value):\n self.slider.bind(value=self.update_proxy)\n self.bind(pos=self.hack_position)\n self.slider.bind(pos=self.hack_position)", "def rgb_slider_moved(self, event):\n slider_red = int(self.slider_r.get_value())\n slider_green = int(self.slider_g.get_value())\n slider_blue = int(self.slider_b.get_value())\n\n self.change_color((slider_red, slider_green, slider_blue))", "def temphum_plot(self, kwargs=None):\n\n def valuechange():\n \"\"\"This is the function which is called, when a value is changed in the spin boxes\"\"\"\n\n tempmin.setMaximum(tempmax.value())\n tempmax.setMinimum(tempmin.value())\n hummin.setMaximum(hummax.value())\n hummax.setMinimum(hummin.value())\n\n self.variables.default_values_dict[\"settings\"][\n \"current_tempmin\"\n ] = tempmin.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_tempmax\"\n ] = tempmax.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_hummin\"\n ] = hummin.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_hummax\"\n ] = hummax.value()\n\n max = build_command(\n self.variables.devices_dict[\"temphum_controller\"],\n (\"set_hummax\", hummax.value()),\n )\n min = build_command(\n self.variables.devices_dict[\"temphum_controller\"],\n (\"set_hummin\", hummin.value()),\n )\n\n self.variables.vcw.write(\n self.variables.devices_dict[\"temphum_controller\"], max\n )\n self.variables.vcw.write(\n self.variables.devices_dict[\"temphum_controller\"], min\n )\n\n def dry_air_action():\n if dry_air_btn.isChecked():\n device_dict = self.variables.devices_dict[\"temphum_controller\"]\n try:\n command = build_command(\n device_dict, (\"set_environement_control\", \"ON\")\n )\n answer = self.variables.vcw.write(device_dict, command)\n if not answer:\n self.log.error(\n \"The environement controller did not responsed accordingly. Answer: \"\n + str(answer).strip()\n )\n return 0\n except:\n self.log.error(\n \"An error occured while changing the environement control\"\n )\n return 0\n dry_air_btn.setText(\"Humidity ctl. 
on\")\n self.variables.default_values_dict[\"settings\"][\n \"humidity_control\"\n ] = True\n\n else:\n device_dict = self.variables.devices_dict[\"temphum_controller\"]\n try:\n command = build_command(\n device_dict, (\"set_environement_control\", \"OFF\")\n )\n answer = self.variables.vcw.write(device_dict, command)\n if not answer:\n self.log.error(\n \"The environement controller did not responsed accordingly. Answer: \"\n + str(answer).strip()\n )\n\n return 0\n except:\n self.log.error(\n \"An error occured while changing the environement control\"\n )\n return 0\n dry_air_btn.setText(\"Humidity ctl. off\")\n self.variables.default_values_dict[\"settings\"][\n \"humidity_control\"\n ] = False\n\n def light_action():\n \"\"\"This function is debricated\"\"\"\n if light_btn.isChecked():\n self.variables.default_values_dict[\"settings\"][\"external_lights\"] = True\n else:\n self.variables.default_values_dict[\"settings\"][\n \"external_lights\"\n ] = False\n\n def check_light_state():\n if (\n self.variables.default_values_dict[\"settings\"][\"lights\"]\n and not light_btn.text() == \"Lights on\"\n ): # Checks if the lights are on and the button is off\n light_btn.setText(\"Lights on\")\n light_btn.setStyleSheet(\"background : rgb(0,255,0); border-radius: 5px\")\n elif (\n not self.variables.default_values_dict[\"settings\"][\"lights\"]\n and not light_btn.text() == \"Lights off\"\n ):\n light_btn.setText(\"Lights off\")\n light_btn.setStyleSheet(\"background : rgb(255,0,0); border-radius: 5px\")\n\n def config_plot(plot, plot2, pg):\n plot = plot.plotItem\n plot.setLabel(\"right\", \"humidity\", units=\"%\")\n plot.setLabel(\"bottom\", \"time\")\n plot.setLabel(\"left\", \"temperature\", units=\"Celsius\")\n plot.getAxis(\"left\").setPen(pg.mkPen(color=\"#c4380d\", width=3))\n plot.getAxis(\"right\").setPen(pg.mkPen(color=\"#025b94\", width=3))\n plot.showAxis(\"top\", show=True)\n plot.getAxis(\"top\").setTicks([])\n plot.getAxis(\"bottom\").setScale(1e-9)\n # plot.setRange(yRange=[15, 35])\n\n # For second plot\n plot.scene().addItem(\n plot2\n ) # inserts the second plot into the scene of the first\n plot2.setGeometry(plot.vb.sceneBoundingRect())\n plot.getAxis(\"right\").linkToView(\n plot2\n ) # links the second y axis to the second plot\n plot2.setXLink(plot) # sync the x axis of both plots\n # plot2.setRange(yRange=[0, 50])\n\n def __cut_arrays(data_array, maximum_time, arrays_to_cut):\n \"\"\"This function cuts an array to a maximum time difference\n This function is supposed to be used only for temp and humidity shaped arrays\n \"\"\"\n\n try:\n begin_time = data_array[arrays_to_cut[0]][0][0]\n end_time = data_array[arrays_to_cut[0]][0][-1]\n delta_time = (\n data_array[arrays_to_cut[0]][0][1]\n - data_array[arrays_to_cut[0]][0][0]\n )\n total_time = end_time - begin_time\n if total_time > maximum_time:\n over_time = total_time - maximum_time\n array_elm_to_drop = int(over_time / delta_time)\n for arrays in arrays_to_cut:\n data_array[arrays][0] = data_array[arrays][0][\n array_elm_to_drop:\n ]\n data_array[arrays][1] = data_array[arrays][1][\n array_elm_to_drop:\n ]\n except:\n pass\n\n def update_temphum_plots(kwargs=None):\n # for rooms in self.rooms:\n if self.variables.default_values_dict[\"settings\"][\"new_data\"]:\n temphum_plot.clear() # clears the plot and prevents a memory leak\n hum_plot_obj.clear()\n p1 = temphum_plot.plotItem\n\n ax = p1.getAxis(\"bottom\") # This is the trick\n __cut_arrays(\n self.variables.meas_data,\n float(\n 
self.variables.default_values_dict[\"settings\"].get(\n \"temp_history\", 3600\n )\n ),\n [\"temperature\", \"humidity\"],\n )\n ax.setTicks(\n [\n get_thicks_for_timestamp_plot(\n self.variables.meas_data[\"temperature\"][0],\n 5,\n self.variables.default_values_dict[\"settings\"][\n \"time_format\"\n ],\n )\n ]\n )\n\n try:\n if len(self.variables.meas_data[\"temperature\"][0]) == len(\n self.variables.meas_data[\"humidity\"][1]\n ): # sometimes it happens that the values are not yet ready\n p1.plot(\n self.variables.meas_data[\"temperature\"][0],\n self.variables.meas_data[\"temperature\"][1],\n pen={\"color\": \"r\", \"width\": 2},\n clear=True,\n )\n plot_item = setpg.PlotCurveItem(\n self.variables.meas_data[\"humidity\"][0],\n self.variables.meas_data[\"humidity\"][1],\n pen={\"color\": \"b\", \"width\": 2},\n clear=True,\n )\n hum_plot_obj.addItem(plot_item)\n del plot_item # the plot class needs a plot item which can be rendered, to avoid a mem leak delete the created plot item or 20k ram will be used\n # hum_plot_obj.addItem(setpg.plot(self.variables.meas_data[\"humidity\"][0],self.variables.meas_data[\"humidity\"][1],pen={'color': \"b\", 'width': 2}, clear=True))\n hum_plot_obj.setGeometry(\n p1.vb.sceneBoundingRect()\n ) # resize the second plot!\n except:\n pass\n\n # Create sublayout\n temphum_layout = QGridLayout()\n\n # Frame over the objects\n frame = QLabel()\n frame.setFrameStyle(QFrame.Box | QFrame.Raised)\n frame.setLineWidth(0)\n frame.setMidLineWidth(2)\n\n self.layout.addWidget(\n frame, self.temp_ypos, self.temp_xpos, self.temp_ysize, self.temp_xsize\n )\n\n x = np.zeros(1)\n y = np.zeros(1)\n\n setpg = pq\n # date_axis = CAxisTime(orientation='bottom') # Correctly generates the time axis\n hum_plot_obj = setpg.ViewBox() # generate new plot item\n temphum_plot = pq.PlotWidget()\n config_plot(temphum_plot, hum_plot_obj, setpg) # config the plot items\n\n self.variables.add_update_function(update_temphum_plots)\n\n # Additional Variables will be generated for temp and hum\n # self.variables.default_values_dict[\"settings\"].update({\"lights\": False, \"humidity_control\": True, \"current_tempmin\": 20, \"current_tempmax\": 25, \"current_hummin\": 20,\"current_hummax\": 25})\n\n # Spin Boxes for temp and humidity\n\n tempmin = QSpinBox()\n tempmax = QSpinBox()\n hummin = QSpinBox()\n hummax = QSpinBox()\n\n # Spinbox label\n textbox_temp = QLabel()\n textbox_temp.setText(\"Min temp. Max temp.\")\n textbox_temp.setFont(self.font)\n textbox_hum = QLabel()\n textbox_hum.setText(\"Min hum. Max hum.\")\n textbox_hum.setFont(self.font)\n\n # Config\n\n tempmin.setRange(15, 35)\n tempmin.setValue(\n float(\n self.variables.default_values_dict[\"settings\"].get(\"current_tempmin\", 0)\n )\n )\n tempmax.setRange(15, 35)\n tempmax.setValue(\n float(\n self.variables.default_values_dict[\"settings\"].get(\"current_tempmax\", 0)\n )\n )\n tempmin.valueChanged.connect(valuechange)\n tempmax.valueChanged.connect(valuechange)\n\n hummin.setRange(0, 70)\n hummin.setValue(\n float(\n self.variables.default_values_dict[\"settings\"].get(\"current_hummin\", 0)\n )\n )\n hummax.setRange(0, 70)\n hummax.setValue(\n float(\n self.variables.default_values_dict[\"settings\"].get(\"current_hummax\", 0)\n )\n )\n hummin.valueChanged.connect(valuechange)\n hummax.valueChanged.connect(valuechange)\n\n # Push buttons on the right for humidity control and light control\n\n dry_air_btn = QPushButton(\"Humidity ctl. 
off\")\n self.variables.default_values_dict[\"settings\"][\"humidity_control\"] = False\n dry_air_btn.setCheckable(True)\n dry_air_btn.toggle()\n dry_air_btn.clicked.connect(dry_air_action)\n dry_air_btn.setChecked(False)\n\n light_btn = QLabel()\n light_btn.setText(\"State not defined\")\n light_btn.setAlignment(QtCore.Qt.AlignVCenter | QtCore.Qt.AlignHCenter)\n light_btn.setStyleSheet(\"background : rgb(255,0,0); border-radius: 5px\")\n\n # light_btn.setCheckable(True)\n # light_btn.clicked.connect(light_action)\n\n # Humidity\n # temphum_plot.plot(x,y, pen=\"b\")\n\n # Widgets add\n temphum_layout.addWidget(textbox_temp, 0, 0, 1, 2)\n temphum_layout.addWidget(tempmin, 1, 0)\n temphum_layout.addWidget(tempmax, 1, 1)\n\n temphum_layout.addWidget(textbox_hum, 2, 0, 1, 2)\n temphum_layout.addWidget(hummin, 3, 0)\n temphum_layout.addWidget(hummax, 3, 1)\n\n temphum_layout.addWidget(dry_air_btn, 4, 0, 1, 2)\n temphum_layout.addWidget(light_btn, 5, 0, 3, 2)\n\n temphum_layout.addWidget(temphum_plot, 0, 3, 10, 2)\n\n temphum_layout.setContentsMargins(8, 8, 0, 8) # Makes a margin to the layout\n\n # Add the layout to the main layout\n self.layout.addLayout(\n temphum_layout,\n self.temp_ypos,\n self.temp_xpos,\n self.temp_ysize,\n self.temp_xsize,\n )\n\n def update():\n pass\n\n self.variables.add_update_function(update)\n self.variables.add_update_function(check_light_state)", "def slider_changed(self):\n freq_index = self.ui.frequencySlider.value()\n freq = self.psd.freqs[freq_index]\n self.ui.fmin.setText(str(freq))\n self.ui.fmax.setText(str(freq))\n self.value_changed()", "def setHsv ( self, h, s = 0.0, v = 0.0 ):\n #self.reset()\n\n # Check if first argument is list\n if isinstance(h, list):\n s = h[1]\n v = h[2]\n h = h[0]\n\n rgb = Colz.hsvToRgb( h, s, v )\n self.setRgba( rgb[0], rgb[1], rgb[2] )", "def update(self, *args):\n #Fetches slider information\n s1=self.s1.get()\n s2=self.s2.get()\n r1=self.r1.get()\n r2=self.r2.get()\n p=self.p.get()\n\n #Changes the number next to the bar\n self.r1_string.configure(text=\"%.2f\"% r1)\n self.r2_string.configure(text=\"%.2f\"% r2)\n self.s1_string.configure(text=\"%.2f\"% s1)\n self.s2_string.configure(text=\"%.2f\"% s2)\n self.p_string.configure(text=\"%.2f\"% self.p.get())\n\n #Creates two asset objects\n self.I1 = Instrument(r1, s1, \"Asset 1\", \"Equity\")\n self.I2 = Instrument(r2, s2, \"Asset 2\", \"Bond\")\n\n #Builds a portfolio object\n self.port = Portfolio([self.I1, self.I2])\n self.port.addcorr([[0,p]])\n\n #Displays the new graph to the graph frame\n fff =Frame(height=400, width=400, bd=10, bg='white')\n Chart(self.port, 0.02).scatter(fff)\n fff.grid(row=1, column=0)", "def slider(self, parent, variable, low, high, label):\n widget = Scale(parent, orient='horizontal',\n from_=low, to=high, # range of slider\n # tickmarks on the slider \"axis\":\n tickinterval=(high-low)/5.0,\n # the steps of the counter above the slider:\n resolution=(high-low)/100.0,\n label=label, # label printed above the slider\n length=300, # length of slider in pixels\n variable=variable) # slider value is tied to variable\n widget.pack(side='top')\n return widget", "def interact(self):\n param_min = self.param_vals[0]\n param_max = self.param_vals[-1]\n param_step = self.param_vals[1] - self.param_vals[0]\n\n qbt_indices = [index for (index, subsystem) in self.sweep.hilbertspace.qbt_subsys_list]\n osc_indices = [index for (index, subsystem) in self.sweep.hilbertspace.osc_subsys_list]\n\n param_slider = ipywidgets.FloatSlider(min=param_min, max=param_max, 
step=param_step,\n description=self.param_name, continuous_update=False)\n photon_slider = ipywidgets.IntSlider(value=1, min=1, max=4, description='photon number')\n initial_slider = ipywidgets.IntSlider(value=0, min=0, max=self.evals_count, description='initial state index')\n final_slider = ipywidgets.IntSlider(value=1, min=1, max=self.evals_count, description='final state index')\n\n qbt_dropdown = ipywidgets.Dropdown(options=qbt_indices, description='qubit subsys')\n osc_dropdown = ipywidgets.Dropdown(options=osc_indices, description='oscillator subsys')\n\n def update_min_final_index(*args):\n final_slider.min = initial_slider.value + 1\n\n initial_slider.observe(update_min_final_index, 'value')\n\n out = ipywidgets.interactive_output(self.plot_explorer_panels,\n {'param_val': param_slider,\n 'photonnumber': photon_slider,\n 'initial_index': initial_slider,\n 'final_index': final_slider,\n 'qbt_index': qbt_dropdown,\n 'osc_index': osc_dropdown\n })\n\n left_box = ipywidgets.VBox([param_slider])\n mid_box = ipywidgets.VBox([initial_slider, final_slider, photon_slider])\n right_box = ipywidgets.VBox([qbt_dropdown, osc_dropdown])\n\n user_interface = ipywidgets.HBox([left_box, mid_box, right_box])\n display(user_interface, out)", "def draw(self) -> None:\n top = SliderProperties.body_y - 30\n if self.number == 2:\n top = SliderProperties.body_y + 100\n\n indicator = self.font.render(str(int(self.volume)), True, Color.orange)\n self.screen.blit(\n indicator, (SliderProperties.body_x + SliderProperties.body_width + 20, top)\n )", "def setup_hsv_boundaries():\n global l_hsv_thresh, u_hsv_thresh\n cv2.destroyAllWindows()\n l_hsv_thresh, u_hsv_thresh = prompt_calibration()\n cv2.destroyAllWindows()", "def setupConnections(self):\n self.T1Button.connect('clicked(bool)', self.onApplyButton)\n self.RViewButton.connect('clicked(bool)', self.onApplyRViewButton)\n self.CheckButton.connect('stateChanged(int)', self.onCheckbuttonChecked)\n self.LLE_Selector.connect(\"currentNodeChanged(vtkMRMLNode*)\", self.onSelectLLENode)\n self.LLN_Selector.connect(\"currentNodeChanged(vtkMRMLNode*)\", self.onSelectLLNNode)\n self.Aref_Selector.connect(\"currentNodeChanged(vtkMRMLNode*)\", self.onSelectArefNode)\n\n self.ThSlider_LLE.Slider.connect(\"positionsChanged(double,double)\",self.ThSlider_LLE.onSliderChanged)\n self.ThSlider_LLE.SpinBoxL.connect(\"valueChanged(int)\", self.ThSlider_LLE.onSpinBoxLChanged)\n self.ThSlider_LLE.SpinBoxR.connect(\"valueChanged(int)\", self.ThSlider_LLE.onSpinBoxRChanged)\n \n self.ThSlider_LLN.Slider.connect(\"positionsChanged(double,double)\",self.ThSlider_LLN.onSliderChanged)\n self.ThSlider_LLN.SpinBoxL.connect(\"valueChanged(int)\", self.ThSlider_LLN.onSpinBoxLChanged)\n self.ThSlider_LLN.SpinBoxR.connect(\"valueChanged(int)\", self.ThSlider_LLN.onSpinBoxRChanged) \n\n self.ThSlider_ECV.Slider.connect(\"positionsChanged(double,double)\",self.ThSlider_ECV.onSliderChanged)\n self.ThSlider_ECV.SpinBoxL.connect(\"valueChanged(int)\", self.ThSlider_ECV.onSpinBoxLChanged)\n self.ThSlider_ECV.SpinBoxR.connect(\"valueChanged(int)\", self.ThSlider_ECV.onSpinBoxRChanged)\n \n self.Stats.segmentationSelector.connect(\"currentNodeChanged(vtkMRMLNode*)\", self.Stats.onScalarSelectorChanged)\n self.Stats.scalarSelector.connect(\"currentNodeChanged(vtkMRMLNode*)\", self.Stats.onScalarSelectorChanged)\n self.Stats.scalarSelector2.connect(\"currentNodeChanged(vtkMRMLNode*)\", self.Stats.onScalarSelector2Changed)\n self.Stats.SButton.connect('clicked(bool)', self.onApplyGetStatistics)\n\n 
self.NativeT1_Selector.connect(\"currentNodeChanged(vtkMRMLNode*)\", self.onSelectNT1Node)\n self.EnhancedT1_Selector.connect(\"currentNodeChanged(vtkMRMLNode*)\", self.onSelectET1Node)\n self.SB_NBlodd.connect(\"valueChanged(Double)\", self.onSpinBoxNBChanged)\n self.SB_EBlodd.connect(\"valueChanged(Double)\", self.onSpinBoxEBChanged)\n self.SB_Haematocrit.connect(\"valueChanged(Double)\", self.onSpinBoxHChanged)\n self.ECVButton.connect('clicked(bool)',self.onApplyECVButton)", "def __create_slider(\n self,\n master: Misc,\n width: int,\n name: str,\n value_callback: Callable[[int], None],\n ) -> tuple[Frame, Spinbox, Scale]:\n slider_frame = Frame(master, width=width, bg=\"\")\n slider_frame.rowconfigure(2, weight=1)\n\n int_value = IntVar(slider_frame, 0)\n int_value.trace_add(\n \"write\", lambda var, index, mode: value_callback(int_value.get())\n )\n\n label = Label(slider_frame, text=name, font=Font(size=width // 5))\n label.grid(row=0, column=0)\n\n spinbox = Spinbox(\n slider_frame,\n from_=-self.__max_value,\n to=self.__max_value,\n textvariable=int_value,\n width=5,\n font=Font(size=width // 5),\n )\n spinbox.grid(row=1, column=0)\n\n scale = Scale(\n slider_frame,\n from_=-self.__max_value,\n to=self.__max_value,\n showvalue=False,\n width=width,\n variable=int_value,\n )\n\n scale.bind(\"<Double-Button-1>\", lambda _: int_value.set(0))\n scale.grid(row=2, column=0, sticky=\"ns\")\n\n if (increase := SLIDERS_BUTTONS.get(f\"{name}_increase\")) is not None:\n master.bind_all(\n f\"<KeyPress-{increase}>\",\n lambda _: int_value.set(min(2000, int_value.get() + SLIDERS_SPEED)),\n )\n if JOYSTICK_MODE:\n master.bind_all(f\"<KeyRelease-{increase}>\", lambda _: int_value.set(0))\n\n if (decrease := SLIDERS_BUTTONS.get(f\"{name}_decrease\")) is not None:\n master.bind_all(\n f\"<KeyPress-{decrease}>\",\n lambda _: int_value.set(max(-2000, int_value.get() - SLIDERS_SPEED)),\n )\n if JOYSTICK_MODE:\n master.bind_all(f\"<KeyRelease-{decrease}>\", lambda _: int_value.set(0))\n\n return slider_frame, spinbox, scale", "def sliderChange(self):\n for rdout, sldr in zip(self.joint_slider_rdouts, self.joint_sliders):\n rdout.setText(str(sldr.value()))\n\n self.ui.rdoutTorq.setText(str(self.ui.sldrMaxTorque.value()) + \"%\")\n self.ui.rdoutSpeed.setText(str(self.ui.sldrSpeed.value()) + \"%\")\n\n # Do nothing if the rexarm is not initialized\n if self.rexarm.initialized:\n self.rexarm.set_torque_limits([self.ui.sldrMaxTorque.value() / 100.0] * self.rexarm.num_joints)\n self.rexarm.set_speeds_normalized_all(self.ui.sldrSpeed.value() / 100.0)\n joint_positions = np.array([sldr.value() * D2R for sldr in self.joint_sliders])\n # Only send the joints that the rexarm has\n self.rexarm.set_positions(joint_positions[0:self.rexarm.num_joints])", "def slider_update(attrname, old, new):\n tick = slider.value\n title.text = str(time.strftime(\n '%Y-%m-%d %H:%M:%S', time.localtime(tick)))\n source2.data = my_sources[tick]", "def valuechange():\n\n tempmin.setMaximum(tempmax.value())\n tempmax.setMinimum(tempmin.value())\n hummin.setMaximum(hummax.value())\n hummax.setMinimum(hummin.value())\n\n self.variables.default_values_dict[\"settings\"][\n \"current_tempmin\"\n ] = tempmin.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_tempmax\"\n ] = tempmax.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_hummin\"\n ] = hummin.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_hummax\"\n ] = hummax.value()\n\n max = build_command(\n 
self.variables.devices_dict[\"temphum_controller\"],\n (\"set_hummax\", hummax.value()),\n )\n min = build_command(\n self.variables.devices_dict[\"temphum_controller\"],\n (\"set_hummin\", hummin.value()),\n )\n\n self.variables.vcw.write(\n self.variables.devices_dict[\"temphum_controller\"], max\n )\n self.variables.vcw.write(\n self.variables.devices_dict[\"temphum_controller\"], min\n )" ]
[ "0.6486781", "0.63806134", "0.63726234", "0.625302", "0.5907185", "0.5888957", "0.5856775", "0.5827766", "0.5827567", "0.580906", "0.57934964", "0.5773385", "0.5756233", "0.57184124", "0.57120323", "0.5695729", "0.56683296", "0.564698", "0.5633634", "0.5592048", "0.55850995", "0.5529624", "0.55240035", "0.5503105", "0.5498187", "0.54897934", "0.548112", "0.5469947", "0.5465387", "0.5436273" ]
0.6754454
0
Format and return lines from the output of dmesg.
def human_dmesg(source=None, max_lines=20):
    formatted_dmesg = []
    # Reversing the array shows the newest lines first, to stay consistent
    # with how other logs are presented.
    try:
        dmesg_data = exec_process(['dmesg', '-T'], True).split('\n')
        dmesg_data.reverse()
        for line in dmesg_data:
            if not line:
                continue
            if source:
                match = _dmesg_line_regex.match(line)
                if match:
                    source_match = match.groupdict('UNKNOWN')['source']
                    if source in source_match:
                        formatted_dmesg.append(line)
            else:
                formatted_dmesg.append(line)
            if len(formatted_dmesg) >= max_lines:
                break
    except RuntimeError:
        return []
    else:
        return formatted_dmesg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_dmesg_log(host=None):\r\n if host:\r\n ret_out = host.run('dmesg').stdout\r\n return ret_out\r\n else:\r\n ret_out = utils.run('dmesg').stdout\r\n return ret_out", "def read_linelog():", "def dumpDmesg(self):\n pass", "def read_syslog():\n lines = []\n with open(SYSLOG, 'rb') as f:\n for line in f:\n tf = transform_line(line)\n if tf:\n lines.append(tf)\n return lines", "def stdout(self, stdout: str) -> Tuple[List[Message], List[AnnotateCode], str]:\n return [], [], stdout", "def get_errors(self, output_str):\n\n\n out = '''' 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored\n GigabitEthernet0/0 is up, line protocol is up \n Hardware is CN Gigabit Ethernet, address is f44e.05b5.b358 (bia f44e.05b5.b358)\n 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored\n GigabitEthernet0/1 is administratively down, line protocol is down \n Hardware is CN Gigabit Ethernet, address is f44e.05b5.b359 (bia f44e.05b5.b359)\n 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored\n Serial0/0/0 is up, line protocol is up \n Hardware is WIC MBRD Serial\n 3 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored, 0 abort\n Serial0/0/1 is administratively down, line protocol is down \n Hardware is WIC MBRD Serial\n 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored, 0 abort'\n'''\n date_time = get_date_time()\n trimmed_output = []\n all_errors = []\n line_counter = 0\n temp_lines = ''\n # trimming unnecessary lines from raw input\n for line in output_str.split('\\n'):\n if 'protocol' in line or 'input errors' in line:\n line_counter = line_counter + 1\n temp_lines = temp_lines + line.strip() + ' '\n if line_counter == 2:\n trimmed_output.append(temp_lines)\n line_counter = 0\n temp_lines = ''\n\n # extracting necessary information from each trimmed line\n for line in trimmed_output:\n # extracting port name\n port = re.search('(GigabitEthernet|Serial)\\d*\\W\\d*', line).group(0)\n error_str = re.search('\\d* input errors', line).group(0)\n input_error = re.search('\\d*', error_str).group(0)\n # extracting crc error\n error_str = re.search('\\d* CRC', line).group(0)\n crc_error = re.search('\\d*', error_str).group(0)\n # extracting frame error\n error_str = re.search('\\d* frame', line).group(0)\n frame_error = re.search('\\d*', error_str).group(0)\n # extracting overrun error\n error_str = re.search('\\d* overrun', line).group(0)\n overrun_error = re.search('\\d*', error_str).group(0)\n # extracting ignored error\n error_str = re.search('\\d* ignored', line).group(0)\n ignored_error = re.search('\\d*', error_str).group(0)\n # appending to a temporary list which will be later converted to a DataFrame\n all_errors.append([self.device, port, input_error, crc_error, frame_error, overrun_error,\n ignored_error, date_time])\n\n error_df = pd.DataFrame(all_errors, columns=['device_name', 'port', 'input', 'crc', 'frame', 'overrun',\n 'ignored', 'date_time'])\n return error_df", "def serverinfo( self, mess, args):\n version = open('/proc/version').read().strip()\n loadavg = open('/proc/loadavg').read().strip()\n\n return '%s\\n\\n%s' % ( version, loadavg, )", "def get_output(self, *args, **kwargs):\n self.send(*args, **kwargs)\n return self.process.before.split(\"\\r\\n\")", "def render_line(self, line):\n time = eid_to_datetime(line['eid'], self.tz)\n msg = \"[%s] \" % (time.strftime('%Y-%m-%d %H:%M:%S'))\n if line['type'] == 'buffer_msg':\n msg += \"<%s> %s\" % (line.get('from', line.get('server')), line['msg'])\n return msg\n if line['type'] == 'buffer_me_msg':\n msg += \"— %s %s\" % (line['from'], 
line['msg'])\n return msg\n\n if line['type'] in ['joined_channel', 'you_joined_channel']:\n msg += '→ '\n elif line['type'] in ['parted_channel', 'you_parted_channel']:\n msg += '← '\n elif line['type'] == 'quit':\n msg += '⇐ '\n else:\n msg += '* '\n\n if line['type'] in VERBATIM:\n try:\n msg += line['msg']\n except KeyError:\n self.log.warn(\"Log type %s has no attribute 'msg'\", line['type'])\n elif line['type'] in MESSAGES:\n temp = Template(MESSAGES[line['type']])\n msg += temp.safe_substitute(defaultdict(lambda: '', line))\n elif line['type'] in STATS:\n if 'parts' in line:\n msg += line['parts'] + \": \"\n msg += line['msg']\n elif line['type'] == 'user_channel_mode':\n msg += '%s set %s %s' % (line.get('from', line.get('server')), line['diff'], line['nick'])\n elif line['type'] == 'channel_query':\n if line['query_type'] == 'timestamp':\n msg += 'channel timestamp is %s' % line['timestamp']\n elif line['query_type'] == 'mode':\n msg += 'channel mode is %s' % line['newmode']\n else:\n self.log.warn('Unknown channel_query type: %s', line['query_type'])\n elif line['type'] == 'channel_mode':\n msg += 'Channel mode set to %s by ' % line['diff']\n if 'from' in line:\n msg += line['from']\n else:\n msg += 'the server %s' % line['server']\n elif line['type'] == 'motd_response':\n msg += \"\\n\".join(line['lines'])\n elif line['type'] in ['cap_ls', 'cap_req', 'cap_ack']:\n if line['type'] == 'cap_ls':\n msg += 'Available'\n if line['type'] == 'cap_req':\n msg += 'Requested'\n if line['type'] == 'cap_ack':\n msg += 'Acknowledged'\n msg += ' capabilities: %s' % ' | '.join(line['caps'])\n elif line['type'] == 'unknown_umode':\n if 'flag' in line:\n msg += line['flag'] + \" \"\n msg += line['msg']\n elif line['type'] == 'time':\n msg += 'Server time: %s' % line['time_string']\n if 'time_stamp' in line:\n msg += ' (%s)' % line['time_stamp']\n msg += ' - %s' % line['time_server']\n else:\n if 'msg' in line:\n msg += line['msg']\n self.log.warn('Unknown message type (%s)', line['type'])\n return msg", "def extract_errors(stdout: str) -> str:\n if not (stdout + \"\").strip():\n return \"\"\n\n out: typing.List[str] = []\n for line in filter(None, str(stdout).replace(\"\\\\n\", \"\\n\").split(\"\\n\")):\n if line.lower().startswith(\"ora-\") or line.lower().startswith(\"rman-\"):\n if not line.find(\"===\") > -1:\n out += textwrap.wrap(line.strip())\n\n return '\\n'.join(out)", "def main():\n mount_file = '/proc/mounts'\n if os.path.isfile(mount_file):\n try:\n f = open(mount_file, 'r')\n except IOError:\n print 'cannot open', mount_file\n else:\n lines = []\n lines = f.readlines()\n f.close()\n\n matching = [line for line in lines if \"rootfs\" in line]\n #print matching\n \n removed = [lines.remove(m) for m in matching]\n #print removed\n \n for line in lines:\n if line.endswith(\"0 0\\n\"):\n line = line[:-5] \n #print line\n # line = line.rstrip(\" 0\\n\") does not work if\n # the line contains 0. \n # i.e. 
\"...gid=5,mode=620,ptmxmode=000 0 0\\n\"\n\n fields = line.split(\" \")\n #print fields\n\n if (len(fields) != 4):\n print 'cannot format', line\n else:\n print fields[0], 'on', fields[1], 'type', fields[2], \\\n '('+ fields[3] + ')'\n else:\n print 'cannot find', mount_file\n\n return 0", "def find_valons_with_dmesg():\n \n try:\n dmesg = check_output('dmesg | grep \"FT232RL\"',shell=True)\n except subprocess.CalledProcessError:\n # grep failed so no ports found\n return []\n lines = dmesg.split('\\n')\n lines = [x for x in lines if len(x) > 0]\n m = usbre.search(lines[-1])\n usbport = m.group('port')\n try:\n dmesg = check_output(('dmesg | grep \"usb %s.*now attached to\"' % usbport),shell=True)\n except subprocess.CalledProcessError:\n # grep failed so no ports found\n return []\n lines = dmesg.split('\\n')\n lines = [x for x in lines if len(x) > 0]\n lines = lines[-1:]\n ports = []\n for ln in lines[::-1]:\n idx = ln.find('ttyUSB')\n if idx >= 0:\n port = '/dev/' + ln[idx:]\n if port not in ports:\n ports.append(port)\n return ports", "def get_formatted_line(self) -> str:\n\t\tmount_point_fs_space = \"\"\n\t\tfs_device_space = \"\"\n\t\tdevice_flags_space = \"\"\n\t\tmount_point_fs_space_int = default_mount_point_fs_space - len(self.mount_point)\n\t\tfs_device_space_int = default_fs_device_space - len(self.fstype)\n\t\tdevice_flags_space_int = default_device_flags_space - len(self.device)\n\t\tfor _ in repeat(None, mount_point_fs_space_int):\n\t\t\tmount_point_fs_space += \" \"\n\t\tfor _ in repeat(None, fs_device_space_int):\n\t\t\tfs_device_space += \" \"\n\t\tfor _ in repeat(None, device_flags_space_int):\n\t\t\tdevice_flags_space += \" \"\n\t\treadable_flags = \"flags=\"\n\t\tfor flag in self.flags:\n\t\t\treadable_flags += f\"{flag};\"\n\t\treturn f\"{self.mount_point}{mount_point_fs_space}{self.fstype}{fs_device_space}{self.device}{device_flags_space}{readable_flags}\\n\"", "def info(self, handle):\n\n # Each process group gathers their output\n\n groupstr = \"\"\n procstr = \"\"\n\n gcomm = self._comm.comm_group\n wcomm = self._comm.comm_world\n rcomm = self._comm.comm_rank\n\n if wcomm.rank == 0:\n handle.write(\"Data distributed over {} processes in {} groups\\n\".format(self._comm.world_size, self._comm.ngroups))\n\n for ob in self.obs:\n id = ob['id']\n tod = ob['tod']\n base = ob['baselines']\n nse = ob['noise']\n intrvl = ob['intervals']\n\n if gcomm.rank == 0:\n groupstr = \"observation {}:\\n\".format(id)\n groupstr = \"{} {} total samples, {} detectors\\n\".format(groupstr, tod.total_samples, len(tod.detectors))\n if intrvl is not None:\n groupstr = \"{} {} intervals:\\n\".format(groupstr, len(intrvl))\n for it in intrvl:\n groupstr = \"{} {} --> {} ({} --> {})\\n\".format(groupstr, it.first, it.last, it.start, it.stop)\n\n # rank zero of the group will print general information,\n # and each process will get its statistics.\n\n nsamp = tod.local_samples[1]\n dets = tod.local_dets\n\n procstr = \" proc {}\\n\".format(gcomm.rank)\n my_chunks = 1\n if tod.local_chunks is not None:\n my_chunks = tod.local_chunks[1]\n procstr = \"{} sample range {} --> {} in {} chunks:\\n\".format(procstr, tod.local_samples[0], (tod.local_samples[0] + nsamp - 1), my_chunks)\n \n if tod.local_chunks is not None:\n chkoff = tod.local_samples[0]\n for chk in range(tod.local_chunks[1]):\n abschk = tod.local_chunks[0] + chk\n chkstart = chkoff\n chkstop = chkstart + tod.total_chunks[abschk] - 1\n procstr = \"{} {} --> {}\\n\".format(procstr, chkstart, chkstop)\n chkoff += 
tod.total_chunks[abschk]\n\n if nsamp > 0:\n \n stamps = tod.read_times(local_start=0, n=nsamp)\n\n procstr = \"{} timestamps {} --> {}\\n\".format(procstr, stamps[0], stamps[-1])\n\n for dt in dets:\n procstr = \"{} det {}:\\n\".format(procstr, dt)\n\n pdata = tod.read_pntg(detector=dt, local_start=0, n=nsamp)\n\n procstr = \"{} pntg [{:.3e} {:.3e} {:.3e} {:.3e}] --> [{:.3e} {:.3e} {:.3e} {:.3e}]\\n\".format(procstr, pdata[0,0], pdata[0,1], pdata[0,2], pdata[0,3], pdata[-1,0], pdata[-1,1], pdata[-1,2], pdata[-1,3])\n\n data = tod.read(detector=dt, local_start=0, n=nsamp)\n flags, common = tod.read_flags(detector=dt, local_start=0, n=nsamp)\n procstr = \"{} {:.3e} ({}) --> {:.3e} ({})\\n\".format(procstr, data[0], flags[0], data[-1], flags[-1])\n good = np.where((flags | common) == 0)[0]\n procstr = \"{} {} good samples\\n\".format(procstr, len(good))\n min = np.min(data[good])\n max = np.max(data[good])\n mean = np.mean(data[good])\n rms = np.std(data[good])\n procstr = \"{} min = {:.4e}, max = {:.4e}, mean = {:.4e}, rms = {:.4e}\\n\".format(procstr, min, max, mean, rms)\n\n for cname in tod.cache.keys():\n procstr = \"{} cache {}:\\n\".format(procstr, cname)\n ref = tod.cache.reference(cname)\n min = np.min(ref)\n max = np.max(ref)\n mean = np.mean(ref)\n rms = np.std(ref)\n procstr = \"{} min = {:.4e}, max = {:.4e}, mean = {:.4e}, rms = {:.4e}\\n\".format(procstr, min, max, mean, rms)\n\n recvstr = \"\"\n if gcomm.rank == 0:\n groupstr = \"{}{}\".format(groupstr, procstr)\n for p in range(1, gcomm.size):\n if gcomm.rank == 0:\n recvstr = gcomm.recv(source=p, tag=p)\n groupstr = \"{}{}\".format(groupstr, recvstr)\n elif p == gcomm.rank:\n gcomm.send(procstr, dest=0, tag=p)\n gcomm.barrier()\n\n # the world rank 0 process collects output from all groups and\n # writes to the handle\n\n recvgrp = \"\"\n if wcomm.rank == 0:\n handle.write(groupstr)\n for g in range(1, self._comm.ngroups):\n if wcomm.rank == 0:\n recvgrp = rcomm.recv(source=g, tag=g)\n handle.write(recvgrp)\n elif g == self._comm.group:\n if gcomm.rank == 0:\n rcomm.send(groupstr, dest=0, tag=g)\n wcomm.barrier()\n\n return", "def readlines():\n while 1:\n line = nb_server.stdout.readline().decode(\"utf-8\").strip()\n if line:\n print(line)", "def show_device_information_long(self):\n\n for device in self._devices:\n print(\"\")\n if device['Device Type'].startswith(\"enclosu\"):\n if device.get('Device Type'):\n print(\"{0:>32}: {1}\".format(\"Device Type\", device['Device Type']))\n if device['Device Type Description']:\n print(\"{0:>32}: {1}\".format(\"Device Description\", device['Device Type Description']))\n if device.get('SCSI Device Name'):\n print(\"{0:>32}: {1}\".format(\"SCSI Device Name\", device['SCSI Device Name']))\n if device.get('Product Identification'):\n print(\"{0:>32}: {1}\".format(\"Product Identification\", device['Product Identification']))\n if device.get('Vendor Identification'):\n print(\"{0:>32}: {1}\".format(\"Vendor Identification\", device['Vendor Identification']))\n if device.get('Firmware Version'):\n print(\"{0:>32}: {1}\".format(\"Firmware Version\", device['Firmware Version']))\n if device.get('Serial Number'):\n print(\"{0:>32}: {1}\".format(\"Serial Number\", device['Serial Number']))\n if device.get('SAS Address'):\n print(\"{0:>32}: {1}\".format(\"SAS Address\", device['SAS Address']))\n else:\n if device.get('Device Type'):\n print(\"{0:>32}: {1}\".format(\"Device Type\", device['Device Type']))\n if device['Device Type Description']:\n print(\"{0:>32}: {1}\".format(\"Device 
Description\", device['Device Type Description']))\n if device.get('Linux Device Name'):\n print(\"{0:>32}: {1}\".format(\"Linux Device Name\", device['Linux Device Name']))\n if device.get('SCSI Device Name'):\n print(\"{0:>32}: {1}\".format(\"SCSI Device Name\", device['SCSI Device Name']))\n if device.get('Product Identification'):\n print(\"{0:>32}: {1}\".format(\"Product Identification\", device['Product Identification']))\n if device.get('Vendor Identification'):\n print(\"{0:>32}: {1}\".format(\"Vendor Identification\", device['Vendor Identification']))\n if device.get('Firmware Version'):\n print(\"{0:>32}: {1}\".format(\"Firmware Version\", device['Firmware Version']))\n if device.get('Serial Number'):\n print(\"{0:>32}: {1}\".format(\"Serial Number\", device['Serial Number']))\n if device.get('Drive Capacity'):\n print(\"{0:>32}: {1}\".format(\"Drive Capacity\", device['Drive Capacity']))\n if device.get('Block Length'):\n print(\"{0:>32}: {1}\".format(\"Block Length\", device['Block Length']))\n if device.get('Power On Hours'):\n print(\"{0:>32}: {1}\".format(\"Power On Hours\", device['Power On Hours']))\n if device.get('Current Temperature'):\n print(\"{0:>32}: {1}\".format(\"Current Temperature\", device['Current Temperature']))\n if device.get('SAS Address'):\n print(\"{0:>32}: {1}\".format(\"SAS Address\", device['SAS Address']))\n if device.get('Enclosure Device'):\n print(\"{0:>32}: {1}\".format(\"Enclosure Device\", device['Enclosure Device']))\n if device.get('Enclosure Slot'):\n print(\"{0:>32}: {1}\".format(\"Enclosure Slot\", device['Enclosure Slot']))\n if device.get('Slot Description'):\n print(\"{0:>32}: {1}\".format(\"Slot Desciption\", device['Slot Description']))\n\n if len(self._devices):\n print(\"\")", "def get_info(withLen=0):\n global RESULT\n\n # Format RESULT nicely, one item per line. Oh yeah!\n ret = string.join(map(lambda y: string.join(map(repr, y)), RESULT), '\\n')\n\n if withLen: # send with length and status for AdminRunner's CommandPipe\n ret = str(len(RESULT) + 1) + '\\n' + str(STATUS) + '\\n' + ret\n\n RESULT = []\n return ret", "def debug_msgs_from_lines(lines):\n # Read each line in the file. 
If a line doesn't start with a\n # timestamp (YYYY-MM-DD), assume it's a continuation of the previous line.\n debug_msgs = []\n debug_re = re.compile(r\"\\d\\d\\d\\d-\\d\\d-\\d\\d.*\")\n for line in lines:\n if debug_re.match(line):\n debug_msgs.append(line)\n elif len(debug_msgs) > 0:\n debug_msgs[-1] += line\n\n return debug_msgs", "def parse_client_stdout(txt):\n r = Result.from_netperf_stdout(txt)\n return r", "def serverinfo( self, mess, args):\n version = open('/proc/version').read().strip()\n loadavg = open('/proc/loadavg').read().strip()\n\n return '%snn%s' % ( version, loadavg, )", "def outputStatus(self, line):\r\n for l in line.strip('\\r\\n').split('\\n'):\r\n self.output('%s: %s' % (ctime(), l), 0)", "def result_handler(raw_output):\n write_log(raw_output[1])\n item_list = []\n line_list = raw_output[1].split('\\n')\n title_list = re.sub(\"\\s{2,}\", \"\\t\", line_list[0]).split(\"\\t\")\n for line in line_list[1:]:\n item_list.append(re.sub(\"\\s{2,}\", \"\\t\", line).split(\"\\t\"))\n return raw_output[0], title_list, item_list", "def format_bash(self,query_results):\n data=query_results.data\n \n name=\"ddb\"\n\n print (\"{0}_row_length={1}\".format(name,len(data)))\n print (\"{0}_column_length={1}\".format(name,len(query_results.columns)))\n print (\"\")\n\n column_index=0\n for column in query_results.columns:\n print(\"{0}_columns['{1}']='{2}'\".format(name,column_index,column))\n column_index+=1\n\n\n row_index=0\n for row in data:\n for column_index in range(0,len(query_results.columns)):\n print('{0}_data[{1}][{2}]=\"{3}\"'.format(name,row_index,column_index,row['data'][column_index]))\n row_index+=1\n # TODO return output for this\n return \"\"", "def main():\n lines = read_syslog()\n if len(sys.argv) > 1:\n lines = filter_logs(sys.argv[1], lines)\n for line in lines:\n print(line)", "def _getDiagnosticString():\n text = '\\n## Diagnostic output from minimalmodbus ## \\n\\n'\n text += 'Minimalmodbus version: ' + __version__ + '\\n'\n text += 'Minimalmodbus status: ' + __status__ + '\\n'\n text += 'Revision: ' + __revision__ + '\\n'\n text += 'Revision date: ' + __date__ + '\\n'\n text += 'File name (with relative path): ' + __file__ + '\\n'\n text += 'Full file path: ' + os.path.abspath(__file__) + '\\n\\n'\n text += 'pySerial version: ' + serial.VERSION + '\\n'\n text += 'pySerial full file path: ' + os.path.abspath(serial.__file__) + '\\n\\n'\n text += 'Platform: ' + sys.platform + '\\n'\n text += 'Filesystem encoding: ' + repr(sys.getfilesystemencoding()) + '\\n'\n text += 'Byteorder: ' + sys.byteorder + '\\n'\n text += 'Python version: ' + sys.version + '\\n'\n text += 'Python version info: ' + repr(sys.version_info) + '\\n'\n text += 'Python flags: ' + repr(sys.flags) + '\\n'\n text += 'Python argv: ' + repr(sys.argv) + '\\n'\n text += 'Python prefix: ' + repr(sys.prefix) + '\\n'\n text += 'Python exec prefix: ' + repr(sys.exec_prefix) + '\\n'\n text += 'Python executable: ' + repr(sys.executable) + '\\n'\n try:\n text += 'Long info: ' + repr(sys.long_info) + '\\n'\n except:\n text += 'Long info: (none)\\n' # For Python3 compatibility\n try:\n text += 'Float repr style: ' + repr(sys.float_repr_style) + '\\n\\n'\n except:\n text += 'Float repr style: (none) \\n\\n' # For Python 2.6 compatibility\n text += 'Variable __name__: ' + __name__ + '\\n'\n text += 'Current directory: ' + os.getcwd() + '\\n\\n'\n text += 'Python path: \\n'\n text += '\\n'.join(sys.path) + '\\n'\n text += '\\n## End of diagnostic output ## \\n'\n return text", "def formatLines(volumes):\n 
print \"Processing data....\"\n\n results = [[], [], [], [], []]\n for vol in volumes:\n results[0].append(volumes[vol][0])\n results[1].append(volumes[vol][1])\n results[2].append(volumes[vol][2])\n results[3].append(volumes[vol][3])\n results[4].append(volumes[vol][4])\n print \"is \" + str(results[0])\n return results", "def nice_output(self):\n return self.des", "def nice_output(self):\n return self.des", "def get_device_file_dict():\n cmd = 'lshw -class disk'\n desc = \"description\"\n log_name = \"logical name\"\n serial = \"serial\"\n\n dev = []\n dev_list = []\n\n ret, output, err = run_gluster_command(cmd)\n output = output.decode('ASCII')\n dev_info = output.split('\\n')\n for line in dev_info:\n if re.search(desc, line):\n if dev:\n dev_list.append(dev)\n\n dev = []\n if re.search(log_name, line) or re.search(serial, line):\n temp = line.split(':')\n temp[1] = temp[1].strip(' ')\n dev.append(temp[1])\n dev_list.append(dev)\n for line in dev_list:\n print(line)", "def calc_lines(self):\n line = []\n for msg in self.message_log:\n msg = msg['msg']\n if '\\n' not in msg:\n line.append(math.ceil((len(msg) + 18) / self.w))\n else:\n msg = msg.split('\\n')\n total = 0\n for i in msg:\n total += math.ceil((len(i) + 18) / self.w)\n line.append(total)\n return line" ]
[ "0.6757594", "0.59532964", "0.59326035", "0.56777763", "0.54108447", "0.5383906", "0.53356045", "0.53201014", "0.52892894", "0.5279133", "0.5222678", "0.5219982", "0.5216076", "0.5173431", "0.51427615", "0.5130245", "0.51280516", "0.5125033", "0.5106825", "0.509392", "0.5090979", "0.50879383", "0.50879073", "0.50832516", "0.5081032", "0.50699514", "0.506586", "0.506586", "0.5060367", "0.505614" ]
0.6822441
0
A static method to merge two lexemes
def merge_lexemes(source: str, target: str, login: _Login | None = None, summary: str | None = None, is_bot: bool = False, **kwargs: Any) -> dict:
    params = {
        'action': 'wblmergelexemes',
        'fromid': source,
        'toid': target,
        'format': 'json'
    }

    if summary:
        params.update({'summary': summary})

    if is_bot:
        params.update({'bot': ''})

    return mediawiki_api_call_helper(data=params, login=login, is_bot=is_bot, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stemset_combiner(stempool1, stempool2):\n return stempool1.stemset | stempool2.stemset", "def sentence_encoding_rnn_phi(t1, t2):\n return (t1.leaves(), t2.leaves())", "def __radd__(self, other):\n return Token(\n other + self.text, self.position - len(other), self.category)", "def Extended_Lesk(word1,word2):\n\n #Creates a list of the word, and one layer of hyponyms\n list1 = [word1]\n for i in word1.hyponyms():\n list1.append(i)\n list2 = [word2]\n for i in word2.hyponyms():\n list2.append(i)\n\n #Creates a list of each of the above words' definitions, tokenized\n words1 = []\n words2 = []\n for i in list1:\n words1.append([l for l in word_tokenize(i.definition())])\n for i in list2:\n words2.append([l for l in word_tokenize(i.definition())])\n\n #Calculates the Maximum length of the Longest Definition\n lengths = []\n lengths.extend(len(l) for l in words1)\n lengths.extend(len(l) for l in words2)\n maxim = max(lengths)\n\n igramcount = []\n igram1 = []\n igram2 = []\n\n # Creates N-grams for each definition for each N, from 1:max(lengths)\n for i in range(int(maxim)):\n for g in words1:\n for l in ngrams(g, i+1):\n igram1.append(l)\n for f in words2:\n for m in ngrams(f, i+1):\n igram2.append(m)\n\n #For Each N-gram in the first set, which matches that of the Second set,\n # Denoting a form of \"Similarity\" between the two definitions,\n # Record the Value of N into a new List, igramcount.\n for x in set(igram1):\n if x in set(igram2):\n igramcount.append(i + 1)\n\n igram1 = []\n igram2 = []\n\n #Square the values of igramcount, and return the sum as the value of Extended Lesk.\n squared = [number**2 for number in igramcount]\n return sum(squared)", "def merge_words(w, z):\n\n list_w = list(w)\n list_z = list(z)\n\n return recursive_build_list(\"\", list_w, list_z)", "def edits2(word):\r\n return (e2 for e1 in edits1(word) for e2 in edits1(e1))", "def __add__(self, other):\n if isinstance(other, Token):\n return Token(self.text + other.text, self.position, self.category)\n else:\n return Token(self.text + other, self.position, self.category)", "def newPassword(a, b):\n if len(a) > len(b):\n return combine(a[:len(b)], b) + a[len(b):]\n elif len(a) < len(b):\n return combine(a, b[:len(a)]) + b[len(a):]\n else:\n return combine(a, b)", "def interleave(one, other):\r\n\r\n inter = \"\"\r\n for i in range(len(one)):\r\n inter = inter + (one[i] + other[i])\r\n return inter", "def __iadd__(self, other):\n if isinstance(other, Token):\n new = Token(self.text + other.text, self.position, self.category)\n else:\n new = Token(self.text + other, self.position, self.category)\n return new", "def MergeLogic(self) -> str:", "def lev(w1, w2):\n\n if len(w1) < len(w2):\n # check if length of word1 is smaller than word2.\n # if so, call function and switch parameters\n return lev(w2, w1)\n elif len(w1) == 0:\n # if the length of word1 equals 0, that means that\n # the Lev' distance is the length of word2\n return len(w2)\n elif len(w2) == 0:\n # if the length of word2 equals 0, that means that\n # the Lev' distance is the length of word1\n return len(w1)\n elif w1 == w2:\n # check if words are simply the same\n return 0\n\n # thanks to the check above, we can assume that w2 is the longest word\n # we use this information to determine the range of 'previous'\n previous = range(len(w2) + 1)\n\n # DEBUG\n # print(previous)\n\n # iterate over the characters of the first word\n for a, char1 in enumerate(w1):\n # DEBUG\n # print(\"i -> \" + str(a))\n # print(\"char1 -> \" + str(char1))\n\n current = [a 
+ 1]\n\n # iterate over the characters of the second word\n for b, char2 in enumerate(w2):\n # DEBUG\n # print(\"j -> \" + str(b))\n # print(\"\\tchar2 -> \" + str(char2))\n\n inserts = previous[b + 1] + 1\n deletions = current[b] + 1\n subs = previous[b] + (char1 != char2)\n\n # DEBUG\n # print(str(char1 != char2))\n # print(\"INSERTS -> \" + str(inserts))\n # print(\"DELS -> \" + str(deletions))\n # print(\"SUBS -> \" + str(subs))\n\n current.append(min(inserts, deletions, subs))\n\n # DEBUG\n # print(\"CURRENT -> \" + str(current))\n previous = current\n\n return previous[-1]", "def merge(self, tokens):\n tokens = iter(tokens)\n (lasttype, lastval) = tokens.next()\n for ttype, value in tokens:\n if ttype is lasttype:\n lastval += value\n else:\n yield(lasttype, lastval)\n (lasttype, lastval) = (ttype, value)\n if lastval.endswith('\\n'):\n lastval = lastval[:-1]\n if lastval:\n yield(lasttype, lastval)", "def _extend(cls, li1, li2):\n return li1 + li2", "def merge(it):\n s = it[0]\n for i in it[1:]:\n s = s + i\n s = normalize(s)\n return normalize(s)", "def _merge_majorana_terms(left_term, right_term):\n merged_term = []\n parity = 0\n i, j = 0, 0\n while i < len(left_term) and j < len(right_term):\n if left_term[i] < right_term[j]:\n merged_term.append(left_term[i])\n i += 1\n elif left_term[i] > right_term[j]:\n merged_term.append(right_term[j])\n j += 1\n parity += len(left_term) - i\n else:\n parity += len(left_term) - i - 1\n i += 1\n j += 1\n if i == len(left_term):\n merged_term.extend(right_term[j:])\n else:\n merged_term.extend(left_term[i:])\n return tuple(merged_term), parity % 2", "def compareLemma(l1, l2, cwn):\n l1 = f\"^{l1}$\"\n l2 = f\"^{l2}$\"\n \n l1_lst = [str(li).replace('<CwnSense', '').replace('>', '').replace('(', '').replace(')', '') for li in cwn.find_senses(lemma=l1)]\n l2_lst = [str(li).replace('<CwnSense', '').replace('>', '').replace('(', '').replace(')', '') for li in cwn.find_senses(lemma=l2)]\n \n if len(l1_lst) < len(l2_lst):\n zip_lst = zip(l2_lst, l1_lst)\n long = l2_lst\n short = l1_lst\n else:\n zip_lst = zip(l1_lst, l2_lst)\n long = l1_lst\n short = l2_lst\n \n # Print paired list\n for i, (a, b) in enumerate(zip_lst):\n #print(f\"{a:50}{b:>50}\")\n print(f\"{i+1:>3} {a.ljust(45, ' ')}{b}\") # 使用全形空白\n # Print remaining list\n if len(long) > len(short):\n for item in long[len(list(zip_lst)):]:\n i += 1\n print(f\"{i+1:>3} {item}\")", "def lemmatize_text_rus(text):\n text_lemm, text_sent = lemmatize_texts_rus([text])\n text_lemm, text_sent = text_lemm[0], text_sent[0]\n return text_lemm, text_sent", "def __add__(self, other):\n space = '' if other.type == 'punctuation' else ' '\n new = Word('0.0', '0.0', self.word + space + other.word, self.type)\n new.start = self.start\n new.end = other.end\n return new", "def _union(cls, s1, s2):\n return s1.union(s2)", "def encode(lang1, lang2):\n lang1 = [tokenizer_pt.vocab_size] + tokenizer_pt.encode(\n lang1.numpy()) + [tokenizer_pt.vocab_size + 1]\n\n lang2 = [tokenizer_en.vocab_size] + tokenizer_en.encode(\n lang2.numpy()) + [tokenizer_en.vocab_size + 1]\n\n return lang1, lang2", "def merge(*args):\n return _libsbml.Unit_merge(*args)", "def join_union(self, other):\n\n assert type(self) is type(other), 'Expected NestedRE instance'\n\n A = self.make_flat()\n B = other.make_flat()\n\n if A == B and A !='ϵ':\n return self.merge_union(A, [self.closure, other.closure])\n elif A == 'ϵ' and B == 'ϵ':\n return NestedRE('ϵ')\n elif A == 'ϵ':\n return NestedRE(B, '?')\n elif B == 'ϵ':\n return NestedRE(A, '?')\n 
else:\n return NestedRE( '(' + A + '|' + B + ')' )", "def merge_extras(extras1, extras2):\n if not extras1:\n return extras2\n if not extras2:\n return extras1\n return tuple(sorted(set(extras1) | set(extras2)))", "def join_op(it1, it2):\n\n d = defaultdict(list)\n for tpl in it1:\n d[tpl[0]].append(tpl)\n for tpl in it2:\n matches = d[tpl[0]]\n for match in matches:\n yield match + tpl[1:]", "def lemmatize_fun(self):\n tokens = str(self.doc).split()\n cleaned_tokens = None\n if self.lemmatize_method == 'wordnet':\n cleaned_tokens = [self.lemmatizer.lemmatize(token) for token in tokens]\n else:\n cleaned_tokens = [self.lemmatizer.stem(token) for token in tokens]\n \n self.doc = ' '.join(cleaned_tokens)", "def concat(self, other):\n assert isinstance(other, Tuple)\n return Tuple(self.spaces + other.spaces)", "def union(self, other):\n return PermClass([S_1 + S_2 for S_1, S_2 in zip(self, other)])", "def Unit_merge(*args):\n return _libsbml.Unit_merge(*args)", "def merge(self, other):\n result = IntSet(0)\n len_first = len(self)\n len_second = len(other)\n if len_first >= len_second:\n for i in range(len_second):\n num = self._nums[i] + other[i]\n for digit in self._digits(num):\n result.append(digit)\n for i in range(len_second, len_first):\n for digit in self._digits(self._nums[i]):\n result.append(digit)\n else:\n for i in range(len_first):\n num = self._nums[i] + other[i]\n for digit in self._digits(num):\n result.append(digit)\n for i in range(len_first, len_second):\n for digit in self._digits(other[i]):\n result.append(digit)\n return result" ]
[ "0.5477292", "0.54750645", "0.54190856", "0.5407232", "0.540251", "0.536537", "0.526485", "0.5264185", "0.5249599", "0.523565", "0.5216812", "0.52129334", "0.52117616", "0.5202586", "0.51841414", "0.51818204", "0.51625794", "0.5160106", "0.515256", "0.5100521", "0.50957334", "0.50836444", "0.506496", "0.5058121", "0.50492483", "0.5045363", "0.50351167", "0.50312954", "0.50110537", "0.5008396" ]
0.5588235
0
A method which allows for retrieval of a list of Wikidata entities. The method generates a list of tuples where the first value in the tuple is the entity's ID, whereas the second is the new instance of a subclass of BaseEntity containing all the data of the entity. This is most useful for mass retrieval of entities.
def generate_entity_instances(entities: str | list[str], allow_anonymous: bool = True, **kwargs: Any) -> list[tuple[str, BaseEntity]]:
    from wikibaseintegrator.entities.baseentity import BaseEntity

    if isinstance(entities, str):
        entities = [entities]

    assert isinstance(entities, list)

    params = {
        'action': 'wbgetentities',
        'ids': '|'.join(entities),
        'format': 'json'
    }

    reply = mediawiki_api_call_helper(data=params, allow_anonymous=allow_anonymous, **kwargs)

    entity_instances = []
    for qid, v in reply['entities'].items():
        from wikibaseintegrator import WikibaseIntegrator
        wbi = WikibaseIntegrator()
        f = [x for x in BaseEntity.__subclasses__() if x.ETYPE == v['type']][0]
        ii = f(api=wbi).from_json(v)
        entity_instances.append((qid, ii))

    return entity_instances
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_datastore(entity):\n if not entity:\n return None\n if isinstance(entity, list):\n entity = entity.pop()\n return [entity['title'],entity['author'],entity['date'],entity['recipe']]", "def get_entities(self, clean=False):\n return list(self.iter_entities(clean=clean))", "def get_entities(self):\n return list(self._entities.values())", "def _getData(self, entity, params):\n\n res = []\n entity_code = entity.code\n conn = self._connect(entity)\n try:\n conn.create_function(\"INLIST\", 2, self._inlist)\n\n conn.row_factory = sqlite3.Row\n cursor = conn.cursor()\n\n if not self.exists(entity_code, cursor):\n self.generate_entity(entity)\n\n my_departments = \"\"\n my_users = \"\"\n for column in entity.definition[\"columns\"]:\n if \"entityFilterByDepartment\" in column or column[\"type\"] == \"departmentSelector\":\n my_departments = self.getMyDepartments()\n if \"entityFilterByUser\" in column or column[\"type\"] == \"userSelector\":\n my_users = self.getMyUsers()\n\n # Create columnames for each column in entity metadata. Adding too related fields\n columnNames = \"A.id\"\n leftJoin = \"\"\n letter = \"B\"\n thisEntityHaveDepartmentFilter = False\n thisEntityHaveUserFilter = False\n for column in entity.definition[\"columns\"]:\n\n if column[\"type\"] in [\"numeric\", \"text\"]:\n columnNames += f\", A.[{column['field']}]\"\n\n elif column[\"type\"] == \"dateTime\":\n columnNames += f\", strftime('%Y-%m-%d',{column['field']}) as [{column['field']}]\"\n\n elif column[\"type\"] in [\"dropdown\", \"remoteDropdown\"]:\n columnNames += f\", A.[{column['field']}]\"\n columnNames += f\", {letter}.[{column['entityLabel']}] as {letter}_label\"\n leftJoin += f\" LEFT JOIN [{column['entity']}] as {letter} ON {letter}.id = A.{column['field']} \"\n\n if \"entityFilterByDepartment\" in column:\n leftJoin += f' AND ( {letter}.departments is null or INLIST({letter}.departments,\"{my_departments}\") = 1 ) '\n if \"entityFilterByUser\" in column:\n leftJoin += f' AND ( {letter}.users is null or INLIST({letter}.users,\"{my_users}\") = 1 ) '\n\n letter = self.getNextLetter(letter)\n\n elif column[\"type\"] == \"departmentSelector\":\n columnNames += f\", A.[departments]\"\n thisEntityHaveDepartmentFilter = True\n\n elif column[\"type\"] == \"userSelector\":\n columnNames += f\", A.[users]\"\n thisEntityHaveUserFilter = True\n\n elif column[\"type\"] == \"relatedEntity\":\n columnNames += f\", {letter}.[{column['entityLabel']}] as {column.field}\"\n if \"relatedColumnRelation\" in column and column[\"relatedColumnRelation\"]:\n left_on = str(column['relatedColumnRelation']).replace(\n \"#entity#\", \"A\").replace(\"#relatedEntity#\", letter)\n leftJoin += f\" LEFT JOIN [{column['entity']}] as {letter} ON {left_on} \"\n else:\n leftJoin += f\" LEFT JOIN [{column['entity']}] as {letter} ON {letter}.id = A.{column['relatedForeignKey']} \"\n letter = self.getNextLetter(letter)\n\n sortBy = \"A.ID\"\n if \"sortBy\" in params and params[\"sortBy\"]:\n sortBy = f'A.{params[\"sortBy\"]}'\n elif \"sortBy\" in entity.definition and entity.definition[\"sortBy\"]:\n sortBy = f'A.{entity.definition[\"sortBy\"]}'\n where = \"\"\n letter = \"B\"\n\n if thisEntityHaveDepartmentFilter:\n where = f' WHERE ( A.departments is null or INLIST(A.departments,\"{my_departments}\") = 1 ) '\n if thisEntityHaveUserFilter:\n where = f' WHERE ( A.users is null or INLIST(A.users,\"{my_users}\") = 1 ) '\n\n # Add filter for group in related entities\n for column in entity.definition[\"columns\"]:\n if column[\"type\"] in 
[\"dropdown\", \"remoteDropdown\"] and (\"entityFilterByDepartment\" in column or \"entityFilterByUser\" in column):\n where += \" AND \" if where else \" WHERE \"\n where += f'A.{column[\"field\"]} is null or A.{column[\"field\"]} is not null and {letter}.id is not null '\n letter = self.getNextLetter(letter)\n\n param_list = tuple()\n if \"filters\" in params and params[\"filters\"] and len(params[\"filters\"]) > 0:\n for filter_item in params[\"filters\"]:\n if \"values\" in filter_item and filter_item[\"values\"] and len(filter_item[\"values\"]) > 0:\n if where == \"\":\n where = \" WHERE \"\n else:\n where += \" AND \"\n\n if \".\" in str(filter_item[\"field\"]):\n mm_entity = \"MM\" + str(filter_item[\"field\"]).split(\".\")[0]\n mm_field = str(filter_item[\"field\"]).split(\".\")[1]\n if len(filter_item[\"values\"]) == 1:\n where += f\" {mm_entity}.[{mm_field}] = ?\"\n param_list += (append(filter_item[\"values\"][0]),)\n else:\n where += f\" {mm_entity}.[{mm_field}] IN ({','.join( filter_item['values'])})\"\n\n leftJoin += f\" INNER JOIN [{filter_item['field'].split('.')[0]}] as {mm_entity} ON {mm_entity}.{filter_item['relatedManyToManyKey']} = A.id \"\n else:\n if len(filter_item[\"values\"]) == 1:\n if filter_item[\"useLike\"]:\n where += f\" A.[{filter_item['field']}] LIKE ?\"\n param_list += (f\"%{filter_item['values'][0]}%\",)\n else:\n where += f\" A.[{filter_item['field']}] = ?\"\n param_list += (filter_item[\"values\"][0],)\n else:\n if filter_item[\"useLike\"]:\n where += \" ( 1=2 \"\n for filter_value in filter_item[\"values\"]:\n if filter_value:\n where += f\" OR A.[{filter_item['field']}] LIKE ?\"\n param_list += (f\"%{filter_value}%\",)\n where += \" ) \"\n else:\n where += f\" A.[{filter_item['field']}] IN ({','.join( filter_item['values'])})\"\n\n # Add fixed condition\n if \"condition\" in entity.definition and entity.definition[\"condition\"]:\n if where == \"\":\n where = \" WHERE \"\n else:\n where += \" AND \"\n where += entity.definition[\"condition\"]\n\n sql = f\"SELECT {columnNames} FROM {entity_code} as A {leftJoin}\"\n if where != \"\":\n sql += where\n\n sql += f\" ORDER BY {sortBy}\"\n\n if \"fromReg\" in params and params[\"fromReg\"] > 0 and \"toReg\" in params and params[\"toReg\"] > 0:\n sql += F\" LIMIT {params['fromReg']-1}, {params['toReg']-params['fromReg']+1} \"\n\n cursor.execute(sql, param_list)\n for row in cursor:\n dic = {\"id\": row[\"id\"]}\n letter = \"B\"\n\n for column in entity.definition[\"columns\"]:\n\n if column[\"type\"] in [\"numeric\", \"text\", \"dateTime\", \"date\"]:\n dic[column[\"field\"]] = row[column[\"field\"]]\n elif column[\"type\"] in [\"dropdown\", \"remoteDropdown\"]:\n dic[column[\"field\"]] = f\"{row[column['field']]}|-|{row[f'{letter}_label']}\"\n letter = self.getNextLetter(letter)\n elif column[\"type\"] == \"departmentSelector\":\n dic[\"departments\"] = row[\"departments\"]\n elif column[\"type\"] == \"userSelector\":\n dic[\"users\"] = row[\"users\"]\n elif column[\"type\"] == \"relatedEntity\":\n dic[column[\"field\"]] = row[column[\"field\"]]\n letter = self.getNextLetter(letter)\n\n res.append(dic)\n\n finally:\n conn.close()\n\n return res", "def entities(self):\n for f in self._children(EntityData):\n log.debug(\"RecordTypeData.entities: f %s\"%f) \n e = EntityData.load(self, f)\n if e:\n yield e\n return", "def extract_entities(self) :\n entities = []\n googleEntityList = self.googleLanguageModel.analyze_entities() \n watsonEntityList = self.watsonLanguageModel['entities']\n\n for entity in 
googleEntityList.entities[:self.entitySizeLimit]:\n if len(entity.metadata) > 0:\n entities.append({ 'name' : entity.name, 'metadata' : entity.metadata})\n \n for entity in watsonEntityList[:self.entitySizeLimit]: \n entities.append({ 'name': entity['text'], 'metadata': entity.get('disambiguation', {})}) \n\n return entities", "def entities(self, params=None, **kwargs):\n entities = entity_map()\n\n # Sort entities into type => <set of aliases>.\n type_to_aliases = {}\n for alias in entities:\n entity = entities[alias]\n\n if isinstance(entity, Facility):\n type_name = 'Facilities'\n elif isinstance(entity, Ship):\n type_name = 'Ships'\n elif isinstance(entity, Defense):\n type_name = 'Defense'\n elif isinstance(entity, Technology):\n type_name = 'Technology'\n\n if type_name not in type_to_aliases:\n type_to_aliases[type_name] = set()\n type_to_aliases[type_name].add(alias)\n\n nick = self.irc.source.split('!')[0]\n self.irc.reply('Sending list of entities to %s.' % nick)\n\n for type_name in type_to_aliases:\n aliases = sorted(list(type_to_aliases[type_name]))\n self.irc.privmsg(nick, '%s: %s' % (type_name, ', '.join(aliases)))", "def init_entities(self):\n if self.entity_provider == self.provider and self.entity_schema == self.data_schema:\n self.entity_session = self.session\n else:\n self.entity_session = get_db_session(provider=self.entity_provider, data_schema=self.entity_schema)\n\n if self.day_data:\n df = self.data_schema.query_data(\n start_timestamp=now_time_str(), columns=[\"entity_id\", \"timestamp\"], provider=self.provider\n )\n if pd_is_not_null(df):\n entity_ids = df[\"entity_id\"].tolist()\n self.logger.info(f\"ignore entity_ids:{entity_ids}\")\n if self.entity_filters:\n self.entity_filters.append(self.entity_schema.entity_id.notin_(entity_ids))\n else:\n self.entity_filters = [self.entity_schema.entity_id.notin_(entity_ids)]\n\n #: init the entity list\n self.entities = get_entities(\n session=self.entity_session,\n entity_schema=self.entity_schema,\n exchanges=self.exchanges,\n entity_ids=self.entity_ids,\n codes=self.codes,\n return_type=\"domain\",\n provider=self.entity_provider,\n filters=self.entity_filters,\n )", "def generate_entities(self, data):\r\n\t\t# create an empty dictionary to hold entities\r\n\t\tent_dic = {}\r\n\r\n\t\tfor row in data.itertuples():\r\n\t\t\t# feed nlp the first line's set of keywords\r\n\t\t\tdoc = self.nlp(row.keywords)\t\r\n\t\t\t# begin iterating through the nlp's entities\r\n\t\t\tfor ent in doc.ents:\r\n\r\n\t\t\t\t# For each entity, check if the label exists in 'ent_dic'.\r\n\t\t\t\t# If it does, append the entity into the key, value pair.\r\n\t\t\t\t# If it doesn't, create a new key, value pair\r\n\t\t\t\tkey = str(ent.label_) + ''\r\n\t\t\t\tif ent.label_ in ent_dic:\r\n\t\t\t\t\tent_dic[key].append(str(ent)) if not str(ent) in ent_dic[key] else print(f'The entity: {ent} is already in the array')\r\n\t\t\t\telse: \r\n\t\t\t\t\tent_dic[key] = [str(ent)]\r\n\r\n\t\t# return the dictionary of entities\r\n\t\treturn ent_dic", "def wb_get_entities(ids):\n if not ids:\n return {}\n\n # HTTP parameters\n params = {\n 'action': 'wbgetentities',\n 'format': 'json',\n }\n\n offset, items_per_batch = 0, 50\n batches = ((len(ids) + 1) // items_per_batch) + 1\n entities = {}\n for batch in range(batches):\n ids50 = ids[offset:offset + items_per_batch]\n offset += items_per_batch\n params[\"ids\"] = \"|\".join(ids50)\n\n response_data = requests.get(\n 'https://www.wikidata.org/w/api.php',\n headers=HEADERS, params=params).json()\n\n # 
TODO: Make informative/better error handling\n if 'error' in response_data:\n message = response_data['error'].get('info', '')\n message += \", id=\" + response_data['error'].get('id', '')\n raise Exception(message)\n\n if 'entities' in response_data:\n non_missing_entities = {\n id_: entity\n for id_, entity in response_data['entities'].items()\n if 'missing' not in entity}\n entities.update(non_missing_entities)\n\n if len(non_missing_entities) == 0:\n break\n\n return entities", "def deserialize_entities(data):\n if data is None:\n return None\n elif isinstance(data, str):\n # Just one instance\n return db.model_from_protobuf(data)\n else:\n return [db.model_from_protobuf(item) for item in data]", "def return_entity_collection(self, entities, request, environ,\n start_response, response_headers):\n response_type = self.content_negotiation(\n request, environ, self.FeedTypes)\n if response_type is None:\n return self.odata_error(\n request, environ, start_response, \"Not Acceptable\",\n 'xml, json or plain text formats supported', 406)\n entities.set_topmax(self.topmax)\n if response_type == \"application/json\":\n data = str('{\"d\":%s}' % ''.join(\n entities.generate_entity_set_in_json(request.version)))\n else:\n # Here's a challenge, we want to pull data through the feed\n # by yielding strings just load in to memory at the moment\n f = core.Feed(None, entities)\n doc = core.Document(root=f)\n f.collection = entities\n f.set_base(str(self.service_root))\n data = str(doc)\n data = data.encode('utf-8')\n response_headers.append((\"Content-Type\", str(response_type)))\n response_headers.append((\"Content-Length\", str(len(data))))\n start_response(\"%i %s\" % (200, \"Success\"), response_headers)\n return [data]", "def getEntityIds(type, subtype = None):\r\n\r\n # get a cursor\r\n conn = ecommerce.db.getConnection()\r\n cursor = conn.cursor()\r\n\r\n # decide the query to execute\r\n if type not in entityQueries:\r\n return [ ]\r\n\r\n # execute the query\r\n qparams = (type, )\r\n if subtype is not None:\r\n qparams = (type, subtype)\r\n cursor.execute(entityQueries[type], qparams)\r\n\r\n # fetch the ids\r\n elist = [ ]\r\n row = cursor.fetchone()\r\n while row is not None:\r\n elist.append(int(row[0]))\r\n row = cursor.fetchone()\r\n cursor.close()\r\n\r\n return elist", "def get_entities(self, row_id):\n return self.get(row_id).entities", "def make_entities():\n entities = [\n Creature((5, 5), 10, [], '*'),\n Creature((9, 5), 1, [], '@'),\n Creature((5, 9), 1, [], '@'),\n Potion((1, 2), 8, '#'),\n Weapon((2, 4), 5, '/'),\n Weapon((7, 1), 6, '(')\n ]\n return entities", "def entities(self):\n return self._entities", "def getentities(self):\n entities = {}\n\n # The following will create lots of errors in suds.client, one\n # for every type that is not an entity. 
Disable their logger\n # temporarily to avoid cluttering the log.\n sudslog = logging.getLogger('suds.client')\n sudssav = sudslog.disabled\n sudslog.disabled = True\n for t in self.gettypes():\n try:\n info = EntityInfo(t, self.client)\n except ICATError:\n continue\n entities[t] = info\n sudslog.disabled = sudssav\n\n return entities", "def from_tuples(tuples=list()):\n result = []\n for (user_id, creation_date, name, google_id, email , role_id, student_id) in tuples:\n person = Person()\n person.set_id(user_id)\n person.set_name(name)\n person.set_berechtigung(role_id)\n person.set_email(email)\n person.set_google_id(google_id)\n person.set_creation_date(creation_date)\n person.set_student(student_id)\n result.append(person)\n return result", "def map(self, entity):\n return ([], [])", "def _get_bids_entities(self, bids):\n ds_entities = list(bids.entities.keys())\n new_entities = {}\n # If the entity is in the main list AND in the list\n # created from the dataset (i.e. in BIDSLayout),\n # the entity values can be retrieved using\n # BIDSLayout.get_[entity]()\n for ent in BIDS_ENTITIES_MAPPING.keys():\n if ent in ds_entities:\n class_method = getattr(bids, f\"get_{ent}\")\n new_entities[ent] = class_method()\n # Delete key/value if value is empty list\n if not new_entities[ent]:\n del new_entities[ent]\n return new_entities", "def query_all(cls)->List:\n database.cursor.execute(\"SELECT * FROM {}\".format(cls.table_name))\n items = database.cursor.fetchall()\n return [cls.to_object(item) for item in items]", "def entities(self) -> List[Entity]:\n return [field for field in self._fields.values() if isinstance(field, Entity)]", "def _ProcessQueryResult(self, result):\n self.__more_results = result.more_results()\n\n if self.__keys_only:\n return [Key._FromPb(e.key()) for e in result.result_list()]\n else:\n return [Entity._FromPb(e) for e in result.result_list()]", "def _Get(self, count):\n if count > MAXIMUM_RESULTS:\n count = MAXIMUM_RESULTS\n entity_list = self._Next(count)\n while len(entity_list) < count and self.__more_results:\n next_results = self._Next(count - len(entity_list))\n if not next_results:\n break\n entity_list += next_results\n return entity_list;", "def iter_entities(self, clean=False):\n for d in self.iter_dicts(clean=clean):\n yield Entity(self.node, d)", "def return_entity(self, entity, request, environ, start_response,\n response_headers, status=200, status_msg=\"Success\"):\n response_type = self.content_negotiation(\n request, environ, self.EntryTypes)\n if response_type is None:\n return self.odata_error(\n request, environ, start_response, \"Not Acceptable\",\n 'xml, json or plain text formats supported', 406)\n # Here's a challenge, we want to pull data through the feed by\n # yielding strings just load in to memory at the moment\n if response_type == \"application/json\":\n data = str('{\"d\":%s}' %\n ''.join(entity.generate_entity_type_in_json()))\n else:\n doc = core.Document(root=core.Entry)\n e = doc.root\n e.set_base(str(self.service_root))\n e.set_value(entity)\n data = str(doc)\n data = data.encode('utf-8')\n response_headers.append((\"Content-Type\", str(response_type)))\n response_headers.append((\"Content-Length\", str(len(data))))\n self.set_etag(entity, response_headers)\n start_response(\"%i %s\" % (status, status_msg), response_headers)\n return [data]", "def get_many(cls, limit: int = 100, offset: int = 0):\n if limit > 100:\n raise ModelExceptions(\"It is not possible to list more than 100 resources.\")\n\n instance_list = 
DBSESSION.query(cls)\n instance_list = instance_list.order_by(cls.id)\n instance_list = instance_list.offset(offset)\n instance_list = instance_list.limit(limit)\n instance_list = instance_list.all()\n if not instance_list:\n raise ObjectNotFound(f\"No registers of {cls.str_representation} found\")\n\n return instance_list", "def fetch_all(self):\n return list(iter(self))", "def fetchall(self):\n rows = self.cursor.fetchall()\n\n if self.model.single:\n for row in rows:\n yield self.__instance_from_db(self.model, row)\n else:\n for row in rows:\n yield tuple(self.__instance_from_db(m, row) for m in self.model.models)", "def get_entities(self, data):\n\n entities = None\n\n if \"d\" in data:\n logger.debug(f\"'d' found.\")\n if \"results\" in data.get(\"d\"):\n logger.debug(f\"'d.results' found.\")\n entities = data[\"d\"].get(\"results\")\n else:\n entities = data.get(\"d\")\n elif \"value\" in data:\n logger.debug(f\"'value' found.\")\n entities = data.get(\"value\")\n else:\n logger.debug(f\"No entities found.\")\n\n return entities" ]
[ "0.64230967", "0.61319405", "0.6031639", "0.5971305", "0.59133303", "0.5893585", "0.5877375", "0.58596236", "0.58466774", "0.580628", "0.5791746", "0.57809865", "0.5752515", "0.57504046", "0.57445014", "0.57344115", "0.5728816", "0.57220846", "0.5711131", "0.56942236", "0.5651766", "0.56317717", "0.56196034", "0.5591618", "0.5581531", "0.5561442", "0.55576986", "0.55435014", "0.550455", "0.54909045" ]
0.6743911
0
Return a user agent string suitable for interacting with the Wikibase instance.
def get_user_agent(user_agent: str | None) -> str:
    from wikibaseintegrator import __version__
    wbi_user_agent = f"WikibaseIntegrator/{__version__}"

    if user_agent is None:
        return_user_agent = wbi_user_agent
    else:
        return_user_agent = user_agent + ' ' + wbi_user_agent

    return return_user_agent
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def user_agent(self):\n version = '{0}.{1}.{2}'.format(sys.version_info[0], sys.version_info[1], sys.version_info[2])\n return \"PAYNL/SDK/{0} Python/{1} ({2})\".format(self.client_version, version, sys.hexversion)", "def user_agent(self) -> str:\n return self.root_hartree.user_agent", "def user_agent(self):\n ua_list = [\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.101',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.122',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71',\n 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)',\n 'Mozilla/5.0 (Windows NT 5.1; U; en; rv:1.8.1) Gecko/20061208 Firefox/2.0.0 Opera 9.50',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0',\n ]\n return random.choice(ua_list)", "def user_agent(self):\n ua_list = [\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.101',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.122',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71',\n 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)',\n 'Mozilla/5.0 (Windows NT 5.1; U; en; rv:1.8.1) Gecko/20061208 Firefox/2.0.0 Opera 9.50',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0',\n ]\n return random.choice(ua_list)", "def UserAgent(self):\n return self._userAgent", "def user_agent(self):\n # type: () -> str\n return self.user_agent_policy.user_agent", "def build_user_agent(application_name, version, url):\n return '%s/%s %s/%s (+%s)' % (application_name, version,\n 'python-simplemediawiki', __version__, url)", "def getUA(self):\n\t\tself.script(\"return navigator.userAgent\")", "def default_user_agent(name=\"crawlit\"):\n #https://github.com/kennethreitz/requests/blob/master/requests/utils.py#L440\n _implementation = platform.python_implementation()\n\n if _implementation == 'CPython':\n _implementation_version = platform.python_version()\n elif _implementation == 'PyPy':\n _implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,\n sys.pypy_version_info.minor,\n sys.pypy_version_info.micro)\n if sys.pypy_version_info.releaselevel != 'final':\n _implementation_version = ''.join([_implementation_version, sys.pypy_version_info.releaselevel])\n elif _implementation == 'Jython':\n _implementation_version = platform.python_version() # Complete Guess\n elif _implementation == 'IronPython':\n _implementation_version = platform.python_version() # Complete Guess\n else:\n _implementation_version = 'Unknown'\n\n try:\n p_system = platform.system()\n p_release = platform.release()\n except IOError:\n p_system = 'Unknown'\n p_release = 'Unknown'\n\n return u\" \".join(['{0}/{1}'.format(name, get_version()),\n '%s/%s' % (_implementation, _implementation_version),\n '%s/%s' % (p_system, p_release)])", "def userAgent(self):\n raise NotImplementedError", "def 
format_user_agent(name=None):\n parts = ['TronAPI/%s' % tronapi.__version__,\n '%s/%s' % (platform.python_implementation(),\n platform.python_version())]\n if name:\n parts.insert(0, name)\n return ' '.join(parts)", "def build_user_agent():\n if any(key.startswith(prefix) for prefix in TESTING_ENV_PREFIXES for key in os.environ.keys()):\n testing = \" (testing) \"\n else:\n testing = \" \"\n os_platform = \"{0.system}/{0.release} ({0.machine})\".format(utils.get_os_platform())\n return \"charmcraft/{}{}{} python/{}\".format(\n __version__, testing, os_platform, platform.python_version()\n )", "def user_agent_identifier():\n client_info = (get_version(), platform.system(), platform.machine())\n return \"txclient/%s (%s %s)\" % client_info", "def user_agent(name, version):\n\n def _interpreter():\n name = platform.python_implementation()\n version = platform.python_version()\n bitness = platform.architecture()[0]\n if name == 'PyPy':\n version = '.'.join(map(str, sys.pypy_version_info[:3]))\n full_version = [version]\n if bitness:\n full_version.append(bitness)\n return name, \"-\".join(full_version)\n\n tags = [\n (name, version),\n (\"python\", platform.python_version()),\n _interpreter(),\n (\"machine\", platform.machine() or 'unknown'),\n (\"system\", platform.system() or 'unknown'),\n (\"platform\", platform.platform() or 'unknown'),\n ]\n\n return ' '.join(\"{}/{}\".format(name, version) for name, version in tags)", "def user_agent(self):\n return self._session.headers[\"User-Agent\"]", "def view_user_agent():\n\n headers = get_headers()\n\n return jsonify({\"user-agent\": headers[\"user-agent\"]})", "def get_iothub_user_agent() -> str:\n return \"{iothub_iden}/{version}{common}\".format(\n iothub_iden=IOTHUB_IDENTIFIER,\n version=VERSION,\n common=_get_common_user_agent(),\n )", "def user_agent():\n ua_list = [\n\"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50\",\n\"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50\",\n\"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0);\",\n\"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)\",\n\"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)\",\n\"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)\",\n\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1\",\n\"Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1\",\n\"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11\",\n\"Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11\",\n\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 \",\n]\n return random.choice(ua_list)", "def get_manubot_user_agent() -> str:\n try:\n from manubot import __version__ as manubot_version\n except ImportError:\n manubot_version = \"\"\n return (\n f\"manubot/{manubot_version} \"\n f\"({platform.system()}; Python/{sys.version_info.major}.{sys.version_info.minor}) \"\n f\"<{contact_email}>\"\n )", "def userAgentForUrl(self, url):\n return \"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36\"", "def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:\n ua = f\"transformers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}\"\n if is_torch_available():\n ua += f\"; torch/{_torch_version}\"\n if is_tf_available():\n ua += f\"; 
tensorflow/{_tf_version}\"\n if DISABLE_TELEMETRY:\n return ua + \"; telemetry/off\"\n if is_training_run_on_sagemaker():\n ua += \"; \" + \"; \".join(f\"{k}/{v}\" for k, v in define_sagemaker_information().items())\n # CI will set this value to True\n if os.environ.get(\"TRANSFORMERS_IS_CI\", \"\").upper() in ENV_VARS_TRUE_VALUES:\n ua += \"; is_ci/true\"\n if isinstance(user_agent, dict):\n ua += \"; \" + \"; \".join(f\"{k}/{v}\" for k, v in user_agent.items())\n elif isinstance(user_agent, str):\n ua += \"; \" + user_agent\n return ua", "def get_ua(self):\n with self.lock:\n return choice(self.user_agents)", "def get_user_agent(platform=None):\n if isinstance(platform, ustr):\n platform = platform.upper()\n return {\"chrome\": AGENT_CHROME, \"edge\": AGENT_EDGE, \"ios\": AGENT_IOS}.get(\n platform, random.choice(AGENT_ALL)\n )", "def option_user_agent(self):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionUserAgent/')))", "def user_agent_f(x: Text) -> Tuple[Text, Text]:\n return \"userAgent\", x", "def get_user_agent(faked=False):\n if faked:\n agent = 'curl/7.21.4 (universal-apple-darwin11.0) libcurl/7.21.4 OpenSSL/0.9.8r zlib/1.2.5'\n\n else:\n from bowerer import VERSION\n from platform import platform\n agent = 'bowerer/%s (%s)' % ('.'.join(map(str, VERSION)), platform(terse=True))\n\n return agent", "def get_user_agent_from_request(request):\n from user_agents import parse\n return parse(request.META.get('HTTP_USER_AGENT', ''))", "def user_agent():\n headers = [\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/603.2.4 (KHTML, like Gecko) Version/10.1.1 Safari/603.2.4',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko',\n 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) 
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36 Edge/15.15063',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36 Edge/14.14393',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (X11; Linux x86_64; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/59.0.3071.109 Chrome/59.0.3071.109 Safari/537.36',\n 'Mozilla/5.0 (X11; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0',\n 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',\n 'Mozilla/5.0 (Windows NT 6.1; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36 OPR/46.0.2597.57',\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',\n 'Mozilla/5.0 (iPad; CPU OS 10_3_2 like Mac OS X) AppleWebKit/603.2.4 (KHTML, like Gecko) Version/10.0 Mobile/14F89 Safari/602.1',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.1 Safari/603.1.30',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0',\n 'Mozilla/5.0 (Windows NT 5.1; rv:52.0) Gecko/20100101 Firefox/52.0',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',\n 'Mozilla/5.0 (X11; Linux x86_64; 
rv:45.0) Gecko/20100101 Firefox/45.0',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/603.2.5 (KHTML, like Gecko) Version/10.1.1 Safari/603.2.5',\n 'Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',\n 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; Trident/5.0)',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:55.0) Gecko/20100101 Firefox/55.0',\n 'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',\n 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/5.0; Trident/5.0)',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (iPad; CPU OS 10_3_3 like Mac OS X) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.0 Mobile/14G60 Safari/602.1',\n 'Mozilla/5.0 (X11; Fedora; Linux x86_64; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/603.2.5 (KHTML, like Gecko) Version/10.1.1 Safari/603.2.5',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8',\n 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; Touch; rv:11.0) like Gecko',\n 'Mozilla/5.0 (Windows NT 6.1; rv:52.0) Gecko/20100101 Firefox/52.0',\n 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:55.0) Gecko/20100101 Firefox/55.0',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.86 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/602.4.8 (KHTML, like Gecko) Version/10.0.3 Safari/602.4.8',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.104 Safari/537.36',\n ]\n return {'User-Agent': headers[random.randrange(0, len(headers))]}", "def _random_user_agent(self):\n try:\n ua = UserAgent()\n return ua.random\n except:\n default_ua = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) \\\n AppleWebKit/537.36 (KHTML, like Gecko) \\\n Chrome/58.0.3029.110 Safari/537.36'\n return default_ua", "def get_new_user_agent(self):\n new_user_agent = user_agent.generate_navigator()[\"user_agent\"]\n if new_user_agent == self.user_agent:\n self.get_new_user_agent()\n\n return new_user_agent" ]
[ "0.7373288", "0.7214154", "0.71819866", "0.71819866", "0.7149229", "0.7119391", "0.702122", "0.70164865", "0.69798416", "0.6925093", "0.68710935", "0.6868542", "0.6849086", "0.6776181", "0.6752655", "0.66153026", "0.6526037", "0.64982563", "0.6452998", "0.6432067", "0.6342217", "0.62417763", "0.62027854", "0.61219877", "0.61214817", "0.60775894", "0.6046527", "0.59784245", "0.5809818", "0.57786447" ]
0.7788731
0
Displays 2D cross-sections of a 3D image along all 3 axes
def show_brain(img, cut_coords=None, figsize=(10,5), cmap="nipy_spectral", draw_cross = True, return_fig = False ): if(isinstance(img, str) and os.path.isfile(img)): img_arr = load_nifti(img) elif(isinstance(img, nibabel.Nifti1Image)): img_arr = img.get_data() elif(isinstance(img, np.ndarray)): assert img.ndim == 3, "The numpy.ndarray must be 3-dimensional with shape (H x W x Z)" img_arr = img else: raise TypeError("Invalid type provided for 'img'- {}. \ Either provide a 3-dimensional numpy.ndarray of a MRI image or path to \ the image file stored as a nifTI format.".format(type(img))) # print(img_arr.shape) # img_arr = np.moveaxis(img_arr, 0, 1) # print(img_arr.shape) x_len, y_len, z_len = img_arr.shape # if cut_coordinates is not specified set it to the center of the image if(cut_coords == None): cut_coords = (x_len//2, y_len//2, z_len//2) f, ax = plt.subplots(nrows=1, ncols=3, figsize=figsize) ax[0].set_title("Saggital cross-section at x={}".format(cut_coords[0])) ax[0].imshow( np.rot90(img_arr[cut_coords[0],:,:]), cmap=cmap, aspect="equal") #draw cross if(draw_cross): ax[0].axvline(x=cut_coords[1], color='k', linewidth=1) ax[0].axhline(y=cut_coords[2], color='k', linewidth=1) ax[1].set_title("Coronal cross-section at y={}".format(cut_coords[1])) ax[1].imshow( np.rot90(img_arr[:,cut_coords[1],:]), cmap=cmap, aspect="equal") ax[1].text(0.05, 0.95,'L', horizontalalignment='left', verticalalignment='top', transform=ax[1].transAxes , bbox=dict(facecolor='white') ) ax[1].text(0.95, 0.95,'R', horizontalalignment='right', verticalalignment='top' , transform=ax[1].transAxes , bbox=dict(facecolor='white') ) #draw cross if(draw_cross): ax[1].axvline(x=cut_coords[0], color='k', linewidth=1) ax[1].axhline(y=cut_coords[2], color='k', linewidth=1) ax[2].set_title("Axial cross-section at z={}".format(cut_coords[2])) ax[2].imshow( np.rot90(img_arr[:,:,cut_coords[2]]), cmap=cmap, aspect="equal" ) ax[2].text(0.05, 0.95,'L' , horizontalalignment='left', verticalalignment='top' , transform=ax[2].transAxes , bbox=dict(facecolor='white') ) ax[2].text(0.95, 0.95,'R', horizontalalignment='right', verticalalignment='top' , transform=ax[2].transAxes , bbox=dict(facecolor='white') ) #draw cross if(draw_cross): ax[2].axvline(x=cut_coords[0], color='k', linewidth=1) ax[2].axhline(y=cut_coords[1], color='k', linewidth=1) plt.tight_layout() if return_fig: return f
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_current_pair_by_3d_slice(iS,iT):\n import matplotlib.pyplot as plt\n import easyreg.viewers as viewers\n fig, ax = plt.subplots(2,3)\n plt.setp(plt.gcf(), 'facecolor', 'white')\n plt.style.use('bmh')\n\n ivsx = viewers.ImageViewer3D_Sliced(ax[0][0], iS, 0, 'source X', True)\n ivsy = viewers.ImageViewer3D_Sliced(ax[0][1], iS, 1, 'source Y', True)\n ivsz = viewers.ImageViewer3D_Sliced(ax[0][2], iS, 2, 'source Z', True)\n\n ivtx = viewers.ImageViewer3D_Sliced(ax[1][0], iT, 0, 'target X', True)\n ivty = viewers.ImageViewer3D_Sliced(ax[1][1], iT, 1, 'target Y', True)\n ivtz = viewers.ImageViewer3D_Sliced(ax[1][2], iT, 2, 'target Z', True)\n\n\n feh = viewers.FigureEventHandler(fig)\n feh.add_axes_event('button_press_event', ax[0][0], ivsx.on_mouse_press, ivsx.get_synchronize, ivsx.set_synchronize)\n feh.add_axes_event('button_press_event', ax[0][1], ivsy.on_mouse_press, ivsy.get_synchronize, ivsy.set_synchronize)\n feh.add_axes_event('button_press_event', ax[0][2], ivsz.on_mouse_press, ivsz.get_synchronize, ivsz.set_synchronize)\n\n feh.add_axes_event('button_press_event', ax[1][0], ivtx.on_mouse_press, ivtx.get_synchronize, ivtx.set_synchronize)\n feh.add_axes_event('button_press_event', ax[1][1], ivty.on_mouse_press, ivty.get_synchronize, ivty.set_synchronize)\n feh.add_axes_event('button_press_event', ax[1][2], ivtz.on_mouse_press, ivtz.get_synchronize, ivtz.set_synchronize)\n\n feh.synchronize([ax[0][0], ax[1][0]])\n feh.synchronize([ax[0][1], ax[1][1]])\n feh.synchronize([ax[0][2], ax[1][2]])", "def print_image_slice_3d(input, num=0,direction=\"z\"):\n\t#print \"print slice at 3 directions\"\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tif(direction==\"x\"):\n\t\t#print \"xxxxx\"\n\t\tix=num\n\t\tprint \"(x = %d slice)\" % (ix)\n\t\tline = []\n\t\tfor iz in xrange(nz-1,-1,-1):\n\t\t\tline.append(\"Z \")\n\t\t\tline.append(\"%4i \" % iz)\n\t\t\tfor iy in xrange(ny):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((iy + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(ny%5 != 0): line.append(\"\\n\")\n\t\tprint \"\".join(line)\n\telif(direction==\"y\"):\n\t\t#print \"yyy\"\n\t\tiy=num\n\t\tprint \"(y = %d slice)\" % (iy)\n\t\tline = []\n\t\tfor iz in xrange(nz-1,-1,-1):\n\t\t\tline.append(\"Z \")\n\t\t\tline.append(\"%4i \" % iz)\n\t\t\tfor ix in xrange(nx):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(nx%5 != 0): line.append(\"\\n\")\n\t\tprint \"\".join(line)\n\telse:\n\t\t#print \"zzzz\"\n\t\tiz=num\n\t\tprint \"(z = %d slice)\" % (iz)\n\t\tline = []\n\t\tfor iy in xrange(ny-1,-1,-1):\n\t\t\tline.append(\"Row \")\n\t\t\tline.append(\"%4i \" % iy)\n\t\t\tfor ix in xrange(nx):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(nx%5 != 0): line.append(\"\\n\")\n\t\tprint \"\".join(line)", "def show_3D(im): # pragma: no cover\n im = ~np.copy(im)\n if im.ndim < 3:\n raise Exception('show_3D only applies to 3D images')\n im = spim.rotate(input=im, angle=22.5, axes=[0, 1], order=0)\n im = spim.rotate(input=im, angle=45, axes=[2, 1], order=0)\n im = spim.rotate(input=im, angle=-17, axes=[0, 1], order=0, reshape=False)\n 
mask = im != 0\n view = np.where(mask.any(axis=2), mask.argmax(axis=2), 0)\n view = view.max() - view\n f = view.max()/5\n view[view == view.max()] = -f\n view = (view + f)**2\n return view", "def cell_edges3d_cartesian(self, axis2, axis3):", "def drawCrossSection(self, zpos, axes, lo, hi, dest):\n\n opts = self.opts\n xax = axes[0]\n yax = axes[1]\n zax = axes[2]\n xmin = lo[xax]\n ymin = lo[yax]\n xmax = hi[xax]\n ymax = hi[yax]\n vertices = self.vertices.ravel('C')\n indices = self.indices\n\n dest.bindAsRenderTarget()\n dest.setRenderViewport(xax, yax, lo, hi)\n\n # Figure out the equation of a plane\n # perpendicular to the Z axis, and\n # located at the z position. This is\n # used as a clipping plane to draw\n # the mesh intersection.\n clipPlaneVerts = np.zeros((4, 3), dtype=np.float32)\n clipPlaneVerts[0, [xax, yax]] = [xmin, ymin]\n clipPlaneVerts[1, [xax, yax]] = [xmin, ymax]\n clipPlaneVerts[2, [xax, yax]] = [xmax, ymax]\n clipPlaneVerts[3, [xax, yax]] = [xmax, ymin]\n clipPlaneVerts[:, zax] = zpos\n\n planeEq = glroutines.planeEquation(clipPlaneVerts[0, :],\n clipPlaneVerts[1, :],\n clipPlaneVerts[2, :])\n\n gl.glClearColor(0, 0, 0, 0)\n\n gl.glClear(gl.GL_COLOR_BUFFER_BIT |\n gl.GL_DEPTH_BUFFER_BIT |\n gl.GL_STENCIL_BUFFER_BIT)\n\n gl.glEnableClientState(gl.GL_VERTEX_ARRAY)\n gl.glEnable(gl.GL_CLIP_PLANE0)\n gl.glEnable(gl.GL_CULL_FACE)\n gl.glEnable(gl.GL_STENCIL_TEST)\n gl.glFrontFace(gl.GL_CCW)\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)\n\n gl.glClipPlane(gl.GL_CLIP_PLANE0, planeEq)\n gl.glColorMask(gl.GL_FALSE, gl.GL_FALSE, gl.GL_FALSE, gl.GL_FALSE)\n\n # First and second passes - render front and\n # back faces separately. In the stencil buffer,\n # subtract the mask created by the second\n # render from the mask created by the first -\n # this gives us a mask which shows the\n # intersection of the mesh with the clipping\n # plane.\n gl.glStencilFunc(gl.GL_ALWAYS, 0, 0)\n direction = [gl.GL_INCR, gl.GL_DECR]\n\n # If the mesh coordinate transformation\n # has a negative determinant, it means\n # the back faces will be facing the camera,\n # so we need to render the back faces first.\n if npla.det(opts.getTransform('mesh', 'display')) > 0:\n faceOrder = [gl.GL_FRONT, gl.GL_BACK]\n else:\n faceOrder = [gl.GL_BACK, gl.GL_FRONT]\n\n for face, direction in zip(faceOrder, direction):\n\n gl.glStencilOp(gl.GL_KEEP, gl.GL_KEEP, direction)\n gl.glCullFace(face)\n\n gl.glVertexPointer(3, gl.GL_FLOAT, 0, vertices)\n gl.glDrawElements(gl.GL_TRIANGLES,\n len(indices),\n gl.GL_UNSIGNED_INT,\n indices)\n\n # Third pass - render the intersection\n # of the front and back faces from the\n # stencil buffer to the render texture.\n gl.glColorMask(gl.GL_TRUE, gl.GL_TRUE, gl.GL_TRUE, gl.GL_TRUE)\n\n gl.glDisable(gl.GL_CLIP_PLANE0)\n gl.glDisable(gl.GL_CULL_FACE)\n gl.glDisableClientState(gl.GL_VERTEX_ARRAY)\n\n gl.glStencilFunc(gl.GL_NOTEQUAL, 0, 255)\n\n gl.glColor(*opts.getConstantColour())\n\n # Disable alpha blending - we\n # just want the colour copied\n # into the texture as-is.\n with glroutines.disabled(gl.GL_BLEND):\n gl.glBegin(gl.GL_QUADS)\n gl.glVertex3f(*clipPlaneVerts[0, :])\n gl.glVertex3f(*clipPlaneVerts[1, :])\n gl.glVertex3f(*clipPlaneVerts[2, :])\n gl.glVertex3f(*clipPlaneVerts[3, :])\n gl.glEnd()\n\n gl.glDisable(gl.GL_STENCIL_TEST)\n\n dest.unbindAsRenderTarget()\n dest.restoreViewport()", "def showCortexImg(pV,nV):\n # object arrays of the positive and negative images\n pos_cort_img = np.empty(8, dtype=object)\n neg_cort_img = np.empty(8, dtype=object)\n for 
t in range(8):\n # cortical mapping functions\n lpos, rpos = cortex.cort_img(pV[:,t,:], L, L_loc, R, R_loc, cort_size, G)\n lneg, rneg = cortex.cort_img(nV[:,t,:], L, L_loc, R, R_loc, cort_size, G)\n pos_cort_img[t] = np.concatenate((np.rot90(lpos),np.rot90(rpos,k=3)),axis=1)\n neg_cort_img[t] = np.concatenate((np.rot90(lneg),np.rot90(rneg,k=3)),axis=1)\n # stack all images into a grid\n posRGcort = np.vstack((pos_cort_img[:4]))\n negRGcort = np.vstack((neg_cort_img[:4]))\n posYBcort = np.vstack((pos_cort_img[4:]))\n negYBcort = np.vstack((neg_cort_img[4:]))\n mergecort = np.concatenate((posRGcort,negRGcort,posYBcort,negYBcort),axis=1)\n return mergecort", "def plot_3D_compare_voxels(Y_data_test, Y_pred_data, X_data_test, ref_shape):\n sample_len = Y_data_test.shape[0]\n for i in np.arange(0, sample_len):\n true_lab = Y_data_test[i, ]\n true_loc = np.where(true_lab == 1)\n pred_lab = Y_pred_data[i, ]\n pred_loc = np.where(pred_lab == 1)\n volume = X_data_test[i, ]\n voxels = ~(volume==0)\n fig = plt.figure(i)\n ax = plt.axes(projection=\"3d\")\n axl = plt.gca()\n axl.set_xlim3d([0, ref_shape[0]])\n axl.set_ylim3d([0, ref_shape[1]])\n axl.set_zlim3d([0, ref_shape[2]])\n vx = fig.gca(projection='3d')\n vx.voxels(voxels, facecolors=volume, edgecolor='k')\n ax.scatter3D(true_loc[0], true_loc[1], true_loc[2], marker=\".\", alpha=0.9)\n ax.scatter3D(pred_loc[0], pred_loc[1], pred_loc[2], marker=\".\", alpha=0.05)\n\n fig.set_facecolor('black')\n ax.set_facecolor('black')\n ax.grid(False)\n ax.w_xaxis.pane.fill = False\n ax.w_yaxis.pane.fill = False\n ax.w_zaxis.pane.fill = False\n\n ax.set_xlabel('Width', c='white')\n ax.set_ylabel('Depth', c='white')\n ax.set_zlabel('Height', c='white')\n\n plt.show()", "def __c3dSeg(self, bg, seg, tgPng, scale, opacity):\n for axes in ['x', 'y', 'z']:\n cmd = 'c3d ' + bg + ' -scale ' + scale + ' ' + seg + ' '\n cmd += '-foreach -slice ' + axes + ' 50% -endfor '\n cmd += '-oli ' + os.path.join(self.toadDir, \"templates/lookup_tables/\") + 'FreeSurferColorLUT_ItkSnap.txt ' + opacity + ' -type uchar -omc ' + axes + '.png'\n self.launchCommand(cmd)\n cmd = 'pngappend x.png + y.png + z.png ' + tgPng\n self.launchCommand(cmd)\n cmd = 'rm x.png y.png z.png'\n self.launchCommand(cmd)", "def planeSliceGFig3(uxmax, uymax, rF2, lc, ax, ay, m, n, npoints = 3000, gsizex = 2048, gsizey = 2048, comp = True):\n\n # Calculate coefficients\n alp = rF2*lc\n coeff = alp*np.array([1./ax**2, 1./ay**2])\n uF2x, uF2y = rF2*np.array([1./ax**2, 1./ay**2])\n\n # Calculate caustic intersections\n ucross = polishedRoots(causticEqSlice, uxmax, uymax, args = (alp, m, n, ax, ay))\n ncross = len(ucross)\n upcross = mapToUp(ucross.T, alp, ax, ay)\n p = np.argsort(upcross[0])\n upcross = upcross.T[p]\n ucross = ucross[p]\n print(upcross)\n print(ucross)\n\n # Calculate sign of second derivative at caustics\n sigs = np.zeros(ncross)\n for i in range(ncross):\n sigs[i] = np.sign(ax**2/rF2 + lc*(lensh(*[ucross[i][0], ucross[i][1]])[0]))\n print(sigs)\n\n # Set up quantities for proper u' plane slicing\n ymin = -m*uxmax + n\n ymax = m*uxmax + n\n if ymin < -uymax:\n xmin = (-uymax - n)/m\n ymin = m*xmin + n\n else:\n xmin = -uxmax\n if ymax > uymax:\n xmax = (uymax - n)/m\n ymax = m*xmax + n\n else:\n xmax = uxmax\n xx = np.linspace(gridToPixel(xmin, uxmax, gsizex/2), gridToPixel(xmax, uxmax, gsizex/2) - 1, gsizex)\n yy = np.linspace(gridToPixel(ymin, uymax, gsizey/2), gridToPixel(ymax, uymax, gsizey/2) - 1, gsizey)\n\n cdist = uxmax/(np.abs(100*lc))\n print(cdist)\n\n bound = np.insert(upcross, 0, 
np.array([[xmin, ymin]]), axis = 0) # set up boundaries\n bound = np.append(bound, np.array([[xmax, ymax]]), axis = 0)\n # print(bound)\n midpoints = [(bound[i] + bound[i+1])/2. for i in range(len(bound) - 1)] # find middle point between boundaries\n nzones = len(midpoints)\n nreal = np.zeros(nzones)\n print(nzones)\n for i in range(nzones): # find number of roots at each midpoint\n mpoint = midpoints[i]\n nreal[i] = len(findRoots(lensEq, 2*uxmax, 2*uymax, args = (mpoint, coeff), N = 1000))\n upxvecs = np.array([np.linspace(bound[i-1][0] + cdist, bound[i][0] - cdist, npoints) for i in range(1, ncross + 2)]) # generate upx vector\n # print(upxvecs)\n segs = np.asarray([lineVert(upx, m, n) for upx in upxvecs]) # generate slice across plane\n diff = difference(nreal) # determine number of complex solutions\n if comp == True:\n ncomplex = np.ones(nzones)*100\n for i in range(nzones):\n if diff[i] == 0 or diff[i] == -2:\n ncomplex[i] = 1\n elif diff[i] == -4:\n ncomplex[i] = 2\n elif diff[i] == 4:\n ncomplex[i] = 0\n else:\n ncomplex = np.zeros(nzones)\n \n print(nreal)\n print(ncomplex)\n\n # Solve lens equation at each coordinate\n allroots = rootFinder(segs, nreal, ncomplex, npoints, ucross, uxmax, uymax, coeff)\n \n # Calculate fields\n allfields = []\n for i in range(nzones):\n fields = obsCalc(GOfield, allroots[i], len(allroots[i][0]), npoints, 3, args=(rF2, lc, ax, ay))\n allfields.append(fields)\n\n # Construct uniform asymptotics\n asymp = uniAsymp(allroots, allfields, nreal, ncomplex, npoints, nzones, sigs)\n interp = UnivariateSpline(upxvecs.flatten(), asymp, s = 0)\n finx = np.linspace(xmin, xmax, 4*npoints)\n asymG = interp(finx)\n\n # KDI\n rx = np.linspace(-2*uxmax, 2*uxmax, gsizex)\n ry = np.linspace(-2*uymax, 2*uymax, gsizey)\n dux = 4*uxmax/gsizex\n duy = 4*uymax/gsizey\n extent = (-uxmax, uxmax, -uymax, uymax)\n ux, uy = np.meshgrid(rx, ry)\n lens = lensPhase(ux, uy, lc)\n lensfft = fft2(lens)\n geo = geoPhase(ux, uy, uF2x, uF2y)\n geofft = fft2(geo)\n fieldfft = lensfft*geofft\n field = fftshift(ifft2(fieldfft))\n soln = np.abs((dux*duy*field)**2/(4*pi**2*uF2x*uF2y))\n soln = soln[int(0.25*gsizex):int(0.75*gsizex), int(0.25*gsizey):int(0.75*gsizey)]\n\n # Plots\n fig = plt.figure(figsize = (15, 6), dpi = 100)\n grid = gs.GridSpec(1, 2)\n # tableax = plt.subplot(grid[1, :])\n # tableax2 = plt.subplot(grid[2, :])\n ax0, ax1 = plt.subplot(grid[0, 0]), plt.subplot(grid[0, 1])\n\n rx = np.linspace(-uxmax, uxmax, gsizex)\n ry = np.linspace(-uymax, uymax, gsizey)\n ux, uy = np.meshgrid(rx, ry)\n\n rx2 = np.linspace(xmin, xmax, gsizex)\n im0 = ax0.imshow(soln, origin = 'lower', extent = extent, aspect = 'auto') # Plot entire screen\n cbar = fig.colorbar(im0, ax = ax0)\n # cbar.set_label(r'$\\log{G}$', fontsize = 16)\n cbar.set_label('G', fontsize=18)\n cbar.ax.tick_params(labelsize=14)\n ucaus = causCurve([ux, uy], lc*np.array([uF2x, uF2y]))\n cs = plt.contour(np.linspace(-uxmax, uxmax, gsizex), ry, ucaus, levels = [0, np.inf], linewidths = 0)\n paths = cs.collections[0].get_paths()\n uppaths = []\n for p in paths:\n cuvert = np.array(p.vertices).T\n upx, upy = mapToUp(cuvert, alp, ax, ay)\n ax0.plot(upx, upy, color = 'white') # Plot caustic curves\n ax0.scatter(upcross.T[0], upcross.T[1], color = 'white')\n ax0.plot(rx2, rx2*m + n, color = 'white') # Plot observer motion\n ax0.set_xlabel(r\"$u'_x$\", fontsize = 18)\n ax0.set_ylim([-uymax, uymax])\n ax0.set_xlim([-uxmax, uxmax])\n ax0.set_ylabel(r\"$u'_y$\", fontsize = 18)\n ax0.tick_params(labelsize = 14)\n # ax0.set_title(\"Gain 
in the u' plane\")\n\n G = map_coordinates(soln.T, np.vstack((xx, yy))) # Plot gain along observer motion\n G = G - G[-1] + 1\n ax1.plot(rx2, G, color = 'blue', label = \"FFT gain\")\n for caus in upcross.T[0]:\n ax1.plot([caus, caus], [-10, 1000], ls = 'dashed', color = 'black')\n ax1.plot(finx, asymG, color = 'red', label = r\"$2^{nd}$ order GO gain\")\n ax1.set_ylim(-cdist, np.max(asymG) + 1.)\n ax1.set_xlim(np.min(rx2), np.max(rx2))\n ax1.set_xlabel(r\"$u'_x$\", fontsize = 18)\n ax1.set_ylabel('G', fontsize = 18)\n # ax1.set_title(\"Slice Gain\")\n ax1.tick_params(labelsize = 14)\n ax1.grid()\n ax1.legend(loc = 1, fontsize = 14)\n\n\n # col_labels = ['Parameter', 'Value'] # Create table with parameter values\n # if np.abs(dm/pctocm) < 1:\n # dmlabel = \"{:.2E}\".format(Decimal(dm/pctocm))\n # else:\n # dmlabel = str(dm/pctocm)\n # tablevals = [[r'$d_{so} \\: (kpc)$', np.around(dso/pctocm/kpc, 2)], [r'$d_{sl} \\: (kpc)$', np.around(dsl/pctocm/kpc, 3)], [r'$a_x \\: (AU)$', np.around(ax/autocm, 3)], [r'$a_y \\: (AU)$', np.around(ay/autocm, 3)], [r'$DM_l \\: (pc \\, cm^{-3})$', dmlabel], [r\"$\\nu$ (GHz)\", f/GHz], ['Slope', np.around(m, 2)], ['Offset', n]]\n # tableax.axis('tight')\n # tableax.axis('off')\n # table = tableax.table(cellText = np.asarray(tablevals).T, colWidths = np.ones(8)*0.045, rowLabels = col_labels, loc = 'center')\n # table.auto_set_font_size(False)\n # table.set_fontsize(11)\n # table.scale(2.5, 2.5)\n \n # row_label = ['Lens shape']\n # val = [['$%s$' % sym.latex(lensf)]]\n # tableax2.axis('tight')\n # tableax2.axis('off')\n # table2 = tableax2.table(cellText=val, colWidths=[0.0015*len(sym.latex(lensf))], rowLabels=row_label, loc='top')\n # table2.auto_set_font_size(False)\n # table2.set_fontsize(12)\n # table2.scale(2.5, 2.5)\n\n grid.tight_layout(fig, pad = 1.5)\n plt.show()\n return", "def plot_3D_compare(true_lab, pred_lab):\n ref_shape = [true_lab.shape[1], true_lab.shape[2], true_lab.shape[3]]\n true_loc = np.where(true_lab == 1)\n pred_loc = np.where(pred_lab == 1)\n fig = plt.figure()\n ax = plt.axes(projection=\"3d\")\n axl = plt.gca()\n axl.set_xlim3d([0, ref_shape[0]])\n axl.set_ylim3d([0, ref_shape[1]])\n axl.set_zlim3d([0, ref_shape[2]])\n ax.scatter3D(true_loc[0], true_loc[1], true_loc[2], marker=\".\", alpha=0.9)\n ax.scatter3D(pred_loc[0], pred_loc[1], pred_loc[2], marker=\".\", alpha=0.05)\n\n fig.set_facecolor('black')\n ax.set_facecolor('black')\n ax.grid(False)\n ax.w_xaxis.pane.fill = False\n ax.w_yaxis.pane.fill = False\n ax.w_zaxis.pane.fill = False\n\n ax.set_xlabel('Width', c='white')\n ax.set_ylabel('Depth', c='white')\n ax.set_zlabel('Height', c='white')\n\n plt.show()", "def showIntersections(self, ifOverlapped=True):\n if ifOverlapped:\n combined = np.sum(self.Intersections2D)\n combined.show()\n else:\n for c2 in self.Intersections2D:\n c2.show()", "def test():\n\n file = 'crosssection.dat'\n f = open(file,'r')\n lines = f.readlines()\n nline = len(lines)\n points = np.zeros(shape=(nline,4))\n sigtable = np.zeros(nline)\n for i in range(nline):\n points[i,0] = float(lines[i].split()[0])\n points[i,1] = float(lines[i].split()[1])\n points[i,2] = float(lines[i].split()[2])\n points[i,3] = float(lines[i].split()[3])\n sigtable[i] = float(lines[i].split()[4])\n\n nbin = 60\n npts = nline/nbin\n\n # checking lensing cross section against magnitude\n '''\n for i in range(npts):\n plt.plot(points[i*nbin:(i+1)*nbin,3],sigtable[i*nbin:(i+1)*nbin])\n plt.show()\n '''\n npts = npts/nbin\n\n # checking lensing cross section against velocity 
dispersion\n '''\n for i in range(nline):\n mask, = np.where((points[:,1]==points[i,1])&(points[:,0]==points[i,0])\\\n &(points[:,3]==points[i,3]))\n vel = points[mask,2]\n sigma = sigtable[mask]\n plt.plot(vel,sigma)\n plt.show()\n '''\n\n # checking lensing cross section against lens redshift\n #'''\n for i in range(3000,nline):\n mask, = np.where((points[:,1]==points[i,1])&(points[:,2]==points[i,2])\\\n &(points[:,3]==points[i,3]))\n print mask\n zl = points[mask,0]\n sigma = sigtable[mask]\n plt.plot(zl,sigma)\n plt.show()\n #'''\n\n # checking lensing cross section against source redshift\n for i in reversed(range(nline)):\n mask, = np.where((points[:,0]==points[i,0])&(points[:,2]==points[i,2])\\\n &(points[:,3]==points[i,3]))\n print mask\n zs = points[mask,1]\n sigma = sigtable[mask]\n plt.plot(zs,sigma)\n plt.show()", "def visualize_in_3d(self,**kwargs):\n fig = plt.figure(figsize=(7,7))\n ax = fig.add_subplot(111, projection='3d')\n\n points = np.vstack([\n c.to_matrix() for c in self.contours if c.inclusion\n ])\n points[:,:2] = points[:,:2] * self.scan.pixel_spacing\n\n # Center the points at the origin for \n # spherical coordinates conversion.\n points = points - points.mean(axis=0)\n\n # Triangulate the azimuth and zenith transformation.\n azimuth = np.arctan2(points[:,1],points[:,0])\n zenith = np.arccos(points[:,2] / np.linalg.norm(points,axis=1))\n azi_zen = np.c_[azimuth.flatten(),zenith.flatten()]\n triangles = Delaunay(azi_zen).simplices\n\n # Start the points at 0 on every axis.\n # This lets the axis ticks to be interpreted as length in mm.\n points = points - points.min(axis=0)\n\n ax.set_xlabel('length (mm)')\n ax.set_ylabel('length (mm)')\n ax.set_zlabel('length (mm)')\n\n # Plot the points.\n ax.plot_trisurf(points[:,0], points[:,1], points[:,2],\n triangles=triangles, **kwargs)\n plt.show()", "def vis_mechanically_coupled_regions(img_dir,output_dir,data,dbscn_length,dbscn_min_size,display_not_save=False):\n #Read in the image that is segmented/labelled for nuclei\n img=imread(img_dir)\n\n #save plots to show clusters\n fig = plt.figure(figsize=(6, 2))\n ax0 = fig.add_subplot(131)\n ax1 = fig.add_subplot(132)\n ax3 = fig.add_subplot(133)\n #show segmented image labels\n ax0.imshow(img,aspect='auto') \n ax0.axis('off')\n #nuclear centroid color-coded by their orientation\n img1=ax1.scatter(data[\"Y\"], data[\"X\"], c=data[\"angles\"],s=1)\n ax1.set_xlim(0,img.shape[0])\n ax1.set_ylim(img.shape[1],0)\n plt.colorbar(img1)\n ax1.axis('off')\n\n # plot the cluster assignments\n img3=ax3.scatter(data[data[\"clusters\"]> -1][\"Y\"], data[data[\"clusters\"]> -1][\"X\"], \n c=data[data[\"clusters\"]> -1][\"clusters\"],cmap=\"plasma\",s=1)\n ax3.set_xlim(0,img.shape[0])\n ax3.set_ylim(img.shape[1],0)\n ax3.axis('off')\n\n #add titles\n ax0.title.set_text('Segmented Image')\n ax1.title.set_text('Filtered Orientation')\n ax3.title.set_text('Clusters')\n\n if display_not_save:\n plt.show()\n else: \n plt.savefig((output_dir+\"/\"+img_dir.rsplit('/', 1)[-1][:-4]+\"_\"+str(dbscn_length)+\"_\"+ str(dbscn_min_size)+\".png\"),dpi=600, bbox_inches = 'tight',pad_inches = 0)\n fig.clf()\n plt.close(fig)\n plt.close('all')\n \n \n del fig,ax0,ax1,ax3,img1,img3", "def imshow_multiview_keypoints_3d(pose_result, skeleton=None, pose_kpt_color=None, pose_link_color=None, space_size=[8000, 8000, 2000], space_center=[0, -500, 800], kpt_score_thr=0.0):\n fig = plt.figure()\n ax = plt.axes(projection='3d')\n ax.set_xlim3d(space_center[0] - space_size[0] * 0.5, space_center[0] + space_size[0] * 
0.5)\n ax.set_ylim3d(space_center[1] - space_size[1] * 0.5, space_center[1] + space_size[1] * 0.5)\n ax.set_zlim3d(space_center[2] - space_size[2] * 0.5, space_center[2] + space_size[2] * 0.5)\n pose_kpt_color = np.array(pose_kpt_color)\n pose_kpt_color = pose_kpt_color[..., ::-1] / 255.0\n for kpts in pose_result:\n xs, ys, zs, scores = kpts.T\n valid = scores > kpt_score_thr\n ax.scatter(xs[valid], ys[valid], zs[valid], marker='o', color=pose_kpt_color[valid])\n for link, link_color in zip(skeleton, pose_link_color):\n link_indices = [_i for _i in link]\n xs_3d = kpts[link_indices, 0]\n ys_3d = kpts[link_indices, 1]\n zs_3d = kpts[link_indices, 2]\n kpt_score = kpts[link_indices, 3]\n if kpt_score.min() > kpt_score_thr:\n _color = np.array(link_color[::-1]) / 255.0\n ax.plot(xs_3d, ys_3d, zs_3d, color=_color)\n fig.tight_layout()\n fig.canvas.draw()\n img_w, img_h = fig.canvas.get_width_height()\n img_vis = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8).reshape(img_h, img_w, -1)\n img_vis = mmcv.rgb2bgr(img_vis)\n plt.close(fig)\n return img_vis", "def viz(self,slices):\n #layers_copy = deepcopy(self.layers)\n self.layers_copy = self.layers\n imgs = [torch.zeros([1,3,self.N_in,self.N_in])]\n \n for layer in self.layers:\n if isinstance(layer,nn.Conv2d):\n layer2 = nn.Conv2d(3,3,layer.kernel_size,layer.stride,layer.padding)\n imgs.append(layer2(imgs[-1]))\n else:\n imgs.append(layer(imgs[-1]))\n \n assert(len(self.projs) == len(imgs)-1)\n for proj,img in zip(self.projs[::-1],imgs[::-1]):\n (x1,x2),(y1,y2) = slices\n img[0,:,x1:x2+1,y1:y2+1] = 255\n slices = proj(slices)\n (x1,x2),(y1,y2) = slices\n imgs[0][0,:,x1:x2+1,y1:y2+1] = 255\n \n dim = int(np.floor(np.sqrt(len(self.layers))))+1\n fig,axes = plt.subplots(dim,dim,figsize=(10,10))\n for i,img in enumerate(imgs):\n a,b = np.unravel_index(i,(dim,dim))\n axes[a,b].imshow((img[0].detach().permute(1,2,0).numpy()).astype(np.uint8))\n axes[a,b].set_title(str(i))", "def _draw3dseg(ax, annot, idx1, idx2, c='r', alpha=1):\n\n ax.plot(\n [annot[idx1, 0], annot[idx2, 0]], [annot[idx1, 1], annot[idx2, 1]], [annot[idx1, 2], annot[idx2, 2]],\n c=c,\n alpha=alpha)", "def planeSliceGnoKDI(uxmax, uymax, rF2, lc, ax, ay, m, n, npoints = 5000, comp = True):\n\n # Calculate coefficients\n alp = rF2*lc\n coeff = alp*np.array([1./ax**2, 1./ay**2])\n uF2x, uF2y = rF2*np.array([1./ax**2, 1./ay**2])\n\n # Calculate caustic intersections\n ucross = polishedRoots(causticEqSlice, uxmax, uymax, args = (alp, m, n, ax, ay))\n ncross = len(ucross)\n upcross = mapToUp(ucross.T, alp, ax, ay)\n p = np.argsort(upcross[0])\n upcross = upcross.T[p]\n ucross = ucross[p]\n print(upcross)\n print(ucross)\n\n # Calculate sign of second derivative at caustics\n sigs = np.zeros(ncross)\n for i in range(ncross):\n sigs[i] = np.sign(ax**2/rF2 + lc*(lensh(*[ucross[i][0], ucross[i][1]])[0]))\n print(sigs)\n\n # Set up quantities for proper u' plane slicing\n ymin = -m*uxmax + n\n ymax = m*uxmax + n\n if ymin < -uymax:\n xmin = (-uymax - n)/m\n ymin = m*xmin + n\n else:\n xmin = -uxmax\n if ymax > uymax:\n xmax = (uymax - n)/m\n ymax = m*xmax + n\n else:\n xmax = uxmax\n\n cdist = uxmax/(np.abs(50*lc))\n print(cdist)\n\n bound = np.insert(upcross, 0, np.array([[xmin, ymin]]), axis = 0) # set up boundaries\n bound = np.append(bound, np.array([[xmax, ymax]]), axis = 0)\n midpoints = [(bound[i] + bound[i+1])/2. 
for i in range(len(bound) - 1)] # find middle point between boundaries\n nzones = len(midpoints)\n nreal = np.zeros(nzones)\n print(nzones)\n for i in range(nzones): # find number of roots at each midpoint\n mpoint = midpoints[i]\n nreal[i] = len(findRoots(lensEq, 2*uxmax, 2*uymax, args = (mpoint, coeff), N = 1000))\n upxvecs = np.array([np.linspace(bound[i-1][0] + cdist, bound[i][0] - cdist, npoints) for i in range(1, ncross + 2)]) # generate upx vector\n segs = np.asarray([lineVert(upx, m, n) for upx in upxvecs]) # generate slice across plane\n diff = difference(nreal) # determine number of complex solutions\n if comp == True:\n ncomplex = np.ones(nzones)*100\n for i in range(nzones):\n if diff[i] == 0 or diff[i] == -2:\n ncomplex[i] = 1\n elif diff[i] == -4:\n ncomplex[i] = 2\n elif diff[i] == 4:\n ncomplex[i] = 0\n else:\n ncomplex = np.zeros(nzones)\n \n print(nreal)\n print(ncomplex)\n\n # Solve lens equation at each coordinate\n allroots = rootFinder(segs, nreal, ncomplex, npoints, ucross, uxmax, uymax, coeff)\n \n # Calculate fields\n allfields = []\n for i in range(nzones):\n fields = obsCalc(GOfield, allroots[i], len(allroots[i][0]), npoints, 3, args=(rF2, lc, ax, ay))\n allfields.append(fields)\n\n # Construct uniform asymptotics\n asymp = uniAsymp(allroots, allfields, nreal, ncomplex, npoints, nzones, sigs)\n interp = UnivariateSpline(upxvecs.flatten(), asymp, s = 0)\n finx = np.linspace(xmin, xmax, 4*npoints)\n asymG = interp(finx)\n\n # Plots\n fig = plt.figure(figsize = (6, 10))\n # grid = gs.GridSpec(1, 2)\n # tableax = plt.subplot(grid[1, :])\n # tableax2 = plt.subplot(grid[2, :])\n # ax0, ax1 = plt.subplot(grid[0, 0]), plt.subplot(grid[0, 1])\n\n # rx = np.linspace(-uxmax, uxmax, gsizex)\n # ry = np.linspace(-uymax, uymax, gsizey)\n # ux, uy = np.meshgrid(rx, ry)\n\n # rx2 = np.linspace(xmin, xmax, gsizex)\n # im0 = ax0.imshow(soln, origin = 'lower', extent = extent, aspect = 'auto') # Plot entire screen\n # cbar = fig.colorbar(im0, ax = ax0)\n # cbar.set_label(r'$\\log{G}$', fontsize = 16)\n # cbar.set_label('G', fontsize=16)\n # ucaus = causCurve([ux, uy], lc*np.array([uF2x, uF2y]))\n # cs = plt.contour(np.linspace(-uxmax, uxmax, gsizex), ry, ucaus, levels = [0, np.inf], linewidths = 0)\n # paths = cs.collections[0].get_paths()\n # uppaths = []\n # for p in paths:\n # cuvert = np.array(p.vertices).T\n # upx, upy = mapToUp(cuvert, alp, ax, ay)\n # ax0.plot(upx, upy, color = 'white') # Plot caustic curves\n # ax0.scatter(upcross.T[0], upcross.T[1], color = 'white')\n # ax0.plot(rx2, rx2*m + n, color = 'white') # Plot observer motion\n # ax0.set_xlabel(r\"$u'_x$\", fontsize = 16)\n # ax0.set_ylim([-uymax, uymax])\n # ax0.set_xlim([-uxmax, uxmax])\n # ax0.set_ylabel(r\"$u'_y$\", fontsize = 16)\n # ax0.set_title(\"Gain in the u' plane\")\n\n # G = map_coordinates(soln.T, np.vstack((xx, yy))) # Plot gain along observer motion\n # G = G - G[-1] + 1\n fig = plt.figure(figsize = (7, 3), dpi = 100)\n ax1 = plt.subplot()\n # ax1.plot(rx2, G, color = 'blue', label = \"Gain from FFT\")\n for caus in upcross.T[0]:\n ax1.plot([caus, caus], [-10, 1000], ls = 'dashed', color = 'black')\n ax1.plot(finx, asymG, color = 'blue')\n ax1.set_ylim(-cdist, np.max(asymG) + 1.)\n ax1.set_xlim(xmin, xmax)\n ax1.set_xlabel(r\"$u'_x$\", fontsize = 16)\n ax1.set_ylabel('G', fontsize = 16)\n # ax1.set_title(\"Slice Gain\")\n ax1.grid()\n # ax1.legend(loc = 1)\n\n\n # col_labels = ['Parameter', 'Value'] # Create table with parameter values\n # if np.abs(dm/pctocm) < 1:\n # dmlabel = 
\"{:.2E}\".format(Decimal(dm/pctocm))\n # else:\n # dmlabel = str(dm/pctocm)\n # tablevals = [[r'$d_{so} \\: (kpc)$', np.around(dso/pctocm/kpc, 2)], [r'$d_{sl} \\: (kpc)$', np.around(dsl/pctocm/kpc, 3)], [r'$a_x \\: (AU)$', np.around(ax/autocm, 3)], [r'$a_y \\: (AU)$', np.around(ay/autocm, 3)], [r'$DM_l \\: (pc \\, cm^{-3})$', dmlabel], [r\"$\\nu$ (GHz)\", f/GHz], ['Slope', np.around(m, 2)], ['Offset', n]]\n # tableax.axis('tight')\n # tableax.axis('off')\n # table = tableax.table(cellText = np.asarray(tablevals).T, colWidths = np.ones(8)*0.045, rowLabels = col_labels, loc = 'center')\n # table.auto_set_font_size(False)\n # table.set_fontsize(11)\n # table.scale(2.5, 2.5)\n \n # row_label = ['Lens shape']\n # val = [['$%s$' % sym.latex(lensf)]]\n # tableax2.axis('tight')\n # tableax2.axis('off')\n # table2 = tableax2.table(cellText=val, colWidths=[0.0015*len(sym.latex(lensf))], rowLabels=row_label, loc='top')\n # table2.auto_set_font_size(False)\n # table2.set_fontsize(12)\n # table2.scale(2.5, 2.5)\n\n # grid.tight_layout(fig, pad = 1.5)\n plt.tight_layout()\n plt.show()\n return", "def plot_poly_3d(points_sets, point_matches, name, img1, img2):\r\n # source for code used to plot:\r\n # https://stackoverflow.com/questions/4622057/plotting-3d-polygons-in-python-matplotlib\r\n # https://stackoverflow.com/questions/18897786/transparency-for-poly3dcollection-plot-in-matplotlib\r\n\r\n colour_list = ['r', 'g', 'b', 'c', 'm', 'y']\r\n\r\n # plot of matching points\r\n fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(20, 11))\r\n fig.suptitle(\"{}\".format(name))\r\n ax[0].imshow(img1)\r\n ax[0].set_title(\"Left Image\")\r\n ax[1].imshow(img2)\r\n ax[1].set_title(\"Right Image\")\r\n\r\n i = 0 # tracks the corresponding point in point_matches\r\n for s in range(len(points_sets)):\r\n for p in range(len(points_sets[s])):\r\n ax[0].scatter(point_matches[i, 0, 0], point_matches[i, 0, 1], c=colour_list[s])\r\n ax[1].scatter(point_matches[i, 1, 0], point_matches[i, 1, 1], c=colour_list[s])\r\n i += 1\r\n\r\n plt.show()\r\n\r\n # plot of recovered depth\r\n fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(20, 11))\r\n ax = fig.add_subplot(111, projection='3d')\r\n ax.set_title(\"Recovered Depth ({})\".format(name))\r\n ax.set_xlabel('x axis')\r\n ax.set_ylabel('y axis')\r\n ax.set_zlabel('z axis')\r\n\r\n for s in range(len(points_sets)):\r\n pts = points_sets[s]\r\n\r\n x, y, z = np.array(pts)[:, 0], np.array(pts)[:, 1], np.array(pts)[:, 2]\r\n # x = [0, 1, 1, 0]\r\n # y = [0, 0, 1, 1]\r\n # z = [1, 1, 1, 1]\r\n\r\n ax.scatter(x, y, z, c=colour_list[s])\r\n\r\n vertices = [[0, 1, 2], [0, 1, 3], [0, 2, 3], [1, 2, 3]]\r\n\r\n tupleList = list(zip(x, y, z))\r\n\r\n poly3d = [[tupleList[vertices[ix][iy]] for iy in range(len(vertices[0]))] for ix in range(len(vertices))]\r\n\r\n collection = Poly3DCollection(poly3d, linewidths=1, alpha=0.2)\r\n collection.set_facecolor(colour_list[s])\r\n collection.set_alpha(0.3)\r\n ax.add_collection3d(collection)\r\n ax.add_collection3d(Line3DCollection(poly3d, colors='k', linewidths=0.2, linestyles=':'))\r\n\r\n plt.show()", "def draw3DPts(pcl_1, pcl_2=None, color_1=None, color_2=None):\n input_size_1 = list(pcl_1.size() )\n B = input_size_1[0]\n C = input_size_1[1]\n N1 = input_size_1[2]\n if pcl_2 is not None:\n input_size_2 = list(pcl_2.size() )\n N2 = input_size_2[2]\n\n pcl_1_cpu = pcl_1.cpu().numpy()\n if pcl_2 is not None:\n pcl_2_cpu = pcl_2.cpu().numpy()\n if color_1 is not None:\n color_1_cpu = color_1.cpu().numpy()\n else:\n color_1_cpu = None\n if 
color_2 is not None:\n color_2_cpu = color_2.cpu().numpy()\n else:\n color_2_cpu = None\n \n \n for i in range(B):\n # fig = plt.figure(i)\n # ax = fig.gca(projection='3d')\n # plt.cla()\n\n pcd_o3d_1 = np_batch_to_o3d_pcd(i, pcl_1_cpu, color_1_cpu)\n\n if pcl_2 is not None:\n pcd_o3d_2 = np_batch_to_o3d_pcd(i, pcl_2_cpu, color_2_cpu)\n draw_pcls(pcd_o3d_1, pcd_o3d_2, uniform_color=color_1 is None)\n else:\n draw_pcls(pcd_o3d_1, uniform_color=color_1 is None)\n\n # plt.axis('equal')\n # plt.show()\n # plt.gca().set_aspect('equal')\n # plt.gca().set_zlim(-10, 10)\n # plt.gca().set_zlim(0, 3.5)", "def plot_cube_cross_section(cube, waypoints, short_name, ini_dict,\n bl_depth=None, titleprefix=None,\n plotdir=None, filesuffix='', tight=False,\n verbose=1):\n\n #Get some variables from ini_dict if possible.\n max_height = ini_dict.get('max_height', None)\n if max_height is not None:\n max_height = float(max_height)\n\n #If levels not already set, get levels from ini_dict if possible\n #(and corresponding number of levels)\n levels_dict = ini_dict.get('levels_dict', {})\n if short_name in levels_dict:\n levels = levels_dict[short_name]\n else:\n levels = ini_dict.get('levels_list', None)\n if levels is not None:\n levels = [float(v) for v in levels]\n if levels is not None:\n nlevels = len(levels)\n else:\n nlevels = ini_dict.get('nlevels', 10)\n\n cmap = ini_dict.get('cmap', 'YlGnBu')\n cbar_label = ini_dict.get('cbar_label', 'default')\n cbar = ini_dict.get('cbar', True)\n cbar_num_fmt = ini_dict.get('cbar_num_fmt', None)\n line_colours_list = ini_dict.get('line_colours_list', COLOURS)\n line_colour = line_colours_list[0]\n cbar_orientation = ini_dict.get('cbar_orientation', 'vertical')\n\n # Extract a section along the waypoints\n section = cube_functions.extract_section(cube, waypoints)\n\n #Plot against a sensible vertical coordinate\n if section.coords('model_level_number') and section.coords('level_height'):\n section.remove_coord('model_level_number')\n iris.util.promote_aux_coord_to_dim_coord(section, 'level_height')\n\n zcoordname, = cube_functions.guess_coord_names(section, ['Z'])\n if zcoordname is None:\n print('Not plotting cross section for '+cube.attributes['short_name']\n +' from '+cube.attributes['label']+' (no vertical coordinate)')\n return None\n scoordname = 'i_sample_point'\n\n #Limit section to maximum required height\n if max_height is not None:\n if section.coords('level_height'):\n section = section.extract(iris.Constraint(\n level_height=lambda c: c <= max_height))\n else:\n raise UserWarning('Cannot limit to max_height as level_height'\n + 'coordinate does not exist')\n if bl_depth:\n bl_depth_section = cube_functions.extract_section(\n bl_depth, waypoints)\n\n\n #---\n #Set up field layer\n flr = field_layer.FieldLayer(section)\n flr.set_layerstyle(plottype='pcolormesh',\n colorscale='linear',\n levels=levels,\n nlevels=nlevels,\n mask=True,\n cmap=cmap)\n\n flr.cbar = cbar\n flr.cbar_orientation = cbar_orientation\n flr.cbar_label = cbar_label\n flr.cbar_num_fmt = cbar_num_fmt\n\n # Set colour for boundary layer line plots\n line_colour = line_colour if line_colour else 'black'\n\n #---\n #Loop over fields within layer (sample point & Z coords)\n for layer_slice in flr.layer_slice([scoordname, zcoordname]):\n fplt = field_plot.FieldPlot(ini_dict)\n #Don't plot fields which are entirely nan data\n if isinstance(layer_slice.cube.data, np.ma.MaskedArray):\n if np.sum(np.isfinite(layer_slice.cube.data.data)) == 0:\n #All nan data\n warnings.warn('All nan data, no 
gridded field plot created')\n continue\n else:\n #Not a masked array\n if np.sum(np.isfinite(layer_slice.cube.data)) == 0:\n #All nan data\n warnings.warn('All nan data, no gridded field plot created')\n continue\n fplt.add_layer(layer_slice)\n\n if titleprefix is None:\n titleprefix = ('Cross section for '\n + cube.name().replace('_', ' ') + '\\n')\n fplt.titleprefix = titleprefix\n # Set a specific figsize for this type of plot:\n fplt.plot(figsize=[15.0, 6.0])\n\n # Get the coordinate that was actually used on the vertical axis.\n vert_axis_coord = iplt._get_plot_defn(flr.cube,\n iris.coords.BOUND_MODE,\n ndims=2).coords[0]\n\n plt.gca().set_ylabel(vert_axis_coord.name().replace('_', ' ')\n + ' (' + units_str(str(vert_axis_coord.units)) + ')')\n if vert_axis_coord.has_bounds():\n plt.gca().set_ylim([0, vert_axis_coord.bounds[-1, 1]])\n plt.gca().set_xlabel('Section point')\n plt.gca().set_xlim([-0.5, 29.5]) # n_sample_points=30.\n\n # Also plot BL depth at the same time\n if bl_depth:\n time = layer_slice.cube.coord('time').units.num2date(\n layer_slice.cube.coord('time').points[0])\n bl_depth_slice = bl_depth_section.extract(iris.Constraint\n (time=time))\n if bl_depth_slice: # found matching time\n # Plot\n iplt.plot(bl_depth_slice, label='Boundary Layer Height',\n color=line_colour, linestyle='--')\n # Add label\n plt.gca().legend()\n\n fplt.save_fig(plotdir=plotdir, fileprefix='CrossSection_',\n filesuffix=filesuffix,\n tight=tight, verbose=verbose)\n\n return section", "def pipeline(image):\n # undistort image\n undistorted_image = undistort_image(image)\n superimposed_image = find_lanes(undistorted_image)\n labels = find_vehicles(undistorted_image)\n\n draw_img = draw_labeled_bboxes(superimposed_image, labels)\n\n \n return draw_img", "def plot_3D_compare_list(Y_data_test_list, Y_pred_data_list, ref_shape):\n sample_len = Y_data_test_list.shape[0]\n num_classes = Y_data_test_list.max()+1\n for i in np.arange(0, sample_len):\n for c in np.arange(1, num_classes):\n fig = plt.figure()\n plt.figtext(0.1, 0.1, 'Predicted segmentation for test sample No.: ' + str(i + 1) + ', class: ' + str(c), color='white')\n true_lab = Y_data_test_list[i, ]\n true_loc = np.where(true_lab == c)\n pred_lab = Y_pred_data_list[i, ]\n pred_loc = np.where(pred_lab == c)\n\n # concurring locations\n true_copy = true_lab.copy()\n np.place(true_copy, true_copy != c, 99)\n pred_copy = pred_lab.copy()\n np.place(pred_copy, pred_copy != c, 98)\n same_loc = np.where(true_copy == pred_copy)\n\n ax = plt.axes(projection=\"3d\")\n axl = plt.gca()\n axl.set_xlim3d([0, ref_shape[0]])\n axl.set_ylim3d([0, ref_shape[1]])\n axl.set_zlim3d([0, ref_shape[2]])\n ax.scatter3D(true_loc[0], true_loc[1], true_loc[2], marker=\".\", alpha=0.2,\n edgecolor=\"dodgerblue\", facecolor=\"dodgerblue\")\n ax.scatter3D(pred_loc[0], pred_loc[1], pred_loc[2], marker=\".\", alpha=0.01,\n edgecolor=\"lightcoral\", facecolor=\"lightcoral\")\n ax.scatter3D(same_loc[0], same_loc[1], same_loc[2], marker=\".\", alpha=1,\n edgecolor=\"white\", facecolor=\"white\")\n\n fig.set_facecolor('black')\n ax.set_facecolor('black')\n ax.grid(False)\n ax.w_xaxis.pane.fill = False\n ax.w_yaxis.pane.fill = False\n ax.w_zaxis.pane.fill = False\n\n ax.set_xlabel('Width', c='white')\n ax.set_ylabel('Depth', c='white')\n ax.set_zlabel('Height', c='white')\n\n plt.show()", "def show(self):\n from matplotlib import pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n\n fig = plt.figure()\n ax = Axes3D(fig)\n pos = self.cluster.get_positions()\n from itertools 
import combinations\n for tri in self.mesh.simplices:\n for comb in combinations(tri, 2):\n x1 = pos[comb[0], 0]\n x2 = pos[comb[1], 0]\n y1 = pos[comb[0], 1]\n y2 = pos[comb[1], 1]\n z1 = pos[comb[0], 2]\n z2 = pos[comb[1], 2]\n ax.plot([x1, x2], [y1, y2], zs=[z1, z2], color=\"black\")\n plt.show()", "def draw_3dbox_in_2d_cv2(image, obj, P):\n import cv2\n bbox2 = get_coords_2d(obj, P).astype(np.int)\n bbox2 = tuple(map(tuple, bbox2))\n cv2.line(image, bbox2[2], bbox2[3], (255, 0, 0), 1, cv2.LINE_AA)\n cv2.line(image, bbox2[3], bbox2[7], (255, 0, 0), 1, cv2.LINE_AA)\n cv2.line(image, bbox2[7], bbox2[6], (255, 0, 0), 1, cv2.LINE_AA)\n cv2.line(image, bbox2[6], bbox2[2], (255, 0, 0), 1, cv2.LINE_AA)\n\n cv2.line(image, bbox2[0], bbox2[1], (0, 255, 0), 1, cv2.LINE_AA)\n cv2.line(image, bbox2[1], bbox2[5], (0, 255, 0), 1, cv2.LINE_AA)\n cv2.line(image, bbox2[5], bbox2[4], (0, 255, 0), 1, cv2.LINE_AA)\n cv2.line(image, bbox2[4], bbox2[0], (0, 255, 0), 1, cv2.LINE_AA)\n\n cv2.line(image, bbox2[0], bbox2[3], (0, 0, 255), 1, cv2.LINE_AA)\n cv2.line(image, bbox2[4], bbox2[7], (0, 0, 255), 1, cv2.LINE_AA)\n cv2.line(image, bbox2[1], bbox2[2], (255, 255, 0), 1, cv2.LINE_AA)\n cv2.line(image, bbox2[5], bbox2[6], (255, 255, 0), 1, cv2.LINE_AA)\n return image", "def axis3D(xlow,xhigh,xfirst,xstep,ylow,yhigh,yfirst,ystep,\\\n zlow,zhigh,zfirst,zstep):\n dislin.graf3d(xlow,xhigh,xfirst,xstep,ylow,yhigh,yfirst,ystep,\\\n zlow,zhigh,zfirst,zstep)", "def cube(im_in, azimuth=30., elevation=45., filename=None,\n do_axis=True, show_label=True,\n cube_label = {'x':'x', 'y':'y', 't':'t'},\n colormap='gray', roll=-180., vmin=0., vmax=1.,\n figsize=figsize, dpi=300, **kwargs):\n im = im_in.copy()\n\n N_X, N_Y, N_frame = im.shape\n fx, fy, ft = get_grids(N_X, N_Y, N_frame)\n import numpy as np\n from vispy import app, scene, use\n try:\n AffineTransform = scene.transforms.AffineTransform\n except:\n AffineTransform = scene.transforms.MatrixTransform\n\n use(app='pyglet', gl='pyopengl2')\n from vispy.util.transforms import perspective, translate, rotate\n canvas = scene.SceneCanvas(size=figsize, bgcolor='white', dpi=dpi)\n view = canvas.central_widget.add_view()\n\n# frame = scene.visuals.Cube(size = (N_X/2, N_frame/2, N_Y/2), color=(0., 0., 0., 0.),\n# edge_color='k',\n# parent=view.scene)\n for p in ([1, 1, 1, -1, 1, 1], [1, 1, -1, -1, 1, -1], [1, -1, 1, -1, -1, 1],[1, -1, -1, -1, -1, -1],\n [1, 1, 1, 1, -1, 1], [-1, 1, 1, -1, -1, 1], [1, 1, -1, 1, -1, -1], [-1, 1, -1, -1, -1, -1],\n [1, 1, 1, 1, 1, -1], [-1, 1, 1, -1, 1, -1], [1, -1, 1, 1, -1, -1], [-1, -1, 1, -1, -1, -1]):\n# line = scene.visuals.Line(pos=np.array([[p[0]*N_Y/2, p[1]*N_X/2, p[2]*N_frame/2], [p[3]*N_Y/2, p[4]*N_X/2, p[5]*N_frame/2]]), color='black', parent=view.scene)\n line = scene.visuals.Line(pos=np.array([[p[0]*N_X/2, p[1]*N_frame/2, p[2]*N_Y/2],\n [p[3]*N_X/2, p[4]*N_frame/2, p[5]*N_Y/2]]), color='black', parent=view.scene)\n\n opts = {'parent':view.scene, 'cmap':'grays', 'clim':(0., 1.)}\n image_xy = scene.visuals.Image(np.rot90(im[:, :, 0], 3), **opts)\n tr_xy = AffineTransform()\n tr_xy.rotate(90, (1, 0, 0))\n tr_xy.translate((-N_X/2, -N_frame/2, -N_Y/2))\n image_xy.transform = tr_xy\n\n image_xt = scene.visuals.Image(np.fliplr(im[:, -1, :]), **opts)\n tr_xt = AffineTransform()\n tr_xt.rotate(90, (0, 0, 1))\n tr_xt.translate((N_X/2, -N_frame/2, N_Y/2))\n image_xt.transform = tr_xt\n\n image_yt = scene.visuals.Image(np.rot90(im[-1, :, :], 1), **opts)\n tr_yt = AffineTransform()\n tr_yt.rotate(90, (0, 1, 0))\n tr_yt.translate((+N_X/2, 
-N_frame/2, N_Y/2))\n image_yt.transform = tr_yt\n\n if do_axis:\n t = {}\n for text in ['x', 'y', 't']:\n t[text] = scene.visuals.Text(cube_label[text], parent=canvas.scene, face='Helvetica', color='black')\n t[text].font_size = 8\n t['x'].pos = canvas.size[0] // 3, canvas.size[1] - canvas.size[1] // 8\n t['t'].pos = canvas.size[0] - canvas.size[0] // 5, canvas.size[1] - canvas.size[1] // 6\n t['y'].pos = canvas.size[0] // 12, canvas.size[1] // 2\n\n cam = scene.TurntableCamera(elevation=35, azimuth=30)\n cam.fov = 45\n cam.scale_factor = N_X * 1.7\n if do_axis: margin = 1.3\n else: margin = 1\n cam.set_range((-N_X/2, N_X/2), (-N_Y/2*margin, N_Y/2/margin), (-N_frame/2, N_frame/2))\n view.camera = cam\n if not(filename is None):\n im = canvas.render()\n app.quit()\n import vispy.io as io\n io.write_png(filename, im)\n else:\n app.quit()\n return im", "def imshow_mesh_3d(img, vertices, faces, camera_center, focal_length, colors=(76, 76, 204)):\n H, W, C = img.shape\n if not has_pyrender:\n warnings.warn('pyrender package is not installed.')\n return img\n if not has_trimesh:\n warnings.warn('trimesh package is not installed.')\n return img\n try:\n renderer = pyrender.OffscreenRenderer(viewport_width=W, viewport_height=H)\n except (ImportError, RuntimeError):\n warnings.warn('pyrender package is not installed correctly.')\n return img\n if not isinstance(colors, list):\n colors = [colors for _ in range(len(vertices))]\n colors = [color_val(c) for c in colors]\n depth_map = np.ones([H, W]) * np.inf\n output_img = img\n for idx in range(len(vertices)):\n color = colors[idx]\n color = [(c / 255.0) for c in color]\n color.append(1.0)\n vert = vertices[idx]\n face = faces[idx]\n material = pyrender.MetallicRoughnessMaterial(metallicFactor=0.2, alphaMode='OPAQUE', baseColorFactor=color)\n mesh = trimesh.Trimesh(vert, face)\n rot = trimesh.transformations.rotation_matrix(np.radians(180), [1, 0, 0])\n mesh.apply_transform(rot)\n mesh = pyrender.Mesh.from_trimesh(mesh, material=material)\n scene = pyrender.Scene(ambient_light=(0.5, 0.5, 0.5))\n scene.add(mesh, 'mesh')\n camera_pose = np.eye(4)\n camera = pyrender.IntrinsicsCamera(fx=focal_length[0], fy=focal_length[1], cx=camera_center[0], cy=camera_center[1], zfar=100000.0)\n scene.add(camera, pose=camera_pose)\n light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=1)\n light_pose = np.eye(4)\n light_pose[:3, 3] = np.array([0, -1, 1])\n scene.add(light, pose=light_pose)\n light_pose[:3, 3] = np.array([0, 1, 1])\n scene.add(light, pose=light_pose)\n light_pose[:3, 3] = np.array([1, 1, 2])\n scene.add(light, pose=light_pose)\n color, rend_depth = renderer.render(scene, flags=pyrender.RenderFlags.RGBA)\n valid_mask = (rend_depth < depth_map) * (rend_depth > 0)\n depth_map[valid_mask] = rend_depth[valid_mask]\n valid_mask = valid_mask[:, :, None]\n output_img = valid_mask * color[:, :, :3] + (1 - valid_mask) * output_img\n return output_img", "def _draw3dseg(ax, annot, idx1, idx2, c=None):\n ax.plot(\n [annot[idx1, 0], annot[idx2, 0]], [annot[idx1, 1], annot[idx2, 1]], [annot[idx1, 2], annot[idx2, 2]],\n c=c)", "def imshow_keypoints_3d(pose_result, img=None, skeleton=None, pose_kpt_color=None, pose_link_color=None, vis_height=400, kpt_score_thr=0.3, num_instances=-1, *, axis_azimuth=70, axis_limit=1.7, axis_dist=10.0, axis_elev=15.0):\n show_img = img is not None\n if num_instances < 0:\n num_instances = len(pose_result)\n elif len(pose_result) > num_instances:\n pose_result = pose_result[:num_instances]\n elif len(pose_result) < 
num_instances:\n pose_result += [dict()] * (num_instances - len(pose_result))\n num_axis = num_instances + 1 if show_img else num_instances\n plt.ioff()\n fig = plt.figure(figsize=(vis_height * num_axis * 0.01, vis_height * 0.01))\n if show_img:\n img = mmcv.imread(img, channel_order='bgr')\n img = mmcv.bgr2rgb(img)\n img = mmcv.imrescale(img, scale=vis_height / img.shape[0])\n ax_img = fig.add_subplot(1, num_axis, 1)\n ax_img.get_xaxis().set_visible(False)\n ax_img.get_yaxis().set_visible(False)\n ax_img.set_axis_off()\n ax_img.set_title('Input')\n ax_img.imshow(img, aspect='equal')\n for idx, res in enumerate(pose_result):\n dummy = len(res) == 0\n kpts = np.zeros((1, 3)) if dummy else res['keypoints_3d']\n if kpts.shape[1] == 3:\n kpts = np.concatenate([kpts, np.ones((kpts.shape[0], 1))], axis=1)\n valid = kpts[:, 3] >= kpt_score_thr\n ax_idx = idx + 2 if show_img else idx + 1\n ax = fig.add_subplot(1, num_axis, ax_idx, projection='3d')\n ax.view_init(elev=axis_elev, azim=axis_azimuth)\n x_c = np.mean(kpts[valid, 0]) if sum(valid) > 0 else 0\n y_c = np.mean(kpts[valid, 1]) if sum(valid) > 0 else 0\n ax.set_xlim3d([x_c - axis_limit / 2, x_c + axis_limit / 2])\n ax.set_ylim3d([y_c - axis_limit / 2, y_c + axis_limit / 2])\n ax.set_zlim3d([0, axis_limit])\n ax.set_aspect('auto')\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_zticks([])\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n ax.dist = axis_dist\n if not dummy and pose_kpt_color is not None:\n pose_kpt_color = np.array(pose_kpt_color)\n assert len(pose_kpt_color) == len(kpts)\n x_3d, y_3d, z_3d = np.split(kpts[:, :3], [1, 2], axis=1)\n _color = pose_kpt_color[..., ::-1] / 255.0\n ax.scatter(x_3d[valid], y_3d[valid], z_3d[valid], marker='o', color=_color[valid])\n if not dummy and skeleton is not None and pose_link_color is not None:\n pose_link_color = np.array(pose_link_color)\n assert len(pose_link_color) == len(skeleton)\n for link, link_color in zip(skeleton, pose_link_color):\n link_indices = [_i for _i in link]\n xs_3d = kpts[link_indices, 0]\n ys_3d = kpts[link_indices, 1]\n zs_3d = kpts[link_indices, 2]\n kpt_score = kpts[link_indices, 3]\n if kpt_score.min() > kpt_score_thr:\n _color = link_color[::-1] / 255.0\n ax.plot(xs_3d, ys_3d, zs_3d, color=_color, zdir='z')\n if 'title' in res:\n ax.set_title(res['title'])\n fig.tight_layout()\n fig.canvas.draw()\n img_w, img_h = fig.canvas.get_width_height()\n img_vis = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8).reshape(img_h, img_w, -1)\n img_vis = mmcv.rgb2bgr(img_vis)\n plt.close(fig)\n return img_vis" ]
[ "0.6118277", "0.60820824", "0.6006694", "0.5964072", "0.59441346", "0.5893757", "0.5872753", "0.58612025", "0.5786242", "0.5712049", "0.56787944", "0.5658226", "0.5636408", "0.5630997", "0.5592465", "0.55571723", "0.55545217", "0.5547363", "0.55383724", "0.5521879", "0.55030596", "0.54990524", "0.54898", "0.54799134", "0.54596055", "0.5458485", "0.5450101", "0.5446186", "0.5440322", "0.5433341" ]
0.6111827
1
This function is modified from django.forms.models.model_to_dict. It can also return uneditable fields when SETTINGS.OMIT_UN_EDITABLE_FIELDS = False.
def model_to_dict(instance, fields=None, exclude=None):
    opts = instance._meta
    data = {}
    omit = getattr(SETTINGS, 'OMIT_UN_EDITABLE_FIELDS', False)
    for f in chain(opts.concrete_fields, opts.private_fields, opts.many_to_many):
        if omit and not getattr(f, 'editable', False):
            continue
        if fields and f.name not in fields:
            continue
        if exclude and f.name in exclude:
            continue
        data[f.name] = f.value_from_object(instance)
    return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_readonly_fields(self, request, obj=None):\n return [field.name for field in self.model._meta.fields]", "def get_readonly_fields(self, request, obj=None):\n if self.readonly_model:\n return fields_for_model(model=self.model)\n if obj is None:\n return list(self.add_readonly_fields) + list(self.readonly_fields)\n else:\n return list(self.change_readonly_fields) + list(self.readonly_fields)", "def editable_metadata_fields(self):\r\n def jsonify_value(field, json_choice):\r\n if isinstance(json_choice, dict):\r\n json_choice = dict(json_choice) # make a copy so below doesn't change the original\r\n if 'display_name' in json_choice:\r\n json_choice['display_name'] = get_text(json_choice['display_name'])\r\n if 'value' in json_choice:\r\n json_choice['value'] = field.to_json(json_choice['value'])\r\n else:\r\n json_choice = field.to_json(json_choice)\r\n return json_choice\r\n\r\n def get_text(value):\r\n \"\"\"Localize a text value that might be None.\"\"\"\r\n if value is None:\r\n return None\r\n else:\r\n return self.runtime.service(self, \"i18n\").ugettext(value)\r\n\r\n metadata_fields = {}\r\n\r\n # Only use the fields from this class, not mixins\r\n fields = getattr(self, 'unmixed_class', self.__class__).fields\r\n\r\n for field in fields.values():\r\n\r\n if field.scope != Scope.settings or field in self.non_editable_metadata_fields:\r\n continue\r\n\r\n # gets the 'default_value' and 'explicitly_set' attrs\r\n metadata_fields[field.name] = self.runtime.get_field_provenance(self, field)\r\n metadata_fields[field.name]['field_name'] = field.name\r\n metadata_fields[field.name]['display_name'] = get_text(field.display_name)\r\n metadata_fields[field.name]['help'] = get_text(field.help)\r\n metadata_fields[field.name]['value'] = field.read_json(self)\r\n\r\n # We support the following editors:\r\n # 1. A select editor for fields with a list of possible values (includes Booleans).\r\n # 2. Number editors for integers and floats.\r\n # 3. 
A generic string editor for anything else (editing JSON representation of the value).\r\n editor_type = \"Generic\"\r\n values = field.values\r\n if isinstance(values, (tuple, list)) and len(values) > 0:\r\n editor_type = \"Select\"\r\n values = [jsonify_value(field, json_choice) for json_choice in values]\r\n elif isinstance(field, Integer):\r\n editor_type = \"Integer\"\r\n elif isinstance(field, Float):\r\n editor_type = \"Float\"\r\n elif isinstance(field, List):\r\n editor_type = \"List\"\r\n elif isinstance(field, Dict):\r\n editor_type = \"Dict\"\r\n elif isinstance(field, RelativeTime):\r\n editor_type = \"RelativeTime\"\r\n metadata_fields[field.name]['type'] = editor_type\r\n metadata_fields[field.name]['options'] = [] if values is None else values\r\n\r\n return metadata_fields", "def get_fields(self, exclude=('id',)):\n fields = {}\n for field in self._meta.fields:\n if not field.name in exclude and getattr(self, field.name):\n fields[field.name] = getattr(self, field.name)\n return fields", "def readonly_dict_validator(self, dict_fields, model_name, erp_readonly=[]):\n readonly_fields = self.env['settings.field'].sudo().search([('model_id.model', '=', model_name)])\n\n if readonly_fields:\n erp_readonly.extend(readonly_fields.readonly_field_ids.filtered(lambda x: x.id not in [er.id for er in erp_readonly]))\n\n for field in erp_readonly:\n if field.name in dict_fields and 'readonly' not in dict_fields[field.name]:\n dict_fields[field.name]['readonly'] = True\n\n return dict_fields", "def get_readonly_fields(self, request, obj=None):\n return list(self.readonly_fields) + [field.name for field in obj._meta.fields]", "def get_readonly_fields(self, request, obj=None):\n return list(self.readonly_fields) + [field.name for field in obj._meta.fields]", "def my_model_to_dict(instance, fields=None, exclude=None):\n # avoid a circular import\n from django.db.models.fields.related import ManyToManyField\n opts = instance._meta\n data = {}\n for f in opts.concrete_fields + opts.many_to_many:\n # if not f.editable:\n # continue\n if fields and not f.name in fields:\n continue\n if exclude and f.name in exclude:\n continue\n if isinstance(f, ManyToManyField):\n # If the object doesn't have a primary key yet, just use an empty\n # list for its m2m fields. 
Calling f.value_from_object will raise\n # an exception.\n if instance.pk is None:\n data[f.name] = []\n else:\n # MultipleChoiceWidget needs a list of pks, not object instances.\n data[f.name] = list(f.value_from_object(instance).values_list('pk', flat=True))\n else:\n value = f.value_from_object(instance)\n if isinstance(value, decimal.Decimal) or isinstance(value, datetime) or isinstance(value, date):\n value_str = str(value)\n data[f.name] = value_str\n else:\n data[f.name] = f.value_from_object(instance)\n return data", "def get_readonly_fields(self, request, obj=None):\n if obj:\n return self.readonly_fields\n return ()", "def get_readonly_fields(self, request, obj):\n if obj:\n return ('isd_id',)\n return ()", "def get_non_pk_fields(self, filtered=False):\n\n return QueryBuilder.columns_to_dict(self, self.non_pk_columns, filtered=filtered)", "def clean(self):\n cleaned_data = super().clean()\n cleaned_data = {key: field for key, field in cleaned_data.items()\n if field is not None}\n return cleaned_data", "def copy_fields(self, model):\n fields = super(HistoricalRecords, self).copy_fields(model)\n for name, field in self.additional_fields.items():\n assert name not in fields\n assert hasattr(self, 'get_%s_value' % name)\n fields[name] = field\n return fields", "def modelfields(entity) -> Dict[str, Field]:\n return entity.__modelfields__", "def get_readonly_fields(self, request, obj=None):\n readonly_fields = super(BaseAdmin, self).get_readonly_fields(request,\n obj=obj)\n if obj:\n readonly_fields = list(readonly_fields)\n fieldnames_for_object = map(lambda f: f.name, obj._meta.fields)\n for fieldname in self._READONLY_FIELDS_AFTER_CREATION:\n if fieldname in fieldnames_for_object:\n readonly_fields.append(fieldname)\n return readonly_fields", "def get_readonly_fields(self, request, obj):\n self.request = request\n # fields that won't be editable. 
Just remove one to make it editable\n readonly_fields = ('git_username','git_name','repo_synced','last_compiled','provider','site_url_long','build_url_long','slug')\n if obj:\n readonly_fields = ('git_url',)+readonly_fields\n return readonly_fields\n #return super(RepositoryAdmin, self).get_readonly_fields(request, obj)", "def get_initial(self):\n if self.fields is None:\n return self.object.__dict__.copy()\n else:\n return {field:getattr(self.object,field,'') for field in self.fields}", "def get_all_fields(self):\n fields = []\n for f in self._meta.fields:\n\n fname = f.name \n # resolve picklists/choices, with get_xyz_display() function\n get_choice = 'get_'+fname+'_display'\n if hasattr( self, get_choice):\n value = getattr( self, get_choice)()\n else:\n try :\n value = getattr(self, fname)\n except User.DoesNotExist:\n value = None\n\n # only display fields with values and skip some fields entirely\n if f.editable and value and f.name not in ('id', 'status', 'workshop', 'user', 'complete') :\n\n fields.append(\n {\n 'label':f.verbose_name, \n 'name':f.name, \n 'value':value,\n }\n )\n return fields", "def _set_default_edit_fields(self):\n default_edit_fields = []\n\n for col in self.table.get_column_names():\n req = False\n if col[-3:].lower() == '_id':\n # foreign key\n req = True\n \n dict_template = self._get_field_list_dict()\n \n dict_template['name'] = '{}'.format(col)\n dict_template['label'] = '{}'.format(col).replace('_',' ').title()\n dict_template['type'] = 'text'\n try:\n dict_template['type'] = '{}'.format(self.table.get_column_type(col))\n except KeyError:\n pass\n dict_template['req'] = req\n \n default_edit_fields.append(dict_template)\n \n return default_edit_fields", "def jsonable(self, *args, **options):\n d = {}\n for field_name, field in self.schema.normal_fields.items():\n field_val = getattr(self, field_name, None)\n field_val = field.jsonable(self, field_val)\n if field_val is not None:\n d[field_name] = field_val\n\n return d", "def get_readonly_fields(self, request, obj=None):\n if obj and obj.source == DigitizedWork.HATHI:\n return self.hathi_readonly_fields + self.readonly_fields\n return self.readonly_fields", "def get_readonly_fields(self, request, obj=None):\n if not self.all_fields_readonly or (request.user.is_superuser and self.superuser_skips_all_readonly):\n return self.readonly_fields\n print self.fieldsets\n print list(set(\n [field.name for field in self.opts.local_fields] +\n [field.name for field in self.opts.local_many_to_many]\n ))\n if self.fieldsets:\n return flatten_fieldsets(self.fieldsets)\n \n else:\n return list(set(\n [field.name for field in self.opts.local_fields] +\n [field.name for field in self.opts.local_many_to_many]\n ))", "def get_readonly_fields(self, request, obj=None):\n if request.user.has_perm('prescription.can_admin'):\n return super(PrescriptionMixin, self).get_readonly_fields(request, obj)\n\n current = self.prescription\n if current and not current.is_draft:\n return self.list_editable\n else:\n return super(PrescriptionMixin, self).get_readonly_fields(request, obj)", "def doc_to_dict(instance, fields=None, exclude=None):\n data = {}\n for name, field in instance._fields.items():\n if fields and not name in fields:\n continue\n if exclude and name in exclude:\n continue\n data[name] = field.__get__(instance, None)\n return data", "def get_renamed_input_fields(self):\n return self.renamed_input_fields", "def get_fields(self):\n\t\treturn self.__fields.copy()", "def field_to_dict(self, field):\n input_field = {}\n x = 
{}\n if not DEBUG:\n x = {\n \"type\": str(field.__class__.__name__),\n \"widget\": str(field.widget.__class__.__name__),\n }\n\n # help text for input\n if hasattr(field, 'help_text'):\n x.update({\"help_text\": field.help_text})\n\n # label for input\n if hasattr(field, 'label'):\n x.update({\"label\": field.label})\n\n # place holder object for for input\n if hasattr(field, 'initial'):\n input_field.update({\"placeholder\": field.initial})\n\n # min length object for for input\n if hasattr(field, 'min_length'):\n input_field.update({\"min_length\": field.min_length})\n\n # max object for for input\n if hasattr(field, 'max_length'):\n input_field.update({\"max_length\": field.max_length})\n\n # hidden object for for input\n if hasattr(field, 'widget.is_hidden'):\n input_field.update({\"hidden\": field.widget.is_hidden})\n\n # is required object for for input\n if hasattr(field.widget, 'is_required'):\n input_field.update({\"required\": field.widget.is_required})\n\n # all attributes for for input\n if hasattr(field.widget, 'attrs'):\n x.update({\"attrs\": field.widget.attrs})\n\n # type object for for input\n if 'data-field-type' in field.widget.attrs:\n input_field.update({\"type\": field.widget.attrs['data-field-type']})\n\n x.update({\"input_field\": input_field})\n return x", "def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n from .entity import Entity\n\n from .entity import Entity\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"isDefault\": lambda n : setattr(self, 'is_default', n.get_bool_value()),\n \"lastModifiedDateTime\": lambda n : setattr(self, 'last_modified_date_time', n.get_datetime_value()),\n \"locale\": lambda n : setattr(self, 'locale', n.get_str_value()),\n \"messageTemplate\": lambda n : setattr(self, 'message_template', n.get_str_value()),\n \"subject\": lambda n : setattr(self, 'subject', n.get_str_value()),\n }\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields", "def get_readonly_fields(self, request, obj=None):\n if obj and obj.cwr:\n return (\n 'nwr_rev', 'description', 'works', 'filename', 'view_link',\n 'download_link')\n else:\n return ()", "def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n from .identity_set import IdentitySet\n from .notebook import Notebook\n from .onenote_entity_schema_object_model import OnenoteEntitySchemaObjectModel\n from .onenote_section import OnenoteSection\n from .section_group import SectionGroup\n\n from .identity_set import IdentitySet\n from .notebook import Notebook\n from .onenote_entity_schema_object_model import OnenoteEntitySchemaObjectModel\n from .onenote_section import OnenoteSection\n from .section_group import SectionGroup\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"createdBy\": lambda n : setattr(self, 'created_by', n.get_object_value(IdentitySet)),\n \"displayName\": lambda n : setattr(self, 'display_name', n.get_str_value()),\n \"lastModifiedBy\": lambda n : setattr(self, 'last_modified_by', n.get_object_value(IdentitySet)),\n \"lastModifiedDateTime\": lambda n : setattr(self, 'last_modified_date_time', n.get_datetime_value()),\n }\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields" ]
[ "0.6307178", "0.6280546", "0.6266776", "0.6190274", "0.6138479", "0.60863304", "0.60863304", "0.60848", "0.6051414", "0.604039", "0.6031091", "0.60262895", "0.6009271", "0.6008673", "0.5987545", "0.5934254", "0.5926652", "0.5916412", "0.5908221", "0.58884656", "0.5886619", "0.5874731", "0.5833664", "0.5812048", "0.58029354", "0.5791399", "0.57861465", "0.5785929", "0.57377887", "0.5721504" ]
0.72209966
0
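The editable-field filtering above can be exercised without a database. The sketch below is illustration only: it uses stand-in settings and field objects built from SimpleNamespace rather than real Django model fields, and the field names and values are made up.

from itertools import chain
from types import SimpleNamespace

# Stand-in settings and fields -- not real Django objects, illustration only.
SETTINGS = SimpleNamespace(OMIT_UN_EDITABLE_FIELDS=True)
fields = [
    SimpleNamespace(name='id', editable=False, value_from_object=lambda obj: obj['id']),
    SimpleNamespace(name='title', editable=True, value_from_object=lambda obj: obj['title']),
]
instance = {'id': 7, 'title': 'hello'}

omit = getattr(SETTINGS, 'OMIT_UN_EDITABLE_FIELDS', False)
data = {}
for f in chain(fields):
    if omit and not getattr(f, 'editable', False):
        continue  # uneditable fields are skipped only while the flag is set
    data[f.name] = f.value_from_object(instance)

print(data)  # {'title': 'hello'}; with the flag set to False, 'id' is included too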
The quadrant of the Cartesian plane in which this point is located.
def quadrant(self) -> Quadrant:
    if self.x > 0:
        if self.y > 0:
            return Quadrant.I
        if self.y < 0:
            return Quadrant.IV
    if self.x < 0:
        if self.y > 0:
            return Quadrant.II
        if self.y < 0:
            return Quadrant.III
    return Quadrant.ORIGIN
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _determine_quadrant(self, row, column) -> QuadrantEnum:\n if row < 4 and column < 5:\n return QuadrantEnum.TOP_LEFT\n elif row < 4 and column >= 5:\n return QuadrantEnum.TOP_RIGHT\n elif row >= 4 and column < 5:\n return QuadrantEnum.BOTTOM_LEFT\n else:\n return QuadrantEnum.BOTTOM_RIGHT", "def getQuadrilaterals(self):\n pass", "def bounding_quadrant(self):\n quadrant = Quadrant.empty_quadrant(2)\n for point in self.endpoints:\n quadrant.add_point(point)\n return quadrant", "def _determine_quadrant(row, column) -> QuadrantEnum:\n if row < 4 and column < 5:\n return QuadrantEnum.TOP_LEFT\n elif row < 4 and column >= 5:\n return QuadrantEnum.TOP_RIGHT\n elif row >= 4 and column < 5:\n return QuadrantEnum.BOTTOM_LEFT\n else:\n return QuadrantEnum.BOTTOM_RIGHT", "def quadrant(pAx, pAy, pBx, pBy):\n###############################################################################\n\n if (pBx>pAx and pBy>pAy):\n return 1\n elif (pBx<pAx and pBy>pAy):\n return 2\n elif (pBx<pAx and pBy<pAy):\n return 3\n elif (pBx>pAx and pBy<pAy):\n return 4\n else:\n return 0", "def quadrant(xcoord, ycoord):\n\n xneg = bool(xcoord < 0)\n yneg = bool(ycoord < 0)\n if xneg is True:\n if yneg is False:\n return 2\n return 3\n if yneg is False:\n return 1\n return 4", "def get_quadrant(x, y):\n try:\n x = int(x)\n y = int(y)\n except ValueError:\n return (0)\n\n if y >= 0 and x > 0:\n return (1)\n elif y >= 0 and x < 0:\n return (2)\n elif y < 0 and x < 0:\n return (3)\n else:\n return (4)", "def quadrant(point_x, point_y):\n if point_x == 0 and point_y == 0:\n print \"O\n \"\n elif point_x == 0 and (point_y < 0 or point_y > 0):\n print \"Y\"\n elif point_y == 0 and (point_x < 0 or point_x > 0):\n print \"X\"\n elif point_x > 0 and point_y > 0:\n print \"Q1\"\n elif point_x < 0 and point_y > 0:\n print \"Q2\"\n elif point_x < 0 and point_y < 0:\n print \"Q3\"\n elif point_x > 0 and point_y < 0:\n print \"Q4\"", "def _determine_quadrant_player(self, row, column) -> QuadrantEnum:\n if 4 > row > 0 and 5 > column > 0:\n return QuadrantEnum.TOP_LEFT\n elif 4 > row > 0 and 5 <= column < 9:\n return QuadrantEnum.TOP_RIGHT\n elif 4 <= row < 7 and 5 > column > 0:\n return QuadrantEnum.BOTTOM_LEFT\n elif 4 <= row < 7 and 5 <= column < 9:\n return QuadrantEnum.BOTTOM_RIGHT", "def z(self):\n return self.coords[2]", "def xy(self):\n return self.coords.xy", "def get_origin(self):\n return self.coord_cls(x=0, y=0, z=0, system=self)", "def z(self):\n return self._coords[2]", "def get_coord(self):\n return self.coord", "def _determine_quadrant_player(row, column) -> QuadrantEnum:\n if 4 > row > 0 and 5 > column > 0:\n return QuadrantEnum.TOP_LEFT\n elif 4 > row > 0 and 5 <= column < 9:\n return QuadrantEnum.TOP_RIGHT\n elif 4 <= row < 7 and 5 > column > 0:\n return QuadrantEnum.BOTTOM_LEFT\n elif 4 <= row < 7 and 5 <= column < 9:\n return QuadrantEnum.BOTTOM_RIGHT", "def arg(self, x=None, y=None):\n if x == None:\n x = self.x\n if y == None:\n y = self.y\n if x > 0 and y >= 0: # quadrant I and positive x axis\n return math.atan(y/x)\n elif x < 0: # quadrant II and III and negative x axis\n return math.atan(y/x) + math.pi\n elif x > 0 and y < 0: # quadrant IV\n return math.atan(y/x) + 2*math.pi\n elif y > 0: # positive y axis\n return math.pi / 2\n elif y < 0: # negative y axis\n return math.pi * 3 / 2", "def get_vertex(self):\n V = circumcenter(self.Cents)\n return V", "def get_z(self):\n return self.coords[2]", "def get_points(self):\n return self._quadrature_points", "def get_origin(self):\n return self.coord_cls(x=0, y=0, 
system=self)", "def origin_z(self):\n return self.locations_z[0]", "def coord(self):\r\n return self.model.coord", "def getZCoord(self, x, y):\n n = self.normal()\n z = (-n.x * (x - self.p0.x) - n.y * (y - self.p0.y) + n.z * self.p0.z) / n.z\n return z", "def eval_Dxy(self):\n\n return self.X - self.Y", "def generate_quadrant_coordinates(self):\n qw = 1. # quadrant width\n Qc = np.array([[0, 0]])\n Qo = np.array([[-qw, -qw], [qw, qw], [qw, -qw], [-qw, qw]])\n Qstd = np.array([qw])\n if self.center_first_l_levels > 0:\n Qstd *= 0.\n\n ls = self.per_level_speed\n s = np.array([[ls[0], ls[0]]])\n\n row_idx = 0 # start filling adjacency mat from root node\n col_idx = 1 # skip the root node and start from 2nd node\n for l in range(self.nl):\n Qo /= 2\n qw /= 2\n for n in range(self.nn[l]):\n for c in range(self.nc[l]):\n pc = Qc[row_idx]\n cc = pc + Qo[c]\n Qc = np.append(Qc, [cc], axis=0)\n if l < self.center_first_l_levels - 1:\n Qstd = np.append(Qstd, 0)\n else:\n Qstd = np.append(Qstd, qw)\n col_idx += 1\n s = np.append(s, [[ls[l+1], ls[l+1]]], axis=0)\n # Increase parent index after populating all its children nodes\n row_idx += 1\n\n return Qc.transpose(), Qstd.transpose(), s.transpose()", "def get_origin(self) -> Vec:\n if self.is_brush():\n bbox_min, bbox_max = self.get_bbox()\n return (bbox_min + bbox_max) / 2\n else:\n return Vec.from_str(self['origin'])", "def _coord(self, x, y):\n gridEdge = 7 # originally 5\n y = gridEdge - y\n cx = 100 * (x - 1) + 50\n cy = 100 * (y - 1) + 50\n r = 20\n return (cx - r, cy - r, cx + r, cy + r)", "def quadrant(coordinates: tuple, center: tuple):\n angle = calculate_angle(center, coordinates)\n if angle =< 90:\n return \"LL\"\n if angle =< 180:\n return \"UL\"\n if angle =< 270:\n return \"UR\"\n return \"LR\"", "def get_quadrant(azimuth, azi_start_pos, azi_end_pos):\n nbeam = len(azimuth)\n if azi_start_pos > azi_end_pos:\n iter_plus = np.append(np.arange(azi_start_pos, nbeam), np.arange(0, azi_end_pos + 1))\n iter_minus = np.arange(azi_end_pos, azi_start_pos + 1)[::-1]\n else:\n iter_plus = np.arange(azi_start_pos, azi_end_pos)\n iter_minus = np.append(np.arange(azi_end_pos, nbeam), np.arange(0, azi_start_pos + 1))[::-1]\n\n quad = [None] * 4\n quad[0] = iter_plus[: len(iter_plus) // 2]\n quad[1] = iter_minus[: len(iter_minus) // 2]\n quad[2] = iter_plus[len(iter_plus) // 2 :][::-1]\n quad[3] = iter_minus[len(iter_minus) // 2 :][::-1]\n\n return quad", "def DistanceFromOrigin(self):\r\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5" ]
[ "0.6771368", "0.65648866", "0.6534175", "0.6522615", "0.6497287", "0.6482107", "0.6441157", "0.6324514", "0.62548697", "0.614596", "0.6116718", "0.6105604", "0.60872346", "0.60645896", "0.6024275", "0.6012685", "0.59936947", "0.59838605", "0.59789324", "0.59648126", "0.5921413", "0.59090656", "0.5881586", "0.58584267", "0.58574045", "0.5855447", "0.58485353", "0.58203536", "0.57998776", "0.57814676" ]
0.78478485
0
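The sign checks above map directly onto the usual quadrant numbering. A minimal stand-alone version follows; the Quadrant enum here is a throwaway defined for the example, not the one from the record's source project.

import enum

class Quadrant(enum.Enum):
    # Throwaway enum for illustration; the source project defines its own.
    ORIGIN = 0
    I = 1
    II = 2
    III = 3
    IV = 4

def quadrant(x, y):
    if x > 0:
        if y > 0:
            return Quadrant.I    # both coordinates positive
        if y < 0:
            return Quadrant.IV   # positive x, negative y
    if x < 0:
        if y > 0:
            return Quadrant.II   # negative x, positive y
        if y < 0:
            return Quadrant.III  # both coordinates negative
    return Quadrant.ORIGIN       # origin, or any point on an axis

print(quadrant(3, -2))  # Quadrant.IV
print(quadrant(-1, 5))  # Quadrant.II
print(quadrant(4, 0))   # Quadrant.ORIGIN -- axis points fall through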
A tuple of this point's x and y coordinates.
def xy(self) -> Tuple[float, float]:
    return (self.x, self.y)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def coordinates(self) -> Tuple[int, int]:\n return self.x, self.y", "def xy(self) -> Tuple[int, int]:\n return self._x, self._y", "def get_point(self):\n return self._x, self._y", "def coordinates(self) -> Tuple[float, float, float, float, float]:\n return (self.x, self.y, self.x + self.width, self.y + self.height)", "def getXY(self):\n return (self.X,self.Y)", "def get(self):\n return (self.x,self.y);", "def coordinate(self) -> Tuple[float, float]:\n return self.lat, self.lon", "def get_pos(self):\n return (self.x, self.y)", "def coords(self) -> Tuple[float, float]:\n return self.lat, self.lng", "def coords2D(self):\n return (self.x, self.y)", "def get(self):\n return self.x, self.y", "def get_position(self):\n return (self.x_pos, self.y_pos)", "def position(self):\n return self.x, self.y", "def point(self):\n return self.x, self.y, self.z", "def position(self):\n return self._x, self._y", "def xy(self):\n return self.coords.xy", "def as_point(self):\n return round(self.x), round(self.y)", "def coords(self):\n return (self.x, self.y, self.z)", "def coords(self):\n return (self.x, self.y, self.z)", "def get_values(self):\n return (self.x,self.y)", "def get_location(self):\r\n return self.__x, self.__y", "def tuple(self) -> Tuple[float, float]:\n return (self.latitude, self.longitude)", "def coordinates(self):\n return self.xy", "def coordinates(self):\n return np.array([self.x, self.y])", "def coordinate(self):\n\t\tif self.boldness_coord is None and self.price_coord is None and self.hold_coord is None:\n\t\t\treturn None\n\n\t\treturn (self.boldness_coord, self.price_coord, self.hold_coord)", "def coordinates(self):\n return self.latitude, self.longitude", "def getCoord(self, i):\n _x = self.__xpts[i]\n _y = self.__ypts[i]\n return _x, _y", "def getBallPos(self) -> (int,int):\n return self.x, self.y", "def get(self):\r\n return ((self.x, self.y), self.dir)", "def getMousePosition(self):\n return (self.mouseData.x, self.mouseData.y)" ]
[ "0.8953007", "0.8937962", "0.86783415", "0.86111736", "0.8321993", "0.82544005", "0.82386565", "0.82255226", "0.8197248", "0.80881447", "0.8072225", "0.79811084", "0.7958825", "0.7913098", "0.79067475", "0.79028577", "0.7887499", "0.7886007", "0.7886007", "0.78790206", "0.78716904", "0.7867114", "0.78595066", "0.7847862", "0.7783251", "0.7760666", "0.77426475", "0.76673245", "0.75867397", "0.75201994" ]
0.90129477
0
An iterator over x and y coordinates.
def __iter__(self) -> Iterable[Tuple[float, float]]:
    return iter([self.x, self.y])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __iter__(self):\n pt = (self.x, self.y)\n for i in pt:\n yield i", "def iter_coordinates(self):\n for coord in self.position:\n yield coord", "def __iter__(self):\n yield self._x\n yield self._y", "def __iter__(self):\n yield self.x\n yield self.y\n # Or, you could also do:\n # return iter([self.x, self.y])", "def __iter__(self):\n for coord in self.position:\n yield coord", "def iter_coords():\n yield (0, 0)\n incr = 0\n x = 1\n y = 0\n\n while True:\n incr += 2\n\n top = y + incr - 1\n bot = y - 1\n left = x - incr\n right = x\n\n yield (x, y)\n while y < top:\n y += 1\n yield (x, y)\n\n while x > left:\n x -= 1\n yield (x, y)\n\n while y > bot:\n y -= 1\n yield (x, y)\n\n while x < right:\n x += 1\n yield (x, y)\n\n x += 1", "def iter_points(self):\n for x in range(self.left, self.right + 1):\n for y in range(self.top, self.bottom + 1):\n yield Point(x, y)", "def __iter__(self):\n return self.coords.__iter__()", "def index_iterator((x_min, x_max, y_min, y_max)):\n for row in xrange(y_min, y_max):\n for col in xrange(x_min, x_max):\n yield (row, col)", "def __iter__(self) -> Iterator[Position]:\n for row in range(self.width):\n for col in range(self.height):\n yield (row, col)", "def __iter__(self):\n for y in range(self.origin.y, self.origin.y + self.size.y):\n for x in range(self.origin.x, self.origin.x + self.size.x):\n yield Vec2(x, y)", "def __iter__(self):\n for label, coord_seq in self.coords.items():\n for coordinate in coord_seq:\n yield (label, tuple(coordinate),)", "def iter_neighbors(x: int, y: int) -> t.Generator[COORDINATE, None, None]:\n yield x - 1, y\n yield x + 1, y\n yield x, y - 1\n yield x, y + 1", "def __iter__(self):\n for point in self.points:\n yield point", "def __iter__(self):\n for idx in range(0, self.Npoints):\n position = self.start + (self.end-self.start)/self.Npoints*idx\n yield position\n raise StopIteration()", "def __iter__(self):\n return iter([self.min_x_box, self.max_x_box, self.min_y_box, self.max_y_box])", "def iter_positions(self):\n for loc in self.iter_locations():\n yield loc.position", "def __iter__(self):\n return self.points.__iter__()", "def read_coords(self, coords: np.ndarray) -> Iterator[XData]:\n indexes = np.vstack([\n world_to_image(coords[:, 0], self.meta.image.x_coordinates),\n world_to_image(coords[:, 1], self.meta.image.y_coordinates),\n ]).T\n return self.read_ix(indexes)", "def data(self) -> Generator[Tuple[int, int], None, None]:\n x = self.x_start\n y = self.y_start\n yield x, y\n while True:\n x += self.x_rate\n y += self.y_rate\n yield int(x), int(y)", "def __iter__(self):\n for y, row in enumerate(self.rows):\n for x, char in enumerate(row):\n pos = Position(x, y)\n yield pos, char", "def __iter__(self):\n width = self.GetWidth()\n height = self.GetHeight()\n pixels = self.GetPixels()\n \n\n\n\n class PixelFacade(object):\n def Get(self):\n return pixels.Get()\n def Set(self, *args, **kw):\n return pixels.Set(*args, **kw)\n def __str__(self):\n return str(self.Get())\n def __repr__(self):\n return 'pixel(%d,%d): %s' % (x,y,self.Get())\n X = property(lambda self: x)\n Y = property(lambda self: y)\n \n pf = PixelFacade() \n for y in xrange(height):\n pixels.MoveTo(self, 0, y)\n for x in xrange(width):\n\n\n\n yield pf \n pixels.nextPixel()", "def __iter__(self):\n width = self.GetWidth()\n height = self.GetHeight()\n pixels = self.GetPixels()\n \n\n\n\n class PixelFacade(object):\n def Get(self):\n return pixels.Get()\n def Set(self, *args, **kw):\n return pixels.Set(*args, **kw)\n def __str__(self):\n return 
str(self.Get())\n def __repr__(self):\n return 'pixel(%d,%d): %s' % (x,y,self.Get())\n X = property(lambda self: x)\n Y = property(lambda self: y)\n \n pf = PixelFacade() \n for y in xrange(height):\n pixels.MoveTo(self, 0, y)\n for x in xrange(width):\n\n\n\n yield pf \n pixels.nextPixel()", "def ifind_at(self, x, y):\n for sym in self.itersymbols():\n bx0,by0,bx1,by1 = sym.sym.bbox()\n if bx0 <= x <= bx1 and by0 <= y <= by1:\n yield sym.sym", "def getCoord(self, i):\n _x = self.__xpts[i]\n _y = self.__ypts[i]\n return _x, _y", "def __iter__(self):\n for p in self.positions(): # use same order as positons()\n yield p.element() # but yield each element", "def __iter__(self):\n return iter((self.r, self.g, self.b))", "def __iter__(self) -> Iterable[Union[Point, LabwareLike]]:\n return iter(\n (\n self._point,\n self._labware,\n )\n )", "def __iter__(self) -> Iterator[Number]:\n return (getattr(self, p) for p in ['xmin', 'ymin', 'xmax', 'ymax'])", "def __iter__(self):\n return ((x, y) for y, x in self._items.items())" ]
[ "0.8114274", "0.7996599", "0.76811117", "0.7629678", "0.74141836", "0.7341875", "0.727359", "0.7255214", "0.724903", "0.7178277", "0.7174249", "0.704348", "0.70082766", "0.69353855", "0.6815294", "0.68150526", "0.66570723", "0.65559846", "0.65280896", "0.6511601", "0.6490005", "0.64852226", "0.64852226", "0.6421416", "0.6405176", "0.6366063", "0.63652474", "0.63309914", "0.6327806", "0.6311195" ]
0.80211693
1
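Returning an iterator over the two coordinates is what makes a point unpackable like a tuple. The snippet below uses a small stand-in Point class with x and y attributes (not the original class from the record) to show the effect.

class Point:
    # Stand-in Point class for illustration only.
    def __init__(self, x=0.0, y=0.0):
        self.x, self.y = x, y

    def __iter__(self):
        # Yield the coordinates in x, y order so the point unpacks like a tuple.
        return iter([self.x, self.y])

p = Point(2.0, 5.0)
x, y = p         # tuple-style unpacking driven by __iter__
print(x, y)      # 2.0 5.0
print(tuple(p))  # (2.0, 5.0)
print(list(p))   # [2.0, 5.0]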
Raise each coordinate by `exponent` and return a new Point.
def __pow__(self, exponent: float) -> PointType:
    return Point(self.x ** exponent, self.y ** exponent)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __ipow__(self, exponent: float) -> PointType:\n self.x **= exponent\n self.y **= exponent\n return self", "def __pow__(self, exponent):\n return type(self)(self.parent(),\n self._simplify(pow(self._express, exponent)))", "def __pow__(self, exponent):\n return self.runtime.pow(self, exponent)", "def power(base, exponent):\n return base ** exponent", "def __pow__(self, exponent: int):\n\t\tif exponent < 0:\n\t\t\traise ValueError(\"Negative powers not supported\")\n\t\telif exponent == 0:\n\t\t\treturn SquareMatrix(self._rows, 1)\n\t\telse:\n\t\t\tres = self\n\t\t\tfor i in range(1, exponent):\n\t\t\t\tres *= self\n\t\t\treturn res", "def raise_iter(number, exponent):\n prod = 1\n for i in range(exponent, 0, -1):\n prod *= number\n return prod", "def power(self, exponent: float):\n theta, phi = self.params\n return RGate(exponent * theta, phi)", "def power(num, exponent):\n return num ** exponent", "def truncated_power(self, exponent, degs = None):\n f = self.to_Poly()\n return f.truncated_power(\n exponent = exponent, degs = degs\n ).to_TaylorGrid(self.params)", "def exponent(num,power=2):\n return num ** power", "def __pow__(self, exponent):\n return Quantity(pow(self._value, exponent), pow(self.unit, exponent))", "def setExponent(self, *args):\n return _libsbml.Unit_setExponent(self, *args)", "def power(num, exponent):\n power = num ** exponent\n return power", "def __pow__(self, other) -> 'MultiVector':\n\n if not isinstance(other, (int, float)):\n raise ValueError(\"exponent must be a Python int or float\")\n\n if abs(round(other) - other) > _eps:\n raise ValueError(\"exponent must have no fractional part\")\n\n other = int(round(other))\n\n if other == 0:\n unit_out = self._newMV(dtype=self.value.dtype) + 1\n return unit_out\n\n newMV = self._newMV(np.array(self.value)) # copy\n\n for i in range(1, other):\n newMV = newMV * self\n\n return newMV", "def addExponent(self):\n\t\t# if the exponent part is not set and this number is allowed an exponent\n\t\tif(self.exponent == None and self.allowExponent):\n\t\t\t# set the exponent to another number (disallowing exponents since we can't\n\t\t\t# have an exponent with an exponent\n\t\t\tself.exponent = Number(allowExponent = False)", "def __mul__(self, factor: float) -> Point:\n print(\"__mul__ was called\")\n return Point(self.x * factor, self.y * factor)", "def __pow__(self, exponent, modulus=None):\n raise NotImplementedError", "def power(self,p):\r\n\t\t\r\n\t\t# raise to power\r\n\t\tr,o = Li._expand(self,p)\r\n\t\t\r\n\t\treturn Li(r)", "def calculate_exponent():\n pass", "def exponential(self,*datas):\n\t\tdatas = list(datas)\n\t\tresult = datas.pop(0)\n\t\tfor data in datas:\n\t\t\tresult **= data\n\n\t\treturn result", "def expexp(x,y,z,p):\n\n \"\"\"\n Fermat's little theorem can be exploited to handle large values.\n This theorem states that:\n (a^p) is equivalent to (a mod p)\n This is the same as:\n (a^(p - 1)) is equivalent to (1 mod p)\n Thus, modular exponentiation can be done with (p - 1) to get\n (y^z mod (p - 1)), which is stored as b.\n For each test, the b values are:\n Test 1: b = 0\n Test 2: b = 4\n Test 3: b = 72\n Test 4: b = 72\n As shown, these values are much smaller to handle. 
Now, \n perform modular exponentiation again, this time with (p),\n to get (x^(y^z) mod p), store as a, and return.\n For each test, the a values are:\n Test 1: a = 1\n Test 2: a = 16\n Test 3: a = 1\n Test 4: a = 4\n Each return value matches the expected values in the test,\n therefore the algorithm is correct.\n \"\"\"\n b = pow(y, z, p - 1)\n a = pow(x, b, p)\n return a", "def power(base, exp):\n base_v, base_d = Tensor.get_value_and_deriv(base)\n exp_v, exp_d = Tensor.get_value_and_deriv(exp)\n\n result = base_v ** exp_v\n a = base_d.mul(exp_v * base_v ** (exp_v - 1.0))\n b = exp_d.mul(result * np.log(base_v))\n return Tensor(result, a + b)", "def raises(number, exponent):\n if exponent == 0:\n return 1\n else:\n return number * raises(number, exponent - 1)", "def __pow__(self, exp):\n # We have (p o Q)^e = p^e o Q\n coeff = (self._unit_simplex_polynomial**exp).coeff\n if isinstance(exp, numbers.Integral):\n r = self.degree() * exp\n else:\n r = 0\n for i in range(len(exp)):\n r += self[i].degree() * exp[i]\n return PolynomialBernsteinSimplex(coeff, self.vertices, r)", "def exponential(base, multiplier, limit):\n def func():\n if base < 0:\n raise ValueError('base must be non-negative')\n\n if multiplier < 0:\n raise ValueError('multiplier must be non-negative')\n\n if limit < 0:\n raise ValueError('limit must be non-negative')\n\n delay = base\n for exp in range(limit):\n yield delay**exp * multiplier\n\n return func", "def adjust_E(self, power):\n if self.pwr_in_tot:\n fac = power/self.pwr_in_tot\n else:\n fac = 1.0\n fac = min(1.01, fac)\n self.Ey = sqrt(fac)*self.Ey", "def spread(self, n=2):\n for point in self.points:\n point *= n", "def exp(x):\n raise NotImplementedError", "def truncated_power(self, exponent, degs = None):\n a = exponent\n return self.truncated_fun(\n fun_der = lambda k, t: binom(a, k) * factorial(k) * t**(a-k),\n degs = degs\n )", "def pow(space, w_base, w_exponent, w_modulus):\n return space.pow(w_base, w_exponent, w_modulus)" ]
[ "0.7288927", "0.61586154", "0.61224526", "0.6076266", "0.60731536", "0.60344225", "0.60247", "0.5891363", "0.58524084", "0.5728584", "0.57192135", "0.5697429", "0.56703067", "0.562234", "0.5572597", "0.5537855", "0.5530972", "0.55304945", "0.5511318", "0.5456827", "0.5404782", "0.5372317", "0.5365125", "0.53639895", "0.5286638", "0.52865803", "0.5279254", "0.5274439", "0.5261529", "0.5252982" ]
0.73620385
0
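Applying the exponent coordinate-wise means squaring a point squares each component independently while leaving the original untouched. A quick check with a stand-in Point class (illustration only):

class Point:
    # Stand-in Point class for illustration only.
    def __init__(self, x=0.0, y=0.0):
        self.x, self.y = x, y

    def __pow__(self, exponent):
        # Raise each coordinate independently and build a new point.
        return Point(self.x ** exponent, self.y ** exponent)

    def __repr__(self):
        return f"Point({self.x}, {self.y})"

p = Point(3.0, 4.0)
print(p ** 2)    # Point(9.0, 16.0) -- p itself is unchanged
print(p ** 0.5)  # Point(1.732..., 2.0)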
Raise each coordinate by `exponent` inplace and return self.
def __ipow__(self, exponent: float) -> PointType:
    self.x **= exponent
    self.y **= exponent
    return self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __pow__(self, exponent):\n return type(self)(self.parent(),\n self._simplify(pow(self._express, exponent)))", "def __pow__(self, exponent: int):\n\t\tif exponent < 0:\n\t\t\traise ValueError(\"Negative powers not supported\")\n\t\telif exponent == 0:\n\t\t\treturn SquareMatrix(self._rows, 1)\n\t\telse:\n\t\t\tres = self\n\t\t\tfor i in range(1, exponent):\n\t\t\t\tres *= self\n\t\t\treturn res", "def __pow__(self, exponent):\n return self.runtime.pow(self, exponent)", "def __pow__(self, exponent):\n return Quantity(pow(self._value, exponent), pow(self.unit, exponent))", "def raise_iter(number, exponent):\n prod = 1\n for i in range(exponent, 0, -1):\n prod *= number\n return prod", "def __pow__(self, other) -> 'MultiVector':\n\n if not isinstance(other, (int, float)):\n raise ValueError(\"exponent must be a Python int or float\")\n\n if abs(round(other) - other) > _eps:\n raise ValueError(\"exponent must have no fractional part\")\n\n other = int(round(other))\n\n if other == 0:\n unit_out = self._newMV(dtype=self.value.dtype) + 1\n return unit_out\n\n newMV = self._newMV(np.array(self.value)) # copy\n\n for i in range(1, other):\n newMV = newMV * self\n\n return newMV", "def power(self, exponent: float):\n theta, phi = self.params\n return RGate(exponent * theta, phi)", "def setExponent(self, *args):\n return _libsbml.Unit_setExponent(self, *args)", "def truncated_power(self, exponent, degs = None):\n f = self.to_Poly()\n return f.truncated_power(\n exponent = exponent, degs = degs\n ).to_TaylorGrid(self.params)", "def __pow__(self, exponent: float) -> PointType:\n return Point(self.x ** exponent, self.y ** exponent)", "def __rpow__(self, other) -> 'MultiVector':\n\n # Let math.log() check that other is a Python number, not something\n # else.\n\n # pow(x, y) == exp(y * log(x))\n newMV = general_exp(math.log(other) * self)\n\n return newMV", "def addExponent(self):\n\t\t# if the exponent part is not set and this number is allowed an exponent\n\t\tif(self.exponent == None and self.allowExponent):\n\t\t\t# set the exponent to another number (disallowing exponents since we can't\n\t\t\t# have an exponent with an exponent\n\t\t\tself.exponent = Number(allowExponent = False)", "def exp(self):\n return type(self)(self.parent(), self._simplify(self._express.exp()))", "def __rpow__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n raise excep.biogemeError(\n f'This is not a valid expression: {other}'\n )\n return Power(other, self)", "def raises(number, exponent):\n if exponent == 0:\n return 1\n else:\n return number * raises(number, exponent - 1)", "def power(self,p):\r\n\t\t\r\n\t\t# raise to power\r\n\t\tr,o = Li._expand(self,p)\r\n\t\t\r\n\t\treturn Li(r)", "def __imul__(self, x):\n ls=len(self)\n for i in self.desc():\n for j in range(ls):\n self.g_val(self.val(i,j)*x,i,j)\n return self", "def __pow__(self, power):\n value = power * (self.val) ** (power - 1)\n der = {k: value * v for k, v in self.der.items()}\n return AutoDiffReverse(self.val ** power, None, der)", "def exp(self):\n return Factor().__build( VarSet(self.v) , np.exp(self.t) )", "def exponent(self):\n return self.__exponent", "def __pow__(self, exponent, modulus=None):\n raise NotImplementedError", "def exponent(self, exponent):\n if self.local_vars_configuration.client_side_validation and exponent is None: # noqa: E501\n raise ValueError(\"Invalid value for `exponent`, must not be `None`\") # noqa: E501\n\n self._exponent = exponent", "def calculate_exponent():\n pass", "def power(base, 
exponent):\n return base ** exponent", "def power(num, exponent):\n return num ** exponent", "def __pow__(self,power):\n return Factor().__build( VarSet(self.v) , np.power(self.t,power) )", "def adjust_E(self, power):\n if self.pwr_in_tot:\n fac = power/self.pwr_in_tot\n else:\n fac = 1.0\n fac = min(1.01, fac)\n self.Ey = sqrt(fac)*self.Ey", "def _mask_for_values_on_exponent_limit(self):\n lower_exponent = self.exponent.equality_mask(self.exp_lower)\n upper_exponent = self.exponent.equality_mask(self.exp_upper)\n\n mantissa_lower_limit = self.mantissa.get_lower_limit_mask(self.mantissa_lower)\n mantissa_upper_limit = self.mantissa.get_upper_limit_mask(self.mantissa_upper)\n\n lower_limit = logical_and_on_list_of_masks(\n [lower_exponent, mantissa_lower_limit])\n upper_limit = logical_and_on_list_of_masks(\n [upper_exponent, mantissa_upper_limit])\n return self._combine_limit_masks(lower_limit, upper_limit)", "def exp(self, X, U):\n raise NotImplementedError", "def __rpow__(self, power):\n value = power ** self.val\n der = {k: value * v * np.log(power) for k, v in self.der.items()}\n return AutoDiffReverse(value, None, der)" ]
[ "0.68471605", "0.68286675", "0.6560746", "0.63714254", "0.6354218", "0.63479114", "0.6300082", "0.6216885", "0.6170165", "0.60661626", "0.5901398", "0.58412004", "0.58182156", "0.57910985", "0.5763578", "0.5707981", "0.56934494", "0.5692054", "0.5682847", "0.5680657", "0.567733", "0.56364536", "0.5636195", "0.56345326", "0.5611753", "0.5588384", "0.55763364", "0.55704737", "0.55625653", "0.55522794" ]
0.6919949
0
Apply absolute value to each coordinate and return a new Point.
def __abs__(self) -> PointType:
    return Point(abs(self.x), abs(self.y))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_absolute(self, points):\r\n\r\n # remember if we got a list\r\n is_list = isinstance(points, list)\r\n\r\n points = ensure_numeric(points, num.float)\r\n if len(points.shape) == 1:\r\n # One point has been passed\r\n msg = 'Single point must have two elements'\r\n if not len(points) == 2:\r\n raise ShapeError, msg \r\n\r\n\r\n msg = 'Input must be an N x 2 array or list of (x,y) values. '\r\n msg += 'I got an %d x %d array' %points.shape \r\n if not points.shape[1] == 2:\r\n raise ShapeError, msg \r\n \r\n \r\n # Add geo ref to points\r\n if not self.is_absolute():\r\n points = copy.copy(points) # Don't destroy input \r\n points[:,0] += self.xllcorner \r\n points[:,1] += self.yllcorner\r\n\r\n \r\n if is_list:\r\n points = points.tolist()\r\n \r\n return points", "def __abs__(self):\n v = zeros_como(self)\n\n for i in range(self.n):\n v[i] = abs(self[i])\n\n return v", "def coordinates_abs(self, source):\n if not hasattr(self, 'azimuth'):\n return self.center_abs(source)\n else:\n return (*self.center_abs(source), self.azimuth, self.elevation)", "def __abs__(self):\n retval = self.copy()\n retval._val = abs(retval._val)\n return retval", "def _from_pixels_abs(self, point):\n point = self.resolution.from_pixels(point)\n self.max_x = max(self.max_x, point[0])\n self.max_y = max(self.max_y, point[1])\n self.min_x = min(self.min_x, point[0])\n self.min_y = min(self.min_y, point[1])\n return point", "def get_adjusted_points(self) -> Sequence[Point]:\n if not self._adjusted_cache_dirty:\n return self._adjusted_points # type: ignore\n\n def _adjust_point(point) -> Point:\n x, y = point\n\n x *= self.scale[0]\n y *= self.scale[1]\n\n return (x + self.position[0], y + self.position[1])\n\n self._adjusted_points = [_adjust_point(point) for point in self.points]\n self._adjusted_cache_dirty = False\n return self._adjusted_points # type: ignore [return-value]", "def absolute_value(x):\n x_star = x.clone()\n x_star[1] *= -1\n return elementwise_mult(x, x_star)[0].sqrt_()", "def handle_absolute(self, event):\n point = self._get_absolute(event)\n x_pos = round(point.x)\n y_pos = round(point.y)\n x_event, y_event = self.emulate_abs(x_pos, y_pos, self.timeval)\n self.events.append(x_event)\n self.events.append(y_event)", "def abs(self, a):\n return abs(a)", "def __abs__(self):\n return Vector.createFromPoint(self).norm", "def getAbsCoords( self, x=None, y=None ):\n\n if x is None:\n x = self.x\n if y is None:\n y = self.y\n\n p = self.parent\n\n absX = 0\n absY = 0\n\n while( p != None ):\n absX += p.x\n absY += p.y\n\n p = p.parent\n\n absX += x\n absY += y\n\n return absX, absY", "def getPoints(self,currPt,xform):\n if self.isAbs:\n # Absolute values.\n newPts = []\n for pt in self.points:\n x,y = xform.transformPoint(pt[0],\n pt[1])\n newPts.append(Point(x,y))\n\n return newPts\n else:\n # Relative points, offset with the currPt\n pts = []\n for pt in self.points:\n sx,sy = xform.scalePoint(pt[0],\n pt[1])\n x = sx + currPt[0]\n y = sy + currPt[1]\n pts.append(Point(x,y))\n\n return pts", "def abs(f):\n return f.per(dmp_abs(f.rep, f.lev, f.dom))", "def __abs__(self):\n out = self.copy()\n out.addFunction(Query.Function.Abs)\n return out", "def handle_absolute(self, event):\n (x_val, y_val) = self._get_absolute(event)\n x_event, y_event = self.emulate_abs(\n int(x_val),\n int(y_val),\n self.timeval)\n self.events.append(x_event)\n self.events.append(y_event)", "def toabs(self, value, isworld=-1):\n return _coordsys.coordsys_toabs(self, value, isworld)", "def absolute_values( values ):\n absVal 
= []\n for val in values:\n absVal.append( abs(val))\n\n return absVal", "def l1(self, points):\n new_points = []\n sum = []\n for point in points:\n for i in range(len(point.coordinates)):\n if (i < len(sum)):\n sum[i] += abs(point.coordinates[i])\n else:\n sum.append(abs(point.coordinates[i]))\n for point in points:\n new_coordinates = point.coordinates\n new_coordinates = [(new_coordinates[i]/ sum[i]) for i in range(len(point.coordinates))]\n new_points.append(Point(point.name, new_coordinates, point.label))\n return new_points", "def scalar_abs(self, dst, src):\n return self._scalar_single_func('abs', dst, src)", "def copy_abs(self):\r\n return (self._real.fma(self._real, self._imag*self._imag)).sqrt()", "def absoluted(values):\n values = _normalize(values)\n for v in values:\n yield abs(v)", "def make_abs_geo(self, parent=None):\n self.abs_geo = RapidPos(self.rot_sca_abs(parent=parent))", "def absolute(requestContext, seriesList):\n for series in seriesList:\n series.name = \"absolute(%s)\" % (series.name)\n series.pathExpression = series.name\n for i,value in enumerate(series):\n series[i] = safeAbs(value)\n return seriesList", "def __abs__(self):\n\t\tval = abs(self.val)\n\t\tif 0 in self.val:\n\t\t\traise ValueError(\"Absolute value is not differentiable at 0.\")\n\n\t\tder_copy = np.copy(self.der)\n\t\tif len(der_copy.shape):\n\t\t\tfor i, val_i in enumerate(self.val):\n\t\t\t\tif val_i < 0:\n\t\t\t\t\tder_copy[i] = -1 * der_copy[i]\n\t\treturn Var(val, der_copy)", "def abs__inplace(a):", "def handle_abs(self):\n # pylint: disable=no-member\n x_raw = self.microbit.accelerometer.get_x()\n y_raw = self.microbit.accelerometer.get_y()\n x_abs = ('Absolute', 0x00, x_raw)\n y_abs = ('Absolute', 0x01, y_raw)\n return x_abs, y_abs", "def abs_(a):", "def applyToPoints(self, points):\n return [point + self for point in points]", "def absolute_to_relative(self, x, y):\n rel_x = (x - self.width / 2) / (self.width / 2)\n if rel_x > 1:\n rel_x = 1\n elif rel_x < -1:\n rel_x = -1\n\n rel_y = (self.height / 2 - y) / (self.height / 2)\n if rel_y > 1:\n rel_y = 1\n elif rel_y < -1:\n rel_y = -1\n\n return rel_x, rel_y", "def get_relative(self, points):\r\n\r\n # remember if we got a list\r\n is_list = isinstance(points, list)\r\n\r\n points = ensure_numeric(points, num.float)\r\n if len(points.shape) == 1:\r\n #One point has been passed\r\n msg = 'Single point must have two elements'\r\n if not len(points) == 2:\r\n raise ShapeError, msg \r\n\r\n if not points.shape[1] == 2:\r\n msg = ('Input must be an N x 2 array or list of (x,y) values. '\r\n 'I got an %d x %d array' % points.shape)\r\n raise ShapeError, msg \r\n\r\n # Subtract geo ref from points\r\n if not self.is_absolute():\r\n points = copy.copy(points) # Don't destroy input \r\n points[:,0] -= self.xllcorner \r\n points[:,1] -= self.yllcorner\r\n\r\n if is_list:\r\n points = points.tolist()\r\n \r\n return points" ]
[ "0.6606349", "0.6376209", "0.6323174", "0.61907077", "0.616326", "0.61606514", "0.61422604", "0.61412853", "0.606106", "0.6055513", "0.605538", "0.6054866", "0.6054316", "0.6041184", "0.6007135", "0.59953696", "0.59925735", "0.59842604", "0.59756476", "0.59686637", "0.59338707", "0.5908239", "0.5875841", "0.58433515", "0.58400977", "0.5837745", "0.5796097", "0.5777352", "0.5767015", "0.5757341" ]
0.6905627
0
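Note that abs() here acts per coordinate, folding the point into the first quadrant; it is not the Euclidean magnitude that some geometry libraries return from __abs__. The stand-in below (illustration only) shows the difference.

class Point:
    # Stand-in Point class for illustration only.
    def __init__(self, x=0.0, y=0.0):
        self.x, self.y = x, y

    def __abs__(self):
        # Per-coordinate absolute value, not the Euclidean magnitude.
        return Point(abs(self.x), abs(self.y))

    def __repr__(self):
        return f"Point({self.x}, {self.y})"

p = Point(-3.0, 4.0)
print(abs(p))                         # Point(3.0, 4.0)
print((p.x ** 2 + p.y ** 2) ** 0.5)   # 5.0 -- the magnitude, if that is what you need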
Invert each coordinate and return a new Point.
def __invert__(self) -> PointType:
    return Point(~self.x, ~self.y)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __invert__(self):\n \n return Vector(-self.y, self.x)", "def inverse(self):\n def inv(v):\n v[0], v[1] = v[1] , v[0]\n for v in [self.point1 , self.pointN , self.unitv, self.normalv]:\n inv(v)\n\n self.points = numpy.roll(self.points,1,axis=1)\n self.a, self.b = self.b, self.a\n self.angle = numpy.arccos( self.unitv[0] )*numpy.sign(self.unitv[1] )\n return", "def invert(self):\n self.vertices.reverse()", "def inverse(self, x, y):", "def reverse(self):\n x = self._x * -1\n y = self._y * -1\n return Point(x,y)", "def __invert__(self):\n a = self.angle\n x, y = Vector.cartesian([1, a])\n return Vector(x, y)", "def inverse(self):\n cdef StdVectorFst result = self.copy()\n result.invert()\n return result", "def inverseCoordinates(coords):\n newlist = []\n if isPoint(coords):\n return [coords[1], coords[0]]\n elif not isinstance(coords, list) and not isinstance(coords, tuple):\n raise ValueError('coordinates to inverse must be minimum a point')\n for i, it in enumerate(coords):\n p = isPoint(it)\n if not p and (isinstance(it, list) or isinstance(it, tuple)):\n newlist.append(inverseCoordinates(it))\n else:\n newp = [it[1],it[0]]\n newlist.append(newp)\n return newlist", "def invert(self):\n tmp = self.pvt\n self.pvt = self.nvt\n self.nvt = tmp\n tmp = self.pFace\n self.pFace = self.nFace\n self.nFace = tmp", "def inverse(self, point):\n raise NotImplementedError('The Lie group inverse is not implemented.')", "def invert(self):\n self._c = ~self._c", "def inverse(self):\n data = np.linalg.inv(self._data)\n return self.create(self.rows, self.cols, data)", "def inverted( self ):\n return self._modifier(\n self,\n lambda x: invert_bits( x, self.nr_of_pins )\n )", "def _inv(self) -> None:\n\n self.inv(inplace=True)", "def __invert__(self):\n return self.inverse()", "def invert( self ) :\n\n series_ = self.copy( )\n for l in xrange( 1, len( series_ ), 2 ) : series_.coefficients[l] *= -1\n return( series_ )", "def inverse(self):\n return self.invert()", "def inv(self):\n\n self.x, self.y = self.y, self.x\n self._x_, self._y_ = self._y_, self._x_\n self.xfac, self.yfac = 1 / self.yfac, 1 / self.xfac\n self._xfac_, self._yfac_ = 1 / self._yfac_, 1 / self._xfac_\n self._u = 1 / self._u.conj()", "def inverse(self):\n return ~self", "def invert_inplace(a):", "def invert(self):\n raise NotImplementedError()", "def inv_inplace(a):", "def inverse_transform(self, X, copy=...):\n ...", "def inv(self):\n return self.conjugate()", "def __neg__(self):\n return self.from_points(-v for v in self._vectors)", "def Inverted(self):\n return self._CreateTransformed(self._filtered_symbols,\n filtered_symbols=self._symbols,\n section_name=SECTION_MULTIPLE)", "def __invert(self, args):", "def invert_y(self):\n return Position(x_or_tuple=self.x, y=self.y * -1)", "def inverse_transform(self, X):\n\n pass # pragma: no cover", "def inv(self):\n self.inverse = not self._inverse\n return self" ]
[ "0.6987597", "0.6770105", "0.6633013", "0.6607589", "0.6598548", "0.6497842", "0.6492611", "0.643507", "0.6412205", "0.6395882", "0.6278318", "0.62737286", "0.6256399", "0.62236995", "0.6181065", "0.617367", "0.6169034", "0.6162642", "0.61406845", "0.6122141", "0.6090737", "0.6033205", "0.6022788", "0.5995691", "0.59793025", "0.5949103", "0.59478986", "0.59401834", "0.5884361", "0.58603144" ]
0.7646539
0
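One caveat worth noting about the ~ operator used above: Python's bitwise inversion is defined only for integers, so a point holding floats raises a TypeError. The stand-in below (illustration only) shows the integer behaviour, where ~n == -n - 1.

class Point:
    # Stand-in Point class for illustration only.
    def __init__(self, x=0, y=0):
        self.x, self.y = x, y

    def __invert__(self):
        # Bitwise-invert each coordinate; valid only for integer coordinates.
        return Point(~self.x, ~self.y)

    def __repr__(self):
        return f"Point({self.x}, {self.y})"

print(~Point(3, -7))  # Point(-4, 6), since ~n == -n - 1 for ints
# ~Point(1.5, 2.0) would raise TypeError: bad operand type for unary ~: 'float'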
Return the floating point squared distance between self and other. If other is not given, the squared distance from self to the origin is returned.
def distance_squared(self, other: PointOrIterable = None) -> float:
    return sum((((other or Point()) - self) ** 2))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def squaredDistanceTo(self,other):\n if not isinstance(other,Point):\n return \n return (self.longitude - other.getLongitude())**2 +(self.latitude - other.getLatitude())**2", "def distance(self, other: PointOrIterable = None) -> float:\n return (self.distance_squared(other or Point())) ** 0.5", "def get_dist_sqrd(self, other):\n return (self.x - other[0])**2 + (self.y - other[1])**2", "def distance(self, other):\n x_diff_sq = (self.x-other.x)**2\n y_diff_sq = (self.y-other.y)**2\n return (x_diff_sq + y_diff_sq)**0.5", "def distance(self, other):\n xd, yd = self.x-other.x, self.y-other.y\n return math.sqrt(xd**2 + yd**2)", "def distance(self, other):\n dx = self.x - other.x\n dy = self.y - other.y\n return math.sqrt(dx*dx + dy*dy)", "def distance(self, other):\n return math.sqrt((self.x - other.x)**2 + (self.y - other.y)**2)", "def dist(self, other: \"Vector\", sqr=False) -> float: #distance between 2 vectors\n if sqr:\n return (self-other).sqr_mag()\n return (self-other).mag()", "def distance_sq(self, other_vector):\n return sum((x - y) ** 2 for x, y in zip(self.vector, other_vector))", "def distance(self, other: \"Point\") -> float:\n if not isinstance(other, self.__class__):\n raise TypeError(\"Expected `other` to be an instance of `{}`\"\\\n .format(self.__class__))\n dx = self.x - other.x\n dy = self.y - other.y\n return sqrt((dx ** 2) + (dy ** 2))", "def distance_to(self, other):\n p_self, p_other = self.closest_points(other)\n return np.linalg.norm(p_self - p_other)", "def distance_to(self, other):\n dx = other.x - self.x\n dy = other.y - self.y\n return math.sqrt(dx ** 2 + dy ** 2)", "def distanceTo(self, other):\n result = (other._x - self._x) * (other._x - self._x) \\\n + (other._y - self._y) * (other._y - self._y)\n return result ** 0.5", "def distance(self,other):\n return math.sqrt((self.x - other.x)**2 +(self.y - other.y)**2)", "def distance_to(self, other):\n x0,y0 = self.x, self.y\n x1,y1 = other.x, other.y\n dist = math.sqrt((x1-x0) ** 2 + (y1-y0) ** 2)\n return int(dist)", "def get_distance(self, other):\n return math.sqrt((self.x - other[0])**2 + (self.y - other[1])**2)", "def calculate_distance(self, other):\n return math.sqrt((self.center[0] - other.center[0]) ** 2 + (self.center[1] - other.center[1]) ** 2)", "def dist(self, other):\n return math.sqrt((self.x - other.x)**2 +\n (self.y - other.y)**2 +\n (self.z - other.z)**2)", "def distance_to(self, other):\n if type(other) == GeoPoint:\n other = other.to_cartesian()\n d0 = self.x - other.x\n d1 = self.y - other.y\n d2 = self.z - other.z\n\n return math.sqrt(d0 * d0 + d1 * d1 + d2 * d2)", "def distance(self, other):\n\n return hypot(self.x - other.x, self.y - other.y)", "def distance(self, other):\n return float(abs(self.x - other.x) + abs(self.y - other.y))", "def distance(self, other):\n x, y, z = (self.x-other.x), (self.y-other.y), (self.z-other.z)\n return math.sqrt(x**2 + y**2 + z**2)", "def distance(self, other_vector):\n return self.distance_sq(other_vector) ** 0.5", "def distance(self, other):\n # only used in triangle.__str__\n return hypot(self.x - other.x, self.y - other.y)", "def distance(self, other):\n # distance = math.sqrt((self.position.x - other.position.x) ** 2 +\n # (self.position.y - other.position.y) ** 2)\n distance = math.sqrt(sum((self.position - other.position) ** 2))\n return distance", "def distance_to(self, other: Geometry[Scalar]) -> Scalar:\n return (self._distance_to_point(other)\n if isinstance(other, Point)\n else (non_negative_min(self._distance_to_point(point)\n for point in 
other.points)\n if isinstance(other, Multipoint)\n else other.distance_to(self)))", "def distanceTo(self,other):\n if not isinstance(other,Point):\n return \n return math.sqrt((self.longitude - other.getLongitude())**2 +(self.latitude - other.getLatitude())**2)", "def calculate_distance(self, other_point):\n return math.sqrt(\n (self._x - other_point._x)**2 +\n (self._y - other_point._y)**2)", "def distance_to(self, other):\n return float(sqrt(pow(self._row - other._row, 2) + pow(self._col - other._col, 2)))", "def distance2(self, other):\n # Used for distances in random triangle close to point\n return pow(self.x - other.x, 2) + pow(self.y - other.y, 2)" ]
[ "0.80755734", "0.80645823", "0.7802913", "0.78016543", "0.76762885", "0.7650648", "0.7602126", "0.7595", "0.75585", "0.7540435", "0.7521282", "0.7496888", "0.7477617", "0.74663025", "0.74459094", "0.7387185", "0.73853767", "0.7372873", "0.7359835", "0.73340523", "0.7304411", "0.72949016", "0.72771", "0.7274509", "0.7211934", "0.7167577", "0.71139157", "0.70996267", "0.706037", "0.702395" ]
0.8365737
0
Return a floating point value indicating the winding direction of the points [self, b, c]. If ccw < 0, clockwise winding. If ccw > 0, counter clockwise winding. If ccw == 0, the three points are colinear
def ccw(self, b: PointOrIterable, c: PointOrIterable) -> float: try: return ((b.x - self.x) * (c.y - self.y)) - ((c.x - self.x) * (b.y - self.y)) except AttributeError: pass return ((b[0] - self.x) * (c[1] - self.y)) - ((c[0] - self.x) * (b[1] - self.y))
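For orientation, here is a minimal standalone sketch of the same cross-product test; the small P class is a hypothetical stand-in that only carries the x/y attributes the method reads, not the library's full Point type.

    class P:
        def __init__(self, x, y):
            self.x, self.y = x, y

        def ccw(self, b, c):
            # same cross product as the record above (attribute path only)
            return (b.x - self.x) * (c.y - self.y) - (c.x - self.x) * (b.y - self.y)

    a, b = P(0.0, 0.0), P(1.0, 0.0)
    print(a.ccw(b, P(1.0, 1.0)))   # positive -> counter clockwise winding
    print(a.ccw(b, P(1.0, -1.0)))  # negative -> clockwise winding
    print(a.ccw(b, P(2.0, 0.0)))   # zero     -> the three points are colinear

The returned value is also twice the signed area of the triangle [self, b, c], which is why only its sign matters for winding tests.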
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wind_direction(self):\n names = ['anc_wind_direction']\n return self.sensor.get_with_fallback('wind_direction', names)", "def wind_direction(self):\n return self.flow_field.wind_direction", "def ccw(p1, p2, p3):\n return (p2[0] - p1[0])*(p3[1] - p1[1]) - (p2[1] - p1[1])*(p3[0] - p1[0])", "def direction(a:tuple, b:tuple, c:tuple)->int:\n return ((b[1] - a[1]) * (c[0] - b[0])) - ((b[0] - a[0]) * (c[1] - b[1]))", "def ccw(p1: np.ndarray, p2: np.ndarray, p3: np.ndarray) -> int:\n dx1 = p2[0] - p1[0]\n dy1 = p2[1] - p1[1]\n dx2 = p3[0] - p1[0]\n dy2 = p3[1] - p1[1]\n\n dx1dy2 = dx1 * dy2\n dy1dx2 = dy1 * dx2\n\n if dx1dy2 > dy1dx2:\n return 1\n if dx1dy2 < dy1dx2:\n return -1\n if dx1 * dx2 < 0 or dy1 * dy2 < 0:\n return -1\n if dx1 * dx1 + dy1 * dy1 < dx2 * dx2 + dy2 * dy2:\n return 1\n\n return 0", "def ccw(a, b, c):\n return (c.y - a.y) * (b.x - a.x) > (b.y - a.y) * (c.x - a.x)", "def direction(self):\n norm=math.sqrt(self.x**2 + self.y**2 + self.z**2)\n return Vector3(self.x/norm, self.y/norm, self.z/norm)", "def get_direction(self, c1, c2):\n \n if c2[0] == c1[0]+1: return NORTH\n elif c2[1] == c1[1]+1: return EAST\n elif c2[0] == c1[0]-1: return SOUTH\n elif c2[1] == c1[1]-1: return WEST\n\n raise ValueError", "def ccw(A, B, C):\n return (B.x - A.x) * (C.y - A.y) > (B.y - A.y) * (C.x - A.x)", "def is_ccw(points):\n points = np.asanyarray(points, dtype=np.float64)\n\n if (len(points.shape) != 2 or\n points.shape[1] != 2):\n raise ValueError('CCW is only defined for 2D')\n xd = np.diff(points[:, 0])\n yd = np.column_stack((\n points[:, 1],\n points[:, 1])).reshape(-1)[1:-1].reshape((-1, 2)).sum(axis=1)\n area = np.sum(xd * yd) * .5\n ccw = area < 0\n\n return ccw", "def make_shape_ccw(self):\n\n if not(self.closed):\n return\n\n # Optimization for closed shapes\n # Start value for the first sum\n\n if self.isDirectionOfGeosCCW(self.geos):\n self.reverse()\n logger.debug(self.tr(\"Had to reverse the shape to be CW\"))\n self.cw = True", "def windcal(v,u):\r\n \r\n ws = (u**2 + v**2)**0.5\r\n wd = np.arctan2(u,v)\r\n wd_ang = wd *180/np.pi\r\n wd_ang = wd_ang + 180\r\n\r\n return wd_ang,ws", "def wind_bearing(self) -> float:\r\n return self._first_timeserie[\"data\"][\"instant\"][\"details\"][\r\n \"wind_from_direction\"\r\n ]", "def calc_wdir(u,v):\n wdir = np.arctan2(u,v) * dper + 180.\n return wdir", "def covar(self):\n a, c, d, b = self.to_ccw()\n return a * d - b * c", "def dewpoint(self):\n return float(self._current_observation['dewpoint_c'])", "def computeW(self):\n E = np.where(self.v > 0, 1, -1)\n # theshold the connections to only -1,1\n binary_weights = np.where(self.c > 0, 1, self.c)\n binary_weights = np.where(binary_weights < 0, -1, binary_weights)\n W = np.sum(binary_weights * np.dot(E.reshape(-1,1), E.reshape(1,-1))) # W = C * E * E\n self.W = W\n if np.sum(binary_weights) != 0:\n self.W = self.W / np.sum(binary_weights) # W / W*\n return self.W", "def clockwise(p1, p2, p3):\n\tv1 = p2 - p1\n\tv2 = p3 - p2\n\tc = (v2.x * v1.y) - (v1.x * v2.y)\n\tif c > 0:\n\t\treturn True\n\telse:\n\t\treturn False", "def wavenumber_direction(self, x1, x2, x3, system='cartesian'):\n\n # k0 components\n (x0, y0, z0) = Point(x1, x2, x3, system).cartesian()\n psi = self.psi(x0, y0, z0, system)\n k0x = (derivative(lambda x: self.psi(x, y0, z0, system), x0)/psi).imag\n k0y = (derivative(lambda y: self.psi(x0, y, z0, system), y0)/psi).imag\n k0z = (derivative(lambda z: self.psi(x0, y0, z, system), z0)/psi).imag\n\n if (ma.isinf(k0x) is True\n or ma.isinf(k0y) is True\n or ma.isinf(k0z) is 
True):\n return (0, 0, 0)\n\n if (ma.isnan(k0x) is True\n or ma.isnan(k0y) is True\n or ma.isnan(k0z) is True):\n return (0, 0, 0)\n\n # normalize k0 vector\n if k0x != 0 or k0y != 0 or k0z != 0:\n k = [k0x, k0y, k0z]\n absk = np.linalg.norm(k)\n return (k0x/absk, k0y/absk, k0z/absk)\n return (0, 0, 0)", "def convertToWindDirection(wb):\n if wb >= 0 and wb < 11.25:\n return \"N\"\n elif wb >= 11.25 and wb < 33.75:\n return \"NNE\"\n elif wb >= 33.75 and wb < 56.25:\n return \"NE\"\n elif wb >= 56.25 and wb < 78.75:\n return \"ENE\"\n elif wb >= 78.75 and wb < 101.25:\n return \"E\"\n elif wb >= 101.25 and wb < 123.75:\n return \"ESE\"\n elif wb >= 123.75 and wb < 146.25:\n return \"SE\"\n elif wb >= 146.25 and wb < 168.75:\n return \"SSE\"\n elif wb >= 168.75 and wb < 191.25:\n return \"S\"\n elif wb >= 191.25 and wb < 213.75:\n return \"SSW\"\n elif wb >= 213.75 and wb < 236.25:\n return \"SW\"\n elif wb >= 236.25 and wb < 258.75:\n return \"WSW\"\n elif wb >= 258.75 and wb < 281.25:\n return \"W\"\n elif wb >= 281.25 and wb < 303.75:\n return \"WNW\"\n elif wb >= 303.75 and wb < 326.25:\n return \"NW\"\n elif wb >= 326.25 and wb < 348.75:\n return \"NNW\"\n elif wb >= 348.75 and wb < 360:\n return \"N\"\n else:\n return \"NA\"", "def direction(self):\r\n return 180 - atan2(self.x, self.y)*180/pi", "def compute_windchill(t,v):\n a = 35.74\n b = 0.6215\n c = 35.75\n d = 0.4275\n v16 = v**0.16\n wci = a+(b*t)-(c*v16)+(d*t*v16)\n return wci", "def find_upwinding_direction(self):\n self.upwinded_face_cell = []\n for cell_index in range(self.mesh.get_number_of_cells()):\n for [face_index, orientation] in zip(self.mesh.get_cell(cell_index), \n self.mesh.get_cell_normal_orientation(cell_index)):\n if orientation*self.current_velocity[face_index]>0:\n self.upwinded_face_cell.append([face_index, cell_index])\n\n ## Set fractional flow for dirichlet cells set by points\n ## based on up-winded flow direction. If the flow is out \n ## of the cell, this is done automatically in previous \n ## loops. However, for flow into the cell, we must find \n ## the saturation from the cell it points to. 
\n for face_index in self.mesh.get_dirichlet_pointer_faces():\n (cell_index, orientation) = self.mesh.get_dirichlet_pointer(face_index)\n if self.current_velocity[face_index]*orientation<0.:\n self.upwinded_face_cell.append([face_index, cell_index])", "def acWF(self):\n cg = self.surfaceW / self.spanW # mean geometric chord\n A = self.acW / self.cMACW\n B = 1.8 * self.fuselageDiameter * self.fuselageDiameter * self.lfn / (self.clAlphaWF * self.surfaceW * self.cMACW)\n C = 0.273 * self.fuselageDiameter * cg * (self.spanW - self.fuselageDiameter) * tan(radians(self.sweep25W))\n D = ((1 + self.taperRatioW) * (self.spanW + 2.15 * self.fuselageDiameter) * self.cMACW**2)\n return (A - B + C / D) * self.cMACW", "def counterclockwise(self, p1, p2, p3):\n return self.cross(Point(p2.x - p1.x, p2.y - p1.y), Point(p3.x - p1.x, p3.y - p1.y))", "def _find_wing_coord(self):\n frac = 0.1\n r = 0.5\n sin45 = np.sin(np.pi / 4.)\n\n if self.out == True:\n d = r - frac * sin45\n elif self.out == False:\n d = r + frac * sin45\n else:\n raise TypeError(\"arg: out must be True or False\")\n\n a = np.sqrt(frac**2 * sin45**2 + d**2)\n alpha = np.arccos(d / a)\n return [a, alpha]", "def direction(self):\n return atan2d(self.y, self.x)", "def is_ccw(self, b: PointOrIterable, c: PointOrIterable) -> bool:\n result = self.ccw(b, c)\n if result == 0:\n raise ColinearPoints(self, b, c)\n return result > 0", "def compute_direction(self, feats):\n if feats.name == \"ARNC\":\n if feats[\"z-score\"] < -1.5:\n return Directions.long_dir\n elif feats[\"z-score\"] > 1.5:\n return Directions.short_dir\n elif feats.name == \"UNG\":\n if feats[\"z-score\"] < -1.5:\n return Directions.short_dir\n elif feats[\"z-score\"] > 1.5:\n return Directions.long_dir", "def is_ccw(point_a, point_b, point_c):\r\n return is_on_line(point_a, point_b, point_c) > 0" ]
[ "0.62705654", "0.623689", "0.6155503", "0.60985595", "0.60472417", "0.60310966", "0.5945787", "0.5914404", "0.5833745", "0.58321136", "0.5825404", "0.5813546", "0.5805641", "0.57567066", "0.56856924", "0.56779724", "0.56537074", "0.56489015", "0.56035715", "0.56031066", "0.5591107", "0.5557685", "0.5530382", "0.5517186", "0.5507099", "0.5468569", "0.54670155", "0.54467446", "0.5429027", "0.5426494" ]
0.6929871
0
Return True if the angle [self, b, c] has counter clockwise winding, else False. Raises the exception `ColinearPoints` if the points compose a line.
def is_ccw(self, b: PointOrIterable, c: PointOrIterable) -> bool: result = self.ccw(b, c) if result == 0: raise ColinearPoints(self, b, c) return result > 0
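A behavioural sketch of the boolean wrapper; everything below is a self-contained stand-in, and the ColinearPoints class is only a placeholder for the library's exception of the same name.

    class ColinearPoints(Exception):
        pass

    class P:
        def __init__(self, x, y):
            self.x, self.y = x, y

        def ccw(self, b, c):
            return (b.x - self.x) * (c.y - self.y) - (c.x - self.x) * (b.y - self.y)

        def is_ccw(self, b, c):
            result = self.ccw(b, c)
            if result == 0:
                raise ColinearPoints(self, b, c)
            return result > 0

    a, b = P(0, 0), P(1, 0)
    print(a.is_ccw(b, P(1, 1)))    # True, left turn
    print(a.is_ccw(b, P(1, -1)))   # False, right turn
    try:
        a.is_ccw(b, P(2, 0))       # colinear points raise instead of returning False
    except ColinearPoints:
        print("colinear")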
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_colinear(self, b: PointType, c: PointType) -> bool:\n return self.ccw(b, c) == 0", "def is_ccw(point_a, point_b, point_c):\r\n return is_on_line(point_a, point_b, point_c) > 0", "def is_ccw(points):\n points = np.asanyarray(points, dtype=np.float64)\n\n if (len(points.shape) != 2 or\n points.shape[1] != 2):\n raise ValueError('CCW is only defined for 2D')\n xd = np.diff(points[:, 0])\n yd = np.column_stack((\n points[:, 1],\n points[:, 1])).reshape(-1)[1:-1].reshape((-1, 2)).sum(axis=1)\n area = np.sum(xd * yd) * .5\n ccw = area < 0\n\n return ccw", "def is_ccw(a, b, c):\n p = b - a\n q = c - a\n area = p.x * q.y - q.x * p.y\n\t # May want to throw an exception if area == 0\n return area > 0", "def isclockwise(self):\n s = sum((seg[1][0] - seg[0][0]) * (seg[1][1] + seg[0][1])\n for seg in self.segment_tuples)\n return s > 0", "def clockwise(p1, p2, p3):\n\tv1 = p2 - p1\n\tv2 = p3 - p2\n\tc = (v2.x * v1.y) - (v1.x * v2.y)\n\tif c > 0:\n\t\treturn True\n\telse:\n\t\treturn False", "def is_clockwise(vertices):\n v = vertices\n area = ((v[1][0] - v[0][0]) * (v[1][1] + v[0][1]) +\n (v[2][0] - v[1][0]) * (v[2][1] + v[1][1]) +\n (v[0][0] - v[2][0]) * (v[0][1] + v[2][1])) / 2\n return (area > 0)", "def are_vertices_clockwise(self,line):\r\n \r\n import numpy as np\r\n \r\n signed_area = 0\r\n for idx in range(line.shape[0]):\r\n \r\n x1 = line[idx,0]\r\n y1 = line[idx,1]\r\n if idx == line.shape[0]-1:\r\n x2 = line[0,0]\r\n y2 = line[0,1]\r\n else:\r\n x2 = line[idx+1,0]\r\n y2 = line[idx+1,1]\r\n \r\n signed_area += (x1 * y2 - x2 * y1)\r\n \r\n return (np.sign(signed_area) == -1.)", "def are_vertices_clockwise(self,line):\r\n \r\n import numpy as np\r\n \r\n signed_area = 0\r\n for idx in range(line.shape[0]):\r\n \r\n x1 = line[idx,0]\r\n y1 = line[idx,1]\r\n if idx == line.shape[0]-1:\r\n x2 = line[0,0]\r\n y2 = line[0,1]\r\n else:\r\n x2 = line[idx+1,0]\r\n y2 = line[idx+1,1]\r\n \r\n signed_area += (x1 * y2 - x2 * y1)\r\n \r\n return (np.sign(signed_area) == -1.)", "def areColinear(self, other):\n perpVect = np.array([-self.vector[c.Y], self.vector[c.X]])\n vect1 = other.end[:2]-self.start[:2]\n vect2 = other.start[:2]-self.start[:2]\n cosTheda1 = (np.dot(perpVect, vect1)/\n (np.linalg.norm(perpVect)*np.linalg.norm(vect1)))\n if abs(cosTheda1) > 0.0001:\n return False\n cosTheda2 = (np.dot(perpVect, vect2)/\n (np.linalg.norm(perpVect)*np.linalg.norm(vect2)))\n return not(abs(cosTheda2) > 0.0001)", "def convex(self):\n x, y = self.center\n angles = []\n l = len(self.points)\n for i in range(l - 1):\n A = self.points[(i + l - 1) % l]\n B = self.points[i % l]\n C = self.points[(i + 1) % l]\n u = Vector.createFromTwoPoints(A, B)\n v = Vector.createFromTwoPoints(C, B)\n angle = v ^ u\n if angle > pi:\n return True\n return False", "def validate_clockwise_points(points):\n \n if len(points) != 8:\n raise Exception(\"Points list not valid.\" + str(len(points)))\n \n point = [\n [int(points[0]) , int(points[1])],\n [int(points[2]) , int(points[3])],\n [int(points[4]) , int(points[5])],\n [int(points[6]) , int(points[7])]\n ]\n edge = [\n ( point[1][0] - point[0][0])*( point[1][1] + point[0][1]),\n ( point[2][0] - point[1][0])*( point[2][1] + point[1][1]),\n ( point[3][0] - point[2][0])*( point[3][1] + point[2][1]),\n ( point[0][0] - point[3][0])*( point[0][1] + point[3][1])\n ]\n \n summatory = edge[0] + edge[1] + edge[2] + edge[3];\n return summatory <= 0", "def is_percolates(self):\n return self._uf.connected(self._top_idx, self._bottom_idx)", "def is_ccw(geometry, **kwargs):\n return 
lib.is_ccw(geometry, **kwargs)", "def isCollinear(a,b,c):\r\n #return slope(a, b) == slope(b, c) == slope(c, a) #DOES NOT WORK\r\n #return (b[0] - a[0]) * (c[1] - a[1]) == (c[0] - a[0]) * (b[1] - a[1]) \r\n #return distance(a,b) + distance(b,c) == distance(a,c)\r\n x1 = a[0]\r\n y1 = a[1]\r\n x2 = b[0]\r\n y2 = b[1]\r\n x3 = c[0]\r\n y3 = c[1] \r\n if (x1*(y2 - y3)) + (x2*(y3 - y1)) + (x3*(y1-y2)) == 0: \r\n return True\r\n else:\r\n return False", "def ring_is_clockwise(ring):\n total = 0\n for (pt1, pt2) in pairwise(ring):\n total += (pt2[0] - pt1[0]) * (pt2[1] + pt1[1])\n return total >= 0", "def has_arc(self, a, b):\n return self.matrix[a][b] != 0", "def is_right_angle(a, b, c):\n if a == 0 or b == 0 or c == 0:\n return False\n else :\n return (a == b + c) or (b == c + a) or (c == a + b)", "def is_colinear(a, b, c):\n return (b[0] - a[0]) * (c[1] - a[1]) - (b[1] - a[1]) * (c[0] - a[0]) == 0", "def validate_clockwise_points(points):\n\n if len(points) != 8:\n raise Exception(\"Points list not valid.\" + str(len(points)))\n\n point = [\n [int(points[0]), int(points[1])],\n [int(points[2]), int(points[3])],\n [int(points[4]), int(points[5])],\n [int(points[6]), int(points[7])]\n ]\n edge = [\n (point[1][0] - point[0][0]) * (point[1][1] + point[0][1]),\n (point[2][0] - point[1][0]) * (point[2][1] + point[1][1]),\n (point[3][0] - point[2][0]) * (point[3][1] + point[2][1]),\n (point[0][0] - point[3][0]) * (point[0][1] + point[3][1])\n ]\n\n summatory = edge[0] + edge[1] + edge[2] + edge[3];\n if summatory > 0:\n raise Exception(\n \"Points are not clockwise. The coordinates of bounding quadrilaterals have to be given in clockwise order. Regarding the correct interpretation of 'clockwise' remember that the image coordinate system used is the standard one, with the image origin at the upper left, the X axis extending to the right and Y axis extending downwards.\")", "def has_undercoordinated_c(self) -> bool:\n if self._undercoordinated_carbon is not None:\n return self._undercoordinated_carbon\n\n self._has_undercoordinated_carbon()\n return self._undercoordinated_carbon", "def _has_undercoordinated_carbon(self, tolerance: int = 10):\n undercoordinated_carbon = False\n\n for site_index in self.c_indices:\n cn = self.get_cn(site_index) # pylint:disable=invalid-name\n if cn == 2:\n # ToDo: Check if it is bound to metal, then it might be a carbide\n neighbors = self.get_connected_sites(site_index)\n angle = _maximum_angle(\n self.structure.get_angle(\n site_index, neighbors[0].index, neighbors[1].index\n )\n )\n if np.abs(180 - angle) > tolerance:\n undercoordinated_carbon = True\n break\n self._undercoordinated_carbon = undercoordinated_carbon", "def hasCollinearPoints(listOfPoints):\r\n for points in listOfPoints:\r\n if isCollinear(points[0], points[1], points[2]): #If any of the points are collinear\r\n return True\r\n else:\r\n pass\r\n return False #If none of the points are collinear\r", "def assert_collinear(*points: Point, tolerance: float = 1e-2) -> bool:\n if len(points) < 3:\n raise ValueError(\"CurveChecker.assert_collinear() must be called with at least three points\")\n\n thetas = [np.arctan2(p0[1] - p1[1], p0[0] - p1[0]) for p0, p1 in zip(points, points[1:])]\n for t0, t1 in zip(thetas, thetas[1:]):\n if abs(t0 - t1) > tolerance:\n return False\n\n return True", "def verify_legal_rotation(self, direction):\n test_figure = None\n if direction == \"CW\":\n test_figure = self.get_block_positions(self.active_piece.get_cw_rotation())\n elif direction == \"CCW\":\n test_figure = 
self.get_block_positions(self.active_piece.get_ccw_rotation())\n\n for b_x, b_y in test_figure:\n if b_x < 0 or b_x >= self.WIDTH:\n return False\n\n if b_y < 0 or b_y >= self.HEIGHT:\n return False\n\n if self.board[b_y][b_x] != 0:\n return False\n return True", "def isDiagonal(self):\n raise Exception('Deprecated')\n return self.direction % 2 == 1", "def is_inside(self, points):\n points = np.atleast_2d(points) - self.centroid\n return np.logical_and(\n np.linalg.norm(points, axis=-1) <= self.radius,\n # At present circles are not orientable, so the z position must\n # match exactly.\n np.isclose(points[:, 2], 0),\n )", "def isEquilateral(self):\n\t\treturn self.a == self.b == self.c", "def ccw(a, b, c):\n return (c.y - a.y) * (b.x - a.x) > (b.y - a.y) * (c.x - a.x)", "def is_triangle(a, b, c):\n a, b, c = sorted([a, b, c])\n return True if a > abs(b - c) and a < (b + c) else False" ]
[ "0.7609235", "0.7557099", "0.6990511", "0.68427503", "0.683419", "0.68226945", "0.6582589", "0.64759666", "0.64759666", "0.64698136", "0.6272435", "0.6197709", "0.6191934", "0.61795664", "0.61134416", "0.60998017", "0.6020702", "0.59578556", "0.59384805", "0.58934057", "0.58770293", "0.5875998", "0.58245224", "0.5815634", "0.58026606", "0.56799877", "0.5641739", "0.5591065", "0.5567462", "0.5562478" ]
0.7955458
0
Return a new Point midway between `self` and `other`. If other is not given, the midpoint between self and the origin is returned.
def midpoint(self, other: PointType = None) -> PointType: return (self + (other or Point())) / 2
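A quick worked check of the formula using bare coordinates rather than the library's Point arithmetic; the helper name below is illustrative only.

    def midpoint(ax, ay, bx=0.0, by=0.0):
        # defaulting the second point to (0, 0) mirrors the `other or Point()` fallback above
        return (ax + bx) / 2, (ay + by) / 2

    print(midpoint(4.0, 6.0, 2.0, -2.0))  # (3.0, 2.0)
    print(midpoint(4.0, 6.0))             # (2.0, 3.0), midway to the origin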
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mid_point(a: Point, b: Point) -> Point:\n return Point((a.x + b.x) / 2, (a.y + b.y) / 2)", "def halfway(self, target):\n mx = (self.x + target.x) / 2\n my = (self.y + target.y) / 2\n return Point(mx, my)", "def halfway(self, target):\r\n mx = (self.x + target.x)/2\r\n my = (self.y + target.y)/2\r\n return Point(mx, my)", "def halfway(self, target):\n mx = (self.x + target.x)/2\n my = (self.y + target.y)/2\n return Point(mx, my)", "def halfway(self, target):\n mx = (self.x + target.x)/2\n my = (self.y + target.y)/2\n return Point(mx, my)", "def halfway(self, target):\n mx = (self.x + target.x)/2\n my = (self.y + target.y)/2\n return Point(mx, my)", "def midpoint(self) -> Point:\n l = self._line.meet(infty_hyperplane(self.dim))\n return harmonic_set(*self.vertices, l)", "def intersect(self, other):\n if isinstance(other, Arc):\n return other.intersect(self)\n elif not isinstance(other, LineSegment):\n raise TypeError(other)\n S = (self.p2 - self.p1).scale(1.)\n T = (other.p2 - other.p1).scale(1.)\n denom = S.y * T.x - S.x * T.y\n if nearly_zero(denom):\n if nearly_zero(S.cross(other.p1 - self.p1)):\n q1 = (other.p1 - self.p1) * S / (S * S)\n q2 = (other.p2 - self.p1) * S / (S * S)\n if q2 < q1:\n q1, q2 = q2, q1\n left, right = max(0, q1), min(1, q2)\n if left < right:\n return LineSegment(self.p1 + left * S, self.p1 + right * S)\n return None\n a = (T.x * (other.p1.y - self.p1.y) - T.y * (other.p1.x - self.p1.x)) / denom\n b = (S.x * (other.p1.y - self.p1.y) - S.y * (other.p1.x - self.p1.x)) / denom\n if 0 <= a <= 1 and 0 <= b <= 1:\n return self.p1 + a * S\n # else return None because we don't intersect", "def midpoint(a, b):\n mp = [(a.x + b.x) / 2, (a.y + b.y) / 2]\n return Vector(*mp)", "def getMidPoint(self):\n return p.Point((self.start.normalVector + self.end.normalVector)/2.0)", "def distanceKmTo(self, other):\n lon1 = math.radians(self.longitude)\n lon2 = math.radians(other.longitude)\n dlon = lon2 - lon1\n lat1 = math.radians(self.latitude)\n lat2 = math.radians(other.latitude)\n dlat = lat2 - lat1\n\n \n a = math.sin(dlat / 2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2)**2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n\n return Point.R * c", "def calculate_distance(self, other):\n return math.sqrt((self.center[0] - other.center[0]) ** 2 + (self.center[1] - other.center[1]) ** 2)", "def crossSegment(self, other):\n ml = self.getLine(correct=False)\n ol = other.getLine(correct=False)\n point = ml.crossLine(ol)\n if point:\n if (point in self) and (point in other):\n return point", "def distanceTo(self, other):\n result = (other._x - self._x) * (other._x - self._x) \\\n + (other._y - self._y) * (other._y - self._y)\n return result ** 0.5", "def crossHalfLine(self, other):\n ml = self.getLine(correct=False)\n ol = other.getLine(correct=False)\n point = ml.crossLine(ol)\n if point:\n if (point in self) and (point in other):\n return point", "def distanceTo(self,other):\n if not isinstance(other,Point):\n return \n return math.sqrt((self.longitude - other.getLongitude())**2 +(self.latitude - other.getLatitude())**2)", "def distance(self, other):\n return math.sqrt((self.x - other.x)**2 + (self.y - other.y)**2)", "def test_midpoint(self):\n p1 = Point(0, 0)\n p2 = Point(10, 10)\n midpoint = p1.midpoint(p2)\n self.assertAlmostEqual(midpoint.lat, 5)\n self.assertAlmostEqual(midpoint.lon, 5)", "def crossSegment(self, other, e=1e-14, **kwargs):\n # Determine the point of intersection between the line of the given segment ang the line\n line = other.getLine()\n 
point = self.crossLine(line)\n if point is None:\n return None\n x, y = point\n # Determine if the point of intersection belongs to both the segment and the line\n if other.xmin - e <= point.x <= other.xmax + e and other.ymin - e <= y <= other.ymax + e:\n return Point(x, y, **kwargs)\n # By default if nothing is returned the function returns None", "def mh_dist(self, OtherPoint=None):\n if OtherPoint is None:\n OtherPoint=GridPoint(0,0)\n return abs(OtherPoint.x - self.x) + abs(OtherPoint.y - self.y)", "def intersection(self, other):\n log.info('self: '+str(self)+' other: '+str(other))\n if self == other:\n # Used to be return True, that is definitely not right (expects Coordinate)\n # Do we want start or end ? Does it matter? Lines are the same, everything is\n # an intersection.\n return self.start\n # If any of the start/end points match, return that point.\n if self.end==other.start or self.end == other.end:\n return self.end \n if self.start==other.start or self.start == other.end: \n return self.start\n\n # Line equation: y = mx + b\n # m = (y2-y1)/(x2-x1)\n # B_self = y - M_self*x\n # Pick any x/y on the line - try end point\n # B_self = self.end.lat - M_self*self.end.lon\n # B_other = other.end.lat - M_self*self.end.lon\n from pyresample.spherical_geometry import Coordinate\n\n selfendlon = self.end.lon\n selfstartlon = self.start.lon\n otherendlon = other.end.lon\n otherstartlon = other.start.lon\n # Not sure if this is necessary, or good...\n# if self.end.lon < 0:\n# selfendlon = self.end.lon + 2*math.pi\n# if self.start.lon < 0:\n# selfstartlon = self.start.lon + 2*math.pi\n# if other.end.lon < 0:\n# otherendlon = other.end.lon + 2*math.pi\n# if other.start.lon < 0:\n# otherstartlon = other.start.lon + 2*math.pi\n\n log.info(' self lons: '+str(math.degrees(selfstartlon))+' '+str(math.degrees(selfendlon))+' other lons: '+str(math.degrees(otherstartlon))+' '+str(math.degrees(otherendlon)))\n\n # If both vertical, will be no intersection\n if abs(selfendlon - selfstartlon) < EPSILON and abs(otherendlon - otherstartlon) < EPSILON:\n log.info(' Both vertical, no intersection')\n return None\n # If self is vertical, but not parallel, intersection will be selfstartlon and lat = Mother*lon+B_other\n if abs(selfendlon - selfstartlon) < EPSILON:\n lon = selfstartlon\n M_other = (other.end.lat - other.start.lat)/(otherendlon-otherstartlon)\n B_other = other.end.lat - M_other*otherendlon\n lat = M_other*lon+B_other\n log.info(' self is vertical')\n #Make sure it falls within the segment and not outside.\n # Previously was only checking lat, need to \n # also check lon or opposite side of world would match\n if (lat > min([self.end.lat,self.start.lat]) and \n lat < max([self.end.lat,self.start.lat]) and\n lon > min([otherendlon,otherstartlon]) and\n lon < max([otherendlon,otherstartlon])):\n log.info(' and intersects')\n # Apparently Coordinate takes degrees ??? 
And must be -180 to 180 ?!\n # MLS use wrap_longitudes?\n if lon > math.pi:\n lon -= 2*math.pi\n return Coordinate(math.degrees(lon),math.degrees(lat))\n else:\n return None\n # same for other\n if abs(otherendlon - otherstartlon) < EPSILON:\n lon = otherstartlon\n M_self = (self.end.lat - self.start.lat)/(selfendlon-selfstartlon)\n B_self = self.end.lat - M_self*selfendlon\n lat = M_self*lon+B_self\n log.info(' other is vertical')\n #Make sure it falls within the segment and not outside.\n # Previously was only checking lat, need to \n # also check lon or opposite side of world would match\n if (lat > min([other.end.lat,other.start.lat]) and \n lat < max([other.end.lat,other.start.lat]) and \n lon > min([selfendlon,selfstartlon]) and\n lon < max([selfendlon,selfstartlon])):\n log.info(' and intersects')\n # Apparently Coordinate takes degrees ??? And must be -180 to 180 ?!\n # MLS Use wrap_longitudes?\n if lon > math.pi:\n lon -= 2*math.pi\n return Coordinate(math.degrees(lon),math.degrees(lat))\n else:\n return None\n\n \n\n # Get slopes of the lines \n M_self = (self.end.lat - self.start.lat)/(selfendlon-selfstartlon)\n M_other = (other.end.lat - other.start.lat)/(otherendlon-otherstartlon)\n \n # If they are parallel, no intersection\n if (M_self-M_other) < EPSILON:\n log.info(' self and other are parallel, no intersection')\n return None\n\n # Get the y-intercepts of the lines \n B_self = self.end.lat - M_self*selfendlon\n B_other = other.end.lat - M_other*otherendlon\n\n # Solve the equation\n # y=m1x+b1 and y=m2x+b2, equate y's so m1x+b1=m2x+b2, x = (b1-b2)/(m2-m1)\n # equate x's so x=(y-b1)/m1=(y-b2)/m2, y = (b1m2-b2m1)/(m2-m1)\n lon = (B_self - B_other)/(M_other - M_self)\n lat = (B_self*M_other - B_other*M_self)/(M_other-M_self)\n\n # Make sure lat/lon intersects within the line segment, and not outside.\n if (lat > min([other.end.lat,other.start.lat]) and \n lat < max([other.end.lat,other.start.lat]) and\n lon > min([otherendlon,otherstartlon]) and \n lon < max([otherendlon,otherstartlon]) and\n lat > min([self.end.lat,self.start.lat]) and \n lat < max([self.end.lat,self.start.lat]) and\n lon > min([selfendlon,selfstartlon]) and \n lon < max([selfendlon,selfstartlon])):\n log.info(' self and other intersect within segment')\n # Apparently Coordinate takes degrees ??? 
And must be -180 to 180 ?!\n # MLS use wrap longitudes?\n if lon > math.pi:\n lon -= 2*math.pi\n return Coordinate(math.degrees(lon),math.degrees(lat))\n else:\n log.info(' self and other intersect, but not within segment')\n return None", "def __truediv__(self, other):\n return Point(x / other for x in self.data)", "def root_midpoint(self):\n node1, node2, distance = self.find_middle_point()\n self.root_nodes(node1, node2, distance)", "def midpoint(point1, point2):\n\n x, y = (int((point1[0] + point2[0]) / 2), int((point1[1] + point2[1]) / 2))\n return (x, y)", "def midpoint(ptA, ptB):\n return( (ptA[0] + ptB[0]) * 0.5, (ptA[1]+ ptB[1]) * 0.5 )", "def harversine_distance(self, other):\n\n lat1, lon1, lat2, lon2 = (\n a/180*pi for a in [self.stop_lat, self.stop_lon, other.stop_lat, other.stop_lon])\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon/2) ** 2\n c = 2 * asin(min(1, sqrt(a)))\n d = 3956 * 1609.344 * c\n return d", "def distance(self, other):\n x_diff_sq = (self.x-other.x)**2\n y_diff_sq = (self.y-other.y)**2\n return (x_diff_sq + y_diff_sq)**0.5", "def distance(self, other):\n return float(abs(self.x - other.x) + abs(self.y - other.y))", "def __add__(self, other):\n return Point(self.x+other.x, self.y+other.y)", "def distance_to(self, other: Geometry[Scalar]) -> Scalar:\n return (self._distance_to_point(other)\n if isinstance(other, Point)\n else (non_negative_min(self._distance_to_point(point)\n for point in other.points)\n if isinstance(other, Multipoint)\n else other.distance_to(self)))" ]
[ "0.6676469", "0.60114807", "0.6004466", "0.5985635", "0.5985635", "0.5985635", "0.59546316", "0.5879427", "0.5844241", "0.5816637", "0.580738", "0.58004814", "0.5790193", "0.57144624", "0.5713127", "0.571122", "0.56574553", "0.56470776", "0.5636528", "0.5634694", "0.5630058", "0.56193787", "0.56055135", "0.5597262", "0.5589404", "0.55881345", "0.5583736", "0.55497915", "0.5534554", "0.5532526" ]
0.8038037
0
True if self is bounded by the points [p, q], else False. The bounds are checked by less than or equal to (<=) so self is considered between if it resides on any of the lines constructed using [p,q].
def between(self, p: PointType, q: PointType) -> bool: i = min(p.x, q.x) <= self.x <= max(p.x, q.x) j = min(p.y, q.y) <= self.y <= max(p.y, q.y) return i and j
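The test is an inclusive axis-aligned bounding-box check; a plain-tuple sketch with hypothetical names:

    def between(x, y, p, q):
        # inclusive (<=) bounds, mirroring the comparisons above
        return (min(p[0], q[0]) <= x <= max(p[0], q[0])
                and min(p[1], q[1]) <= y <= max(p[1], q[1]))

    p, q = (0, 0), (4, 4)
    print(between(2, 2, p, q))  # True, strictly inside the box
    print(between(4, 2, p, q))  # True, a point on the edge still counts
    print(between(5, 2, p, q))  # False, outside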
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inside(self, p: PointType, q: PointType) -> bool:\n\n # XXX re-implement with ccw and a list of points instead of a pair\n\n i = min(p.x, q.x) < self.x < max(p.x, q.x)\n j = min(p.y, q.y) < self.y < max(p.y, q.y)\n\n return i and j", "def onSegment(self, p, q, r):\n if ((q.x <= max(p.x, r.x)) and (q.x >= min(p.x, r.x)) and\n (q.y <= max(p.y, r.y)) and (q.y >= min(p.y, r.y))):\n return True\n return False", "def is_bound(self, point):\n return self.__begin == point or self.__end == point", "def on_segment(point_p, point_q, point_r):\n if (point_q.x <= max(point_p.x, point_r.x)\n and point_q.x >= min(point_p.x, point_r.x)\n and point_q.y <= max(point_p.y, point_r.y)\n and point_q.y >= min(point_p.y, point_r.y)):\n return True\n return False", "def within(p, q, r):\r\n return p <= q <= r or r <= q <= p", "def _point_within_bounds(bounds, p):\n A, B = bounds\n # we have to add epsilon since test against horizontal or vertical\n # lines may fail if the point is off by numerical precision\n eps = 1e-10\n (Ax,Ay), (Bx,By), (px,py)=A,B,p\n return (\n (min((Ax,Bx))-eps<=px<=max((Ax,Bx))+eps) and\n (min((Ay,By))-eps<=py<=max((Ay,By))+eps)\n )", "def _check_bound(self, q):\n mat = ur_utils.forward(q, self._ik_params)\n xyz = mat[:3, 3]\n inside_bound = np.all(self._end_effector_low <= xyz) and np.all(xyz <= self._end_effector_high)\n inside_buffer_bound = (np.all(self._end_effector_low + self._box_bound_buffer <= xyz) and \\\n np.all(xyz <= self._end_effector_high - self._box_bound_buffer))\n return inside_bound, inside_buffer_bound, mat, xyz", "def contains_point(self, p):\n return self.begin <= p < self.end", "def isInternal(self, aPoint):\n if (aPoint.x >= self.pMin.x and aPoint.x <= self.pMax.x) \\\n and (aPoint.y >= self.pMin.y and aPoint.y <= self.pMax.y):\n return True\n else:\n return False", "def boundary_check(limits : tuple, coords : tuple) -> bool:\n xl,xh,yl,yh = limits\n x,y = coords\n bound_x = xl <= x and x < xh\n bound_y = yl <= y and y < yh\n return bound_x and bound_y", "def in_bounds(self, point):\n # Sanity checks\n # Check that point has same number of dimensions as graph\n if not len(point) == len(self.dimensions):\n raise Exception(\"Point has \" + str(len(point)) + \" dimensions, Coordination Space has \" + \\\n str(len(self.dimensions)) + \" dimensions.\")\n\n for i, coordinate in enumerate(point):\n if coordinate > self.dimensions[i] or coordinate < 0:\n return False\n\n return True", "def inBounds(self, px, py):\n return px >= 0 and py >= 0 and px < self.w and py < self.h", "def check_boundedness(self):\n if SymEq.check_boundedness(self.aMatrix,\n self.bMatrix,\n self.eqMatrix,\n SymEq.get_var_list(self.raw_expression)):\n return True\n else:\n raise Exception(\"[RectangleSet ERROR]: (Initial) Set NOT Bounded.\")", "def within(self, x, y):\n return x >= self.top_x and x <= self.bottom_x and y >= self.bottom_y and y <= self.top_y", "def __contains__(self,pos):\n # Permet de donner une contenance a l objet\n # Il devient comme une liste de point\n # Ainsi on peut le parcourir comme on le ferai avec une liste\n xmin=self.pos[0]\n xmax=self.pos[0]+self.dim[0]\n ymin=self.pos[1]\n ymax=self.pos[1]+self.dim[1]\n xpt=pos[0]\n ypt=pos[1]\n return (xpt>=xmin and xpt<=xmax and ypt>=ymin and ypt<=ymax)", "def __contains__(self,pos):\n # Permet de donner une contenance a l objet\n # Il devient comme une liste de point\n # Ainsi on peut le parcourir comme on le ferai avec une liste\n xmin=self.pos[0]\n xmax=self.pos[0]+self.dim[0]\n ymin=self.pos[1]\n 
ymax=self.pos[1]+self.dim[1]\n xpt=pos[0]\n ypt=pos[1]\n return (xpt>=xmin and xpt<=xmax and ypt>=ymin and ypt<=ymax)", "def inside_limits(self, point):\n if not self.regions:\n # Use rectangle check\n lat, lon = point.latitude, point.longitude\n if (lon > self.limits[0] and lat > self.limits[1] and\n lon < self.limits[2] and lat < self.limits[3]):\n return True\n else:\n return False\n else:\n # Check inside all possible regions\n p = Point((point.longitude, point.latitude))\n print(p, point)\n # import IPython; IPython.embed()\n for name, poly in self.regions.items():\n # if poly.contains(p):\n if p.intersects(poly):\n return name\n return False", "def _in_bounds(self, x, y):\r\n return 0 <= x < 8 and 0 <= y < 8", "def IsBound(self) -> bool:", "def contains(self, pt):\n x,y = pt.as_tuple()\n return (self.left <= x <= self.right and\n self.top <= y <= self.bottom)", "def contains_point(self, x, y):\r\n if self.m == None:\r\n if abs(x - self.start[0]) > 0.6:\r\n return False\r\n else:\r\n if (y >= self.start[1] and y <= self.end[1]) or \\\r\n (y <= self.start[1] and y >= self.end[1]):\r\n return True\r\n else:\r\n return False\r\n else: \r\n y0 = int(self.m * x + self.n)\r\n if abs(y - y0) > 0.6: \r\n return False \r\n else: \r\n if ((x >= self.start[0] and x <= self.end[0]) or \\\r\n (x <= self.start[0] and x >= self.end[0])) and \\\r\n ((y >= self.start[1] and y <= self.end[1]) or \\\r\n (y <= self.start[1] and y >= self.end[1])): \r\n return True\r\n else:\r\n return False", "def is_bounded(self):\n return True", "def __ge__(self, other):\n result = False\n if isinstance(other, Shape) and (self.area >= other.area):\n result = True\n return result", "def in_bounds(self, x, y):\n return x >= 0 and x < 8 and y >= 0 and y < 8", "def contains_pt(self, pt):\n x, y = pt\n if not self.x - self.radius < x < self.x + self.radius:\n return False\n if not self.y - self.radius < y < self.y + self.radius:\n return False\n return True", "def box_valid(self):\n return ((self.lt.x >= 0)\n and (self.lt.y >= 0)\n and (self.rb.x >= self.lt.x)\n and (self.rb.y >= self.lt.y))", "def intersect_with(self, other):\n point = self._lines_intersection(other)\n\n if point is False:\n return False\n\n if point is True:\n return not(\n self.min_x() > other.max_x() or\n other.min_x() > self.max_x() or\n self.min_y() > other.max_y() or\n other.min_y() > self.max_y()\n )\n\n else:\n return (\n self.contains_point(point) and\n other.contains_point(point) and\n point\n )", "def _inside_bounds(A, B):\n for axis in 'xyz':\n minA, maxA = axis_bounds(A, axis)\n minB, maxB = axis_bounds(B, axis)\n if (minA <= minB) or (maxA >= maxB):\n return False\n\n return True", "def contains_point(self, x, y = None):\n x, y = y is not None and Point(x, y) or Point(x[0], x[1])\n\n cond1 = self.min_x() <= x <= self.max_x()\n cond2 = self.min_y() <= y <= self.max_y()\n return self.is_point_on_same_line(x, y) and cond1 and cond2", "def isPointInside(self, p):\n x,y = p[0], p[1]\n A = self.left <= x <= self.right\n B = self.bottom <= y <= self.top\n return (A and B)" ]
[ "0.76864725", "0.72723484", "0.7039567", "0.70286196", "0.6840898", "0.67668295", "0.6696259", "0.6675997", "0.6499764", "0.6421155", "0.6416931", "0.6390172", "0.63788533", "0.63346475", "0.6312514", "0.6312514", "0.6284577", "0.62795115", "0.6248119", "0.62449133", "0.62267524", "0.6202693", "0.61994886", "0.6195924", "0.6191851", "0.61709726", "0.61373734", "0.61364734", "0.6132411", "0.6119155" ]
0.80105644
0
True if self is bounded by the points (p, q), else False. The bounds are checked by less than (<) so self is considered inside if it does not reside on any of the lines constructed using (p,q).
def inside(self, p: PointType, q: PointType) -> bool: # XXX re-implement with ccw and a list of points instead of a pair i = min(p.x, q.x) < self.x < max(p.x, q.x) j = min(p.y, q.y) < self.y < max(p.y, q.y) return i and j
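The strict counterpart of the previous check; the matching plain-tuple sketch shows that edge points are now rejected:

    def inside(x, y, p, q):
        # strict (<) bounds: points on the box edges are excluded
        return (min(p[0], q[0]) < x < max(p[0], q[0])
                and min(p[1], q[1]) < y < max(p[1], q[1]))

    p, q = (0, 0), (4, 4)
    print(inside(2, 2, p, q))  # True
    print(inside(4, 2, p, q))  # False, the same edge point that `between` accepts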
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def between(self, p: PointType, q: PointType) -> bool:\n\n i = min(p.x, q.x) <= self.x <= max(p.x, q.x)\n j = min(p.y, q.y) <= self.y <= max(p.y, q.y)\n\n return i and j", "def onSegment(self, p, q, r):\n if ((q.x <= max(p.x, r.x)) and (q.x >= min(p.x, r.x)) and\n (q.y <= max(p.y, r.y)) and (q.y >= min(p.y, r.y))):\n return True\n return False", "def on_segment(point_p, point_q, point_r):\n if (point_q.x <= max(point_p.x, point_r.x)\n and point_q.x >= min(point_p.x, point_r.x)\n and point_q.y <= max(point_p.y, point_r.y)\n and point_q.y >= min(point_p.y, point_r.y)):\n return True\n return False", "def within(p, q, r):\r\n return p <= q <= r or r <= q <= p", "def _point_within_bounds(bounds, p):\n A, B = bounds\n # we have to add epsilon since test against horizontal or vertical\n # lines may fail if the point is off by numerical precision\n eps = 1e-10\n (Ax,Ay), (Bx,By), (px,py)=A,B,p\n return (\n (min((Ax,Bx))-eps<=px<=max((Ax,Bx))+eps) and\n (min((Ay,By))-eps<=py<=max((Ay,By))+eps)\n )", "def is_bound(self, point):\n return self.__begin == point or self.__end == point", "def isPointInside(self, p):\n x,y = p[0], p[1]\n A = self.left <= x <= self.right\n B = self.bottom <= y <= self.top\n return (A and B)", "def isPointInside(self, p):\n x,y = p[0], p[1]\n A = self.left <= x <= self.right\n B = self.bottom <= y <= self.top\n return (A and B)", "def is_inside(self, p):\n s, t = self.get_barycentric_coord(p)\n if 0 <= s <= 1 and 0 <= t <= 1 and s + t <= 1:\n return True\n else:\n return False", "def contains_point(self, p):\n return self.begin <= p < self.end", "def _check_bound(self, q):\n mat = ur_utils.forward(q, self._ik_params)\n xyz = mat[:3, 3]\n inside_bound = np.all(self._end_effector_low <= xyz) and np.all(xyz <= self._end_effector_high)\n inside_buffer_bound = (np.all(self._end_effector_low + self._box_bound_buffer <= xyz) and \\\n np.all(xyz <= self._end_effector_high - self._box_bound_buffer))\n return inside_bound, inside_buffer_bound, mat, xyz", "def inBounds(self, px, py):\n return px >= 0 and py >= 0 and px < self.w and py < self.h", "def inside_limits(self, point):\n if not self.regions:\n # Use rectangle check\n lat, lon = point.latitude, point.longitude\n if (lon > self.limits[0] and lat > self.limits[1] and\n lon < self.limits[2] and lat < self.limits[3]):\n return True\n else:\n return False\n else:\n # Check inside all possible regions\n p = Point((point.longitude, point.latitude))\n print(p, point)\n # import IPython; IPython.embed()\n for name, poly in self.regions.items():\n # if poly.contains(p):\n if p.intersects(poly):\n return name\n return False", "def within(self, x, y):\n return x >= self.top_x and x <= self.bottom_x and y >= self.bottom_y and y <= self.top_y", "def inside(self, x, on_boundary):\n return bool((near(x[0], xmin) or near(x[1], ymin)) and \\\n (not ((near(x[0], xmin) and near(x[1], ymax)) \\\n or (near(x[0], xmax) and near(x[1], ymin)))) \\\n and on_boundary)", "def __contains__(self,pos):\n # Permet de donner une contenance a l objet\n # Il devient comme une liste de point\n # Ainsi on peut le parcourir comme on le ferai avec une liste\n xmin=self.pos[0]\n xmax=self.pos[0]+self.dim[0]\n ymin=self.pos[1]\n ymax=self.pos[1]+self.dim[1]\n xpt=pos[0]\n ypt=pos[1]\n return (xpt>=xmin and xpt<=xmax and ypt>=ymin and ypt<=ymax)", "def __contains__(self,pos):\n # Permet de donner une contenance a l objet\n # Il devient comme une liste de point\n # Ainsi on peut le parcourir comme on le ferai avec une liste\n xmin=self.pos[0]\n 
xmax=self.pos[0]+self.dim[0]\n ymin=self.pos[1]\n ymax=self.pos[1]+self.dim[1]\n xpt=pos[0]\n ypt=pos[1]\n return (xpt>=xmin and xpt<=xmax and ypt>=ymin and ypt<=ymax)", "def contains(self, pt):\n x,y = pt.as_tuple()\n return (self.left <= x <= self.right and\n self.top <= y <= self.bottom)", "def isInternal(self, aPoint):\n if (aPoint.x >= self.pMin.x and aPoint.x <= self.pMax.x) \\\n and (aPoint.y >= self.pMin.y and aPoint.y <= self.pMax.y):\n return True\n else:\n return False", "def in_bounds(self, point):\n # Sanity checks\n # Check that point has same number of dimensions as graph\n if not len(point) == len(self.dimensions):\n raise Exception(\"Point has \" + str(len(point)) + \" dimensions, Coordination Space has \" + \\\n str(len(self.dimensions)) + \" dimensions.\")\n\n for i, coordinate in enumerate(point):\n if coordinate > self.dimensions[i] or coordinate < 0:\n return False\n\n return True", "def boundary_check(limits : tuple, coords : tuple) -> bool:\n xl,xh,yl,yh = limits\n x,y = coords\n bound_x = xl <= x and x < xh\n bound_y = yl <= y and y < yh\n return bound_x and bound_y", "def contains_pt(self, pt):\n x, y = pt\n if not self.x - self.radius < x < self.x + self.radius:\n return False\n if not self.y - self.radius < y < self.y + self.radius:\n return False\n return True", "def _in_bounds(self, x, y):\r\n return 0 <= x < 8 and 0 <= y < 8", "def inBounds(self,pos):\n return ((pos.x<WIDTH) & (pos.x>=0) & (pos.y<HEIGHT) & (pos.y>=0))", "def contains_point(self, x, y):\r\n if self.m == None:\r\n if abs(x - self.start[0]) > 0.6:\r\n return False\r\n else:\r\n if (y >= self.start[1] and y <= self.end[1]) or \\\r\n (y <= self.start[1] and y >= self.end[1]):\r\n return True\r\n else:\r\n return False\r\n else: \r\n y0 = int(self.m * x + self.n)\r\n if abs(y - y0) > 0.6: \r\n return False \r\n else: \r\n if ((x >= self.start[0] and x <= self.end[0]) or \\\r\n (x <= self.start[0] and x >= self.end[0])) and \\\r\n ((y >= self.start[1] and y <= self.end[1]) or \\\r\n (y <= self.start[1] and y >= self.end[1])): \r\n return True\r\n else:\r\n return False", "def _inside_bounds(A, B):\n for axis in 'xyz':\n minA, maxA = axis_bounds(A, axis)\n minB, maxB = axis_bounds(B, axis)\n if (minA <= minB) or (maxA >= maxB):\n return False\n\n return True", "def box_valid(self):\n return ((self.lt.x >= 0)\n and (self.lt.y >= 0)\n and (self.rb.x >= self.lt.x)\n and (self.rb.y >= self.lt.y))", "def in_bounds(self, x, y):\n return x >= 0 and x < 8 and y >= 0 and y < 8", "def is_point_in(self, point):\n return (self.upperleft[0] <= point[0] <= self.upperright[0] and self.upperleft[1] <= point[1] <= self.bottomleft[1])", "def is_point_in(self, point):\n return (self.upperleft[0] <= point[0] <= self.upperright[0] and self.upperleft[1] <= point[1] <= self.bottomleft[1])" ]
[ "0.79599667", "0.737079", "0.711591", "0.70875436", "0.6964512", "0.6917188", "0.68929106", "0.68929106", "0.6871995", "0.68415177", "0.6788423", "0.67268765", "0.67224866", "0.6712168", "0.6707262", "0.66991687", "0.66991687", "0.66832346", "0.6665249", "0.6620001", "0.65715486", "0.65571505", "0.65467817", "0.65327", "0.6467945", "0.64656156", "0.64486974", "0.6440009", "0.6417326", "0.6417326" ]
0.8203387
0
Get local plan with defined lookahead distance
def get_local_plan(self, ind): size = len(self.global_plan.poses) if ind < 0 or ind >= size: raise ValueError("ind must be between 0 and %d"%size) start = self.global_plan.poses[ind].pose local_path = Path() found_ind = None for i in range(ind, size): candidate = self.global_plan.poses[i].pose dist = self.calc_distance(start, candidate) if dist >= self.look_ahead_distance: break else: local_path.poses.append(candidate) found_ind = i return found_ind, local_path
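A simplified, ROS-free sketch of the same lookahead truncation; the real method walks nav_msgs poses and uses the planner's own calc_distance helper, so the plain tuples and the function name below are assumptions for illustration only.

    import math

    def local_slice(plan, ind, look_ahead):
        # keep consecutive points from `ind` onward while they stay within
        # `look_ahead` of the starting point, and report the last kept index
        start = plan[ind]
        local, found_ind = [], None
        for i in range(ind, len(plan)):
            candidate = plan[i]
            if math.dist(start, candidate) >= look_ahead:
                break
            local.append(candidate)
            found_ind = i
        return found_ind, local

    plan = [(0.0, 0.0), (1.0, 0.0), (2.0, 0.0), (3.0, 0.0), (4.0, 0.0)]
    print(local_slice(plan, 0, 2.5))  # (2, [(0.0, 0.0), (1.0, 0.0), (2.0, 0.0)])

Like the original, the slice always includes the starting pose and stops just before the first pose at or beyond the lookahead distance.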
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_lookahead_point(self):\n lookahead_target_dist = self.lookahead_dist #+ (1 + self.curr_v)\n\n if self.path_point_idx == len(self.current_path) - 1 or self.path_point_idx == -1:\n #End of path, no more lookahead\n return self.path_point\n\n prev_pt = self.current_path[self.path_point_idx]\n curr_pt = self.current_path[self.path_point_idx + 1]\n pt_dist = np.hypot((prev_pt - curr_pt)[0], (prev_pt - curr_pt)[1])\n curr_dist = pt_dist\n c = self.path_point_idx\n while curr_dist < lookahead_target_dist and c < len(self.current_path) - 1:\n prev_pt = self.current_path[c]\n curr_pt = self.current_path[c + 1]\n pt_dist = np.hypot((prev_pt - curr_pt)[0], (prev_pt - curr_pt)[1])\n curr_dist += pt_dist\n c += 1\n\n if curr_dist < lookahead_target_dist:\n return self.current_path[-1]\n else:\n #Interpolate to get the actual lookahead point\n frac = (curr_dist - lookahead_target_dist) / pt_dist\n pt = frac * prev_pt + (1-frac) * curr_pt\n return pt", "def test_find_closest_waypoints_nearest(self):\n planner = WaypointPlanner(make_example_base_waypoints())\n\n planner.position = Vector3(0, 0, 0)\n waypoints = planner.find_closest_waypoints(1)\n self.assertEqual(1, len(waypoints))\n self.assertEqual(0, waypoints[0].pose.pose.position.x)\n self.assertEqual(0, waypoints[0].pose.pose.position.y)\n self.assertEqual(0, waypoints[0].pose.pose.position.z)\n\n planner.position = Vector3(0.9, 0.9, 0)\n waypoints = planner.find_closest_waypoints(2)\n self.assertEqual(2, len(waypoints))\n self.assertEqual(1, waypoints[0].pose.pose.position.x)\n self.assertEqual(1, waypoints[0].pose.pose.position.y)\n self.assertEqual(2, waypoints[1].pose.pose.position.x)\n self.assertEqual(2, waypoints[1].pose.pose.position.y)\n\n # Check it wraps back around to the start.\n planner.position = Vector3(0.9, 0.9, 0)\n waypoints = planner.find_closest_waypoints(3)\n self.assertEqual(3, len(waypoints))\n self.assertEqual(1, waypoints[0].pose.pose.position.x)\n self.assertEqual(1, waypoints[0].pose.pose.position.y)\n self.assertEqual(2, waypoints[1].pose.pose.position.x)\n self.assertEqual(2, waypoints[1].pose.pose.position.y)\n self.assertEqual(0, waypoints[2].pose.pose.position.x)\n self.assertEqual(0, waypoints[2].pose.pose.position.y)", "def _get_next_waypoint(self, tolerance_step):\n print('\\nGetting new nav plan.')\n\n for i in range(4):\n try:\n self.plan = self.swarmie.get_plan(\n self.goal,\n tolerance=self.tolerance,\n use_home_layer=self.avoid_home\n )\n break # plan received\n except rospy.ServiceException:\n print('ServiceException.')\n if i < 3:\n print('Expanding tolerance.')\n self.tolerance += tolerance_step\n else:\n raise # tried 3 times, we give up\n\n print('Received nav plan.')\n pose = self.plan.plan.poses[0]\n\n return Point(x=pose.pose.position.x, y=pose.pose.position.y)", "def test_PRP(initial):\n return plan_route((initial[0],initial[1]), initial[2],\n # Goals:\n [(2,3),(3,2)],\n # Allowed locations:\n [(0,0),(0,1),(0,2),(0,3),\n (1,0),(1,1),(1,2),(1,3),\n (2,0), (2,3),\n (3,0),(3,1),(3,2),(3,3)])", "def global_plan(\n world: carla.World, # pylint: disable=no-member\n origin: carla.Location, # pylint: disable=no-member\n destination: carla.Location, # pylint: disable=no-member\n) -> Tuple[Sequence[carla.Waypoint], Sequence[Any], float]: # pylint: disable=no-member\n try:\n from agents.navigation.global_route_planner import GlobalRoutePlanner # pylint: disable=import-error\n from agents.navigation.global_route_planner_dao import GlobalRoutePlannerDAO # pylint: disable=import-error\n except 
ImportError:\n raise ImportError(\n \"Missing CARLA installation, \"\n \"make sure the environment variable CARLA_ROOT is provided \"\n \"and that the PythonAPI is `easy_install`ed\")\n\n # Setup global planner.\n grp_dao = GlobalRoutePlannerDAO(wmap=world.get_map(), sampling_resolution=1)\n grp = GlobalRoutePlanner(grp_dao)\n grp.setup()\n # Generate plan.\n waypoints, roadoptions = zip(*grp.trace_route(origin, destination))\n # Accummulate pairwise distance.\n distances = [0.0]\n for i in range(1, len(waypoints)):\n loc_tm1 = waypoints[i - 1].transform.location\n loc_tm1 = np.asarray([loc_tm1.x, loc_tm1.y, loc_tm1.z])\n loc_t = waypoints[i].transform.location\n loc_t = np.asarray([loc_t.x, loc_t.y, loc_t.z])\n distances.append(np.linalg.norm(loc_tm1 - loc_t))\n\n return waypoints, roadoptions, distances", "def get_opt_plan(task):\n\n\tT = 15.0\n\tweights = 0\n\tif task == TABLE_TASK or task == COFFEE_TASK:\n\t\tweights = 1\n\telif task == LAPTOP_TASK:\n\t\tweights = 10\n\n\t# initialize start/goal based on task \n\tif task == COFFEE_TASK or task == HUMAN_TASK:\n\t\tpick = pick_shelf\n\telse:\n\t\tpick = pick_basic\n\n\tif task == LAPTOP_TASK:\n\t\tplace = place_higher\n\telse:\n\t\tplace = place_lower\n\t\t\n\tstartRad = np.array(pick)*(math.pi/180.0)\n\tgoalRad = np.array(place)*(math.pi/180.0)\n\tstart = startRad\n\tgoal = goalRad\n\n\tplan = Planner(task)\t\n\tplan.replan(start, goal, weights, 0.0, T, 0.1)\n\n\tplan.kill_planner()\n\treturn plan", "def closest(start, incoming_angle, timeleft):\n visited = set()\n frontier = [ (0, 0, 0, incoming_angle, start) ]\n distances = {}\n while frontier:\n (cost, difficulty, count, in_angle, n) = heappop(frontier)\n if n in visited:\n continue\n distances[n] = cost\n if cost > timeleft:\n # cannot reach a non visited edge on time\n return None\n edges = sorted(n.edges, key=priority(in_angle))\n for edge in edges:\n if cost + edge.cost <= timeleft:\n # we can take this edge\n if edge.distance > 0:\n return compute_path(distances, cost, start, n) + [ edge ]\n else:\n if edge.stop not in visited:\n difficulty = max(e2.difficulty for e2 in edge.stop.edges)\n candidate = (cost + edge.cost, difficulty, count + edge.visits, edge.angle, edge.stop)\n # print candidate\n heappush(frontier, candidate)\n visited.add(n)\n return None", "def local_optimum(self, tour: Tour):\n better = tour\n cnt = 0\n while better is not None:\n # if cnt % 20 == 0:\n print(\"---------------------------------------------\\n The \", cnt, \"time.\")\n print(\"Improved tour:\", list(better.iter_vertices()))\n print(\"Improved cost:\", self.tour_cost(better))\n cnt += 1\n tour = better\n better = self.improve(tour)\n return tour", "def closest_on_screen_point_optim(trajectory, viewpoint, yaw, gaze_on_screen):\n \n traj_angles = dp.world_to_angles_through_screen(trajectory, viewpoint, yaw) \n \n #pprint(traj_angles)\n\n dist, idx = closest_node_tree(traj_angles, gaze_on_screen)\n ml_screen_ref = traj_angles[idx] \n\n return(idx, ml_screen_ref)", "def test_find_closest_waypoints_no_position(self):\n planner = WaypointPlanner(make_example_base_waypoints())\n self.assertIsNone(planner.find_closest_waypoints(1))", "def heuristic(current, goal):\r\n distance = getDistance(current, goal)\r\n return distance", "def guess_anchor(self):\n return self.nearest(\"VP\")", "def closest_point(self, point, start_param=None, Ns=25):\n x, z = self.rotate_to_xz_plane(point)\n la = self._closest_point(x, z, start_param, Ns)\n return la", "def next_gps(self):\n \n return Waypoint(0.0, 0.0)", "def 
travelling_salesman(points, start=None):\n if start is None:\n start = points[0]\n return min([perm for perm in permutations(points) if perm[0] == start], key=total_distance)", "def localMin0(R, L, W):\n fo = costFunction(R, W)\n vacantL = vacantPoint(L)\n beta = None\n q = None\n\n while True:\n fmin = fo\n\n for alpha in range(0, len(R)):\n for p in range(0, len(vacantL)):\n TxpR = transpositionMatrix(R, vacantL, alpha, p)\n ftrial = costFunction(TxpR, W)\n if ftrial < fmin:\n fmin = ftrial\n beta = alpha\n q = p\n\n if (beta != None) and (q != None):\n TaqR = transpositionMatrix(R, vacantL, beta, q)\n vacantL[q] = R[beta].copy()\n R = TaqR.copy()\n beta = None\n q = None\n\n if fmin <= fo:\n return fmin, R", "def heuristic(current, goal):\r\n # First tried manhattan distance but wasn't good enough so did direct distance which makes sense since the robot came move diagonally \r\n #return abs(current[0]-goal[0])+abs(current[1]-goal[1])\r\n return math.sqrt((current[0]-goal[0])**2+(current[1]-goal[1])**2)", "def plan(self, y0, y1, precision, min_visibility): # @UnusedVariable\n return PlanningResult(success=False, plan=None, status='Not implemented')", "def predict_plan(self, time_step: TimeStep, state: PlannerState,\n epsilon_greedy):\n pass", "def next_step(self, goal, traps=False): #TODO: test (maybe change to l1 dist?)\n kyu = PriorityQueue()\n kyu.put((0, self.player))\n came_from = {self.player: None}\n costs_agg = {self.player: 0}\n\n while not kyu.empty():\n curr = kyu.get()[1]\n if curr == goal: break\n\n for next in self.valid_neighbors(curr):\n new_cost = costs_agg[curr] + (5 if traps and self.traps[next] else 1)\n if next not in costs_agg.keys() or new_cost < costs_agg[next]:\n costs_agg[next] = new_cost\n kyu.put((new_cost + l2(next, goal), next))\n came_from[next] = curr\n \n if goal in came_from.keys():\n return came_from[goal]\n else:\n raise RuntimeWarning(\"no path between monster and player\")\n return goal", "def spanning(self):\n return self._spanning.get_waarde()", "def closest_on_screen_point(trajectory, viewpoint, yaw, gaze_on_screen):\n\n traj_angles = dp.world_to_angles_through_screen(trajectory, viewpoint, yaw) \n #pprint(traj_angles)\n\n #onscreen_idx, dists, *_ = find_closest_index(traj_angles, gaze_on_screen)\n #idx = closest_node(traj_angles, gaze_on_screen)\n idx = find_closest_index(traj_angles, gaze_on_screen)\n # print(idx)\n\n #traj_ref = trajectory[idx, :]\n screen_ref = traj_angles[idx, :]\n world_ref = trajectory[idx, :]\n\n path_dist = ab_path_length(trajectory, viewpoint, world_ref)\n path_dist /= 8.0 #time headway\n\n #plot_traj(screen_ref, gaze_on_screen, traj_angles)\n\n return(idx, screen_ref, world_ref, path_dist)#, traj_angles)", "def FindClosestPoint(self, ):\n ...", "def opt_settlement(player, board, gains, goal=\"default\"):\n goal_index = goal_list.get(goal, 0)\n vertex_score = lambda t: vertex_eval(player, board, t[0], gains, goal_index)\n vertex_list = [(v, board.get_vertex_location(v)) for v in range(board.max_vertex+1) \\\n if board.if_can_build(\"settlement\", *(board.get_vertex_location(v)))]\n return max(vertex_list, key = vertex_score, default=(None, None))", "def alignment_plan(self):\n F = self.stft_size // 2 + 1\n alignment_plan_lower_start = range(\n self.segment_start + self.segment_shift,\n F - self.segment_width, self.segment_shift\n )\n\n alignment_plan_higher_start = range(\n self.segment_start - self.segment_shift, 0, -self.segment_shift\n )\n\n alignment_plan_start = interleave(\n alignment_plan_lower_start, 
alignment_plan_higher_start\n )\n\n alignment_plan = [\n [\n self.main_iterations, self.segment_start,\n self.segment_start + self.segment_width\n ]\n ] + [\n [\n self.sub_iterations, s, s + self.segment_width\n ] for s in alignment_plan_start\n ]\n\n alignment_plan[2 * len(alignment_plan_higher_start)][1] = 0\n alignment_plan[-1][2] = F\n return alignment_plan", "def getNearestPassenger(driver, passengers):\n driver = driver.reshape((1, 2))\n assert (driver.shape == (1, 2))\n assert (passengers.shape[1] == 5)\n\n mask = passengers[:, -1] == 1\n tmp = passengers[mask]\n\n dis = np.linalg.norm(tmp[:, 0:2].copy() - driver)\n mini = np.argmin(dis)\n tmp[mini, -1] = 0\n passengers[mask] = tmp\n return tmp[mini, 0:4], passengers, tmp.shape[0] - 1", "def original_solver(state, agent, verbose=3):\n tasks = state.goals[agent]\n if verbose>0: print('** pyhop, verbose={}: **\\n state = {}\\n tasks = {}'.format(verbose, state.__name__, tasks))\n result = seek_plan(state,tasks,[],0,verbose)\n if verbose>0: print('** result =',result,'\\n')\n return result", "def _distance_next(self):\n\n self.distance = 10\n\n # Here a set index to 0 if the car is finishing a lap\n # Also reset the farthest\n if self.index > (len(self.x_trajectory) - 6) and self.closed:\n self.index = 0\n self.farthest = -1\n self.laps += 1\n\n for w in range(self.index, self.index + 20):\n\n self.dist_point = math.sqrt((self.x_trajectory[w] - self.x)**2\n + (self.y_trajectory[w] - self.y)**2)\n\n if self.dist_point < self.distance:\n self.distance = self.dist_point\n self.index = w\n\n if w >= (len(self.x_trajectory) - 1):\n break\n\n self._calc_side()\n\n self.distance = self.distance * self.sign\n\n return self.distance", "def plan(cur_pos: Node, goal_pos: Node, cur_heading: float, new_obst_segments: [ObstacleSegment]) \\\n -> (NavMode, [Node]):\n global d_reach, v_followed, v_diff, d_followed_rel, obst_id_to_follow, bf_waypoint\n # find updated obstacle with obst_id_to_follow\n obst_to_follow = find_obst(obst_id_to_follow, new_obst_segments)\n\n # check if we lost the obstacle\n if obst_to_follow is None:\n logger.info(\"BF: Lost obstacle segment; End of Routine\")\n return NavMode.MTG, None\n else:\n # update BF Waypoint\n bf_waypoint.update(obst_to_follow)\n\n # Calculate d_reach, d_followed, v_diff, d_followed_rel\n v_diff = bf_waypoint.get_pos_change()\n d_reach = goal_pos.dist_2d(bf_waypoint.cur_pos)\n assert v_followed is not None and v_diff is not None\n\n v_followed_rel = v_followed + v_diff\n d_followed_rel = v_followed_rel.dist_2d(goal_pos)\n\n # if d_reach < d_followed_rel\n if d_followed_rel - d_reach > config_.D_TOL:\n # switch to MTG\n logger.info(\"BF: path length decreased by %.2f below original minimum; End of Routine\"\n % (d_followed_rel - d_reach))\n return NavMode.MTG, None\n else:\n # choose new BF waypoint\n bf_waypoint = find_new_bf_waypoint(obst_to_follow, cur_pos, None)\n if bf_waypoint is None:\n return NavMode.MTG, None\n\n # generate path to bfWaypoint\n path = [bf_waypoint.cur_pos, goal_pos]\n\n # update v_followed\n v_followed = v_followed_rel\n\n return NavMode.BF, path", "def plan(self):\n\t\topt_r, opt_h = self.optimizer.maximize(bounds=self.bounds) # 1-D\n\t\tplan_r, plan_h = opt_r, opt_h\n\t\t# Update optimal control in trajectories\n\t\tself.traj_r.u = plan_r # numpy robot plan\n\t\tself.traj_r.u_th = plan_r # Theano robot plan\n\t\tself.traj_h.u = plan_h # numpy human plan\n\t\tself.traj_h.u_th = plan_h # Theano human plan\n\t\treturn self.traj_r.u # return plan in shape of self.traj.u" ]
[ "0.5957022", "0.59043604", "0.5696798", "0.55463475", "0.5463718", "0.5410066", "0.5407258", "0.5370748", "0.5359951", "0.52572113", "0.521189", "0.52114105", "0.5210574", "0.520301", "0.5192764", "0.5162559", "0.5125558", "0.50986946", "0.5083363", "0.50627804", "0.5048645", "0.50464016", "0.502353", "0.50185215", "0.5014771", "0.50138694", "0.500071", "0.49947482", "0.49907956", "0.49859825" ]
0.6283821
0
Reset local planner variables
def reset(self): self.robot_path_ind = 0 self.goal_path_ind = None self.global_plan = Path()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset():\n\n global optimizer_data\n global optimizer_len\n\n optimizer_data = []\n optimizer_len = 0\n return", "def cleanup_and_reset(self):\n self.mem.set(self.mem.META_PLAN, None)\n self.mem.set(self.mem.META_GOALS, None)\n self.mem.set(self.mem.META_CURR_GOAL, None)", "def reset(self):\n self.solver = None", "def reset():", "def reset():", "def reset():", "def __reset_variables(self):\r\n self.__running = True", "def __resetLocal__(self,featureVals):\n self.amITrained = False\n self._amplitudes = {}\n self._eigs = {}\n self._modes = {}\n self.__Atilde = {}\n self.pivotValues = None\n self.KDTreeFinder = None\n self.featureVals = None", "def reset(self):\n self.reserve.reset()\n self.revenue.reset()\n self.transfers.reset()\n self.missions.reset()\n self.debt.reset()\n self.genfund.reset()\n self.macro.reset(self.pop[self.start_yr],self.eco_first)\n self.summary = pd.DataFrame(index=self.names,columns=[t for t in range(self.start_yr,self.stop_yr)])\n self.year = self.start_yr", "def reset(self) -> None:\n self.statistics = defaultdict(self._mp_hack)\n self._is_ddp = get_rank() > -1", "def _reset(self) -> None:", "def _reset(self) -> None:", "def _reset(self):", "def reset(self):\n self.jobs: Dict[int, JobRow] = {}\n self.events: Dict[int, List[EventRow]] = {}\n self.job_counter = count(1)\n self.queries: List[Tuple[str, Dict[str, Any]]] = []\n self.notify_event = None\n self.notify_channels = []\n self.periodic_defers: Dict[str, int] = {}\n self.table_exists = True", "def _reset(self):\n self._model._reset()\n super(RDPAnalyzer, self)._reset()", "def _reset(self):\n pass", "def reset(self):\n self._varstate = None\n self.frozen = False", "def reset():\r\n pass", "def reset(self):\n self.algo_state = {}\n self.actual_repetitions = 0\n self.next_session = -1\n self.last_session = -1\n self.past_quality = []", "def _reset(lp):\n if hasattr(lp, \"solverModel\"):\n delattr(lp, \"solverModel\")\n for v in lp.variables():\n if hasattr(v, \"_xprs\"):\n delattr(v, \"_xprs\")\n for c in lp.constraints.values():\n if hasattr(c, \"_xprs\"):\n delattr(c, \"_xprs\")", "def reset(self):\n self.edges = None\n self.chi = None\n self.k = None\n self.n_bins = None\n self.classes = None\n self.n_params = None", "def reset(self):", "def reset(self):", "def reset(self):", "def reset(self):", "def reset(self):\n for var in self.var_list:\n var.value = None\n var.domain = copy.deepcopy(var.init_domain)", "def reset(self):\n\n self.cost = {} # record cost value at each iteration\n self.cost_change = {} # record the change of cost items\n self.prim_var = {} # record primal variable values for each iteration\n self.prim_var_change = {} # record the change of primal variable between two consective iterations\n self.dual_var = {} # record dual variable values for each iteration\n self.dual_var_change = {} # record the change of dual variable between any two consective iterations\n self.fea_conditions = {} # record the satisfication of feasiblity conditions at each iteration", "def reset(self):\n \n pass", "def reset(self):\n ...", "def reset(self):\n ..." ]
[ "0.73573047", "0.72122705", "0.7178374", "0.6857298", "0.6857298", "0.6857298", "0.68345904", "0.67704684", "0.6718428", "0.66984993", "0.669352", "0.669352", "0.66802955", "0.6661716", "0.6655433", "0.66490394", "0.6638487", "0.66238254", "0.6584989", "0.6580846", "0.6580699", "0.65769684", "0.65769684", "0.65769684", "0.65769684", "0.65640634", "0.6562452", "0.65552634", "0.6544296", "0.6544296" ]
0.7329392
1
Calculate nearest index from local plan
def calc_nearest_ind(self, robot_pose): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def index(self) -> None:\n self._nearest_point = kd.Tree(self._points).nearest_point", "def nearest_sparse(self, query):\n self.best_dist = float(\"inf\")\n self.best_element = None\n self._register_best_element = self._register_best_element_single \n self._nearest_sparse_recursive(self._sparse2seq(query), self.root, 0.0)\n return self.best_element,self.best_dist", "def nearest(self, query):\n nearest_trees = list(map(lambda t: t.get_nearest_neighbor(query), self.trees))\n distances_pool = list(zip(map(lambda x: self.dist_fn(x, query), self.pool), self.pool))\n best = None\n best_cost = np.inf\n for cost, near in nearest_trees + distances_pool:\n if cost <= best_cost:\n best = near\n best_cost = cost\n return best", "def get_nearest(self, lat, long):\n dist = 100000\n index = 0\n j = 0\n for i in self.dcs:\n new_dist = ZipCodesData.distance_on_sphere(lat, long, i.lat, i.long)\n if new_dist < dist:\n dist = new_dist\n index = j\n j += 1\n return index", "def calc_nearest_index(self, cx, cy, cyaw, pind):\n dx = [self.x - icx for icx in cx[pind:(pind + N_IND_SEARCH)]]\n dy = [self.y - icy for icy in cy[pind:(pind + N_IND_SEARCH)]]\n\n d = [idx ** 2 + idy ** 2 for (idx, idy) in zip(dx, dy)]\n\n mind = min(d)\n\n ind = d.index(mind) + pind\n\n mind = np.sqrt(mind)\n\n dxl = cx[ind] - self.x\n dyl = cy[ind] - self.y\n\n angle = pi_2_pi(cyaw[ind] - math.atan2(dyl, dxl))\n if angle < 0:\n mind *= -1\n\n return ind, mind", "def build_index(dataset, n_neighbors):\n# Initialize FLANN\n pyflann.set_distance_type(distance_type='euclidean')\n flann = pyflann.FLANN()\n params = flann.build_index(dataset,algorithm='kdtree',trees=4)\n #print params\n nearest_neighbors, dists = flann.nn_index(dataset, n_neighbors, checks=params['checks'])\n return nearest_neighbors, dists", "def lsh_search(self,query_index, num_neighbors = 10):\r\n def l1(u,v):\r\n return dt.norm(np.array(u)-np.array(v), ord=1)\r\n \r\n start_time = time.time()\r\n #print(start_time)\r\n buckets = self.get_candidates(query_index)\r\n distance1 = buckets.map(lambda p : p + (l1(p[0],query_index[0]),))\r\n distance_sort = distance1.map(lambda y : (y[3],y[1]))\r\n distance_sorted = distance_sort.sortByKey()\r\n lsh_End_time = time.time()- start_time\r\n return (distance_sorted.take(num_neighbors),lsh_End_time)\r\n raise NotImplementedError", "def find_nearest(ref_array,target_array):\n ref_tree = scipy.spatial.cKDTree(ref_array)\n dist, indices = ref_tree.query(target_array, k=1)\n return indices", "def find_closest_index(traj, point):\n\n\t#TODO: vectorise function to receive any length of points.\n\n\tdistances = np.subtract(np.array(point),traj) \n\tdistances = distances.reshape(-1,2)\n\t#distances = distances[~np.isnan(distances)].reshape(-1,2)\n\n\t#print(\"distances\")\n\t#pprint(distances)\n\tdist_array = np.linalg.norm(distances, axis = 1)\n\t#pprint(dist_array)\n\t#dist_array = np.sqrt((distances[:,0]**2)+(distances[:,1]**2)) #array of distances from trajectory to gaze landing point in world. \n\tidx = np.nanargmin(abs(dist_array)) #find smallest difference in pythag distance from 0,0 to get closest point. 
\n\tdists = distances[idx, :]\n\tdist = dist_array[idx]\n\n\treturn idx#, dists, dist\n\t#return idx", "def nearest_vertex_to(self, point):\n distances = self.distances_to(point)\n idx = np.argmin(distances)\n return idx", "def get_nearest(src_points, candidates, k_neighbors=1):\n\n # Create tree from the candidate points\n tree = BallTree(candidates, leaf_size=15, metric='haversine')\n distances, indices = tree.query(src_points, k=k_neighbors)\n\n # Transpose to get distances and indices into arrays\n distances = distances.transpose()\n indices = indices.transpose()\n\n # Get closest indices and distances (i.e. array at index 0)\n # note: for the second closest points, you would take index 1, etc.\n closest = indices[0]\n closest_dist = distances[0]\n\n # Return indices and distances\n return closest, closest_dist", "def find_closest_pt(ref_lon, ref_lat, tlon, tlat):\n\n # compute great circle distance from location to model grid points\n dist = gc_dist(ref_lon, ref_lat, tlon, tlat)\n\n # find j index of closest grid point\n work = N.take(dist,N.argmin(dist,0),0).diagonal()\n jj = N.argsort(work)[0]\n\n # find i index of closest grid point\n work = N.take(dist,N.argmin(dist,1),1).diagonal()\n ii = N.argsort(work)[0]\n\n return ii, jj", "def getNearestSampleIndex(test, trainX):\n dist_matrix = test - trainX\n dist_square = dist_matrix ** 2\n dist_sums = dist_square.sum(axis=1)\n distance_vector = np.sqrt(dist_sums)\n return (distance_vector).argmin()", "def _nearest(arrlist_1, arrlist_2):\n tree = KDTree(arrlist_1);\n pts = tree.query(arrlist_2)\n\n return tree.data[pts[1][pts[0].argmin()]]", "def get_closest_waypoint_idx(self):\n\n # TODO:\n # The churchlot waypoints are roughly circular but have self-\n # intersecting endpoints, so I'm not sure how this code will \n # yield good results. 
Might need some additional filtering\n # logic to force a choice consistent with the vehicle pose yaw\n # in order to avoid jumping onto the wrong path.\n\n # Vehicle position short reference\n pos = self.pose.pose.position\n\n # Find the closest waypoint index\n # If closest index is zero bump to 1 since we don't want slice for \n # prev_coord to look at the final map waypoint.\n closest_idx = max(self.waypoint_tree.query([pos.x, pos.y], 1)[1], 1)\n\n # Get closest point\n closest_coord = self.waypoints_2d[closest_idx]\n prev_coord = self.waypoints_2d[closest_idx-1]\n\n # Convert coordinates into 2D numpy vectors\n closest_vec = np.array(closest_coord)\n prev_vec = np.array(prev_coord)\n pos_vec = np.array([pos.x, pos.y])\n\n # Find vec(close-prev) dot vec(pos-close) \n val = np.dot(closest_vec - prev_vec, pos_vec - closest_vec)\n\n # If pos is ahead of closest...\n if val > 0: \n\n # Advance index so that closest is ahead of pos\n closest_idx = (closest_idx + 1) % len(self.waypoints_2d)\n\n # Return closest index\n return closest_idx", "def __get_closest_waypoint_index(self, x, y):\n return self.__waypoint_tree.query([x, y], 1)[1]", "def _nearest_point_index(points, point):\n distance = sys.float_info.max\n index = None\n for i, p in enumerate(points):\n temp = _vec_distance(p, point)\n if temp < distance:\n distance = temp\n index = i\n return index, distance", "def nearest_point_index(self, point):\n return _nearest_point_index(self._points, point)", "def closest_node(self, where, cartesian=False, threshold=None, vincenty=False, haversine=False):\n\n if not vincenty or not haversine:\n if cartesian:\n x, y = self.grid.x, self.grid.y\n else:\n x, y = self.grid.lon, self.grid.lat\n dist = np.sqrt((x - where[0])**2 + (y - where[1])**2)\n elif vincenty:\n grid_pts = np.asarray([self.grid.lon, self.grid.lat]).T\n where_pt_rep = np.tile(np.asarray(where), (len(self.grid.lon),1))\n dist = np.asarray([vincenty_distance(pt_1, pt_2) for pt_1, pt_2 in zip(grid_pts, where_pt_rep)])*1000\n elif haversine:\n grid_pts = np.asarray([self.grid.lon, self.grid.lat]).T\n where_pt_rep = np.tile(np.asarray(where), (len(self.grid.lon),1))\n dist = np.asarray([haversine_distance(pt_1, pt_2) for pt_1, pt_2 in zip(grid_pts, where_pt_rep)])*1000\n index = np.argmin(dist)\n if threshold:\n if dist.min() < threshold:\n index = np.argmin(dist)\n else:\n index = None\n\n return index", "def find_nearest_neighbors_idx(X, x, k):\n ## homework:start\n result = \n ## homework:end\n return result", "def get_local_plan(self, ind):\n size = len(self.global_plan.poses)\n if ind < 0 or ind >= size:\n raise ValueError(\"ind must be between 0 and %d\"%size)\n \n start = self.global_plan.poses[ind].pose\n local_path = Path()\n found_ind = None\n for i in range(ind, size):\n candidate = self.global_plan.poses[i].pose\n dist = self.calc_distance(start, candidate)\n if dist >= self.look_ahead_distance:\n break\n else:\n local_path.poses.append(candidate)\n found_ind = i\n\n return found_ind, local_path", "def get_nearest(self, vector, limit):\n raise NotImplementedError", "def nearest(self, value):\n coords = value[:2] # value only has 2 coords (x, y) right now, but it may have theta in the future\n hits = self.idx.nearest(self.make_bounding_box(coords), 1, objects=False)\n for hit in hits:\n # take the first index in the event of any ties\n return self.nodes[hit]\n \n \n \n #assert that value is valid here\n \"\"\"def recur(node, depth=0):\n closest, distance = node, self.cost(node.value, value)\n if depth < self.max_size:\n for 
child in node.children:\n (child_closest, child_distance) = recur(child, depth+1)\n if child_distance < distance:\n closest = child_closest\n distance = child_distance \n return closest, distance\n return recur(self.root)[0]\"\"\"", "def get_closest_waypoint_idx(self):\n\n # Position\n x = self.car_pose.pose.position.x\n y = self.car_pose.pose.position.y\n closest_idx = self.waypoint_tree.query([x, y], 1)[1]\n\n # Coordinates\n closest_coord = self.waypoints_2d[closest_idx]\n prev_coord = self.waypoints_2d[closest_idx - 1]\n\n # Hyper Plane\n cl_vect = np.array(closest_coord)\n prev_vect = np.array(prev_coord)\n pos_vect = np.array([x, y])\n\n val = np.dot(cl_vect - prev_vect, pos_vect - cl_vect)\n\n if val > 0:\n closest_idx = (closest_idx + 1) % len(self.waypoints_2d)\n\n return closest_idx", "def __find_another_nearest_medoid(self, point_index, current_medoid_index):\n other_medoid_index = -1\n other_distance_nearest = float(\"inf\")\n for index_medoid in self.__current:\n if index_medoid != current_medoid_index:\n other_distance_candidate = euclidean_distance_square(\n self.__pointer_data[point_index],\n self.__pointer_data[current_medoid_index],\n )\n\n if other_distance_candidate < other_distance_nearest:\n other_distance_nearest = other_distance_candidate\n other_medoid_index = index_medoid\n\n return other_medoid_index", "def get_main_points(neuron):\n (branch_index,) = np.where(neuron.branch_order[neuron.n_soma:] == 2)\n (endpoint_index,) = np.where(neuron.branch_order[neuron.n_soma:] == 0)\n selected_index = np.union1d(branch_index + neuron.n_soma,\n endpoint_index + neuron.n_soma)\n selected_index = np.append(range(neuron.n_soma), selected_index)\n return selected_index", "def __find_another_nearest_medoid(self, point_index, current_medoid_index):\r\n other_medoid_index = -1\r\n other_distance_nearest = float('inf')\r\n for index_medoid in self.__current:\r\n if (index_medoid != current_medoid_index):\r\n other_distance_candidate = euclidean_distance_square(self.__pointer_data[point_index], self.__pointer_data[current_medoid_index])\r\n\r\n if other_distance_candidate < other_distance_nearest:\r\n other_distance_nearest = other_distance_candidate\r\n other_medoid_index = index_medoid\r\n\r\n return other_medoid_index", "def find_stn_idx(ref_lon, ref_lat, tlon, tlat):\n\n # compute great circle distance from location to model grid points\n dist = gc_dist(ref_lon, ref_lat, tlon, tlat)\n\n # find the indices of the two closest grid points with distinct longitudes\n work = N.take(dist,N.argmin(dist,0),0).diagonal()\n Jlist = N.argsort(work)[:2]\n del work\n\n # find the indices of the two closest grid points with distinct latitudes\n work = N.take(dist,N.argmin(dist,1),1).diagonal()\n Ilist = N.argsort(work)[:2]\n del work\n\n return Ilist, Jlist", "def _find_nearest_grid(self,lon,lat,period):\n\t\tgroup = self['%g_sec'%( period )]\n\t\tlonArr = group['lonArr'].value\n\t\tlatArr = group['latArr'].value\n\t\tdiff_Arr = np.dstack((lonArr, latArr)) - np.array([lon, lat]) # 3-d array ( , ,2)\n\t\tdiff_Arr[:,:,0] = diff_Arr[:,:,0] * np.cos(lat/180.*np.pi)\n\t\tdist_sq = np.sum(diff_Arr**2,axis=-1)\n\t\tind1, ind2 = np.where(dist_sq == np.min(dist_sq))\n\t\treturn ind1[0], ind2[0]", "def getclosest_ij(lats,lons,latpt,lonpt):\n dist_lat = (lats-latpt)**2 # find squared distance of every point on grid\n dist_lon = (lons-lonpt)**2\n minindex_lat = dist_lat.argmin() # 1D index of minimum dist_sq element\n minindex_lon = dist_lon.argmin()\n return minindex_lat, minindex_lon # Get 2D index for 
latvals and lonvals arrays from 1D index" ]
[ "0.666966", "0.6518415", "0.6329833", "0.62365556", "0.6211519", "0.6202459", "0.61500686", "0.6134303", "0.6106491", "0.6095605", "0.6073815", "0.60622567", "0.60303104", "0.59884435", "0.5978137", "0.59692085", "0.59623927", "0.595972", "0.5958901", "0.59389544", "0.5936738", "0.59261644", "0.5925661", "0.59158015", "0.5913211", "0.59029895", "0.58974063", "0.58952326", "0.5867427", "0.5858136" ]
0.6538584
1
Initialise the HyperStream class. This starts the logger, loads the config files, connects to the main mongodb, and initialises the managers (channels, plates, workflows).
def __init__(self, loglevel=logging.INFO, file_logger=True, console_logger=True, mqtt_logger=None, config_filename="hyperstream_config.json"): self.config_filename = config_filename self._session = None self.parameters = dict( loglevel=loglevel, file_logger=file_logger, console_logger=console_logger, mqtt_logger=mqtt_logger ) self.logger = HyperStreamLogger( default_loglevel=loglevel, file_logger=file_logger, console_logger=console_logger, mqtt_logger=mqtt_logger) self.config = HyperStreamConfig(filename=config_filename) self.client = Client(self.config.mongo) # Define some managers self.channel_manager = ChannelManager(self.config.plugins) self.plate_manager = PlateManager() self.workflow_manager = WorkflowManager(channel_manager=self.channel_manager, plate_manager=self.plate_manager) self.plugins = PluginContainer() # The following are to keep pep happy - will be populated below self.tools = None self.factors = None self.current_workflow = None # Used in the new API - the current workflow being defined self.populate_tools_and_factors()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize( self, logger, loop, netconf_ip, netconf_port, statistics,\n xml_to_json_translator):\n self.init_stream_handler(logger, loop, \n netconf_ip, netconf_port, statistics, xml_to_json_translator)", "def initialize(self, logger, loop, netconf_ip, netconf_port, statistics,\n xml_to_json_translator):\n self.init_stream_handler(logger, loop, \n netconf_ip, netconf_port, statistics, xml_to_json_translator)", "def _startup(self):\n self._logger.debug(\"About to start up plugin %s\", self.unique_name)\n\n if not self._ez_client.can_connect():\n raise RestConnectionError(\"Cannot connect to the Beer-garden server\")\n\n # If namespace couldn't be determined at init try one more time\n if not self._legacy and not self._config.namespace:\n self._setup_namespace()\n\n self._system = self._initialize_system()\n self._instance = self._initialize_instance()\n\n if self._config.working_directory is None:\n app_parts = [self._system.name, self._instance.name]\n if self._system.namespace:\n app_parts.insert(0, self._system.namespace)\n\n self._config.working_directory = appdirs.user_data_dir(\n appname=os.path.join(*app_parts), version=self._system.version\n )\n\n workdir = Path(self._config.working_directory)\n if not workdir.exists():\n workdir.mkdir(parents=True)\n\n self._logger.debug(\"Initializing and starting processors\")\n self._admin_processor, self._request_processor = self._initialize_processors()\n self._admin_processor.startup()\n self._request_processor.startup()\n\n self._logger.debug(\"Setting signal handlers\")\n self._set_signal_handlers()", "def __init__(self):\n self.__default_config = ConfigParams.from_tuples(\n 'options.max_pool_size', 2,\n 'options.connect_timeout', 5000,\n 'options.auto_reconnect', True,\n 'options.max_page_size', 100,\n 'options.debug', True\n )\n\n # The logger\n self._logger: CompositeLogger = CompositeLogger()\n # The connection resolver\n self._connection_resolver: MongoDbConnectionResolver = MongoDbConnectionResolver()\n # The configuration options.\n self._options: ConfigParams = ConfigParams()\n # The MongoDB connection object.\n self._connection: pymongo.MongoClient = None\n # The MongoDB database name.\n self._database_name: str = None\n # The MongoDb database object.\n self._db: database.Database = None", "def __init__(self, config_path, setup_celery=True):\n _log.info(\"GNU MediaGoblin %s main server starting\", __version__)\n _log.debug(\"Using config file %s\", config_path)\n ##############\n # Setup config\n ##############\n\n # Open and setup the config\n global_config, app_config = setup_global_and_app_config(config_path)\n\n setup_crypto()\n\n ##########################################\n # Setup other connections / useful objects\n ##########################################\n\n # Setup Session Manager, not needed in celery\n self.session_manager = session.SessionManager()\n\n # load all available locales\n setup_locales()\n\n # Set up plugins -- need to do this early so that plugins can\n # affect startup.\n _log.info(\"Setting up plugins.\")\n setup_plugins()\n\n # Set up the database\n self.db = setup_database()\n\n # Register themes\n self.theme_registry, self.current_theme = register_themes(app_config)\n\n # Get the template environment\n self.template_loader = get_jinja_loader(\n app_config.get('local_templates'),\n self.current_theme,\n PluginManager().get_template_paths()\n )\n\n # Set up storage systems\n self.public_store, self.queue_store = setup_storage()\n\n # set up routing\n self.url_map = get_url_map()\n\n # set up 
staticdirector tool\n self.staticdirector = get_staticdirector(app_config)\n\n # Setup celery, if appropriate\n if setup_celery and not app_config.get('celery_setup_elsewhere'):\n if os.environ.get('CELERY_ALWAYS_EAGER', 'false').lower() == 'true':\n setup_celery_from_config(\n app_config, global_config,\n force_celery_always_eager=True)\n else:\n setup_celery_from_config(app_config, global_config)\n\n #######################################################\n # Insert appropriate things into mediagoblin.mg_globals\n #\n # certain properties need to be accessed globally eg from\n # validators, etc, which might not access to the request\n # object.\n #######################################################\n\n setup_globals(app=self)\n\n # Workbench *currently* only used by celery, so this only\n # matters in always eager mode :)\n setup_workbench()\n\n # instantiate application meddleware\n self.meddleware = [common.import_component(m)(self)\n for m in meddleware.ENABLED_MEDDLEWARE]", "def __init__(self):\n super(GithubCollector, self).__init__()\n config_file = ('collectors.cfg')\n log_file = self.config['Github']['log_file']\n logging.config.fileConfig(config_file,\n defaults={'GithubCollector': log_file}\n )\n self.logger = logging.getLogger('GithubCollector')\n self.elasticsearch = Elasticsearch(['localhost:9200'])\n self.redis = redis.Redis(host='127.0.0.1', port=6379, password='')\n self.timestamp = datetime.date.today().isoformat()", "def __init__(self):\n self.import_config()\n # Set up configuration\n\n # Register exit handler\n atexit.register(self.exit_handler)\n\n # Set up logging\n self.logger = logging.getLogger('unisonctrl')\n self.logger.setLevel(logging.INFO)\n\n # Set up main log file logging\n logFileFormatter = logging.Formatter(\n fmt='[%(asctime)-s] %(levelname)-9s : %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S %p'\n )\n\n # Size based log rotation\n if (self.config['rotate_logs'] == \"size\"):\n logfileHandler = logging.handlers.RotatingFileHandler(\n self.config['unisonctrl_log_dir'] + os.sep + 'unisonctrl.log',\n # maxBytes=50000000, # 50mb\n maxBytes=5000, # 50mb\n backupCount=20\n )\n\n # Timed log rotation\n elif (self.config['rotate_logs'] == \"time\"):\n logfileHandler = logging.handlers.TimedRotatingFileHandler(\n self.config['unisonctrl_log_dir'] + os.sep + 'unisonctrl.log',\n when=\"midnight\",\n backupCount=14, # Keep past 14 days\n )\n\n # No log rotation\n elif (self.config['rotate_logs'] == \"off\"):\n logfileHandler = logging.FileHandler()\n\n else:\n logfileHandler = logging.FileHandler()\n\n logfileHandler.setLevel(logging.DEBUG)\n logfileHandler.setFormatter(logFileFormatter)\n self.logger.addHandler(logfileHandler)\n\n # Send logs to console when running\n consoleFormatter = logging.Formatter('[%(asctime)-22s] %(levelname)s : %(message)s')\n consoleHandler = logging.StreamHandler()\n consoleHandler.setLevel(logging.INFO)\n consoleHandler.setFormatter(consoleFormatter)\n self.logger.addHandler(consoleHandler)\n\n # Disabling debugging on the storage layer, it's no longer needed\n self.data_storage = DataStorage(False, self.config)\n\n self.logger.info(\"UnisonCTRL Starting\")\n\n # Clean up dead processes to ensure data files are in an expected state\n self.cleanup_dead_processes()", "def __init__(self):\n # instantiate logger\n self.log = logging.getLogger('blog')\n # open sqlite db\n db_path=application.config.get('sqlite.path', './db.sqlite')\n self.conn = sqlite3.connect(db_path)\n self.cursor = self.conn.cursor()\n self._create_schema()", "def 
__init__(self):\n config = self.read_config()\n self.deployment = config['deployment']\n self.deployment_config = config[self.deployment]\n logger.info(f'Initializing storage client with the {self.deployment} deployment config {pformat(self.deployment_config)}')\n\n # get the MLOS config from the user else default it from the deployment config file\n # self.mlos_config = config['MLOS']\n # logger.info(f'Initializing storage client with the MLOS config {pformat(self.mlos_config)}')\n\n # setup the mount path\n if self.deployment == \"LOCAL\":\n self.mount_dir = self.setup_mount()\n logger.info(f'Mount directory setup completed: {self.mount_dir}')", "def __init__(self):\n\n #initiate logging\n file_name = os.path.splitext(sys.argv[0])\n tc_name = file_name[0].split('/')[-1]\n log_name = os.path.join(config.LOG_DIR, ''.join([tc_name, '.log']))\n log.init(log_name)\n self.logging = logging.getLogger('objects')", "def __init__(self, cfg_mongo):\n # Connect to mongodb\n self.logger = logging.getLogger(\"DB\")\n self.cfg_mongo = cfg_mongo\n\n # Parse location for binary data storage\n assert cfg_mongo[\"datastore\"] in [\"gridfs\", \"numpy\", \"adios2\"]\n self.datastore = cfg_mongo[\"datastore\"]", "def init(self):\r\n self._parse_options(self._force_args)\r\n self._maybe_daemonize()\r\n self._setup_modules()\r\n self._state = self.INITIALIZED", "def __init__(self):\n # Read configuration into dictionary\n self.directories = general.config_directories()\n self.config = general.read_yaml_files(self.directories)", "def __init__(self, *args, **kwargs):\n self.__is_connected__ = False\n self.logger = kwargs.get('logger',None)\n if ( self.logger is None ):\n # Get an instance of a logger\n console = logging.StreamHandler()\n formatter = logging.Formatter('%(asctime)s: %(levelname)-8s %(message)s',\"%Y-%m-%d %H:%M:%S\")\n console.setFormatter(formatter)\n logging.getLogger('').addHandler(console)\n self.logger = logging.getLogger('')\n self.logger.setLevel(logging.INFO)\n # initial log entry\n self.logger.debug(\"%s: %s version [%s]\" % (self.__class__.__name__, inspect.getfile(inspect.currentframe()),__version__))\n # initialize variables - so all are listed here for convenience\n self.dict_config = {} # dictionary, see cdh_manager.cfg example\n self.__cm_cdh__ = None\n self.__boto_ec2__ = None\n self.data = DataObjectSample(logger=self.logger)", "def __init__(self):\n\n self.db = ImageDB()\n self.vitess = VitessConn()\n self.minio = MinioConn()", "def __init__(self):\n\t\tself._logger = None\n\t\tself._instanciate_logger()\n\t\tself._video_manager = VideoManager(self, self._logger)\n\t\tself._video_thread = None\n\t\tself._audio_manager = AudioManager(self, self._logger)\n\t\tself._audio_thread = None\n\t\tself._input_thread = None\n\t\tself._trigger_manager = None\n\t\tself.is_running = False", "async def init(self):\n self.init_connection_params()\n self._pool = await self._create_pool()\n\n return self", "async def setup(self):\n\t\tlogging.config.dictConfig(self.log_settings['log'])\n\t\tself.logger = logging.getLogger('Responder3')\n\t\tself.create_dir_strucutre()\n\n\t\tif 'handlers' in self.log_settings:\n\t\t\tasync for handlerclass, handler in self.get_handlers():\n\t\t\t\tawait self.start_extension(handlerclass, self.log_settings[self.log_settings['handlers'][handler]])", "def __init__(self):\n INSTALL_DIR = dirname(__file__)\n CONFIG_DIR = '/etc/Model2WADL/'\n logging.basicConfig(level=logging.ERROR)\n logging.config.fileConfig([join(CONFIG_DIR, 'logging.conf'), 
expanduser('~/.logging.conf'), 'logging.conf'])\n self.__log = logging.getLogger('thesis')\n\n self.__log.debug(\"Reading general configuration from Model2WADL.cfg\")\n self.__m2wConfig = ConfigParser.SafeConfigParser()\n self.__m2wConfig.read(\n [join(CONFIG_DIR, 'Physical2Virtual.cfg'), expanduser('~/.Physical2Virtual.cfg'), 'Physical2Virtual.cfg'])\n\n self.__baseURI = self.__m2wConfig.get(\"Config\", \"baseURI\")\n self.__basePackage = self.__m2wConfig.get(\"Config\", \"basePackage\")\n self.__schemaFile = self.__m2wConfig.get(\"Config\", \"schemaFile\")\n self.__model = None\n self.__input = None\n self.__output = None", "def initialise(self):\n\n\t\t# Init the sync module\n\t\tSync.init(Conf.get(('redis', 'sync'), {\n\t\t\t\"host\": \"localhost\",\n\t\t\t\"port\": 6379,\n\t\t\t\"db\": 1\n\t\t}))", "def __init__(self):\n self.database = Database()\n self.load_config()", "def init(): \n\tset_verbosity()\n\t_set_threads()\n\t_set_heartbeat()\n\t#_set_storage()\n\t\n\tinit_targets()\n\t\n\tsend_heartbeat(start=True)\n\t\n\tinfo_msg = \"init plugin script\"\n\tlogger.info(info_msg)\n\n\tinit_plugin()\n\n\tinfo_msg = \"loaded %s plugin(s)\" %(len(kb.plugins.handle))\n\tlogger.info(info_msg)", "def _init_component(self):\n setup_info = self._serializer.read_msg()\n\n pid = os.getpid()\n self._serializer.send_msg({'pid': pid})\n self._create_pidfile(setup_info['pidDir'], pid)\n\n return StormConfig(setup_info['conf']), setup_info['context']", "def init(self):\n self.filename, file_extension = os.path.splitext(os.path.basename(__file__))\n\n # parse argument\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--configdir\", help=\"your config.ini directory\", type=str)\n parser.add_argument(\"--logdir\", help=\"your log directory\", type=str)\n args = parser.parse_args()\n\n # determine config directory\n if args.configdir:\n config_file = os.path.join(args.configdir, 'config.ini')\n else:\n config_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), '../config', 'config.ini')\n\n if args.logdir:\n log_file = os.path.join(args.logdir, '%s.log' % self.filename)\n else:\n log_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), '../logs', '%s.log' % self.filename)\n\n # load config\n self.config = configparser.ConfigParser()\n self.config.read(config_file)\n\n # init logger\n logbook.set_datetime_format(\"local\")\n self.logger = logbook.Logger(name=self.filename)\n format_string = '%s %s' % ('[{record.time:%Y-%m-%d %H:%M:%S.%f%z}] {record.level_name}',\n '{record.module}:{record.lineno}: {record.message}')\n if self.config.has_option('handler_stream_handler', 'verbose'):\n log_handler = logbook.StreamHandler(sys.stdout, level=self.config.get('Logger', 'level'), bubble=True,\n format_string=format_string)\n self.logger.handlers.append(log_handler)\n log_handler = logbook.TimedRotatingFileHandler(log_file, level=self.config.get('Logger', 'level'),\n date_format='%Y%m%d', backup_count=5, bubble=True,\n format_string=format_string)\n self.logger.handlers.append(log_handler)\n else:\n log_handler = logbook.TimedRotatingFileHandler(log_file, level=self.config.get('Logger', 'level'),\n date_format='%Y%m%d', backup_count=5, bubble=True,\n format_string=format_string)\n self.logger.handlers.append(log_handler)\n\n # init database\n self.db = AdhocDatabaseHandler.instantiate_from_configparser(self.config, self.logger)", "def __init__(self):\n self.sp, self.user = self.init_auth_client()\n self.logger = logging.getLogger(__name__)", "def __init__(self, config=None, 
path=None, workdir=None):\r\n\t\tself.connection_logger = logging.getLogger('Connections')\r\n\t\tself.logger = logging.getLogger('Server')\r\n\r\n\t\t# Loading all of the configuration variables\r\n\t\tdatabase_type = string.lower(config.get_index(index='DatabaseType', datatype=str))\r\n\t\tdatabase = config.get_index(index='DatabaseName', datatype=str)\r\n\t\tuser = config.get_index(index='DatabaseUser', datatype=str)\r\n\t\tpassword = config.get_index(index='DatabasePassword', datatype=str)\r\n\t\tself.work_factor = config.get_index(index='WorkFactor', datatype=int)\r\n\t\tdebug = config.get_index(index='Debug', datatype=bool)\r\n\t\tif (database_type == 'sqlite'):\r\n\t\t\tdatabase_location = path + config.get_index(index='TargetDatabase', datatype=str)\r\n\t\telse:\r\n\t\t\tdatabase_location = config.get_index(index='TargetDatabase', datatype=str)\r\n\r\n\t\t# Load server messages\r\n\t\tself.auth_low_argc = config.get_index('AuthLowArgC', str)\r\n\t\tself.auth_invalid_combination = config.get_index('AuthInvalidCombination', str)\r\n\t\tself.auth_connected = config.get_index('AuthConnected', str)\r\n\t\tself.auth_replace_connection = config.get_index('AuthReplaceConnection', str)\r\n\t\tself.auth_connection_replaced = config.get_index('AuthConnectionReplaced', str)\r\n\t\tself.auth_connect_suggestion = config.get_index('AuthConnectSuggestion', str).replace('\\\\n','\\n')\r\n\t\tself.auth_replace_connection_global = config.get_index('AuthReplaceConnectionGlobal', str)\r\n\t\tself.game_client_disconnect = config.get_index('GameClientDisconnect', str)\r\n \t\t\r\n\t\t# Loading welcome/exit messages\r\n\t\twith open(workdir + 'config/welcome_message.txt') as f:\r\n\t\t\tself.welcome_message_data = f.read() + '\\n'\r\n\t\twith open(workdir + 'config/exit_message.txt') as f:\r\n\t\t\tself.exit_message_data = f.read() + '\\n'\r\n\r\n\t\tself.interface = interface.Interface(config=config, workdir=workdir, server=self, debug=debug)\r\n\r\n\t\t# Connect/Create our database is required\r\n\t\tdatabase_exists = True\r\n\t\tif (database_type == 'sqlite'):\r\n\t\t\ttry:\r\n\t\t\t\twith open(database_location) as f: pass\r\n\t\t\texcept IOError as e:\r\n\t\t\t\tself.logger.info('This appears to be your first time running the ScalyMUCK server. 
We must initialise your database ...')\r\n\t\t\t\tdatabase_exists = False\r\n\r\n\t\t\tdatabase_engine = create_engine('sqlite:///%s' % (database_location), echo=False)\r\n\t\telse:\r\n\t\t\turl = database_type + '://' + user + ':' + password + '@' + database_location + '/' + database\r\n\t\t\ttry:\r\n\t\t\t\tdatabase_engine = create_engine(url, echo=False, pool_size=20, max_overflow=0)\r\n\t\t\texcept OperationalError as e:\r\n\t\t\t\tself.logger.error(str(e))\r\n\t\t\t\tself.logger.error('URL: ' + url)\r\n\t\t\t\tself.is_running = False\r\n\t\t\t\treturn\r\n\r\n\t\tself.world = world.World(engine=database_engine, server=self)\r\n\t\tgame.models.Base.metadata.create_all(database_engine)\r\n\r\n\t\t# Check to see if our root user exists\r\n\t\tif (database_type != 'sqlite'):\r\n\t\t\troot_user = self.world.find_player(name='RaptorJesus')\r\n\t\t\tif (root_user is None):\r\n\t\t\t\tdatabase_exists = False\r\n\r\n\t\tgame.models.server = self\r\n\t\tgame.models.world = self.world\r\n\r\n\t\tif (database_exists is False):\r\n\t\t\troom = self.world.create_room('Portal Room Main')\r\n\t\t\tuser = self.world.create_player(name='RaptorJesus', password='ChangeThisPasswordNowPlox', workfactor=self.work_factor, location=room, admin=True, sadmin=True, owner=True)\r\n\t\t\troom.set_owner(user)\r\n\r\n\t\t\tself.logger.info('The database has been successfully initialised.')\r\n\r\n\t\tself.interface.initialize(config=config, world=self.world, session=self.world.session, engine=database_engine, workdir=workdir, server=self, debug=debug)\r\n\t\r\n\t\tself.telnet_server = TelnetServer(port=config.get_index(index='ServerPort', datatype=int),\r\n\t\t\t\t\t\taddress=config.get_index(index='ServerAddress', datatype=str),\r\n\t\t\t\t\t on_connect = self.on_client_connect,\r\n\t\t\t\t\t on_disconnect = self.on_client_disconnect,\r\n\t\t\t\t\t timeout = 0.05)\r\n\r\n\t\tself.database_status.connect(self.callback_database_status)\r\n\t\r\n\t\tself.logger.info('ScalyMUCK successfully initialised.')\r\n\t\tself.is_running = True", "def init_stream_handler(\n self, \n logger, \n loop, \n netconf_ip, \n netconf_port,\n statistics,\n xml_to_json_translator):\n self._logger = logger\n self._asyncio_loop = loop\n self._encoding = \"xml\"\n self._netconf_ip = netconf_ip\n self._netconf_port = netconf_port\n self._stat = statistics\n self._xml_to_json_translator = xml_to_json_translator", "def __init__(self):\n\n # Read configuration\n config = configparser.ConfigParser()\n current_directory = os.path.dirname(os.path.realpath(__file__))\n config_file = '{0}/../etc/parserd.conf'.format(current_directory)\n config_file = os.path.abspath(config_file)\n config.read(config_file)\n\n config_keys_needed = ['LogDir', 'LogFile', 'RestApiUrl', 'RunInterval',\n 'DbEmail', 'DbPassword']\n\n if 'parserd' not in config:\n raise ParserConfigError\n if not all(key in config['parserd'] for key in config_keys_needed):\n raise ParserConfigError\n\n self.run_interval = int(config['parserd']['RunInterval'])\n\n \n self.db_rest_api = DbRestApi(config['parserd']['RestApiUrl'],\n config['parserd']['DbEmail'],\n config['parserd']['DbPassword'])\n\n # Set up logger\n log_dir = '{0}/{1}'.format(\n current_directory,\n config['parserd']['LogDir'])\n log_dir = os.path.abspath(log_dir)\n log_file = config['parserd']['LogFile']\n full_log_file = '{0}/{1}'.format(log_dir, log_file)\n full_log_file = os.path.abspath(full_log_file)\n\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n class_name = type(self).__name__\n self.logger = 
logging.getLogger(__name__)\n fh = logging.FileHandler(full_log_file)\n formatter_string = ('%(asctime)s - %(levelname)s'\n ' - {0}'\n ' - %(message)s').format(class_name)\n formatter = logging.Formatter(formatter_string)\n fh.setFormatter(formatter)\n self.logger.addHandler(fh)\n self.logger.setLevel(logging.DEBUG)", "def __init__(self, options, positionals):\n\n print \"* Starting up LOPHI Master Process\"\n\n self.COMMANDS = {G.CTRL_CMD_START: self.command_start,\n G.CTRL_CMD_LIST: self.command_list,\n G.CTRL_CMD_PAUSE: self.command_abstract,\n G.CTRL_CMD_UNPAUSE: self.command_abstract,\n G.CTRL_CMD_SPLASH: self.command_splash,\n G.CTRL_CMD_UPDATE_HW: self.command_update_hw,\n G.CTRL_CMD_STOP: self.command_abstract,\n G.CTRL_CMD_DIE: self.command_abstract,\n G.CTRL_CMD_ATTACH: self.command_abstract,\n G.CTRL_CMD_EXECUTE: self.command_abstract}\n\n self.MSG_TYPES = set([G.CTRL_TYPE, G.REG_TYPE])\n\n # response header\n self.RESP_HEADER = \"[LOPHI Master] \"\n\n logger.debug(\"Importing config files...\")\n\n # Save our config file\n self.master_config_file = options.config_file\n\n # Save our config file\n self.analysis_directory = options.analysis_directory\n\n # Read our config into an internal structure \n self.config_list = Configs.import_from_config(self.master_config_file,\n \"controller\")\n\n # Read our analysis scripts into an internal structure\n self.update_analysis()\n\n # Connect to our database\n self.DB_analysis = DB.DatastoreAnalysis(options.services_host)\n\n # Set our RabbitMQ host\n self.amqp_host = options.services_host", "def __init__(self):\n log.msg(\"Initializing Twitch parser.\")\n\n # initialize our data members\n self.streams = tuple()\n self.crc32 = 0" ]
[ "0.67473614", "0.670801", "0.6669442", "0.66132694", "0.6499854", "0.64482814", "0.64375705", "0.6405556", "0.6398037", "0.6321428", "0.6306508", "0.6279179", "0.6277945", "0.6263203", "0.62060374", "0.62049055", "0.61645144", "0.61528885", "0.61439127", "0.6142933", "0.6140264", "0.6139993", "0.6092932", "0.60920125", "0.6089977", "0.60818595", "0.6080144", "0.60702777", "0.6062263", "0.6060465" ]
0.78285134
0
Entry point. Using the "with" syntax starts a new session
def __enter__(self): self.new_session() return self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def session(self):", "def session(project, engine_sessionmaker, connection):\n _, Session = engine_sessionmaker\n\n try:\n session = Session(bind=connection)\n yield session\n finally:\n session.close()", "def __enter__(self):\r\n if not self._session:\r\n self.restart()\r\n return self", "def _set_session(self):\n self.__session = sessionmaker(bind=self.__engine)()", "def create_session():\n session = Session()\n try:\n yield session\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()", "async def start_session(self):\n\t\t...", "def open(self):\n self._session = self.session_open()", "def db_session():\n engine = create_my_engine()\n connection = engine.connect()\n db_session = scoped_session(sessionmaker(autocommit=False, autoflush=True, bind=engine))\n yield db_session\n db_session.close()\n connection.close()", "def db_session(request, config):\n from h import db\n engine = db.make_engine(config.registry.settings)\n session = db.Session(bind=engine)\n try:\n yield session\n finally:\n session.close()\n engine.dispose()", "def new_session(self):\n return self.Session()", "def session(get_session):\n return get_session()", "def test_set_session():", "def set_db_session():\n g.s = database.db_session()", "def start_session(db_path):\n engine = create_engine('sqlite:///' + db_path)\n Session = sessionmaker(bind=engine)\n return Session()", "def _new_session(self, m):\n # Create a new session for this model, initialize\n # variables, and save / restore from\n # checkpoint.\n self._session = tf.Session(\n '',\n config=tf.ConfigProto(\n allow_soft_placement=True, log_device_placement=False))\n self.session.run(m.init)\n\n # Load in a previous checkpoint, or save this one\n self.extract_model_spec()\n try:\n yield\n finally:\n tf.Session.reset('')\n self._session = None", "def db_session() -> Generator:\n _session = session()\n try:\n yield _session\n finally:\n _session.close()", "def session():\n def session():\n return BaseUrlSession()\n return session", "def single_threaded_session():\n return make_session(num_cpu=1)", "def load_session(session):\n def inner():\n web.ctx.session = session\n return inner", "async def create_session(self):\n # Creating a session under an async function is recommended\n self.session = aiohttp.ClientSession()", "def session_context(func):\r\n def wrapper(*args, **kwargs):\r\n self = args[0]\r\n with self._create_db_session() as db:\r\n self.db = db\r\n return func(*args, **kwargs)\r\n return wrapper", "def session_scope(config):\n db, Session = setup_db(config)\n with db.connect() as conn:\n session = Session(bind=conn)\n try:\n yield session\n session.commit()\n except: # noqa\n session.rollback()\n raise\n finally:\n session.close()", "def main():\n\n run_manual_session()\n # run_automated_session()", "def init_session(self):\n pass", "def init_session(self):\n pass", "def new_session(self):\n self.command(\"new\")", "def getSession(f):\n def wrapper(*args,**kwargs):\n s = _getSession()\n return f(*[s]+list(args),**kwargs)\n s.close()\n return wrapper", "def session(**kwargs) -> typing.ContextManager[Session]:\n new_session = Session(**kwargs)\n try:\n yield new_session\n new_session.commit()\n except Exception:\n new_session.rollback()\n raise\n finally:\n new_session.close()", "def __init__(self, session):\n self._session = session", "def main():\n settings = {}\n settings['sqlalchemy.url'] = os.environ['DATABASE_URL']\n config = Configurator(settings=settings)\n config.include('TechLurker.models')\n SessionFactory = 
config.registry[\"dbsession_factory\"]\n session = SessionFactory()\n return session" ]
[ "0.7039566", "0.6990874", "0.67499155", "0.6730254", "0.67289436", "0.6700694", "0.6683485", "0.65724987", "0.6554034", "0.6518529", "0.6489163", "0.64856803", "0.6430929", "0.6397401", "0.6395417", "0.6392703", "0.63916653", "0.63534707", "0.63483596", "0.63387537", "0.6328805", "0.63283205", "0.63223904", "0.63190174", "0.63190174", "0.6312144", "0.63086665", "0.63016653", "0.6292726", "0.6277625" ]
0.749841
0
Cleanup operations. Closes the session and flushes and closes the loggers
def _cleanup(self): if self.current_session is not None: self.current_session.close() self.current_session = None for handler in list(self.logger.root_logger.handlers): self.logger.root_logger.removeHandler(handler) handler.flush() handler.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_up(self):\n self.session.close()", "def cleanup(self):\n async def close_session():\n await self.logi.close()\n\n self.loop.run_until_complete(close_session())\n\n self.loop.close()\n self.logi = None\n if os.path.isfile(CACHE_FILE):\n os.remove(CACHE_FILE)", "def cleanup(self):\n del self.session", "def cleanup(self):\n if self.log_fo:\n self.log_fo.close()", "def _cleanup(self):\n \tself._gqcnn.close_session()", "def __del__(self):\n\n self.session.close()", "def teardown_session(e):\n my_db.close()\n OT_spider.close()", "def _cleanup(self):\n\n self._orm_session.close()\n self._db_transaction.rollback()\n self._db_connection.close()\n\n self._db_engine = None\n self._db_connection = None\n self._db_transaction = None\n self._orm_session = None\n self._orm_session_proxy = None\n\n self.__is_connected = False\n\n self._execute_deferred_queries()", "async def cleanup(app):\n # await client_session.close()", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.session.close()", "def __del__(self):\n\n # cleanup logging handlers\n for handler in self.logger.handlers[::-1]:\n try:\n handler.acquire()\n handler.flush()\n handler.close()\n except (OSError, ValueError):\n pass\n finally:\n handler.release()", "def cleanup_after_session(self):\n\n self._aprs_service.reset_tracker()", "def tearDown(self):\n self.session.close()", "def __del__(self):\n if logging is not None:\n pass\n self.close()", "def release(self):\n self.errorlog.close()\n del self.errorlog\n del self.printer", "def __del__(self):\n self.close_connection()\n self.close_engine()", "def close(self):\r\n _logger.debug(\"Closing sessions...\")\r\n dbs = self._sessions.keys()\r\n while len(dbs) > 0:\r\n session = self._sessions.pop(dbs.pop())\r\n session.close()\r\n if self.__provider is not None:\r\n self.__provider.close()\r\n self.__provider = None", "def __del__(self):\n self._close_http_session()", "def close(self):\n for logger in self._loggers:\n logger.close()", "def close_session(self):\n self.session.execute(QUERY_DROP_TABLE_1)\n self.session.execute(QUERY_DROP_TABLE_2)\n self.session.execute(QUERY_DROP_TABLE_3)\n self.session.shutdown()\n self.cluster.shutdown()", "def finalize():\n\n # Close syslog like a good citizen\n syslog.closelog()", "def dispose(self):\n clear_session()", "def __del__(self):\n try:\n self.api.transport.session.close()\n except Exception as e:\n log.debug(f\"Failed to close VSO API connection with: {e}\")", "def close(self):\n self.session.close()\n self.session = None", "def teardown_request(response_or_exc):\n try:\n logging.info(\"Try close session\")\n db.session.remove()\n\n db.session.remove()\n except:\n logging.error(\"Error close session\")", "def close(self):\n if self.authenticated:\n self.db.logout()\n if self.connection is not None:\n self.connection.close()", "def close(self):\n for key, logger in self._loggers.items():\n logger.close()", "def __del__(self):\n\n self.logfd.close()", "def tearDown(self):\n DBSession.close()\n daemons.execute_in_thread('radicale', lambda: transaction.commit())\n teardown_db()\n transaction.commit()\n DBSession.close_all()\n config['tg.app_globals'].sa_engine.dispose()", "def cleanup(_):\n self.db.close()" ]
[ "0.8010654", "0.76645267", "0.75910866", "0.75116664", "0.7349199", "0.7326595", "0.73215795", "0.7284442", "0.7192665", "0.7167402", "0.71294385", "0.7050958", "0.7023023", "0.7016426", "0.69936794", "0.69187576", "0.6898732", "0.6898154", "0.6891282", "0.6827511", "0.68162906", "0.68152726", "0.67811245", "0.677455", "0.67614627", "0.6756236", "0.67544407", "0.67421466", "0.6737528", "0.6729702" ]
0.80990434
0
Get the current session
def current_session(self): return self._session
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_current_session(self):\n if self.session is not None:\n return self.session\n else:\n return None", "def get_session(self):\n return self.session", "def session(self):\n return self.session_store.get_session()", "def session(self):\n return self.session_store.get_session()", "def session(self):\n return self.session_store.get_session()", "def getSession(self):\n return self.request.getSession()", "def getSession(self):\n session = app.settings.cherrypy.session.get(self.session)\n return session", "def get_session(self):\n return self._session()", "def _get_current_session(self) -> Dict[str, Any]:\n return self._data[-1]", "def current_session():\n if _session_stack.empty:\n raise ValueError('No session is activated.')\n return _session_stack.top", "def get_session(self):\n yield from self._ensure_session_valid()\n return self.session", "def session(self):\n\t\treturn self._session", "def get_current_session(self) -> SessionType:", "def get_current_session():\n if SessionFactoryPool.current_session is None:\n session = SessionFactoryPool.create_new_session()\n SessionFactoryPool.current_session = session\n\n return SessionFactoryPool.current_session", "def session(self):\n return self.ssession()", "def session(self):\n return session", "def get_session(cls):\r\n if cls._session is not None:\r\n return cls._session\r\n else:\r\n raise RuntimeError('Session not set.')", "def session(self):\n return self.__session", "def session(self):\n return self._session", "def session(self):\n return self._session", "def session(self):\n if not self._session: #Create new session if none exists\n return self._new_session()\n return self._session", "def session(self):\n if self._session is None:\n self.init_session()\n\n return self._session", "def getSession():\n return call(\"getSession\")", "def session(self):\n return self._application_handler.session", "def session(self):\n\n return self._session", "def get_session(self):\n session = Session(self.settings)\n self.sessions.append(session)\n return session", "def get_session():\n name = request.args.get('name')\n sch = Scheduler()\n return sch.get_session(name)", "def getCurrentUsername(self):\n session = self.getSession()\n if session is not None:\n return session", "def session(self):\n return self.session_store.get_session(backend=\"datastore\")", "def _session(self):\n if self.session is None:\n self.session = create_session(self.config, self.auth)\n return self.session" ]
[ "0.8996255", "0.85965174", "0.82989895", "0.82989895", "0.82958096", "0.8282755", "0.8281556", "0.82587373", "0.8178331", "0.80580115", "0.79930717", "0.7990043", "0.79854894", "0.7956763", "0.78924745", "0.7880293", "0.78327733", "0.780845", "0.78052527", "0.78052527", "0.7787687", "0.7732461", "0.7696177", "0.7695797", "0.7621693", "0.75874555", "0.75335604", "0.7526466", "0.74958426", "0.7485623" ]
0.8756857
1
Set the current session
def current_session(self, session): if self._session is None: self._session = session else: if session is None or self._session.session_id != session.session_id: self._session.active = False self._session = session
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_session(session):\n\n global session_\n session_ = session\n import observatory.api.server.api as api\n\n api.session_ = session", "def setSession( self, name, value, REQUEST=None, cookie=None ):\n SetSessionValue( self, name, value, REQUEST, cookie )", "def fusion_api_set_active_session(self, sessionId):\n return self.loginsession.set_active_session(sessionId)", "def set_current_user(self, user):\n self.session['u'] = user.get().key.urlsafe()", "def session(self, value: ClientSession):\r\n self._session = value", "def _set_session(self):\n self.__session = sessionmaker(bind=self.__engine)()", "def use_session(cls, session):\r\n cls._session = session", "def set_session(context, key, value):\n session_manager = getToolByName(context, 'session_data_manager')\n session = session_manager.getSessionData()\n session[key] = value", "def set_login_session(self, session_id=None):\r\n meta = self.get_meta()\r\n old_login = meta.get('session_id', None)\r\n if old_login:\r\n SessionStore(session_key=old_login).delete()\r\n meta['session_id'] = session_id\r\n self.set_meta(meta)\r\n self.save()", "def do_session(self, name):\n if name != \"\":\n self.session = name\n else:\n print('\\n'+self.session+'\\n')", "def set(self, session):\n raise InvalidSessionException('Need to be implemented')", "def test_set_session():", "def set_session_property(self, key, value):\n\n self.session[key] = value", "def login(self):\n backend = self.backend\n self.session[backend.session_id_key] = self[\"id\"]\n self.session[backend.session_backend_key] = backend.session_backend_val\n self.session[backend.session_hash_key] = self._get_session_hash(\n self[\"password\"]\n )", "def session(self):", "def set_session_cookie(self):\n self.driver.get('{domain}/home/learn/index#/{cid}/go'.format(domain=domain,cid=cid))\n for subCookie in self.driver.get_cookies():\n self.session.cookies.set(subCookie[u'name'], self.driver.get_cookie(subCookie[u'name'])['value'])\n if config.DEBUG:\n print \"session cookies :: \\n{}\".format(self.session.cookies)", "def set_db_session():\n g.s = database.db_session()", "async def session(self,ctx):\n if ctx.invoked_subcommand is None:\n await ctx.send(\"The current main session is \" + \"```\" + await self.config.sessions.main() + \"```\")", "def _set_session_value(self, req, section_name, option_name, option_value):\n name = 'inieditor|%s|%s' % (section_name, option_name)\n req.session[name] = option_value", "def on_after_render(self, request):\n self.write_session_cookie(request)\n return self.storage.set_session(request.session_uuid, request.session)", "def startSession(self):\n self.storage.insert(self.__json__())", "def do_SetSessionName (self, line):\r\n OpensslTracking.session = line", "def setSessionStore(self, store):\n pass", "def SetCurrent(env):\n global ENV\n ENV[threading.current_thread().ident] = env", "def set_session(aws_access_key_id=None,\n aws_secret_access_key=None,\n aws__session_token=None,\n region_name=None,\n profile_name=None,\n boto_session=None):\n global __session, client\n __session = boto_session if boto_session is not None else boto3.session.Session(**larry.core.copy_non_null_keys(locals()))\n client = __session.client('sts')", "def test_set_session_id(self, context):\n context.set_session_id(b\"abc\")", "def session(rq):\n rq.session['username']='wxy'\n return HttpResponse(__file__ + '::session and first user is my daugter:' + rq.session['username'])", "def session_id(self, session_id):\n\n self._session_id = session_id", "def init_session(self):\n 
pass", "def init_session(self):\n pass" ]
[ "0.73562306", "0.7294437", "0.7265423", "0.7152156", "0.7137305", "0.69370335", "0.68532306", "0.68211675", "0.67735296", "0.67608225", "0.67585516", "0.6742993", "0.66266817", "0.6609575", "0.65909326", "0.6488937", "0.6426722", "0.6381807", "0.63759553", "0.63736165", "0.6354772", "0.63532454", "0.63391274", "0.6317668", "0.6315513", "0.6286827", "0.6236151", "0.62134135", "0.61805964", "0.61805964" ]
0.7321693
1
Clears all stored sessions, optionally excluding active sessions
def clear_sessions(self, inactive_only=True, clear_history=False): Session.clear_sessions(self, inactive_only, clear_history)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear_sessions():\n call_command(\"clearsessions\")", "def clear_session(self):\n self.mongo_database.cache.delete_many({\"session_id\": self.session_id})", "def clear_session(self):\n self.session_mgr.clear_session()", "def clearSessionWithoutLoggingOut(request):\n for key in list(request.session.keys()):\n if not key.startswith('_'):\n del request.session[key]\n return", "def clear_all_cookies():", "def clear(self):\n # Orphan all objects\n for obj in self.uow:\n state(obj).session = None\n self.uow.clear()\n self.imap.clear()", "def clean_sessions():\n while not QUIT:\n # Find number of known tokens\n size = conn.zcard('recent:')\n\n if size <= LIMIT:\n time.sleep(1)\n continue\n\n # Collect tokens to remove\n end_index = min(size - LIMIT, 100)\n sessions = conn.zrange('recent:', 0, end_index - 1)\n\n # Collect key names for tokens\n session_keys = []\n for sess in sessions:\n session_keys.append('viewed:' + token)\n session_keys.append('cart:' + token)\n\n # Delete view, login, and recent keys\n conn.delete(*session_keys)\n conn.hdel('login:', *tokens)\n conn.zrem('recent:', *tokens)", "def clean_session(self):\n unused_entries = ['root_freespace', 'home_freespace', 'hardvideo',\n 'optional_partitions', 'boot_id', 'greeter', 'display',\n 'boot_size', 'root_size', 'swap_size', 'home_size',\n 'root_id', 'lvm', 'swap_id', 'home_id', 'luks',\n 'user_passwd', 'root_passwd', 'desktop', 'gpu_driver',\n 'vga_controller', 'gpu_proprietary', 'desktop_extra']\n\n for unused in unused_entries:\n del self.user[unused]", "def reset_flask_session_on_logout():\n session.clear()", "def deleteall(update, context, sessions_file):\n\terr_code, err_msg = delete_all_sessions(sessions_file)\n\tupdate.effective_message.reply_text('All sessions were deleted.')\n\treturn", "def clear_all(self):\n self.clear_redis()\n self.clear_cache()", "def discard_all_sessions(self):\n\n url = f\"{self.server_and_port}/web_api/show-sessions\"\n headers = self.get_headers()\n payload = {\n \"limit\": 20, # This will make 20 calls to the API at most, if there are more sessions than that its trouble\n \"view-published-sessions\": False,\n }\n request = requests.post(url, json=payload, headers=headers, verify=self.ssl_verify)\n try:\n request.raise_for_status()\n except Exception as e:\n # The errors returned by this api aren't very good\n # It's a 400 with some error text.\n raise PluginException(\n cause=\"There was problem publishing to Check Point NGFW.\",\n assistance=request.text,\n data=e,\n )\n\n url_discard = f\"{self.server_and_port}/web_api/discard\"\n sessions = request.json().get(\"objects\")\n for session in sessions:\n uid = session.get(\"uid\")\n discard_payload = {\"uid\": uid}\n\n requests.post(url_discard, json=discard_payload, headers=headers, verify=self.ssl_verify)\n\n self.publish() # Yes, you have to publish that you are not publishing\n self.logout()\n\n self.get_sid()", "def clear_cookies(self):\n self.base_driver.delete_all_cookies()", "def clear(self, request):\n del request.session[self.id]", "def clean_current_session(self):\n return self.clean_session(self.session_id)", "def do_logout():\n del session[CURRENT_USER_KEY]", "def close_all(cls) -> None:\n\n close_all_sessions()", "def logout():\n _cookies = ['user', 'pass', 'hash']\n for cookie in _cookies:\n util.web.delete_cookie(cookie)", "def logout_all(self, request):\n request.user.auth_token_set.all().delete()\n return Response(None, status=status.HTTP_204_NO_CONTENT)", "def close_all_sessions() -> None:\n\n for sess in 
_sessions.values():\n sess.close()", "def flush_all(cls):\n for sess in cls._session_registry.values():\n sess.flush()", "def clean_up_old_session(user_name=None):\n cherrypy.log.error(\"cleaning all sessions for %s\" % user_name)\n if \"slycatauth\" in cherrypy.request.cookie:\n try:\n # cherrypy.log.error(\"found old session trying to delete it \")\n sid = cherrypy.request.cookie[\"slycatauth\"].value\n couchdb = slycat.web.server.database.couchdb.connect()\n session = couchdb.get(\"session\", sid)\n if session is not None:\n couchdb.delete(session)\n except:\n # if an exception was throw there is nothing to be done\n pass\n if user_name is not None:\n try:\n couchdb = slycat.web.server.database.couchdb.connect()\n sessions = [session for session in couchdb.scan(\"slycat/sessions\") if\n session[\"creator\"] == user_name]\n if sessions:\n #cherrypy.log.error(\"sessions found %s\" % user_name)\n for session in sessions:\n couchdb.delete(session)\n #cherrypy.log.error(\"sessions deleted %s\" % user_name)\n except:\n # if an exception was throw there is nothing to be done\n pass", "def clear(self):\n self._state[\"data\"].clear()\n self._state[\"session\"].request_rerun()", "def clear(self):\n self._state[\"data\"].clear()\n self._state[\"session\"].request_rerun()", "def clear(self):\n self._state[\"data\"].clear()\n self._state[\"session\"].request_rerun()", "def clear(self):\r\n self._state[\"data\"].clear()\r\n self._state[\"session\"].request_rerun()", "def clear(self):\r\n self._state[\"data\"].clear()\r\n self._state[\"session\"].request_rerun()", "def kill_all():\n base = MongoTestServer.get_base_dir()\n\n print \"======================================\"\n print \"Cleaning up previous sessions under \" + base\n print \"======================================\"\n\n for mongo in os.listdir(base):\n if mongo.startswith('tmp'):\n mongo = os.path.join(base, mongo)\n print \"Previous session: \" + mongo\n lock = os.path.join(mongo, 'mongod.lock')\n if os.path.exists(lock):\n print \"Lock file found: \" + lock\n p = subprocess.Popen([\"lsof\", \"-Fp\", \"--\", lock],\n stdout=subprocess.PIPE)\n (out, _) = p.communicate()\n if out:\n pid = out[1:].strip()\n print \"Owned by pid: \" + pid + \" killing...\"\n p = subprocess.Popen([\"kill -9 %s\" % pid], shell=True)\n p.communicate()\n print \"Removing: \" + mongo\n shutil.rmtree(mongo, True)", "def shutdown(self) -> None:\n logger.info(\"shutting down all sessions\")\n while self.sessions:\n _, session = self.sessions.popitem()\n session.shutdown()", "def forget(self, request):\n # Clear session\n request.session.invalidate()\n return []" ]
[ "0.8116094", "0.7734233", "0.75402987", "0.7404016", "0.717296", "0.7071539", "0.6903001", "0.681045", "0.671897", "0.66833776", "0.66807574", "0.66768897", "0.6660443", "0.66087145", "0.6586084", "0.65702164", "0.6561426", "0.65318245", "0.6481722", "0.64585", "0.6429566", "0.6396746", "0.6364791", "0.6364791", "0.6364791", "0.6364042", "0.6364042", "0.6351832", "0.63421154", "0.6317661" ]
0.7810983
1
Add the workflow to the workflow manager
def add_workflow(self, workflow): self.workflow_manager.add_workflow(workflow)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_workflow(workflow):\n\n launchpad = LaunchPad.auto_load()\n launchpad.add_wf(workflow)", "def add_workflow(workflow):\n\n launchpad = LaunchPad.auto_load()\n launchpad.add_wf(workflow)", "def add_workflow(self, workflow):\n self._data_dict[self.KEY_BI_WORKFLOWS].append(workflow)", "def add_workflow_step(self, wf_step):\n self._data_dict[self.KEY_WF_STEPS].append(wf_step)", "def add_workflow(self, data):\n workflow_id = str(uuid.uuid4()).replace('-', '')\n try:\n self._session.add(WorkflowEntity(\n id=workflow_id,\n name=data['name'],\n description=data['description'],\n username=data['username'],\n git=data['git'],\n version=data['version'],\n inputs=data['inputs'],\n parameters=data['parameters'],\n final_output=data['final_output'],\n apps=data['apps'],\n public=data['public'],\n enable=data['enable'],\n test=data['test'],\n created=None,\n modified=None\n ))\n except SQLAlchemyError as err:\n Log.an().error('sql exception [%s]', str(err))\n return False\n\n return workflow_id", "def add_workflow_test(self, workflow_test_configuration):\n self._workflows[workflow_test_configuration.name] = workflow_test_configuration", "def start_workflow(self, **params):\n raise NotImplementedError", "def start_workflow(self, workflow_name, workflow_input, **params):\n raise NotImplementedError", "def init_workflow():\n pass", "def __init__(self, workflow):\n self.workflow = workflow", "def initialize_workflow(self, workflow):\n with self._driver.session() as session:\n session.write_transaction(tx.create_workflow_node, workflow)\n session.write_transaction(tx.create_workflow_requirement_nodes,\n requirements=workflow.requirements)\n session.write_transaction(tx.create_workflow_hint_nodes, hints=workflow.hints)\n session.write_transaction(tx.create_workflow_input_nodes, inputs=workflow.inputs)\n session.write_transaction(tx.create_workflow_output_nodes, outputs=workflow.outputs)", "def workflow_step(self, workflow_step):\n\n self._workflow_step = workflow_step", "def add_wf(self,wfname):\n wf = Workflow()\n if not wf.is_tag_valid(wfname): \n raise pawstools.WfNameError(wf.tag_error_message(wfname))\n wf.message_callback = self.logmethod\n self.workflows[wfname] = wf", "def create(*, db_session, workflow_in: WorkflowCreate) -> Workflow:\n project = project_service.get_by_name_or_raise(\n db_session=db_session, project_in=workflow_in.project\n )\n plugin_instance = plugin_service.get_instance(\n db_session=db_session, plugin_instance_id=workflow_in.plugin_instance.id\n )\n workflow = Workflow(\n **workflow_in.dict(exclude={\"plugin_instance\", \"project\"}),\n plugin_instance=plugin_instance,\n project=project,\n )\n\n db_session.add(workflow)\n db_session.commit()\n return workflow", "def initialize_workflow(self, workflow):\n if self.workflow_loaded():\n raise RuntimeError(\"attempt to re-initialize existing workflow\")\n if workflow.requirements is None:\n workflow.requirements = []\n if workflow.hints is None:\n workflow.hints = []\n\n self._workflow_id = workflow.id\n # Load the new workflow into the graph database\n self._gdb_interface.initialize_workflow(workflow)", "def create_workflow(self, workflow_id, name, owner, description, online=False, monitor=False, safe=True):\n try:\n w = Workflow(\n workflow_id=workflow_id,\n name=name,\n owner=owner,\n description=description,\n online=online,\n monitor=monitor\n )\n\n self.workflow_manager.add_workflow(w)\n self.current_workflow = w\n yield w\n except KeyError as e:\n if safe:\n raise e\n else:\n self.current_workflow = 
self.workflow_manager.workflows[workflow_id]\n yield self.workflow_manager.workflows[workflow_id]\n finally:\n self.current_workflow = None", "def main(workflow):\n \n if SHOW_UPDATES and workflow.update_available:\n workflow.add_item('A new version is available',\n 'Action this item to install the update',\n autocomplete='workflow:update',\n icon=ICON_SYNC)\n\n LOGGER.debug('Started create workflow')\n query = workflow.args[0]\n LOGGER.debug(query)\n\n core.autocompleteTags(workflow, LOGGER, query)\n\n # construct result\n title, tags = core.separateTags(query)\n\n tags_string = ', '.join(tags)\n query_string = constructCreateQuery(title, tags)\n\n LOGGER.debug('title: {!r}'.format(title))\n LOGGER.debug('query_string: {!r}'.format(query_string))\n if tags:\n workflow.add_item(title=\"Create note with title '{}' \".format(title),\n subtitle='Tags: ' + tags_string, arg=query_string, valid=True)\n else:\n workflow.add_item(title=\"Create note with title '{}'\".format(title),\n arg=query_string, valid=True)\n\n workflow.send_feedback()", "def workflow_definition(self, workflow_definition):\n\n self._workflow_definition = workflow_definition", "def add_workflow_entry(entry_message, data=''):\n return partial(__add_entry,\n event_type='WORKFLOW',\n entry_message=entry_message,\n data=data)", "def adc_api_workflow_create():\n workflow_json = request.get_json(force=True)\n\n return jsonify(adc.workflow_create(workflow_json=workflow_json))", "def create_workflow_file(self, workflow: Workflow, props: PropertySet):", "def createWorkflow(self, session, order_item_id):\n if DEBUG:\n print __name__, 'createWorkflow(%s)' % order_item_id\n workflow_code = self.getWorkflowByOrderItemId(order_item_id)\n orderitem = self.orders.getOrderItem(session, order_item_id)\n now = datetime.now()\n if orderitem.orig_order_item_id not in (order_item_id, None):\n activity_code = FIRST_REORDER_ACTIVITY\n user_id = User.AUTO\n else:\n activity_code = self.workflowdef[workflow_code][0]\n # if not a reorder add a product_item entry record too\n if orderitem.product_item_id: # gift-certs have no product_item\n self.histories.updateProductHistory(\n session, orderitem.product_item_id, activity_code, now)\n user_id = User.COMMERCE\n self.histories.updateOrderHistory(session, order_item_id, \n activity_code, now)\n work_date = orderitem.order.purchase_date\n if not work_date:\n raise WorkflowError(\"Unable to create workflow for order_item: \"\n \"%s. 
purchase_date is blank.\" % order_item_id)\n wi = WorkflowItem(workflow_id=self.workflows.getId(workflow_code),\n order_item_id=order_item_id,\n product_item_id=orderitem.product_item_id,\n activity_id=self.activities.getId(activity_code),\n state_id=State.INPROGRESS,\n user_id=user_id,\n skip_activities='',\n created=now,\n work_date=work_date)\n session.add(wi)\n workitem = self.getWorkflowItem(order_item_id)\n next = self.getNextActivity(session, workitem)\n workitem.activity_id = self.activities.getId(next)", "def set_workflow_field(apps, schema_editor):\n ScheduledOperation = apps.get_model('ontask', 'ScheduledOperation')\n for sitem in ScheduledOperation.objects.all():\n if sitem.workflow:\n continue\n\n if not sitem.action:\n raise Exception('Unable to set workflow in ScheduledOperation')\n\n sitem.workflow = sitem.action.workflow\n sitem.save()", "def new_workflow(self, upload_file, name=\"\", description=\"\", submit=None):\n data = upload_file.file.read()\n if not name:\n name = upload_file.filename.replace(\".xml\", \"\")\n workflow = Workflow(name=name, description=description,\n data=data,\n created_by=identity.current.user.id)\n log.info(\"Saved new workflow %d\", workflow.id)\n raise redirect(\"/workflow/%d\" % workflow.id)", "def view_add(\n request: HttpRequest,\n workflow: Optional[Workflow] = None,\n) -> JsonResponse:\n # Get the workflow element\n if workflow.nrows == 0:\n messages.error(\n request,\n _('Cannot add a view to a workflow without data'))\n return JsonResponse({'html_redirect': ''})\n\n # Form to read/process data\n form = ViewAddForm(request.POST or None, workflow=workflow)\n\n return save_view_form(\n request,\n form,\n 'table/includes/partial_view_add.html',\n )", "def add_flow(self, flow: FlowRoot):\n with self._lock:\n self.flow_roots[flow.name] = flow", "def addTask(self, task):\n self.tasklist.append(task)", "def do_workflow(self, arg=None):\n\n def add_steps_to_workflow(curr_flow):\n while True:\n cmd_call = simple_input('Please choose a command to add to the workflow.', cmds, True)\n if cmd_call not in ['DONE', 'EXIT']:\n if self.is_output_cmd(cmd_call):\n curr_flow.add_output(cmd_call)\n else:\n curr_flow.add_step(cmd_call)\n cmds.pop(cmds.index(cmd_call))\n\n _conf = simple_input('Do you want to configure this command?', ['Y','N'], True) if self.is_configureable(cmd) else None\n if _conf == 'Y':\n curr_flow.configure_step(cmd_call)\n\n elif cmd_call == 'DONE':\n break\n else:\n return\n return curr_flow.has_steps()\n\n def confirm_workflow(curr_flow):\n checks = [('START', 'Start workflow?'), ('ADD', 'Do you want to add more steps?'),\n ('RESTART', 'Do you want to start over?')]\n curr_flow.draw_steps()\n for check in checks:\n _continue = simple_input(check[1], ['Y', 'N', 'EXIT'])\n if _continue == 'Y':\n return check[0]\n if _continue == 'EXIT':\n return 'EXIT'\n return 'INVALID'\n\n print('Preparing Workflow Wizard...')\n options = sorted(self.cmds + self.output_cmds)\n from smores.workflow import Workflow\n workflow = Workflow(self)\n target, load_type = self.validate_args('', 'file')\n if target:\n _l = True if target in self.inputs['files'].keys() else False\n workflow.add_target(target, load_type, _l)\n print('Please choose the commands you would like to add to the workflow.'\n '\\nCommands will be executed in the order in which they are added.'\n '\\n\\nPlease note that some commands have dependencies that must be satisfied. 
An overview of '\n 'command dependencies is available on the main SMOREs wiki on Github')\n print('\\nAvailable Commands for WorkFlow')\n cmds = []\n for i, _o in enumerate(options):\n print('{1}'.format(i, _o))\n cmds.append(_o)\n cmds.append('DONE')\n steps_added = add_steps_to_workflow(workflow)\n while steps_added:\n _run = confirm_workflow(workflow)\n if _run == 'START':\n break\n elif _run == 'ADD':\n _ = add_steps_to_workflow(workflow)\n elif _run == 'RESTART':\n self.do_workflow('')\n else:\n return\n workflow.run()\n print('Workflow has completed.')\n return\n\n else:\n print('Workflows currently have to be setup without the file already being loaded.')\n return", "def add_task(self, task):\n self.tasks.append(task)", "def add_task(self, task):\n self.tasks.append(task)" ]
[ "0.82768464", "0.82768464", "0.7846967", "0.69754046", "0.67232186", "0.6609379", "0.66010517", "0.6353788", "0.63196826", "0.62862545", "0.62820166", "0.6208596", "0.606798", "0.6051274", "0.5934013", "0.5899704", "0.5891364", "0.58334327", "0.5800309", "0.5752983", "0.57394934", "0.5682649", "0.5677526", "0.56410354", "0.56033844", "0.5567941", "0.55473435", "0.55432147", "0.55348915", "0.55348915" ]
0.89757365
0
Function to populate factory functions for the tools and factors for ease of access.
def populate_tools_and_factors(self): for tool_channel in self.channel_manager.tool_channels: if tool_channel.channel_id == "tools": # These are the core tools setattr(self, "tools", ToolContainer()) setattr(self, "factors", FactorContainer()) tool_container = getattr(self, "tools") factor_container = getattr(self, "factors") else: # This is a plugin, so ends in "_tools" plugin_name = "_".join(tool_channel.channel_id.split("_")[:-1]) setattr(self.plugins, plugin_name, PluginWrapper()) plugin = getattr(self.plugins, plugin_name) tool_container = plugin.tools factor_container = plugin.factors for tool_stream in tool_channel.streams: try: # This is the tool initializer tool_function = self.channel_manager.get_tool_class(tool_stream.name) setattr(tool_container, tool_stream.name, tool_function) def create_factory_function(tool_func): """ This wrapper is needed to capture the tool_function closure :param tool_func: The tool function :return: The factory function """ base = tool_function.__bases__[0] if base == Tool: def tool_factory_function(sources, alignment_node=None, **parameters): """ Factory function for creating factors inside a workflow :param sources: source nodes :param alignment_node: alignment node :return: the created factor :type sources: list[Node] | tuple[Node] | None """ if not self.current_workflow: raise ValueError("No workflow context - use create_workflow first") # find matching tools (possibly different parameters) matches = [f for f in self.current_workflow.factors if f.tool.__class__ == tool_func] # make sure parameters are all the same full_matches = [m for m in matches if m.sources == sources and m.alignment_node == alignment_node and dict(m.tool.parameters_dict) == parameters] if len(full_matches) == 1: tool = full_matches[0].tool else: tool = tool_func(**parameters) return dict( workflow=self.current_workflow, tool=tool, sources=sources, alignment_node=alignment_node) return tool_factory_function elif base == MultiOutputTool: def tool_factory_function(source, splitting_node=None, **parameters): """ Factory function for creating factors inside a workflow :param source: source node :param splitting_node: splitting node :return: the created factor :type source: Node """ if not self.current_workflow: raise ValueError("No workflow context - use create_workflow first") # find matching tools (possibly different parameters) matches = [f for f in self.current_workflow.factors if f.tool.__class__ == tool_func] # make sure parameters are all the same full_matches = [m for m in matches if m.source == source and m.splitting_node == splitting_node and dict(m.tool.parameters_dict) == parameters] if len(full_matches) == 1: tool = full_matches[0].tool else: tool = tool_func(**parameters) return dict( workflow=self.current_workflow, tool=tool, source=source, splitting_node=splitting_node) return tool_factory_function elif base == AggregateTool: def tool_factory_function(sources, alignment_node, aggregation_meta_data, **parameters): """ Factory function for creating factors inside a workflow :param aggregation_meta_data: the meta data to aggregate over :param sources: source nodes :param alignment_node: alignment node :return: the created factor :type sources: list[Node] | tuple[Node] | None """ if not self.current_workflow: raise ValueError("No workflow context - use create_workflow first") # find matching tools (possibly different parameters) matches = [f for f in self.current_workflow.factors if f.tool.__class__ == tool_func] # make sure parameters are all the same full_matches = [m 
for m in matches if m.sources == sources and m.alignment_node == alignment_node and dict(m.tool.parameters_dict) == parameters] if len(full_matches) == 1: tool = full_matches[0].tool else: tool = tool_func(aggregation_meta_data=aggregation_meta_data, **parameters) return dict( workflow=self.current_workflow, tool=tool, sources=sources, alignment_node=alignment_node) return tool_factory_function elif base == SelectorTool: def tool_factory_function(sources, selector_meta_data, **parameters): """ Factory function for creating factors inside a workflow :param selector_meta_data: the meta data to select over :param sources: source nodes :return: the created factor :type sources: list[Node] | tuple[Node] | None """ if not self.current_workflow: raise ValueError("No workflow context - use create_workflow first") # find matching tools (possibly different parameters) matches = [f for f in self.current_workflow.factors if f.tool.__class__ == tool_func] # make sure parameters are all the same full_matches = [m for m in matches if m.sources == sources and m.selector_meta_data == selector_meta_data and dict(m.tool.parameters_dict) == parameters] if len(full_matches) == 1: tool = full_matches[0].tool else: tool = tool_func(selector_meta_data=selector_meta_data, **parameters) return dict( workflow=self.current_workflow, tool=tool, sources=sources) return tool_factory_function elif base == PlateCreationTool: def tool_factory_function(source, **parameters): """ Factory function for creating factors inside a workflow :param source: source node :return: the created factor :type source: Node """ if not self.current_workflow: raise ValueError("No workflow context - use create_workflow first") return dict( workflow=self.current_workflow, tool=tool_func(**parameters), source=source) return tool_factory_function else: raise NotImplementedError setattr(factor_container, tool_stream.name, create_factory_function(tool_function)) except (NameError, AttributeError, ImportError) as e: logging.warn('Unable to load tool {}: {}'.format(tool_stream.name, e))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_factory_function(tool_func):\n base = tool_function.__bases__[0]\n if base == Tool:\n def tool_factory_function(sources, alignment_node=None, **parameters):\n \"\"\"\n Factory function for creating factors inside a workflow\n\n :param sources: source nodes\n :param alignment_node: alignment node\n :return: the created factor\n :type sources: list[Node] | tuple[Node] | None\n\n \"\"\"\n if not self.current_workflow:\n raise ValueError(\"No workflow context - use create_workflow first\")\n\n # find matching tools (possibly different parameters)\n matches = [f for f in self.current_workflow.factors if f.tool.__class__ == tool_func]\n # make sure parameters are all the same\n full_matches = [m for m in matches if m.sources == sources\n and m.alignment_node == alignment_node\n and dict(m.tool.parameters_dict) == parameters]\n\n if len(full_matches) == 1:\n tool = full_matches[0].tool\n else:\n tool = tool_func(**parameters)\n\n return dict(\n workflow=self.current_workflow,\n tool=tool,\n sources=sources,\n alignment_node=alignment_node)\n\n return tool_factory_function\n elif base == MultiOutputTool:\n def tool_factory_function(source, splitting_node=None, **parameters):\n \"\"\"\n Factory function for creating factors inside a workflow\n\n :param source: source node\n :param splitting_node: splitting node\n :return: the created factor\n :type source: Node\n\n \"\"\"\n if not self.current_workflow:\n raise ValueError(\"No workflow context - use create_workflow first\")\n\n # find matching tools (possibly different parameters)\n matches = [f for f in self.current_workflow.factors if\n f.tool.__class__ == tool_func]\n # make sure parameters are all the same\n full_matches = [m for m in matches if m.source == source\n and m.splitting_node == splitting_node\n and dict(m.tool.parameters_dict) == parameters]\n\n if len(full_matches) == 1:\n tool = full_matches[0].tool\n else:\n tool = tool_func(**parameters)\n\n return dict(\n workflow=self.current_workflow,\n tool=tool,\n source=source,\n splitting_node=splitting_node)\n\n return tool_factory_function\n\n elif base == AggregateTool:\n def tool_factory_function(sources, alignment_node, aggregation_meta_data, **parameters):\n \"\"\"\n Factory function for creating factors inside a workflow\n\n :param aggregation_meta_data: the meta data to aggregate over\n :param sources: source nodes\n :param alignment_node: alignment node\n :return: the created factor\n :type sources: list[Node] | tuple[Node] | None\n\n \"\"\"\n if not self.current_workflow:\n raise ValueError(\"No workflow context - use create_workflow first\")\n\n # find matching tools (possibly different parameters)\n matches = [f for f in self.current_workflow.factors if\n f.tool.__class__ == tool_func]\n # make sure parameters are all the same\n full_matches = [m for m in matches if m.sources == sources\n and m.alignment_node == alignment_node\n and dict(m.tool.parameters_dict) == parameters]\n\n if len(full_matches) == 1:\n tool = full_matches[0].tool\n else:\n tool = tool_func(aggregation_meta_data=aggregation_meta_data, **parameters)\n\n return dict(\n workflow=self.current_workflow,\n tool=tool,\n sources=sources,\n alignment_node=alignment_node)\n\n return tool_factory_function\n elif base == SelectorTool:\n def tool_factory_function(sources, selector_meta_data, **parameters):\n \"\"\"\n Factory function for creating factors inside a workflow\n\n :param selector_meta_data: the meta data to select over\n :param sources: source nodes\n :return: the created factor\n :type 
sources: list[Node] | tuple[Node] | None\n\n \"\"\"\n if not self.current_workflow:\n raise ValueError(\"No workflow context - use create_workflow first\")\n\n # find matching tools (possibly different parameters)\n matches = [f for f in self.current_workflow.factors if\n f.tool.__class__ == tool_func]\n # make sure parameters are all the same\n full_matches = [m for m in matches if m.sources == sources\n and m.selector_meta_data == selector_meta_data\n and dict(m.tool.parameters_dict) == parameters]\n\n if len(full_matches) == 1:\n tool = full_matches[0].tool\n else:\n tool = tool_func(selector_meta_data=selector_meta_data, **parameters)\n\n return dict(\n workflow=self.current_workflow,\n tool=tool,\n sources=sources)\n\n return tool_factory_function\n elif base == PlateCreationTool:\n def tool_factory_function(source, **parameters):\n \"\"\"\n Factory function for creating factors inside a workflow\n\n :param source: source node\n :return: the created factor\n :type source: Node\n\n \"\"\"\n if not self.current_workflow:\n raise ValueError(\"No workflow context - use create_workflow first\")\n\n return dict(\n workflow=self.current_workflow,\n tool=tool_func(**parameters),\n source=source)\n\n return tool_factory_function\n else:\n raise NotImplementedError", "def tool_factory_function(sources, selector_meta_data, **parameters):\n if not self.current_workflow:\n raise ValueError(\"No workflow context - use create_workflow first\")\n\n # find matching tools (possibly different parameters)\n matches = [f for f in self.current_workflow.factors if\n f.tool.__class__ == tool_func]\n # make sure parameters are all the same\n full_matches = [m for m in matches if m.sources == sources\n and m.selector_meta_data == selector_meta_data\n and dict(m.tool.parameters_dict) == parameters]\n\n if len(full_matches) == 1:\n tool = full_matches[0].tool\n else:\n tool = tool_func(selector_meta_data=selector_meta_data, **parameters)\n\n return dict(\n workflow=self.current_workflow,\n tool=tool,\n sources=sources)", "def get_factory():", "def tool_factory_function(source, splitting_node=None, **parameters):\n if not self.current_workflow:\n raise ValueError(\"No workflow context - use create_workflow first\")\n\n # find matching tools (possibly different parameters)\n matches = [f for f in self.current_workflow.factors if\n f.tool.__class__ == tool_func]\n # make sure parameters are all the same\n full_matches = [m for m in matches if m.source == source\n and m.splitting_node == splitting_node\n and dict(m.tool.parameters_dict) == parameters]\n\n if len(full_matches) == 1:\n tool = full_matches[0].tool\n else:\n tool = tool_func(**parameters)\n\n return dict(\n workflow=self.current_workflow,\n tool=tool,\n source=source,\n splitting_node=splitting_node)", "def tool_factory_function(sources, alignment_node, aggregation_meta_data, **parameters):\n if not self.current_workflow:\n raise ValueError(\"No workflow context - use create_workflow first\")\n\n # find matching tools (possibly different parameters)\n matches = [f for f in self.current_workflow.factors if\n f.tool.__class__ == tool_func]\n # make sure parameters are all the same\n full_matches = [m for m in matches if m.sources == sources\n and m.alignment_node == alignment_node\n and dict(m.tool.parameters_dict) == parameters]\n\n if len(full_matches) == 1:\n tool = full_matches[0].tool\n else:\n tool = tool_func(aggregation_meta_data=aggregation_meta_data, **parameters)\n\n return dict(\n workflow=self.current_workflow,\n tool=tool,\n 
sources=sources,\n alignment_node=alignment_node)", "def tool_factory_function(source, **parameters):\n if not self.current_workflow:\n raise ValueError(\"No workflow context - use create_workflow first\")\n\n return dict(\n workflow=self.current_workflow,\n tool=tool_func(**parameters),\n source=source)", "def tool_factory_function(sources, alignment_node=None, **parameters):\n if not self.current_workflow:\n raise ValueError(\"No workflow context - use create_workflow first\")\n\n # find matching tools (possibly different parameters)\n matches = [f for f in self.current_workflow.factors if f.tool.__class__ == tool_func]\n # make sure parameters are all the same\n full_matches = [m for m in matches if m.sources == sources\n and m.alignment_node == alignment_node\n and dict(m.tool.parameters_dict) == parameters]\n\n if len(full_matches) == 1:\n tool = full_matches[0].tool\n else:\n tool = tool_func(**parameters)\n\n return dict(\n workflow=self.current_workflow,\n tool=tool,\n sources=sources,\n alignment_node=alignment_node)", "def create_analysis_tools(self):\r\n raise NotImplementedError()", "def get_tools(self, env_jf, tool_factory):\n tool_name_mapping = {t.value: t for t in ToolTypes}\n tools = []\n\n marker_sec_goals = []\n if 'marker_sec_goals' in self.jf and self.settings.with_subgoals:\n for marker_sec_goal in self.jf['marker_sec_goals']:\n pos = eval(marker_sec_goal)\n marker_sec_goals.append(tool_factory.create(ToolTypes.GOAL, pos,\n {'color': subgoal_color, 'radius':\n self.settings.sec_goal_radius}))\n\n self.marker_sec_goals = marker_sec_goals\n\n target_sec_goals = []\n if 'target_sec_goals' in self.jf and self.settings.with_subgoals:\n for target_sec_goal in self.jf['target_sec_goals']:\n pos = eval(target_sec_goal)\n target_sec_goals.append(tool_factory.create(ToolTypes.GOAL, pos,\n {'color': subgoal_color, 'radius':\n self.settings.sec_goal_radius}))\n\n self.target_sec_goals = target_sec_goals\n\n for env_tool in env_jf:\n for prop, val in env_tool.items():\n if prop != 'name' and prop != 'color' and isinstance(val, str):\n env_tool[prop] = eval(val)\n elif prop == 'color' and isinstance(val, str):\n env_tool[prop] = val\n\n name = env_tool['name']\n pos = np.array(env_tool['pos'])\n lookup_name = name + ''\n if 'id' in env_tool:\n tool_id = env_tool['id']\n lookup_name = name + ':' + str(tool_id)\n noise = get_noise(self.eval_rnd_map, lookup_name)\n\n pos += noise\n pass_params = {k: v for k, v in env_tool.items() if k not in ['name','pos', 'id']}\n\n tool_type = tool_name_mapping[name]\n tools.append(tool_factory.create(tool_type, pos, pass_params))\n\n tools.extend(marker_sec_goals)\n tools.extend(target_sec_goals)\n return tools", "def factory_type_dict():\n return {'filter' : filters.generate_filter,\n 'global_options' : global_options.generate_global_options,\n 'input_device' : input_devices.generate_input_device,\n 'input_stream' : input_streams.generate_input_stream,\n 'output_device' : output_devices.generate_output_device,\n 'output_stream' : output_streams.generate_output_stream}", "def makeTestProcessingTool(test_processing_tool_path, test_processing_factory_path):\r\n\r\n className = splitext(basename(test_processing_tool_path))[0]\r\n factory_name = splitext(basename(test_processing_factory_path))[0]\r\n\r\n with open(test_processing_tool_path, 'w') as f:\r\n f.write(\"\"\"\\\r\n'''\r\nTest processing tool class - should be deleted upon completion of test\r\n'''\r\n\r\n'''___Built-In Modules___'''\r\nimport sys\r\nfrom os.path import 
dirname\r\n\r\n'''___Third-Party Modules___'''\r\n\r\n'''___NPL Modules___'''\r\ndataProcessing_directory = dirname(dirname(dirname(dirname(__file__))))\r\nsys.path.append(dataProcessing_directory)\r\nfrom AbstractProcessingTool import AbstractProcessingTool\r\n\r\nsys.path.append(dirname(__file__))\r\nfrom %s import %s\r\n\r\n\r\nclass %s(AbstractProcessingTool):\r\n\r\n def setProcessingFactory(self, product_string):\r\n processingFactory = %s\r\n return processingFactory\r\n\r\n\r\nif __name__ == \"__main__\":\r\n pass\r\n\"\"\" % (factory_name, factory_name, className, factory_name))\r\n\r\n return 0", "def get_tools(cls):\n pass", "def factory_method(self):\n pass", "def factory_method(self):\n pass", "def get_parts(self, tool_factory):\n if not self.settings.override_level_settings:\n if 'max_num_steps' in self.jf:\n self.max_num_steps = self.jf['max_num_steps']\n if 'overlap_thresh' in self.jf:\n self.settings.overlap_threshold = self.jf['overlap_thresh']\n\n\n if 'marker_must_hit' in self.jf:\n self.marker_must_hit = self.jf['marker_must_hit']\n if 'sec_goal_reward' in self.jf:\n self.sec_goal_reward = self.jf['sec_goal_reward']\n if 'target_reward' in self.jf:\n self.target_reward = self.jf['target_reward']\n if 'goal_is_basket' in self.jf:\n self.goal_is_basket = self.jf['goal_is_basket']\n if 'ball_is_basket' in self.jf:\n self.ball_is_basket = self.jf['ball_is_basket']\n if 'moving_goal' in self.jf:\n self.moving_goal = self.jf['moving_goal']\n if 'target_ball_radius' in self.jf:\n self.target_ball_radius = self.jf['target_ball_radius']\n\n if self.task_id is None or self.eval_rnd_map is None:\n self.eval_rnd_map = self.gen_noise_apply_map()\n\n target_pos = self.gen_target_pos + get_noise(self.eval_rnd_map, 'target')\n goal_pos = self.gen_goal_pos + get_noise(self.eval_rnd_map, 'goal')\n\n if self.ball_is_basket:\n target_ball = tool_factory.create(ToolTypes.BASKET_BALL, target_pos)\n elif self.target_ball_radius is not None:\n target_ball = tool_factory.create(ToolTypes.TARGET_BALL, target_pos, {'radius': self.target_ball_radius})\n else:\n target_ball = tool_factory.create(ToolTypes.TARGET_BALL, target_pos)\n env_tools = self.get_tools(self.env_jf, tool_factory)\n\n return env_tools, target_ball, goal_pos", "def get_tools(filters=None, expand=False, **kwargs):\n avail = [dict(name=k, **v.metadata) for k, v in load_plugins('tool').items()]\n\n if filters is not None:\n for k, v in filters.items():\n if k == 'dataset':\n m = get_metadata(v).get(v)\n kwargs['datatype'] = m.get('datatype')\n kwargs['parameters'] = m.get('parameter')\n catalog_entry = m.get('catalog_entry')\n geometry = get_metadata(catalog_entry).get(catalog_entry).get('geometry')\n if geometry is not None:\n kwargs['geotype'] = geometry.geom_type\n return get_tools(filters=kwargs, expand=expand)\n elif k == 'group':\n avail = [f for f in avail if v == f['group']]\n else:\n avail = [f for f in avail if f['operates_on'][k] is None or v in f['operates_on'][k]]\n\n if expand:\n avail = {f.pop('name'): f for f in avail}\n else:\n avail = [f['name'] for f in avail]\n\n return avail", "def __init__(self):\n \n self.label = \"ArcSDM Tools\"\n self.alias = \"ArcSDM\" \n\n # List of tool classes associated with this toolbox\n self.tools = [PartitionNNInputFiles, CombineNNOutputFiles, NeuralNetworkOutputFiles, NeuralNetworkInputFiles, \n CalculateWeightsTool,SiteReductionTool,CategoricalMembershipToool,\n CategoricalAndReclassTool, TOCFuzzificationTool, CalculateResponse, LogisticRegressionTool, Symbolize, \n ROCTool, 
AgterbergChengCITest, AreaFrequencyTable, GetSDMValues, GrandWofe]", "def __init__(self):\n self.label = \"Create\"\n self.alias = \"\"\n\n # List of tool classes associated with this toolbox\n if core.get_pass():\n self.tools = [Fbound, Roads, Diekdikisi]\n else:\n self.tools = []", "def __init__(self, factory, *args, **kwargs):\n super(HelperDict, self).__init__(*args, **kwargs)\n self.factory = factory", "def _make_functions(namespace):\n for fil in registry.filters:\n func_name = camel2enthought(fil.id)\n class_name = fil.id\n if func_name.endswith('_filter'):\n func_name = func_name[:-7]\n class_name = class_name[:-6]\n class_name = class_name + 'Factory'\n\n # Don't create any that are already defined.\n if class_name in namespace:\n continue\n\n # The class to wrap.\n klass = new.classobj(class_name, \n (_AutomaticFilterFactory,),\n {'__doc__': fil.help,}\n )\n klass._metadata = fil\n\n # The mlab helper function.\n func = make_function(klass)\n\n # Inject class/function into the namespace and __all__.\n namespace[class_name] = klass\n namespace[func_name] = func\n __all__.append(func_name)", "def __init__(self):\n self.label = \"RAPID Tools\"\n self.alias = \"RAPIDTools\"\n\n # List of tool classes associated with this toolbox\n self.tools = [AddSPTFields,\n AutomaticRAPIDfileGenerator, \n CopyDataToServer,\n CreateNetworkConnectivityFile,\n CreateNetworkConnectivityFileNHDPlus,\n CreateMuskingumParameterFiles,\n CreateMuskingumKFile,\n CreateMuskingumKfacFile,\n CreateMuskingumXField, \n CreateMuskingumXFile, \n CreateRivIDGageFile, \n CreateSubsetFile,\n CreateWeightTableFromWRFGeogrid,\n CreateInflowFileFromWRFHydroRunoff,\n CreateWeightTableFromECMWFRunoff,\n CreateInflowFileFromECMWFRunoff,\n CreateWeightTableFromLDASRunoff,\n CreateWeightTableFrom2DLatLonRunoff,\n CreateDischargeTable,\n CreateDischargeMap,\n FlowlineToPoint,\n DEMtoStreamNetwork,\n PublishDischargeMap,\n StreamNetworktoRAPID,\n StreamNetworktoSPT,\n UpdateWeightTable,\n UpdateDischargeMap,\n ]", "def _derived_features(self):\n for created_feature, creator in self.feature_creators.items():\n self.parameters[created_feature] = creator(self.parameters)", "def __init__(self):\n self.pyranose_fac = PyranoseFactory()\n self.furanose_fac = FuranoseFactory()\n self.open_fac = OpenFactory()\n\n self.keys = set(self.pyranose_fac.keys()).union(self.furanose_fac.keys())", "def register_standard_tools(self):\n t = self.add_tool(SelectTool)\n self.set_default_tool(t)\n self.add_tool(RectZoomTool)\n self.add_tool(BasePlotMenuTool, \"item\")\n self.add_tool(ExportItemDataTool)\n try:\n import spyderlib.widgets.objecteditor # analysis:ignore\n self.add_tool(EditItemDataTool)\n except ImportError:\n pass\n self.add_tool(ItemCenterTool)\n self.add_tool(DeleteItemTool)\n self.add_separator_tool()\n self.add_tool(BasePlotMenuTool, \"grid\")\n self.add_tool(BasePlotMenuTool, \"axes\")\n self.add_tool(DisplayCoordsTool)\n if self.get_itemlist_panel():\n self.add_tool(ItemListPanelTool)", "def __init__(self):\n self.label = \"PFRR Tools\"\n self.alias = \"PFRR Tools\"\n\n # List of tool classes associated with this toolbox\n self.tools = [Ending_Point, Range_Distance, PFRR]", "def CreateTool(tool_name, adb):\n if not tool_name:\n return BaseTool()\n\n ctor = TOOL_REGISTRY.get(tool_name)\n if ctor:\n return ctor(adb)\n else:\n print 'Unknown tool %s, available tools: %s' % (\n tool_name, ', '.join(sorted(TOOL_REGISTRY.keys())))\n sys.exit(1)", "def _init_extractors(self):\n @self.extractors_wrapper(\"networkx\")\n def 
get_nx_extractor(graph):\n \"\"\"\n :param graph: networkx.Graph\n :returns: projx.nx_extractor\n \"\"\"\n return nx_xtrct.nx_extractor(\n self.extractor_json[self.extractor_name], graph\n )\n\n @self.extractors_wrapper(\"neo4j\")\n def get_neo4j_extractor(graph):\n \"\"\"\n :returns: projx.nx_extractor\n \"\"\"\n return neo4j_xtrct.neo4j_extractor(\n self.extractor_json[self.extractor_name], graph\n )\n\n @self.extractors_wrapper(\"edgelist\")\n def get_edgelist_extractor(graph):\n \"\"\"\n :returns: projx.nx_extractor\n \"\"\"\n return edgelist_xtrct.edgelist_extractor(\n self.extractor_json[self.extractor_name], graph\n )", "def __init__( self, config_filenames, tool_root_dir, app, tool_conf_watcher=None ):\n # The _dynamic_tool_confs list contains dictionaries storing\n # information about the tools defined in each shed-related\n # shed_tool_conf.xml file.\n self._dynamic_tool_confs = []\n self._tools_by_id = {}\n self._integrated_section_by_tool = {}\n # Tool lineages can contain chains of related tools with different ids\n # so each will be present once in the above dictionary. The following\n # dictionary can instead hold multiple tools with different versions.\n self._tool_versions_by_id = {}\n self._workflows_by_id = {}\n # In-memory dictionary that defines the layout of the tool panel.\n self._tool_panel = ToolPanelElements()\n self._index = 0\n self.data_manager_tools = odict()\n self._lineage_map = LineageMap( app )\n # Sets self._integrated_tool_panel and self._integrated_tool_panel_config_has_contents\n self._init_integrated_tool_panel( app.config )\n # The following refers to the tool_path config setting for backward compatibility. The shed-related\n # (e.g., shed_tool_conf.xml) files include the tool_path attribute within the <toolbox> tag.\n self._tool_root_dir = tool_root_dir\n self.app = app\n self._tool_watcher = get_tool_watcher( self, app.config )\n if tool_conf_watcher:\n self._tool_conf_watcher = tool_conf_watcher # Avoids (re-)starting threads in uwsgi\n else:\n self._tool_conf_watcher = get_tool_conf_watcher(lambda: self.handle_reload_toolbox())\n self._filter_factory = FilterFactory( self )\n self._tool_tag_manager = tool_tag_manager( app )\n self._init_tools_from_configs( config_filenames )\n if self.app.name == 'galaxy' and self._integrated_tool_panel_config_has_contents:\n # Load self._tool_panel based on the order in self._integrated_tool_panel.\n self._load_tool_panel()\n self._save_integrated_tool_panel()", "def _register_factory(self):\n for name, info in self._plugins.items():\n if info['priority']:\n factory = getattr(info['plugin'], 'factory', None)\n if callable(factory):\n registry[info['factory']] = info['plugin'].factory\n registry.freeze()", "def named_factory(klass):\n\n class _factory(Location):\n zope.interface.implements(INamedUtilBase)\n def __init__(self):\n self.title = klass.title\n self.label = klass.label\n self.description = klass.description\n def __call__(self, *a, **kw):\n # returns an instantiated factory with a context\n factory = klass(*a, **kw)\n factory.__name__ = self.__name__\n return factory\n # create/return instance of the factory that instantiates the \n # classes below.\n return _factory()" ]
[ "0.7297803", "0.6938939", "0.68173605", "0.65987945", "0.6445594", "0.64262986", "0.63660765", "0.6365673", "0.6329075", "0.60519093", "0.6003902", "0.594429", "0.5937257", "0.5937257", "0.59068316", "0.58427525", "0.58331877", "0.57943475", "0.5717975", "0.57174146", "0.56580216", "0.5618565", "0.5596282", "0.5589234", "0.5580796", "0.55786926", "0.55616343", "0.555557", "0.55398774", "0.5505331" ]
0.7389015
0
Load a model based on its name model.name and the checkpoint iteration step
def load_model_by_name(model, global_step, device=None, path="/scratch/users/zucks626/ADNI/IPMI/checkpoints/"): # path = "/scratch/users/zucks626/ADNI/ae_cls/checkpoints/" file_path = path + model.name + "/" + 'model-{:05d}.pt'.format(global_step) state = torch.load(file_path, map_location=device) model.load_state_dict(state) print("Loaded from {}".format(file_path))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_model(self, checkpoint):\n print(f'Load parameters from {checkpoint}')\n epoch = re.match(r\"[0-9]*\", os.path.basename(checkpoint)).group(0)\n self.epoch_i = int(epoch)\n self.model.load_state_dict(torch.load(checkpoint))", "def maybe_load_model(name, split_num, checkpoint_dir, resume_from_epoch,\n batch_norm, l1_factor, l2_factor, optimizer):\n basename = get_basename(name, split_num)\n model_filename_fmt = get_model_filename_fmt(basename)\n model_filename = model_filename_fmt.format(epoch=resume_from_epoch)\n\n checkpoint_path = os.path.join(checkpoint_dir, model_filename)\n\n if resume_from_epoch > 0 and os.path.isfile(checkpoint_path):\n\n click.secho(f\"Found model checkpoint '{checkpoint_path}'. \"\n f\"Resuming from epoch {resume_from_epoch}.\", fg='green')\n\n model = load_model(checkpoint_path)\n\n initial_epoch = resume_from_epoch\n\n else:\n\n click.secho(f\"Could not load model checkpoint '{checkpoint_path}' \"\n \"or `resume_from_epoch == 0`. Building new model.\",\n fg='yellow')\n\n model = build_model(output_dim=1, batch_norm=batch_norm,\n kernel_regularizer=l1_l2(l1_factor, l2_factor))\n # optimizer = Adam(beta_1=0.5)\n model.compile(loss='binary_crossentropy', optimizer=optimizer,\n metrics=['accuracy'])\n\n initial_epoch = 0\n\n return model, initial_epoch", "def load_model(model, checkpoint_path: str): \r\n checkpoint = torch.load(checkpoint_path)\r\n model.load_state_dict(checkpoint['model'])\r\n epoch = checkpoint['epoch']\r\n print('Loaded model from {}, epoch {}'.format(checkpoint_path, epoch))", "def load(self, sess, step=None):\n if step==None:\n ckpt_path = tf.train.latest_checkpoint(self.model.ckpt_dir)\n else:\n ckpt_path = os.path.join(self.model.ckpt_dir, 'model-'+str(step))\n self.saver.restore(sess, ckpt_path)\n step = tf.train.global_step(sess, self.gstep)\n print('Load model at step {} from check point {}.'.format(step, ckpt_path))", "def load_checkpoint(tag, params, model):\r\n file_name = os.path.join(\"saved_models\", params.path, tag + \".pt\")\r\n logger.info(\"Load checkpoint from %s\" % file_name)\r\n if os.path.exists(file_name):\r\n checkpoint = torch.load(file_name, map_location='cpu')\r\n params.training_id = checkpoint['training_id']\r\n logger.info(checkpoint['training_id'])\r\n model.global_step = checkpoint['global_step']\r\n model.load_state_dict(checkpoint['model'])\r\n for i, optimizer in enumerate(model.optimizers):\r\n optimizer.load_state_dict(checkpoint['optimizers'][i])\r\n else:\r\n raise Exception(\"Checkpoint not found.\")", "def load_model(model, transfer_from, sess):\n param_path = final_param_path(model.name, transfer_from)\n step_to_load = FINAL_PARAM_STEPS[model.name][transfer_from]\n util.load_checkpoint_at_step(\n model_name=model.name,\n global_step=step_to_load,\n saver=tf.train.Saver(),\n sess=sess,\n path=param_path)", "def load_checkpoint(model, model_name='model', validation_id=None):\n path = output_path(_checkpoint_path.format(model_name), validation_id=validation_id, have_validation=True)\n _load_model(model.module if type(model) is torch.nn.DataParallel else model, model_name, path=path, reload=True)", "def load_model(self, ckpt_fn):\n checkpoint = torch.load(ckpt_fn)\n self.net_.load_state_dict(checkpoint[\"model\"])\n self.optimizer_.load_state_dict(checkpoint[\"optimizer\"])\n self.epoch_ = checkpoint[\"epoch\"]\n self.global_step_ = checkpoint[\"global_step\"]\n self.model_samples_ = deque(checkpoint[\"model_samples\"])\n self.sampler.load_state_dict(checkpoint[\"sampler_state\"])\n 
self.ais_loss.load_state_dict(checkpoint[\"ais_state\"])\n self.replay_prob = checkpoint[\"replay_prob\"]\n self.max_replay = checkpoint[\"max_replay\"]", "def load_model(name):\n\tmodel = joblib.load(\"data/{}/{}.model\".format(name, name))\n\t# Setting n_jobs to 1 in case it was set to a higher number while training the model seems to makes predictions of single samples much faster.\n\tmodel.n_jobs = 1\n\treturn model", "def preload_model(\n mlflow_tracking_uri: str,\n experiment_name: str,\n run_id: str,\n):\n\n mlflow.set_tracking_uri(mlflow_tracking_uri)\n mlflow.set_experiment(experiment_name)\n experiment_details = mlflow.get_experiment_by_name(experiment_name)\n\n mlflow.end_run()\n mlflow.start_run(run_id=run_id)\n\n # pull model from tracking uri\n artifact_loc = (\n str(experiment_details.artifact_location)\n .replace(\"file:\", \"\")\n .replace(\"///\", \"\")\n )\n loc_prefix = \"\"\n if \"P1-AnalyzeTrades\" not in os.getcwd():\n loc_prefix = r\"P1-AnalyzeTrades/\"\n\n metrics, params, tags = parse_mlflow_info(mlflow.get_run(run_id))\n\n model_type = get_model_type(tags)\n\n if model_type == \"sklearn\":\n try: # first try local path]\n mdl = pickle.load(\n open(f\"{artifact_loc}/{run_id}/artifacts/model/model.pkl\", \"rb\")\n )\n except: # then try repo specific path for finalized cases\n mdl = pickle.load(\n open(f\"{loc_prefix}mlruns/0/{run_id}/artifacts/model/model.pkl\", \"rb\")\n )\n else:\n # for h2o models\n h2o.init()\n try:\n logged_model = f\"runs:/{run_id}/model\"\n # logged_model = f'mlruns/0/{run_id}/artifacts/model'\n mdl = mlflow.pyfunc.load_model(logged_model)\n\n # mojo deprecated\n # mdl = h2o.import_mojo(f'{artifact_loc}/{run_id}/artifacts/')\n except:\n logged_model = f\"{loc_prefix}mlruns/0/{run_id}/artifacts/model\"\n mdl = mlflow.pyfunc.load_model(logged_model)\n\n mlflow.end_run()\n\n # load cat dict, if available\n cat_dict = {}\n cat_dict_loc = f\"{artifact_loc}/{run_id}/artifacts/cat_dict.pkl\"\n if os.path.exists(cat_dict_loc):\n cat_dict = pickle.load(open(cat_dict_loc, \"rb\"))\n else: # then try repo specific path for finalized cases\n cat_dict_loc = f\"{loc_prefix}mlruns/0/{run_id}/artifacts/cat_dict.pkl\"\n if os.path.exists(cat_dict_loc):\n cat_dict = pickle.load(open(cat_dict_loc, \"rb\"))\n\n return mdl, cat_dict", "def load(self, model_name: str, model_dir: str = \"checkpoints\") -> None:\n self.model.load_state_dict(\n torch.load(os.path.join(model_dir, f\"{model_name}.pt\"))\n )", "def load_checkpoint(self, model):\n print(f\"load model {self.save_model_path}\")\n model.load_state_dict(torch.load(self.save_model_path))", "def load_model(model_name):\n model = get_model(training = False)\n checkpoint = torch.load('../models/' + model_name)\n model.load_state_dict(checkpoint['model_state_dict'])\n return model", "def load_model(self, ckpt_name=\"best_model.pth\"):\n path = \"/\".join(ckpt_name.split(\"/\")[:-1])\n chkpt = torch.load(ckpt_name)\n self.start_epoch = chkpt['epoch']\n self.best_metric = chkpt['best_metric']\n\n # fix the DataParallel caused problem with keys names\n if self.multi_gpu_flag:\n new_state_dict = fix_multigpu_chkpt_names(chkpt['state_dict'], drop=False)\n self.net.load_state_dict(new_state_dict)\n else:\n try:\n self.net.load_state_dict(chkpt['state_dict'])\n except:\n new_state_dict = fix_multigpu_chkpt_names(chkpt['state_dict'], drop=True)\n self.net.load_state_dict(new_state_dict)\n\n if self.load_optimizer_state:\n self.optimizer.load_state_dict(chkpt['optimizer'])\n logging.info(\"******** State loaded 
********\")\n\n training_meta = pickle.load(open(f\"{path}/training_meta.pickle.dat\", \"rb\"))\n for k, v in training_meta.items():\n if k in self.__class__.__params:\n setattr(self, k, v)\n logging.info(\"******** Training params loaded ********\")", "def load_checkpoint(self, label):\n model_dir = os.path.join(\n config.results_dir, config.experiment_name, 'checkpoints')\n model_file = os.path.join(model_dir, '{}_net.pth.tar'.format(label))\n\n print(\"Loading model from {}\".format(model_file))\n model_dict = torch.load(model_file)\n\n self.use_cuda = model_dict['use_cuda']\n if self.use_cuda:\n self.net.cuda()\n\n self.net.load_state_dict(model_dict['net_state_dict'])", "def load_model(model_name):\r\n model = joblib.load(model_name)\r\n return model", "def load_checkpoints(args, model): \n print('Loading the model checkpoints from iter {}...'.format(args.resume_iter))\n checkpoint_path = os.path.join(config.checkpoint_path, args.model_type)\n\n gen_g_path = os.path.join(checkpoint_path, '{}-Gen_g.ckpt'.format(args.resume_iter))\n gen_f_path = os.path.join(checkpoint_path, '{}-Gen_f.ckpt'.format(args.resume_iter))\n model.gen_g.load_state_dict(torch.load(gen_g_path, map_location=lambda storage, loc: storage))\n model.gen_f.load_state_dict(torch.load(gen_f_path, map_location=lambda storage, loc: storage))\n\n if args.train:\n dis_c_path = os.path.join(checkpoint_path, '{}-Dis_c.ckpt'.format(args.resume_iter))\n dis_t_path = os.path.join(checkpoint_path, '{}-Dis_t.ckpt'.format(args.resume_iter))\n model.dis_c.load_state_dict(torch.load(dis_c_path, map_location=lambda storage, loc: storage))\n model.dis_t.load_state_dict(torch.load(dis_t_path, map_location=lambda storage, loc: storage))", "def load_model(self, fname: str) -> None:\n checkpoint_data = torch.load(fname)\n\n # Load the models\n # P-Net\n model_import_path = checkpoint_data['p_net']['model_import_path']\n imp = importlib.import_module(model_import_path)\n mod = getattr(imp, checkpoint_data['p_net']['model_name'])\n self.p_net = mod()\n self.p_net.set_params(checkpoint_data['p_net'])\n # Q-Net\n model_import_path = checkpoint_data['q_net']['model_import_path']\n imp = importlib.import_module(model_import_path)\n mod = getattr(imp, checkpoint_data['q_net']['model_name'])\n self.q_net = mod()\n self.q_net.set_params(checkpoint_data['q_net'])", "def load_from_checkpoint(results_dir, load_fn, args):\n ckpt_dir = os.path.join(results_dir, \"tb\", \"version_0\", \"checkpoints\")\n files = os.listdir(ckpt_dir)\n assert len(files) > 0, \"Checkpoint directory is empty\"\n ckpt_path = os.path.join(ckpt_dir, files[-1])\n model = load_fn(checkpoint_path=ckpt_path, args=args)\n return model", "def load_model(path):\n if os.path.isfile(path):\n print(\"=> loading checkpoint '{}'\".format(path))\n checkpoint = torch.load(path)\n\n # # size of the top layer\n # N = checkpoint['state_dict']['top_layer.bias'].size()\n #\n # # build skeleton of the model\n # sob = 'sobel.0.weight' in checkpoint['state_dict'].keys()\n # model = models.__dict__[checkpoint['arch']](sobel=sob, out=int(N[0]))\n #\n # # deal with a dataparallel table\n # def rename_key(key):\n # if not 'module' in key:\n # return key\n # return ''.join(key.split('.module'))\n #\n # checkpoint['state_dict'] = {rename_key(key): val\n # for key, val\n # in checkpoint['state_dict'].items()}\n #\n # # load weights\n # model.load_state_dict(checkpoint['state_dict'])\n # print(\"Loaded\")\n # else:\n # model = None\n # print(\"=> no checkpoint found at '{}'\".format(path))\n\n # net = 
models.__dict__['ResNet18'](low_dim=128)\n # net = models.__dict__['resnet18'](low_dim=128)\n\n net = models.__dict__['alexnet'](out=128)\n # net = models.__dict__['Alexnet_C'](out=args.low_dim)\n\n net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))\n net.load_state_dict(checkpoint['net'])\n\n return net", "def create_or_load_model(model, model_dir, session, name):\n latest_ckpt = tf.train.latest_checkpoint(model_dir)\n if latest_ckpt:\n start_time = time.time()\n # It only takes a few seconds to initialize all variables.\n session.run(tf.global_variables_initializer())\n logging.info(\n \"Initialize %s model with fresh parameters before loading variables \"\n \"from the checkpoint, time %.2fs\", name,\n time.time() - start_time)\n model = load_model(model, latest_ckpt, session, name)\n else:\n start_time = time.time()\n session.run(tf.global_variables_initializer())\n session.run(tf.tables_initializer())\n utils.print_out(\" created %s model with fresh parameters, time %.2fs\" %\n (name, time.time() - start_time))\n\n global_step = model.global_step.eval(session=session)\n return model, global_step", "def load_model_from_checkpoint(self, path: str):\n ckpt = torch.load(path, map_location='cpu')\n self.net_q.encoder.load_state_dict(ckpt['encoder'])\n self.net_q.head.load_state_dict(ckpt['head'])\n self.net_ps.load_state_dict(ckpt['net_ps'])\n self.net_k.load_state_dict(ckpt['net_k'])\n self.queue.load_state_dict(ckpt['queue'])\n self.optimizer.load_state_dict(ckpt['optimizer'])\n if 'scheduler' in ckpt:\n self.scheduler.load_stae_dict(ckpt['scheduler'])\n self.move_optimizer_states(self.optimizer, self.local_rank)", "def load_model (checkpoint_path, model, opt_fn=None, loss_fn=None, epoch=None):\n\n if not os.path.exists(checkpoint_path):\n raise Exception (\"The {} does not exist!\".format(checkpoint_path))\n\n ckpt = torch.load(checkpoint_path)\n model.load_state_dict(ckpt['model_state_dict'])\n\n if opt_fn is not None and loss_fn is not None:\n opt_fn.load_state_dict(ckpt['optimizer_state_dict'])\n epoch = ckpt['epoch']\n loss_fn = ckpt['loss']\n return model, opt_fn, loss_fn, epoch\n else:\n return model", "def load_model(path, model, optimizer):\n print(\"LOADING MODEL...\")\n ckpt = tf.train.Checkpoint(model=model, optimizer=optimizer)\n status = ckpt.restore(tf.train.latest_checkpoint(path))\n ckpt_manager = tf.train.CheckpointManager(\n checkpoint=ckpt, \n directory=FLAGS.model_dir, \n max_to_keep=3 \n )\n return model, optimizer, ckpt, ckpt_manager", "def load(model, name=\"store/base\"):\n if torch.cuda.is_available():\n pretrained_dict = torch.load(name + \".pt\")\n else:\n pretrained_dict = torch.load(name + \".pt\", map_location=torch.device('cpu'))\n print(\"Loaded\", name + \" model.\")\n model_dict = model.state_dict()\n model_dict.update(pretrained_dict)\n model.load_state_dict(model_dict)", "def checkpoint_load(checkpoint_path, gpu):\n model_info = torch.load(checkpoint_path)\n model = models.vgg19(pretrained=True)\n for param in model.parameters():\n param.requires_grad = False\n \n model.class_to_idx = model_info['class_to_idx']\n\n model = classifier(model)\n model.load_state_dict(model_info[\"model_state_dict\"])\n return model, model.class_to_idx", "def load(self):\n if self.model is None:\n raise Exception(\"Build the model first.\")\n\n print(\"Loading model checkpoint {} ...\\n\".format(self.config[\"model\"][\"restore_model\"]))\n self.model.load_weights(self.config[\"model\"][\"restore_model\"])\n print(\"Model loaded!\")", "def 
load(self):\n if self.model is None:\n raise Exception(\"Build the model first.\")\n\n print(\"Loading model checkpoint {} ...\\n\".format(self.config[\"model\"][\"restore_model\"]))\n self.model.load_weights(self.config[\"model\"][\"restore_model\"])\n print(\"Model loaded!\")", "def load(loadname, checkpoint=None):\n ckpt_dir = \"./models/tf_ckpt_\" + loadname + \"/\"\n if checkpoint is not None:\n status = checkpoint.restore(tf.train.latest_checkpoint(ckpt_dir))\n status.assert_consumed()\n print(\"Loaded checkpoint\")\n else:\n print(\"Not Loading any checkpoint\")\n print(\"Starting training from initial configuration\")", "def __load(self, model_name):\n\n print(\"Loading model.\")\n tstart = datetime.now()\n\n # Temporary directory to extract the zipped information\n with tempfile.TemporaryDirectory() as dirpath:\n\n # Unzip the directory that contains the saved model(s)\n with zipfile.ZipFile(model_name + \".zip\", \"r\") as zip_ref:\n zip_ref.extractall(dirpath)\n\n # Load metadata\n metadata = pickle.load(open(dirpath + \"/metadata.pickle\", \"rb\"))\n\n # Re-load metadata\n self.__dict__.update(metadata)\n\n # Load all sub-models\n try:\n self.__mol_to_latent_model = load_model(\n dirpath + \"/mol_to_latent_model.h5\"\n )\n except:\n print(\"'mol_to_latent_model' not found, setting to None.\")\n self.__mol_to_latent_model = None\n\n self.__latent_to_states_model = load_model(\n dirpath + \"/latent_to_states_model.h5\"\n )\n self.__batch_model = load_model(dirpath + \"/batch_model.h5\")\n \n # Build sample_model out of the trained batch_model\n self.__build_sample_model(batch_input_length=1) # Single-output model\n self.__build_sample_model(\n batch_input_length=256 # could also be self.batch_size\n ) # Multi-output model\n\n print(\"Loading finished in %i seconds.\" % ((datetime.now() - tstart).seconds))" ]
[ "0.78126186", "0.75728905", "0.75673294", "0.7478123", "0.74248904", "0.7383905", "0.7324118", "0.7305647", "0.7303674", "0.73004097", "0.7277768", "0.7242201", "0.7198431", "0.71938014", "0.71598846", "0.712388", "0.7121004", "0.7102762", "0.7091739", "0.70794976", "0.7078112", "0.7035582", "0.7018757", "0.7016472", "0.6994088", "0.69900393", "0.6977675", "0.6977675", "0.6977318", "0.69502294" ]
0.81570524
0
Computes an embedding given wide feature indices and values. If wide_ftrs_sp_val is specified, users should keep consistency between wide_ftrs_sp_idx and wide_ftrs_sp_val: the value for the feature at wide_ftrs_sp_idx[i] should be wide_ftrs_sp_val[i].
def __init__(self, num_wide_sp: int, wide_ftrs_sp_idx: tf.Tensor, sp_emb_size: int, wide_ftrs_sp_val: tf.Tensor = None, padding_idx: int = 0, initializer=tf.contrib.layers.xavier_initializer()): wide_ftrs_sp_idx = tf.cast(wide_ftrs_sp_idx, dtype=tf.float32) if wide_ftrs_sp_val is None: # Default to 1 if values unspecified wide_ftrs_sp_val = tf.ones(tf.shape(wide_ftrs_sp_idx), dtype=tf.float32) self._num_wide_sp = num_wide_sp self._padding_idx = padding_idx with tf.variable_scope('wide', reuse=tf.AUTO_REUSE): # Feature weights self.ftrs_weight = tf.get_variable('wide_ftrs_sp_weight', shape=[num_wide_sp, sp_emb_size], initializer=initializer, trainable=True) # A hack to combine idx and val so that we can process them together in `tf.map_fn` later # Shape=[batch_size, max_group_size, max_wide_ftrs_sp_size*2], max_wide_ftrs_size is the maximum number of # sparse wide features in a document in the batch wide_ftrs_sp_idx_with_value = tf.concat([wide_ftrs_sp_idx, wide_ftrs_sp_val], axis=-1) # Compute embedding sample-wise self.embedding = tf.map_fn(self._compute_embedding_score_per_record, wide_ftrs_sp_idx_with_value, dtype=tf.float32)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute_embedding_score_per_record(self, wide_ftrs_idx_with_value_per_record):\n\n # Split idx and val back\n wide_ftrs_sp_idx, wide_ftrs_sp_val = tf.split(wide_ftrs_idx_with_value_per_record, 2, axis=-1)\n wide_ftrs_sp_idx = tf.cast(wide_ftrs_sp_idx, dtype=tf.int64)\n\n # Transformation\n shape = tf.ones(shape=[tf.shape(wide_ftrs_sp_idx)[0], 1], dtype=tf.int64) * self._num_wide_sp\n valid_wide_ftrs_idx_mask = tf.cast(tf.not_equal(wide_ftrs_sp_idx, self._padding_idx), tf.float32)\n wide_ftrs_sp_idx = tf.expand_dims(wide_ftrs_sp_idx, -1)\n\n # Get sparse feature vector v where v[ftr_idx_i] = ftr_val_i and v[other] = 0\n wide_ftrs_sp = sparse_tensor_merge(wide_ftrs_sp_idx,\n wide_ftrs_sp_val * valid_wide_ftrs_idx_mask,\n shape)\n\n # Feature weights\n bias = self.ftrs_weight[0]\n\n # Compute embedding\n embedding = tf.sparse.matmul(wide_ftrs_sp, self.ftrs_weight) + bias\n return embedding", "def embed_features(batch, f_size):\n for f in range(Config.num_feature):\n feature_val = batch[:, f]\n num_cat_value = Config.schema[f]\n\n if f == 0:\n if num_cat_value == 1:\n vector = tf.reshape(feature_val, [-1, 1])\n else:\n vector = tf.nn.embedding_lookup(embed_dict[f], tf.cast(\n feature_val, tf.int32))\n else:\n if num_cat_value == 1:\n vector = tf.concat(1, [vector, tf.reshape(feature_val,\n [-1, 1])])\n else:\n vector = tf.concat(1, [vector, tf.nn.embedding_lookup(\n embed_dict[f], tf.cast(feature_val, tf.int32))])\n\n result = tf.reshape(vector, [-1, 1, f_size])\n\n return result", "def build_sense_embedding(target_sense_to_id, word_freq, EMBEDDING_DIM):\r\n res = {}\r\n wordvecs = load_glove(EMBEDDING_DIM)\r\n \r\n for target_sense_list in target_sense_to_id:\r\n for key, _ in target_sense_list.items():\r\n sense_vector = np.zeros(EMBEDDING_DIM)\r\n senses = key.split(',')\r\n n = 0\r\n for sensekey in senses:\r\n #print(sensekey) \r\n if '/' in sensekey:\r\n continue\r\n sense_synset = sc2ss(sensekey)\r\n if sense_synset:\r\n sense_vector += build_sense_vector(sense_synset, word_freq, wordvecs)\r\n n += 1\r\n if n != 0:\r\n res[key] = sense_vector/n\r\n return res", "def get_W(word_vecs, vocab, k=300):\n vocab_size = len(word_vecs)\n word_idx_map = dict()\n W = np.zeros(shape=(vocab_size+2, k), dtype='float32')\n W[0] = np.zeros(k, dtype='float32') # padding vector\n i = 1\n for word in vocab:\n \tif word_vecs.has_key(word):\n \tW[i] = word_vecs[word]\n \tword_idx_map[word] = i\n \ti += 1\n else:\n \tword_idx_map[word] = vocab_size+1\n W[vocab_size+1] = np.zeros(k, dtype='float32')\n return W, word_idx_map", "def get_W(word_vecs, vocab, k=300):\n vocab_size = len(vocab)\n word_idx_map = dict()\n W = np.zeros(shape=(vocab_size, k), dtype='float32')\n i = 0\n for word in vocab:\n W[i] = word_vecs[word]\n word_idx_map[word] = i\n i += 1\n # W[0] = np.zeros(k, dtype='float32')\n return W, word_idx_map", "def get_W(word_vecs, k=300):\n vocab_size = len(word_vecs)\n word_idx_map = dict()\n W = np.zeros(shape=(vocab_size + 1, k), dtype='float32')\n W[0] = np.zeros(k, dtype='float32')\n i = 1\n for word in word_vecs:\n W[i] = word_vecs[word]\n word_idx_map[word] = i\n i += 1\n return W, word_idx_map", "def _compute_span_pair_embeddings(self,\n top_span_embeddings: torch.FloatTensor,\n antecedent_embeddings: torch.FloatTensor,\n genre_embedding: torch.FloatTensor,\n trigger_same_type_agreement_embeddings: torch.FloatTensor,\n realies_same_type_agreement_embeddings: torch.FloatTensor,\n antecedent_offsets: torch.FloatTensor):\n\n # Shape: (1, max_antecedents, embedding_size)\n 
antecedent_distance_embeddings = self._distance_embedding(\n util.bucket_values(antecedent_offsets,\n num_total_buckets=self._num_distance_buckets))\n\n # Shape: (1, 1, max_antecedents, embedding_size)\n antecedent_distance_embeddings = antecedent_distance_embeddings.unsqueeze(0)\n\n expanded_distance_embeddings_shape = (antecedent_embeddings.size(0), # batch_size\n antecedent_embeddings.size(1), # num_spans_to_keep\n antecedent_embeddings.size(2), # max_antecedents\n antecedent_distance_embeddings.size(-1)) # embedding_size\n # Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)\n antecedent_distance_embeddings = antecedent_distance_embeddings.expand(*expanded_distance_embeddings_shape)\n antecedent_genre_embeddings = genre_embedding.view(1, 1, 1, -1).expand_as(antecedent_distance_embeddings)\n feature_embeddings = self._dropout(torch.cat(\n [antecedent_genre_embeddings, realies_same_type_agreement_embeddings, trigger_same_type_agreement_embeddings],-1))\n # ], -1))\n\n # [antecedent_distance_embeddings, antecedent_genre_embeddings, trigger_same_type_agreement_embeddings,\n # realies_same_type_agreement_embeddings], -1))\n # feature_embeddings = self._dropout(torch.cat(\n # [antecedent_genre_embeddings, trigger_same_type_agreement_embeddings], -1\n # ))\n\n # Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)\n target_embeddings = top_span_embeddings.unsqueeze(2).expand_as(antecedent_embeddings)\n # Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)\n span_pair_embeddings = torch.cat([target_embeddings,\n antecedent_embeddings,\n antecedent_embeddings * target_embeddings,\n feature_embeddings], -1)\n return span_pair_embeddings", "def get_WS(w2v):\n # get set of MAX_NGRAM-grams in text\n lines = open(INFNAME_FORMAT.format(\"train\")).readlines() \\\n + open(INFNAME_FORMAT.format(\"test\")).readlines()\n raw = [process_line(l) for l in lines ]\n ngrams_in_data = set()\n for words in raw:\n for ngram in tweet_to_ngrams(words):\n ngrams_in_data.add(ngram)\n\n # load sentiment features from model\n clf_pipe = pickle.load(open(CLF_FNAME, 'rb')) # model\n\n vect = clf_pipe.best_estimator_.named_steps['vect']\n clf = clf_pipe.best_estimator_.named_steps['clf']\n\n features_to_sent_idx = vect.vocabulary_ # map from model features to sentiment index\n # currently, sentiment = 2 * (count_pos / (count_pos + count_neg)) - 1\n sentiments = clf.feature_count_[1,:] / np.sum(clf.feature_count_, axis=0) # in [0,1]\n sentiments = 2 * sentiments - 1 # rescale to [-1,1]\n\n features_to_sent = {feat: sentiments[idx] for (feat,idx) in features_to_sent_idx.items()}\n\n # build WS and ngram_idx_map for each MAX_NGRAM-gram in the text\n k = len(next(iter(w2v.values()))) # dimension of embedding\n WS = np.zeros(shape=(len(ngrams_in_data) + 1, k + MAX_NGRAM), dtype='float32')\n ngram_idx_map = {}\n\n index = 1 # first row is left 0, for padding in the cnn. This is also neutral sentiment.\n # For Vader Sentiment analysis\n# vader_analyzer = SentimentIntensityAnalyzer()\n\n\n for ngram in ngrams_in_data:\n ngram_idx_map[ngram] = index\n\n # set word embedding, note that unknown words already randomized in load_embedding \n words = ngram.split(' ')\n WS[index,:k] = w2v[words[-1]] # embedding of last word\n\n # set sentiment embedding\n for n in range(MAX_NGRAM): # for 1, 2, ... 
length ngrams\n sub_ngram = ' '.join(words[-1 - n:]) \n\n # Naive Bayes Sentiment feature --------------------------------\n sent = features_to_sent.get(sub_ngram, 0.0) # default to neutral 0\n # --------------------------------------------------------------\n\n# # TextBlob sentiment feature -----------------------------------\n# sent = TextBlob(sub_ngram).sentiment.polarity\n# # --------------------------------------------------------------\n\n# # Vader sentiment feature -------------------------------------\n# sent = vader_analyzer.polarity_scores(sub_ngram)['compound']\n# # -------------------------------------------------------------\n WS[index,k+n] = sent\n\n index += 1\n\n return WS, ngram_idx_map", "def get_W(self,word_vecs, k=300):\n vocab_size = len(word_vecs)\n word_idx_map = dict()\n W = np.zeros(shape=(vocab_size+1, k), dtype='float32')\n W[0] = np.zeros(k, dtype='float32')\n i = 1\n for word in word_vecs:\n W[i] = word_vecs[word]\n word_idx_map[word] = i\n i += 1\n return W, word_idx_map", "def embedding(inputs,\n vocab_dim,\n embedding_dim,\n reuse,\n validate_indices=False,\n w_init=tf.random_uniform_initializer(-1., 1.),\n trainable=True,\n normalize=False,\n vocab_freqs=None,\n name=\"Embedding\"):\n\n input_shape = util.get_input_shape(inputs)\n assert len(input_shape) == 2, \"Input Tensor shape must be 2-D\"\n\n with tf.variable_scope(name, reuse=reuse):\n with tf.device('/cpu:0'):\n W = tf.get_variable(\n \"W\", shape=[vocab_dim, embedding_dim], initializer=w_init, trainable=trainable)\n if normalize:\n assert vocab_freqs is not None\n vocab_freqs = tf.constant(vocab_freqs, dtype=tf.float32, shape=(vocab_dim, 1))\n W = _normalize(W, vocab_freqs)\n\n output = tf.cast(inputs, tf.int32)\n output = tf.nn.embedding_lookup(W, output, validate_indices=validate_indices)\n\n shape = [-1] + output.get_shape().as_list()[1:3] + [1]\n # seq_length = util.retrieve_seq_length(tf.reshape(inputs, shape))\n\n return output", "def scatter_embedding_vector(values, indices, bucket_num):\n ps_ids = {}\n indices_list = indices.tolist()\n for i, item_id in enumerate(indices_list):\n ps_id = int_to_id(item_id, bucket_num)\n if ps_id not in ps_ids:\n ps_ids[ps_id] = [(i, item_id)]\n else:\n ps_ids[ps_id].append((i, item_id))\n results = {}\n for ps_id, i_item_id in ps_ids.items():\n i = [v[0] for v in i_item_id]\n item_id = [v[1] for v in i_item_id]\n results[ps_id] = (values[i, :], item_id)\n return results", "def add_word_embedding_op(self):\n if self.pos:\n print(\"adding pos embeddings\")\n with tf.variable_scope(\"pos\"):\n _pos_embeddings = tf.Variable(self.pos_embeddings,\n name=\"la_pos_embeddings\",\n dtype=tf.float32, trainable=False)\n pos_embeddings = tf.nn.embedding_lookup(_pos_embeddings, self.pos_ids,\n name=\"pos_embeddings\")\n self.pos_vecs = pos_embeddings\n print(\"adding word_embeddings\")\n with tf.variable_scope(\"words\"):\n _word_embeddings = tf.Variable(self.embeddings, name=\"_word_embeddings\",\n dtype=tf.float32, trainable=False)\n word_embeddings = tf.nn.embedding_lookup(_word_embeddings,\n self.word_ids,\n name=\"word_embeddings\")\n if self.use_window:\n print(\"Concatenating word vectors of context words\")\n word_embeddings_sl = tf.nn.embedding_lookup(_word_embeddings,\n self.word_ids_sl,\n name=\"word_embeddings_sl\")\n word_embeddings_sr = tf.nn.embedding_lookup(_word_embeddings,\n self.word_ids_sr,\n name=\"word_embeddings_sr\")\n word_embeddings = tf.concat([word_embeddings_sr, word_embeddings,\n word_embeddings_sl], axis=-1)\n if self.use_char_embeddings:\n 
print(\"adding CNN for char embeddings\")\n with tf.variable_scope(\"chars\"):\n _char_embeddings = tf.get_variable(name=\"_char_embeddings\",\n dtype=tf.float32,\n shape=[self.char_count, \n self.c_dim_input])\n char_embeddings = tf.nn.embedding_lookup(_char_embeddings, \n self.char_ids, \n name=\"char_embeddings\")\n s = char_embeddings.shape\n # the shape of our char_embeddings is now (batch_size, max number of words\n # in each sentence, max number of chars in each word, self.c_dim )\n char_filter = tf.get_variable(\"char_filter\", dtype=tf.float32,\n shape=[self.c_filter_width, \n self.c_filter_height,\n self.c_dim_input,\n self.c_dim_output])\n print(\"adding 2d convolution layer\")\n char_conv_layer = tf.nn.conv2d(char_embeddings, char_filter, \n strides=[1, 1, 1, 1], \n padding=\"SAME\")\n char_conv_layer = tf.nn.tanh(char_conv_layer)\n print(\"adding 2d pooling layer\")\n char_conv_layer = tf.layers.max_pooling2d(char_conv_layer, \n 1, \n strides=1)\n char_output = tf.reshape(char_conv_layer, shape=[-1, self.max_len, \n self.max_word_length*\n self.c_dim_output])\n word_embeddings = tf.concat([word_embeddings, char_output], axis=-1)\n if self.pos and self.concat_pos:\n print(\"concatenating pos with word_embeddings\")\n word_embeddings = tf.concat([word_embeddings, pos_embeddings], axis=-1)\n self.word_embeddings = word_embeddings\n if self.use_additional and self.hybrid:\n print(\"using additional embeddings\")\n _word_embeddings_2 = tf.Variable(self.additional_embeddings,\n name=\"two_word_embeddings\",\n dtype=tf.float32, trainable=False)\n word_embeddings_2 = tf.nn.embedding_lookup(_word_embeddings_2,\n self.word_ids,\n name=\"two_word_embeddings\")\n self.word_embeddings_2 = word_embeddings_2", "def vectorize(tokens_list, feature_fns, min_freq, vocab=None):\n ###TODO\n \n features = []\n feature_freq = {}\n vocabulary = {}\n \n # 2 case : for vocab\n # case 1: \n if (vocab == None):\n \n for doc in tokens_list: \n #print('doc#=%d tokens=%s'%(i,doc)) \n data = featurize(doc,feature_fns)\n #print('data=',data)\n \n for feature in data: \n if feature[1] > 0 : \n if feature[0] not in feature_freq.keys():\n feature_freq.setdefault(feature[0],1) \n else :\n feature_freq[feature[0]] += 1\n \n if feature[0] not in vocabulary.keys() :\n vocabulary.setdefault(feature[0], None) \n \n features.append(data)\n \n # sort vocab according to features (alphabetical order)\n vacab_list = sorted(feature_freq.keys(), key =lambda x: x,reverse=False)\n \n for colIndex,term in enumerate(vacab_list) :\n #print('colIndex = %d, term = %s'%(colIndex,term))\n vocabulary[term] = colIndex\n\n else: # case 2 \n \n # vocab already present\n #print('Vocab already present')\n vocabulary = vocab.copy() \n \n \n for doc in tokens_list: \n data = featurize(doc,feature_fns) \n \n test_data = [] \n for feature in data: \n # only take feature present in vocab \n if feature[0] in vocabulary.keys():\n #print('feature = ',feature) \n if feature[1] > 0 : \n test_data.append(feature) \n if feature[0] not in feature_freq.keys():\n feature_freq.setdefault(feature[0],1) \n else :\n feature_freq[feature[0]] += 1\n \n #print('test_data = ',len(test_data)) \n features.append(test_data)\n #test_data.clear()\n #print('features = ',features)\n \n \n # build a csr_matrix \n row = []\n col = []\n data = [] \n \n for docID,feat_list in enumerate(features) :\n for term in feat_list:\n if (feature_freq[term[0]] >= min_freq): # (zero values are not stored)\n \n row.append(docID)\n col.append(vocabulary[term[0]])\n 
data.append(term[1])\n \n #print('row =',row)\n #print('col =',col)\n #print('data=',data)\n \n X = csr_matrix((data, (row, col)), shape=(len(features), len(vocabulary)), dtype=np.int64)\n \n #print('X ->')\n #print(X.toarray())\n #print(' size of X = ',X.get_shape())\n \n return(X, vocabulary)", "def get_W(word_vecs, k=300):\n vocab_size = len(word_vecs)\n word_idx_map = dict()\n W = np.zeros(shape=(vocab_size+1, k)) \n W[0] = np.zeros(k)\n\n for i, word in enumerate(word_vecs):\n W[i+1] = word_vecs[word] # i+1 as i=0 is already filled with zeros\n word_idx_map[word] = i\n i += 1\n return W, word_idx_map", "def get_W(word_vecs, k=300):\n vocab_size = len(word_vecs)\n word_idx_map = dict()\n W = np.zeros(shape=(vocab_size + 1, k))\n W[0] = np.zeros(k)\n i = 1\n for word in word_vecs:\n W[i] = word_vecs[word]\n word_idx_map[word] = i\n i += 1\n return W, word_idx_map", "def source_embedding_fairseq(self):\r\n return tf.get_variable(\r\n name=\"W\",\r\n shape=[self.params[\"feature.dim\"], self.params[\"embedding.dim\"]],\r\n initializer=tf.random_normal_initializer(\r\n mean=0.0,\r\n stddev=0.1))", "def get_pretrained_embeddings(source_vocab,embed_df):\r\n \r\n num_tokens = len(source_vocab)\r\n embedding_dim = embed_df.shape[1]\r\n weights = np.zeros((num_tokens,embedding_dim),dtype=np.float32)\r\n \r\n for idx in range(num_tokens):\r\n token = source_vocab.lookup_index(idx)\r\n if token in embed_df.index:\r\n weights[idx,:] = embed_df.loc[token]\r\n else:\r\n weights[idx,:] = np.random.randn(1,embedding_dim)\r\n \r\n embed_tensor = torch.FloatTensor(weights)\r\n return embed_tensor", "def _get_embeddings_and_idf_scale(dataloader: DataLoader, target_len: int, model: Module, device: Optional[Union[str, torch.device]]=None, num_layers: Optional[int]=None, all_layers: bool=False, idf: bool=False, verbose: bool=False, user_forward_fn: Callable[[Module, Dict[str, Tensor]], Tensor]=None) ->Tuple[Tensor, Tensor]:\n embeddings_list: List[Tensor] = []\n idf_scale_list: List[Tensor] = []\n for batch in _get_progress_bar(dataloader, verbose):\n with torch.no_grad():\n batch = _input_data_collator(batch, device)\n if not all_layers:\n if not user_forward_fn:\n out = model(batch['input_ids'], batch['attention_mask'], output_hidden_states=True)\n out = out.hidden_states[num_layers if num_layers is not None else -1]\n else:\n out = user_forward_fn(model, batch)\n _check_shape_of_model_output(out, batch['input_ids'])\n out = out.unsqueeze(1)\n else:\n if user_forward_fn:\n raise ValueError('The option `all_layers=True` can be used only with default `transformers` models.')\n out = model(batch['input_ids'], batch['attention_mask'], output_hidden_states=True)\n out = torch.cat([o.unsqueeze(1) for o in out.hidden_states], dim=1)\n out /= out.norm(dim=-1).unsqueeze(-1)\n out, attention_mask = _output_data_collator(out, batch['attention_mask'], target_len)\n processed_attention_mask = _process_attention_mask_for_special_tokens(attention_mask)\n out = torch.einsum('blsd, bs -> blsd', out, processed_attention_mask)\n embeddings_list.append(out.cpu())\n input_ids_idf = batch['input_ids_idf'] * processed_attention_mask if idf else processed_attention_mask.type(out.dtype)\n input_ids_idf /= input_ids_idf.sum(-1, keepdim=True)\n idf_scale_list.append(input_ids_idf.cpu())\n embeddings = torch.cat(embeddings_list)\n idf_scale = torch.cat(idf_scale_list)\n return embeddings, idf_scale", "def get_W(word_vecs, k=200):\n vocab_size = len(word_vecs)\n word_idx_map = dict()\n W = np.zeros(shape=(vocab_size+1, k)) \n W[0] = 
np.zeros(k)\n i = 1\n for word in word_vecs:\n W[i] = word_vecs[word]\n word_idx_map[word] = i\n i += 1\n return W, word_idx_map", "def embed(self, features, feature_name, params):\n with tf.variable_scope(\"%s_embed\" % feature_name):\n embed_matrix = tf.get_variable(\"embedding_matrix\",\n [getattr(params, \"%s_vocab_size\" % feature_name), \n getattr(params, \"%s_embed_size\" % feature_name)])\n return tf.nn.embedding_lookup(embed_matrix, features[feature_name])", "def _ShardTestEmbeddings(self, weights, biases, num_shards):\n with ops.Graph().as_default() as g:\n sharded_weights = variable_scope.get_variable(\n \"w\",\n partitioner=partitioned_variables.fixed_size_partitioner(num_shards),\n initializer=constant_op.constant(weights))\n sharded_biases = variable_scope.get_variable(\n \"b\",\n partitioner=partitioned_variables.fixed_size_partitioner(num_shards),\n initializer=constant_op.constant(biases))\n with self.session(graph=g) as sess:\n self.evaluate(variables.global_variables_initializer())\n return self.evaluate([list(sharded_weights), list(sharded_biases)])", "def embedding(\n input,\n weight,\n padding_idx=None,\n max_norm=None,\n norm_type=2.0,\n scale_grad_by_freq=False,\n sparse=False,\n):\n\n assert sparse is False, \"Not support sparse=True yet!\"\n if padding_idx is not None:\n if padding_idx > 0:\n assert padding_idx < weight.size(\n 0\n ), \"Padding_idx must be within num_embeddings\"\n elif padding_idx < 0:\n assert padding_idx >= -weight.size(\n 0\n ), \"Padding_idx must be within num_embeddings\"\n padding_idx = weight.size(0) + padding_idx\n\n if max_norm is not None:\n with flow.no_grad():\n weight = flow._C.embedding_renorm_(weight, input, max_norm, norm_type)\n\n if padding_idx is None and not scale_grad_by_freq:\n return flow._C.gather(weight, input, axis=0)\n else:\n return flow._C.embedding(weight, input, padding_idx, scale_grad_by_freq)", "def embed_data(\n self,\n data: Dict[str, tf.SparseTensor]\n ) -> Tuple[tf.Tensor, tf.Tensor]:\n\n batch_shape = tf.shape(data[\"t\"])[:-1]\n flat_data = nest.map_structure(batches.flatten_batch, data)\n flat_data = nest.map_structure(batches.sparse_fill_empty_rows, flat_data)\n\n context_embeddings = (\n self.embedding.provide_embeddings_to_forward_fn(\n flat_data, feature_types=self._config.context_features))\n context_embeddings = nest.map_structure(\n batches.get_unflatten_batch_fn(batch_shape), context_embeddings)\n\n sequential_embeddings = (\n self.embedding.provide_embeddings_to_forward_fn(\n flat_data, feature_types=self._config.sequential_features))\n sequential_embeddings = nest.map_structure(\n batches.get_unflatten_batch_fn(batch_shape), sequential_embeddings)\n\n dt = tf.divide(tf.cast(data[\"dt\"], dtype=tf.float32), 5400.)\n t = tf.divide(tf.cast(data[\"t\"], dtype=tf.float32), 5400.)\n dt_log = tf.log(dt + 1.)\n\n embedding_dict = sequential_embeddings.copy()\n embedding_dict.update(context_embeddings)\n embedding_dict[\"dt_s\"] = tf.matmul(dt_log, self.w_dt)\n combined_embedding = self._combine_embeddings_for_input(embedding_dict)\n inputs = combined_embedding\n if self._config.get(\"apply_bias\", False):\n inputs = inputs + tf.get_variable(\n \"_\".join([self._config.embedding_type, \"final_bias\"]),\n shape=[self.get_total_embedding_size()],\n initializer=tf.zeros_initializer)\n time_vect = t\n\n return inputs, time_vect", "def embedding_model(\n n_factors: int = 50,\n window: int = 5,\n min_count: int = 1,\n learning_rate: float = 0.05,\n negative_samples: int = 10,\n negative_exponent: float = 0.75,\n 
workers: int = 4,\n n_iterations: int = 10,\n batch_size: int = 10000,\n skip_gram: int = 0,\n) -> Word2Vec:\n logger.info(\"Defining Embedding Neural Network model.\")\n model = Word2Vec(\n vector_size=n_factors,\n window=window,\n min_count=min_count,\n alpha=learning_rate,\n negative=negative_samples,\n ns_exponent=negative_exponent,\n workers=workers,\n epochs=n_iterations,\n batch_words=batch_size,\n sg=skip_gram,\n compute_loss=True,\n )\n return model", "def build_word_embeddings(self):\n if self.mode == \"encode\":\n # Word embeddings are fed from an external vocabulary which has possibly\n # been expanded (see vocabulary_expansion.py).\n encode_emb1 = tf.placeholder(tf.float32, (\n None, None, self.config.word_embedding_dim), \"encode_emb1\")\n # No sequences to decode.\n encode_emb2 = tf.placeholder(tf.float32, (\n None, None, self.config.word_embedding_dim), \"encode_emb2\")\n elif self.mode == \"test\":\n encode_emb1 = tf.placeholder(tf.float32, (\n None, None, self.config.word_embedding_dim), \"encode_emb1\")\n # No sequences to decode.\n encode_emb2 = tf.placeholder(tf.float32, (\n None, None, self.config.word_embedding_dim), \"encode_emb2\")\n else:\n word_emb = tf.get_variable(\n name=\"word_embedding\",\n shape=[self.config.vocab_size, self.config.word_embedding_dim],\n initializer=self.uniform_initializer)\n\n encode_emb1 = tf.nn.embedding_lookup(word_emb, self.encode_ids1)\n encode_emb2 = tf.nn.embedding_lookup(word_emb, self.encode_ids2)\n\n\n self.encode_emb1 = encode_emb1\n self.encode_emb2 = encode_emb2", "def add_word_embeddings_op(self):\n with tf.variable_scope(\"words\"):\n if self.config.embeddings is None:\n self.logger.info(\"WARNING: randomly initializing word vectors\")\n _word_embeddings = tf.get_variable(\n name=\"_word_embeddings\",\n dtype=tf.float32,\n shape=[self.config.nwords, self.config.dim_word])\n else:\n _word_embeddings = tf.Variable(\n self.config.embeddings,\n name=\"_word_embeddings\",\n dtype=tf.float32,\n trainable=self.config.train_embeddings)\n\n word_embeddings = tf.nn.embedding_lookup(_word_embeddings,\n self.word_ids, name=\"word_embeddings\")\n\n with tf.variable_scope(\"chars\"):\n if self.config.use_chars:\n # get char embeddings matrix\n _char_embeddings = tf.get_variable(\n name=\"_char_embeddings\",\n dtype=tf.float32,\n shape=[self.config.nchars, self.config.dim_char])\n char_embeddings = tf.nn.embedding_lookup(_char_embeddings,\n self.char_ids, name=\"char_embeddings\")\n\n # put the time dimension on axis=1\n s = tf.shape(char_embeddings)\n char_embeddings = tf.reshape(char_embeddings,\n shape=[s[0]*s[1], s[-2], self.config.dim_char])\n word_lengths = tf.reshape(self.word_lengths, shape=[s[0]*s[1]])\n\n # bi lstm on chars\n cell_fw = tf.contrib.rnn.LSTMCell(self.config.hidden_size_char,\n state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(self.config.hidden_size_char,\n state_is_tuple=True)\n _output = tf.nn.bidirectional_dynamic_rnn(\n cell_fw, cell_bw, char_embeddings,\n sequence_length=word_lengths, dtype=tf.float32)\n\n # read and concat output\n _, ((_, output_fw), (_, output_bw)) = _output\n output = tf.concat([output_fw, output_bw], axis=-1)\n\n # shape = (batch size, max sentence length, char hidden size)\n output = tf.reshape(output,\n shape=[s[0], s[1], 2*self.config.hidden_size_char])\n word_embeddings = tf.concat([word_embeddings, output], axis=-1)\n\n self.word_embeddings = tf.nn.dropout(word_embeddings, self.dropout)", "def call(self, x, *args, **kwargs):\n with tf.name_scope(\"embedding\"):\n # fills out 
of bound values with padding symbol\n out_bound_mask = tf.cast(x > (self.vocab_size - 1), dtype=tf.int32)\n x *= 1 - out_bound_mask\n x += out_bound_mask * tf.cast(self.pad_sym, dtype=tf.int32)\n\n embeddings = tf.gather(self.shared_weights, x)\n if self.embed_scale:\n # Scale embedding by the sqrt of the hidden size\n embeddings *= self.hidden_size ** 0.5\n\n if self.mask_paddings:\n # Create binary array of size [batch_size, length]\n # where 1 = padding, 0 = not padding\n padding = get_padding(x, padding_value=self.pad_sym)\n\n # Set all padding embedding values to 0\n # embeddings *= tf.expand_dims(1 - padding, -1)\n embeddings *= tf.cast(tf.expand_dims(1.0 - padding, -1), dtype=embeddings.dtype)\n return embeddings", "def init_embeddings(self, weight, words):\n # wrap in tensor\n if isinstance(weight, list):\n weight = torch.Tensor(weight).float()\n if isinstance(weight, np.ndarray):\n weight = torch.from_numpy(weight).float()\n # check embedding size\n if weight.size(1) != self.embedding_dim:\n raise ValueError(\"Mismatched embedding dim {} for model \"\n \"with dim {}\".format(weight.size(1),\n self.embedding_dim))\n\n self_idxs, other_idxs = [], []\n for other_idx, word in enumerate(words):\n try:\n self_idxs.append(self.d.s2i[word])\n other_idxs.append(other_idx)\n except KeyError:\n pass\n\n other_idxs = torch.LongTensor(other_idxs)\n self_idxs = torch.LongTensor(self_idxs)\n self.weight.data[self_idxs] = weight[other_idxs]", "def get_W(word_vecs, k):\n vocab_size = len(word_vecs)\n word_idx_map = dict()\n W = np.zeros(shape=(vocab_size+1, k)) \n W[0] = np.zeros(k)\n i = 1\n for word in word_vecs:\n W[i] = word_vecs[word]\n word_idx_map[word] = i\n i += 1\n return W, word_idx_map", "def compute_residuals_(words, wx2_idxs, idx2_vec, idx2_aid, idx2_fx, aggregate):\n words_values = pdh.ensure_values(words)\n idx2_aid_values = pdh.ensure_values(idx2_aid)\n idx2_vec_values = pdh.ensure_values(idx2_vec)\n idx2_fx_values = pdh.ensure_values(idx2_fx)\n wx_sublist = pdh.ensure_index(wx2_idxs)\n # Build lists w.r.t. words\n idxs_list = [idxs.astype(INDEX_TYPE) for idxs in pdh.ensure_values_subset(wx2_idxs, wx_sublist)]\n aids_list = [idx2_aid_values.take(idxs) for idxs in idxs_list]\n #wx2_idxs_values = pdh.ensure_values_subset(wx2_idxs, wx_sublist)\n #idxs_list = [pdh.ensure_values(idxsdf).astype(INDEX_TYPE) for idxsdf in wx2_idxs_values] # 13 ms\n if utool.DEBUG2:\n #assert np.all(np.diff(wx_sublist) == 1), 'not dense'\n assert all([len(a) == len(b) for a, b in zip(idxs_list, aids_list)]), 'bad alignment'\n assert idx2_vec_values.shape[0] == idx2_fx_values.shape[0]\n assert idx2_vec_values.shape[0] == idx2_aid_values.shape[0]\n # Prealloc output\n if utool.VERBOSE:\n print('[smk_index] Residual Vectors for %d words. 
aggregate=%r' %\n (len(wx2_idxs), aggregate,))\n # Nonaggregated residuals\n #_args1 = (words_values, wx_sublist, idxs_list, idx2_vec_values)\n #rvecs_list = smk_speed.compute_nonagg_rvec_listcomp(*_args1) # 125 ms 11%\n words_list = [words_values[wx:wx + 1] for wx in wx_sublist] # 1 ms\n vecs_list = [idx2_vec_values.take(idxs, axis=0) for idxs in idxs_list] # 5.3 ms\n rvecs_list = [smk_core.get_norm_rvecs(vecs, word)\n for vecs, word in zip(vecs_list, words_list)] # 103 ms # 90%\n if aggregate:\n # Aggregate over words of the same aid\n tup = smk_speed.compute_agg_rvecs(rvecs_list, idxs_list, aids_list) # 38%\n (aggvecs_list, aggaids_list, aggidxs_list) = tup\n aggfxs_list = [[idx2_fx_values.take(idxs) for idxs in aggidxs]\n for aggidxs in aggidxs_list]\n if WITH_PANDAS:\n _args2 = (wx_sublist, aggvecs_list, aggaids_list, aggfxs_list)\n # Make aggregate dataframes\n wx2_aggvecs, wx2_aggaids, wx2_aggfxs = pdh.pandasify_agg_list(*_args2) # 617 ms 47%\n else:\n wx2_aggvecs = {wx: aggvecs for wx, aggvecs in zip(wx_sublist, aggvecs_list)}\n wx2_aggaids = {wx: aggaids for wx, aggaids in zip(wx_sublist, aggaids_list)}\n wx2_aggfxs = {wx: aggfxs for wx, aggfxs in zip(wx_sublist, aggfxs_list)}\n if utool.DEBUG2:\n from ibeis.model.hots.smk import smk_debug\n smk_debug.check_wx2(words, wx2_aggvecs, wx2_aggaids, wx2_aggfxs)\n\n return wx2_aggvecs, wx2_aggaids, wx2_aggfxs\n else:\n # Make residuals dataframes\n # compatibility hack\n fxs_list = [[idx2_fx_values[idx:idx + 1] for idx in idxs] for idxs in idxs_list]\n if WITH_PANDAS:\n _args3 = (wx_sublist, idxs_list, rvecs_list, aids_list, fxs_list)\n wx2_rvecs, wx2_aids, wx2_fxs = pdh.pandasify_rvecs_list(*_args3) # 405 ms\n else:\n wx2_rvecs = {wx: rvecs for wx, rvecs in zip(wx_sublist, rvecs_list)}\n wx2_aids = {wx: aids for wx, aids in zip(wx_sublist, aids_list)}\n wx2_fxs = {wx: fxs for wx, fxs in zip(wx_sublist, fxs_list)}\n if utool.DEBUG2:\n from ibeis.model.hots.smk import smk_debug\n smk_debug.check_wx2(words, wx2_rvecs, wx2_aids, wx2_fxs)\n return wx2_rvecs, wx2_aids, wx2_fxs" ]
[ "0.75221336", "0.5563098", "0.5548879", "0.55042124", "0.5398143", "0.5337651", "0.5324334", "0.5312726", "0.5289014", "0.5243177", "0.51948035", "0.51796144", "0.5149877", "0.51468027", "0.51436365", "0.51356083", "0.5126982", "0.51263756", "0.5117215", "0.51115566", "0.51042014", "0.5094814", "0.5073136", "0.5069238", "0.5054315", "0.5047948", "0.504715", "0.5001645", "0.4998603", "0.499626" ]
0.7290438
1
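
The record above pairs a natural-language description with a TensorFlow implementation that builds a per-document sparse feature vector (via `tf.map_fn` and `tf.sparse.matmul`) and multiplies it by a learned weight matrix whose row 0 doubles as padding and bias. As a reading aid only (not part of the dataset), below is a minimal NumPy sketch of that same computation for a single document; the function name and the dense scatter with `np.add.at` are illustrative assumptions, not the library's API.

```python
import numpy as np

def wide_sparse_embedding(ftrs_weight, wide_ftrs_sp_idx, wide_ftrs_sp_val=None, padding_idx=0):
    """Illustrative dense re-implementation of the sparse wide-feature embedding above."""
    idx = np.asarray(wide_ftrs_sp_idx, dtype=np.int64)
    # Values default to 1.0 when unspecified, mirroring the record's behaviour.
    val = (np.ones(idx.shape, dtype=np.float32) if wide_ftrs_sp_val is None
           else np.asarray(wide_ftrs_sp_val, dtype=np.float32))
    # Scatter values into a dense feature vector; padding slots contribute nothing.
    ftr_vec = np.zeros(ftrs_weight.shape[0], dtype=np.float32)
    mask = idx != padding_idx
    np.add.at(ftr_vec, idx[mask], val[mask])
    bias = ftrs_weight[0]                    # row 0 of the weight matrix acts as the bias
    return ftr_vec @ ftrs_weight + bias      # shape: (sp_emb_size,)

# Tiny usage example with made-up numbers: num_wide_sp=6, sp_emb_size=4.
rng = np.random.default_rng(0)
W = rng.normal(size=(6, 4)).astype(np.float32)
emb = wide_sparse_embedding(W, wide_ftrs_sp_idx=[2, 5, 0], wide_ftrs_sp_val=[0.5, 1.5, 0.0])
print(emb.shape)  # (4,)
```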