Dataset columns:
query: string (length 9 to 9.05k)
document: string (length 10 to 222k)
metadata: dict
negatives: list (length 30)
negative_scores: list (length 30)
document_score: string (length 4 to 10)
document_rank: string (2 classes)
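The column summary above follows the Hugging Face dataset-viewer layout. Below is a minimal sketch of loading and inspecting one triplet row with the `datasets` library; the repository id is a hypothetical placeholder, not the real dataset name:

    from datasets import load_dataset

    # "org/code-retrieval-triplets" is a placeholder repository id.
    ds = load_dataset("org/code-retrieval-triplets", split="train")

    row = ds[0]
    print(row["query"])                # natural-language query
    print(row["document"][:200])       # positive code document
    print(len(row["negatives"]))       # 30 mined negative documents
    print(row["negative_scores"][:3])  # retrieval scores for the negatives
    print(row["document_score"], row["document_rank"])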
Make a bar plot of a categorical variable, given as a field field_name in the structured array data. Field categories and their names are given in the dict field_categories.
def barplot(data, field_name, field_categories):
    categories, counts = np.unique(data[field_name], return_counts=True)
    fig = plt.figure(figsize=(4, 3))
    axes = fig.add_axes([0, 0, 1, 1])  # left, bottom, width, height (range 0 to 1)
    axes.bar(range(len(categories)), counts, fc="gray")  # fc is the face color
    axes.set_xlabel("")
    axes.set_ylabel('Count')
    axes.set_title(field_name)
    fig.autofmt_xdate(rotation=45)
    axes.set_xticks(range(len(categories)))
    axes.set_xticklabels([field_categories[c] for c in categories]);
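A brief usage sketch for the document above; the structured array and category mapping are invented for illustration, and the function assumes numpy and matplotlib.pyplot are already imported as np and plt:

    import numpy as np
    import matplotlib.pyplot as plt

    # Hypothetical structured array with an integer-coded categorical field.
    data = np.array([(0,), (1,), (1,), (2,), (0,), (1,)],
                    dtype=[("color", "i4")])
    field_categories = {0: "red", 1: "green", 2: "blue"}

    barplot(data, "color", field_categories)
    plt.show()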
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bar_plot(df, field_name, graph_title, threshold_value, x_axis_label, y_axis_label):\n\n x = df[field_name].value_counts().sort_values()\n x[x > threshold_value].plot(kind='barh', figsize=(12, 8), title=graph_title, x=x_axis_label, y=y_axis_label)\n return", "def category_bar_chart(df):\n label_names = df.drop(['message', 'original', 'genre', 'id'], axis=1).columns\n label_counts = []\n for column in label_names:\n label_counts.append(df[column].sum())\n return {\n 'data': [\n Bar(\n x=label_names,\n y=label_counts\n )\n ],\n\n 'layout': {\n 'title': 'Distribution of Labelled Categories',\n 'yaxis': {\n 'title': \"Count\",\n 'type': 'log'\n },\n 'xaxis': {\n 'title': \"Category\"\n }\n }\n }", "def make_categorical_bar_source(df, x_field, y_field='None', df_orig=None, agg='count'):\n if df_orig is None:\n df_orig = df\n\n # handle x-only aggregations separately\n if agg == 'percent' or agg == 'count':\n # percent aggregations are a special case, since pandas doesn't directly support\n if agg == 'percent':\n\n # percent on discrete col using proportion, on continuous using percent\n if df[y_field].dtype == 'object':\n agg_func = 'count'\n else:\n agg_func = 'sum'\n\n total = float(getattr(df_orig[y_field], agg_func)())\n series = df.groupby(x_field)[y_field].apply(lambda x, total_agg=total, f=agg_func:\n 100*(getattr(x, f)()/total_agg))\n elif agg == 'count':\n series = df.groupby(x_field).size()\n else:\n raise ValueError('Unrecognized Aggregation Type for Y of \"None\"')\n\n # here we have a series where the values are the aggregation for the index (bars)\n result = pd.DataFrame(data={'labels': series.index, 'heights': series.values})\n\n # x and y aggregations\n else:\n # Get the y values after grouping by the x values\n group = df.groupby(x_field)[y_field]\n aggregate = getattr(group, agg)\n result = aggregate().reset_index()\n result.rename(columns={x_field: 'labels', y_field: 'heights'}, inplace=True)\n\n return ColumnDataSource(data=result)", "def make_bar_plot(x, y, title):\n return plotly.graph_objs.Figure(\n data=[plotly.graph_objs.Bar(x=list(x), y=list(y))],\n layout=plotly.graph_objs.Layout(title=title)\n )", "def set_categorical(self, meta_field):\n self._data[meta_field] = pd.Categorical(self._data[meta_field])", "def plot_individual_bar_chart_graph(data_values, title,\r\n number_of_keys,\r\n max_val,\r\n vals_for_bar_chart,\r\n file_in):\r\n\r\n n_groups = len(vals_for_bar_chart)\r\n fig, ax = plt.subplots()\r\n index = np.arange(n_groups)\r\n bar_width = 0.9\r\n opacity = 0.4\r\n # print vals_for_bar_chart\r\n rects1 = plt.bar(index,\r\n vals_for_bar_chart,\r\n bar_width,\r\n alpha=opacity,\r\n color='b') # label='whatever'\r\n plt.xlabel('number in cluster')\r\n plt.ylabel('Count')\r\n plt.title(title+\"_barchart\")\r\n plt.legend()\r\n pylab.grid(True)\r\n ax.set_yscale('symlog')\r\n ax.set_xscale('symlog')\r\n plt.tight_layout()\r\n plt.show()\r\n pylab.savefig(file_in + \"_\" + title + '_barchart.png')\r\n plt.close()\r\n pylab.close()", "def category_map(data, row_labels, col_labels, categories, cmap=\"tab10\", ax=None, cmax=None,\r\n cbar_kw={}, cbarlabel=\"\", title = \"Default\", x_title=\" \",y_title=\" \", saveFile = None, **kwargs):", "def _bar_example_2(quantity_by_fruit):\n ch = chartify.Chart(blank_labels=True, x_axis_type=\"categorical\")\n ch.set_title(\"Vertical bar plot - Label sort\")\n ch.set_subtitle(\"Set `categorical_order_by` to sort by labels\")\n ch.plot.bar(\n data_frame=quantity_by_fruit,\n categorical_columns=\"fruit\",\n 
numeric_column=\"quantity\",\n categorical_order_by=\"labels\",\n categorical_order_ascending=True,\n )\n ch.show(_OUTPUT_FORMAT)", "def city_bargraph(self, plot_col):\n try:\n self.cit\n except AttributeError:\n self._init_cit()\n\n col = [self.cit.df[plot_col][city] for city in self.city_names]\n self.single_barplot(col=col, section_labels=self.city_names,\n title=plot_col + ' by City',\n xlabel='Cities', ylabel=plot_col, bar_direction='horizontal',\n text_rotation='horizontal', barWidth=1.5, x_pad=0.1)\n return", "def featuresBarPlot(barNames,barValues):\n plt.bar(range(0,len(barNames)),barValues)\n plt.xticks(range(0,len(barNames)), barNames,rotation='vertical')\n plt.show()", "def plot_bar(label_array, acc_array, f1_array, width=0.5, axis_label=None, graph_title=None, file_name=\"\", dpi=100):\n plt.figure(figsize=plt.figaspect(1.), dpi=dpi)\n x = np.arange(len(label_array)) # the label locations\n plt.bar(x - 0.5 * width, acc_array, width, label='Accuracy')\n plt.bar(x + 0.5 * width, f1_array, width, label='F1 score')\n plt.ylim([0, 1.1])\n plt.xticks(x, labels=label_array)\n if axis_label is None:\n axis_label = ['Set', 'Values']\n plt.xlabel(axis_label[0])\n plt.ylabel(axis_label[1])\n if graph_title is None:\n graph_title = graph_title\n plt.title(graph_title)\n plt.tight_layout()\n plt.legend()\n plt.grid()\n if file_name:\n plt.savefig(file_name, bbox_inches='tight')\n plt.show()\n return", "def plot_catplot(\n df: pd.DataFrame,\n cat_col: str,\n quanti_cols_list: list,\n order: list = None,\n h: int = 5,\n w: int = 10,\n) -> sns.catplot:\n\n # iterate over quantiative variables\n # and plot catplot against the category variable\n for col in quanti_cols_list:\n sns.catplot(\n data=df,\n kind=\"bar\",\n x=cat_col,\n y=col,\n order=order,\n ci=None,\n height=h,\n aspect=w / h,\n )\n plt.show()\n return", "def plot_cat(df, cat_columns, hue = \"default_payment_next_month\"):\n fig = plt.figure(figsize = (20,(len(cat_columns)/2+1)*8))\n loc = 1\n for col in cat_columns:\n ax = fig.add_subplot(len(cat_columns)/2+1, 2, loc)\n df_plot = df[[col, hue, \"id\"]].groupby([col, hue]).count()\n df_plot.reset_index(inplace = True)\n sns.barplot(x=col, y= \"id\", hue = hue, data=df_plot, palette = \"GnBu_d\", ax = ax);\n plt.legend(title = \"default payment (1=yes, 0=no)\")\n plt.ylim([0.0001,15000])\n plt.ylabel(\"clients\");\n loc += 1", "def barh(self, column_for_categories, overlay=False, **vargs):\n yticks, labels = self._split(column_for_categories)\n index = np.arange(self.num_rows)\n margin = 0.1\n width = 1 - 2 * margin\n if overlay:\n width /= len(labels)\n def draw(axis, label, color):\n if overlay:\n ypos = index + margin + (1-2*margin)*labels.index(label)/len(labels)\n else:\n ypos = index\n axis.barh(ypos, self[label], width, alpha=0.8, color=color, **vargs)\n def annotate(axis, ticks):\n axis.set_yticks(index+0.5) # Center labels on bars\n axis.set_yticklabels(ticks, stretch='ultra-condensed')\n height = max(4, len(index)/2)\n self._visualize(labels, yticks, overlay, draw, annotate, height)", "def _bar_example_1(quantity_by_fruit):\n ch = chartify.Chart(blank_labels=True, x_axis_type=\"categorical\")\n ch.set_title(\"Vertical bar plot\")\n ch.set_subtitle(\"Automatically sorts by value counts.\")\n ch.plot.bar(\n data_frame=quantity_by_fruit,\n categorical_columns=\"fruit\",\n numeric_column=\"quantity\",\n )\n ch.show(_OUTPUT_FORMAT)", "def _bar_example_3(quantity_by_fruit):\n ch = chartify.Chart(blank_labels=True, y_axis_type=\"categorical\")\n ch.set_title(\"Horizontal bar 
plot\")\n ch.set_subtitle(\"Horizontal with color grouping\")\n ch.plot.bar(\n data_frame=quantity_by_fruit,\n categorical_columns=\"fruit\",\n numeric_column=\"quantity\",\n color_column=\"fruit\",\n )\n ch.show(_OUTPUT_FORMAT)", "def _is_categorical(df, field):\n return df[field].dtype.name == 'category'", "def _bar_example_4(quantity_by_fruit):\n ch = chartify.Chart(x_axis_type=\"categorical\", blank_labels=True)\n ch.set_title(\"Vertical bar plot with labels\")\n ch.set_subtitle(\"Hidden y-axis\")\n ch.plot.bar(\n data_frame=quantity_by_fruit,\n categorical_columns=\"fruit\",\n numeric_column=\"quantity\",\n color_column=\"fruit\",\n )\n ch.style.color_palette.reset_palette_order()\n ch.plot.text(\n data_frame=quantity_by_fruit,\n categorical_columns=\"fruit\",\n numeric_column=\"quantity\",\n text_column=\"quantity\",\n color_column=\"fruit\",\n )\n # Adjust the axis range to prevent clipping of the text labels.\n ch.axes.set_yaxis_range(0, 1200)\n ch.axes.hide_yaxis()\n ch.show(_OUTPUT_FORMAT)", "def BarPlot(data,colormap='Paired',ax=None,headers='show',value_max=None,x_ticklabels_rotation=90,**kws):\r\n if ax is None:\r\n ax=plt.subplot(111)\r\n\r\n if value_max is None:\r\n value_max=data.sum(1).max()\r\n\r\n data.plot(kind='bar', stacked=True,colormap=colormap, ax=ax,**kws)\r\n ax.set_ylim((0,value_max))\r\n\r\n\r\n #reverse legend order\r\n handles, labels = ax.get_legend_handles_labels()\r\n ax.legend(reversed(handles),reversed(data.columns),bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\r\n\r\n #AXES\r\n if (headers is None or headers=='hide'):\r\n ax.get_xaxis().set_visible(False)\r\n ax.get_xaxis().set_ticks([])\r\n elif headers=='show':\r\n plt.setp(ax.get_xticklabels(),rotation=x_ticklabels_rotation)\r\n ax.set_xlabel(None,visible=False)\r\n\r\n\r\n #plt.tight_layout()\r\n\r\n\r\n return ax", "def character_attribute_charts(self):\n\n file_or_case = \"case\"\n if self.ui.radioButton_file.isChecked():\n file_or_case = \"file\"\n attribute = self.ui.comboBox_char_attributes.currentText()\n title = _(\"Attribute bar chart\")\n subtitle = \"<br><sup>\" + _(file_or_case) + _(\" attribute: \") + attribute\n self.ui.comboBox_char_attributes.blockSignals(True)\n self.ui.comboBox_char_attributes.setCurrentIndex(0)\n self.ui.comboBox_char_attributes.blockSignals(False)\n\n cur = self.app.conn.cursor()\n cur.execute(\"select value, count(value) from attribute where attr_type=? and name=? 
group by value order by upper(value)\",\n [file_or_case, attribute])\n res = cur.fetchall()\n labels = []\n values = []\n for r in res:\n labels.append(r[0])\n values.append(r[1])\n # Create pandas DataFrame\n data = {'Value': labels, 'Count': values}\n df = pd.DataFrame(data)\n fig = px.bar(df, x='Count', y='Value', orientation='h', title=title + subtitle)\n fig.show()\n self.helper_export_html(fig)", "def _category_plot(self, element, x, y, data):\n labelled = ['y' if self.invert else 'x'] if x != 'index' else []\n if self.value_label != 'value':\n labelled.append('x' if self.invert else 'y')\n\n if 'xlabel' in self._plot_opts and 'x' not in labelled:\n labelled.append('x')\n if 'ylabel' in self._plot_opts and 'y' not in labelled:\n labelled.append('y')\n\n opts = {'plot': dict(self._plot_opts, labelled=labelled),\n 'style': dict(self._style_opts),\n 'norm': self._norm_opts}\n\n id_vars = [x]\n if any(v in self.indexes for v in id_vars):\n data = data.reset_index()\n data = data[y+[x]]\n\n if check_library(data, 'dask'):\n from dask.dataframe import melt\n else:\n melt = pd.melt\n\n df = melt(data, id_vars=[x], var_name=self.group_label, value_name=self.value_label)\n kdims = [x, self.group_label]\n vdims = [self.value_label]+self.hover_cols\n if self.subplots:\n obj = Dataset(df, kdims, vdims).to(element, x).layout()\n else:\n obj = element(df, kdims, vdims)\n return obj.redim(**self._redim).relabel(**self._relabel).opts(**opts)", "def barplot(self, x = \"Predictor\", color = None, opacity = 1, template = \"ggplot2\", \n has_title = True, barmode=\"stack\", is_horizontal = False, title = None, is_percent = False,\n show_num = False):\n if color: #Produce either a stacked or grouped bar plot\n df_stack = self._df.groupby([x,color]).size().reset_index()\n df_stack['Percentage'] = self._df.groupby([x, color]).size().groupby(level = 0).apply(lambda \n x:100 * x/float(x.sum())).values\n df_stack.columns = [x, color, 'Count', 'Percentage']\n df_stack['Percentage'] = round(df_stack['Percentage'], 2)\n \n x_clean, df_clean = clean_varname(df_stack, var = x)\n color_clean, df_clean = clean_varname(df_clean, var = color)\n \n if has_title:\n if not title:\n title = f\"Bar Plot of {x_clean} and {color_clean}\"\n else:\n title = None\n \n \n # 8 different variations for how this graph can appear:\n if is_horizontal:\n if is_percent:\n if show_num: #Show percentages on stacked bar graph\n fig = px.bar(df_clean, y = x_clean, x = 'Percentage', \n color = color_clean, template = template, barmode=barmode, \n opacity = opacity, title = title, text = df_clean['Percentage'])\n else:\n fig = px.bar(df_clean, y = x_clean, x = 'Percentage', \n color = color_clean, template = template, barmode=barmode, \n opacity = opacity, title = title)\n else:\n if show_num: #Show counts on stacked bar graph:\n fig = px.bar(df_clean, y = x_clean, x = 'Count', \n color = color_clean, template = template, barmode=barmode, \n opacity = opacity, title = title, text = df_clean['Count'])\n else:\n fig = px.bar(df_clean, y = x_clean, x = 'Count', \n color = color_clean, template = template, barmode=barmode, \n opacity = opacity, title = title)\n else:\n if is_percent:\n if show_num:\n fig = px.bar(df_clean, x = x_clean, y = 'Percentage', \n color = color_clean, template = template, barmode=barmode, \n opacity = opacity, title = title, text = df_clean['Percentage'])\n else:\n fig = px.bar(df_clean, x = x_clean, y = 'Percentage', \n color = color_clean, template = template, barmode=barmode, \n opacity = opacity, title = title)\n 
else:\n if show_num:\n fig = px.bar(df_clean, x = x_clean, y = 'Count', \n color = color_clean, template = template, barmode=barmode, \n opacity = opacity, title = title, text = df_clean['Count'])\n else:\n fig = px.bar(df_clean, x = x_clean, y = 'Count', \n color = color_clean, template = template, barmode=barmode, \n opacity = opacity, title = title) \n \n return fig\n \n else: #Create a basic bar plot\n df_stack = self._df.groupby([x]).size().reset_index()\n df_stack['Percentage'] = self._df.groupby([x]).size().groupby(level = 0).apply(lambda", "def stacked_bar(data, series_labels, category_labels=None, \n show_values=False, value_format=\"{}\", y_label=None, \n colors=None, grid=False, reverse=False, legend=False):\n\n ny = len(data[0])\n ind = list(range(ny))\n\n axes = []\n cum_size = np.zeros(ny)\n\n data = np.array(data)\n majority_cluster = np.argmax(data.sum(axis=1))\n data = data[:,data[majority_cluster,:].argsort()[::-1]]\n if reverse:\n data = np.flip(data, axis=1)\n category_labels = reversed(category_labels)\n\n for i, row_data in enumerate(data):\n color = colors[i] if colors is not None else None\n axes.append(plt.bar(ind, row_data, bottom=cum_size, \n label=series_labels[i], color=color, width=1))\n cum_size += row_data\n\n if category_labels:\n plt.xticks(ind, category_labels)\n\n if y_label:\n plt.ylabel(y_label)\n plt.yticks([])\n plt.xticks([])\n if legend:\n plt.legend(loc='upper right', bbox_to_anchor=(1.5, 1))\n\n if grid:\n plt.grid()\n if show_values:\n for axis in axes:\n for bar in axis:\n w, h = bar.get_width(), bar.get_height()\n plt.text(bar.get_x() + w/2, bar.get_y() + h/2, \n value_format.format(h), ha=\"center\", \n va=\"center\")\n plt.xlim((0, data.shape[1]))", "def bar(\n df,\n x=None,\n y=\"value\",\n bars=\"variable\",\n order=None,\n bars_order=None,\n orient=\"v\",\n legend=True,\n title=True,\n ax=None,\n cmap=None,\n **kwargs,\n):\n\n # default x-axis to time-col attribute from an IamDataFrame, else use \"year\"\n x = x or time_col_or_year(df)\n\n # cast to DataFrame if necessary\n # TODO: select only relevant meta columns\n if not isinstance(df, pd.DataFrame):\n df = df.as_pandas()\n\n for col in set(SORT_IDX) - set([x, bars]):\n if len(df[col].unique()) > 1:\n msg = \"Can not plot multiple {}s in bar plot with x={}, bars={}\"\n raise ValueError(msg.format(col, x, bars))\n\n if ax is None:\n fig, ax = plt.subplots()\n\n # long form to one column per bar group\n _df = reshape_mpl(df, x, y, bars, **{x: order, bars: bars_order})\n\n # explicitly get colors\n defaults = default_props(reset=True, num_colors=len(_df.columns), colormap=cmap)[\n \"color\"\n ]\n rc = run_control()\n color = []\n for key in _df.columns:\n c = next(defaults)\n if \"color\" in rc and bars in rc[\"color\"] and key in rc[\"color\"][bars]:\n c = rc[\"color\"][bars][key]\n color.append(c)\n\n # change year to str to prevent pandas/matplotlib from auto-ordering (#474)\n if _df.index.name == \"year\":\n _df.index = map(str, _df.index)\n\n # plot data\n kind = \"bar\" if orient.startswith(\"v\") else \"barh\"\n _df.plot(kind=kind, color=color, ax=ax, **kwargs)\n\n # add legend\n ax.legend(loc=\"center left\", bbox_to_anchor=(1.0, 0.5))\n if not legend:\n ax.legend_.remove()\n\n # add default labels if possible\n if orient == \"v\":\n ax.set_xlabel(x.capitalize())\n else:\n ax.set_ylabel(x.capitalize())\n units = df[\"unit\"].unique()\n if len(units) == 1 and y == \"value\":\n if orient == \"v\":\n ax.set_ylabel(units[0])\n else:\n ax.set_xlabel(units[0])\n\n # build a default 
title if possible\n _title = []\n for var in [\"model\", \"scenario\", \"region\", \"variable\"]:\n values = df[var].unique()\n if len(values) == 1:\n _title.append(\"{}: {}\".format(var, values[0]))\n if title and _title:\n title = \" \".join(_title) if title is True else title\n ax.set_title(title)\n\n return ax", "def __init__(self, output_path, input_path, config_dir, labels, conditions_dict, axes_vars):\n super(VegaGraphBar, self).__init__(\n output_path, input_path, config_dir, labels, conditions_dict, axes_vars)\n # the graph type set as the name of the class\n self.graph_type = \"bar\"", "def barplot(self, name: str, y_label: str, img_title: str):\n path = C.TEST_DIR\n\n sns.set(style='whitegrid')\n sns.set_palette(sns.color_palette(C.IRT_COLORS))\n df = pd.read_csv(path + name + '.csv')\n ax = sns.barplot(data=df)\n ax.set(ylabel=y_label, title=img_title)\n\n self.save_plot(name)\n plt.show()", "def plot_categorical(self, plot_categorical_columns=None):\n assert isinstance(\n plot_categorical_columns, (list, type(None))\n ), \"plot_categorical_columns should be of type list\"\n\n # Count distinct values in each column\n col_nunique = self.df_baseline.nunique()\n\n # won't plot categoricals with more than 20 values\n if plot_categorical_columns is None:\n plot_categorical_columns = [\n col\n for col in col_nunique.index\n if ((col_nunique[col] <= 20) & (col in self.categorical_columns))\n ]\n\n logger.info(\n \"Plotting the following categorical column(s):\", plot_categorical_columns\n )\n\n fig, ax = plt.subplots(\n len(plot_categorical_columns),\n 2,\n figsize=(10, 5 * len(plot_categorical_columns)),\n )\n\n for i, col in enumerate(plot_categorical_columns):\n\n if len(plot_categorical_columns) == 1:\n _ax0 = ax[0]\n _ax1 = ax[1]\n elif len(plot_categorical_columns) > 1:\n _ax0 = ax[i, 0]\n _ax1 = ax[i, 1]\n\n # Get all values and counts from baseline and sample dfs\n df_baseline_values = (\n self.df_baseline[col]\n .value_counts(normalize=True, dropna=False)\n .index.values\n )\n df_sample_values = (\n self.df_sample[col]\n .value_counts(normalize=True, dropna=False)\n .index.values\n )\n\n # Get all unique values in the union of both lists above\n all_values = np.union1d(df_baseline_values, df_sample_values)\n\n # recount values in each df to include missing values in each - impute by zero\n df_baseline_values = (\n self.df_baseline[col].value_counts()[all_values].fillna(0)\n )\n df_sample_values = self.df_sample[col].value_counts()[all_values].fillna(0)\n\n # generate side-by-side barplots\n (\n df_baseline_values.rename(\"Proportion\")\n .sort_index()\n .reset_index()\n .pipe((sns.barplot, \"data\"), x=\"index\", y=\"Proportion\", ax=_ax0)\n )\n _ax0.set_title(col + \", baseline\")\n _ax0.set(xlabel=col)\n (\n df_sample_values.rename(\"Proportion\")\n .sort_index()\n .reset_index()\n .pipe((sns.barplot, \"data\"), x=\"index\", y=\"Proportion\", ax=_ax1)\n )\n _ax1.set(xlabel=col)\n _ax1.set_title(col + \", sample\")\n\n plt.close(fig)\n\n return fig", "def bar(variable, name, data=None, x_v=None, color_set=custom_bw,\n ax_size=(20, 6), highlight=None, ax=None):\n\n common_set_up(ax_size) # Apply basic plot style\n\n fig = sns.barplot(x=x_v, y=variable, data=data, saturation=1, ax=ax,\n color=color_set[2], label=name,\n )\n\n sns.despine(offset=2, trim=True, left=True, bottom=True)\n\n # Set title and axes\n title_color = '#192231'\n font_colour = '#9099A2'\n if ax is None:\n fig.set_title('{0}'.format(name),\n fontsize=20, color=title_color)\n fig.set_ylabel('Frequency',\n 
color=font_colour)\n fig.set_xlabel('{0}'.format(name),\n color=font_colour)\n\n if highlight:\n bars = fig.patches\n bars[highlight].set_color(color_set[1])\n\n return fig", "def plot_meta_counts(self, meta_field, normalize=False, sort_values=True):\n counts = self.sample_meta[meta_field].value_counts(normalize=normalize)\n colname = \"counts\" if normalize is False else \"frequency\"\n df = pd.DataFrame({meta_field: counts.index.values, colname: counts.values})\n return barplot(df=df, x=meta_field, y=colname)", "def _plot_group_bars(ax, xss, field, side):\n\n #translate side to index\n if(side == 'left'):\n side = 0\n else:\n side = 1\n #plot the bars\n x = range(len(xss))\n values = [xs.ROW_edge_fields[field].values[side] for xs in xss]\n ax.bar(x, values, color=[_colormap[i%len(_colormap)] for i in range(len(xss))],\n bottom=0.0, align='center', alpha=0.8, width=0.6)\n ax.set_xticks(x)\n ax.set_xticklabels([textwrap.fill(xs.sheet, 15) for xs in xss],\n rotation='vertical', fontsize=11)" ]
[ "0.62241316", "0.6152364", "0.5592704", "0.5530848", "0.55261475", "0.5514559", "0.5509452", "0.5494726", "0.549077", "0.54689884", "0.5450162", "0.5434566", "0.5409188", "0.538418", "0.536517", "0.5325083", "0.5314956", "0.5302128", "0.5273056", "0.5260679", "0.5229185", "0.5213955", "0.51878846", "0.5171707", "0.5153558", "0.5146117", "0.51449406", "0.51327956", "0.51080096", "0.5105418" ]
0.8023683
0
divide the figure in (nx, ny) subplots
def __init__(self, nx, ny, nxsize=5.4, nysize=6.2):
    self.nx = nx
    self.ny = ny
    self.n = 1
    plt.figure(figsize=(nysize*ny, nxsize*nx))
    plt.subplot(nx, ny, self.n)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_four_subplots():\n pass", "def subplots(nrows=1, ncols=1, sharex=False, sharey=False, squeeze=True, apportion = None, debug = 0, *args, **kwargs):\n #Note: we use squeeze = False internally, then return axes according to the keyword\n fig, axes = pylab_subplots(nrows=nrows, ncols=ncols, sharex=sharex, sharey=sharey, squeeze=False, *args, **kwargs)\n nrows = len(axes[:,0].flatten())\n # start with even allocation of unity\n fracts = np.ones(nrows)\n # if just one arg, that is the first allocation\n if apportion != None:\n if len(np.shape(apportion)) == 0:\n fracts[0]=apportion\n # fill up the rest\n for (i,a) in enumerate(apportion):\n if i<nrows: fracts[i] = a\n # now make into a fractions\n fracts = fracts/np.sum(fracts)\n\n #loop over axes, bottom to top, extract the space below and the height for each (ignore space above\n above = [] ; height = []\n lasty = 1\n for (i,ax) in enumerate(axes[:,0]):\n bb = ax.get_position().get_points()\n pos = bb.flatten() \n height.append(pos[3]-pos[1])\n above.append(lasty - pos[3] )\n lasty = pos[1]\n\n# loop again, building down from top according to new share, keep margins\n yabove_0 = 1 # the norm. y coord of the bottom of the above graph\n print(above, height)\n for col in range(np.shape(axes)[1]):\n for (i,ax) in enumerate(axes[:,col]):\n if (i==0): yabove = yabove_0\n bb = ax.get_position().get_points()\n pos = bb.flatten() \n # convert to x0,y0, dx, dy form by subtracting origin\n newh = height[i]*fracts[i]*nrows\n\n pos[1] = yabove - newh - above[i]\n pos[3] = newh\n pos[2] = pos[2] - pos[0]\n yabove = pos[1]\n if debug>0: print(pos)\n ax.set_position(pos)\n\n if squeeze: \n if len(np.shape(axes[0]))==0: axes = axes.flatten() \n if len(axes) == 1: axes = axes[0]\n return(fig, axes)", "def subplots(fig_width=None, fig_height=None, *args, **kwargs):\n fig_width, fig_height = get_width_height(fig_width, fig_height, columns=2)\n fig, axes = plt.subplots(figsize=(fig_width, fig_height), *args, **kwargs)\n return fig, axes", "def _make_subplots(n_plots, max_cols=5, row_height=3, sharex=False, sharey=False):\n n_rows, n_cols = find_pretty_grid(n_plots, max_cols=max_cols)\n fig, axes = plt.subplots(n_rows, n_cols,\n figsize=(4 * n_cols, row_height * n_rows),\n constrained_layout=True,\n sharex=sharex, sharey=sharey)\n # we don't want ravel to fail, this is awkward!\n axes = np.atleast_2d(axes)\n return fig, axes", "def subplots(\n self,\n nrows=1,\n ncols=1,\n width_ratios=None,\n height_ratios=None,\n wspace=None,\n hspace=None,\n ):\n if width_ratios is not None and len(width_ratios) != ncols:\n raise ValueError(\n \"Expected the given number of width ratios to match the \"\n \"number of columns of the grid\"\n )\n if height_ratios is not None and len(height_ratios) != nrows:\n raise ValueError(\n \"Expected the given number of height ratios to match the \"\n \"number of rows of the grid\"\n )\n\n if wspace is not None:\n warnings.warn(\"The 'wspace' option is not implemented\", stacklevel=2)\n if hspace is not None:\n warnings.warn(\"The 'hspace' option is not implemented\", stacklevel=2)\n\n if width_ratios is None:\n width_ratios = [1] * ncols\n\n if height_ratios is None:\n height_ratios = [1] * nrows\n\n # x and y pad limits, starting at 0 and ending at 1\n # Reverse `height_ratios` to make row ordering correct (top to bottom)\n xpos = np.cumsum(([0] + width_ratios) / np.sum(width_ratios))\n ypos = np.cumsum(([0] + height_ratios[::-1]) / np.sum(height_ratios))\n\n # ROOT requires TH1s and TPads to have unique names; use random 
string\n rand_str = _rand_str()\n\n # Create empty array to store Axes objects in grid\n axs = np.ndarray((nrows, ncols), dtype=object)\n\n for row in range(nrows):\n for col in range(ncols):\n # Construct input arguments to TPad constructor\n pad_args = (\n \"PAD_{}_{}_{}\".format(rand_str, row, col), # name\n \"\", # title\n xpos[col], # xlow\n ypos[-row - 2], # ylow\n xpos[col + 1], # xup\n ypos[-row - 1], # yup\n )\n axs[row, col] = Axes(pad_args)\n self._canvas.cd() # Go back to main canvas\n\n self._axes = axs\n\n self._canvas.Modified()\n\n # If grid has only one column or one row, flatten the returned array\n if ncols == 1 and nrows != 1:\n axs = np.reshape(axs, (1, nrows))[0]\n elif ncols != 1 and nrows == 1:\n axs = np.reshape(axs, (1, ncols))[0]\n elif ncols == 1 and nrows == 1:\n axs = axs[0, 0]\n\n return axs", "def plot_main(self):\n\n f, axes = plt.subplots(2, 3, figsize=(16, 8))\n self.data_plot(ax=axes[0, 0])\n self.model_plot(ax=axes[0, 1])\n self.normalized_residual_plot(ax=axes[0, 2], v_min=-6, v_max=6)\n self.source_plot(ax=axes[1, 0], convolution=False, deltaPix_source=0.01, numPix=100)\n self.convergence_plot(ax=axes[1, 1], v_max=1)\n self.magnification_plot(ax=axes[1, 2])\n f.tight_layout()\n f.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0., hspace=0.05)\n return f, axes", "def __call__(self, i):\n plt.subplot(self.nx, self.ny, i)\n return True", "def add_subplot(gridRows, gridCols, plotNo):\n pl.subplot(gridRows, gridCols, plotNo)", "def axes_subplots():\n # gerenate data\n x = np.arange(0, 6 * np.pi+0.2, 0.2)\n y_1 = np.cos(x)\n y_2 = np.sin(2*x)\n y_3 = y_1 + y_2\n\n # display multiple\n fig, axs = plt.subplots(3, 1, sharex=True)\n fig.suptitle('Subplots w/ shared axes')\n axs[0].plot(x, y_1)\n axs[1].plot(x, y_2)\n axs[2].plot(x, y_3)\n axs[0].set_ylabel('$y$')\n axs[1].set_ylabel('$y$')\n axs[2].set_ylabel('$y$')\n\n plt.show()\n\n return None", "def plotgrid(data,d=10,shape=(30,30)):\n ion()\n gray()\n clf()\n for i in range(min(d*d,len(data))):\n subplot(d,d,i+1)\n row = data[i]\n if shape is not None: row = row.reshape(shape)\n imshow(row)\n ginput(1,timeout=0.1)", "def plot_gridSubplot(shape, loc, colspan=1, rowspan=1):\n return plt.subplot2grid(shape=shape,\n loc=loc,\n colspan=colspan,\n rowspan=rowspan)", "def makeQuadSubplots(df_rad_obs, \n df_dir_obs, \n df_rad_sen, \n df_dir_sen, \n suptitle='Big title',\n eps=3, \n min_samples=50):\n fig, axs = plt.subplots(2, 2, \n figsize=(10,10)\n )\n\n fig.suptitle('Clustering Output', fontsize=20)\n\n populateSubPlot(df=df_rad_obs,\n eps=eps, \n min_samples=min_samples,\n fig=fig, \n axs=axs, \n row=0, \n col=0, title='Obsever Wards Radiant')\n\n\n populateSubPlot(df=df_dir_obs, \n eps=eps, \n min_samples=min_samples,\n fig=fig, \n axs=axs, \n row=0, \n col=1, title='Obsever Wards Dire')\n\n\n populateSubPlot(df=df_rad_sen, \n eps=eps, \n min_samples=min_samples,\n fig=fig, \n axs=axs, \n row=1, \n col=0, title='Sentry Wards Radiant')\n\n populateSubPlot(df=df_dir_sen, \n eps=eps, \n min_samples=min_samples,\n fig=fig, \n axs=axs, \n row=1, \n col=1, title='Sentry Wards Dire')\n \n \n return fig, axs", "def __init__(self):\n self.fig = pl.figure(1,figsize=(8,6), dpi=80 , frameon = True , facecolor = '0.75' , edgecolor = 'w')\n self.fig.add_subplot(111 , axisbg = 'w' , projection = 'rectilinear') #if you want to add axes on particular place: fig.add_axes([0.15, 0.1, 0.7, 0.3]) where -> [begin , bottom to start axes , width , height ]\n self.separated = True #if we have a list and need to 
plot the plots separated", "def _plot_grid(frames: Figure, ncols: int = 3) -> Figure:\n for frame in frames:\n frame.plot_height = frame.plot_height // ncols\n frame.plot_width = frame.plot_width // ncols\n return gridplot(frames, ncols=ncols)", "def make_subplots(self, mode, n_subplots, data, axes=None, figure=None):\n if axes is None and figure is not None:\n axes = figure.axes\n\n if axes is None:\n if self.config['ncols'] is None or self.config['nrows'] is None:\n self.config['ncols'], self.config['nrows'] = self.infer_ncols_nrows(n_subplots, **self.config)\n\n if self.config['figsize'] is None:\n self.config['figsize'] = self.infer_figure_size(mode, n_subplots, data, **self.config)\n\n figure_keys = ['figsize', 'ncols', 'nrows', 'facecolor', 'dpi', 'tight_layout', 'sharex', 'sharey']\n figure_config = self.config.filter(keys=figure_keys, prefix='figure_')\n figure, axes = plt.subplots(**figure_config)\n axes = to_list(axes)\n else:\n axes = to_list(axes)\n if len(axes) < n_subplots:\n raise ValueError(f\"Not enough axes provided — got ({len(axes)}) for {n_subplots} subplots.\")\n\n figure = axes[0].figure\n ncols, nrows = figure.axes[0].get_subplotspec().get_gridspec().get_geometry()\n figure_config = {\n 'ncols': ncols,\n 'nrows': nrows,\n 'figsize': figure.get_size_inches(),\n 'dpi': figure.dpi\n }\n\n subplots = [Subplot(self, ax=ax, index=ax_num) for ax_num, ax in enumerate(axes)]\n\n return figure, subplots, figure_config", "def canvas(*args, figsize=(6, 4)):\n\n assert len(args), 'Args required'\n assert len(args) <= 2, 'Too many args'\n \n if len(args) == 2:\n nrow = args[0]\n ncol = args[1]\n else:\n npanel = args[0]\n nrow = int(np.sqrt(npanel))\n ncol = int(npanel/nrow) + min(1, npanel%nrow)\n \n return plt.subplots(\n nrow, ncol, \n figsize=(figsize[0]*ncol, figsize[1]*nrow), \n constrained_layout=False,\n squeeze=False,\n )", "def get_ax(rows=1, cols=1, size=8):\n fig , ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))\n return fig,ax", "def test_sizergrid():\n regular_grid(8, 3)\n mpl.show()", "def plot(self):\n attr = self.Graph[\"root\"]\n if (self.type == 0 or self.type == 1):\n self.subplot_1(attr, 0)\n else:\n self.subplot_2(attr, 0)", "def num_to_subplots_axes(num):\n cols = int(math.ceil(math.sqrt(num)))\n rows = int(math.ceil(float(num) / cols))\n return rows, cols", "def show(self):\n \n \n \n \n \n \n r = 4\n f, axarr = plt.subplots(r, r, figsize=(8,8))\n counter = 0\n for i in range(r):\n for j in range(r):\n temp = self.x[counter,:]\n counter += 1\n img = self.x[counter,:]\n axarr[i][j].imshow(img)\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################", "def prepare_subplot(fig, point_list, cols, rows, number, lims):\n ax = fig.add_subplot(cols, rows, number)\n ax.scatter(list(map(lambda e: e[0], point_list)), list(map(lambda e: e[1], point_list)), s=9)\n ax.set_xlim(lims[0], lims[1])\n ax.set_ylim(lims[0], lims[1])\n return ax", "def frame():\n fig = plt.figure(figsize = (6, 3))\n\n plt.subplots_adjust(left=.15, bottom=.2, right=.95, top=.9)\n ax = fig.add_subplot(111)\n \n ax.tick_params(axis=\"x\", labelsize=12)\n ax.tick_params(axis=\"y\", labelsize=12)\n\n return fig, ax", "def plot_setup(rows, cols, d=0, buffer=(0, 0)):\n # setup plot\n plt.close('all')\n mpl.rcParams['axes.labelsize'] = 'large'\n mpl.rcParams['ytick.labelsize'] = 'x-small'\n mpl.rcParams['xtick.labelsize'] = 'x-small'\n 
mpl.rcParams['figure.subplot.wspace'] = buffer[0]\n mpl.rcParams['figure.subplot.hspace'] = buffer[1]\n\n plt.rc('text', usetex=True)\n plt.rc('font', family='serif')\n\n figsize = (6 * cols + buffer[0], 5.5 * rows + buffer[1])\n fig, axs = plt.subplots(nrows=rows, ncols=cols, figsize=figsize)\n if d is not 0:\n axs = trim_axes(axs, d)\n\n return fig, axs", "def plot_slices(num_rows, num_columns, width, height, data):\n# data = np.rot90(np.array(data))\n data = np.transpose(data)\n data = np.reshape(data, (num_rows, num_columns, width, height))\n rows_data, columns_data = data.shape[0], data.shape[1]\n heights = [slc[0].shape[0] for slc in data]\n widths = [slc.shape[1] for slc in data[0]]\n fig_width = 12.0\n fig_height = fig_width * sum(heights) / sum(widths)\n f, axarr = plt.subplots(\n rows_data,\n columns_data,\n figsize=(fig_width, fig_height),\n gridspec_kw={\"height_ratios\": heights},\n )\n for i in range(rows_data):\n for j in range(columns_data):\n axarr[i, j].imshow(data[i][j], cmap=\"gray\")\n axarr[i, j].axis(\"off\")\n plt.subplots_adjust(wspace=0, hspace=0, left=0, right=1, bottom=0, top=1)\n plt.show()", "def plot_all(self, cmap='Greys', size=(10,10)):\n\n fig, ((ax0, ax1), (ax2, ax3)) = plt.subplots(nrows=2,\n ncols=2,\n sharex=True,\n sharey=True)\n\n ax0.imshow(self.I, cmap=cmap)\n ax0.set_title(f'Original {self.I.shape}',\n fontsize=15)\n ax1.imshow(self.W, cmap=cmap)\n ax1.set_title(f'W Loadings {self.W.shape}',\n fontsize=15)\n ax2.imshow(self.H, cmap=cmap)\n ax2.set_title(f'H Loadings {self.H.shape}',\n fontsize=15)\n ax3.imshow(self.E, cmap=cmap)\n ax3.set_title(f'W * H with n={self._n_components} {self.E.shape}',\n fontsize=15)\n\n fig.set_figheight(size[0])\n fig.set_figwidth(size[1])\n fig.tight_layout()\n plt.show()", "def make_figure(shape=1,figsize=None,**kwargs):\n\n if np.size(shape)==1:\n nrows,ncols = (1,shape)\n elif np.size(shape)==2:\n nrows,ncols = shape\n else:\n print('Invalid shape')\n return\n\n if figsize is None:\n figsize = (10*ncols, 10*nrows)\n\n fig,axes = plt.subplots(nrows,ncols,figsize=figsize,**kwargs)\n\n fig.subplots_adjust(left=0.05,right=0.95,\n bottom=0.05,top=0.9,\n wspace=0.05) \n \n return fig,axes", "def plot_gallery(images , h, w, n_row=3, n_col=6):\n plt.figure(figsize=(1.7 * n_col, 2.3 * n_row))\n plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)\n for i in range(len(images)):\n plt.subplot(n_row, n_col, i + 1)\n plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)\n\n plt.xticks(())\n plt.yticks(())", "def getSetup(figsize, gridd, multz=None, empts=None):\n sns.set(style=\"whitegrid\", font_scale=0.7, color_codes=True, palette=\"colorblind\", rc={\"grid.linestyle\": \"dotted\", \"axes.linewidth\": 0.6})\n\n # create empty list if empts isn't specified\n if empts is None:\n empts = []\n\n if multz is None:\n multz = dict()\n\n # Setup plotting space and grid\n f = plt.figure(figsize=figsize, constrained_layout=True)\n gs1 = gridspec.GridSpec(*gridd, figure=f)\n\n # Get list of axis objects\n x = 0\n ax = list()\n while x < gridd[0] * gridd[1]:\n if x not in empts and x not in multz.keys(): # If this is just a normal subplot\n ax.append(f.add_subplot(gs1[x]))\n elif x in multz.keys(): # If this is a subplot that spans grid elements\n ax.append(f.add_subplot(gs1[x: x + multz[x] + 1]))\n x += multz[x]\n x += 1\n\n return (ax, f)", "def generate_image_grid(sess, df, filenames,op, op2):\n #x_points = np.arange(0, 1, 1.5).astype(np.float32)\n #y_points = np.arange(0, 1, 1.5).astype(np.float32)\n\n nx, ny = 
12, 1\n #plt.subplot()\n gs = gridspec.GridSpec(nx, ny, hspace=1, wspace=0.05)\n # input_x = sess.run(op2, feed_dict={x_input: df[0:24]})\n #\n # plt.imshow(np.array(df[0].tolist()).reshape(28, 28), cmap='gray')\n # plt.show()\n # x = sess.run(op, feed_dict={decoder_input: input_x[0].reshape(1,2)})\n # img = np.array(x.tolist()).reshape(28, 28)\n #\n # plt.imshow(img, cmap='gray')\n # plt.show()\n\n \"\"\" grid \"\"\"\n input_x = sess.run(op2, feed_dict={x_input: df[0:24]})\n for i, g in enumerate(gs):\n\n x = sess.run(op, feed_dict={decoder_input: input_x[i].reshape(1,2)})\n ax = plt.subplot(g)\n img = np.array(x.tolist()).reshape(28, 28)\n ax.imshow(img, cmap='gray')\n ax.set_xticks([])\n ax.set_yticks([])\n #ax.set_aspect('auto')\n ax.set_title(filenames[i])\n plt.show()\n\n for i, g in enumerate(gs):\n\n ax = plt.subplot(g)\n img = np.array(df[i].tolist()).reshape(28, 28)\n ax.imshow(img, cmap='gray')\n ax.set_xticks([])\n ax.set_yticks([])\n #ax.set_aspect('auto')\n ax.set_title(filenames[i])\n plt.show()" ]
[ "0.7502299", "0.71985096", "0.6763827", "0.6570539", "0.65698874", "0.6540221", "0.6515875", "0.6473783", "0.6385505", "0.6382876", "0.6320926", "0.6312594", "0.6271411", "0.6214693", "0.6210137", "0.6206027", "0.620597", "0.6148369", "0.61265504", "0.6113325", "0.6111227", "0.6081227", "0.6077908", "0.6057943", "0.6050793", "0.60497457", "0.6045181", "0.6034337", "0.6027978", "0.60080105" ]
0.7220459
1
locate in subplot i in figure
def __call__(self, i):
    plt.subplot(self.nx, self.ny, i)
    return True
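This __call__ pairs with the __init__ from the previous example row; a self-contained sketch of the combined grid helper and its use (the class name GridFigure is made up here):

    import matplotlib.pyplot as plt

    class GridFigure:
        # Hypothetical wrapper combining the __init__ and __call__ shown in these rows.
        def __init__(self, nx, ny, nxsize=5.4, nysize=6.2):
            self.nx = nx
            self.ny = ny
            self.n = 1
            plt.figure(figsize=(nysize*ny, nxsize*nx))
            plt.subplot(nx, ny, self.n)

        def __call__(self, i):
            plt.subplot(self.nx, self.ny, i)
            return True

    grid = GridFigure(2, 3)      # 2 rows x 3 columns of subplots
    for i in range(1, 7):
        grid(i)                  # activate subplot i
        plt.plot([0, 1], [0, i])
    plt.show()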
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subplotInterface(self, annotate = True, idx = 0, verbose = 1,\\\n align_base = \"cell_1\", scale = False, save = False,\\\n format = \"pdf\", dpi = 100, row = None,\\\n col = None):\n\n\n if type(idx) != list:\n \"\"\"Just send it to the standard plotInterfaces\"\"\"\n self.plotInterface(idx = idx, annotate = annotate,\\\n verbose = verbose, align_base = align_base,\\\n scale = scale, save = save, format = format,\\\n dpi = dpi, handle = False)\n return\n\n if verbose > 0:\n self.printInterfaces(idx)\n\n if row is None and col is None:\n col = len(idx)\n row = 1\n elif col is None:\n col = np.int(np.ceil(len(idx) / row))\n elif row is None:\n row = np.int(np.ceil(len(idx) / col))\n\n hFig = plt.figure()\n for N, item in enumerate(idx):\n\n self.plotInterface(annotate = annotate, idx = item, verbose = verbose - 1,\\\n align_base = align_base, scale = scale, save = False,\\\n handle = True, col = col, row = row, N = N+1)\n\n plt.tight_layout(h_pad = 0.3, w_pad = 0.3)\n if save:\n if save is True:\n add = \"\"\n for i in idx:\n add += \"_%s\" % i\n ut.save_fig(filename = \"interface%s.%s\" % (add, format), format = format,\\\n dpi = dpi, verbose = verbose)\n else:\n ut.save_fig(filename = save, format = format, dpi = dpi,\\\n verbose = verbose)\n plt.close()\n else:\n plt.show()", "def __init__(self, subplot_objects):\n self.subplot_objects = subplot_objects", "def subplots(nrows=1, ncols=1, sharex=False, sharey=False, squeeze=True, apportion = None, debug = 0, *args, **kwargs):\n #Note: we use squeeze = False internally, then return axes according to the keyword\n fig, axes = pylab_subplots(nrows=nrows, ncols=ncols, sharex=sharex, sharey=sharey, squeeze=False, *args, **kwargs)\n nrows = len(axes[:,0].flatten())\n # start with even allocation of unity\n fracts = np.ones(nrows)\n # if just one arg, that is the first allocation\n if apportion != None:\n if len(np.shape(apportion)) == 0:\n fracts[0]=apportion\n # fill up the rest\n for (i,a) in enumerate(apportion):\n if i<nrows: fracts[i] = a\n # now make into a fractions\n fracts = fracts/np.sum(fracts)\n\n #loop over axes, bottom to top, extract the space below and the height for each (ignore space above\n above = [] ; height = []\n lasty = 1\n for (i,ax) in enumerate(axes[:,0]):\n bb = ax.get_position().get_points()\n pos = bb.flatten() \n height.append(pos[3]-pos[1])\n above.append(lasty - pos[3] )\n lasty = pos[1]\n\n# loop again, building down from top according to new share, keep margins\n yabove_0 = 1 # the norm. 
y coord of the bottom of the above graph\n print(above, height)\n for col in range(np.shape(axes)[1]):\n for (i,ax) in enumerate(axes[:,col]):\n if (i==0): yabove = yabove_0\n bb = ax.get_position().get_points()\n pos = bb.flatten() \n # convert to x0,y0, dx, dy form by subtracting origin\n newh = height[i]*fracts[i]*nrows\n\n pos[1] = yabove - newh - above[i]\n pos[3] = newh\n pos[2] = pos[2] - pos[0]\n yabove = pos[1]\n if debug>0: print(pos)\n ax.set_position(pos)\n\n if squeeze: \n if len(np.shape(axes[0]))==0: axes = axes.flatten() \n if len(axes) == 1: axes = axes[0]\n return(fig, axes)", "def subplot_fit(self):\r\n\r\n self.open_subplot_figure(number_subplots=12)\r\n\r\n self.figures_2d(data=True)\r\n\r\n self.set_title(label=\"Data (Source Scale)\")\r\n self.figures_2d(data=True, use_source_vmax=True)\r\n self.set_title(label=None)\r\n\r\n self.figures_2d(signal_to_noise_map=True)\r\n self.figures_2d(model_image=True)\r\n\r\n self.set_title(label=\"Lens Light Model Image\")\r\n self.figures_2d_of_planes(plane_index=0, model_image=True)\r\n\r\n # If the lens light is not included the subplot index does not increase, so we must manually set it to 4\r\n self.mat_plot_2d.subplot_index = 6\r\n\r\n final_plane_index = len(self.fit.tracer.planes) - 1\r\n\r\n self.mat_plot_2d.cmap.kwargs[\"vmin\"] = 0.0\r\n\r\n self.set_title(label=\"Lens Light Subtracted Image\")\r\n self.figures_2d_of_planes(plane_index=final_plane_index, subtracted_image=True, use_source_vmax=True)\r\n\r\n self.set_title(label=\"Source Model Image (Image Plane)\")\r\n self.figures_2d_of_planes(plane_index=final_plane_index, model_image=True, use_source_vmax=True)\r\n\r\n self.mat_plot_2d.cmap.kwargs.pop(\"vmin\")\r\n\r\n self.set_title(label=\"Source Plane (Zoomed)\")\r\n self.figures_2d_of_planes(plane_index=final_plane_index, plane_image=True, use_source_vmax=True)\r\n\r\n\r\n self.set_title(label=None)\r\n\r\n self.mat_plot_2d.subplot_index = 9\r\n\r\n self.figures_2d(normalized_residual_map=True)\r\n\r\n self.mat_plot_2d.cmap.kwargs[\"vmin\"] = -1.0\r\n self.mat_plot_2d.cmap.kwargs[\"vmax\"] = 1.0\r\n\r\n self.set_title(label=\"Normalized Residual Map (1 sigma)\")\r\n self.figures_2d(normalized_residual_map=True)\r\n self.set_title(label=None)\r\n\r\n self.mat_plot_2d.cmap.kwargs.pop(\"vmin\")\r\n self.mat_plot_2d.cmap.kwargs.pop(\"vmax\")\r\n\r\n self.figures_2d(chi_squared_map=True)\r\n\r\n self.set_title(label=\"Source Plane (No Zoom)\")\r\n self.figures_2d_of_planes(\r\n plane_index=final_plane_index,\r\n plane_image=True,\r\n zoom_to_brightest=False,\r\n use_source_vmax=True\r\n )\r\n\r\n self.set_title(label=None)\r\n\r\n self.mat_plot_2d.output.subplot_to_figure(\r\n auto_filename=\"subplot_fit\"\r\n )\r\n self.close_subplot_figure()", "def subplot(plot_function):\n @functools.wraps(plot_function)\n def f(self, ax=None, **kwargs):\n if ax is None:\n fig, ax = plt.subplots()\n plot_function(self, ax, **kwargs)\n return fig\n else:\n return plot_function(self, ax, **kwargs)\n return f", "def subplot_to_figure(self):\n if self.format is \"show\":\n plt.show()\n elif self.format is \"png\":\n plt.savefig(self.path + self.filename + \".png\", bbox_inches=\"tight\")", "def plot(self):\n attr = self.Graph[\"root\"]\n if (self.type == 0 or self.type == 1):\n self.subplot_1(attr, 0)\n else:\n self.subplot_2(attr, 0)", "def new_subplotspec(self, loc, rowspan=1, colspan=1):\n loc1, loc2 = loc\n subplotspec = self[loc1:loc1+rowspan, loc2:loc2+colspan]\n return subplotspec", "def show(self):\n \n \n \n \n \n \n r = 4\n f, 
axarr = plt.subplots(r, r, figsize=(8,8))\n counter = 0\n for i in range(r):\n for j in range(r):\n temp = self.x[counter,:]\n counter += 1\n img = self.x[counter,:]\n axarr[i][j].imshow(img)\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################", "def plot_xy(self, subplot = None, center_of_gravity = False, marker = \"b-\"):\n if subplot is None:\n plot = plt.subplot(111)\n\n for mass in self.flatten():\n x,y = mass.geometry.project_xy.plot_coordinates\n subplot.plot(x, y, marker)\n\n if center_of_gravity:\n x, y, _ = mass.center_of_gravity_global.as_tuple()\n subplot.plot(x, y,\"*\")\n\n x, y, _ = self.center_of_gravity_global.as_tuple()\n subplot.plot(x, y, \"o\")\n\n return subplot", "def add_figure(self,sig,index,title='',xlabel='',ylabel=''):\n self.last_index = index\n ax = self.fig.add_subplot(self.position+index)\n ax.set_title(title)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n ax.plot(sig)", "def __init__(self):\n self.fig = pl.figure(1,figsize=(8,6), dpi=80 , frameon = True , facecolor = '0.75' , edgecolor = 'w')\n self.fig.add_subplot(111 , axisbg = 'w' , projection = 'rectilinear') #if you want to add axes on particular place: fig.add_axes([0.15, 0.1, 0.7, 0.3]) where -> [begin , bottom to start axes , width , height ]\n self.separated = True #if we have a list and need to plot the plots separated", "def subplot_labels(plot):\n a = plt.text(0.05, 0.8, '(a)', fontsize='x-large', weight='bold',\n horizontalalignment='center', verticalalignment='center',\n transform=plot.ax_th.transAxes)\n b = plt.text(0.065, 0.80, '(b)', fontsize='x-large', weight='bold',\n horizontalalignment='center', verticalalignment='center',\n transform=plot.ax_lon.transAxes)\n c = plt.text(0.30, 0.80, '(c)', fontsize='x-large', weight='bold',\n horizontalalignment='center', verticalalignment='center',\n transform=plot.ax_hist.transAxes)\n d = plt.text(0.065, 0.95, '(d)', fontsize='x-large', weight='bold',\n horizontalalignment='center', verticalalignment='center',\n transform=plot.ax_plan.transAxes)\n e = plt.text(0.30, 0.95, '(e)', fontsize='x-large', weight='bold',\n horizontalalignment='center', verticalalignment='center',\n transform=plot.ax_lat.transAxes)\n return [a,b,c,d,e]", "def subplot(\r\n self,\r\n data: bool = False,\r\n noise_map: bool = False,\r\n signal_to_noise_map: bool = False,\r\n model_data: bool = False,\r\n residual_map: bool = False,\r\n normalized_residual_map: bool = False,\r\n chi_squared_map: bool = False,\r\n auto_filename: str = \"subplot_fit\",\r\n ):\r\n self._subplot_custom_plot(\r\n data=data,\r\n noise_map=noise_map,\r\n signal_to_noise_map=signal_to_noise_map,\r\n model_image=model_data,\r\n residual_map=residual_map,\r\n normalized_residual_map=normalized_residual_map,\r\n chi_squared_map=chi_squared_map,\r\n auto_labels=AutoLabels(filename=auto_filename),\r\n )", "def populateSubPlot(df,\n eps=3,\n min_samples=50,\n fig=None, \n axs=None, \n row=None, \n col=None,\n title='Some Ward',\n img=Image.open('maps/map_detailed_723.jpeg')\n ):\n #assign labels to data and get unique labels\n db_labels, unique_labels = getLabels(df,\n eps=eps, \n min_samples=min_samples)\n \n #set the title\n axs[row,col].set_title(title)\n \n #slap image on background\n axs[row,col].imshow(img, extent=[0, 128, 0, 128])\n\n #for each label and color\n for label, color in zip(unique_labels, colors):\n #places where label matches\n label_arg = 
np.argwhere(db_labels==label).ravel()\n #reduced version of where labels occur\n df_label_cluster = df.iloc[label_arg]\n #add scatter to plot\n axs[row,col].scatter(df_label_cluster['x'],\n df_label_cluster['y'], label=str(label),\n color=color)", "def add_figure1(self,x,y,index=1,title='',xlabel='',ylabel=''):\n self.last_index = index\n ax = self.fig.add_subplot(self.position+index)\n ax.set_title(title)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n ax.plot(x,y)", "def plot_subplots_vel(self, fig_num: int, title: str, y_label: str, vel: np.ndarray) -> matplotlib.figure.Figure:\n fig = plt.figure(fig_num)\n axs = fig.subplots(3, 1, sharex=True)\n marker_graph_init(axs, vel, y_label, self.frame_nums, color='blue')\n plt.tight_layout()\n fig.suptitle(title)\n make_interactive()\n return fig", "def plot_data_assemble(self,kwargs_seg, add_mask ,img_name='data.pdf',cutout_text='lensed image',font_size=28):\n mask = self.data_mask\n image = self.raw_image\n picked_data = self.data\n selem = np.ones((add_mask, add_mask))\n img_mask = ndimage.binary_dilation(mask.astype(np.bool), selem)\n fig, (ax1, ax2, ax3,ax4) = plt.subplots(1, 4, figsize=(19, 10))\n ax1.imshow(image, origin='lower', cmap=\"gist_heat\")\n ax1.set_title('Cutout Image',fontsize =font_size)\n ax1.text(image.shape[0] * 0.2, image.shape[0] * 0.05, cutout_text,size=20, color='white',weight=\"bold\")\n ax1.axis('off')\n segments_deblend_list, xcenter, ycenter, c_index=kwargs_seg\n ax2.imshow(segments_deblend_list, origin='lower')\n for i in range(len(xcenter)):\n ax2.text(xcenter[i] * 1.1, ycenter[i], 'Seg' + repr(i), size=20,color='w',weight=\"bold\")\n ax2.text(image.shape[0] * 0.2, image.shape[0] * 0.9, 'Seg' + repr(c_index) + ' ' + 'in center',\n size=20, color='white',weight=\"bold\")\n ax2.set_title('Segmentations',fontsize =font_size)\n ax2.axis('off')\n ax3.imshow(img_mask+mask, origin='lower',cmap=\"gist_heat\")\n ax3.set_title('Selected pixels',fontsize =font_size)\n ax3.text(image.shape[0] * 0.1, image.shape[0] * 0.05, 'pixels (S/N >' + repr(self.snr) + ')',size=20, color='white',weight=\"bold\")\n ax3.text(image.shape[0] * 0.1, image.shape[0] * 0.9, 'additional pixels', size=20, color='r',weight=\"bold\")\n ax3.axis('off')\n ax4.imshow(picked_data, origin='lower',cmap=\"gist_heat\")\n ax4.set_title('Processed Image',fontsize =font_size)\n ax4.axis('off')\n plt.show()\n fig.savefig(img_name)\n return 0", "def EventSubsetDisplay( tubes, quantities, PMTFlatMapPositive, tubes_to_plot, title=\"Charge\", cutrange=[-1,-1], padding=10):\n PMTFlatMapPositive_values = [PMTFlatMapPositive[tube] for tube in tubes_to_plot]\n subset_x_values = np.array([value[0] for value in PMTFlatMapPositive_values])\n subset_y_values = np.array([value[1] for value in PMTFlatMapPositive_values])\n \n # set up dimensions for subset preimage with short tank data\n min_subplot_x_value = subset_x_values.min() - padding\n max_subplot_x_value = subset_x_values.max() + padding\n\n min_subplot_y_value = subset_y_values.min() - padding\n max_subplot_y_value = subset_y_values.max() + padding\n \n fig, ax= plt.subplots(figsize=[30,30])\n preimage = np.zeros( preimage_dimensions )\n\n subset_quantities = []\n for idx, tube in enumerate( tubes ):\n if cutrange[0] != cutrange[1]:\n if quantities[idx] < cutrange[0] or quantities[idx] > cutrange[1]:\n continue\n for dx in range(-3,4):\n for dy in range(-3,4):\n if abs(dx)==3 and abs(dy)==3:\n continue\n if tube in tubes_to_plot: \n #print( \"idx=\", idx, \" len(quantities)=\",len(quantities), \" tube=\", tube, 
\" len(PMTFlatMap)=\", len(PMTFlatMapPositive))\n preimage[ PMTFlatMapPositive[tube][1]+dx, PMTFlatMapPositive[tube][0]+dy ] = quantities[idx]\n subset_quantities.append(quantities[idx])\n \n subset_quantities = np.array(subset_quantities)\n\n imgmin = subset_quantities.min()\n imgmax = subset_quantities.max()\n \n if cutrange[0] != cutrange[1]:\n imgmin = cutrange[0]\n imgmax = cutrange[1]\n \n subset_image = preimage[min_subplot_y_value:max_subplot_y_value, min_subplot_x_value:max_subplot_x_value]\n \n im = ax.imshow( subset_image, extent = [min_subplot_x_value, max_subplot_x_value, min_subplot_y_value, max_subplot_y_value], vmin=imgmin, vmax=imgmax )\n\n fig.suptitle(title, fontsize=80)\n\n plt.rc('xtick', labelsize=24) \n plt.rc('ytick', labelsize=24) \n plt.xlabel('Distance CCW on perimeter from x-axis (cm)', fontsize=48)\n plt.ylabel('Y (cm)', fontsize=48)\n \n plt.set_cmap('gist_heat_r')\n\n # Create colourbar\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n cbar = plt.colorbar(im, cax=cax)\n cbar.ax.tick_params(labelsize=24)\n\n # Fix title height\n plt.tight_layout()", "def plot_individual(xdict, ydict, xprop, yprop, documents, spline):\n figure_array = {}\n for item in documents:\n xlabel = \"\\\\textbf{\" + label_dict[xprop] + \"}\"\n ylabel = \"\\\\textbf{\" + label_dict[yprop] + \"}\"\n x = xdict[item[\"path_id\"]]\n y = ydict[item[\"path_id\"]]\n # fig_title = item[\"path_id\"] + \"(\" + item[\"pretty_formula\"] + \")\" # Individual traces\n # fig_title = yprop + item[\"cation_type\"] # Plot by cation\n fig_title = yprop # All together\n figure_array[item[\"path_id\"]] = plt.figure(fig_title, figsize=(6,6), dpi=plotting_dpi)\n ax = figure_array[item[\"path_id\"]].add_subplot(111) \n ax.scatter(x,y, s=70, zorder=2, color=color_dict[item[\"cation_type\"]], linewidths=2.5, edgecolors='black')\n if spline:\n tck = interpolate.splrep(x, y, s=0)\n xnew = np.arange(0, 100, 0.1)\n splfit = interpolate.splev(xnew, tck, der=0)\n x = xnew\n y = splfit\n if item[\"path_id\"][-3:] == \"002\":\n ax.plot(x,y, linewidth=2.5, zorder=1, color=color_dict[item[\"cation_type\"]], linestyle='dashed')\n elif item[\"path_id\"][-3:] == \"003\":\n ax.plot(x,y, linewidth=2.5, zorder=1, color=color_dict[item[\"cation_type\"]], linestyle='dotted')\n else:\n ax.plot(x,y, linewidth=2.5, zorder=1, color=color_dict[item[\"cation_type\"]])\n ax.set_xlabel(xlabel, fontsize=24)\n # ax.set_ylim([0,1200])\n # ax.set_xlim([0,100])\n ax.set_ylabel(ylabel, fontsize=24)\n ax.tick_params(axis='x', labelsize=22)\n ax.tick_params(axis='y', labelsize=22)\n border_width = 2\n [i.set_linewidth(border_width) for i in ax.spines.itervalues()]\n plt.tight_layout()\n plt.legend(loc='best', prop={'size': 14})\n plt.rc('text', usetex=True)\n plt.rc('font', family='sans-serif')\n plt.tight_layout()\n plt.show()", "def etio_subplot(df, ax, title, graph_color='skyblue'):\n\n post_dx_histo = histo_dx_includes(df)\n hist_df = pd.DataFrame({\"Dx\": post_dx_histo.index, \"Count\": post_dx_histo.data})\n #hist_df = hist_df.drop(1)\n print(hist_df)\n\n graph_range = range(1,len(hist_df.index)+1)\n ax.hlines(y=graph_range, xmin=0, xmax=hist_df['Count'], color=graph_color)\n ax.plot(hist_df['Count'], graph_range, \"D\", color=graph_color)\n ax.set_yticks(range(1, len(hist_df['Dx'])+1))\n ax.set_yticklabels(hist_df['Dx'], fontsize='10')\n\n ax.set_title(title, fontsize='10')\n return ax", "def plot_pcs_slice_sub(self,data_in,large_slice,plot_slice,\n indiv=0,color_array=None,sz=8):\n fig = 
plt.figure(figsize=(sz,6))\n gs = GridSpec(sz+len(self.states_list),1)\n feature_ax = plt.subplot(gs[:sz,:])\n stateseq_ax = plt.subplot(gs[sz+1])\n\n if color_array is None:\n color_array = self._get_colors()\n\n r_plot_slice = list(map(lambda x: large_slice[0] + x, plot_slice))\n z, perm = relabel_model_z(self,index=indiv)\n z = z[r_plot_slice]\n stateseq_norep, durations = rle(z)\n\n max_ = ceil(data_in.max()-data_in.min()) +1\n data_in=data_in[:,plot_slice]\n ttime = np.arange(data_in.shape[1])\n for ii in range(0,data_in.shape[0]):\n feature_ax.plot(ttime,data_in[ii,:] + ii*max_,'k')\n\n feature_ax.set_xlim((0,len(plot_slice)))\n feature_ax.set_ylim((data_in.min()-1,data_in.shape[0]*max_-1))\n feature_ax.set_yticks([])\n feature_ax.set_xticks([])\n\n stateseq_ax.imshow(z[:,np.newaxis].T,aspect='auto',\n cmap=ListedColormap(color_array),vmin=0,vmax=len(perm))\n stateseq_ax.set_yticks([])\n stateseq_ax.set_xticks([])\n\n for ii, pos in enumerate(durations.cumsum()):\n if durations[ii] >=1:\n feature_ax.axvline(pos,\n color=color_array[stateseq_norep[ii]],\n linestyle=':')\n return", "def __init__(self, subplot_class, *args, **kwargs):\n import pylab\n self.fig = pylab.figure(*args, **kwargs)\n self.subplot_class = subplot_class", "def draw_img(original, bit_imgs, title, sub_title):\n fig, axs = plt.subplots(nrows=2, ncols=5, figsize=(17, 7))\n fig.suptitle(title)\n print(axs.shape) # (2,5)\n\n img_number = 8\n\n for i, ax in enumerate(axs):\n for j, a in enumerate(ax):\n img_number -= 1\n\n if i == 0 and j == 0:\n a.imshow(original, cmap='gray')\n a.set_title(sub_title)\n img_number += 1\n print(i,j,'original')\n\n elif i == 1 and j == 0:\n img_number += 1\n print(i,j,'-- pass --')\n\n else:\n a.imshow(bit_imgs[img_number], cmap='gray')\n a.set_title(f\"BIT [{img_number}]\")\n print(i,j, img_number)\n\n print(ax.shape)\n print(i, ax)\n plt.show()", "def one_data_figure_sep(obs, fig, subplot_spec=None, **kwargs):\n if subplot_spec is None:\n gs = gridspec.GridSpec(2,1,height_ratios = [3,1], hspace=0)\n else:\n gs = gridspec.GridSpecFromSubplotSpec(2, 1, hspace=0,\n subplot_spec=subplot_spec,\n height_ratios = [3,1])\n \n \n spec = pl.Subplot(fig, gs[0,0])\n spec.plot(obs['wavelength'], obs['spectrum'], **kwargs)\n spec.set_ylabel(r'$f_\\lambda \\times \\, C$')\n pl.setp(spec.get_xticklabels(), visible = False)\n fig.add_subplot(spec)\n unc = pl.Subplot(fig, gs[1,0])\n unc.plot(obs['wavelength'], obs['unc'], **kwargs)\n unc.set_ylabel(r'$\\sigma f_\\lambda$')\n unc.set_xlabel(r'$\\lambda (\\AA)$')\n fig.add_subplot(unc)\n return fig, gs", "def plot_gridSubplot(shape, loc, colspan=1, rowspan=1):\n return plt.subplot2grid(shape=shape,\n loc=loc,\n colspan=colspan,\n rowspan=rowspan)", "def subplottPNG(self):\n os.chdir(self.mainDir)\n folder = os.listdir(u'.')\n folders = [f for f in folder if f[0] == 'S']\n\n for subject in folders:\n\n try: # go to the 'results' directory\n resultsDir = os.path.join(os.path.join(self.mainDir, subject),'results')\n os.chdir(resultsDir)\n\n # find all files with .png extension\n pngfiles = glob.glob('*.png')\n pngfiles.sort(key = lambda x:x[0])\n pngfiles.sort(key = lambda x:x[1])\n\n fig = plt.figure()\n\n for ii, filename in enumerate(pngfiles):\n f = plt.subplot(4,4,ii+1)\n f.set_axis_off()\n f.set_xlabel('ses:'+str(ii+1))# f.set_figheight(15)\n fig.set_figwidth(30)\n fig.set_figheight(30)\n fig.tight_layout()\n img = matplotlib.image.imread(filename)\n plt.imshow(img)\n\n figname = subject + '_subplot'+ '.png'\n matplotlib.pyplot.savefig(figname)\n\n 
except Exception as errMessage:\n print(errMessage)", "def visualize_2_panel(path: str, outfile: str, kernel: str, s_above=5):\n sns.set(style=\"white\", color_codes=True, font_scale=1)\n fig, axes = plt.subplots(1, 2, figsize=(10, 5))\n fig.suptitle(SUB_TITLE, y=0.93)\n\n x, y = np.load('{}/meshgrids.npy'.format(path)) # coordinates\n sig = np.load('{}/sig_{}.npy'.format(path, kernel))\n data = np.load('{}/queried-data.npy'.format(path)).item()\n\n ra, dec, n_star = data[\"ra\"], data[\"dec\"], len(data[\"ra\"])\n extent = [x.min(), x.max(), y.min(), y.max()] # arg extent for imshow\n\n is_peak = sig > s_above\n mask = data[\"sig_{}\".format(kernel)] > s_above\n\n axes[0].plot(ra, dec, '.', c='deepskyblue', ms=0.5, alpha=0.5)\n axes[0].plot(ra[mask], dec[mask], '.', c='orange', ms=1)\n axes[0].set_title('%d stars' % n_star)\n axes[1].set_title('%s: sig > %0.1f= %d pixels' % (kernel, s_above, np.sum(is_peak)))\n\n for u in range(2):\n axes[u].imshow(is_peak, cmap='copper', vmin=-0.01, vmax=1.01,\n extent=extent, origin='lower')\n axes[u].tick_params(axis='both', which='both',\n labelleft=False, labelbottom=False)\n axes[u].set_xlim(axes[u].set_xlim()[::-1]) # flipping\n\n _filename = \"{}-{}.png\".format(outfile, kernel)\n plt.savefig(_filename, bbox_inches='tight', dpi=300)", "def plot(self):\n fig, axes = plt.subplots(math.ceil(len(self.plots) / self.col_wrap), self.col_wrap)\n\n for ps, ax in zip(self.plots, axes.flatten()):\n for p in ps:\n if p.x is not None and p.y is not None:\n p.method(x=p.x, y=p.y, *p.args, ax=ax, **p.kwargs)\n else:\n p.method(*p.args, ax=ax, **p.kwargs)\n\n return fig, axes", "def add_subplot(gridRows, gridCols, plotNo):\n pl.subplot(gridRows, gridCols, plotNo)" ]
[ "0.63224703", "0.6281179", "0.59312135", "0.58704233", "0.580318", "0.5788824", "0.5757078", "0.575006", "0.57305676", "0.56653893", "0.5630787", "0.5624858", "0.5617859", "0.5590443", "0.5575057", "0.5545475", "0.55385506", "0.55196583", "0.5518725", "0.5510385", "0.5487405", "0.54713005", "0.54627454", "0.54510057", "0.5447261", "0.54329145", "0.5428601", "0.542112", "0.5403584", "0.53998065" ]
0.671145
0
hist function extended to plot statistics
def _hist(xs, bins=100, range=None, stats=('entries', 'mean', 'rms'), xylabels = (), stats_xypos=(0.1, 0.7), *args, **kargs): if (range==None): range = (np.min(xs), np.max(xs)) cc = hst.hist(xs, bins=bins, range=range, *args, **kargs); if (not stats): return cc ys, xedges = np.histogram(xs, bins, range=range) ns = len(xs) sel = np.logical_and(xs >= range[0], xs <= range[1]) nos, mean, rms = len(xs[sel]), np.mean(xs[sel]), np.std(xs[sel]) epsilon = (1.*nos)/(1.*ns) ss = '' if ('total entries') in stats: ss += 'total entries {0:d} \n'.format(ns) if ('entries') in stats: ss += 'entries {0:d} \n'.format(nos) if ('mean') in stats: ss += 'mean {0:.3f} \n'.format(mean) if ('rms') in stats: ss += 'rms {0:.3f} \n'.format(rms) xp, yp = _xypos(xedges, ys, xf=stats_xypos[0], yf=stats_xypos[1]) ##plt.set_label(ss) # plt.gca().set_label(ss) # plt.legend() plt.text(xp, yp, ss) return cc
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hist(self, bins):\n x = self.x\n plt.hist(x, bins)\n plt.xlabel('Observed Data')\n plt.ylabel('Frequency')\n plt.show()", "def plot_hist(self):\n labels = [self.get_class_str(action, obj)\n for (action, obj, subj, rec, beg, end) in self.action_clips]\n visualize.plot_hist(labels, proportion=True)", "def hist(self):\r\n plt.hist(self.data_array, bins='auto', density=False, facecolor='b')\r\n plt.title(self.column_name)\r\n plt.savefig(self.column_name + \".svg\")\r\n plt.close()", "def hist(data):\n\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n plt.hold(True)\n for x in xrange(len(data[:,0,0])):\n counts, edges = np.histogram(data[x,:,:],bins=100)\n centers = [(edges[i]+edges[i+1])/2.0 for i,v in enumerate(edges[:-1])]\n ax1.plot(centers,counts)\n plt.hold(False)\n\n plt.show(block=False)\n\n # return fig", "def hist(self, bins=\"unit\", closed=\"left\", stat=\"sum\"):\n return self.dist.hist(bins=bins, closed=closed, stat=stat)", "def makeHistogram(values, numBins, xLabel, yLabel, title=None):", "def normalized_hist_by_stability(metdat, catinfo, vertloc=80):\n\n stabconds = utils.get_stabconds()\n stabcol, _, _= utils.get_vertical_locations(catinfo['columns']['stability flag'], location=vertloc)\n colors = utils.get_colors(len(stabconds), basecolor='span')\n\n temp = metdat[stabcol].dropna()\n garb = temp.groupby(temp.index.hour).value_counts(normalize=True)\n garb.index.names = ['hour','stabclass']\n garb = garb.reorder_levels(['stabclass','hour'])\n\n hours = np.arange(24)\n newbottom = np.zeros(24)\n\n fig,ax = plt.subplots()\n for jj,cond in enumerate(stabconds):\n # Use this for missing data, also works for full data\n a = garb.loc[cond]\n b = a.index.tolist()\n c = a.values.tolist()\n for i in range(len(hours)):\n if (hours[i]) in b:\n pass\n else:\n b.insert(i,hours[i])\n c.insert(i,0)\n\n d = pd.Series(data = c, index = b)\n ax.bar(hours, d, color=colors[jj], bottom=newbottom)\n newbottom += c #<-- for if missing data, also works for full data \n\n #ax.bar(hours, garb.loc[cond], color=colors[jj], bottom=newbottom)\n #newbottom += garb.loc[cond]\n\n ax.set_ylabel('Probability [%]')\n ax.set_xlabel('Time of Day [Hour]')\n fig.legend(stabconds) \n #fig.legend(stabconds, loc=6, bbox_to_anchor=(1,0.5),framealpha=0)\n fig.tight_layout()\n\n return fig, ax", "def plot_histogram(self,ax=None,**kwargs):\n if not ax:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n probs,bins,patches = ax.hist(self.scores_list,normed=True,label=\"Sample\",**kwargs)\n ax.vlines(self.xhat,*ax.get_ylim(),label='Mean',color='r')\n ax.legend()\n return ax,probs,bins", "def plot_hist(df, num_bins=8):\n df.hist(figsize=(24, 20), bins=num_bins)\n plt.axes", "def getHistogram( self, img):\n bins = 256\n range_scale = [0,254]\n nivel_transparencia = 0.5\n plt.hist(img.ravel(),bins,range_scale, label=\"histogram\", alpha=nivel_transparencia);\n plt.legend(loc='upper right')\n plt.show()", "def PlotHist(self, label=None):\n ys, xs, patches = plt.hist(self.test_stats)\n plt.vlines(self.actual, 0, max(ys), linewidth=3, color='black')\n plt.xlabel('test statistic')\n plt.ylabel('count')\n plt.show()", "def plot_hist(self):\n \n plt.figure();\n self.dist_frame.plot(kind='hist',legend=False,orientation='horizontal')", "def histo ( self ,\n xbins = 20 , xmin = None , xmax = None ,\n ybins = 20 , ymin = None , ymax = None ,\n hpars = () , \n histo = None ,\n integral = False ,\n errors = False , \n density = False ) :\n \n \n histos = self.make_histo ( xbins = xbins , xmin = xmin , xmax = xmax ,\n ybins = ybins 
, ymin = ymin , ymax = ymax ,\n hpars = hpars ,\n histo = histo )\n\n # loop over the historgam bins \n for ix,iy,x,y,z in histo.items() :\n\n xv , xe = x.value() , x.error()\n yv , ye = y.value() , y.error()\n \n # value at the bin center \n c = self ( xv , yv , error = errors ) \n\n if not integral : \n histo[ix,iy] = c\n continue\n\n # integral over the bin \n v = self.integral( xv - xe , xv + xe , yv - ye , yv + ye )\n \n if errors :\n if 0 == c.cov2 () : pass\n elif 0 != c.value() and 0 != v : \n v = c * ( v / c.value() )\n \n histo[ix,iy] = v \n\n ## coovert to density historgam, if requested \n if density : histo = histo.density()\n \n return histo", "def setup_hist(self):\n self.x_min = {}\n self.x_max = {}\n self.x_max_minus_min = {}\n self.dx = {}\n self.n_bins = {}\n\n self.histogram_edges = {}\n self.histogram_values = {}\n self.histogram_cdf = {}", "def drawHist(data, xLabel, unit, binSize, title):\n mean = np.mean(data)\n median = np.median(data)\n mode = stats.mode(data)[0].astype(float)\n \n q1, q3 = np.percentile(data, [25, 75])\n iqr = q3 - q1\n sigma = np.std(data)\n \n \n bins = np.arange(min(data), max(data) + 1, binSize)\n plt.style.use('dark_background')\n fig, ax = plt.subplots(figsize=(12,7))\n plt.hist(data, bins=bins, histtype='bar') \n plt.title(title)\n plt.xlabel(xLabel + \" \" + unit)\n plt.ylabel('count')\n ymax = ax.get_ylim()[1]\n ax.vlines(mean, 0, ymax, color='red', label='mean')\n ax.vlines(mean-sigma, 0, ymax, color='red', linestyle='--', \n label='mean +/- std')\n ax.vlines(mean+sigma, 0, ymax, color='red', linestyle='--')\n plt.legend()\n plt.show()\n \n print(\"Einheit: \", unit)\n print(\"Minimum: \", round(data.min(),3))\n print(\"Maximum: \", round(data.max(),3))\n print(\"Mittelwert: \", round(mean,3))\n print(\"Median: \", round(median,3))\n print(\"Modus: \", round(mode[0],3))\n print(\"Standardabweichung: \", round(sigma, 3))\n print(\"1. Quartil: \", round(q1,3))\n print(\"3. 
Quartil: \", round(q3,3))\n print(\"Quartilsdifferenz: \", round(iqr,3))", "def hist(self, ax=None, nburnt=0, xlabel=None):\n # creating figure if not given as input\n fig = None\n if not ax:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n # histogram of value of samples\n bins = np.arange(self._minval - 0.5 * self._step,\n self._maxval + 1.5 * self._step,\n self._step)\n samples = self.samples[nburnt:self.nsample]\n ax.hist(samples, bins=bins, normed=True, label='sampled distribution')\n\n # prior (uniform) distribution\n if self._maxval > self._minval:\n x = 2 * [self._minval] + 2 * [self._maxval]\n y = [0.0] + 2 * [1.0 / (self._maxval - self._minval)] + [0.0]\n ax.plot(x, y, '-', lw=2, color='grey', label='prior distribution')\n\n # legend, labels and title\n ax.legend(loc='upper right', fontsize=10, framealpha=0.8)\n ax.set_xlabel(self.name if not xlabel else xlabel)\n ax.set_ylabel('Probability density')\n ax.set_title('Nb of samples: {}'.format(len(samples)))\n ax.grid(True)\n\n # statistics\n s = \"Mean & std dev:\\n{:.3G} +/- {:.3G}\".format(np.mean(samples),\n np.std(samples))\n quantiles = np.percentile(samples, [2.5, 97.5])\n s += \"\\n95% confidence interval:\\n{:.3G}, {:.3G}\".format(*quantiles)\n ax.text(min(ax.get_xlim()), max(ax.get_ylim()), s,\n fontdict={'fontsize': 10},\n horizontalalignment='left',\n verticalalignment='top',\n bbox={'color': 'w', 'alpha': 0.8})\n\n if fig:\n fig.show()", "def distribution_magnitude_histogram(cur, var, table, label):\n x = select(cur,var, table)\n print(\"Number of entries: \", len(x))\n print(\"Maximum: \", max(x))\n print(\"Minimum: \", min(x))\n \n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n ax.set_xlabel(\"Sentiment Magnitude\")\n ax.set_ylabel(\"Number of Sentences\")\n fig.suptitle(label)\n ax.hist(x, bins = 20)\n plt.show()", "def end_hist(pulse, trap):\n all_trial_n, all_trial_n_ave = trap.sideband_cool_sch(pulse)\n n_max = np.amax(all_trial_n)\n hist_xar = sp.arange(n_max + 1) - 0.5\n \n # fig, ax = plt.subplots()\n plt.hist(all_trial_n[:, -1], bins = hist_xar)\n plt.xlabel('Phonon State')\n plt.ylabel('Distribution')\n # return ax", "def hist(self, color=\"#FFFFFF\", axes_style=\"darkgrid\", context=\"notebook\",\n col_wrap=4, exhibit_path=None, **kwargs):\n import matplotlib.pyplot as plt\n import seaborn as sns\n sns.set_context(context)\n\n # data0 = self.sims_data[[\"sim\", \"origin\", \"dev\", \"rectype\", \"latest\", \"reserve\",]]\n # data0 = data0[(data0[\"dev\"]==data0[\"dev\"].max()) & (data0[\"rectype\"]==\"forecast\")].reset_index(drop=True)\n # data0 = data0.drop([\"dev\", \"rectype\", \"latest\"], axis=1)\n #\n # # Include additional origin representing aggregate distribution.\n # data1 = data0.groupby(\"sim\", as_index=False)[[\"reserve\"]].sum()\n # data1[\"origin\"] =\"total\"\n # data = pd.concat([data0, data1])\n data = self.reserve_dist\n\n # Get mean, min and max ultimate and reserve by origin.\n med_data = data.groupby(\"origin\", as_index=False)[[\"reserve\"]].median().rename(\n {\"reserve\": \"med_res\"}, axis=1).set_index(\"origin\")\n min_data = data.groupby(\"origin\", as_index=False)[[\"reserve\"]].min().rename(\n {\"reserve\": \"min_res\"}, axis=1).set_index(\"origin\")\n max_data = data.groupby(\"origin\", as_index=False)[[\"reserve\"]].max().rename(\n {\"reserve\": \"max_res\"}, axis=1).set_index(\"origin\")\n dfmetrics = functools.reduce(lambda df1, df2: df1.join(df2), (med_data, min_data, max_data))\n dfmetrics = dfmetrics.applymap(lambda v: 0 if v < 0 else 
v).reset_index(drop=False)\n\n with sns.axes_style(axes_style):\n\n pltkwargs = {\"color\": color, \"bins\": 20, \"edgecolor\": \"#484848\",\n \"alpha\": 1., \"linewidth\": .45}\n\n if kwargs is not None:\n pltkwargs.update(kwargs)\n\n grid = sns.FacetGrid(\n data, col=\"origin\", col_wrap=col_wrap, margin_titles=False,\n despine=True, sharex=False, sharey=False,\n )\n\n hists = grid.map(plt.hist, \"reserve\", **pltkwargs)\n grid.set_axis_labels(\"\", \"\")\n grid.set_titles(\"\", size=6)\n\n # Change ticklabel font size and place legend on each facet.\n origin_vals = sorted([int(ii) for ii in data[\"origin\"].unique() if ii != \"total\"])\n dindex = {jj: ii for ii, jj in enumerate(origin_vals)}\n dindex.update({\"total\": max(dindex.values()) + 1})\n data[\"origin_index\"] = data[\"origin\"].map(dindex)\n origin_order = data[[\"origin_index\", \"origin\"]].drop_duplicates().sort_values(\n \"origin_index\"\n ).origin.values\n\n with warnings.catch_warnings():\n\n warnings.simplefilter(\"ignore\")\n\n for origin, ax_ii in zip(origin_order, grid.axes):\n\n # xmin = np.max([0, dfmetrics[dfmetrics.origin == origin][\"min_res\"].item()])\n xmax = dfmetrics[dfmetrics.origin == origin][\"max_res\"].item() * 1.025\n xmed = dfmetrics[dfmetrics.origin == origin][\"med_res\"].item()\n origin_str = \"{}\".format(origin)\n ax_ii.set_xlim([0, xmax])\n ax_ii.axvline(xmed, color=\"#E02C70\", linestyle=\"--\", linewidth=1.5)\n ax_ii.grid(False)\n\n ymedloc = max(rect.get_height() for rect in ax_ii.patches) * .30\n ax_ii.set_yticks([])\n ax_ii.set_yticklabels([])\n ax_ii.tick_params(\n axis=\"x\", which=\"both\", bottom=True, top=False, labelbottom=True\n )\n ax_ii.set_xticklabels(\n [\"{:,.0f}\".format(jj) for jj in ax_ii.get_xticks()], size=7\n )\n ax_ii.annotate(\n origin_str, xy=(.85, .925), xycoords='axes fraction',\n textcoords='axes fraction', fontsize=9, rotation=0, color=\"#000000\",\n )\n ax_ii.annotate(\n \"median = {:,.0f}\".format(xmed), (xmed, ymedloc), xytext=(7.5, 0),\n textcoords=\"offset points\", ha=\"center\", va=\"bottom\", fontsize=7,\n rotation=90, color=\"#000000\"\n )\n\n # Draw border around each facet.\n for _, spine in ax_ii.spines.items():\n spine.set(visible=True, color=\"#000000\", linewidth=.50)\n\n if exhibit_path is not None:\n plt.savefig(exhibit_path)\n else:\n plt.show()", "def hist_of_numeric(X):\n figsize(10,3)\n for col in get_numeric(X):\n print(col)\n X[col].hist(bins=50)\n show()", "def super_hist(self, data_list, alpha=0.5, log_scale=True, bins=45):\r\n\r\n fig, _ = mp.subplots(1, 1, figsize=(15, 10), constrained_layout=True)\r\n\r\n names = []\r\n for data in data_list:\r\n plot_data = data[data.Day_First_N_Infections != \"None\"]\r\n column_data = plot_data[\"Day_First_N_Infections\"].values\r\n sns.distplot(column_data,\r\n kde=False,\r\n bins=bins,\r\n hist_kws={\r\n \"linewidth\": 1,\r\n \"alpha\": alpha,\r\n \"edgecolor\": 'black',\r\n \"log\": log_scale\r\n })\r\n\r\n mp.legend(loc='upper left', fontsize=20)\r\n mp.xlabel(\"Days from outbreak to case number \" + str(data_list[0].N) +\r\n \" in county\",\r\n fontsize=18)\r\n mp.ylabel(\"Frequency\", fontsize=18)\r\n\r\n fig.savefig(\"hist_N\" + str(data_list[0].N) + \"_\" + \"_\".join(names) +\r\n \".png\")", "def distribution_sentimentscore_histogram(cur, var, table, label):\n x = select(cur,var, table)\n print(\"Number of entries: \", len(x))\n print(\"Maximum: \", max(x))\n print(\"Minimum: \", min(x))\n \n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n ax.set_xlabel(\"Sentiment Score\")\n 
ax.set_ylabel(\"Number of Loans\")\n fig.suptitle(label)\n ax.hist(x, bins = 15)\n plt.show()", "def test_normal(self):\r\n s = np.random.normal(-0.42, 0.55, 5000)\r\n plt.hist(s, 30, density=False)\r\n plt.xlabel('Interlayer point energy [eV]')\r\n plt.ylabel('Frequency')\r\n plt.show()", "def hist(det):\n\n title = fluka.particle.get(det.dist, \"undefined\")\n title += \" #diamond \"\n title += \"reg %d\" % det.reg\n title += \" #diamond \"\n title += \"%g cm^{3}\" % det.volume\n title += \" #diamond \"\n title += \"%g < E < %g GeV\" % (det.elow, det.ehigh)\n title += getAxesTitle(det)\n return ROOT.TH1F(det.name, title, det.ne, getEbins(det))", "def featuresHist_colors(self, **kwargs):\n # Selecting bins automatically:\n bins_onpower = np.arange(self.onpower_train.min().values[0],\n self.onpower_train.max().values[0],\n (self.onpower_train.max().values[0] -\n self.onpower_train.min().values[0]) / 50)\n\n bins_offpower = np.arange(self.offpower_train.min().values[0],\n self.offpower_train.max().values[0],\n (self.offpower_train.max().values[0] -\n self.offpower_train.min().values[0]) / 50)\n\n bins_duration = np.arange(self.duration_train.min().values[0],\n self.duration_train.max().values[0],\n (self.duration_train.max().values[0] -\n self.duration_train.min().values[0]) / 50)\n\n # If a bin has been specified update the bin sizes.\n # Updating bins with specified values.\n for key in kwargs:\n if key == 'bins_onpower':\n bins_onpower = kwargs[key]\n elif key == 'bins_offpower':\n bins_offpower = kwargs[key]\n elif key == 'bins_duration':\n bins_duration = kwargs[key]\n else:\n print(\"Non valid kwarg\")\n\n # Plot:\n fig1 = plt.figure()\n ax1 = fig1.add_subplot(311)\n ax2 = fig1.add_subplot(312)\n ax3 = fig1.add_subplot(313)\n\n start = 0\n end = 0\n for ind in np.arange(len(self.stats)):\n\n if self.stats[ind]['Nevents'] != 0:\n if ind == 0:\n start = 0\n else:\n start = end\n end += self.stats[ind]['Nevents']\n ax1.hist(\n self.onpower_train[start:end].onpower.values, bins=bins_onpower, alpha=0.5)\n ax2.hist(\n self.offpower_train[start:end].offpower.values, bins=bins_offpower, alpha=0.5)\n ax3.hist(\n self.duration_train[start:end].duration.values, bins=bins_duration, alpha=0.5)\n\n ax1.set_title(\"Feature: Onpower\")\n ax1.set_xlabel(\"Watts\")\n ax1.set_ylabel(\"Counts\")\n\n ax2.set_title(\"Feature: Offpower\")\n ax2.set_xlabel(\"Watts\")\n ax2.set_ylabel(\"Counts\")\n\n ax3.set_title(\"Feature: Duration\")\n ax3.set_xlabel(\"Seconds\")\n ax3.set_ylabel(\"Counts\")", "def hist(self, overlay=False, **vargs):\n n = len(self)\n colors = list(itertools.islice(itertools.cycle(('b', 'g', 'r')), n))\n if overlay:\n plt.figure(figsize=(6, 4))\n plt.hist(self.columns, color=colors, **vargs)\n plt.legend(self.column_labels)\n else:\n _, axes = plt.subplots(n, 1, figsize=(6, 4 * n))\n if n==1 : axes = [axes]\n for axis, label, color in zip(axes, self.column_labels, colors):\n axis.hist(self[label], color=color, **vargs)\n axis.set_xlabel(label, fontsize=16)", "def test_traj () :\n samples = getAllTraj()\n states = []\n for t in samples : \n states.extend([toInternalStateRep(s) for s, _, _ in t])\n states = np.stack(states)\n xRange = np.linspace(-np.pi, np.pi, 100)\n yRange = np.linspace(-np.pi, np.pi, 100)\n plotHist(states, xRange, yRange, 'theta1', 'theta2', 'S Count')", "def makeHist(data, bins, wgt=None, factor=1.0, pdf=False):\n n_arr, bins = np.histogram(data, bins, weights=wgt)\n ctr_bins = centerOfBins(bins)\n \n if pdf == True:\n n_arr = asFloat(n_arr) / (float(sum(n_arr)) * 
(bins[1:] - bins[:-1]))\n else:\n n_arr = asFloat(n_arr) * factor\n \n return n_arr, ctr_bins", "def build_hist(concept_values: np.ndarray, num_bins: int = 100) -> np.ndarray:\n hist, _ = np.histogram(concept_values, bins=num_bins, range=(0., 1.), density=True)\n return hist", "def add_histogram(self, tag, values, global_step=None, bins='tensorflow'):\n values = make_np(values)\n self.vis.histogram(make_np(values), opts={'title': tag})" ]
[ "0.700293", "0.6858992", "0.6851635", "0.6733778", "0.6721069", "0.67167056", "0.6709508", "0.6697659", "0.6695027", "0.66849524", "0.66489035", "0.66478205", "0.66446716", "0.6577029", "0.657074", "0.65556145", "0.65371954", "0.65273106", "0.6521044", "0.6478086", "0.64745295", "0.64732736", "0.64575535", "0.64159966", "0.63928205", "0.63845086", "0.63781714", "0.6372578", "0.63642097", "0.6358726" ]
0.71179366
0
Insert first datapoint in the database.
def _insert_datapoint(self): # Insert if db_datapoint.idx_datapoint_exists(1) is False: record = Datapoint( id_datapoint=general.encode(self.reserved), agent_label=general.encode(self.reserved), agent_source=general.encode(self.reserved) ) database = db.Database() database.add(record, 1047)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def store_datapoint(sql, parts):\n t = datetime.fromtimestamp(parts[0])\n humid = parts[1]\n temp_c = parts[2]\n temp_f = parts[3]\n heat_c = parts[4]\n heat_f = parts[5]\n c = sql.cursor()\n c.execute(\"INSERT INTO points VALUES (?,?,?,?,?,?)\",\n (t, humid, temp_c, temp_f, heat_c, heat_f))\n sql.commit()", "def insert_one(self, data):\n _client = self.client\n _db = _client[self.database]\n _col = _db[self.collection]\n\n x = _col.insert_one(data)\n\n return x", "def insert_data(self):\n\n pass", "def test_0_data_insertion(self):\n s = self.fitness.insert_in_database(self.fitness_dict, date_time=self.dt1)\n self.assertTrue(s)", "def InsertNextPoint(self, ):\n ...", "def insert(self, data):\r\n pass", "def insert(self, data):\n\n if not data:\n raise ValueError('invalid data')\n\n # TODO: validate and insert data into model", "def run(self):\n self.db.table('points').insert({\n 'name': 'biblioteca',\n 'rfid': '123456'\n })", "def insert_data(self, table_name, data):\n for data_point in data:\n query = \"INSERT INTO %s(%s) VALUES (%s)\"\n\n fields = \", \".join(data_point.keys())\n values = \", \".join([self.pack_data(value) for value in data_point.values()])\n self.cursor.execute(query % (table_name, fields, values))\n self.db_connection.commit()", "def insert(self):\n self.getDbRecord().insert()\n\n return", "def insert_point(self, point):\n points = self._points;\n if len(points) < 3:\n points.append(point)\n else:\n points.insert(_insertion_index(points, point), point)", "def insert(self):\n pass", "def InsertUniquePoint(self, , p_int):\n ...", "def InsertUniquePoint(self, , p_int):\n ...", "def InsertPoint(self, p_int, ):\n ...", "def InsertUniquePoint(self, p_int, p_int_1, p_float=..., p_float=..., p_float=..., *args, **kwargs):\n ...", "def insert_first(self, e):\n self._insert_between(e, self._head, self._head._next)", "def insert_values():\n pass", "def insert_data(v):\n \n try:\n assert(type(v) is np.ndarray)\n except:\n print(\"The input v should be a numpy array!\")\n return None\n \n conn, tunnel = create_db_conn()\n cur = conn.cursor()\n\n last_id = None\n \n # Reference: https://dev.mysql.com/doc/connector-odbc/en/connector-odbc-usagenotes-functionality-last-insert-id.html\n \n try:\n cur.execute(\"USE %s;\"%(config['db']))\n cur.execute(\"INSERT INTO %s () VALUES ();\"%(config[\"default-table\"]))\n cur.execute(\"SELECT LAST_INSERT_ID();\")\n\n conn.commit()\n last_id = cur.fetchone()[0]\n print(\"last_id:\", last_id)\n\n np.save(path.join(config[\"data-dir\"], str(last_id)), v)\n\n except Exception as e:\n print(\"insert_data failed\")\n print(e)\n\n conn.close()\n tunnel.close()\n return last_id", "def add_to_db_single(self, element):\r\n def quot(string):\r\n \"\"\" Replace \" with ' in text strings that goes into the\r\n db, right now it is only done on the name, but it should\r\n be done on all fields that might contain such characters\r\n \"\"\"\r\n return string.replace('\"', \"'\")\r\n\r\n # Make entry i measurements table\r\n query = ('INSERT INTO {table} SET '\r\n 'time=FROM_UNIXTIME({time}), '\r\n 'type=2, '\r\n 'timestep={timestep}, '\r\n 'comment=\"{comment}\", '\r\n 'pass_energy={pass_energy}, '\r\n 'excitation_energy={excitation_energy}, '\r\n 'number_of_scans={number_of_scans}, '\r\n 'project=\"{project}\", '\r\n 'file_name=\"{file_name}\", '\r\n 'name=\"{name}\";').format(\r\n table=self.tables['measurements'],\r\n time=element[0]['date'],\r\n timestep=element[0]['dwell_time'],\r\n comment=element[0]['unique_name'],\r\n 
pass_energy=element[0]['pass_energy'],\r\n excitation_energy=element[0]['excitation_energy'],\r\n number_of_scans=element[0]['num_scans'],\r\n project=element[0]['project'],\r\n file_name=element[0]['unique_name'].replace('\\\\', '\\\\\\\\'),\r\n name=quot(element[0]['name']))\r\n\r\n # Christian, comment this in to see a list of metadata\r\n #print element[0]\r\n self.cursor.execute(query) # COMMENT\r\n\r\n # Get the id of it\r\n query = ('select id from {table} where type=2 '\r\n 'order by id desc limit 1;').\\\r\n format(table=self.tables['measurements'])\r\n self.cursor.execute(query)\r\n id_ = self.cursor.fetchall()[0][0]\r\n\r\n # Add the data to xy_values table in chunks of 100 data points\r\n counter = 0\r\n query_reset = 'INSERT INTO {table} (measurement, x, y) VALUES'.format(\r\n table=self.tables['xy'])\r\n query = query_reset\r\n # element[1] is tuple of data: (Array(x0, x1, x2), Array(y0, y1, y2)).\r\n # The zip statement (where * pulls out both value) turns it into:\r\n # [(x0, y0), (x1, y1), (x2, y2)]\r\n for x_value, y_value in zip(*element[1]):\r\n counter += 1\r\n query += '({0},{1},{2})'.format(id_, x_value, y_value)\r\n if counter < 100:\r\n query += ','\r\n else:\r\n query += ';'\r\n self.cursor.execute(query)\r\n counter = 0\r\n query = query_reset\r\n # Remember to write the last less than 100 points\r\n if query != query_reset:\r\n # Remove the last , and add a ;\r\n query = query[0: -1] + ';'\r\n self.cursor.execute(query)", "def insert_data(self) -> None:\n if self.min_insert_size > self.insert_count:\n LOG.debug(\"Not enough data for insert....\")\n return\n LOG.debug(f'Inserting {self.insert_count} records...')\n self.insert.write(self.copy_trailer)\n self.insert.seek(0)\n conn = pg.connect(self.dsn)\n with conn.cursor() as cur:\n cur.copy_expert(self.cmd, self.insert)\n conn.commit()\n conn.close()\n self.insert.close()\n self.create_byte_buffer()", "def insert_data(self):\n # Make a connexion with a mock database\n self.generate_data_collection()", "async def insert_one(self, model):\n\n pass", "def insert(self):\n sql = u'INSERT INTO %s' % self.table()\n keys = []\n values = []\n format_values = []\n for field in self.fields():\n attr = object.__getattribute__(self, field)\n if attr.auto_value:\n continue\n keys.append(field)\n format_values.append(attr.format)\n values.append(attr._value)\n keys_str = u'( %s )' % u', '.join(keys)\n values_str = u'VALUES( %s )' % u', '.join(format_values)\n sql = '%s %s %s;' % (sql, keys_str, values_str)\n connection.execute(sql, values)\n primary_k = self.__class__.get_primary()\n primary = object.__getattribute__(self, primary_k)\n primary.value = connection.connection.insert_id()", "def do_insert_data(self, *args):\n print(\"Provide data to insert\")\n self.connection_obj.insert_into_table(**self.__class__.populate_data())\n print(\"Data Insertion Successful\")", "def data_insertion(self, data_dict: Dict):\n\n #self.__create_db()\n self.__create_table()\n\n self.current_state = self.system.insert().values(\n timestamp = data_dict['timestamp'],\n vibration_sensor = data_dict['vibration_sensor'],\n flow = data_dict['flow'],\n pressure = data_dict['pressure'],\n power_consumption = data_dict['power_consumption'],\n failure_times = data_dict['failure_times'],\n operational = data_dict['operational']\n )\n\n self.connection.execute(self.current_state)\n\n if self.max_table_size is not None:\n self.__cleanup_dt()", "def test_insert_empty_data(self):\n self.engine.insert_data(self.empty_data)\n self.assertDictEqual(self.ds.store, 
{})", "def insert_day():\n analytics.insert_day(6)", "def add_data_single(self, pt, val):\n self.gp_core.add_data_single(pt, val)", "def insert(self, sample, *args):\n raise NotImplementedError" ]
[ "0.66409856", "0.6614501", "0.6461848", "0.6433478", "0.6331537", "0.62894434", "0.623728", "0.6231327", "0.61550397", "0.61478513", "0.6059122", "0.60588497", "0.59783554", "0.59783554", "0.59761745", "0.5968749", "0.59624904", "0.59598815", "0.59538317", "0.59033364", "0.5881398", "0.58750373", "0.5859063", "0.58371425", "0.5818611", "0.5784081", "0.5734912", "0.5701849", "0.56893784", "0.5686575" ]
0.7998137
0
Insert first department in the database.
def _insert_department(self): # Insert if db_department.idx_department_exists(1) is False: record = Department( code=general.encode(self.reserved), name=general.encode(self.reserved)) database = db.Database() database.add(record, 1102)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def seed4():\n if Department.find_by_identity(app.config['SEED_DEPARTMENT']) is not None:\n return None\n\n params = {\n 'departmentname': 'testdept',\n 'deptowneremail': '[email protected]'\n }\n\n return Department(**params).save()", "def add_department():\n form = AddDepartment()\n if request.method == 'POST':\n if form.validate_on_submit():\n new_department = Department(name=form.name.data)\n db.session.add(new_department)\n try:\n db.session.commit()\n except IntegrityError:\n db.session.rollback()\n flash('Department already exists!', 'warning')\n return redirect(url_for('add_department'))\n\n flash(f'Department {form.name.data} created!', 'success')\n return redirect(url_for('home'))\n\n flash('Name not defined.', 'warning')\n return render_template('department/department_add.html', form=form)", "def add_department():\n\tcheck_admin()\n\n\tadd_department = True\n\n\tform = DepartmentForm()\n\tif form.validate_on_submit():\n\t\tdepartment = Department(name=form.name.data,description=form.description.data)\n\n\t\ttry:\n\t\t\t#add department to the database\n\t\t\tdb.session.add(department)\n\t\t\tdb.session.commit()\n\t\t\tflash(\"You have successsfully added a new department.\")\n\t\texcept:\n\t\t\t#incase the department already exists\n\t\t\tflash(\"Error: department already exists.\")\n\t#once the admin creates a new department,they will be redirected to the departments page\n\treturn render_template('admin/departments/department.html',action=\"Add\", add_department= add_department,form=form,title = \"Add Department\")", "def add_department():\r\n check_admin()\r\n\r\n add_department = True\r\n\r\n form = DepartmentForm()\r\n if form.validate_on_submit():\r\n department = Department(name=form.name.data,\r\n description=form.description.data)\r\n try:\r\n # add department to the database\r\n db.session.add(department)\r\n db.session.commit()\r\n flash('You have successfully added a new department.')\r\n except:\r\n # in case department name already exists\r\n flash('Error: department name already exists.',category='error')\r\n\r\n # redirect to departments page\r\n return redirect(url_for('admin.list_departments'))\r\n\r\n # load department template\r\n return render_template('admin/departments/department.html', action=\"Add\",\r\n add_department=add_department, form=form,\r\n title=\"Add Department\")", "def add_department():\n details = request.get_json()\n errors = check_department_keys(request)\n if errors:\n return raise_error(400, \"Invalid {} key\".format(', '.join(errors)))\n department_name = details['department_name']\n if DepartmentsModel().get_department_name(department_name):\n return raise_error(400,\n \"{} department already exists\".format(department_name))\n response = DepartmentsModel(department_name).save()\n return Serializer.serialize(response, 201, \"Department added successfully\")", "def add_department():\n logger.debug('Routed to /departments/add')\n\n if request.method == 'POST':\n name = request.form.get(\"name\")\n email = request.form.get(\"email\")\n\n try:\n ds.add(name, email)\n except IntegrityError as exception:\n logger.error('Can\\'t add department with name %s and email \"%s\". '\n 'Exception: %s', name, email, str(exception))\n session['name'] = name\n session['email'] = email\n flash(f'Department with name {name} already exists.')\n return redirect(request.referrer)\n except Exception as exception:\n logger.error('Can\\'t add department with name %s and email %s. 
'\n 'Exception: %s', name, email, str(exception))\n abort(404)\n return redirect(url_for('department.show_all_departments'))\n\n titles = ['Name', 'E-mail']\n return render_template('add_department.html',\n title='Add department',\n table_title='Adding new department',\n headers=titles)", "def populate_dept():\n\n logging.basicConfig(level=logging.INFO)\n logger = logging.getLogger(__name__)\n\n database = SqliteDatabase('personjob.db')\n\n logger.info('Working with Department class')\n logger.info('Note how I use constants and a list of tuples as a simple schema')\n logger.info('Normally you probably will have prompted for this from a user')\n\n dept_num = 0\n dept_name = 1\n dept_manager = 2\n\n department = [\n ('B100', 'Business', 'Smith'),\n ('A200', 'Administration', 'Jones'),\n ('C300', 'Computers', 'Howard'),\n ]\n\n logger.info('Creating Department records: iterate through the list of tuples')\n logger.info('Prepare to explain any errors with exceptions')\n logger.info('and the transaction tells the database to fail on error')\n\n try:\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n for department in department:\n with database.transaction():\n new_dept = Department.create(\n dept_num=department[dept_num],\n dept_name=department[dept_name],\n dept_manager=department[dept_manager])\n new_dept.save()\n logger.info('Database add successful')\n\n logger.info('Print the Department records we saved...')\n for saved_dept in Department:\n logger.info(f'{saved_dept.dept_num} department code is for {saved_dept.dept_name} ' + \\\n f'and the manager is: {saved_dept.dept_manager}')\n\n except Exception as e:\n logger.info(f'Error creating = {department[dept_num]}')\n logger.info(e)\n logger.info('See how the database protects our data')\n\n finally:\n logger.info('database closes')\n database.close()", "def create_by_dep(self, by_dep):\n sql = ''' INSERT INTO by_dpt(code_fede, dpt, nb, sex, year) VALUES(?, ?, ?, ?, ?) 
'''\n self.__cur.execute(sql, by_dep)\n # Save all the changes\n return self.__cur.lastrowid", "def populate_dept():\n\n logging.basicConfig(level=logging.INFO)\n logger = logging.getLogger(__name__)\n\n database = SqliteDatabase('personjob.db')\n\n logger.info('Working with Deparment class')\n logger.info('Note how I use constants and a list of tuples as a simple '\n 'schema')\n logger.info('Normally you probably will have prompted for this from a '\n 'user')\n\n DEPT_NUMBER = 0\n DEPT_NAME = 1\n DEPT_MANAGER = 2\n\n depts = [\n ('AD01', 'Admin', 'Manager1'),\n ('HR02', 'Human Resources', 'Manager2'),\n ('AN03', 'Analyst', 'Manager3'),\n ('SO04', 'Software', 'Manager4'),\n ('TE05', 'Tech', 'Manager5'),\n ]\n\n logger.info('Creating Department records: iterate through the list of '\n 'tuples')\n logger.info('Prepare to explain any errors with exceptions')\n logger.info('and the transaction tells the database to fail on error')\n\n try:\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n for dept in depts:\n with database.transaction():\n new_dept = Department.create(\n dept_number=dept[DEPT_NUMBER],\n dept_name=dept[DEPT_NAME],\n dept_manager=dept[DEPT_MANAGER])\n new_dept.save()\n logger.info('Database add successful')\n\n logger.info('Print the Department records we saved...')\n for saved_dept in Department:\n logger.info(f'{saved_dept.dept_name} has '\n f'dept number {saved_dept.dept_number} '\n f'with {saved_dept.dept_manager} as manager')\n\n except Exception as e:\n logger.info(f'Error creating = {depts[DEPT_NUMBER]}')\n logger.info(e)\n logger.info('See how the database protects our data')\n\n finally:\n logger.info('database closes')\n database.close()", "def populate_depts():\n\n logging.basicConfig(level=logging.INFO)\n logger = logging.getLogger(__name__)\n\n database = SqliteDatabase('personjob.db')\n\n logger.info('Working with Department class')\n\n DEPT_NUM = 0\n DEPT_NAME = 1\n DEPT_MGR = 2\n\n depts = [\n ('ASYS', 'Analyst', 'Ryan Duck'),\n ('ADMN', 'Administration', 'Pamela Alcaline'),\n ('BUSI', 'Business', 'Monica Lows'),\n ('MGMT', 'Managment', 'George Sorry'),\n ]\n\n try:\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n for dept in depts:\n with database.transaction():\n new_department = Department.create(\n department_number = dept[DEPT_NUM],\n department_name = dept[DEPT_NAME],\n department_manager = dept[DEPT_MGR],)\n new_department.save()\n\n logger.info('Print the Department records we saved...')\n for dept in Department:\n logger.info(f'{dept.department_number} : {dept.department_manager} ' +\\\n f'manages {dept.department_name}')\n\n except Exception as e:\n logger.info(f'Error creating = {dept[DEPT_NAME]}')\n logger.info(e)\n\n finally:\n logger.info('database closes')\n database.close()", "def post():\n\n logger.debug('Catch POST request by URL /api/departments.')\n args = department_args.parse_args()\n try:\n id_ = ds.add(name=args['name'], email=args['email'])\n created_department = ds.get(id_)\n except IntegrityError:\n return {'message': f\"Department with name {args['name']} already \"\n \"exists.\"}, 404\n except Exception:\n return {'message': \"Can't post department.\"}, 404\n return marshal_departments(created_department), 201", "def test_department_creation(self):\n res = self.client().post(service_url, json={\"dep_name\": \"test dep 4\", \"description\": \"testing department 4\"})\n self.assertEqual(res.status_code, 201)\n self.assertIn('dep 4', str(res.data))", "def insert(self):\n 
self.getDbRecord().insert()\n\n return", "def department(self, department: object):\n\n self._department = department", "def insert(self, teacher: Teacher):\n sql = f''' INSERT INTO {self.table_name}({','.join([f[0] for f in Teacher.FIELDS])})\n VALUES({('?,' * len(Teacher.FIELDS))[:-1]}) '''\n print(sql)\n teacher_dict = teacher.json_dump()\n print(teacher_dict)\n # assert 1==2\n self.cursor.execute(sql, teacher_dict)\n self.conn.commit()", "def department(self, department):\n\n self._department = department", "def department(self, department):\n\n self._department = department", "def setUp(self):\n self.test_faculty = Faculty(name='Test', slug='test')\n self.test_faculty.full_clean()\n self.test_faculty.save()\n self.test_department = Department(name='Test', slug='test', faculty=self.test_faculty)\n self.test_department.full_clean()\n self.test_department.save()", "def singleInsert(self, table_name, fields, field_values, field_types=[]):\n if not self.checkTable(table_name):\n self.createTable(table_name, fields, field_types)\n self.transactionInsert(table_name, fields, field_values)\n self.transactionEnd()", "def set_department_by_id(department_id):\n return Department.query.filter(id=department_id).one()", "def insert_school(mongo_collection, **kwargs):\n result = mongo_collection.insert_one(kwargs)\n return result.inserted_id", "def insert_one(self, data):\n _client = self.client\n _db = _client[self.database]\n _col = _db[self.collection]\n\n x = _col.insert_one(data)\n\n return x", "def insert_person():\r\n body = request.get_json()\r\n\r\n try:\r\n INSERT_PERSON_SCHEMA.validate(body)\r\n except SchemaError as err:\r\n raise ServiceBodyError(str(err))\r\n\r\n with sqlite_client:\r\n person = (body.get('name'), body.get('cpf'))\r\n message = add_person(sqlite_client, person)\r\n\r\n return jsonify({'id': message})", "def insert_school(mongo_collection, **kwargs):\n doc = mongo_collection.insert_one(kwargs)\n return doc.inserted_id", "def put(id_=None):\n\n logger.debug('Catch PUT request by URL /api/departments/%i.', id_)\n try:\n args = department_args.parse_args()\n ds.update(id_, name=args['name'], email=args['email'])\n except Exception:\n return {'message': \"Can't update department.\"}, 404\n return marshal_departments(ds.get(id_)), 200", "def populate_db():\n\n logging.basicConfig(level=logging.INFO)\n logger = logging.getLogger(__name__)\n\n database = SqliteDatabase('../data/personjob.db') # navigate relative path to the db\n\n logger.info('Working with Department class')\n logger.info('Creating department records')\n\n department_number = 0\n department_name = 1\n department_manager = 2\n\n departments = [\n ('A111', 'Asset Management', 'Dave Sanders'),\n ('B222', 'Human Resources', 'Tammy Murray'),\n ('C333', 'Payroll', 'Daddy Warbucks'),\n ]\n\n try:\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n for department in departments:\n with database.transaction():\n new_department = Department.create(\n department_number = department[department_number],\n department_name = department[department_name],\n deptartment_manager = department[department_manager]\n )\n new_department.save()\n logger.info('Department has been added to the database')\n\n logger.info('Reading and print all department data...')\n for saved_department in Department:\n logger.info(f'{saved_department.department_name} ' + \\\n f'Manager: {saved_department.department_manager}. 
' + \\\n f'Department number: {saved_department.department_number}')\n\n except Exception as e:\n logger.info(f'Error creating = {department[department_number]}')\n logger.info(e)\n\n finally:\n logger.info('database closes')\n database.close()", "def insert_school(mongo_collection, **kwargs):\n return mongo_collection.insert_one(kwargs).inserted_id", "def insert_school(mongo_collection, **kwargs):\n\n id_ = mongo_collection.insert_one(kwargs).inserted_id\n\n return id_", "def insert(self):\n ret = True\n\n schema = self.schema\n fields = self.depopulate(False)\n\n q = self.query\n q.set_fields(fields)\n pk = q.insert()\n if pk:\n fields = q.fields\n fields[schema.pk.name] = pk\n self._populate(fields)\n\n else:\n ret = False\n\n return ret", "def insert_player(document):\n players_col.insert_one(document)" ]
[ "0.64102626", "0.63995546", "0.61741805", "0.61718684", "0.6119254", "0.60806566", "0.5931589", "0.5917951", "0.59139866", "0.5867223", "0.5838998", "0.5770903", "0.5567153", "0.54696494", "0.5416664", "0.5407628", "0.5407628", "0.53592044", "0.53505164", "0.5342802", "0.5284177", "0.5279867", "0.52689904", "0.5268377", "0.5211248", "0.5206298", "0.52047336", "0.52038515", "0.5201019", "0.5188371" ]
0.79333127
0
Insert first billcode in the database.
def _insert_billcode(self): # Insert if db_billcode.idx_billcode_exists(1) is False: record = Billcode( code=general.encode(self.reserved), name=general.encode(self.reserved)) database = db.Database() database.add(record, 1104)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def insert(self, sql):\n try:\n # Execute the SQL command\n self.cursor.execute(sql)\n # Commit your changes in the database\n self.db.commit()\n except:\n # Rollback in case there is any error\n self.db.rollback()", "def insert_to_db(self) -> None:\n query = '''INSERT INTO ESLReceipts(Transaction_Number, Date, Description, Memo,\n Amount_Debit, Amount_Credit, Balance, Check_Number, \n Fees, Card_Type, Is_Payment, Is_Transaction, User_id)\n VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?);'''\n self.db.commit(query, values=self.to_tuple())\n\n if self.is_transaction \\\n and self.transaction is not None \\\n and not self.transaction.exists_in_db():\n self.transaction.insert_to_db()", "def insert(self):\n self.getDbRecord().insert()\n\n return", "def _insert_single(self, disc, class_num):\n self.cursor.execute(self.INSERT, (disc, class_num))\n self.conn.commit()", "def _insert(self):\n self.account_number = randint(1111111,9999999)\n with sqlite3.connect(self.dbpath) as connection: \n cursor = connection.cursor()\n INSERTSQL = \"\"\"INSERT INTO accounts(first_name, last_name, \n username, email_address, \n password_hash, balance, \n account_number, admin,\n api_key) \n VALUES (:first_name, :last_name, \n :username, :email_address, \n :password_hash, :balance, \n :account_number, :admin,\n :api_key); \"\"\"\n values = {\n \"first_name\": self.first_name,\n \"last_name\": self.last_name,\n \"username\": self.username,\n \"email_address\": self.email_address,\n \"password_hash\": self.password_hash, \n \"balance\": self.balance, \n \"account_number\": self.account_number,\n \"admin\": self.admin,\n \"api_key\": randint(111111111, 999999999)\n }\n try: \n cursor.execute(INSERTSQL, values)\n self.id = cursor.lastrowid\n except sqlite3.IntegrityError:\n raise ValueError(\"ticker not set or a position for this ticker already exists\")", "def insert_statement() -> str:\n pass", "def insert(self, name, email, phone, address, state, zip, country, amount, message):\n params = {'name':name, 'email':email, 'phone':phone,'address':address,'state':state,\\\n 'zip':zip,'country':country,'amount':amount,'message':message}\n connection = sqlite3.connect(DB_FILE)\n cursor = connection.cursor()\n cursor.execute(\"insert into foodbank (name, email, phone, address, state, zip, country, amount, message)\\\n VALUES (:name, :email, :phone, :address, :state, :zip, :country, :amount, :message)\", params)\n\n connection.commit()\n cursor.close()\n return True", "def insert_one(self, data):\n _client = self.client\n _db = _client[self.database]\n _col = _db[self.collection]\n\n x = _col.insert_one(data)\n\n return x", "def run(self):\n self.db.table('points').insert({\n 'name': 'biblioteca',\n 'rfid': '123456'\n })", "def insert(title, author, year, isbn,shelf,raw):\n\n conn_obj = mysql.connector.connect(host='localhost',database='mydb',user='root',password='kks')\n cur_obj = conn_obj.cursor()\n sql=\"INSERT INTO book (title, author, year, isbn,shelf,raw) VALUES(%s, %s, %s, %s, %s, %s)\"\n cur_obj.execute(sql,(title, author, year, isbn,shelf,raw))\n conn_obj.commit()\n conn_obj.close()", "def insert(self):\n db.session.add(self)\n db.session.commit()", "def insert(self):\n db.session.add(self)\n db.session.commit()", "def insert(self):\n db.session.add(self)\n db.session.commit()", "def add_to_database():\n db_conn.execute(\"INSERT INTO Fietsenstalling (Naam, Achternaam, Adress, FietsNr, PIN) VALUES \"\n \"(?, ?, ?, ?, ?);\",(Naam, Achternaam, Adress, FietsNr, PIN))\n\n db_conn.commit()", "def 
insert_address_info_company(address,company_id,extern_company_id,con,cur):\n psql_address=f\"\"\" insert into address \n (extern_id,line1,line2,city,postal_code,state,country,company_id,extern_client_id)\n values \n {\n address.id,\n address.line1,\n address.line2,\n address.city,\n address.postal_code,\n address.state,\n address.country,\n company_id,\n extern_company_id,};\"\"\"\n psql=psql_address\n cur.execute(psql)\n print(psql)\n con.commit()", "def create_card(conn, card):\n sql = ''' INSERT INTO card(id,number,pin,balance)\n VALUES(?,?,?,?) '''\n cur = conn.cursor()\n cur.execute(sql, card)\n conn.commit()\n return cur.lastrowid", "def insert(self):\n sql = u'INSERT INTO %s' % self.table()\n keys = []\n values = []\n format_values = []\n for field in self.fields():\n attr = object.__getattribute__(self, field)\n if attr.auto_value:\n continue\n keys.append(field)\n format_values.append(attr.format)\n values.append(attr._value)\n keys_str = u'( %s )' % u', '.join(keys)\n values_str = u'VALUES( %s )' % u', '.join(format_values)\n sql = '%s %s %s;' % (sql, keys_str, values_str)\n connection.execute(sql, values)\n primary_k = self.__class__.get_primary()\n primary = object.__getattribute__(self, primary_k)\n primary.value = connection.connection.insert_id()", "def insert_book(self, title, author, year, isbn):\n self.cursor.execute(\"INSERT INTO Book VALUES(NULL, ?, ?, ?, ?)\",\n (title, author, year, isbn))\n self.connection.commit()", "def insert_customer(self):\n if self.check_user():\n return False\n else:\n cursor = self.db.cursor()\n cursor.execute(\n \"INSERT INTO costumers (dni, costumername, costumerlastname, costumeraddress, costumerpostcode, costumertlfnumber, costumerbirth) VALUES (?, ?, ?, ?, ?, ?, ?)\",\n (self.dni, self.name, self.last_name, self.address, self.postal_code, self.t_number, self.date_birth))\n self.db.commit()\n return True", "def insert(self):\n\t\t# create utc-date for when bird is added\n\t\tself.added = datetime.utcnow().strftime(\"%Y-%m-%d\")\n\n\t\t# build our bird-dict\n\t\tbird = {\n\t\t\t\"name\": self.name, \n\t\t\t\"family\": self.family, \n\t\t\t\"continents\": self.continent, \n\t\t\t\"visible\": self.visible, \n\t\t\t\"added\": self.added\n\t\t}\n\n\t\t# insert bird\n\t\tid = self.M.insert(bird)\n\n\t\treturn id", "def insert(self, data):\r\n pass", "def add_income(self, conn, data):\n sql = 'UPDATE card SET balance = balance + ? WHERE number = ?;'\n c = conn.cursor()\n c.execute(sql, data)\n conn.commit()\n print(\"Income was added!\")\n self.menus()", "def create_person(conn, person, first_name, last_name):\n sql = ''' INSERT INTO person(firstname,lastname)\n VALUES(?,?) 
'''\n cur = conn.cursor() # cursor object\n cur.execute(sql, person)\n # print(str(cur.lastrowid))\n # return cur.lastrowid # returns the row id of the cursor object, the person id\n first_name.set('')\n last_name.set('')\n messagebox.showinfo('Success', 'Person Successfully Added to Database!')", "def sql_insert(self, sqlstr):\n get_connection().insert_raw(sqlstr)\n return 1", "def test_code_add_primary_key(self):\n sql = \"\"\"\n CREATE TABLE public.address_no_primary_key\n (\n code character varying(10),\n address character varying(255),\n geometry geometry(Point,4326)\n );\n \"\"\"\n cursor = self.conn.get_connection().cursor()\n cursor.execute(sql)\n\n layer = DataBaseLayer()\n layer.db_connection = self.conn\n layer.name = 'address_no_primary_key'\n layer.table = 'address_no_primary_key'\n layer.pk_field = 'code'\n layer.geom_field = 'geometry'\n layer.anonymous_view = True\n layer.anonymous_add = True\n layer.anonymous_update = True\n layer.anonymous_delete = True\n layer.save()\n\n with ModelFactory(layer) as Model:\n primary_key = None\n for f in Model._meta.fields:\n if getattr(f, 'primary_key', None):\n primary_key = f.name\n break\n self.assertEqual(primary_key, 'code')", "def insert_into_request_info(self):\n insert_query = f\"\"\"\n Insert into Request_Info \n (\n {self.__fields[2]},\n {self.__fields[3]},\n {self.__fields[4]},\n {self.__fields[5]},\n {self.__fields[6]},\n {self.__fields[7]},\n {self.__fields[8]},\n {self.__fields[9]},\n {self.__fields[10]},\n {self.__fields[11]},\n {self.__fields[12]},\n {self.__fields[13]})\n values(\n '{self.args.FirstName}',\n '{self.args.LastName}',\n '{self.args.MiddleName}',\n '{self.args.DOB}',\n '{self.args.Gender}',\n '{self.args.National}',\n '{self.args.City}',\n '{self.args.State}',\n '{self.args.PinCode}',\n '{self.args.Qualification}',\n '{self.args.Salary}',\n '{self.args.Pan}')\n \"\"\"\n self.__log.write_log(\"Inserted into the table request_info\")\n self.execute(insert_query)", "def make_new_student(first_name, last_name, github):\n QUERY = \"\"\"\n INSERT INTO Students VALUES(?, ?, ?)\"\"\"\n \n db_cursor.execute(QUERY, (first_name, last_name, github))\n db_connection.commit()\n print \"Successfully added student: %s %s\" % (first_name, last_name)", "def do_insert_data(self, *args):\n print(\"Provide data to insert\")\n self.connection_obj.insert_into_table(**self.__class__.populate_data())\n print(\"Data Insertion Successful\")", "def insert_submission(conn, data):\n with conn.begin():\n insert_stmt = schema.submission.insert().values({\n 'fingerprint': data['fingerprint'],\n 'length': data['length'],\n 'bitrate': data.get('bitrate'),\n 'source_id': data['source_id'],\n 'mbid': data.get('mbid'),\n 'puid': data.get('puid'),\n 'format_id': data.get('format_id'),\n })\n id = conn.execute(insert_stmt).inserted_primary_key[0]\n logger.debug(\"Inserted submission %r with data %r\", id, data)\n return id", "def insert_address_info(address,client_id,extern_client_id,con,cur):\n psql_address=f\"\"\" insert into address \n (extern_id,line1,line2,city,postal_code,state,country,client_id,extern_client_id)\n values \n {\n address.id,\n address.line1,\n address.line2,\n address.city,\n address.postal_code,\n address.state,\n address.country,\n client_id,\n extern_client_id,};\"\"\"\n psql=psql_address\n cur.execute(psql)\n con.commit()" ]
[ "0.6301685", "0.6257179", "0.6171408", "0.6077293", "0.5851242", "0.57877386", "0.57575035", "0.5745975", "0.574445", "0.57382643", "0.57061344", "0.57061344", "0.57061344", "0.5701996", "0.56728", "0.5659489", "0.56475896", "0.56186724", "0.56084", "0.56060004", "0.559914", "0.5558006", "0.5520624", "0.55164814", "0.5514396", "0.54901093", "0.5466698", "0.5447655", "0.5444338", "0.5443235" ]
0.82463765
0
Insert first agent and device in the database.
def _insert_agent_device(self): # Initialize key variables idx_agent = 1 idx_device = 1 # Add agent if db_agent.idx_agent_exists(idx_agent) is False: # Generate a UID and add a record in the database record = Agent( id_agent=general.encode(self.reserved), name=general.encode(self.reserved)) database = db.Database() database.add(record, 1109) # Add device if db_device.idx_device_exists(idx_device) is False: record = Device( description=general.encode(self.reserved), devicename=general.encode(self.reserved) ) database = db.Database() database.add(record, 1106) # Add to Agent / Device table if db_deviceagent.device_agent_exists(idx_device, idx_agent) is False: record = DeviceAgent(idx_device=idx_device, idx_agent=idx_agent) database = db.Database() database.add(record, 1107)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def insert_into_db(self, database):\n\n # insert person\n keys = \"\"\n values = \"\"\n for key, value in self.person.items():\n # location\n if key == \"location\":\n # ensure location is in table\n database.select(f\"\"\"DO $do$ BEGIN IF NOT EXISTS (SELECT * FROM p21_cdm.location WHERE city='{value['city']}' \n AND zip='{value['zip']}') THEN INSERT INTO p21_cdm.location (city, zip) \n VALUES ('{value['city']}', '{value['zip']}'); END IF; END; $do$\"\"\")\n continue\n\n keys += f\"{key},\"\n values += f\"'{value}',\"\n\n database.select(f\"\"\"INSERT INTO p21_cdm.person (location_id, {keys[:-1]}) \n VALUES((SELECT location_id \n FROM p21_cdm.location\n WHERE city='{self.person['location']['city']}' \n and zip='{self.person['location']['zip']}'), \n {values[:-1]})\"\"\")\n\n # insert visits\n for visit in self.visits:\n keys = \"person_id,\"\n values = f\"'{self.person['person_id']}',\"\n for key, value in visit.items():\n if key == \"care_site_name\":\n # ensure care site is in table\n database.select(f\"\"\"DO $do$ BEGIN IF NOT EXISTS (SELECT * \n FROM p21_cdm.care_site \n WHERE care_site_name='{value}') \n THEN INSERT INTO p21_cdm.care_site (care_site_name) \n VALUES ('{value}'); END IF; END; $do$\"\"\")\n continue\n\n keys += f\"{key},\"\n values += f\"'{value}',\"\n\n database.select(f\"\"\"INSERT INTO p21_cdm.visit_occurrence (care_site_id, {keys[:-1]}) \n VALUES((SELECT care_site_id\n FROM p21_cdm.care_site\n WHERE care_site_name='{visit['care_site_name']}'),\n {values[:-1]}) \n RETURNING visit_occurrence_id\"\"\")\n\n # insert measurements, observations, conditions & procedures\n for data, tablename in [(self.measurements, \"measurement\"),\n (self.observations, \"observation\"),\n (self.conditions, \"condition_occurrence\"),\n (self.procedures, \"procedure_occurrence\")]:\n for entry in data:\n keys = \"person_id,\"\n values = f\"'{self.person['person_id']}',\"\n\n for key, value in entry.items():\n keys += f\"{key},\"\n values += f\"'{value}',\"\n\n entry[\"sql_id\"] = database.select(f\"\"\"INSERT INTO p21_cdm.{tablename}({keys[:-1]})\n VALUES({values[:-1]}) RETURNING {tablename}_id\"\"\")[0][0]\n\n # insert fact_relationships in both directions\n for table1, entry1, table2, entry2 in self.fact_relations:\n # 44818890 = Finding associated with (SNOMED)\n database.select(f\"\"\"INSERT INTO p21_cdm.fact_relationship(domain_concept_id_1, fact_id_1, \n domain_concept_id_2, fact_id_2, \n relationship_concept_id)\n VALUES('{table1}','{entry1['sql_id']}','{table2}','{entry2['sql_id']}','44818890')\"\"\")\n # 44818792 = Associated with finding (SNOMED)\n database.select(f\"\"\"INSERT INTO p21_cdm.fact_relationship(domain_concept_id_1, fact_id_1, \n domain_concept_id_2, fact_id_2, \n relationship_concept_id)\n VALUES('{table2}','{entry2['sql_id']}','{table1}','{entry1['sql_id']}','44818792')\"\"\")\n\n # make transactions persistent\n database.commit()", "async def insert_one(self, model):\n\n pass", "def insert(self):\n self.getDbRecord().insert()\n\n return", "def insert(self):\n sql = u'INSERT INTO %s' % self.table()\n keys = []\n values = []\n format_values = []\n for field in self.fields():\n attr = object.__getattribute__(self, field)\n if attr.auto_value:\n continue\n keys.append(field)\n format_values.append(attr.format)\n values.append(attr._value)\n keys_str = u'( %s )' % u', '.join(keys)\n values_str = u'VALUES( %s )' % u', '.join(format_values)\n sql = '%s %s %s;' % (sql, keys_str, values_str)\n connection.execute(sql, values)\n primary_k = self.__class__.get_primary()\n 
primary = object.__getattribute__(self, primary_k)\n primary.value = connection.connection.insert_id()", "def insert(self, dev_schema):\n headers = self.get_headers()\n rv = self.api_request('POST', self.url + '/device', dev_schema, headers)\n if rv is None:\n raise Exception('Failed to store device schema')\n return rv", "def do_insert_data(self, *args):\n print(\"Provide data to insert\")\n self.connection_obj.insert_into_table(**self.__class__.populate_data())\n print(\"Data Insertion Successful\")", "def _insert_datapoint(self):\n # Insert\n if db_datapoint.idx_datapoint_exists(1) is False:\n record = Datapoint(\n id_datapoint=general.encode(self.reserved),\n agent_label=general.encode(self.reserved),\n agent_source=general.encode(self.reserved)\n )\n database = db.Database()\n database.add(record, 1047)", "def db_add_device_record(db_path, device_info):\n path_exist = os.path.exists(db_path)\n if path_exist is False:\n print '!!!Error, database does not exist.'\n return\n\n try:\n with db.connect(db_path) as conn:\n cursor = conn.cursor()\n # Prepare and execute SQL statement.\n # The REPLACE statement is an alias for the \"INSERT OR REPLACE\"\n # variant of the INSERT statement. This alias is provided for\n # compatibility with other SQL database engines.\n tup = (device_info['device_name'],\n device_info['os_type'],\n device_info['ip_address'],\n \", \".join(device_info['interfaces']))\n sql = (\"REPLACE INTO Devices VALUES (?,?,?,?)\")\n cursor.execute(sql, tup)\n conn.commit()\n except (db.OperationalError) as e:\n print(\"!!!Error, %s\" % repr(e))", "def insert_agents(db, prefix, stmts_wo_agents=None, **kwargs):\n verbose = kwargs.pop('verbose', False)\n if len(kwargs):\n raise IndraDatabaseError(\"Unrecognized keyword argument(s): %s.\"\n % kwargs)\n\n agent_tbl_obj = db.tables[prefix + '_agents']\n\n if stmts_wo_agents is None:\n stmts_wo_agents, num_stmts = \\\n get_statements_without_agents(db, prefix, verbose=verbose)\n else:\n num_stmts = None\n\n if verbose:\n if num_stmts is None:\n try:\n num_stmts = len(stmts_wo_agents)\n except TypeError:\n logger.info(\"Could not get length from type: %s. 
Turning off \"\n \"verbose messaging.\" % type(stmts_wo_agents))\n verbose = False\n\n # Construct the agent records\n logger.info(\"Building agent data for insert...\")\n if verbose:\n print(\"Loading:\", end='', flush=True)\n agent_data = []\n for i, db_stmt in enumerate(stmts_wo_agents):\n # Convert the database statement entry object into an indra statement.\n stmt = stmts_from_json([json.loads(db_stmt.json.decode())])[0]\n\n if prefix == 'pa':\n stmt_id = db_stmt.mk_hash\n else: # prefix == 'raw'\n stmt_id = db_stmt.id\n\n agent_data.extend(_get_agent_tuples(stmt, stmt_id))\n\n # Optionally print another tick on the progress bar.\n if verbose and num_stmts > 25 and i % (num_stmts//25) == 0:\n print('|', end='', flush=True)\n\n if verbose and num_stmts > 25:\n print()\n\n if prefix == 'pa':\n cols = ('stmt_mk_hash', 'db_name', 'db_id', 'role')\n else: # prefix == 'raw'\n cols = ('stmt_id', 'db_name', 'db_id', 'role')\n db.copy(agent_tbl_obj.__tablename__, agent_data, cols)\n return", "def insert_pa_agents_directly(db, stmts, verbose=False):\n if verbose:\n num_stmts = len(stmts)\n\n # Construct the agent records\n logger.info(\"Building agent data for insert...\")\n if verbose:\n print(\"Loading:\", end='', flush=True)\n agent_data = []\n for i, stmt in enumerate(stmts):\n agent_data.extend(_get_agent_tuples(stmt, stmt.get_hash(shallow=True)))\n\n # Optionally print another tick on the progress bar.\n if verbose and num_stmts > 25 and i % (num_stmts//25) == 0:\n print('|', end='', flush=True)\n\n if verbose and num_stmts > 25:\n print()\n\n cols = ('stmt_mk_hash', 'db_name', 'db_id', 'role')\n db.copy('pa_agents', agent_data, cols)\n return", "def insert(self):\n db.session.add(self)\n db.session.commit()", "def insert(self):\n db.session.add(self)\n db.session.commit()", "def insert(self):\n db.session.add(self)\n db.session.commit()", "def insert_db():\n populate_tables()", "def test_insert(self):\n query = \"insert into cds values(%s,%s,%s,%s)\"\n values = (109876,\"cinderella\",\"big 5\",5)\n self.a.insert(query,values)\n query1 = \"select * from cds where id=109876\"", "def insert_values():\n pass", "def insert_sensors(sensors):\n\t\tif sensors is None:\n\t\t\treturn\n\t\tcon = mdb.connect('localhost', 'root', 'sensepass', 'sensecambrowser')\n\t\twith con:\n\t\t\tfor sensor in sensors:\n\t\t\t\t\"\"\"\n\t\t\t\tusername = annotation[0]\n\t\t\t\tpath = annotation[1]\n\t\t\t\tconcept = annotation[2]\n\t\t\t\tuid = DBHelper.get_user_id(username)\n\t\t\t\tiid = DBHelper.get_user_id(path)\n\t\t\t\tcid = DBHelper.get_user_id(concept)\n\t\t\t\tquery = \"insert into annotater_annotationaction(annotator_id,image_id,concept_id) values(%s,%s,%s)\" % (uid,iid,cid)\n\t\t\t\tcur = con.cursor()\n\t\t\t\tcur.execute(query)\n\t\t\t\t\"\"\"", "def bulk_insert(cls, device_id, imeis):\n insertion_object = []\n for imei in imeis:\n insertion_object.append({'imei': imei, 'normalized_imei': imei[0:14], 'device_id': device_id})\n res = db.engine.execute(ImeiDevice.__table__.insert(), insertion_object)\n res.close()", "def insertDevice(self, device):\n self.devices.append(device)\n if device.bench and device.bench not in self.benches:\n logger.warning(\"Insterting *new* bench %s\", device.bench.name)\n self.benches.append(device.bench)", "def test_create_device1(self):\n pass", "def data_insertion(self, data_dict: Dict):\n\n #self.__create_db()\n self.__create_table()\n\n self.current_state = self.system.insert().values(\n timestamp = data_dict['timestamp'],\n vibration_sensor = 
data_dict['vibration_sensor'],\n flow = data_dict['flow'],\n pressure = data_dict['pressure'],\n power_consumption = data_dict['power_consumption'],\n failure_times = data_dict['failure_times'],\n operational = data_dict['operational']\n )\n\n self.connection.execute(self.current_state)\n\n if self.max_table_size is not None:\n self.__cleanup_dt()", "def fill(self):\n\n self.db.batch_insert_camera_from_api()", "def insert_one(self, data):\n _client = self.client\n _db = _client[self.database]\n _col = _db[self.collection]\n\n x = _col.insert_one(data)\n\n return x", "def insert_data(self):\n # Make a connexion with a mock database\n self.generate_data_collection()", "def test_0_data_insertion(self):\n s = self.fitness.insert_in_database(self.fitness_dict, date_time=self.dt1)\n self.assertTrue(s)", "def insert(self):\n\t\t# create utc-date for when bird is added\n\t\tself.added = datetime.utcnow().strftime(\"%Y-%m-%d\")\n\n\t\t# build our bird-dict\n\t\tbird = {\n\t\t\t\"name\": self.name, \n\t\t\t\"family\": self.family, \n\t\t\t\"continents\": self.continent, \n\t\t\t\"visible\": self.visible, \n\t\t\t\"added\": self.added\n\t\t}\n\n\t\t# insert bird\n\t\tid = self.M.insert(bird)\n\n\t\treturn id", "def insert_data():\n\tBase.metadata.drop_all(engine)\n\tBase.metadata.create_all(engine)\n\tu1 = insert_user(\"[email protected]\", \"/static/image/avatar.JPG\")\n\tu2 = insert_user(\"[email protected]\", \"/static/image/avatar.JPG\")\n\tc = insert_catalog(u1.id, \"Sichuan Dish\")\n\tinsert_catalog(u1.id, \"Fujian Dish\")\n\tinsert_catalog(u1.id, \"Guangdong Dish\")\n\tinsert_catalog(u2.id, \"Zhejiang Dish\")\n\tinsert_catalog(u2.id, \"Beijing Dish\")\n\tinsert_item(u1.id, \"Iphone 6 plus\", c, 'Is a phone', None)\n\tinsert_item(u1.id, \"Hot pot\", c, \"Hot hot hot\", None)\n\tinsert_item(u2.id, \"Kong Bao Chicken\", c, \"Classic\", None)", "def add_device(cls, values):\n return cls.dbdriver.add_device(values)", "def test_add_device(self):\n\n pass", "def test_1_data_insertion_multiple_users(self):\n s = self.fitness.insert_in_database(self.fitness_dict, date_time=self.dt1)\n self.assertEqual(s, True)\n s_1 = self.fitness_1.insert_in_database(self.fitness_dict_1, date_time=self.dt1)\n self.assertEqual(s_1, True)" ]
[ "0.5917118", "0.57357377", "0.5714411", "0.55926985", "0.5586116", "0.5455014", "0.54244137", "0.5374424", "0.5357495", "0.53562266", "0.5317439", "0.5317439", "0.5317439", "0.5261197", "0.52423173", "0.52157986", "0.5213579", "0.52010274", "0.5193928", "0.51743084", "0.5136725", "0.5128185", "0.51063246", "0.50945294", "0.5092432", "0.5076626", "0.5070621", "0.5067209", "0.5045602", "0.50336343" ]
0.7941295
0
Insert first config in the database.
def _insert_config(self):
    # Initialize key variables
    key_values = [('version', '0.0.0.0')]

    # Cycle through all the key value pairs
    for item in key_values:
        key = item[0]
        value = item[1]

        # Check if value exists and insert if not
        if db_configuration.config_key_exists(key) is False:
            record = Configuration(
                config_key=general.encode(key),
                config_value=general.encode(value))
            database = db.Database()
            database.add(record, 1108)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def insert(self):\n self.getDbRecord().insert()\n\n return", "def config_skeleton():\n config = Config()\n config.set_to_default()\n config.save()", "def config_db():", "def insert_db():\n populate_tables()", "def _save_to_database(self, data):\n self._logger.info(\"Saving new config to database\")\n\n query1 = \"DELETE FROM project_config WHERE config_site = ?\"\n query2 = \"\"\"INSERT INTO project_config (config_site, config_json)\n VALUES (?, ?)\"\"\"\n\n dump = json.dumps(data)\n with self._bot.localdb as cursor:\n cursor.execute(\"BEGIN\")\n cursor.execute(query1, (self._bot.wikiid,))\n cursor.execute(query2, (self._bot.wikiid, dump))", "def insert(self):\n db.session.add(self)\n db.session.commit()", "def insert(self):\n db.session.add(self)\n db.session.commit()", "def insert(self):\n db.session.add(self)\n db.session.commit()", "def add_cfg (conn, cfg):\n\n try:\n csr = conn.cursor()\n\n cmd = \"INSERT INTO {tbl} ({col1}, {col2}, {col3}) VALUES (%s, %s, %s);\".\\\n format(tbl = _tbl_config,\n col1 = _tbl_config_col1,\n col2 = _tbl_config_col2,\n col3 = _tbl_config_col3)\n print(cmd)\n\n dt = datetime.datetime.now()\n vals = (cfg[cfg_url], cfg[cfg_bws], cfg[cfg_usr_name])\n csr.execute(cmd, vals)\n\n csr.close()\n\n except Exception as ex:\n print(\"Error - add_cfg: {0}\".format(ex))\n rc_err = ex.args[0]\n return rc_err\n\n return rc_ok", "def insert(self):\n sql = u'INSERT INTO %s' % self.table()\n keys = []\n values = []\n format_values = []\n for field in self.fields():\n attr = object.__getattribute__(self, field)\n if attr.auto_value:\n continue\n keys.append(field)\n format_values.append(attr.format)\n values.append(attr._value)\n keys_str = u'( %s )' % u', '.join(keys)\n values_str = u'VALUES( %s )' % u', '.join(format_values)\n sql = '%s %s %s;' % (sql, keys_str, values_str)\n connection.execute(sql, values)\n primary_k = self.__class__.get_primary()\n primary = object.__getattribute__(self, primary_k)\n primary.value = connection.connection.insert_id()", "def init_db_admin_config():\n from app.constantes import TEMP_CHAUDIERE_FAILURE_DEFAULT, \\\n CHAUDIERE_DB_ROTATE_HOURS_DEFAULT, \\\n CHAUDIERE_MINUTE_DB_ROTATE_DAYS_DEFAULT, \\\n ALERTS_ENABLE_DEFAULT\n from app.models.admin_config import AdminConfig\n \n db.create_all('admin_config')\n \n if AdminConfig.first(AdminConfig) == None:\n new_config = AdminConfig(\n temp_chaudiere_failure = TEMP_CHAUDIERE_FAILURE_DEFAULT,\n chaudiere_db_rotate_hours = CHAUDIERE_DB_ROTATE_HOURS_DEFAULT,\n chaudiere_minute_db_rotate_days = CHAUDIERE_MINUTE_DB_ROTATE_DAYS_DEFAULT,\n alerts_enable = ALERTS_ENABLE_DEFAULT,\n comment = ''\n )\n \n db.session.add(new_config)\n db.session.commit()\n print(\"\\n * NEW AdminConfig\")\n print(AdminConfig.first(AdminConfig))", "async def insert_one(self, model):\n\n pass", "def insert_event_to_db(self):\n try:\n events_coll.insert_one(self.event_info_to_dic())\n except Exception as e:\n print(e)", "def test_new_empty_config(self):\n # set up\n mock_rowcount = PropertyMock(return_value=0)\n type(self.mock_get_cur.return_value).rowcount = mock_rowcount\n self.mock_get_cur.return_value.fetchone.return_value = (1,)\n\n # run SUT\n new_config_id = new_config()\n\n # confirm appropriate sql was executed for an empty config\n self.mock_get_cur.return_value.execute.assert_called_once_with(\n \"INSERT INTO config (key_value_pairs) VALUES (%s) RETURNING config_id\",\n ('',),\n )\n\n # confirm we have a reasonable id\n self.assertEqual(type(new_config_id), type(0))", "def insert(self):\n ret = True\n\n schema = 
self.schema\n fields = self.depopulate(False)\n\n q = self.query\n q.set_fields(fields)\n pk = q.insert()\n if pk:\n fields = q.fields\n fields[schema.pk.name] = pk\n self._populate(fields)\n\n else:\n ret = False\n\n return ret", "def insert(self):\n pass", "def test_bad_insert(db):\n with pytest.raises(error.InvalidSerialization):\n _ = db.insert_current('config', db, store_permanently=False)\n\n with pytest.raises(error.InvalidSerialization):\n _ = db.insert('config', db)", "def insert(self, sql):\n try:\n # Execute the SQL command\n self.cursor.execute(sql)\n # Commit your changes in the database\n self.db.commit()\n except:\n # Rollback in case there is any error\n self.db.rollback()", "def run(self):\n self.db.table('points').insert({\n 'name': 'biblioteca',\n 'rfid': '123456'\n })", "def insert_values():\n pass", "def insert_data(self):\n\n pass", "def insert_one(self, data):\n _client = self.client\n _db = _client[self.database]\n _col = _db[self.collection]\n\n x = _col.insert_one(data)\n\n return x", "def insert(self, value):\n # Build insert query\n into_sql = ''\n col_sql = ''\n val_sql = []\n for key, val in value.items():\n into_sql += ', {}'.format(key)\n col_sql += ', ?'\n val_sql.append(val)\n # Execute query\n self.execute(\"insert into {} ({}) values ({})\".format(self.name, into_sql[2:], col_sql[2:]), val_sql)", "def Insert(self):\n sql = 'INSERT INTO %s ( %s ) VALUES ( %s )' % (\n self.table_name,\n ', '.join(self.values),\n ', '.join(['?' for _ in self.values])\n )\n return Database().Execute(sql, tuple(self.values.values()))", "def test_db(self):\n db.tests.insert_one({'name': 'test-name'})\n r = db.tests.find_one({'name': 'test-name'})\n self.assertEqual(r['name'], 'test-name')\n\n db.tests.insert_one({'_id': '_id', 'a': 'A', 'b': 'B', 'c': 'c'})", "def test_create_config_with_save(self) -> None:\n config = self.integration.create_config(name='Config 1', save=True)\n self.assertFalse(config.enabled)\n self.assertIsNotNone(config.pk)", "def configure(self, config):\n # create the follower table if it doesn't already exist\n model.follower_table.create(checkfirst=True)", "def insert_data(self):\n # Make a connexion with a mock database\n self.generate_data_collection()", "def populate_database(self):\n self.insert_products()\n self.insert_categories()\n self.insert_products_categories()\n self.insert_stores()\n self.insert_products_stores()", "def insert_to_db(self) -> None:\n query = \"\"\"INSERT INTO Users(Username, Password, Firstname, Surname, Currency_id,\n Has_First_Sign_In, Account_Created, Last_Sign_In)\n VALUES(?,?,?,?,?,?,?,?);\"\"\"\n self.db.commit(query, values=self.to_tuple())" ]
[ "0.65543216", "0.6231456", "0.6197239", "0.618433", "0.61325914", "0.61080337", "0.61080337", "0.61080337", "0.6015693", "0.5981799", "0.5916322", "0.58256114", "0.58244497", "0.5773632", "0.57164454", "0.5700255", "0.5685849", "0.5667353", "0.565591", "0.56507236", "0.5622573", "0.5600113", "0.55998135", "0.5563058", "0.55486673", "0.55451274", "0.5504754", "0.54979193", "0.54535687", "0.5453206" ]
0.7516463
0
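The record above shows an insert-if-missing pattern for seeding a configuration table with default key/value pairs. As a rough illustration of the same idea, here is a minimal, self-contained sketch using Python's standard-library sqlite3; the table name, columns, and seed values are assumptions made for the example, not the schema or the db/db_configuration helpers used by the original code.

import sqlite3

def insert_default_config(conn, defaults=(('version', '0.0.0.0'),)):
    # Hypothetical key/value table; the real schema will differ.
    cur = conn.cursor()
    cur.execute(
        'CREATE TABLE IF NOT EXISTS configuration ('
        'config_key TEXT PRIMARY KEY, config_value TEXT)')
    for key, value in defaults:
        # Insert only when the key does not already exist.
        cur.execute(
            'INSERT OR IGNORE INTO configuration (config_key, config_value) '
            'VALUES (?, ?)', (key, value))
    conn.commit()

# Usage:
# connection = sqlite3.connect(':memory:')
# insert_default_config(connection)
# print(connection.execute('SELECT * FROM configuration').fetchall())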
Set up daemon scripts and file permissions.
def setup(self):
    # Set bashrc file
    self._bashrc()

    # Return if not running script as root user
    if self.running_as_root is False:
        return

    # Return if user prompted doesn't exist
    if self.infoset_user_exists is False:
        return

    # Set file permissions
    self._file_permissions()

    # Setup systemd
    self._systemd()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_permissions():\n sudo('chown %s:%s -R %s' % (env.apache_user, env.apache_user, env.whole_path_symlinked))", "def setupEnvironment(self, chroot, rundir, nodaemon, umask, pidfile):\n daemon = not nodaemon\n\n if chroot is not None:\n os.chroot(chroot)\n if rundir == '.':\n rundir = '/'\n os.chdir(rundir)\n if daemon and umask is None:\n umask = 0o077\n if umask is not None:\n os.umask(umask)\n if daemon:\n from twisted.internet import reactor\n self.config[\"statusPipe\"] = self.daemonize(reactor)\n if pidfile:\n with open(pidfile, 'wb') as f:\n f.write(intToBytes(os.getpid()))", "def setup_dirs():\n\n require('environment', provided_by=env.environments)\n sudo('mkdir -p %(log_dir)s' % env, user=env.deploy_user)\n sudo('chmod a+w %(log_dir)s' % env )\n sudo('mkdir -p %(services)s/nginx' % env, user=env.deploy_user)\n sudo('mkdir -p %(services)s/supervisor' % env, user=env.deploy_user)\n sudo('mkdir -p %(services)s/gunicorn' % env, user=env.deploy_user)\n sudo('mkdir -p %(media_root)s' % env)\n sudo('chown %(webserver_user)s %(media_root)s' % env)\n sudo('mkdir -p %(static_root)s' % env)\n sudo('chown %(webserver_user)s %(static_root)s' % env)", "def setup_data_dir():\n for dir_ in [PATH, WORKSPACE, RUNTEST_PATH, ENV_PATH, REPO_PATH]:\n if not os.path.isdir(dir_):\n os.makedirs(dir_)\n\n if not os.path.isdir(KEY_PATH):\n shutil.copytree(os.path.join(PATH, 'keys'), KEY_PATH)\n for key in os.listdir(KEY_PATH):\n if os.path.isfile(os.path.join(KEY_PATH, key)):\n os.chmod(os.path.join(KEY_PATH, key), 384) # 0600\n\n if not os.path.isdir(SEL_PATH):\n shutil.copytree(os.path.join(PATH, 'selinux'), SEL_PATH)\n for rule in os.listdir(SEL_PATH):\n if os.path.isfile(os.path.join(SEL_PATH, rule)):\n os.chmod(os.path.join(SEL_PATH, rule), 384) # 0600", "def setup():\n # Ignore errors if the user already exists.\n with settings(user=env.ROOT_USER, password=env.ROOT_PASS, warn_only=True):\n # Create a new system user.\n result = execute('system.user_create',\n env.SYSTEM_USER,\n env.SYSTEM_PASS)\n\n # Upload SSH key for the new system.\n if result.get(env.host):\n execute('system.user_sshkey', env.SYSTEM_USER)\n\n ##############################\n # RUN SERVER UPDATES\n ##############################\n\n execute('system.update')\n\n ##############################\n # BASIC SERVER SECURITY\n ##############################\n\n # Disable password authentication.\n execute('system.ssh_disable_password_authentication')\n # Disable root login.\n execute('system.ssh_disable_root_login')\n # Restart SSH.\n execute('system.ssh_restart')\n\n # Install ufw\n execute('ufw.install')\n # Deny incoming connections.\n execute('ufw.default')\n # Allow SSH (22/tcp) access.\n execute('ufw.allow', 'ssh')\n # Allow HTTP (80/tcp) access.\n execute('ufw.allow', 'http')\n # Allow HTTPS (443/tcp) access.\n execute('ufw.allow', 'https')\n # Enable the firewall.\n execute('ufw.enable')\n\n # Install supervisor\n execute('supervisor.install')\n\n # Install mercurial\n execute('mercurial.install')\n\n # Install nginx\n execute('nginx.install')\n execute('nginx.config')\n execute('nginx.restart')\n\n # Setup Python Environment.\n require('PYTHON_VENV')\n\n execute('python.dev')\n execute('python.venv', env.PYTHON_VENV)\n execute('python.install', env.PYTHON_VENV)\n\n # Deploy the project.\n #\n # fab --config=config.conf project.clone \\\n # project.config \\\n # project.migrate \\\n # project.collectstatic \\\n # project.restart\n execute('project.clone')\n execute('project.config')\n execute('project.migrate')\n 
execute('project.collectstatic')\n execute('project.restart')\n\n execute('supervisor.restart')\n execute('supervisor.reread')\n execute('supervisor.update')", "def pid_permissions():\n config = Config()\n try:\n user = pwd.getpwnam(config.user)\n group = grp.getgrnam(config.group)\n os.chown(config.pidfile, user.pw_uid, group.gr_gid)\n except (KeyError, PermissionError):\n logger.error(\"Unable to change pidfile ownership permissions.\")\n raise SystemExit(os.EX_USAGE)", "def configure_path_permissions(self):\n\n import vms.db\n import vms.kvm\n import vms.config\n\n try:\n passwd = pwd.getpwnam(FLAGS.libvirt_user)\n libvirt_uid = passwd.pw_uid\n libvirt_gid = passwd.pw_gid\n except Exception, e:\n raise Exception(\"Unable to find the libvirt user %s. \"\n \"Please use the --libvirt_user flag to correct.\"\n \"Error: %s\" % (FLAGS.libvirt_user, str(e)))\n\n try:\n vmsfs_path = vms.kvm.config.find_vmsfs()\n except Exception, e:\n raise Exception(\"Unable to located vmsfs. \"\n \"Please ensure the module is loaded and mounted. \"\n \"Error: %s\" % str(e))\n\n try:\n for path in vmsfs_path, os.path.join(vmsfs_path, 'vms'):\n os.chown(path, libvirt_uid, libvirt_gid)\n os.chmod(path, 0770)\n except Exception, e:\n raise Exception(\"Unable to make %s owner of vmsfs: %s\" %\n FLAGS.libvirt_user, str(e))\n\n def can_libvirt_write_access(dir):\n # Test if libvirt_user has W+X permissions in dir (which are\n # necessary to create files). Using os.seteuid/os.setegid is\n # insufficient because they don't affect supplementary\n # groups. Hence we run\n # sudo -u $libvirt_user test -w $dir -a -x $dir\n # We're not using os.system because of shell escaping of directory\n # name. We're not using subprocess.call because it's buggy: it\n # returns 0 regardless of the real return value of the command!\n command = ['sudo', '-u', FLAGS.libvirt_user,\n 'test', '-w', dir, '-a', '-x', dir]\n child = os.fork()\n if child == 0:\n os.execvp('sudo', ['sudo', '-u', FLAGS.libvirt_user,\n 'test', '-w', dir, '-a', '-x', dir])\n while True:\n pid, status = os.waitpid(child, 0)\n if pid == child:\n return os.WIFEXITED(status) and os.WEXITSTATUS(status) == 0\n\n def mkdir_libvirt(dir):\n if not os.path.exists(dir):\n LOG.debug('does not exist %s', dir)\n utilities.make_directories(dir)\n os.chown(dir, libvirt_uid, libvirt_gid)\n os.chmod(dir, 0775) # ug+rwx, a+rx\n if not can_libvirt_write_access(dir):\n raise Exception(\"Directory %s is not writable by %s (uid=%d). \"\n \"If it already exists, make sure that it's \"\n \"writable and executable by %s.\" %\n (dir, FLAGS.libvirt_user, libvirt_uid,\n FLAGS.libvirt_user))\n try:\n db_path = vms.db.vms.path\n mkdir_libvirt(os.path.dirname(db_path))\n utilities.touch(db_path)\n os.chown(db_path, libvirt_uid, libvirt_gid)\n\n # TODO: This should be 0660 (ug+rw), but there's an error I can't\n # figure out when libvirt creates domains: the vms.db path (default\n # /dev/shm/vms.db) can't be opened by bsddb when libvirt launches\n # kvm. This is perplexing because it's launching it as root!\n os.chmod(db_path, 0666) # aug+rw\n\n dirs = [config.SHELF,\n config.SHARED,\n config.LOGS,\n config.CACHE,\n config.STORE]\n for dir in dirs:\n if dir != None:\n mkdir_libvirt(dir)\n except Exception, e:\n raise Exception(\"Error creating directories and setting \"\n \"permissions for user %s. 
Error: %s\" %\n (FLAGS.libvirt_user, str(e)))", "def setup():\n\n debs = (\"python-setuptools\", \"apache2\", \"libapache2-mod-wsgi\")\n\n require(\"hosts\", provided_by=[production, staging])\n sudo(\"apt-get install %s\" % \" \".join(debs))\n sudo(\"easy_install virtualenv pip\")\n sudo(\"mkdir -p %(path)s\" % env)\n with cd(\"%(path)s\" % env):\n sudo(\"mkdir -p releases; mkdir -p packages\")\n sudo(\"virtualenv --no-site-packages .\")\n sudo(\"mkdir -p /var/log/twit-demo; chown www-data:www-data /var/log/twit-demo\")", "def setup():\n\n with cd(env.homedir):\n\n # clone repository from github\n sudo('git clone https://github.com/starzel/demo.starzel.de.git', user=env.deploy_user) # noqa: E501\n\n with cd(env.directory):\n\n # requirements\n # sudo('python python-dev build-essential zlib1g-dev libssl-dev libxml2-dev libxslt1-dev wv poppler-utils libtiff5-dev libjpeg62-dev zlib1g-dev libfreetype6-dev liblcms1-dev libwebp-dev') # noqa: E501\n\n # prepare buildout\n sudo('ln -s local_production.cfg local.cfg', user=env.deploy_user)\n sudo('echo -e \"[buildout]\\nlogin = admin\\npassword = admin\" > secret.cfg', user=env.deploy_user) # noqa: E501\n\n # bootstrap and run bildout once\n sudo('./bin/pip install -r requirements.txt', user=env.deploy_user)\n sudo('./bin/buildout', user=env.deploy_user)\n\n # start supervisor which starts plone instance also\n sudo('./bin/supervisord', user=env.deploy_user)", "def setup(self):\n # Backup existing configuration\n if self.always_backup_existing_config:\n self.backup()\n\n self.host.open(self.remote_path, 'wb', use_sudo=self.use_sudo).write(self.content)\n\n if self.make_executable:\n self.host.run(\"chmod a+x '%s'\" % esc1(self.host.expand_path(self.remote_path)), use_sudo=self.use_sudo)", "def setup():\n # Create the Dallinger config file if it does not already exist.\n config_name = \".dallingerconfig\"\n config_path = os.path.join(os.path.expanduser(\"~\"), config_name)\n\n if os.path.isfile(config_path):\n log(\"Dallinger config file already exists.\", chevrons=False)\n\n else:\n log(\"Creating Dallinger config file at ~/.dallingerconfig...\", chevrons=False)\n src = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n \"..\",\n \"default_configs\",\n config_name,\n )\n shutil.copyfile(src, config_path)", "def _admin_setup(self):\n admin_pkgs = ['rsync', 'dsh', 'git', 'git-core', 'nginx',\n 'subversion', 'git-daemon-sysvinit', 'expect']\n\n with settings(hide('running', 'stdout', 'stderr', 'warnings')):\n sudo('apt-get install %s %s ' % (self.apt_opts,\n ' '.join(admin_pkgs)))\n if sudo('test -e %s'\n % (self.repo_base + '/' + self.repo_name)).failed:\n sudo('mkdir -p %s' % (self.repo_base + '/' + self.repo_name))\n local_path = self.tmpl_dir + '/*'\n remote_path = self.repo_base + '/' + self.repo_name + '/'\n if put(local_path, remote_path,\n use_sudo=True, mirror_local_mode=True).failed:\n status = 500\n msg = '''\n Uploading template files to remote admin system has\n failed. 
[remote path: %s]\n ''' % remote_path\n disconnect_all()\n raise UploadTemplatesError(status, msg)\n \"\"\"\n Let's change the ownership of the pushed templates\n \"\"\"\n sudo('chown -R root.root %s' % remote_path)\n\n \"\"\"\n Initialize git repo\n \"\"\"\n git_dir = remote_path + \".git\"\n sudo('git --git-dir=%s --work-tree=%s init'\n % (git_dir, remote_path))\n sudo('git --git-dir=%s --work-tree=%s add .'\n % (git_dir, remote_path))\n sudo('git --git-dir=%s --work-tree=%s commit -qam \"initial\"'\n % (git_dir, remote_path))\n if sudo('test -e %s' % git_dir).failed:\n status = 500\n msg = 'Issue initializing git repo on admin setup'\n raise ResponseError(status, msg)\n\n if sudo('test -e /root/local').succeeded:\n sudo('mv -f /root/local /root/local.old')\n sudo('git clone -q file:///%s /root/local'\n % (remote_path,))\n else:\n sudo('git clone -q file:///%s /root/local'\n % (remote_path,))\n\n \"\"\"\n Syncing repo files to admin /\n \"\"\"\n self._sync_files('admin')\n\n \"\"\"\n Install swift packages for admin system\n \"\"\"\n self._swift_install('admin')\n\n \"\"\"\n Restarting some services\n \"\"\"\n if sudo('service git-daemon start').failed:\n status = 500\n msg = 'Error restarting git-daemon'\n raise ResponseError(status, msg)\n if sudo('service nginx restart').failed:\n status = 500\n msg = 'Error restarting nginx'\n raise ResponseError(status, msg)\n \"\"\"\n Reboot system\n \"\"\"\n if sudo('reboot').failed:\n status = 500\n msg = 'Error trying to reboot system'\n raise ResponseError(status, msg)\n else:\n print \"\\nRebooting ... Please wait until it's back online\"\n print \"to proceed with any other deploys\\n\"\n print \"Also verify that all required services\"\n print \"are running on the admin system after the reboot\"\n else:\n status = 500\n msg = 'System has been setup previously ... 
Aborting'\n raise ResponseError(status, msg)", "def run_commands (self):\n cwd = os.getcwd()\n data = []\n data.append('config_dir = %r' % os.path.join(cwd, \"config\"))\n data.append(\"install_data = %r\" % cwd)\n data.append(\"install_scripts = %r\" % cwd)\n self.create_conf_file(data)\n super(MyDistribution, self).run_commands()", "def setup_directories():\n run('mkdir -p %(path)s' % env)\n run('mkdir -p %(env_path)s' % env)\n run('mkdir -p %(log_path)s;' % env)\n sudo('chgrp -R www-data %(log_path)s; chmod -R g+w %(log_path)s;' % env)\n \n with settings(warn_only=True):\n run('ln -s %(log_path)s %(path)s/logs' % env)", "def fixpermissions():\n try:\n stats = os.stat(SCRIPT_LOC)\n os.chown(DNS_LOC, stats.st_uid, stats.st_gid)\n os.chmod(DNS_LOC, stats.st_mode)\n except AttributeError:\n pass\n except OSError:\n print '>> Unable to change permissions of ' + DNS_LOC + os.linesep + \\\n ' ^^ This is a non-fatal error ^^'", "def setup():\n require('hosts', provided_by=[prod])\n require('code_root')\n sudo('apt-get update')\n sudo('apt-get install -y python-setuptools')\n sudo('easy_install pip')\n sudo('pip install virtualenv')\n sudo('aptitude install -y apache2')\n sudo('aptitude install -y libapache2-mod-wsgi')\n sudo('apt-get install -y nginx')\n update_webserver_config()\n sudo('mkdir -p %s; cd %s; virtualenv .;' % (env.code_root, env.code_root))\n sudo('cd %s;mkdir releases; mkdir shared; mkdir packages; mkdir shared/media; mkdir shared/media/file;' % (env.code_root))\n deploy()", "def fix_permissions():\n for root, dirs, files in os.walk('build'):\n for d in dirs:\n os.chmod(os.path.join(root, d), 0o755)\n for f in files:\n os.chmod(os.path.join(root, f), 0o644)\n\n # The executable must be executable.\n os.chmod('build/usr/bin/qastetray', 0o755)", "def user_routine():\n\n root()\n\n createuser()\n make_su(s.username)\n\n user()\n sudo(\"mkdir -p /home/web/\")\n sudo(\"chown -R %s /home/web/\" % s.username)", "def setup(self):\n Utils.check_dir(os.path.join(expanduser('~'), '.drupdates', 'plugins'))", "def setup_scripts(self, context):\n path = os.path.abspath(os.path.dirname(__file__))\n path = os.path.join(path, 'scripts')\n self.install_scripts(context, path)", "def start_daemon(self, *args, **kwargs):\n pass", "def install_init_script():\n run('sudo touch %s' % env.init_script)\n run('sudo chown %s %s' % (env.user, env.init_script))\n run('sudo update-rc.d %s defaults' % os.path.basename(env.init_script))\n update_init_script()", "def fix_file_perms():\n yield\n os.chmod('tackle.yaml', int('0o644', 8))", "def setup():\n sudo(\"minv_setup.sh\")", "def set_syco_permissions():\n x(\"chown -R root:root /opt/syco\")\n x(\"chmod 0755 /opt/syco\")\n x(\"chmod 0750 /opt/syco/var\")\n x(\"chmod 0750 /opt/syco/var/mysql\")\n x(\"chmod 0750 /opt/syco/var/mysql/mysql-lvm-backup.py\")\n x(\"chmod 0750 /opt/syco/var/mysql/mysqldump-backup.sh\")", "def bootstrap():\n validate_configurator_version()\n\n # put new mkinitcpio.conf in place\n run(\"mv /etc/mkinitcpio.conf.pacnew /etc/mkinitcpio.conf\")\n sed(\"/etc/mkinitcpio.conf\",\n 'MODULES=\"\"',\n 'MODULES=\"xen-blkfront xen-fbfront xen-kbdfront xen-netfront xen-pcifront xenbus_probe_frontend xenfs\"') # nopep8\n sed(\"/etc/mkinitcpio.conf\",\n 'HOOKS=\"base udev autodetect modconf block filesystems keyboard fsck',\n 'HOOKS=\"base udev block filesystems shutdown autodetect\"')\n\n # upgrade pacakges\n run(\"pacman --noconfirm -Syu\")\n\n # put new pacman.conf in place\n run(\"mv /etc/pacman.conf.pacnew /etc/pacman.conf\")\n\n # install 
essential packages\n run(\"pacman --noconfirm -S base-devel\")\n run(\"pacman --noconfirm -S curl git rsync\")\n\n # create a user, named 'aur', to safely install AUR packages under fakeroot\n # uid and gid values auto increment from 1000\n # to prevent conficts set the 'aur' user's gid and uid to 902\n run(\"groupadd -g 902 aur && useradd -m -u 902 -g 902 -G wheel aur\")\n\n # allow users in the wheel group to sudo without a password\n uncomment(\"/etc/sudoers\", \"wheel.*NOPASSWD\")\n\n # install yaourt and upgrade non-pacman rackspace installed packages\n sudo(\"rm -rf /home/aur/.builds && mkdir /home/aur/.builds/\", user=\"aur\")\n with cd(\"/home/aur/.builds/\"):\n sudo(\"bash <(curl aur.sh) -si --noconfirm package-query yaourt\", user=\"aur\")\n sudo(\"yaourt --noconfirm -S xe-guest-utilities\", user=\"aur\")\n\n # allow fabric to sftp with contrib.files.put\n # http://stackoverflow.com/questions/10221839/cant-use-fabric-put-is-there-any-server-configuration-needed # nopep8\n # change before reboot because then the sshd config will be reloaded\n # sed(\"/etc/ssh/sshd_config\", \"Subsystem sftp /usr/lib/openssh/sftp-server\",\n # \"Subsystem sftp internal-sftp\")\n\n # systemd\n sed(\"/boot/grub/menu.lst\",\n \"kernel /boot/vmlinuz-linux root=/dev/xvda1 ro console=hvc0\",\n \"kernel /boot/vmlinuz-linux root=/dev/xvda1 ro console=hvc0 init=/usr/lib/systemd/systemd\")\n reboot()\n if not contains(\"/proc/1/comm\", \"systemd\"):\n abort(\"systemd is not installed properly\")\n server = [s for s in env.bootmachine_servers if s.public_ip == env.host][0]\n run(\"hostnamectl set-hostname {0}\".format(server.name))\n run(\"mv /etc/locale.gen.pacnew /etc/locale.gen.conf\")\n uncomment(\"/etc/locale.gen\", \"en_US.UTF-8 UTF-8\")\n uncomment(\"/etc/locale.gen\", \"en_US ISO-8859-1\")\n run(\"locale-gen\")\n run(\"localectl set-locale LANG='en_US.utf8'\")\n run(\"timedatectl set-timezone US/Central\")", "def setup():\n op_dir = rootfs.get_working_dir()\n if not os.path.isdir(op_dir):\n os.mkdir(op_dir)", "def init_daemon(cf):\n #logfile = cf.get('logfile', '/tmp/watcher.log')\n pidfile = cf.get('pidfile', '/tmp/watcher.pid')\n # uid\n uid = cf.get('uid', None)\n if uid is not None:\n try:\n uid = int(uid)\n except ValueError as e:\n if uid != '':\n logger.warning('Incorrect uid value: %r' %(e)) \n uid = None\n # gid\n gid = cf.get('gid', None)\n if gid is not None:\n try:\n gid = int(gid)\n except ValueError as e:\n if gid != '':\n logger.warning('Incorrect gid value: %r' %(e)) \n gid = None\n\n umask = cf.get('umask', None)\n if umask is not None:\n try:\n umask = int(umask)\n except ValueError as e:\n if umask != '':\n logger.warning('Incorrect umask value: %r' %(e)) \n umask = None\n\n wd = cf.get('working_directory', None)\n if wd is not None and not os.path.isdir(wd):\n if wd != '':\n logger.warning('Working directory not a valid directory (\"%s\"). 
Set to default (\"/\")' %(wd)) \n wd = None\n\n return {'pidfile':pidfile, 'stdin':None, 'stdout':None, 'stderr':None, 'uid':uid, 'gid':gid, 'umask':umask, 'working_directory':wd}", "def setupRunDir(self):\n\n pass", "def _setup(self):\n # Look for ini file\n if not os.path.isfile(self.ini_file):\n self._fail('Cannot find ini file')\n\n self._setup_logging()\n\n # Import debexpo root directory\n sys.path.append(os.path.dirname(self.ini_file))\n\n # Initialize Pylons app\n conf = appconfig('config:' + self.ini_file)\n pylons.config = load_environment(conf.global_conf, conf.local_conf)\n\n # Change into the incoming directory\n incoming_dir = pylons.config['debexpo.upload.incoming']\n logging.info(\"Changing dir to %s\", incoming_dir)\n os.chdir(incoming_dir)\n\n # Look for the changes file\n if not os.path.isfile(self.changes_file):\n self._fail('Cannot find changes file')" ]
[ "0.68749136", "0.63898104", "0.6315376", "0.61940134", "0.61697245", "0.6156293", "0.61302006", "0.6121493", "0.6063271", "0.604604", "0.60355693", "0.59841716", "0.5958335", "0.5945734", "0.5910354", "0.58855337", "0.5800965", "0.5783833", "0.57625", "0.57576156", "0.57484114", "0.57261395", "0.56944287", "0.5691346", "0.5680272", "0.5675134", "0.56735075", "0.5669569", "0.5668106", "0.56451225" ]
0.6931322
0
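The setup record above delegates to helpers that fix file permissions and register systemd units. Below is a minimal sketch of the file-permission part only, using the standard-library os, pwd and grp modules; the path, user and group names are placeholders, and this is not the project's actual _file_permissions implementation.

import grp
import os
import pwd

def set_tree_permissions(path, user, group,
                         dir_mode=0o750, file_mode=0o640):
    # Resolve the numeric uid/gid for the service account.
    uid = pwd.getpwnam(user).pw_uid
    gid = grp.getgrnam(group).gr_gid
    for root, _dirs, files in os.walk(path):
        # os.walk visits every directory as `root`, so this covers the tree.
        os.chown(root, uid, gid)
        os.chmod(root, dir_mode)
        for name in files:
            filepath = os.path.join(root, name)
            os.chown(filepath, uid, gid)
            os.chmod(filepath, file_mode)

# Usage (requires root and an existing service account):
# set_tree_permissions('/opt/example-daemon', 'daemon_user', 'daemon_user')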
Set bashrc file environment variables.
def _bashrc(self):
    # Initialize key variables
    root_directory = self.root_directory

    # Determine username to use
    if self.running_as_root is True:
        # Edit local user's bashrc file
        username = self.infoset_user
    else:
        # Edit selected user's bashrc file
        username = getpass.getuser()

    # Read bashrc file
    home_directory = os.path.expanduser('~{}'.format(username))
    filepath = '{}/.bashrc'.format(home_directory)

    # Do nothing if .bashrc file doesn't exist
    if (os.path.isfile(filepath) is False) or (
            os.path.exists(filepath) is False):
        return

    # Read contents of file
    with open(filepath, 'r') as f_handle:
        contents = f_handle.read()

    # Create string to append to the end of the file
    if 'PYTHONPATH' in contents:
        export_string = """\
# Automatically inserted by the infoset-ng installation script
# It appended the required PYTHONPATH to your existing PYTHONPATH
PYTHONPATH=$PYTHONPATH:{}
export PYTHONPATH
""".format(root_directory)
    else:
        export_string = """\
# Automatically inserted by the infoset-ng installation script
# It appended the required PYTHONPATH to your existing PYTHONPATH
PYTHONPATH={}
export PYTHONPATH
""".format(root_directory)

    # Append the PYTHONPATH to the end of the file contents
    contents = '{}{}'.format(contents, export_string)
    with open(filepath, 'w') as f_handle:
        f_handle.write(contents)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def install_bash_profile():\n run('cat %(bash_profile)s >> ~/.bash_profile' % env)", "def bash(ctx):\n if os.environ.get('RUNNING_ON_DOCKER', False):\n prefix = 'docker-'\n else:\n prefix = ''\n\n file_name = prefix + 'bashrc'\n rcfile = os.path.join(os.path.dirname(__file__), 'data', file_name)\n _execvp('bash', ['bash', '--rcfile', rcfile])", "def update_bash_profile(extra_paths=()):\n lines = [\n '',\n '# Add paths for Software-Carpentry-installed scripts and executables',\n 'export PATH=\\\"$PATH:{}\\\"'.format(':'.join(\n make_posix_path(path) for path in extra_paths),),\n '',\n '# Make nano the default editor',\n 'export EDITOR=nano',\n '',\n ]\n config_path = os.path.join(os.path.expanduser('~'), '.bash_profile')\n with open(config_path, 'a') as f:\n f.write('\\n'.join(lines))", "def change_environment_variables():\n values = load('environment.yaml')\n\n for key in values.keys():\n os.environ[key] = values[key]\n\n info(f'Changed environment variables to {values}')", "def set_env_var(self):\n\n list_env_vars = self.config.items('environment_variables')\n for env_var in list_env_vars:\n os.environ[env_var[0].upper()] = env_var[1]", "def shell_source(script):\n import subprocess, os\n pipe = subprocess.Popen(\". %s; env\" % script, stdout=subprocess.PIPE, shell=True)\n output = pipe.communicate()[0]\n env = dict((line.split(\"=\", 1) for line in output.splitlines()))\n os.environ.update(env)", "def shell_source(script):\n import subprocess, os\n pipe = subprocess.Popen(\". %s; env\" % script, stdout=subprocess.PIPE, shell=True)\n output = pipe.communicate()[0]\n env = dict((line.split(\"=\", 1) for line in output.splitlines()))\n os.environ.update(env)", "def update_env_in_script(fn, names):\n with open(fn) as ifs:\n content = ifs.read()\n content = _prepend_env_paths(content, names)\n with open(fn, 'w') as ofs:\n ofs.write(content)", "def _set_ci_environment_variables(parent_shell):\n variables_to_set = {\n \"JOBSTAMPS_ALWAYS_USE_HASHES\": \"1\",\n \"CLINT_FORCE_COLOR\": \"1\",\n \"PYTHONDONTWRITEBYTECODE\": \"1\"\n }\n\n for key, value in variables_to_set.items():\n os.environ[key] = value\n parent_shell.overwrite_environment_variable(key, value)", "def read_envdir():\n env_dir = \"env\"\n env_vars = glob.glob(os.path.join(env_dir, '*'))\n for env_var in env_vars:\n with open(env_var, 'r') as env_var_file:\n os.environ.setdefault(env_var.split(os.sep)[-1],\n env_var_file.read().strip())", "def env(config, args):\n print config.template(\"scripts/env.sh\", project=args.project)", "def setUpEnvironmentVariables(basedir):\n\tif sys.platform == 'win32':\n\t\toldpath = os.environ[\"PATH\"]\n\t\tcwd = os.getcwd()\n\t\tos.environ[\"PATH\"] = oldpath + ';' + cwd + fileSeperator + basedir + fileSeperator + \"platform-tools\"\n\t\tprint os.environ[\"PATH\"]\n\telse:\n\t\tcwd = os.getcwd()\n\t\toldpath = os.environ[\"PATH\"]\n\t\tnewpath = cwd + fileSeperator + basedir + fileSeperator + \"tools:\" + fileSeperator + cwd + fileSeperator + basedir + fileSeperator + \"platform-tools\"\n\t\tos.environ[\"PATH\"] = oldpath + fileSeperator + newpath", "def GetBashEnvFromFile(this, filename):\n DB_RE = re.compile(\"export (.+)=(.+)\")\n ret = {}\n if filename is not None:\n with open( filename, \"r\" ) as f:\n for line in f:\n m = DB_RE.search(line.strip())\n if m:\n name = m.group(1)\n val = m.group(2)\n # Check for quotes\n if val[0] in \"'\\\"\" and val[0]==val[-1]:\n val = val[1:-1]\n ret[name] = val\n for name in (MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DATABASE):\n if name not in ret:\n 
try:\n ret[name] = os.environ[name]\n except KeyError as e:\n logging.error(\"%s not in environment not in %s\",name,filename)\n raise\n return ret", "def _patch_etc_environment( cls, env_file, dirs=None, dirs_var='PATH', env_pairs=None ):\n\n def parse_entry( s ):\n m = cls.env_entry_re.match( s )\n return m.group( 1 ), m.group( 2 )\n\n env_file.seek( 0 )\n env = dict( parse_entry( _ ) for _ in env_file.read( ).splitlines( ) )\n\n # Do we have directories to add to a path?\n if dirs is not None:\n path = filter( None, env.get( dirs_var, '' ).split( ':' ) )\n path.extend( dirs )\n env[ dirs_var ] = ':'.join( path )\n\n # Do we have other environment variables to write?\n if env_pairs is not None:\n for (k, v) in env_pairs.iteritems():\n env[k] = v\n\n env_file.seek( 0 )\n env_file.truncate( 0 )\n for var in sorted( env.items( ) ): \n env_file.write( '%s=\"%s\"\\n' % var )", "def set_envs(self):\n # pylint:disable=protected-access\n # Need to call sys.__getframe() to get the filename and method/func\n # for logging information.\n\n # Useful for logging\n # Logging output: TIME UTC |TYPE (DEBUG, INFO, WARNING, etc.) |\n # [File : function]| Message\n cur_filename = sys._getframe().f_code.co_filename\n cur_function = sys._getframe().f_code.co_name\n\n self.logger.info('Setting env variables from config file...')\n # Set all the environment variables that are needed by the\n # MET config file.\n\n tmp_amodel = self.c_dict['AMODEL']\n if tmp_amodel:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_amodel_str = str(tmp_amodel).replace(\"\\'\", \"\\\"\")\n tmp_amodel = ''.join(tmp_amodel_str.split())\n self.add_env_var('AMODEL', tmp_amodel)\n else:\n self.add_env_var('AMODEL', \"[]\")\n\n tmp_bmodel = self.c_dict['BMODEL']\n if tmp_bmodel:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_bmodel_str = str(tmp_bmodel).replace(\"\\'\", \"\\\"\")\n tmp_bmodel = ''.join(tmp_bmodel_str.split())\n self.add_env_var('BMODEL', tmp_bmodel)\n else:\n self.add_env_var('BMODEL', \"[]\")\n\n tmp_desc = self.c_dict['DESC']\n if tmp_desc:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_desc_str = str(tmp_desc).replace(\"\\'\", \"\\\"\")\n tmp_desc = ''.join(tmp_desc_str.split())\n self.add_env_var('DESC', tmp_desc)\n else:\n self.add_env_var('DESC', \"[]\")\n\n tmp_storm_id = self.c_dict['STORM_ID']\n if tmp_storm_id:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_storm_id_str = str(tmp_storm_id).replace(\"\\'\", \"\\\"\")\n tmp_storm_id = ''.join(tmp_storm_id_str.split())\n self.add_env_var('STORM_ID', tmp_storm_id)\n else:\n self.add_env_var('STORM_ID', \"[]\")\n\n tmp_basin = self.c_dict['BASIN']\n if tmp_basin:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_basin_str = str(tmp_basin).replace(\"\\'\", \"\\\"\")\n tmp_basin = ''.join(tmp_basin_str.split())\n self.add_env_var('BASIN', tmp_basin)\n else:\n self.add_env_var('BASIN', \"[]\")\n\n tmp_cyclone = self.c_dict['CYCLONE']\n if tmp_cyclone:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_cyclone_str = str(tmp_cyclone).replace(\"\\'\", \"\\\"\")\n tmp_cyclone = ''.join(tmp_cyclone_str.strip())\n self.add_env_var('CYCLONE', tmp_cyclone)\n else:\n self.add_env_var('CYCLONE', \"[]\")\n\n tmp_storm_name = self.c_dict['STORM_NAME']\n if tmp_storm_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n 
tmp_storm_name_str = str(tmp_storm_name).replace(\"\\'\", \"\\\"\")\n tmp_storm_name = ''.join(tmp_storm_name_str.strip())\n self.add_env_var('STORM_NAME', tmp_storm_name)\n else:\n self.add_env_var('STORM_NAME', \"[]\")\n\n if self.c_dict['INIT_BEG']:\n self.add_env_var('INIT_BEG', self.c_dict['INIT_BEG'])\n else:\n self.add_env_var('INIT_BEG', \"\")\n\n if self.c_dict['INIT_END']:\n self.add_env_var('INIT_END', self.c_dict['INIT_END'])\n else:\n self.add_env_var('INIT_END', \"\")\n\n tmp_init_include = self.c_dict['INIT_INCLUDE']\n if tmp_init_include:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_include_str = str(tmp_init_include).replace(\"\\'\", \"\\\"\")\n tmp_init_include = ''.join(tmp_init_include_str.strip())\n self.add_env_var('INIT_INCLUDE', tmp_init_include)\n else:\n self.add_env_var('INIT_INCLUDE', \"[]\")\n\n tmp_init_exclude = self.c_dict['INIT_EXCLUDE']\n if tmp_init_exclude:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_exclude_str = str(tmp_init_exclude).replace(\"\\'\", \"\\\"\")\n tmp_init_exclude = ''.join(tmp_init_exclude_str.strip())\n self.add_env_var('INIT_EXCLUDE', tmp_init_exclude)\n else:\n self.add_env_var('INIT_EXCLUDE', \"[]\")\n\n tmp_init_hour = self.c_dict['INIT_HOUR']\n if tmp_init_hour:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_hour_str = str(tmp_init_hour).replace(\"\\'\", \"\\\"\")\n tmp_init_hour = ''.join(tmp_init_hour_str.split())\n self.add_env_var('INIT_HOUR', tmp_init_hour)\n else:\n self.add_env_var('INIT_HOUR', \"[]\")\n\n tmp_valid_begin = self.c_dict['VALID_BEG']\n if tmp_valid_begin:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_begin_str = str(tmp_valid_begin).replace(\"\\'\", \"\\\"\")\n tmp_valid_begin = ''.join(tmp_valid_begin_str.strip())\n self.add_env_var('VALID_BEG', tmp_valid_begin)\n else:\n self.add_env_var('VALID_BEG', '')\n\n tmp_valid_end = self.c_dict['VALID_END']\n if tmp_valid_end:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_end_str = str(tmp_valid_end).replace(\"\\'\", \"\\\"\")\n tmp_valid_end = ''.join(tmp_valid_end_str.strip())\n self.add_env_var('VALID_END', tmp_valid_end)\n else:\n self.add_env_var('VALID_END', \"\")\n\n tmp_valid_include = self.c_dict['VALID_INCLUDE']\n if tmp_valid_include:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_include_str = str(tmp_valid_include).replace(\"\\'\", \"\\\"\")\n tmp_valid_include = ''.join(tmp_valid_include_str.strip())\n self.add_env_var('VALID_INCLUDE', tmp_valid_include)\n else:\n self.add_env_var('VALID_INCLUDE', \"[]\")\n\n tmp_valid_exclude = self.c_dict['VALID_EXCLUDE']\n if tmp_valid_exclude:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_exclude_str = str(tmp_valid_exclude).replace(\"\\'\", \"\\\"\")\n tmp_valid_exclude = ''.join(tmp_valid_exclude_str.strip())\n self.add_env_var('VALID_EXCLUDE', tmp_valid_exclude)\n else:\n self.add_env_var('VALID_EXCLUDE', \"[]\")\n\n tmp_valid_hour = self.c_dict['VALID_HOUR']\n if tmp_valid_hour:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_hour_str = str(tmp_valid_hour).replace(\"\\'\", \"\\\"\")\n tmp_valid_hour = ''.join(tmp_valid_hour_str.strip())\n self.add_env_var('VALID_HOUR', tmp_valid_hour)\n else:\n self.add_env_var('VALID_HOUR', \"[]\")\n\n tmp_lead_req = 
self.c_dict['LEAD_REQ']\n if tmp_lead_req:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_lead_req_str = str(tmp_lead_req).replace(\"\\'\", \"\\\"\")\n tmp_lead_req = ''.join(tmp_lead_req_str.strip())\n self.add_env_var('LEAD_REQ', tmp_lead_req)\n else:\n self.add_env_var('LEAD_REQ', \"[]\")\n\n tmp_lead = self.c_dict['LEAD']\n if tmp_lead:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_lead_str = str(tmp_lead).replace(\"\\'\", \"\\\"\")\n tmp_lead = ''.join(tmp_lead_str.strip())\n self.add_env_var('LEAD', tmp_lead)\n else:\n self.add_env_var('LEAD', \"[]\")\n\n tmp_init_mask = self.c_dict['INIT_MASK']\n if tmp_init_mask:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_mask_str = str(tmp_init_mask).replace(\"\\'\", \"\\\"\")\n tmp_init_mask = ''.join(tmp_init_mask_str.strip())\n self.add_env_var('INIT_MASK', tmp_init_mask)\n else:\n self.add_env_var('INIT_MASK', \"[]\")\n\n tmp_valid_mask = self.c_dict['VALID_MASK']\n if tmp_valid_mask:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_mask_str = str(tmp_valid_mask).replace(\"\\'\", \"\\\"\")\n tmp_valid_mask = ''.join(tmp_valid_mask_str.strip())\n self.add_env_var('VALID_MASK', tmp_valid_mask)\n else:\n self.add_env_var('VALID_MASK', \"[]\")\n\n tmp_track_watch_warn = self.c_dict['TRACK_WATCH_WARN']\n if tmp_track_watch_warn:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_track_watch_warn_str = str(tmp_track_watch_warn).replace(\"\\'\",\n \"\\\"\")\n tmp_track_watch_warn = ''.join(tmp_track_watch_warn_str.strip())\n self.add_env_var('TRACK_WATCH_WARN', tmp_track_watch_warn)\n else:\n self.add_env_var('TRACK_WATCH_WARN', \"[]\")\n\n tmp_column_thresh_name = self.c_dict['COLUMN_THRESH_NAME']\n if tmp_column_thresh_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_thresh_name_str = str(tmp_column_thresh_name).replace(\n \"\\'\", \"\\\"\")\n tmp_column_thresh_name = ''.join(tmp_column_thresh_name_str.strip())\n self.add_env_var('COLUMN_THRESH_NAME', tmp_column_thresh_name)\n else:\n self.add_env_var('COLUMN_THRESH_NAME', \"[]\")\n\n tmp_column_thresh_val = self.c_dict['COLUMN_THRESH_VAL']\n if tmp_column_thresh_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_thresh_val_str = str(tmp_column_thresh_val).replace(\"\\'\",\n \"\\\"\")\n tmp_column_thresh_val = ''.join(tmp_column_thresh_val_str.strip())\n self.add_env_var('COLUMN_THRESH_VAL', tmp_column_thresh_val)\n else:\n self.add_env_var('COLUMN_THRESH_VAL', \"[]\")\n\n tmp_column_str_name = self.c_dict['COLUMN_STR_NAME']\n if tmp_column_str_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_str_name = str(tmp_column_str_name).replace(\"\\'\",\n \"\\\"\")\n tmp_column_str_name = ''.join(tmp_column_str_name.strip())\n self.add_env_var('COLUMN_STR_NAME', tmp_column_str_name)\n else:\n self.add_env_var('COLUMN_STR_NAME', \"[]\")\n\n tmp_column_str_val = self.c_dict['COLUMN_STR_VAL']\n if tmp_column_str_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_str_val_str = str(tmp_column_str_val).replace(\"\\'\", \"\\\"\")\n tmp_column_str_val = ''.join(tmp_column_str_val_str.strip())\n self.add_env_var('COLUMN_STR_VAL', tmp_column_str_val)\n else:\n self.add_env_var('COLUMN_STR_VAL', \"[]\")\n\n tmp_init_thresh_name = 
self.c_dict['INIT_THRESH_NAME']\n if tmp_init_thresh_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_thresh_name_str = str(tmp_init_thresh_name).replace(\"\\'\",\n \"\\\"\")\n tmp_init_thresh_name = ''.join(tmp_init_thresh_name_str.strip())\n\n self.add_env_var('INIT_THRESH_NAME', tmp_init_thresh_name)\n\n else:\n self.add_env_var('INIT_THRESH_NAME', \"[]\")\n\n tmp_init_thresh_val = self.c_dict['INIT_THRESH_VAL']\n if tmp_init_thresh_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_thresh_val_str = str(tmp_init_thresh_val).replace(\"\\'\",\n \"\\\"\")\n tmp_init_thresh_val = ''.join(tmp_init_thresh_val_str.strip())\n self.add_env_var('INIT_THRESH_VAL', tmp_init_thresh_val)\n else:\n self.add_env_var('INIT_THRESH_VAL', \"[]\")\n\n tmp_init_str_name = self.c_dict['INIT_STR_NAME']\n if tmp_init_str_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_str_name_str = str(tmp_init_str_name).replace(\"\\'\", \"\\\"\")\n tmp_init_str_name = ''.join(tmp_init_str_name_str.strip())\n self.add_env_var('INIT_STR_NAME', tmp_init_str_name)\n else:\n self.add_env_var('INIT_STR_NAME', \"[]\")\n\n tmp_init_str_val = self.c_dict['INIT_STR_VAL']\n if tmp_init_str_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_str_val_str = str(tmp_init_str_val).replace(\"\\'\", \"\\\"\")\n tmp_init_str_val = ''.join(tmp_init_str_val_str.strip())\n self.add_env_var('INIT_STR_VAL', tmp_init_str_val)\n else:\n self.add_env_var('INIT_STR_VAL', \"[]\")\n\n # boolean values for WATER_ONLY\n if self.c_dict['WATER_ONLY']:\n flag = \"TRUE\"\n else:\n flag = \"FALSE\"\n self.add_env_var('WATER_ONLY', flag)\n\n # boolean value for LANDFALL\n if self.c_dict['LANDFALL']:\n flag = \"TRUE\"\n else:\n flag = \"FALSE\"\n self.add_env_var('LANDFALL', flag)\n\n if self.c_dict['LANDFALL_BEG']:\n self.add_env_var('LANDFALL_BEG',\n self.c_dict['LANDFALL_BEG'])\n else:\n # Set to default\n self.add_env_var('LANDFALL_BEG', '-24')\n\n if self.c_dict['LANDFALL_END']:\n self.add_env_var('LANDFALL_END',\n self.c_dict['LANDFALL_END'])\n else:\n # Set to default\n self.add_env_var('LANDFALL_END', '00')\n\n # boolean value for MATCH_POINTS\n if self.c_dict['MATCH_POINTS'] == 'true':\n flag = \"TRUE\"\n else:\n flag = \"FALSE\"\n self.add_env_var('MATCH_POINTS', flag)\n\n if self.c_dict['CONFIG_FILE']:\n self.add_env_var('CONFIG_FILE',\n self.c_dict['CONFIG_FILE'])\n else:\n self.log_error(\n cur_filename + '|' + cur_function +\n ': no MET TC-Stat config file found. Exiting')\n sys.exit(1)\n\n jobs_list_tmp = self.c_dict['JOBS_LIST']\n if jobs_list_tmp:\n # MET is expecting a string\n jobs_list_str = '\"' + jobs_list_tmp + '\"'\n self.add_env_var('JOBS', jobs_list_str)\n else:\n self.log_error('No jobs list defined. Please check your METplus'\n 'config file. 
Exiting...')\n sys.exit(1)\n return 0", "def SetBootloaderEnv(script, name, val):\n script.AppendExtra('set_bootloader_env(\"%s\", \"%s\");' % (name, val))", "def _set_credentials():\n # Override credentials here if necessary\n if env.user == 'ubuntu':\n env.key_filename = [\n os.path.expanduser('~/.ssh/ubuntu-id_dsa')]\n env.abort_on_prompts = True\n env.disable_known_hosts = True\n env.use_shell = False", "def SetEnvironment(env):\n os.environ.clear()\n os.environ.update(env)", "def load_envs_from_file(file_path=constants.ENV_FILE_DEFAULT_PATH.value):\n #pylint: disable=unspecified-encoding\n with open(file_path, \"r\") as file:\n for line in file:\n line = line.strip()\n if not line:\n continue\n if line.startswith(\"#\"):\n continue\n key, value = line.split(\"=\", 1)\n environ[key] = value", "def add_to_path(path):\n from fabric.contrib.files import append\n import vars\n vars = vars.Vars()\n for file in [ vars.os.default_shell_config, vars.os.default_loginshell_config ]:\n append(file, \"export PATH=$PATH:\"+path, use_sudo=True)", "def create_vars_dot_env(self):\n\n print(\"Creating vars.env in your Google Drive!\")\n\n with open(self.envpath, \"w\") as envfile:\n envfile.write(\"COLAB_ENV = Active\\n\")", "def print_set_env_command(name, value):\n shell_type = get_shell_type()\n if shell_type == Shell.LINUX:\n print(f'export {name!s}=\"{value!s}\";')\n elif shell_type == Shell.POWER_SHELL:\n print(f'$Env:{name!s}=\"{value!s}\";')\n else:\n print(f\"set {name!s}={value!s}\")", "def build_vsh_rc_file(venv_path: Path, working: Optional[Path] = None) -> Path:\n default_working_path = Path('.')\n working = Path(working or default_working_path)\n vsh_venv_config_path = venv_path / '.vshrc'\n if vsh_venv_config_path.parent.exists() and not vsh_venv_config_path.exists():\n if working.exists() and working.is_dir():\n with vsh_venv_config_path.open('w') as config:\n config.write(f'cd {working.absolute()}\\n')\n terminal.echo(f'Set default path to: {f\"{terminal.blue(str(working))}\"}')\n terminal.echo(f'To edit, update: {terminal.yellow(str(vsh_venv_config_path))}')\n return vsh_venv_config_path", "def load_envs(env_file: Optional[str] = None) -> None:\n dotenv.load_dotenv(dotenv_path=env_file, override=True)", "def set_env():\n env.local_dotenv_path = os.path.join(\n os.path.dirname(__file__), 'etc/base_image/.env')\n dotenv.load_dotenv(env.local_dotenv_path)\n env.project_name = os.environ.get('PROJECT_NAME', '')\n env.project_dir = posixpath.join('/srv/images/', env.project_name)\n env.use_ssh_config = True\n\n # Bug: when setting this inside a function. 
Using host_string as workaround\n env.hosts = [os.environ.get('HOST_NAME', ''), ]\n env.host_string = os.environ.get('HOST_NAME', '')\n\n env.base_image_name = os.environ.get('BASE_IMAGE_NAME', '')\n env.build_dir = '/srv/build'\n env.local_path = os.path.dirname(__file__)", "def loadrc():\n from os.path import expanduser, exists\n kw = {}\n rcfile = expanduser(\"~/.otterrc\")\n if exists(rcfile):\n for l in open(rcfile):\n if l and l[0] == '#':\n continue\n l = l.strip()\n k, v = l.split('=', 1)\n kw[k] = v\n return kw", "def set(self, shell=None):\n\n # iterate over the env variable objects and set them in the env\n for var in self._vars.itervalues():\n var.set(shell=shell)", "def setenv(name, value):\n os.environ[name] = value", "def generic_env_configure_vars(self, verbose=False):\n\n if self.settings.os == \"Windows\":\n self.output.fatal(\"Cannot build on Windows, sorry!\")\n return\n\n if self.settings.os == \"Linux\" or self.settings.os == \"Macos\":\n libs = 'LIBS=\"%s\"' % \" \".join([\"-l%s\" % lib for lib in self.deps_cpp_info.libs])\n ldflags = 'LDFLAGS=\"%s\"' % \" \".join([\"-L%s\" % lib for lib in self.deps_cpp_info.lib_paths]) \n archflag = \"-m32\" if self.settings.arch == \"x86\" else \"\"\n cflags = 'CFLAGS=\"-fPIC %s %s\"' % (archflag, \" \".join(self.deps_cpp_info.cflags))\n cpp_flags = 'CPPFLAGS=\"%s %s\"' % (archflag, \" \".join(self.deps_cpp_info.cppflags))\n command = \"env %s %s %s %s\" % (libs, ldflags, cflags, cpp_flags)\n # elif self.settings.os == \"Windows\" and self.settings.compiler == \"Visual Studio\":\n # cl_args = \" \".join(['/I\"%s\"' % lib for lib in self.deps_cpp_info.include_paths])\n # lib_paths= \";\".join(['\"%s\"' % lib for lib in self.deps_cpp_info.lib_paths])\n # command = \"SET LIB=%s;%%LIB%% && SET CL=%s\" % (lib_paths, cl_args)\n # if verbose:\n # command += \" && SET LINK=/VERBOSE\"\n \n return command", "def setup(self):\n # Set bashrc file\n self._bashrc()\n\n # Return if not running script as root user\n if self.running_as_root is False:\n return\n\n # Return if user prompted doesn't exist\n if self.infoset_user_exists is False:\n return\n\n # Set file permissions\n self._file_permissions()\n\n # Setup systemd\n self._systemd()" ]
[ "0.67692745", "0.66647166", "0.6390473", "0.60676485", "0.5944671", "0.58868504", "0.58868504", "0.58794713", "0.5842473", "0.58120805", "0.5724302", "0.5688453", "0.5672777", "0.55925816", "0.5587807", "0.5551319", "0.55474234", "0.55311364", "0.5530302", "0.55150586", "0.5479533", "0.54143304", "0.53916556", "0.5382002", "0.5368387", "0.5366393", "0.5356847", "0.5353307", "0.5346613", "0.53453624" ]
0.73845375
0
Count the number of occurrences of target_class in the data
def count_target_class_data(data, target_class):
    count = 0
    for row in data:
        if row[0] == target_class:
            count += 1
    return count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_target(self):\n tally = {}\n for obj in self.target:\n tally[obj] = 0\n\n ind = 0\n for label in self.labelList:\n filename = self.pathLabel + label\n f = open(filename, 'r')\n content = f.read().split('\\n')\n for line in content:\n items = line.split(' ')\n if items[0] in self.target:\n tally[items[0]] += 1\n f.close()\n if ind % 100 == 0:\n print(f'[COUNT] {ind} of {len(self.labelList)} processed')\n ind += 1\n \n print('[COUNT] done counting targets in dataset')\n print(tally)", "def count_plot_target_class(self):\r\n print(self.dataframe_name)\r\n print(self.data_frame.groupby([self.target_column]).size()) # print the sum of every class\r\n\r\n sns.countplot(data=self.data_frame, x=self.data_frame[self.target_column])\r\n plt.title(self.dataframe_name + ': Display the distribution of ' + self.target_column + ' class')\r\n plt.xlabel('Target Name: ' + self.target_column)\r\n plt.ylabel('Count')\r\n self.save_plot_as_image()\r\n plt.show()", "def get_class_count(df):\r\n \r\n return df[\"class\"].value_counts()", "def class_counts(rows):\n counts = {} # a dictionary of label -> count.\n for row in rows:\n # in our dataset format, the label is always the last column\n label = row[-1]\n if label not in counts:\n counts[label] = 0\n counts[label] += 1\n return counts", "def class_counts(rows):\n counts = {} # a dictionary of label -> count.\n for row in rows:\n # in our dataset format, the label is always the last column\n label = row[-1]\n if label not in counts:\n counts[label] = 0\n counts[label] += 1\n return counts", "def class_counts(rows):\n counts = {} # a dictionary of label -> count.\n for row in rows:\n # in our dataset format, the label is always the last column\n label = row[-1]\n if label not in counts:\n counts[label] = 0\n counts[label] += 1\n return counts", "def get_num_classes(df):\n classes = df.groupby('class_label')\n return classes.ngroups", "def count_classes(labels):\n class_dict = {}\n for image in labels:\n for row in image:\n for label in row:\n if label not in class_dict:\n class_dict[label] = 1\n else:\n class_dict[label] += 1\n return class_dict", "def get_num_classes(self):", "def determine_classes_based_on_target(dataset):\n gains = dataset[TARGET]\n dataset[GLOBAL_CLASS_COLUMN] = [POSITIVE_CLASS if i > ALPHA else NEGATIVE_CLASS for i in gains]\n return dataset", "def compute_metrics(self, target, data, weight):\n pred = self.predict(data, weight)\n assert len(pred) == len(target)\n # Calculate the mis-classification rate:\n N = len(pred)\n pred = np.reshape(pred, (N,))\n target = np.reshape(target, (N,))\n nb_misclass = np.count_nonzero(target - pred)\n return nb_misclass / N", "def getClassCounts(b):\n c = {k:0 for k in labels.keys()}\n for r in b:\n c[r[0]] += 1\n return c", "def class_distribution(y): \n # ===================== PLEASE WRITE HERE =====================\n \n bin_array = np.bincount(y)\n n_class1 = bin_array[1]\n n_class2 = bin_array[2]\n n_class3 = bin_array[3]\n \n # ===================== PLEASE WRITE HERE =====================\n \n print('Number of samples in class_1:', n_class1)\n print('Number of samples in class_2:', n_class2)\n print('Number of samples in class_3:', n_class3)", "def count_ner_labels(self, y_true, y_pred):\n return Counter(y_true), Counter(y_pred)", "def encoding_labelcount(df, target=None):\n if not target:\n target = ['user_id', 'title']\n\n norm = round(\n df.shape[0] / 10000) # normalize the count by /per 100000 entries\n for col in target:\n df[col + '_labelcount'] = 
df[col].map(df[col].value_counts()) / norm\n df.drop([col], axis=1, inplace=True)\n return None", "def get_relevant_indices(dataset, classes, target_classes):\n indices = []\n for i in range(len(dataset)):\n # Check if the label is in the target classes\n label_index = dataset[i][1] # ex: 3\n label_class = classes[label_index] # ex: 'cat'\n if label_class in target_classes:\n indices.append(i)\n return indices", "def calculate_ec_targets_used(oclass, total_targets):\n data_shards, parity_shards, group_number = get_ec_data_parity_group(oclass)\n group_size = data_shards + parity_shards\n if group_number in ('x', 'X'):\n group_number = max(1, total_targets // group_size)\n return group_size * int(group_number)", "def accuracy(predictions, targets):\n correct_count = 0\n for prediction, target in zip(predictions, targets):\n if prediction == target:\n correct_count += 1\n return correct_count / len(predictions)", "def resultCounter(detections):\n counter = 0\n for attribute, value in classIterator(detections):\n if 'crease' in attribute:\n counter += len(value)\n return counter", "def get_class_count(Y_category):\n # Assertions\n assert isinstance(Y_category, np.ndarray), \\\n 'Input must be a numpy ndarray.'\n cls, counts = np.unique(Y_category, return_counts = True)\n cls_counts = dict(zip(cls, counts))\n\n return cls_counts", "def num_classes():\n return NUM_CLASSES", "def num_classes(self) -> int:\n y = self.data.y\n if y is None:\n return 0\n elif y.numel() == y.size(0) and not torch.is_floating_point(y):\n return int(self.data.y.max()) + 1\n elif y.numel() == y.size(0) and torch.is_floating_point(y):\n return torch.unique(y).numel()\n else:\n return self.data.y.size(-1)", "def countclass(self, comb_res, origin_df):\r\n clsdic_ratio = {}\r\n self.clsdic_df = {}\r\n # totalcount = df['count'].sum() # no sum of count but the num of id which attr contains cls\r\n clslist = comb_res['组合需求'].apply(lambda x: x.split('.')[1]).unique().tolist()\r\n\r\n totalcount = len(origin_df[origin_df.attr.apply(self.judge, args=(clslist, ))])\r\n for cls in clslist:\r\n # dfcls = comb_res[comb_res['组合需求'].str.contains(cls)] # no count but distinct id\r\n df_cls = origin_df[origin_df.attr.apply(self.judge, args=(clslist, cls,))]\r\n self.clsdic_df[cls] = df_cls\r\n clsdic_ratio[cls] = round(len(df_cls) / totalcount * 100, 2)\r\n return sorted(clsdic_ratio.items(), key=lambda x: (x[1], x[0]), reverse=True)", "def calcNumberOfMajorityClassRows(self, data, structure):\n maxCount, classIndex = 0, structure['class']['index']\n for value in structure['class']['values']:\n newData = list(filter(lambda y: y[classIndex] == value, data))\n if len(newData) >= maxCount:\n maxCount = len(newData)\n return maxCount", "def classification_accuracy(output,\n target,\n class_wise=False,\n num_cls=6,\n excluded_cls_idx=None):\n\n with torch.no_grad():\n batch_size = target.size(0)\n\n # _ = the largest score, pred = cls_idx with the largest score\n _, pred = output.topk(1, 1, True, True)\n pred = pred.reshape(-1)\n\n acc = float(torch.sum(pred == target)) / float(batch_size) * 100\n return_dict = {'acc': acc}\n\n if excluded_cls_idx is not None:\n correct_count = torch.sum(\n (pred == target) * (target != excluded_cls_idx))\n labeled_count = torch.sum(target != excluded_cls_idx)\n if labeled_count:\n labeled_acc = float(correct_count) / float(labeled_count) * 100\n else:\n labeled_acc = 0\n\n return_dict['labeled_acc'] = labeled_acc\n return_dict['labeled_count'] = labeled_count\n else:\n return_dict['labeled_acc'] = acc\n 
return_dict['labeled_count'] = batch_size\n\n if class_wise:\n acc_class_wise = []\n per_class_count = []\n # actual number of classes <= num_cls=6\n for i in range(num_cls):\n total_sample_cls_i = torch.sum(target == i)\n if total_sample_cls_i:\n correct_samples_cls_i = torch.sum(\n (pred == i) * (target == i))\n acc_class_wise.append(\n float(correct_samples_cls_i) /\n float(total_sample_cls_i) * 100)\n else:\n acc_class_wise.append(0)\n per_class_count.append(total_sample_cls_i)\n\n return_dict['acc_class_wise'] = acc_class_wise\n return_dict['per_class_count'] = per_class_count\n\n return return_dict", "def count(self, cls=None):\n return len(self.all(cls))", "def test_class_counts(self):\n oz = ClassificationScoreVisualizer(GaussianNB())\n oz.fit(self.multiclass.X.train, self.multiclass.y.train)\n\n unique, counts = np.unique(self.multiclass.y.train, return_counts=True)\n npt.assert_array_equal(oz.classes_, unique)\n npt.assert_array_equal(oz.class_counts_, counts)", "def classes_calculations(input):\n counts, _ = np.histogram(input, bins=int(\n input.max() + 1), range=(0, int(input.max())))\n return np.nonzero(counts)[0]", "def classification(original_training_data):\n\n ''' Storing the dataframe as numpy array '''\n original_training_data_values = original_training_data.values\n\n ''' Storing the values of target attribute for finding out the counts of each recipetype'''\n target_column = original_training_data_values[:, -1]\n\n ''' Recipe_type stores the unique values of target attribute in the form of a list [Muffin Cupcake] \n cupcake_muffin_count stores the count of muffin and cupcakes in the form of a list [451 451]'''\n recipe_type, cupcake_muffin_count = np.unique(target_column, return_counts=True)\n\n ''' cupcake_muffin_count.argmax() returns the index of the highest value. In this case, it will return the index of \n muffin or cupcake count. '''\n majority_class = recipe_type[cupcake_muffin_count.argmax()]\n\n return majority_class", "def num_classes(self):\n raise NotImplementedError" ]
[ "0.7505753", "0.70607", "0.6935497", "0.6678549", "0.6678549", "0.6678549", "0.66415346", "0.6629299", "0.6511362", "0.6496359", "0.64001685", "0.6392629", "0.63592434", "0.6350829", "0.63047993", "0.6290631", "0.62288123", "0.6221511", "0.6201017", "0.6199443", "0.6178533", "0.6170708", "0.61509067", "0.6146602", "0.61354196", "0.6112497", "0.6092331", "0.6033419", "0.6016101", "0.6012512" ]
0.8777466
0
check if sequence a is a subsequence of b
def is_subsequence(a, b):
    i_a, i_b = 0, 0
    while i_a < len(a) and i_b < len(b):
        if a[i_a].issubset(b[i_b]):
            i_a += 1
        i_b += 1
    return i_a == len(a)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_subsequence(subseq, seq):\n n = len(seq)\n m = len(subseq)\n\n if m > n:\n return False\n\n i = 0 # index of seq\n j = 0 # index of subseq\n\n while i < n and j < m:\n if seq[i] == subseq[j]:\n j += 1\n i += 1\n\n return j == m", "def isSubsequence(x: str, y: str) -> bool:\n it = iter(y)\n return all(c in it for c in x)", "def is_subseq(subseq, superseq):\n start = 0\n try:\n for ee in subseq:\n start = superseq.index(ee, start) + 1\n except ValueError:\n return False\n return True", "def verify_subseq(seq, subseq):\n\n # https://stackoverflow.com/questions/24017363/how-to-test-if-one-string-is-a-subsequence-of-another\n\n it = iter(seq)\n return all(c in it for c in subseq)", "def is_sub(ind_a, ind_b):\n while ind_a < len(s) and ind_b < len(p):\n if ind_a in removed or s[ind_a] != p[ind_b]:\n ind_a += 1\n continue\n ind_a += 1\n ind_b += 1\n return ind_b == len(p)", "def is_subset(a, b):\n return any(map(lambda x: b[x:x + len(a)] == a, range(len(b) - len(a) + 1)))", "def is_subspan(a: Tuple[int, int], b: Tuple[int, int]) -> bool:\n if a[0] >= b[0] and a[1] <= b[1]:\n return True\n else:\n return False", "def assert_contains_sequence(\n sequence: Sequence[T], subsequence: Sequence[T]\n ) -> None:\n if len(subsequence) == 0:\n return # all sequences contain the empty subsequence\n\n # Iterate over all windows of len(subsequence). Stop if the\n # window matches.\n for i in range(len(sequence) - len(subsequence) + 1):\n candidate = sequence[i : i + len(subsequence)]\n assert len(candidate) == len(subsequence) # sanity check\n if candidate == subsequence:\n return # found it\n raise AssertionError(f\"{subsequence} not found in {sequence}\")", "def seq_in_link(link, sub_link):\r\n # first_link = []\r\n # while link != Link.empty:\r\n # first_link.append(link.first)\r\n # link = link.rest\r\n #\r\n # while sub_link != Link.empty:\r\n # if sub_link.first in first_link:\r\n # index = first_link.index(sub_link.first)\r\n # first_link = first_link[index:]\r\n # sub_link = sub_link.rest\r\n # else:\r\n # return False\r\n # return True\r\n# this method is too complicated!\r\n while link != Link.empty and sub_link != Link.empty:\r\n if sub_link.first == link.first:\r\n sub_link = sub_link.rest\r\n link = link.rest\r\n\r\n if sub_link == Link.empty:\r\n return True\r\n else:\r\n return False", "def contains_sequence(dna1, dna2):\n return dna2 in dna1", "def isSubsequenceDP(self, s: str, t: str) -> bool:\n if len(s) == 0:\n return True\n if len(t) == 0:\n return False\n\n n = len(s)\n m = len(t)\n\n dp = [[0 for i in range(m)] for _ in range(n)]\n dp[0][0] = 1 if s[0] == t[0] else 0\n\n for i in range(1, n):\n if s[i] == t[0]:\n dp[i][0] = 1\n else:\n dp[i][0] = dp[i-1][0]\n\n for i in range(1, m):\n dp[0][i] = 1 if t[i] == s[0] else dp[0][i-1]\n\n for i in range(1, n):\n for j in range(1, m):\n if s[i] == t[j]:\n dp[i][j] = max(dp[i-1][j-1] + 1, dp[i][j-1])\n else:\n dp[i][j] = dp[i][j-1]\n\n return dp[-1][-1] == len(s)", "def __sub__(self, other: Seq) -> int:\n return sum(i != j for i, j in zip_longest(self.sequence, other.sequence))", "def subsequence_indices(a, b):\n index_b_mem = 0\n indices_b = []\n for index_a, itemset_a in enumerate(a):\n for index_b in range(index_b_mem, len(b)):\n if index_b == len(b) - 1:\n # we mark as finished\n index_b_mem = len(b)\n\n itemset_b = b[index_b]\n\n if itemset_a.issubset(itemset_b):\n indices_b.append(index_b)\n index_b_mem = index_b + 1\n break\n\n if index_b_mem == len(b):\n return indices_b\n\n return indices_b", "def contains_sequence(dna1, 
dna2):\r\n if dna2 in dna1:\r\n return True\r\n else:\r\n return False", "def sub_list(small_list, big_list):\n p = -1\n for c in small_list:\n p = position(c, big_list, p+1)\n if p == -1:\n return False\n return True", "def contains_sequence(dna1, dna2):\n if dna1.find(dna2):\n return True", "def list_in_list(a,b):\n if any(a == b[offset:offset+len(a)] for offset in range(len(b)-len(a)+1)):\n return True\n else: \n a.reverse()\n if any(a == b[offset:offset+len(a)] for offset in range(len(b)-len(a)+1)):\n return True\n else: return False", "def match(a, b):\n\n if len(b) > len(a):\n return False\n\n for i, xa in enumerate(a):\n if i < len(b):\n xb = b[i]\n if xb and xb != xa:\n return False\n else:\n break\n\n return True", "def _range_contains(self, a, b):\n\t\treturn b[0] >= a[0] and b[-1] <= a[-1]", "def compare_eq_len(a, b):\n n = len(a)\n m = len(b)\n\n # slide b across a from left to right till from just overlapping till full overlap\n overlap = 0 # stores length of the overlap\n lconcat = \"\" # this stores the shortest common superstring\n for j in range(m):\n starta = 0\n enda = j+1\n startb = m - (j+1)\n endb = m\n if a[starta:enda] == b[startb:endb]:\n # if an overlap is found, check if it is larger than the previously detected one\n # print(\"overlap found\")\n if len(a[starta:enda]) > overlap: \n overlap = len(a[starta:enda]) \n lconcat = b + a[enda:] # this is the current shortest common superstring\n # print(starta, enda, startb, endb, a[starta:enda], b[startb:endb])\n\n # print(\"-\")\n\n # slide b across a so that b starts from one element past a after full overlap\n rconcat = \"\"\n for j in range(m-1):\n starta = j+1\n enda = m\n startb = 0\n endb = m - (j+1)\n if a[starta:enda] == b[startb:endb]:\n if len(a[starta:enda]) > overlap: # if there is a bigger overlap then save it \n # print(\"overlap found\")\n overlap = len(a[starta:enda]) \n rconcat = a + b[endb:]\n # print(starta, enda, startb, endb, a[starta:enda], b[startb:endb])\n\n # after checking for overlaps there may be 1 or no shortest common\n # superstrings stored in both lconcat and rconcat. Choose the shortest one if it exists\n # or the concatenation of a and b if there are no overlaps. 
We may have to make some\n # arbitrary choices here.\n\n if not lconcat and not rconcat: # both lconcat and rconcat are empty, no overlaps\n superstring = a + b # append b to a (could prepend here too, this is an arbitrary choice)\n elif lconcat and not rconcat: # lconcat contains overlap and rconcat is empty\n superstring = lconcat \n elif rconcat and not lconcat: # rconcat contains overlap and lconcat is empty\n superstring = rconcat\n elif rconcat and lconcat and (len(lconcat) <= len(rconcat)): # use lconcat if it is shorter or equal len to rconat\n superstring = lconcat\n elif rconcat and lconcat and (len(rconcat) < len(lconcat)): # use rconcat only if it is shorter than lconat\n superstring = rconcat\n return superstring", "def subseqs_ids(subsequences, sequence):\n return [1 if subsequence in sequence else 0 for subsequence in subsequences]", "def is_subsequence_2d(subseq, seq):\n n = seq.shape[0]\n m = subseq.shape[0]\n w = seq.shape[1]\n\n if seq.shape[1] != subseq.shape[1]:\n return False\n\n if m > n:\n return False\n\n i = 0 # index of seq\n j = 0 # index of subseq\n k = 0 # index of second dimension\n\n while i < n and j < m:\n is_row_valid = True\n for k in range(w):\n if seq[i, k] != subseq[j, k]:\n is_row_valid = False\n break\n if is_row_valid:\n j += 1\n i += 1\n\n return j == m", "def common_end(a, b):\n return a[0] == b[0] or a[-1] == b[-1]\n # return [0] == [0] or [-1] == [-1]", "def is_subset(self, other):", "def overlap(a, b, min_length=3):\n start = 0 # start all the way at the left\n\n while True:\n start = a.find(b[:min_length], start) # look for b's suffix i a\n if start == -1: # no more occurrences to right\n return 0\n # found occurrence; check for full suffix/prefix match\n if b.startswith(a[start:]):\n return len(a) - start\n start += 1 # move just paat previous match", "def is_cyclic_permutation(A, B):\n # Check if same length\n if len(A) != len(B):\n return False\n # Check that contain the same elements\n if set(A) == set(B):\n longlist = A + A\n if contains_sublist(longlist, B):\n return True\n else:\n return False\n else:\n return False", "def isSubString(string1, string2, minMatchLength = 0):\n return (True)", "def find_overlapping(seq, subseq):\n \n pos, count = 0, 0\n while True:\n pos = seq.find(subseq, pos)\n if pos < 0:\n break\n pos += 1 \n count += 1\n return count", "def isFullyContained(self,b):\n if b.chr != self.chr: return False\n if(b.start>=self.start and b.end<=self.end):return True\n else:\n return False", "def _chain_equal(a,b):\n for a_part, b_part in zip(a.parts, b.parts):\n for a_seg, b_seg in zip(a_part, b_part):\n if not np.array_equal(a_seg, b_seg):\n return False\n return True" ]
[ "0.7517667", "0.75154275", "0.7415611", "0.7130467", "0.70990896", "0.7001119", "0.6875942", "0.6578957", "0.6524083", "0.65069354", "0.6502676", "0.64893323", "0.6413586", "0.64094514", "0.6328478", "0.6269901", "0.6263851", "0.6216728", "0.6101734", "0.60896814", "0.6042722", "0.60103875", "0.60099167", "0.59866256", "0.5922796", "0.59042436", "0.58973724", "0.588557", "0.58330894", "0.5824013" ]
0.8708662
0
Return the itemset indices of b that the itemsets of a are included in
def subsequence_indices(a, b):
    index_b_mem = 0
    indices_b = []
    for index_a, itemset_a in enumerate(a):
        for index_b in range(index_b_mem, len(b)):
            if index_b == len(b) - 1:
                # we mark as finished
                index_b_mem = len(b)

            itemset_b = b[index_b]

            if itemset_a.issubset(itemset_b):
                indices_b.append(index_b)
                index_b_mem = index_b + 1
                break

        if index_b_mem == len(b):
            return indices_b

    return indices_b
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def memberIDs(a, b):\n bind = {}\n for i, elt in enumerate(b):\n if elt not in bind:\n bind[elt] = i\n return [bind.get(itm, None) for itm in a] # None can be replaced by any other \"not in b\" value", "def intersect(self, other):\n result = IntSet()\n map(result.insert, [e for e in self.vals if e in other.vals])\n return result", "def intersect(iterable, other):\n return 0", "def intersect(a,b):\n\treturn list(set(a) & set(b))", "def intersect(a, b):\r\n return list(set(a) & set(b))", "def intersection(a, b):\n return list(set(a) & set(b))", "def intersect(a, b):\n return(list(set(a) & set(b)))", "def intersect(a, b):\n return list(set(a) & set(b))", "def find_a_in_b(a, b, a_fields=None, b_fields=None):\r\n def _view_(a):\r\n \"\"\"from the same name in arraytools\"\"\"\r\n return a.view((a.dtype[0], len(a.dtype.names)))\r\n #\r\n small, big = [a, b]\r\n if a.size > b.size:\r\n small, big = [b, a]\r\n if a_fields is not None:\r\n small = small[a_fields]\r\n small = _view_(small)\r\n if b_fields is not None:\r\n big = big[b_fields]\r\n big = _view_(big)\r\n if a.ndim >= 1: # last slice, if [:2] instead, it returns both indices\r\n indices = np.where((big == small[:, None]).all(-1))[1]\r\n return indices", "def get_idx_set(i, sets):\n idxs = []\n for j, set_j in enumerate(sets):\n if i in set_j: idxs.append(j)\n return idxs", "def intersection(A,B):\n set_A = A\n set_B = B\n sorted_intersection = []\n for elements in set_A:\n if elements in set_B:\n sorted_intersection.append(elements)\n return sorted_intersection", "def set_intersection(set_a, set_b):\n \n intersection = set_b & set_a\n \n return intersection", "def unIfInt(a, b):\n if len(intersect(a, b)) != 0:\n return (list(set(a).union(b)))", "def listops_intersect(list_a,list_b):\r\n\r\n retlist = []\r\n for item in list_a:\r\n if item in list_b:\r\n retlist.append(item)\r\n\r\n # ensure that a duplicated item in list_a is only listed once\r\n return listops_uniq(retlist)", "def intersection(self, other):\n intersection_set = Set()\n\n for bucket in self.buckets:\n for element in bucket.iterate():\n if other.contains(element):\n intersection_set.add(element)\n return intersection_set", "def get_jurisdiction_common_members(a: List[int], b: List[int]) -> Set[int]:\n a_set = set(a)\n b_set = set(b)\n\n if a_set & b_set:\n return a_set & b_set\n else:\n return set()", "def intersect(self, other):\n # Initialize a new intSet \n commonValueSet = intSet()\n # Go through the values in this set\n for val in self.vals:\n # Check if each value is a member of the other set \n if other.member(val):\n commonValueSet.insert(val)\n return commonValueSet", "def match_arrays(a, b):\n order = np.argsort(a)\n \n sorted_a = a[order]\n \n idx_sorted_a = np.searchsorted(sorted_a, b)\n \n mask = idx_sorted_a < sorted_a.shape[0]\n mask[mask] = sorted_a[idx_sorted_a[mask]] == b[mask]\n \n idx_sorted_a = idx_sorted_a[mask]\n idx_b = np.where(mask)[0]\n \n return order[idx_sorted_a], idx_b", "def where_in(a, b):\n return torch.nonzero((a[..., None] == b).any(-1)).squeeze()", "def __intersect(self, a, b):\n a = [elem.lower() for elem in a]\n b = [elem.lower() for elem in b]\n return list(set(a) & set(b))", "def intersect(self, other_list):\n assert type(other_list) == type(self)\n \n# if len(self.vals) >= len(other_list.vals):\n# big = self.vals\n# small = other_list.vals\n# else:\n# small = self.vals\n# big = other_list.vals\n# \n# common_list = intSet()\n# for e in big:\n# if e in small:\n# common_list.insert(e)\n# return common_list\n\n common_list = 
intSet() \n for e in self.vals:\n if other_list.member(e): #if the current e is a member of other_list\n common_list.insert(e)\n return common_list", "def __intersect(a, b):\n a = [elem.lower() for elem in a]\n b = [elem.lower() for elem in b]\n return list(set(a) & set(b))", "def connections(self, B):\n if not isinstance(B, Tensor):\n raise TypeError(f'{B} is not of {self.__class__}')\n return set(self.indexes).intersection(B.indexes)", "def venn(a,b):\n a = set(a)\n b = set(b)\n return map(list, (a.difference(b), a.intersection(b), b.difference(a)))", "def ismember(a, b):\n bind = {}\n for i, elt in enumerate(b):\n if elt not in bind:\n bind[elt] = True\n return np.array([bind.get(itm, False) for itm in a]) # None can be replaced by any other \"not in b\" value", "def intersection(set_1, set_2):\n intersection_list = []\n\n for number in set_1:\n if number in set_2:\n intersection_list.append(number)\n \n print(\"Intersection:\", intersection_list)\n return set_1, set_2", "def intersection(self, *other):\n new_ordered_set = OrderedSet()\n\n for element in self:\n for obj in other:\n if element not in obj:\n break\n else:\n new_ordered_set.add(element)\n\n return new_ordered_set", "def set_difference(lst1, lst2):\n elements = []\n indicies = []\n for indx, item in enumerate(lst1):\n if item not in lst2:\n elements.append(item)\n indicies.append(indx)\n return elements, indicies", "def find_intersection(nums1, nums2)->list:\n set1 = set(nums1)\n set2 = set(nums2)\n intersection = set()\n\n for i in set1:\n if i in set2: # The item is in both lists\n intersection.add(i)\n return list(intersection)", "def intersection(*seqs):\n return (item for item in seqs[0]\n if all(item in seq for seq in seqs[1:]))" ]
[ "0.6480096", "0.62859654", "0.6284794", "0.62498695", "0.62055707", "0.6100419", "0.60925007", "0.60800415", "0.6049581", "0.60341656", "0.60310316", "0.5903813", "0.59021163", "0.58024406", "0.5800228", "0.5716897", "0.567191", "0.5652813", "0.56439", "0.56128395", "0.5605463", "0.5584962", "0.5580288", "0.5569585", "0.552343", "0.55120885", "0.54953265", "0.5482153", "0.547894", "0.54716784" ]
0.74765635
0
Replaces all items in data by their encoding
def encode_data(data, item_to_encoding):
    for line in data:
        for i, itemset in enumerate(line[1:]):
            encoded_itemset = set()
            for item in itemset:
                encoded_itemset.add(item_to_encoding[item])
            line[i + 1] = encoded_itemset
    return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def force_utf8(data):\n if isinstance(data, unicode):\n return data.encode(\"utf-8\")\n elif isinstance(data, list):\n return [force_utf8(i) for i in data]\n elif isinstance(data, dict):\n return {force_utf8(i): force_utf8(data[i]) for i in data}\n return data", "def SetDataEncoding(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def recodeToUtf8(data):\n try:\n data = data.decode('utf8').encode('utf8')\n return data\n except UnicodeDecodeError:\n encoding = chardet.detect(data)['encoding']\n logging.log(5, 'encoding should be %s' % encoding)\n if encoding == None:\n encoding = 'latin1'\n try:\n data = data.decode(encoding).encode('utf8')\n except UnicodeDecodeError:\n logging.warn('Error when decoding as %s' % encoding)\n data = data\n except LookupError:\n logging.warn('Unknown encoding when decoding as %s' % encoding)\n data = data\n\n return data\n\n return", "def encode_data(self, data):\n if self.unit == \"char\":\n data = self.char_encoding(data)\n elif self.unit == \"char-ngram\":\n data = self.ngram_encoding(data)\n elif self.unit == \"morpheme\" or self.unit == \"oracle\":\n data = self.morpheme_encoding(data)\n else:\n data = self.data_to_word_ids(data, False)\n return data", "def decode(self, encoded):", "def clean(row):\r\n for v in row:\r\n \tv = v.replace(\"\\xef\\xbb\\xbf\",\"\")\r\n return row", "def encode(self, decoded):", "def _decode_data(self, data):\r\n return data.decode('ISO-8859-1')", "def _force_unicode(data):\n try:\n data = unicode(data, \"utf-8\")\n except UnicodeDecodeError:\n data = unicode(data, \"latin1\")\n return data", "def _utf8_encode(self, d):\n \n\n #***Edit by H. Loho: Got rid of .lower(), because that messes up the Lucene modifiers OR, AND ********************\n\n\n\n for k, v in d.items():\n if isinstance(v, str):\n d[k] = v.encode('utf8')\n if isinstance(v, list):\n for index,item in enumerate(v):\n item = item.encode('utf8')\n v[index] = item\n if isinstance(v, dict):\n d[k] = self._utf8_encode(v)\n \n return d", "def decode(data): #@NoSelf", "def _encode(self, dataset):\n if self._look_up is None: # if we are encoding training set\n self._look_up = dict() # initialize look-up table as empty\n for col in dataset:\n if not is_numeric_dtype(dataset[col]): # for each column that is not numeric\n for val, label in enumerate(dataset[col].unique()): # attach a encode value for each of its label\n self._look_up[label] = val # add that value to the lookup table\n # Problem: Try other method of pandas for this task\n\n dataset.replace(self._look_up, inplace=True)", "def encode(self, text):", "def fill_charset(self, data):\n self.charset = get_optional_value(data, self.CHARSET, \"utf8mb4\")\n self.charset = self.charset or \"utf8mb4\"", "def _format_data(self, data, charset):\n\n return self._encode_data(data) if data else u''", "def SetDataEncoding(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def set_data_encoding(self, encoding):\n self._data_encoding = encoding", "def SetDataEncoding(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def decode(self, data):\n encoding = getattr(self, 'encoding', 'ascii')\n return data.decode(encoding, 'ignore')", "def __correct_encoding(self, encode, filename):\n if encode == 'None' or encode == self.__tencoding:\n 
return\n buffname = '~old' + filename\n self.__os.rename(filename, buffname)\n with open(buffname, 'r', encoding=self.__tencoding) as fr:\n with open(filename, 'w', encoding=self.__tencoding) as fw:\n for line in fr:\n fw.write(line[:-1] + '\\r\\n')\n self.__os.remove(buffname)", "def rebase_add_encoding_prefix():\n\tfilenames, clippings = load_clippings(inFolder)\n\tfor file, clip in zip(filenames, clippings):\n\t\timg = clip[\"imgEncoding\"]\n\t\tstr = \"data:image/png;base64,\"\n\t\tif str not in img:\n\t\t\tclip[\"imgEncoding\"] = str + img\n\t\t\twith open(file, \"w\") as outfile:\n\t\t\t\tjson.dump(clip, outfile)", "def enc(self, data):\n return data", "def encode(self):\n text_lines = [line.text for line in self._document.lines]\n encodings = self._bc.encode(text_lines)\n for (line, encoding) in zip(self._document.lines, encodings):\n line.encoding = encoding\n return self._document", "def _encode(self, data):\n raise NotImplementedError(\"_encode needs to be implemented in {} subclass\".format(type(self).__name__))", "def encode(self, strs):", "def encode(self, strs):", "def recode_for_write(sec):\n sec2 = []\n for i in sec:\n sec2.append(i.decode('utf-8').encode('cp1251'))\n# sec2.append(i.encode('cp1251'))\n return sec2", "def transform(self, data):", "def _translate_string(self, data):\n data = data.encode('iso-8859-1', errors='replace')\n\n for index, char in enumerate(data):\n yield self._meta.characters - 1 - self._ct[char]", "def addInfo(self, **data):\n for key, value in viewitems(data):\n # assumption: value is not iterable (list, dict, tuple, ...)\n # using unicode sandwich pattern\n key = decodeBytesToUnicode(key, \"ignore\")\n value = decodeBytesToUnicode(value, \"ignore\")\n self.data[key] = value\n return" ]
[ "0.66368276", "0.6455011", "0.6275243", "0.6227313", "0.621462", "0.61941475", "0.61907387", "0.61102164", "0.6099654", "0.6066702", "0.60402256", "0.60342956", "0.5956745", "0.59050936", "0.5870682", "0.58624417", "0.5813089", "0.57530445", "0.5732765", "0.57288027", "0.571206", "0.56987566", "0.5680945", "0.5680105", "0.5674229", "0.5674229", "0.5663167", "0.5662114", "0.5553637", "0.5539558" ]
0.70406705
0
Tries to replace itself with the settings of a given model. Returns true if it succeeds or false if the current model can not represent the same set of values.
def exchange_model(self, model):
    if self.can_be_exchanged(model):
        self._browser = model._browser
        return True
    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def models_are_equivalent(model_a: TopLevelOscalModel, model_b: TopLevelOscalModel) -> bool:\n # this will change the second model as a side-effect\n model_b.metadata.last_modified = model_a.metadata.last_modified\n return model_a == model_b", "def _compare_settings(self, settings, original_fields):\r\n original_keys = original_fields.keys()\r\n if 'children' in original_keys:\r\n original_keys.remove('children')\r\n if len(settings) != len(original_keys):\r\n return True\r\n else:\r\n new_keys = settings.keys()\r\n for key in original_keys:\r\n if key not in new_keys or original_fields[key] != settings[key]:\r\n return True", "def replace(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"replace\")", "def _compare_settings(self, settings, original_fields):\n original_keys = list(original_fields.keys())\n if 'children' in original_keys:\n original_keys.remove('children')\n if len(settings) != len(original_keys):\n return True\n else:\n new_keys = list(settings.keys())\n for key in original_keys:\n if key not in new_keys or original_fields[key] != settings[key]:\n return True", "def __ne__(self, other):\n if not isinstance(other, SeedSettings):\n return True\n\n return self.to_dict() != other.to_dict()", "def _save_experiment_to_db_if_possible(\n self, experiment: Experiment, suppress_all_errors: bool = False\n ) -> bool:\n if self.db_settings_set:\n save_experiment(experiment=experiment, db_settings=self.db_settings)\n return True\n return False", "def validate(self):\n for field in self.fields:\n if field.validate():\n self.model.set(field.name, field.model_value)\n else:\n self.errors.append(field.error())\n return len(self.errors) == 0", "def checkModel(self, model):\n # TODO", "def __eq__(self, other):\n if not isinstance(other, Setting):\n return False\n\n return self.__dict__ == other.__dict__", "def models_compatible(model_a: ModuleModel, model_b: ModuleModel) -> bool:\n if model_a == model_b:\n return True\n return model_b.value in _load_v2_module_def(model_a)['compatibleWith']", "def test_model_updating_works_properly(self):\r\n tm = TestModel.objects.create(count=8, text='123456789')\r\n\r\n tm.count = 100\r\n tm.a_bool = True\r\n tm.save()\r\n\r\n tm2 = TestModel.objects(id=tm.pk).first()\r\n self.assertEquals(tm.count, tm2.count)\r\n self.assertEquals(tm.a_bool, tm2.a_bool)", "def apply_settings(self):\n return True", "def __eq__(self, other: 'UserSettings') -> bool:\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, PopSettings):\n return False\n\n return self.__dict__ == other.__dict__", "def _check_by_changing():\n current_settings = read_from_archive(\n archive_path, TRAINING_SETTINGS_FILENAME\n )\n\n is_changed = False\n\n for key, obj in current_settings.items():\n if key == \"mark_up_source\":\n if obj != training_settings[key]:\n is_changed = True\n break\n elif key == \"bug_resolution\":\n current_metrics = {resolution[\"value\"] for resolution in obj}\n new_metrics = {\n resolution[\"value\"]\n for resolution in training_settings[\"bug_resolution\"]\n }\n if current_metrics.difference(new_metrics):\n is_changed = True\n break\n else:\n old_areas_of_testing = {\n entity[\"area_of_testing\"]: entity[\"entities\"]\n for entity in obj\n }\n new_areas_of_testing = {\n entity[\"area_of_testing\"]: entity[\"entities\"]\n for entity in training_settings[key]\n }\n for iteration, key_ in enumerate(old_areas_of_testing, 1):\n if key_ not in 
new_areas_of_testing or set(\n old_areas_of_testing[key_]\n ).difference(set(new_areas_of_testing[key_])):\n is_changed = True\n break\n\n if is_changed:\n delete_training_data(archive_path)", "def set(self, value):\n if value == self.value:\n return False\n self.value = value\n return True", "async def _update_values(self, model: Model):\n\n raise NotImplementedError", "def __eq__(self, other):\n if not isinstance(other, SeedSettings):\n return False\n\n return self.to_dict() == other.to_dict()", "def adjust_model_for_gpus(self) -> None:\n if self._model is None:\n raise ValueError(\"Model must be created before it can be adjusted.\")\n\n # Adjusting twice causes an error.\n if self.is_model_adjusted:\n logging.debug(\"model_and_info.is_model_adjusted is already True\")\n\n if self._optimizer:\n raise ValueError(\"Create an optimizer only after creating and adjusting the model.\")\n\n self._model = ModelAndInfo._adjust_for_gpus(model=self._model,\n config=self.config,\n model_execution_mode=self.model_execution_mode)\n\n self.is_model_adjusted = True\n logging.debug(\"model_and_info.is_model_adjusted set to True\")", "def _set_silent(self, model_instance, value):\n setattr(model_instance, self._cached_name, value)", "def _updateModel(self):\n # for each design variable in the dictionary:\n # loop through rows and cols setting design paramter values\n for dvName in self.DVs:\n dv = self.DVs[dvName]\n espParamIdx = dv.csmDesPmtr.pmtrIndex\n for localIdx in range(dv.nVal):\n rowIdx = localIdx // len(dv.cols)\n colIdx = localIdx % len(dv.cols)\n espRowIdx = dv.rows[rowIdx]\n espColIdx = dv.cols[colIdx]\n self.espModel.SetValuD(espParamIdx, irow=espRowIdx, icol=espColIdx, value=dv.value[localIdx])\n\n # finally, rebuild\n outtuple = self.espModel.Build(0, 0)\n # check that the number of branches built successfully matches the number when the model was first built on __init__\n # otherwise, there was an EGADS/CSM build failure at this design point\n if outtuple[0] != self.num_branches_baseline:\n return False\n else:\n # built correctly\n return True", "def is_model(model: Model) -> bool:\n for key in model:\n if not is_variable(key):\n return False\n return True", "def __eq__(self, other):\n if not isinstance(other, AccessPolicyLoggingSettingModel):\n return False\n\n return self.__dict__ == other.__dict__", "def __ne__(self, other: 'UserSettings') -> bool:\n return not self == other", "def set_default_model(self, model_id):\n try:\n self.default_model = self.model_dict[model_id]\n except KeyError:\n return False\n return False", "def __ne__(self, other):\n if not isinstance(other, ChartSettings):\n return True\n\n return self.to_dict() != other.to_dict()", "def test_set_value(self):\n setting_model = Setting(python_type='int',\n dry_value='42',\n default_value='9001')\n\n setting_model.value = 3000\n self.assertEqual(setting_model.value, 3000)\n self.assertEqual(setting_model.dry_value, '3000')\n setting_model.value = None\n self.assertEqual(setting_model.value, 9001)\n self.assertEqual(setting_model.dry_value, None)", "def solve_brute_force_save_intermediate(self, maximum_solutions=100000):\n if self.solved:\n return False\n\n if not hasattr(self, 'number_of_solutions'):\n self.number_of_solutions = self._get_number_of_solutions()\n if self.number_of_solutions > maximum_solutions:\n return False\n\n if not hasattr(self, 'all_solutions'):\n self.all_solutions = self._get_all_solutions()\n\n new_solution = None\n # only keep solutions that fit with the current solution\n self.all_solutions = 
[\n solution for solution in self.all_solutions if self._check_solution(solution)]\n # combine solutions\n for solution in self.all_solutions:\n if new_solution is None:\n new_solution = solution\n continue\n new_solution = self._get_matching_solution(\n new_solution, solution)\n\n if self.values == new_solution:\n return False\n if new_solution is not None:\n self.values = new_solution\n\n return True", "def edit_settings(self):\n while True:\n os.system('cls' if os.name == 'nt' else 'clear')\n valid_numbers, number_setting_corr = self.print_settings()\n print('Which setting you want to change? Enter \"number, new value\" to modify, or \"done\" to exit.')\n print('Observe the possible values for each setting! They are case sensitive. '\n 'Inputting wrong values might break the program. \\n')\n choice = input('Input:')\n if choice == 'done':\n break\n if ',' not in choice:\n print('Invalid input. Place the number, followed by a comma, followed by its value. Eg: 1,TRUE')\n continue\n if len(choice.split(',')) != 2:\n print('Invalid input, must have only one comma')\n continue\n\n var, val = choice.split(',')\n if var not in valid_numbers:\n print('Invalid number.')\n continue\n real_var = number_setting_corr[var] # Changes from a number to the actual parameter\n if val.lower() == 'true':\n setattr(self, real_var, True)\n continue\n elif val.lower() == 'false':\n setattr(self, real_var, False)\n continue\n else:\n setattr(self, real_var, val)\n\n # todo: check for all possible values to avoid inputting wrong settings and messing everything up.\n # if val not in valid_options_nl_sorting:\n # print('Invalid nonlinear sorting option. Case sensitive! Be very precise.')\n # continue\n # if val not in valid_options_lin_sorting:\n # print('Invalid linear sorting option. Case sensitive! Be very precise.')\n # continue\n # if val not in models:\n # print('Invalid nonlinear fitting model. Case sensitive! Be very precise.')\n # continue\n\n print('===Final settings===')\n _, _ = self.print_settings()\n self.save_settings()\n return", "def is_model(model: Model) -> bool:\r\n for key in model:\r\n if not (is_variable(key) and type(model[key]) is bool):\r\n return False\r\n return True" ]
[ "0.54065675", "0.52343124", "0.52181566", "0.5195714", "0.5108508", "0.51036304", "0.5078762", "0.50757647", "0.5062585", "0.50535893", "0.50410616", "0.50374234", "0.50266767", "0.5018507", "0.5015829", "0.4978108", "0.49635047", "0.4953279", "0.4943368", "0.49315923", "0.491482", "0.49017337", "0.48963076", "0.48714983", "0.4870359", "0.48561242", "0.4855816", "0.48460272", "0.4831458", "0.4804671" ]
0.6060511
0
Ensures that there are exactly 'requested_lists' content lists. Returns whether a change was needed or not.
def _fit_content_lists(self, requested_lists):
    assert requested_lists > 0
    if requested_lists != self._num_contents:
        while requested_lists < self._num_contents:
            self._pop_content_list()

        while requested_lists > self._num_contents:
            self._push_content_list()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def can_request_more():\n # Note: Files are restored in pairs (so we multiply by 2)\n active_requests = jobtracker.query(\"SELECT IFNULL(SUM(numrequested), 0) \" \\\n \"FROM requests \" \\\n \"WHERE status='waiting'\", fetchone=True)\n to_download = jobtracker.query(\"SELECT * FROM files \" \\\n \"WHERE status NOT IN ('downloaded', \" \\\n \"'added', \" \\\n \"'deleted', \" \\\n \"'terminal_failure')\")\n if active_requests == None:\n\tactive_requests = 0\n num_to_restore = active_requests\n num_to_download = len(to_download)\n used = get_space_used()\n reserved = get_space_committed()\n\n can_request = ((num_to_restore+num_to_download) < config.download.numrestored) and \\\n (used+reserved < config.download.space_to_use)\n return can_request", "def _check_size_of_lists(sequence_header, secstr_header):\n if len(sequence_header) != len(sequence):\n sys.exit(\"The size of the sequence list and sequence header doesn't match\")\n else:\n return True", "def __size_restriction_correct_list_list(self):\n\n strTestName = 'List size higher than the size of other list (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('lRefParameter1', 'Ref. parameter')\n RxCSObject.paramType('lRefParameter1', list)\n\n # Now, let us define a list\n RxCSObject.paramAddMan('parameter1', 'List parameter')\n RxCSObject.paramType('parameter1', list)\n RxCSObject.paramSizEq('parameter1', 'lRefParameter1', mul=0.5)\n\n RxCSObject.lRefParameter1 = [21, 22, 23, 24, 25, 26]\n RxCSObject.parameter1 = [11, 12, 13]\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def is_list_request(message):\n list_found = re.search(\"list\", message)\n\n return list_found is not None", "def test_update_checklists_index_out_of_range(self):\r\n update_url = self.get_url(100)\r\n\r\n response = self.client.post(update_url)\r\n self.assertContains(response, 'Could not save checklist', status_code=400)", "def __size_restriction_incorrect_list_list(self):\n\n strTestName = 'List size higher or equal to the size of other list (incorrect)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('lRefParameter1', 'Ref. 
parameter')\n RxCSObject.paramType('lRefParameter1', list)\n\n # Now, let us define a list\n RxCSObject.paramAddMan('parameter1', 'List 1D parameter')\n RxCSObject.paramType('parameter1', list)\n RxCSObject.paramSizHE('parameter1', 'lRefParameter1', mul=0.5)\n\n RxCSObject.lRefParameter1 = [21, 22, 23, 24, 25, 26]\n RxCSObject.parameter1 = [11, 12]\n\n self.__parametersCheck_error(RxCSObject, SizeError, strTestName)", "def test_requests_num(self):\n\n requests_num = len(self.response.context['requests'])\n self.assertLessEqual(requests_num, 10)", "def test_update_checklists_no_index(self):\r\n returned_checklists = json.loads(self.client.get(self.checklists_url).content)\r\n # Verify that persisted checklists do not have expanded action URLs.\r\n # compare_checklists will verify that returned_checklists DO have expanded action URLs.\r\n pers = self.get_persisted_checklists()\r\n self.assertEqual('CourseOutline', get_first_item(pers[1]).get('action_url'))\r\n for pay, resp in zip(pers, returned_checklists):\r\n self.compare_checklists(pay, resp)", "def compare_checklists(self, persisted, request):\r\n self.assertEqual(persisted['short_description'], request['short_description'])\r\n expanded_checklist = expand_checklist_action_url(self.course, persisted)\r\n for pers, req in zip(expanded_checklist['items'], request['items']):\r\n self.assertEqual(pers['short_description'], req['short_description'])\r\n self.assertEqual(pers['long_description'], req['long_description'])\r\n self.assertEqual(pers['is_checked'], req['is_checked'])\r\n self.assertEqual(pers['action_url'], req['action_url'])\r\n self.assertEqual(pers['action_text'], req['action_text'])\r\n self.assertEqual(pers['action_external'], req['action_external'])", "def list_support_required(self):\n\t\treturn self.typemanager.has_lists", "def test_update_checklists_index_ignored_on_get(self):\r\n update_url = self.get_url(1)\r\n\r\n returned_checklists = json.loads(self.client.get(update_url).content)\r\n for pay, resp in zip(self.get_persisted_checklists(), returned_checklists):\r\n self.compare_checklists(pay, resp)", "def __size_restriction_correct_list_number(self):\n\n strTestName = 'List size higher than a number (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'List parameter')\n RxCSObject.paramType('parameter1', list)\n RxCSObject.paramSizH('parameter1', 3)\n\n RxCSObject.parameter1 = [1, 2, 3, 4, 5, 6]\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def __size_restriction_incorrect_list_parameter(self):\n\n strTestName = 'List size equal to a parameter (incorrect)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('iRefParameter1', 'Ref. 
parameter')\n RxCSObject.paramType('iRefParameter1', int)\n\n # Now, let us define a list\n RxCSObject.paramAddMan('parameter1', 'List parameter')\n RxCSObject.paramType('parameter1', list)\n RxCSObject.paramSizEq('parameter1', 'iRefParameter1')\n\n RxCSObject.iRefParameter1 = 14\n RxCSObject.parameter1 = [11, 12, 13]\n\n self.__parametersCheck_error(RxCSObject, SizeError, strTestName)", "def test_api_list_view(dummy_request, add_models):\n from learning_journal.views.default import api_list_view\n assert len(api_list_view(dummy_request)) == len(ENTRIES)", "def test_update_checklists_post_no_index(self):\r\n response = self.client.post(self.checklists_url)\r\n self.assertContains(response, 'Could not save checklist', status_code=400)", "def current_requests(self):\n return len(self._current_requests)", "def valid_multiple_in_request(self):\n return self._repeatable[0] is True", "def __size_restriction_incorrect_list_number(self):\n\n strTestName = 'List size lower or equal to a number (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'List parameter')\n RxCSObject.paramType('parameter1', list)\n RxCSObject.paramSizLE('parameter1', 3)\n\n RxCSObject.parameter1 = [1, 2, 3, 4, 5, 6]\n\n self.__parametersCheck_error(RxCSObject, SizeError, strTestName)", "def can_be_modified(self):\n return self.state in {RequestState.pending, RequestState.accepted}", "def __size_restriction_correct_list_parameter(self):\n\n strTestName = 'List size equal to a parameter (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('iRefParameter1', 'Ref. parameter')\n RxCSObject.paramType('iRefParameter1', int)\n\n # Now, let us define a tuple\n RxCSObject.paramAddMan('parameter1', 'List parameter')\n RxCSObject.paramType('parameter1', list)\n RxCSObject.paramSizEq('parameter1', 'iRefParameter1')\n\n RxCSObject.iRefParameter1 = 3\n RxCSObject.parameter1 = [11, 12, 13]\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def applications_can_be_modified(self):\n return self.status in [\n ApplicationBatchStatus.DRAFT,\n ApplicationBatchStatus.RETURNED,\n ]", "def check_for_requests(self):\n while True:\n doc = self.cc.requests_coll.find_one_and_delete(\n {'receiver': 'validator'}, sort=[('_id', pymongo.ASCENDING)]\n )\n if doc is None:\n break\n\n if doc['action'] == 'validate_upload':\n print(\"fulfil request: set valid: {} for upload_id {}\".format(doc['valid'], doc['upload_id']))\n self.validate_upload(ObjectId(doc['upload_id']), doc['valid'])", "def test_get_checklists(self):\r\n response = self.client.get(self.checklists_url)\r\n self.assertContains(response, \"Getting Started With Studio\")\r\n # Verify expansion of action URL happened.\r\n self.assertContains(response, 'course_team/slashes:mitX+333+Checklists_Course')\r\n # Verify persisted checklist does NOT have expanded URL.\r\n checklist_0 = self.get_persisted_checklists()[0]\r\n self.assertEqual('ManageUsers', get_action_url(checklist_0, 0))\r\n payload = response.content\r\n\r\n # Now delete the checklists from the course and verify they get repopulated (for courses\r\n # created before checklists were introduced).\r\n self.course.checklists = None\r\n # Save the changed `checklists` to the underlying KeyValueStore before updating the modulestore\r\n self.course.save()\r\n modulestore = get_modulestore(self.course.location)\r\n modulestore.update_item(self.course, self.user.id)\r\n self.assertEqual(self.get_persisted_checklists(), None)\r\n 
response = self.client.get(self.checklists_url)\r\n self.assertEqual(payload, response.content)", "def test_add_course_multiple_lists_success(self):\n id = self.course_1.pk\n url = reverse('xds_api:add_course_to_lists', args=(id,))\n _, token = AuthToken.objects.create(self.user_2)\n data = {\n \"lists\": [self.list_3.pk]\n }\n response = \\\n self.client.post(url,\n data,\n HTTP_AUTHORIZATION='Token {}'.format(token))\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(self.list_3.experiences.all()), 1)", "def _check_entity_lists_nonempty(self) -> None:\n\n for entity_list in self.all_entity_lists:\n if len(entity_list) < 1:\n raise ValueError(f\"{entity_list.name} is empty. Nothing to launch.\")", "def check_cont(cont, reqs):\n for req_k, req_v in reqs.items():\n if req_k == 'files':\n for fr in req_v:\n fr_temp = fr.copy() # so subsequent calls don't have their minimum missing\n min_count = fr_temp.pop('minimum')\n count = 0\n for f in cont.get('files', []):\n is_satisfied, _ = check_cont(f, fr_temp)\n if 'deleted' in f or not is_satisfied:\n # Didn't find a match, on to the next one\n continue\n\n count += 1\n if count >= min_count:\n break\n\n if count < min_count:\n return False, f'Failed to find {min_count} file(s) with requirement {fr_temp} ({count} found)'\n\n else:\n is_satisfied, error = check_req(cont, req_k, req_v)\n if not is_satisfied:\n return is_satisfied, error\n return True, None", "def meetsreqs(request, requirements):\n for requirement in requirements:\n if requirement(request) == False:\n return False\n return True", "def check(self):\n self.__check_request_limit()", "def _checkSize(X1,X2):\n \n if len(X1) != len(X2):\n raise ValueError, 'Lists are differnt lengths'", "def fits(self, current_count, current_size, max_size, new_span):\n return thrift.LIST_HEADER_SIZE + current_size + len(new_span) <= max_size" ]
[ "0.6278202", "0.5973832", "0.59087783", "0.5907891", "0.5855553", "0.57828623", "0.57475185", "0.5690994", "0.5683942", "0.56583077", "0.56555396", "0.5647205", "0.5612722", "0.5565011", "0.55416954", "0.5533485", "0.55164355", "0.5491288", "0.54516184", "0.5421067", "0.5408369", "0.53952914", "0.53867686", "0.535914", "0.53459936", "0.533338", "0.5319653", "0.5312542", "0.52951366", "0.52877223" ]
0.6881133
0
After a series of push/pop/fit operations, this makes sure that we only have as many content lists referenced as necessary.
def _finalize_content_lists_change(self):
    while self._num_contents < len(self._contents):
        _, slot = self._contents.pop()
        self.disconnect_disconnectable(slot)

    assert self._num_contents == len(self._contents)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _fit_content_lists(self, requested_lists):\n raise requested_lists > 0 or AssertionError\n if requested_lists != self._num_contents:\n while requested_lists < self._num_contents:\n self._pop_content_list()\n\n while requested_lists > self._num_contents:\n self._push_content_list()", "def content_lists(self):\n return NotImplementedError", "def __init__(self):\n self.contents = deque()", "def _remove_old_items(self):\n if self.size_limit is not None:\n while len(self) > self.size_limit:\n self.popitem(last=False)", "def reset(self):\n\t\tself.memory = deque(maxlen=1000) # Make a fast list push-pop\n\t\tself.loss = 0", "def __init__(self, contents=()):\n self._data = [self._Item(k, v) for k,v in contents] # empty by default\n if len(self._data) > 1:\n self._heapify()", "def __cullArchive(self):\n if len(self.genomes) <= self.max_size:\n return\n\n n_delete = len(self.genomes) - self.max_size\n indices = sorted([(lf, i) for i,lf in enumerate(self.local_fitnesses)])\n to_delete = set( i for _, i in indices[:n_delete] )\n self.genomes = [g for i,g in enumerate(self.genomes) if i not in to_delete]\n self.fitnesses = [f for i,f in enumerate(self.fitnesses) if i not in to_delete]\n self.features = [f for i,f in enumerate(self.features) if i not in to_delete]\n self.local_fitnesses = [f for i,f in enumerate(self.local_fitnesses) if i not in to_delete]\n\n assert len(self.genomes) <= self.max_size\n assert len(self.genomes) == len(self.fitnesses)\n assert len(self.genomes) == len(self.features)\n assert len(self.genomes) == len(self.local_fitnesses)", "def grow(self):\n \n self.body.append(self.body[-1])", "def grow(self):\n old = self.data # keep track of existing list\n self.capacity = self.capacity*2\n self.data = [None] * (self.capacity) # allocate list with new capacity\n walk = self.head\n for k in range(self.size): # only consider existing elements\n self.data[k] = old[walk] # intentionally shift indices\n walk = (1 + walk) % len(old) # use old size as modulus\n self.head = 0 # front has been realigned", "def pop_many(self, *_, **__): # pylint: disable=arguments-differ\n pass", "def pop_many(self, *_, **__): # pylint: disable=arguments-differ\n pass", "def pop():", "def store_all_content_info():\n all_content = ContentQueue(None, None)\n title = input('Title of your paragraph?')\n content = input('Content of your paragraph (single line)')\n images = image_option()\n enqueue_content(all_content, title, content, images)\n extra_content = add_paragraph_option()\n if len(extra_content) == 0:\n pass\n else:\n for i in range(len(extra_content)):\n enqueue_content(all_content, extra_content[i].p_title, extra_content[i].content, extra_content[i].image)\n\n return all_content", "def __init__(self):\n self.container = list() # All items will be added to the container when put_on_stack method invoked\n self.changed_last = False # This attribute changes to True when item is being put on the stack", "def pop(self):", "def pop(self):", "def __init__(self, contents=()):\n self. 
data = [ self._Item(k,v) for k,v in contents ] # empty by default\n if len(self._data) > 1:\n self._heapify()", "def storage_logic(self):\n if len(self.difference) > 0:\n # Assumes we have comics in the datastore and just want to add more.\n self.store_comics(self.difference)\n elif len(self.urls_from_datastore) == 0:\n # Assumes the datastore is empty.\n self.store_comics(self.urls_from_json)", "def test_it ():\n ll = Stack([1, 2, 3])\n\n print(\"initial list:\", ll, \"\\ninitially with\", ll.count, \"elements\")\n print(\"\\npush 10, 7, and then 13\\n\")\n ll.push(10)\n ll.push(7)\n ll.push(13)\n print(\"modified list:\", ll, \"\\nnow with\", ll.count, \"elements\")\n\n forty_two = 42\n print(\"\\npop: %d\\npush: %d\" % (ll.pop(), forty_two))\n ll.push(forty_two)\n\n print(\"\\ninterate over list data without consuming it\")\n n = 0\n for data in ll:\n n += 1\n print(\"%d -> %s\" % (n, repr(data)))\n print(\"That's all folkes! no more data to show!\")\n\n print(\"\\ntest mixed data types, float, string, tuple\")\n ll = Stack([3.14159, \"Hello Python\", (42, \"Excelsior!\")])\n print(ll)", "def __init__(self):\n self.stack = collections.deque([])", "def flush_structure(self):\n ...", "def knowledge_refresh(self):\n knowledge_len = len(self.knowledge)\n for i, sentence in enumerate(deepcopy(self.knowledge)):\n if sentence.cells != set():\n for j in range(i+1, knowledge_len):\n if self.knowledge[j].cells != set() and sentence.cells != self.knowledge[j].cells:\n if sentence.cells.issubset(self.knowledge[j].cells):\n new_set = self.knowledge[j].cells.difference(sentence.cells)\n new_count = self.knowledge[j].count - sentence.count\n if new_set != set():\n new_sentence = Sentence(cells=new_set, count=new_count)\n if not new_sentence in self.knowledge:\n self.knowledge.append(new_sentence)\n\n elif self.knowledge[j].cells.issubset(sentence.cells):\n new_set = sentence.cells.difference(self.knowledge[j].cells)\n new_count = sentence.count - self.knowledge[j].count\n if new_set != set():\n new_sentence = Sentence(cells=new_set, count=new_count)\n if not new_sentence in self.knowledge:\n self.knowledge.append(new_sentence)\n \n # remove unnecessery knowledge\n if sentence.cells == set() and sentence.known_mines() == set() and sentence.known_safes() == set():\n self.knowledge.remove(sentence)", "def push_back(self, *args):\n return _ida_frame.xreflist_t_push_back(self, *args)", "def _expand_main_list(self):\n\n # Compute how much to extend underlying list by\n new_length = self.resizing_factor * len(self.main_list)\n change_in_length = new_length - len(self.main_list)\n\n # Entend underlying list\n self.main_list.extend([None] * change_in_length)", "def __init__(self):\n self.mystack1 = []\n self.mystack2 = []", "def scan_content(content, cur_lists):\n\n #even if there are multiple tags for one list,\n #they should all have a separate key that was created in make_empty_lists()\n all_tags = list(cur_lists.keys())\n \n verified = find_verified(content)\n\n #print\n #print verified\n #print len(verified)\n\n #could customize these:\n skip_tags = [ 'skip', 'meh', 'blah', 'bad' ]\n\n #TODO:\n #for some content without titles, tags are sometimes placed in title\n #could scan titles for tags to place in appropriate list\n \n for item in verified:\n #now scan all segments in item for valid playlist tags...\n #do not recurse here! 
one level is sufficient now\n for segment in item.segments:\n skip = False\n for skip_tag in skip_tags:\n if skip_tag in segment.tags:\n skip = True\n\n if not skip and segment.tags:\n matched = False\n\n #add items to the appropriate list based on tags\n for tag in all_tags:\n if tag in segment.tags:\n cur_lists[tag].append(segment)\n matched = True\n \n #special cases: \n if not matched:\n for tag in segment.tags:\n if re.search(\"\\+\", tag):\n cur_lists['good'].append(segment)\n matched = True\n \n #this should be handled by check above now\n ## #sometimes 'skip' may be in the tag... e.g. \"skip?\"\n ## elif re.search(\"skip\", tag):\n ## matched = True\n \n if not matched: \n print(\"Couldn't match: \", segment.tags)\n\n #could append to misc, if wanted\n cur_lists['misc'].append(segment)\n\n\n else:\n #TODO:\n #if we put it on a list (not skip),\n #that is a good indication that it's worth remembering\n #this is a good chance to see\n #if any people / group related notes exist for the song\n #TODO:\n #or, just open the list\n #and apply it from there\n #that way the process could work for any list\n #(maybe just needs adaptation)\n pass\n \n #print segment.status\n\n print()\n #print content.debug(recurse=False)", "def pop_from_deque(self):", "def AdvanceQueue(self):\r\n self.data.pop(0)\r\n return", "def put(self, item): \n if len(self.contents) < self.max_size:\n self.contents.append(item)\n elif len(self.contents) >= self.max_size:\n print \"Backpack Full.\"", "def fill_batch_queue(self):\n\t\twhile True:\n\t\t\tif self._hps.mode.value != 'decode':\n\t\t\t\t# Get bucketing_cache_size-many batches of Examples into a list, then sort\n\t\t\t\tinputs = []\n\t\t\t\tfor _ in xrange(self._hps.batch_size.value * self._bucketing_cache_size):\n\t\t\t\t\tinputs.append(self._example_queue.get())\n\t\t\t\tinputs = sorted(inputs, key=lambda inp: inp.enc_len) # sort by length of encoder sequence\n\n\t\t\t\t# Group the sorted Examples into batches, optionally shuffle the batches, and place in the batch queue.\n\t\t\t\tbatches = []\n\t\t\t\tfor i in xrange(0, len(inputs), self._hps.batch_size.value):\n\t\t\t\t\tbatches.append(inputs[i:i + self._hps.batch_size.value])\n\t\t\t\t\n\t\t\t\tfor b in batches: # each b is a list of Example objects\n\t\t\t\t\tself._batch_queue.put(Batch(b, self._hps, self._vocab))\n\n\t\t\telse: # beam search decode mode\n\t\t\t\tex = self._example_queue.get()\n\t\t\t\tb = [ex for _ in xrange(self._hps.batch_size.value)]\n\t\t\t\tself._batch_queue.put(Batch(b, self._hps, self._vocab))" ]
[ "0.7201645", "0.55642855", "0.54273766", "0.53647256", "0.53465223", "0.5329691", "0.5302143", "0.52922094", "0.5268114", "0.5258695", "0.5258695", "0.5251591", "0.5224916", "0.5214627", "0.5214149", "0.5214149", "0.51835203", "0.5163969", "0.50850517", "0.5079492", "0.50349027", "0.5033021", "0.50112563", "0.50014925", "0.49979222", "0.49776006", "0.4977563", "0.49747866", "0.49696967", "0.49580726" ]
0.65103406
1
Returns the appropriate browser filter type for a given hotswap target.
def filter_type_for_hotswap_target(target):\n    if isinstance(target, Live.Device.Device):\n        if target.type == DeviceType.instrument:\n            return FilterType.instrument_hotswap\n        elif target.type == DeviceType.audio_effect:\n            return FilterType.audio_effect_hotswap\n        elif target.type == DeviceType.midi_effect:\n            return FilterType.midi_effect_hotswap\n        else:\n            FilterType.disabled\n    elif isinstance(target, Live.DrumPad.DrumPad):\n        return FilterType.drum_pad_hotswap\n    elif isinstance(target, Live.Chain.Chain):\n        return filter_type_for_hotswap_target(target.canonical_parent) if target else FilterType.disabled\n    return FilterType.disabled
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def guess_filter_type(filter_func):\r\n if hasattr(filter_func, 'contextfilter') or \\\r\n hasattr(filter_func, 'environmentfilter'):\r\n return JINJA2, False\r\n\r\n args = inspect.getargspec(filter_func)\r\n if len(args[0]) - (len(args[3]) if args[3] else 0) > 2:\r\n return JINJA2, False\r\n\r\n if hasattr(filter_func, 'needs_autoescape'):\r\n return DJANGO, True\r\n\r\n # Looks like your run of the mill Python function, which are\r\n # easily convertible in either direction.\r\n return False, True", "def make_browser_model(browser, filter_type = None):\n factories = {FilterType.instrument_hotswap: make_instruments_browser_model,\n FilterType.drum_pad_hotswap: make_drum_pad_browser_model,\n FilterType.audio_effect_hotswap: make_audio_effect_browser_model,\n FilterType.midi_effect_hotswap: make_midi_effect_browser_model}\n if filter_type == None:\n filter_type = filter_type_for_browser(browser)\n return factories.get(filter_type, make_fallback_browser_model)(browser)", "def get_best_mimetype():\n # find out what the client accepts\n return request.accept_mimetypes.best_match(\n current_app.blueprints[request.blueprint].response_mimetypes.keys()\n )", "def GetFlavor(params):\n flavors = {\n 'cygwin': 'win',\n 'win32': 'win',\n 'darwin': 'mac',\n }\n if 'flavor' in params:\n return params['flavor']\n if sys.platform in flavors:\n return flavors[sys.platform]\n if sys.platform.startswith('sunos'):\n return 'solaris'\n if sys.platform.startswith('freebsd'):\n return 'freebsd'\n if sys.platform.startswith('openbsd'):\n return 'openbsd'\n if sys.platform.startswith('netbsd'):\n return 'netbsd'\n if sys.platform.startswith('aix'):\n return 'aix'\n if sys.platform.startswith('zos'):\n return 'zos'\n if sys.platform.startswith('os390'):\n return 'zos'\n return 'linux'", "def getFilterClass(filterName, pkg=\"ufo2ft.filters\"):\n # TODO add support for third-party plugin discovery?\n # if filter name is 'Foo Bar', the module should be called 'fooBar'\n filterName = filterName.replace(\" \", \"\")\n moduleName = filterName[0].lower() + filterName[1:]\n module = importlib.import_module(\".\".join([pkg, moduleName]))\n # if filter name is 'Foo Bar', the class should be called 'FooBarFilter'\n className = filterName[0].upper() + filterName[1:]\n if not className.endswith(\"Filter\"):\n className += \"Filter\"\n return getattr(module, className)", "def get_type(self,\n filefilter: str) -> Tuple[str, str]:\n\n return self.types[self.filters.index(filefilter)]", "def _set_filter_type(filter):\n if filter == 'nat':\n return '-N'\n if filter == 'options':\n return '-O'\n if filter == 'filter':\n return '-R'", "def find_browser(request):\n #get the META data about the browser type\n browser=request.META['HTTP_USER_AGENT']\n #make META data all lowercase\n browser=browser.lower()\n #for firefox\n if 'firefox' in browser:\n browser='firefox'\n #for chrome\n elif 'chrome' in browser:\n browser='chrome'\n #for all others\n else:\n browser='unknown'\n\n return browser", "def Get_HighPassFilterMode(self):\r\n current = self.__readFromRegister(self.__REG_RW_CTRL_REG2, self.__MASK_CTRL_REG2_HPM)\r\n for mode in self.__HpmDict.keys():\r\n if self.__HpmDict[mode] == current:\r\n return mode", "def get_file_type(f_blacklist, f_seconds, f_spikes):\n if f_blacklist and f_seconds and f_spikes:\n return 'filtered'\n\n elif f_blacklist and not f_seconds and not f_spikes:\n return 'blist_filtered'\n\n elif not f_blacklist and f_seconds and f_spikes:\n return 'interval_filtered'\n\n elif not f_blacklist and not f_seconds 
and not f_spikes:\n return 'not_filtered'", "def _best_mime():\n supported = []\n renders = {}\n for renderer_cls in app.config.get(\"RENDERERS\"):\n renderer = import_from_string(renderer_cls)\n for mime_type in renderer.mime:\n supported.append(mime_type)\n renders[mime_type] = renderer\n\n if len(supported) == 0:\n abort(\n 500,\n description=debug_error_message(\n \"Configuration error: no supported mime types\"\n ),\n )\n\n best_match = request.accept_mimetypes.best_match(supported) or supported[0]\n return best_match, renders[best_match]", "def get_supported_browsers_suggestions():\n supported_browsers = [\n 'chrome',\n 'chrome-remote',\n 'chrome-headless',\n 'chrome-remote-headless',\n 'firefox',\n 'firefox-remote',\n 'ie',\n 'ie-remote'\n ]\n return supported_browsers", "def identifyTargetType(self, target):\n ipAddress = re.compile('\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}')\n ipFind = re.findall(ipAddress, target)\n if ipFind is not None and len(ipFind) > 0:\n return \"ip\"\n\n md5 = re.compile('[a-fA-F0-9]{32}', re.IGNORECASE)\n md5Find = re.findall(md5,target)\n if md5Find is not None and len(md5Find) > 0:\n return \"md5\"\n\n return \"hostname\"", "def get_filter(name):\n try:\n return FILTERS[name.upper()]\n except:\n msg = 'Unknown model of filter {}, options are {}'\n raise ValueError(msg.format(name, list(FILTERS.keys())))", "def guess_device_class(config: dict):\n uiid = config.get('uiid')\n # DualR3 in cover mode\n if uiid == 126 and config.get('params', {}).get('workMode') == 2:\n return 'cover'\n return UIIDS.get(uiid)", "def get_active_browser():\n if 'firefox' in udata.srcs:\n ubrowser = \"Firefox\"\n elif 'chrome' in udata.srcs:\n ubrowser = \"Chrome\"\n else:\n ubrowser = \"UNDEFINED\"\n return ubrowser", "def getFilterNameFromInt(cls, num):\n return cls.SUPPORTED_FILTERS[num]", "def get_filter_frame_type(self):\n return structures.CD_PROJECTION", "def _browser(self):\n name = self.arg.browser\n if 'phantomjs' in name:\n return self._phantomjs()\n elif 'firefox' in name:\n return self._firefox()\n elif 'chrome' in name:\n return self._chrome()", "def fusion_api_get_switch_types(self, param='', api=None, headers=None):\n return self.swtypes.get(api=api, headers=headers, param=param)", "def best_mime_type(accept_string, default):\n accepts = re.split(\"\\s*,\\s*\", accept_string)\n for accept in accepts:\n if accept == \"text/html\":\n return \"html\"\n elif accept == \"application/rdf+xml\":\n return \"pretty-xml\"\n elif accept == \"text/turtle\" or accept == \"application/x-turtle\":\n return \"turtle\"\n elif accept == \"application/n-triples\" or accept == \"text/plain\":\n return \"nt\"\n elif (accept == \"application/json\" or\n accept == \"application/ld+json\"):\n return \"json-ld\"\n elif accept == \"application/sparql-results+xml\":\n return \"sparql\"\n elif accept == \"application/sparql-results+json\":\n return \"sparql-json\"\n elif (accept == \"application/json\" or\n accept == \"application/javascript\"):\n if default == \"sparql-json\":\n return \"sparql-json\"\n else:\n return \"json-ld\"\n best_q = -1\n best_mime = default\n for accept in accepts:\n if \";\" in accept:\n mime = re.split(\"\\s*;\\s*\", accept)[0]\n extensions = re.split(\"\\s*;\\s*\", accept)[1:]\n for extension in extensions:\n if (\"=\" in extension and\n re.split(\"\\s*=\\s*\", extension)[0] == \"q\"):\n try:\n q = float(re.split(\"\\s*=\\s*\", extension)[1])\n except:\n continue\n if q > best_q:\n if mime == \"text/html\":\n best_q = q\n best_mime = \"html\"\n if mime == 
\"application/rdf+xml\":\n best_q = q\n best_mime = \"pretty-xml\"\n if (mime == \"text/turtle\" or\n mime == \"application/x-turtle\"):\n best_q = q\n best_mime = \"turtle\"\n if (mime == \"application/n-triples\" or\n mime == \"text/plain\"):\n best_q = q\n best_mime = \"nt\"\n if (mime == \"application/json\" or\n mime == \"application/ld+json\"):\n best_q = q\n best_mime = \"json-ld\"\n if mime == \"application/sparql-results+xml\":\n best_q = q\n best_mime = \"sparql\"\n if mime == \"application/sparql-results+json\":\n best_q = q\n best_mime = \"sparql-json\"\n return best_mime", "def determine_file_type(input_file):\r\n file_info, error = subprocess.Popen([settings.FILE, input_file], stdout=subprocess.PIPE).communicate()\r\n\r\n file_type = file_info.decode(\"utf-8\").split()[1]\r\n\r\n if file_type == \"tcpdump\":\r\n return \"pcap\"\r\n elif file_type == \"pcap-ng\":\r\n return \"pcapng\"\r\n elif file_type == \"data\" and (b\"nfdump\" in file_info or b\"nfcapd\" in file_info):\r\n return \"nfdump\"\r\n else:\r\n raise UnsupportedFileTypeError(\"The file type \" + file_type + \" is not supported.\")", "def get_type(benchmark):\n # TODO(metzman): Use classes to mock a benchmark config for\n # OSS_FUZZ_ON_DEMAND.\n default_value = os.getenv('EXPERIMENT_TYPE', BenchmarkType.CODE.value)\n return benchmark_config.get_config(benchmark).get('type', default_value)", "def find_driver_class(self, scheme_or_url: str) -> Optional[Type[Driver]]:\n index = scheme_or_url.find(\":\")\n if index > 0:\n scheme = scheme_or_url[0:index]\n else:\n scheme = scheme_or_url\n\n return self.drivers.get(scheme.lower())", "async def get_filter(self, **kwargs: Any) -> str:\n return self._telescope.filter_name", "def parse_browser_family(browser_key):\n return lookup_data.browser_keys.get(browser_key, 'Unknown')", "def GetDeviceTypeName(self):\n if self._device_type_name is None:\n self._device_type_name = self.LsbReleaseValue(\n key='DEVICETYPE', default='CHROMEBOOK')\n return self._device_type_name", "def choose_driver(is_remote, t_browser):\n if is_remote:\n return remote_driver(t_browser)\n return custom_driver(t_browser)", "def determine_object_type(obj):\n type_obj = \"Ethernet\"\n if isinstance(obj, dom_kvm.LinuxBridge):\n type_obj = BRIDGE\n elif isinstance(obj, dom_kvm.EthernetBond):\n type_obj = ETH_BOND\n elif isinstance(obj, dom_kvm.PhysicalPort):\n type_obj = ETHERNET\n elif isinstance(obj, dom_kvm.OpenVSwitch):\n type_obj = OVS_BR\n return type_obj", "def getFilter(self, type: int) -> int:\n ..." ]
[ "0.550746", "0.5350171", "0.5214283", "0.5172255", "0.51384073", "0.5098301", "0.5082551", "0.504926", "0.49898934", "0.49884063", "0.4926733", "0.49166557", "0.4882145", "0.48667943", "0.48643303", "0.48557383", "0.4853188", "0.47465545", "0.47167808", "0.46532175", "0.46306995", "0.46030295", "0.46013123", "0.45807773", "0.45771617", "0.45718434", "0.45647043", "0.45622733", "0.45355588", "0.45143053" ]
0.8288752
0
Factory that returns an appropriate browser model depending on the browser filter type and hotswap target.
def make_browser_model(browser, filter_type = None):\n    factories = {FilterType.instrument_hotswap: make_instruments_browser_model,\n     FilterType.drum_pad_hotswap: make_drum_pad_browser_model,\n     FilterType.audio_effect_hotswap: make_audio_effect_browser_model,\n     FilterType.midi_effect_hotswap: make_midi_effect_browser_model}\n    if filter_type == None:\n        filter_type = filter_type_for_browser(browser)\n    return factories.get(filter_type, make_fallback_browser_model)(browser)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_browser(request):\n #get the META data about the browser type\n browser=request.META['HTTP_USER_AGENT']\n #make META data all lowercase\n browser=browser.lower()\n #for firefox\n if 'firefox' in browser:\n browser='firefox'\n #for chrome\n elif 'chrome' in browser:\n browser='chrome'\n #for all others\n else:\n browser='unknown'\n\n return browser", "def create_driver(browser_name):\n if browser_name == BaseConstants.CHROME:\n options = webdriver.ChromeOptions()\n options.add_argument('headless')\n if BaseConstants.HEADLESS_MODE:\n return webdriver.Chrome(options=options)\n else:\n return webdriver.Chrome()\n elif browser_name == BaseConstants.FIREFOX:\n options = Options()\n options.add_argument('--headless')\n if BaseConstants.HEADLESS_MODE:\n return webdriver.Firefox(options=options)\n else:\n return webdriver.Firefox()\n else:\n raise ValueError(f\"Unknown browser name: {browser_name}\")", "def _browser(self):\n name = self.arg.browser\n if 'phantomjs' in name:\n return self._phantomjs()\n elif 'firefox' in name:\n return self._firefox()\n elif 'chrome' in name:\n return self._chrome()", "def filter_type_for_hotswap_target(target):\n if isinstance(target, Live.Device.Device):\n if target.type == DeviceType.instrument:\n return FilterType.instrument_hotswap\n elif target.type == DeviceType.audio_effect:\n return FilterType.audio_effect_hotswap\n elif target.type == DeviceType.midi_effect:\n return FilterType.midi_effect_hotswap\n else:\n FilterType.disabled\n elif isinstance(target, Live.DrumPad.DrumPad):\n return FilterType.drum_pad_hotswap\n elif isinstance(target, Live.Chain.Chain):\n return filter_type_for_hotswap_target(target.canonical_parent) if target else FilterType.disabled\n return FilterType.disabled", "def browser(request):\n browser = request.config.getoption(\"--browser\").lower()\n device = request.config.getoption(\"--device\").lower()\n if browser == \"chrome\":\n driver = webdriver.Chrome(ChromeDriverManager().install())\n set_viewport_size(driver, device)\n elif browser == \"firefox\":\n driver = webdriver.Firefox(executable_path=GeckoDriverManager().install())\n set_viewport_size(driver, device)\n elif browser == \"edge_chromium\":\n driver = webdriver.Edge(EdgeChromiumDriverManager().install())\n elif browser == \"ie\":\n driver = webdriver.Ie(IEDriverManager().install())\n else:\n raise Exception(f\"{request.param} is not supported.\")\n yield driver\n driver.quit()", "def create_driver(browser_name):\n if browser_name == BaseConstants.CHROME:\n options = webdriver.ChromeOptions()\n options.add_argument('headless')\n return webdriver.Chrome(executable_path=BaseConstants.CHROME_DRIVER_PATH, options=options)\n if browser_name == BaseConstants.FIREFOX:\n options = webdriver.FirefoxOptions()\n options.add_argument('--headless')\n return webdriver.Firefox(executable_path=BaseConstants.FIREFOX_DRIVER_PATH, options=options)\n else:\n raise ValueError(f\"Unknown browser name:{browser_name}\")", "def choose_driver(is_remote, t_browser):\n if is_remote:\n return remote_driver(t_browser)\n return custom_driver(t_browser)", "def create_browser():\n browser = selenium.webdriver.Chrome()\n return browser", "def get_browser(self, settings=None):\n browser = Browser(self.get_wsgi_application())\n if settings is not None:\n settings(browser)\n self._browsers.append(browser)\n return browser", "def get_browser(self, name, job):\n browser = None\n if 'type' in job and job['type'] == 'traceroute':\n from .traceroute import Traceroute\n browser = Traceroute(self.options, 
job)\n elif name in self.browsers and 'exe' in self.browsers[name]:\n from .chrome_desktop import ChromeDesktop\n browser = ChromeDesktop(self.browsers[name]['exe'], self.options, job)\n return browser", "def get_active_browser():\n if 'firefox' in udata.srcs:\n ubrowser = \"Firefox\"\n elif 'chrome' in udata.srcs:\n ubrowser = \"Chrome\"\n else:\n ubrowser = \"UNDEFINED\"\n return ubrowser", "def _choose_model(self, model_str):\n if model_str == 'lg':\n return(LogisticRegression())\n elif model_str == 'rf':\n return(RandomForestClassifier())\n elif model_str == 'svm':\n # return SVC(C=1, kernel='linear') # linear boundary\n return SVC(C=1, kernel='poly', degree=2) # non-linear boundary\n # return SVC(C=1, kernel='rbf')\n # return SVC(C=1, kernel='sigmoid') # binary classification", "def _make_browser_preprocessing(self):\n train_df = self.train.copy(deep=False)\n browser = self._one_hot('device.browser')\n browserSize = self._one_hot('device.browserSize')\n browserVersion = self._one_hot('device.browserVersion')\n\n return pd.concat([browser,browserSize,browserVersion],axis=1,sort=True)", "def browser(request):\n return request.config.getoption(\"--browser\")", "def get_driver(browser):\n\n # Browser name aliases\n chrome = ('chrome', 'google', 'google chrome', 'googlechrome', 'google-chrome', 'google_chrome')\n firefox = ('firefox', 'ff', 'mozilla', 'gecko', 'geckodriver', 'fire fox', 'fire_fox', 'fire-fox')\n opera = ('opera', 'opera gx', 'operagx', 'opera_gx', 'opera-gx')\n explorer = ('explorer', 'ie', 'internet explorer', 'internet-explorer', 'internet_explorer')\n edge = ('edge', 'microsoft edge', 'microsoft_edge', 'microsoft-edge')\n\n # Download browser binaries according to settings.json\n if browser.lower() in chrome:\n return webdriver.Chrome(ChromeDriverManager().install())\n\n elif browser.lower() in firefox:\n return webdriver.Firefox(executable_path=GeckoDriverManager().install())\n\n elif browser.lower() in opera:\n return webdriver.Opera(OperaDriverManager().install())\n\n elif browser.lower() in explorer:\n return webdriver.Ie(IEDriverManager().install())\n\n elif browser.lower() in edge:\n return webdriver.Edge(executable_path=EdgeChromiumDriverManager().install())\n\n else:\n raise RuntimeError('Browser not found {}'.format(browser.lower()))", "def new_driver(name=\"chrome\"):\n if not name in DRIVERS:\n raise Exception(\"No driver support for '%s'\" % name)\n return DRIVERS[name]()", "def create_browser():\n\t#currently the one I use, but it should work\n\t#user_agent=\"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/38.0\"\n user_agent = \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/41.0.2272.76 Chrome/41.0.2272.76 Safari/537.36\"\n br=mechanize.Browser()\n\t#makes br behave like a real browser\n\tcj=cookielib.LWPCookieJar()\n\tbr.set_cookiejar(cj)\n\tbr.set_handle_equiv(True)\n\tbr.set_handle_gzip(True)\n\t#temporarily changed to False due to unwanted mobile redirection\n\tbr.set_handle_redirect(False)\n\tbr.set_handle_referer(True)\n\tbr.set_handle_robots(False)\n\t#debug messages if desired\n\tbr.set_debug_http(False)\n\tbr.set_debug_redirects(True)\n\tbr.set_debug_responses(False)\n\t#adding user agent...this is kind of shady\n\tbr.addheaders=[('User-agent',user_agent)]\n\treturn br", "def __setup_driver(driver_type: str) -> webdriver:\n if driver_type == \"chrome\":\n return __setup_chrome()\n if driver_type == \"edge\":\n return __setup_edge()\n if driver_type == \"safari\":\n return 
__setup_safari()\n if driver_type == \"firefox\":\n return __setup_firefox()", "def getbrowser():\n\n # Try to find the browser\n try:\n # Get the browser name\n webbrowser.get(using=None)\n\n # Catch an error\n except RuntimeError:\n # Return nothing\n return None", "def get_web_browser(self, settings=None):\n browser = SeleniumBrowser(self.get_wsgi_application())\n if settings is not None:\n settings(browser)\n self._browsers.append(browser)\n return browser", "def __get_browser(self):\n print(\"Setting WebDriver Chrome as headless mode\")\n options = chromeOptions()\n options.set_headless(headless=True)\n chrome = webdriver.Chrome(ChromeDriverManager().install(),\n chrome_options=options)\n return chrome", "def _browser(wh_conn):\n util = Utility()\n config = util.CONFIG\n br = util.get_plugin(config.PATH_BROWSER)\n br.connect(wh_conn)\n return br", "def new_browser():\n\n\tbrowser = mechanize.Browser()\n\tbrowser.set_handle_robots(False)\n\tbrowser.set_handle_refresh(False)\n\n\treturn browser", "def _instantiate_driver(self) -> webdriver:\n\n if self.driver is None: return Browser.run_chromedriver()\n\n return self.driver", "def get_driver(type='chrome', executable_path=None):\n if type == 'chrome':\n driver = get_chrome_driver(options_list=OPTIONS_LIST, executable_path=executable_path)\n elif type == 'firefox':\n driver = get_firefox_driver(options_list=OPTIONS_LIST, executable_path=executable_path)\n else:\n raise (\"Type must be either 'chrome' or 'firefox'.\")\n\n driver.set_window_size(1920, 1080)\n\n return driver", "def browser_class(choose_driver):\n yield choose_driver\n choose_driver.quit()", "def get_instance(driver_info: DriverInfo) -> webdriver:\n if driver_info.get_driver_type() == \"chrome\":\n options = webdriver.ChromeOptions()\n options.add_argument(\"--start-maximized\")\n return webdriver.Chrome(\n executable_path=os.path.join(\n driver_info.get_driver_path(),\n \"chromedriver\"\n ),\n chrome_options=options\n )", "def create_browser_instance():\n cmd = [browser_sync_path, 'start', '--proxy=localhost:8000']\n check_output(cmd)", "def createBrowser(self, mode=False):\n\t\tbr = mechanize.Browser(factory = mechanize.RobustFactory())\n\t\tbr.set_handle_equiv(True)\n\t\tbr.set_handle_redirect(True)\n\t\tbr.set_handle_referer(True)\n\t\tbr.set_handle_robots(False)\n\t\tbr.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)\n\t\tbr.set_debug_http(mode)\n\t\tbr.set_debug_responses(mode)\n\t\tbr.set_debug_redirects(mode)\n\t\tbr.addheaders = [(\"User-Agent\", \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.7 (KHTML, like Gecko) C hrome/16.0.912.63 Safari/535.7\")]\n\t\tcj = cookielib.LWPCookieJar()\n\t\tbr.set_cookiejar(cj)\n\t\treturn br", "def _pick_model(self):\r\n\r\n if self.network_type == 'AutoEncoder_3l':\r\n return AutoEncoder_3l.AutoEncoder_3l(self.network_type, self.loss_type, self.accuracy_type, self.learning_rate,\r\n training=self.is_training, num_filters=self.num_filters, nonlin=self.nonlin,\r\n num_classes=self.num_classes, optimizer=self.optimizer)\r\n\r\n else:\r\n raise ValueError('Architecture does not exist')" ]
[ "0.61912", "0.6001817", "0.5997133", "0.597749", "0.58534664", "0.5690751", "0.5627307", "0.5554387", "0.5523247", "0.5439414", "0.5395686", "0.5385105", "0.536546", "0.5281819", "0.5270705", "0.52703404", "0.52104986", "0.5208358", "0.51796514", "0.5151725", "0.51408345", "0.5128413", "0.5125922", "0.5107445", "0.5095725", "0.5081815", "0.5072225", "0.506265", "0.50315326", "0.5022954" ]
0.81959134
0
Creates the name of an item shortened by removing words from the parent's name.
def _shorten_item_name(self, shortening_limit, list_index, item_name):\n    def is_short_enough(item_name):\n        return len(item_name) <= 9\n    content_lists = self._browser_model.content_lists\n    parent_lists = reversed(content_lists[max(0, list_index - 3):list_index])\n    for content_list in parent_lists:\n        if is_short_enough(item_name):\n            break\n        parent_name = unicode(content_list.selected_item)\n        stems = split_stem(parent_name)\n        for stem in stems:\n            short_name = make_stem_cleaner(stem)(item_name)\n            short_name = full_strip(short_name)\n            item_name = short_name if len(short_name) > 4 else item_name\n            if is_short_enough(item_name):\n                break\n    return item_name[:-1] if len(item_name) >= shortening_limit and item_name[-1] == consts.CHAR_ELLIPSIS else item_name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _complete_name(self):\n for record in self:\n if record.parent_id:\n record.complete_name = record.parent_id.complete_name + ' / ' + record.name\n else:\n record.complete_name = record.name", "def generate_name_suggestion(self):\n if not self._container.group.parent_node:\n return generate_unique_name(self.text().lstrip(\"NX\"), [])\n return generate_unique_name(\n self.text().lstrip(\"NX\"),\n [\n g\n for g in self._container.group.parent_node.children\n if isinstance(g, Group)\n ],\n )", "def _make_name(words):\n return \" \".join(words)", "def make_name(self):\n first, last = \"\", \"\"\n\n def get_first(self):\n \"\"\" Generate a first name \"\"\"\n return \"%s%s\" % (\n weighted_choice([(\"\", 39), (\"We put our faith in \", 1)]),\n choice(self.first_chunks).title()\n )\n\n def get_last(self):\n \"\"\" Generate a last name \"\"\"\n return \"%s%s%s\" % (\n # As per the original list there's a 1/39 (not conting Bob)\n # chance for a 'Mc' prefix to the lastname\n #\n # Can also, with low propability be \"von <lastname>\"\n weighted_choice([(\"\", 35), (\"Mc\", 3), (\"von \", 1)]),\n choice(self.second_chunks).title(),\n choice(self.third_chunks))\n\n # Avoid the first name reappearing in the last name...\n while first.lower() in last.lower():\n first = get_first(self)\n last = get_last(self)\n\n # Always exclaimatory\n return \"%s %s!\" % (first, last)", "def _complete_name(self, cr, uid, ids, name, args, context=None):\n res = {}\n#####added \n context=context or {}\n \n for m in self.browse(cr, uid, ids, context=context):\n if context.get('no_complete_name'):\n res[m.id] = m.name\n return res\n names = [m.name]\n parent = m.location_id\n while parent:\n names.append(parent.name)\n parent = parent.location_id\n res[m.id] = ' / '.join(reversed(names))\n return res", "def getName(self,item):\n return item.s", "def _deal_with_super_sub_expanded(self, string, style=\"plain\"):\n if \"{\" in string:\n name, supers, subs = string, [], []\n else:\n name, supers, subs = split_super_sub(string)\n\n names = [translate(name) for name in name.split(\" \")]\n supers = [translate(sup) for sup in supers]\n subs = [translate(sub) for sub in subs]\n\n name = \" \".join(names)\n\n # apply the style only to the name\n if style == \"bold\":\n name = \"\\\\mathbf{{{}}}\".format(name)\n\n # glue all items together:\n if supers:\n name += \"^{%s}\" % \" \".join(supers)\n if subs:\n name += \"_{%s}\" % \" \".join(subs)\n\n return name", "def get_complete_name(self):\n if self.parent_id:\n name = '%s / %s'%(self.parent_id.get_complete_name(), self.name)\n else:\n name = self.name\n \n return name", "def make_title(words):", "def name(self):\n return 'n' + self._name\n # if self.children:\n # return 'fossil_' + self._name\n # else:\n # return 'society_' + self._name", "def standardize_name_for_look_up(name: Any) -> str:\n if not isinstance(name, str):\n return name\n\n name = name.lower().strip()\n name = \" \".join(name.split(\"_\"))\n name = name.translate(\n str.maketrans(\"\", \"\", string.punctuation)\n ) # remove punctuation\n name = \" \".join(\n [part for part in name.split(\" \") if part]\n ) # ensure there is only a single space between words\n return name", "def get_item_name(sp, item_type, item_id):\n if item_type == 'playlist':\n name = sp.playlist(playlist_id=item_id, fields='name').get('name')\n elif item_type == 'album':\n name = sp.album(album_id=item_id).get('name')\n elif item_type == 'track':\n name = sp.track(track_id=item_id).get('name')\n return sanitize(name)", "def 
tidy_protoname(desc):\n\n descc = pat_under.subn('_', desc)[0]\n descc = pat_removep.subn('', descc)[0]\n return descc", "def get_absname(self):\n if self.animal == None: # no parent animal\n return self.name\n else:\n return '.'.join((self.animal.name, self.name))", "def longname(self):\n if not self.parent:\n return self.name\n return '%s.%s' % (self.parent.longname, self.name)", "def normalize_reference_name(name):\n return name.strip().lower().replace(\"-\", \"_\").replace(\" \", \"_\")", "def GetUniqueName( name, elems ):\n digits = []\n for c in reversed( name ):\n if c.isdigit():\n digits.append( c )\n else:\n break\n \n stem = name[0:len( name ) - len( digits )]\n val = ''.join( digits )[::-1] or 0\n i = int( val )\n \n while True:\n i += 1\n newName = ''.join( [stem, str( i )] )\n if newName not in elems:\n break\n \n return newName", "def set_name(song: str) -> str:\n # Discard unwanted lines\n junk = ['', '[Chorus]', '[Bridge]']\n lines = [line for line in song.split('\\n') if line not in junk and len(\n line.split(' ')) != 1]\n\n # Choose random line, start and stop indicies\n line = choice(lines).split(' ')\n start = randint(0, len(line)-2)\n stop = randint(start+1, len(line)-1)\n line = line[start:stop+1]\n\n # Add words within range to string and capitalise the first word\n song_name = []\n punc = set([',', '.', '\"'])\n for idx, word in enumerate(line):\n # Check for trailing punctuation and remove unless ellipsis\n if idx == len(line)-1 and word[-1] in punc and word[-3:] != \"...\":\n word = word[:-1]\n song_name.append(capwords(word))\n return ' '.join(song_name)", "def get_short_name(self):\n split = self.name.split(' - ')\n # author, year, and first couple of words of paper title\n return \"{} ({}), {}\".format(split[0], split[1], \" \".join(split[2].split(' ')[:3]))", "def fix_name_nga(artist):\n if \"sculptor\" in artist:\n return artist[:artist.find(\"sculptor\")].strip()\n else:\n return artist.strip()", "def clean(self):\n pass\n #TODO check whether short name is really clean and short!", "def format_pname(player, lname=False, sparse=False):\n base = player.name.capitalize()\n if lname and not sparse:\n char = player.char_ob\n if char:\n base = char.item_data.longname or base\n if player.db.afk:\n base += \" {w(AFK){n\"\n if player.db.lookingforrp:\n base += \" {w(LRP){n\"\n if player.is_staff:\n base += \" {c(Staff){n\"\n return base", "def change_name(self, item):\n # Get the new name.\n new_name = str(item.text())\n if not new_name or not self.item_name:\n return None\n\n # See if the name was actually changed.\n if new_name == self.item_name:\n return None\n\n # If it was, change the name in the list/tree view and in Maya.\n if not new_name:\n item.setText(self.item_name)\n self.item_name = cmds.rename(self.item_name, new_name)\n item.setText(self.item_name)", "def normalize(item):\n item = item.lower().strip().rstrip('_')\n return item", "def _make_name(self, name=None):\n\n if name:\n new_name = name.split(\"/\")[-1].split(\".png\")[0]\n if new_name.startswith((\"AWS-\", \"Amazon-\")):\n new_name = new_name.split(\"-\", 1)[1]\n # Replace non-alphanumeric with underscores (1:1 mapping)\n new_name = re.sub(r'\\W+', '_', new_name)\n return new_name", "def _build_fullname(tree: dict) -> None:\n def _apply(item: dict) -> None:\n components = item.pop(\"components\")\n try:\n idx = components[::-1].index(None)\n except ValueError:\n pass\n else:\n components = components[len(components) - idx:]\n if components:\n item[\"fullname\"] = \".\".join(components)\n 
else:\n item[\"fullname\"] = None\n apply_tree(tree, _apply)", "def shorten(name):\r\n nameSplit = name.split()\r\n if len(nameSplit)<=2:\r\n # Returns name if it's only two words or less\r\n return name\r\n\r\n # Else returns first and last entry of split\r\n return(nameSplit[0]+' '+nameSplit[-1])", "def fix_name(row, index, name_map):\n # print(\"Input row: {}\".format(row))\n name = row[index].strip()\n # print(\"Name entry is {}\".format(name))\n if name.endswith(\" (yourself)\"):\n name = name[:-len(\" (yourself)\")]\n # print(\"Shortening to |{}|\".format(name))\n if name not in name_map:\n name_map[name] = name # Initially the identity transform\n row[index] = name_map[name]", "def asName(self, name):\r\n\t\tnewName = \"\"\r\n\t\ttoHigher = False\r\n\t\tfor char in name:\r\n\t\t\tif char in \"_-\":\r\n\t\t\t\ttoHigher = True\r\n\t\t\telse:\r\n\t\t\t\tif toHigher:\r\n\t\t\t\t\tnewName = newName + char.upper()\r\n\t\t\t\telse:\r\n\t\t\t\t\tnewName = newName + char\r\n\t\t\t\ttoHigher = False\r\n\t\treturn newName", "def full_name(self) -> str:\n # return self.separator.join(map(lambda x: x.name, self.path()))\n return self.separator.join(map(lambda x: x.tagged_name, self.path()))" ]
[ "0.66901696", "0.6240433", "0.61582357", "0.6134796", "0.6129895", "0.6053635", "0.6006241", "0.598846", "0.5954065", "0.5892455", "0.5827515", "0.58168906", "0.5776765", "0.57244366", "0.5722493", "0.57079446", "0.57067114", "0.57050925", "0.5695637", "0.56951994", "0.5687221", "0.5677496", "0.56696415", "0.56666243", "0.5662303", "0.5653551", "0.56348044", "0.56236297", "0.56141335", "0.5595422" ]
0.71447694
0
Monitor the FIFO opening operation; if the timeout expired (or disconnect was called in the meantime), interrupt the FIFO opening in the other thread.
def _opening_monitor(self) -> None:\n    timeout_time: float = time.time() + self._timeout\n    while time.time() < timeout_time and not self._stop_waiting_for_opening.is_set():\n        if self._fifo_opened.is_set():\n            return\n        time.sleep(0.1)\n    self._stop_waiting_for_opening.set()\n    self._unblock_open_fifo_operation()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _unblock_open_fifo_operation(self) -> None:\n if os.path.exists(self._fifo_out_path):\n open(self._fifo_out_path, 'wb', buffering=0)\n if os.path.exists(self._fifo_in_path):\n open(self._fifo_in_path, 'rb', buffering=0)", "def ack_or_timeout(receiver):\n\n timeout_starts = time.time() \n while not receiver.available(pipes[0]) and (time.time() - timeout_starts) < 1:\n time.sleep(0.01)", "def timeout_loop(self):\r\n\r\n while not done:\r\n\r\n if (softwareInterfaceTimeout != -1) and (interfaceNotUsed > softwareInterfaceTimeout):\r\n\r\n stop(False)\r\n\r\n \r\n\r\n time.sleep(MONITOR_INTERVAL/1000000.0)\r\n\r\n self.interfaceNotUsed = self.interfaceNotUsed + 1", "def timeout(self):\n self.timeout_scan_flag=True\n self.timer.stop()\n self.status_sig.emit([\"Update_Status\",\"Timeout during acquisition\",'log'])\n self.status_sig.emit([\"Timeout\"])", "def _timeout(self):\n self.inbox_.put_nowait(Atom('timeout'))", "def _wait_for_read_with_timeout(self, fd):\n try:\n with StorletTimeout(self.timeout):\n r, w, e = select.select([fd], [], [])\n except StorletTimeout:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n\n # When there is a task already running, we should cancel it.\n if self.task_id:\n try:\n self._cancel()\n except StorletRuntimeException:\n self.logger.warning(\n 'Task %s timed out, but failed to get canceled'\n % self.task_id)\n pass\n\n if exc_value is None:\n exc_value = exc_traceback\n if exc_value.__traceback__ is not exc_traceback:\n raise exc_value.with_traceback(exc_traceback)\n raise exc_value\n\n if fd not in r:\n raise StorletRuntimeException('Read fd is not ready')", "def _check_fifo(self):\n if not os.path.exists(self.fifo_path):\n raise FifoIsNotAvailable(\"trying to load from session that it's fifo is deleted!\")", "def TimedFlush(timeout, fh, kill_event):\n while True:\n try:\n fh.flush()\n # File handle is closed, exit.\n except ValueError:\n break\n # Wait for kill signal or timeout.\n if kill_event.wait(timeout):\n break\n print threading.currentThread(), 'TimedFlush: Finished'", "def timeout(self):\n pf.debug(\"TIMEOUT\")\n self.acceptData(TIMEOUT)", "def _cancel_ack_timeout(self):\n if self._ack_handle.active():\n self._ack_handle.cancel()", "def wait_for_disconnection(timeout):\n global connected \n\n total_time = 0\n while connected and total_time < timeout:\n time.sleep(1)\n total_time += 1\n\n if connected:\n raise RuntimeError('Could not disconnect to MQTT bridge.')", "def TestClosedSocketTimeout(self):\n self.txrx.timeout = 0.1 # Set a really short timeout so we don't hold up testing\n self.s.close()\n byte_array_msg_tx = bytes('\\x0C\\x0D\\x0E\\x0F\\x10\\x11', encoding=DATA_ENCODING)\n\n self.txrx.tx_msg(byte_array_msg_tx)\n with self.assertRaises(PercivalCommsError):\n reply = self.txrx.rx_msg()", "def _CheckForIdleQuit(self):\n timeout = time.time() + self.idle_timeout_secs\n while time.time() < timeout:\n if self._shutdown_requested_event.is_set():\n # An external source called shutdown()\n return\n elif self._rpc_received_event.is_set():\n logging.debug('Resetting the idle timeout')\n timeout = time.time() + self.idle_timeout_secs\n self._rpc_received_event.clear()\n time.sleep(1)\n # We timed out, kill the server\n logging.warning('Shutting down the server due to the idle timeout')\n self.shutdown()", "def initiate_connection(self) -> None:\n self._stop_waiting_for_opening.clear()\n self._make_fifo_file(self._fifo_out_path)\n self._make_fifo_file(self._fifo_in_path)\n if self._open_fifo_thread is None:\n self._open_fifo_thread = 
threading.Thread(target=self._open_fifo, daemon=True)\n self._open_fifo_thread.start()\n if self._opening_monitor_thread is None:\n self._opening_monitor_thread = threading.Thread(target=self._opening_monitor, daemon=True)\n self._opening_monitor_thread.start()", "def waitReadable( self, timeoutms=None ):\n if len( self.readbuf ) == 0:\n self.pollOut.poll( timeoutms )", "def _sco_close_check(self, adapter):\n\n # if stop() was called, the adapter won't be tracked, so stop checking\n if adapter not in self._connections:\n return\n\n ep = self._connections[adapter][\"epoll\"]\n sock = self._connections[adapter][\"socket\"]\n closed = False\n\n try:\n # timeout of 0 means no blocking\n result = ep.poll(1.0)\n closed = len(result) != 0\n except Exception as e:\n # assuming any error with the socket is a close\n logger.error(\"EPOLL error in _sco_close_check() - {}\".format(\n e))\n closed = True\n\n if closed:\n logger.info(\"Established SCO socket closed for adapter {}.\".format(\n adapter))\n # stop tracking and alert\n self.stop(adapter=adapter)\n if self.on_media_connected_changed:\n self.on_media_connected_changed(\n adapter=adapter,\n connected=False,\n socket=None,\n mtu=None,\n peer=None)\n else:\n # keep checking\n self.io_loop.call_later(\n delay=1,\n callback=self._sco_close_check,\n adapter=adapter)", "def test_lock_timeout():\n lock_unlock_timeout(0)", "def ReceiveTimeout(self) -> int:", "def ReceiveTimeout(self) -> int:", "def watch_for_ack_timeout(self):\n while True:\n sent_message, time_sent = self.awaiting_ack.get()\n timeout_time = time.time() + self.ACK_TIMEOUT\n if time_sent + self.ACK_TIMEOUT < time.time():\n sent_message.resent += 1\n if sent_message.resent < 15:\n self.send_message(sent_message)\n self._log.write_to_log(\n \"ACK\", f\"Attempt {sent_message.resent} to resend {sent_message.TYPE_STRING} message {sent_message.uuid} to {sent_message.destination_node.get_name()}\")\n else:\n self._log.write_to_log(\n \"ACK\", f\"Drop message to {sent_message.destination_node.get_name()}\")\n else:\n self.awaiting_ack.put((sent_message, time_sent))\n\n time.sleep(.3) # Ensure this thread doesn't hog the queue", "def do_monitor(self):\n while not self.expired:\n self.expired = True\n time.sleep(self.interval)\n self.dead_fn()", "def drain(self, timeout=None):\n self.t.join(timeout)\n return not self.t.is_alive()", "async def test_blocking_timeout(self):\n with await self.redis as r:\n result = await r.blpop('missing', timeout=1)\n assert result is None", "def assert_timeout(self) -> None:\n if self._cancelled:\n raise asyncio.TimeoutError from None", "def test_timeout(self):\n s1, s2 = self.create_bound_pair(zmqpy.PAIR, zmqpy.PAIR)\n poller = self.Poller()\n poller.register(s1, zmqpy.POLLIN)\n tic = time.time()\n evt = poller.poll(timeout=.005)\n toc = time.time()\n self.assertTrue(toc-tic < 0.1)\n tic = time.time()\n evt = poller.poll(timeout=5)\n toc = time.time()\n self.assertTrue(toc-tic < 0.1)\n self.assertTrue(toc-tic > .001)\n tic = time.time()\n evt = poller.poll(timeout=500)\n toc = time.time()\n self.assertTrue(toc-tic < 1)\n self.assertTrue(toc-tic > 0.1)", "def __check_stop(self):\n if not self.__parent_thread.is_alive():\n global _iom_shutdown\n self.__logger.info(\"Parent thread ended. Stopping IOManager.\")\n _iom_shutdown = True\n self.__running = False\n\n if not self.__wrappers and not self.__disconnected_wrappers and time.time() > self.__empty_time:\n self.__logger.info(\"No IOWrappers registered. 
Stopping IOManager\")\n self.__running = False\n elif self.__wrappers or self.__disconnected_wrappers:\n self.__empty_time = time.time() + 30", "def _pipe_monitor(self):\n while self._pipe_thread.isAlive() and not self._is_closed:\n if self._pipe.poll(1):\n recv_obj = self._pipe.recv()\n self.processKey(recv_obj)", "def _timeout_cbk(proc):\n proc.kill()\n raise RuntimeError(\"Timeout popped.\")", "def test_flush(queue):\n queue.put('oops')\n queue.flush()\n assert queue.get(timeout=1) is None", "def timeout_stopcomm(commhandle):\r\n\r\n return stopcomm(commhandle)" ]
[ "0.5875112", "0.5731592", "0.5588942", "0.54539853", "0.5290407", "0.52629447", "0.52584803", "0.5223161", "0.52226985", "0.5214648", "0.51609844", "0.5129125", "0.50584686", "0.5039045", "0.5037886", "0.5036288", "0.5028191", "0.50216097", "0.50216097", "0.49879858", "0.49746603", "0.49740192", "0.49725387", "0.4954337", "0.49526358", "0.4911525", "0.48994926", "0.4888911", "0.48822752", "0.48797062" ]
0.7664157
0
This is a workaround for unblocking the FIFO opening operation: imitate opening the FIFO "on the other side".
def _unblock_open_fifo_operation(self) -> None:\n    if os.path.exists(self._fifo_out_path):\n        open(self._fifo_out_path, 'wb', buffering=0)\n    if os.path.exists(self._fifo_in_path):\n        open(self._fifo_in_path, 'rb', buffering=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _make_fifo(self):\n if os.path.exists(self.fifo_path):\n os.remove(self.fifo_path)\n os.mkfifo(self.fifo_path)", "def _open_fifo(self, path, flags):\n collectd.info('Opening: %s' % path)\n if self._open_nonblock:\n # NOTE: Open non-blocking, to detect when there is no reader. Or, so\n # reads can timeout using select or poll.\n flags |= os.O_NONBLOCK\n\n try:\n return os.open(path, flags)\n except OSError as err:\n # If opening for write, the error is likely errno.ENXIO. ENXIO occurs\n # when no reader has the other end open. e.g. when vsys is not running in\n # root context.\n raise VsysOpenException('Opening vsys fifo (%s) failed: %s' %\n (path, err))", "def makeFifo(filename):\n\ttry:\n\t\tos.mkfifo(filename)\n\t\tprint filename\n\texcept OSError, e:\n\t\tpass\n\t\n\tfifo = open(filename, 'w')\n\treturn fifo", "def _opening_monitor(self) -> None:\n timeout_time: float = time.time() + self._timeout\n while time.time() < timeout_time and not self._stop_waiting_for_opening.is_set():\n if self._fifo_opened.is_set():\n return\n time.sleep(0.1)\n self._stop_waiting_for_opening.set()\n self._unblock_open_fifo_operation()", "def _check_fifo(self):\n if not os.path.exists(self.fifo_path):\n raise FifoIsNotAvailable(\"trying to load from session that it's fifo is deleted!\")", "def test_fifo_sync(args=None):\n if args is None:\n args = Namespace(width=8, size=16, name='test')\n else:\n assert hasattr(args, 'width')\n assert hasattr(args, 'size')\n args = tb_default_args(args)\n\n reset = ResetSignal(0, active=1, isasync=True)\n clock = Clock(0, frequency=50e6)\n glbl = Global(clock, reset)\n fbus = FIFOBus(width=args.width)\n\n @myhdl.block\n def bench_fifo_sync():\n \n tbdut = fifo_sync(glbl, fbus, size=args.size)\n tbclk = clock.gen()\n \n @instance\n def tbstim():\n fbus.write_data.next = 0xFE\n reset.next = reset.active\n yield delay(33)\n reset.next = not reset.active\n for ii in range(5):\n yield clock.posedge\n\n # test the normal cases\n for num_bytes in range(1, args.size+1):\n\n # write some bytes\n for ii in range(num_bytes):\n yield clock.posedge\n fbus.write_data.next = ii + 0xCE\n fbus.write.next = True\n\n yield clock.posedge\n fbus.write.next = False\n fbus.write_data.next = 0xFE\n\n # if 16 bytes written make sure FIFO is full\n yield clock.posedge\n if num_bytes == args.size:\n assert fbus.full, \"FIFO should be full!\"\n assert not fbus.empty, \"FIFO should not be empty\"\n \n # fbus.read.next = True\n # yield clock.posedge\n for ii in range(5):\n yield clock.posedge\n if not fbus.empty:\n break\n\n for ii in range(num_bytes):\n fbus.read.next = True\n yield clock.posedge\n assert fbus.read_valid\n assert fbus.read_data == ii + 0xCE, \\\n \"rdata %x ii %x \" % (fbus.read_data, ii + 0xCE)\n\n fbus.read.next = False\n yield clock.posedge\n assert fbus.empty\n\n raise StopSimulation\n\n w = args.width\n write_data, read_data = Signals(intbv(0)[w:], 2)\n\n @always_comb\n def tbmon():\n write_data.next = fbus.write_data\n read_data.next = fbus.read_data\n\n return tbdut, tbclk, tbstim, tbmon\n\n run_testbench(bench_fifo_sync, args=args)", "def open(self):\n # NOTE: caller MUST open for writing BEFORE opening for reading.\n self._fd_out = self._open_fifo(self._path_in, os.O_WRONLY)\n self._fd_in = self._open_fifo(self._path_out, os.O_RDONLY)", "async def make(epoller: Epoller, ram: RAM, fd: FileDescriptor) -> AsyncFileDescriptor:\n status = FDStatus(EPOLL.NONE)\n epolled = await epoller.register(\n fd, EPOLL.IN|EPOLL.OUT|EPOLL.RDHUP|EPOLL.PRI|EPOLL.ERR|EPOLL.HUP|EPOLL.ET,\n 
status.posedge,\n )\n return AsyncFileDescriptor(ram, fd, status, epolled)", "def __setNonBlocking(fd):\n flags = fcntl.fcntl(fd, fcntl.F_GETFL)\n flags = flags | os.O_NONBLOCK\n fcntl.fcntl(fd, fcntl.F_SETFL, flags)", "def __checkFifo(path):\n pass # FIXME implement", "def make_nonblock(self, data_source):\n fd = self.get_fd(data_source)\n LOGGER.debug('Monitoring mking fd %d non-blocking for copy_to', fd)\n flags = fcntl.fcntl(fd, fcntl.F_GETFL)\n fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)", "def test_fifo_sync_random():\n pass", "def setNonBlocking(fd):\n flags = fcntl.fcntl(fd, fcntl.F_GETFL)\n flags = flags | os.O_NONBLOCK\n fcntl.fcntl(fd, fcntl.F_SETFL, flags)", "def UseOnlyOverlappedIO(self) -> bool:", "def rtl_fifo_sel():\n if cso.bypass_fifo:\n # data comes from the register file\n cso.tx_empty.next = itx.empty\n cso.tx_full.next = itx.full\n itx.write_data.next = cso.tx_byte\n\n cso.rx_empty.next = irx.empty\n cso.rx_full.next = irx.full\n cso.rx_byte.next = irx.read_data\n cso.rx_byte_valid.next = irx.read_valid\n\n # @todo: if cso.tx_byte write signal (written by bus) drive the\n # @todo: FIFO write signals, same if the cso.rx_byte is accessed\n itx.write.next = cso.tx_write\n irx.read.next = cso.rx_read\n\n else:\n # data comes from external FIFO bus interface\n fifobus.full.next = itx.full\n itx.write_data.next = fifobus.write_data\n itx.write.next = fifobus.write\n\n fifobus.empty.next = irx.empty\n fifobus.read_data.next = irx.read_data\n fifobus.read_valid.next = irx.read_valid\n irx.read.next = fifobus.read\n\n # same for all modes\n irx.write_data.next = rreg", "def unbuffer_fd(fileno: int):\n fcntl.fcntl(fileno, fcntl.F_SETFL, fcntl.fcntl(fileno, fcntl.F_GETFL) | os.O_NONBLOCK)", "def start():\n global receivers\n global __FIFO_CMD__, __FIFO_DAT__\n\n fd = os.open(__FIFO_DAT__, os.O_RDONLY)\n def callback_dat(fd_):\n \"\"\" Callback for the receiving DATA. 
\"\"\"\n # receive data\n data = os.read(fd_, 8)\n if data == '':\n return\n data, = struct.unpack('<Q', data)\n # TODO: Interpret data\n __recv_notifier__ = LambdaQSocketNotifier(fd, QSocketNotifier.Read, callback_dat)\n\n while True:\n ask_and_send_command()", "def setNonBlocking(fd):\n\n import fcntl\n\n flags = fcntl.fcntl(fd, fcntl.F_GETFL)\n flags = flags | os.O_NONBLOCK\n fcntl.fcntl(fd, fcntl.F_SETFL, flags)", "def test_set_nonblocking():\n\n f = tempfile.TemporaryFile()\n flags = fcntl.fcntl(f, fcntl.F_GETFL, os.O_NONBLOCK)\n assert (flags | os.O_NONBLOCK) != flags\n altered_f = prefork.set_nonblocking(f)\n flags = fcntl.fcntl(f, fcntl.F_GETFL, os.O_NONBLOCK)\n assert (flags | os.O_NONBLOCK) == flags\n\n # Destroy the file, even though GC will do that anyway.\n f.close()", "def getFIFO(self):\n return self._buf", "def _set_nonblocking(fd):\n oflags = fcntl.fcntl(fd, fcntl.F_GETFL)\n nflags = oflags | os.O_NONBLOCK\n fcntl.fcntl(fd, fcntl.F_SETFL, nflags)", "def test_wakeget(clean_queue):\n\n # Consumer thread\n def consumer(clean_queue):\n assert clean_queue.get() == (1, \"fifo\")\n clean_queue.task_done(\"fifo\")\n\n # Start the consumer\n thread = threading.Thread(target=consumer, args=(clean_queue,), daemon=True)\n thread.start()\n\n # Now put something\n clean_queue.put(1, \"fifo\")\n\n # Join the consumer\n thread.join()\n\n # Queue should be empty\n assert clean_queue.qsize == 0\n assert clean_queue.inprogress_size == 0", "def _fifo_tar(self):\r\n fifop = self.unsafe_common_dir / \"fifo.file\"\r\n fifo_tar = self.unsafe_common_dir / \"fifo.tar.gz\"\r\n os.mkfifo(fifop)\r\n with tarfile.open(fifo_tar, \"w:gz\") as tar:\r\n tar.add(fifop)\r\n\r\n return fifo_tar", "def seekable(self):\n ...", "def initiate_connection(self) -> None:\n self._stop_waiting_for_opening.clear()\n self._make_fifo_file(self._fifo_out_path)\n self._make_fifo_file(self._fifo_in_path)\n if self._open_fifo_thread is None:\n self._open_fifo_thread = threading.Thread(target=self._open_fifo, daemon=True)\n self._open_fifo_thread.start()\n if self._opening_monitor_thread is None:\n self._opening_monitor_thread = threading.Thread(target=self._opening_monitor, daemon=True)\n self._opening_monitor_thread.start()", "def example_one():\n fifo = deque()\n fifo.append(1) # Producer\n x = fifo.popleft() # Consumer", "def safe_fd(fd):\n toclose = []\n try:\n while fd < 3:\n toclose.append(fd)\n fd = _os.dup(fd)\n finally:\n for dfd in toclose:\n try:\n _os.close(dfd)\n except OSError:\n pass\n return fd", "def _open_fd_rw(self):\n self.fd = os.open(self.proxy, os.O_RDWR)", "def test_OpenCloseOneHundred(self):\n\n q = Queue(self.path)\n for i in range(1000):\n q.put('var%d' % i)\n del q\n q = Queue(self.path)\n self.assertEqual(1000, q.qsize())\n for i in range(1000):\n data = q.get()\n self.assertEqual('var%d' % i, data)\n q.task_done()\n with self.assertRaises(Empty):\n q.get_nowait()\n # assert adding another one still works\n q.put('foobar')\n data = q.get()", "def _exlock(self, fd):\n fcntl.lockf(fd, fcntl.LOCK_EX)" ]
[ "0.6802569", "0.62895423", "0.6190773", "0.6061759", "0.5897779", "0.5859627", "0.5841202", "0.58050406", "0.5755729", "0.574868", "0.5717438", "0.5699288", "0.5688702", "0.5677355", "0.56472605", "0.56437516", "0.56407607", "0.56212544", "0.5571513", "0.5566992", "0.5514071", "0.54820454", "0.5459597", "0.5421069", "0.5416719", "0.536882", "0.5359477", "0.5326957", "0.53217846", "0.53141546" ]
0.76129514
0
Scrape and clean stuff up to save in a GameContainer, which is just a shell to hold all needed data for parsing. The container allows us to have a file that we can manually repair and reparse in the case of scoring errors.
def scrape_to_container(gameid, cache_path=None, session=None, save_container=True):\n    d = setup_scraper(gameid, cache_path, session)\n    scraper, away_starting_lineup, home_starting_lineup, away_roster, home_roster = d\n    #=======================================================================\n    # Move into game container\n    #=======================================================================\n    home = scraper.home_team()\n    away = scraper.away_team()\n    gc = GameContainer(CONTAINER_PATH, gameid, away, home)\n    gc.url = pss.make_html_url(gameid)\n    gc.set_away_roster(away_roster)\n    gc.set_home_roster(home_roster)\n    for half in scraper.halfs():\n        gc.new_half()\n        for raw_event in half.raw_events():\n            if raw_event.is_sub():\n                gc.add_sub(raw_event.title(), raw_event.text())\n            else:\n                gc.add_event(raw_event.title(), raw_event.text(), raw_event.batter(), raw_event.batter_number())\n    if save_container:\n        gc.save()\n    if scraper.critical_errors:\n        raise StandardError("Scraper finished with critical errors. GameContainer was saved for attempted repairs")\n    return gc
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def parse_replay(request, game):\n\n game = game.lower()\n replay_file = request.files.get(\"replay\")\n if replay_file:\n if game == STARCRAFT:\n basic, result = await SC2Replay.process_replay(replay_file, request.args.get(\"load_map\", False))\n if result:\n # Lets create our db entry\n\n basic['private_replay'] = request.args.get('private_replay', False)\n replay_id = str(uuid.uuid4())\n basic[\"_id\"] = replay_id\n print(replay_id)\n unique_name = \".\".join([replay_id, \"SC2Replay\"])\n basic[\"replay_object_name\"] = unique_name\n basic[\"game_name\"] = request.args.get(\"replay_name\", datetime.utcnow())\n try:\n success = await request.app.object_storage.add_object(request.app.config.OS_CONTAINER, replay_file, unique_name)\n if success:\n # push results to mongoDB\n mongo = request.app.mongodb\n # Insert the basic information for the replay\n await mongo.starcraft_2_replays.info.insert_one(basic)\n # Insert event data\n events = dict(result['event'])\n events.update(basic)\n print(events)\n await mongo.starcraft_2_replays.replay_events.insert_one(events)\n # Insert stats data\n stats = dict(result['stats'])\n stats.update(basic)\n await mongo.starcraft_2_replays.replay_stats.insert_one(stats)\n\n return sanic.response.json(basic)\n except (swift.BluemixSwiftUnavailableError,\n swift.BluemixSwiftAuthenticationError,\n swift.BluemixSwiftRequestTimeoutError,\n bson.errors.InvalidDocument,\n pymongo.errors.ConnectionFailure):\n\n traceback.print_exc()\n data = {\n \"error\": \"Internal Server Error\",\n \"success\": False,\n \"game\": STARCRAFT\n }\n return sanic.response.json(data)", "def parse_game(f, sql, gid):\n\tbsoup = BeautifulSoup(f, \"lxml\")\n\t# the title is in the format:\n\t# J! Archive - Show #XXXX, aired 2004-09-16\n\t# the last part is all that is required\n\tairdate = bsoup.title.get_text().split()[-1]\n\tif not parse_round(bsoup, sql, 1, gid, airdate) or not parse_round(bsoup, sql, 2, gid, airdate):\n\t\t# one of the rounds does not exist\n\t\tpass\n\t# the final Jeopardy! 
round\n\tr = bsoup.find(\"table\", class_ = \"final_round\")\n\tif not r:\n\t\t# this game does not have a final clue\n\t\treturn\n\tcategory = r.find(\"td\", class_ = \"category_name\").get_text()\n\ttext = r.find(\"td\", class_ = \"clue_text\").get_text()\n\tanswer = BeautifulSoup(r.find(\"div\", onmouseover = True).get(\"onmouseover\"), \"lxml\")\n\tanswer = answer.find(\"em\").get_text()\n\t# False indicates no preset value for a clue\n\tinsert(sql, [gid, airdate, 3, category, False, text, answer])", "def test_main():\n html = None\n with open('/home/jgunter/Projects/gaia-project-scraper/test_log.txt') as f:\n html = f.read()\n\n if html:\n soup = BeautifulSoup(html, 'lxml')\n raw_game_log = soup.find('div', class_='col-12 order-last mt-4')\n log = GameLog.parse_from_HTML(raw_game_log)\n stats = Stats(log)\n stats.breakdown()", "def createScroeCardsScript(savFolder,m):\n if m == 's': #the start of the scorecard\n #read the parameters from the store file\n with open(savFolder+'Store.Nrg_HN002','r') as f:\n for line in f:\n if re.search('.PatientName = SimpleString',line):\n line2 = f.next()\n PatientName = readMe(line2,'s')\n if re.search('.MRN = SimpleString',line):\n line2 = f.next()\n MRN = readMe(line2,'s')\n if re.search('.Plan = SimpleString',line):\n line2 = f.next()\n PlanName = readMe(line2,'s')\n\n #write scripts that would create the score cards\n #heading\n f = open(savFolder+'createScoreCardsNrg_HN002.Script','w')\n f.write('/////////////////////////////////////\\n')\n f.write('//createScoreCardsNrg_HN002.Script///\\n')\n f.write('///Create score card in Pinnacle ////\\n')\n f.write('/////////////////////////////////////\\n')\n f.write('\\n')\n f.write('//create score card window and goals//\\n')\n f.write('WindowList.TrialScoreCardEditor.Create = \\\"Score Card Window\\\";\\n')\n f.write('\\n')\n f.write('//clear current score card\\n')\n f.write('Scorecard.DoseVolClinicalGoalList.DestroyAllChildren = \\\"Clear Score Card\\\";\\n')\n f.write('\\n')\n f.write('//name score cards\\n')\n f.write('Scorecard.Name = \\\"UVA NRG HN 002\\\";\\n')\n f.write('Scorecard.Description = \\\"Scorecard for %s %s\\\";\\n' %(PatientName, MRN))\n f.write('\\n')\n f.close()\n\n if m == 'p': #start writing PTV goals\n #read the parameters from the store file\n with open(savFolder+'Store.Nrg_HN002','r') as f:\n for line in f:\n if re.search('.PTV_N = SimpleString',line):\n line2 = f.next()\n PTV_N = readMe(line2,'s')\n if re.search('.PTVVol = SimpleString',line):\n line2 = f.next()\n PTVVol = readMe(line2,'s')\n if re.search('.CurrDose = Float',line):\n line2 = f.next()\n Dose = int(readMe(line2,'f'))\n #start writing goals\n f = open(savFolder+'createScoreCardsNrg_HN002.Script','a')\n if Dose == 6000:\n line = defineGoalinCards.writetext('','PTV_6000','Min DVH (%)',VT1=95,DT1=6000)\n f.write(line+'\\n')\n line = defineGoalinCards.writetext('','PTV_6000','Max DVH (%)',VT1=95,DT1=6000,VT2=95,DT2=6300)\n f.write(line+'\\n')\n line = defineGoalinCards.writetext('','PTV_6000','Min DVH (%)',VT1=99,DT1=5580,VT2=99,DT2=5400)\n f.write(line+'\\n')\n line = defineGoalinCards.writetext('','PTV_6000','Max Dose',DT1=6600,DT2=6900)\n f.write(line+'\\n')\n line = defineGoalinCards.writetext('','CTV_6000','Min DVH (%)',VT1=99,DT1=6000,VT2=95,DT2=6000)\n f.write(line+'\\n')\n if Dose == 5400:\n line = defineGoalinCards.writetext('','PTV_5400','Min DVH (%)',VT1=95,DT1=5400,VT2=95,DT2=5130)\n f.write(line+'\\n')\n line = defineGoalinCards.writetext('','PTV_5400','Min DVH (%)',VT1=99,DT1=5020,VT2=99,DT2=4800)\n 
f.write(line+'\\n')\n line = defineGoalinCards.writetext('','CTV_5400','Min DVH (%)',VT1=99,DT1=5400,VT2=95,DT2=5400)\n f.write(line+'\\n')\n if Dose == 4800:\n line = defineGoalinCards.writetext('','PTV_4800','Min DVH (%)',VT1=95,DT1=4800,VT2=95,DT2=4400)\n f.write(line+'\\n')\n line = defineGoalinCards.writetext('','PTV_4800','Min DVH (%)',VT1=99,DT1=4460,VT2=99,DT2=4320)\n f.write(line+'\\n')\n line = defineGoalinCards.writetext('','PTV_4800','Max Dose',DT1=5520,DT2=5760)\n f.write(line+'\\n')\n line = defineGoalinCards.writetext('','CTV_4800','Min DVH (%)',VT1=99,DT1=4800,VT2=95,DT2=4800)\n f.write(line+'\\n')\n f.close()\n\n if m == 'o': #start writing OAR tolerance\n #start writing goals\n f = open(savFolder+'createScoreCardsNrg_HN002.Script','a')\n line = defineGoalinCards.writetext('','SpinalCord_05','Max Dose',DT1=4800,DT2=5000)\n f.write(line+'\\n')\n line = defineGoalinCards.writetext('','SpinalCord','Max Dose',DT1=4500,DT2=4800)\n f.write(line+'\\n')\n line = defineGoalinCards.writetext('','BrainStem_03','Max Dose',DT1=5000,DT2=5200)\n f.write(line+'\\n')\n line = defineGoalinCards.writetext('','Parotid_L','Mean Dose',DT1=2600)\n f.write(line+'\\n')\n line = defineGoalinCards.writetext('','Parotid_R','Mean Dose',DT1=2600)\n f.write(line+'\\n')\n line = defineGoalinCards.writetext('','Larynx','Mean Dose',DT1=3500)\n f.write(line+'\\n')\n line = defineGoalinCards.writetext('','Pharynx','Mean Dose',DT1=4000)\n f.write(line+'\\n')\n line = defineGoalinCards.writetext('','Submandibular_L','Mean Dose',DT1=3900)\n f.write(line+'\\n')\n line = defineGoalinCards.writetext('','Submandibular_R','Mean Dose',DT1=3900)\n f.write(line+'\\n')\n line = defineGoalinCards.writetext('','OralCavity','Mean Dose',DT1=3200)\n f.write(line+'\\n')\n line = defineGoalinCards.writetext('','Esophagus_Upper','Mean Dose',DT1=3000)\n f.write(line+'\\n')\n line = defineGoalinCards.writetext('','NonPTV','Max DVH (cm^3)',VT1=1,DT1=6300)\n f.write(line+'\\n')\n line = defineGoalinCards.writetext('','Mandible','Max Dose',DT1=6300)\n f.write(line+'\\n')\n f.close()", "def scrape_all():\n\n # Scrape team information by season\n for team in scrape_utils.team_names():\n team_season_stats(team)\n # Each season\n print(team)\n for year in range(2019, 2020):\n # Game Logs\n season_game_logs(team, year)\n\n # Starting Lineups\n #player_scraper.get_starting_lineups(team, year)\n\n # Init mongo to get game IDS for box score scraping\n m = mongo.Mongo()\n\n # Game Information (Box Score and Play by Play)\n for year in range(2015, 2020):\n player_scraper.get_starting_lineups(year)\n for game in m.find('game_log', {'season': year}, {'_id': 1}):\n #team_scraper.play_by_play(game['_id'])\n player_scraper.player_box_score(game['_id'])\n\n print(game['_id'])\n\n\n\n # Get player information\n for player in scrape_utils.get_active_players():\n print(player)\n player_scraper.player_per_game(player)\n\n # Get betting lines (By Year) need from 2014\n for year in range(2015, 2020):\n team_scraper.betting_lines(2019)", "def parse_game_from_file(filename):\n contents = codecs.open(filename, 'r', encoding='utf-8').read()\n return parse_game(contents, dubious_check = True)", "def outer_parse_game(filename):\n contents = codecs.open(filename, 'r', encoding='utf-8').read()\n\n if not contents:\n # print 'empty game'\n return None\n if '<b>game aborted' in contents:\n # print 'skipping aborted game', filename\n return None\n if '<title>403 Forbidden' in contents or '<title>404 Not Found' in contents:\n return None\n try:\n parsed = 
parse_game(contents, dubious_check = True)\n parsed['_id'] = filename.split('/')[-1]\n return parsed\n except parse_common.BogusGameError, bogus_game_exception:\n # print 'skipped', filename, 'because', bogus_game_exception.reason\n return None\n except parse_common.ParseTurnHeaderError, p:\n print 'parse turn header error', p, filename\n except AssertionError, e:\n print filename\n raise e", "def scrape_game_data(game_url):\n try:\n http = urllib3.PoolManager(\n cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())\n\n # Get game soup object.\n game_request = http.request(\"GET\", game_url)\n soup = BeautifulSoup(game_request.data, 'lxml')\n\n # Store game data in dictionary.\n game_dict = {\"game_url\": game_url}\n game_dict = get_smmry_data(soup, game_dict)\n game_dict = get_player_data(soup, game_dict)\n game_dict = get_card_data(soup, game_dict)\n\n return game_dict\n except MaxRetryError:\n pass", "def parse_game(game_str, dubious_check = False):\n\n if ISO_LOG_RE.match(game_str):\n game_dict = parse_iso_game.parse_game(game_str, dubious_check)\n elif GOKO_LOG_RE.match(game_str):\n game_dict = parse_goko_game.parse_game(game_str, dubious_check)\n \n\n assign_win_points(game_dict)\n if dubious_check and Game(game_dict).dubious_quality():\n raise parse_common.BogusGameError('Dubious Quality')\n\n return game_dict", "def load_game(filename):\n global width, height, dungeon_map # DO NOT REMOVE\n\n try:\n with open(filename, 'r') as file_handler:\n file_text = None # TODO: Replace None so that file_text contains all the text in the file.\n except FileNotFoundError as error:\n return False\n\n lines = file_text.split('\\n')\n line = None # TODO: Replace None so that your code removes the first string in the list lines and stores it in the variable line.\n temp_width, temp_height, player_x, player_y, player_symbol, werewolf_x, werewolf_y, werewolf_health, werewolf_stun_count = None\n # TODO: Replace None on the preceding line of code. 
Extract values for all of these variables from the variable line.\n # TODO: All of the variables on the line above EXCEPT player_symbol need to be integers, not strings.\n\n temp_dungeon_map = [] # Store the dungeon map from the file in a temporary variable before we decide to throw out our old map.\n while len(temp_dungeon_map) < temp_height and len(lines) > 0:\n row = None # TODO: Replace None so that your code removes the first string in the list lines and stores it in the variable row.\n if row != '':\n temp_dungeon_map.append(list(row)) # What does list do here?\n squares = 0\n for row in temp_dungeon_map: # Count up the number of squares in the file's map.\n squares += len(row)\n if squares != temp_width * temp_height:\n # Validation: If the number of squares in the dungeon map does not match the width and height values in the file, that is a problem.\n return False\n\n # The data from the file is valid so now we can update the real width, height, and dungeon_map variables.\n width = temp_width\n height = temp_height\n dungeon_map = temp_dungeon_map\n\n player_inventory = {}\n for line in lines:\n # TODO: Extract from the remaining lines of text in the file data for the player's inventory.\n # Each line of the inventory data has a single character for the item, followed by a space, followed by the count for that item.\n # Store the data in the player_inventory dictionary; each key in the dictionary is an item, and its matching value is the numeric count.\n pass\n\n # TODO: Return a tuple of values (that you got in his function) so that the order of the values matches the order in the tuple returned by load_default_game().", "def parse_game_details(game):\n rules = game.rules_file\n line = rules.readline()\n game_details = []\n while line:\n game_details.append(line)\n line = rules.readline()\n return game_details", "def extract_from_game(game_path):\n with open(game_path) as sgf_file:\n game_data = sgf_file.read().encode('utf-8')\n\n try:\n g = sgf.Sgf_game.from_bytes(game_data)\n _, moves = sgf_moves.get_setup_and_moves(g)\n except BaseException:\n print(\"bad file: \", game_path)\n return Counter(), {}\n\n return (extract_corners(moves), 1 if g.get_winner().lower() == 'b' else 0)", "def scrape_fifa_game(url, competition):\n # Need to add referee data.\n\n data = scrape_url(url)\n data = data.split(\"<h2>Advertisement</h2>\")[0]\n soup = BeautifulSoup(data)\n \n contents = soup.find(\"div\", {\"id\": \"mainContent\" })\n \n # Really, none of these games have a home team.\n\n #teams = get_contents(contents.find(\"div\", \"bold large teams\"))\n team1 = get_contents(contents.find(\"div\", \"lnupTeam\").find(\"div\", \"bold medium\"))\n team2 = get_contents(contents.find(\"div\", \"lnupTeam away\").find(\"div\", \"bold medium\"))\n\n #import pdb; pdb.set_trace()\n\n \n\n #try:\n # team1, team2 = [e.strip() for e in teams.split(\"-\")]\n #except:\n # import pdb; pdb.set_trace()\n\n score_string = get_contents(contents.find(\"div\", \"bold large result\"))\n\n if 'a.e.t.' 
in score_string:\n score_string = score_string.split('a.e.t')[0]\n\n team1_score, team2_score = [int(e) for e in score_string.split(\"(\")[0].split(\":\")]\n\n # Implement this if header order is more unpredictable.\n #game_head = contents.findAll(\"thead\")\n #head_teas = game_head.findAll(\"td\", text=True\n\n game_header = contents.find(\"thead\")\n game_info = contents.find(\"tbody\")\n \n game_ths = [get_contents(e) for e in game_header.findAll(\"td\")]\n game_tds = [get_contents(e) for e in game_info.findAll(\"td\")]\n\n game_dict = dict(zip(game_ths, game_tds))\n\n\n match = date_string = time = location = attendance = None\n\n #import pdb; pdb.set_trace()\n\n if 'Match' in game_dict:\n match = game_dict['Match']\n\n if 'Date' in game_dict:\n date_string = game_dict['Date']\n\n #'Time' \n\n if 'Attendance' in game_dict:\n if game_dict['Attendance']:\n attendance = int(game_dict['Attendance'])\n\n if 'Venue / Stadium' in game_dict:\n location = game_dict['Venue / Stadium']\n\n for e in 'Match', 'Date', 'Attendance', 'Venue / Stadium', 'Time': \n if e in game_dict:\n game_dict.pop(e)\n #print(game_dict.keys())\n\n # Standardize city and stadium fields\n try:\n city, stadium = [e.strip() for e in location.rsplit(\"/\", 1)]\n except:\n import pdb; pdb.set_trace()\n\n # Avoid duplication of city name?\n if stadium.endswith(city):\n nlocation = stadium\n else:\n nlocation = \"%s, %s\" % (stadium, city)\n\n date = datetime.datetime.strptime(date_string.strip(), \"%d %B %Y\")\n\n return {\n \"team1\": team1,\n \"team2\": team2,\n 'team1_score': team1_score,\n 'team2_score': team2_score,\n 'competition': competition,\n 'season': str(date.year),\n \"date\": date,\n \"location\": nlocation,\n \"attendance\": attendance,\n \"sources\": [url],\n }", "def parseBadFileAsString(self,myfile):\n \n def removeTopDocketTags(string):\n return re.sub(r'<dockets>\\n<docket>','',string)\n \n def removeBottomDocketTags(string):\n return re.sub(r'</docket>\\n</dockets>$','',string)\n\n def makeListOfDocketsAsText(string):\n text = removeTopDocketTags(string)\n text = removeBottomDocketTags(text)\n return re.split(r'</docket>\\n<docket>',text)\n\n def splitFileIntoListOfStringsOrThrowError(fileObject,myfile):\n docketListAsText = makeListOfDocketsAsText(fileObject.read())\n regex = re.compile('</*docket>')\n badDockets = []\n counter = 0\n for d in docketListAsText:\n counter += 1\n for m in [regex.search(d)]:\n if m:\n self.logger.error(\"****Docket # %s has %s in it:\\n\\t%s****\" % (counter, m.group(0), d))\n badDockets.append(m.group(0))\n \n #badDockets = [m.group(0) for d in docketListAsText for m in [regex.search(d)] if m]\n if badDockets == []:\n return docketListAsText\n else:\n self.logger.info(\n \"There were %s dockets with '<docket>' or '</docket>' inside the docket-specific string.\\n\\t\\t=>This file will have no output.\", \n len(badDockets) \n )\n raise JBGSyntaxError('JBGSyntaxError')\n\n def initializeRoot():\n return ET.Element(\"root\") \n\n def initializeLists():\n self.listOfGoodDockets = []\n self.listOfBadDockets = []\n self.listOfBadDocketNumbers = []\n \n #########################################################\n ##### MAIN PROCEDURAL BLOCK OF parseBadFileAsString #####\n #########################################################\n \n with open(myfile) as f:\n initializeLists()\n root = initializeRoot()\n try:\n docketListAsText = splitFileIntoListOfStringsOrThrowError(f,myfile)\n for d in docketListAsText:\n self.allDocketsCounter += 1\n d.strip()\n try:\n tree = 
ET.fromstring('<docket>%s</docket>' % d)\n self.goodDocketsCounter += 1 #has to be after parse or we will count bad dockets here as well\n root.append(tree)\n self.listOfGoodDockets.append(tree)\n except ET.XMLSyntaxError:\n self.badDocketsCounter += 1\n self.logger.info(\n \" --> XMLSyntaxError for docket # %s\", self.allDocketsCounter\n )\n self.listOfBadDocketNumbers.append(self.allDocketsCounter)\n self.listOfBadDockets.append(d)\n except JBGSyntaxError:\n pass\n self.logger.info(\"Total number of all dockets in this file was %s\", self.allDocketsCounter)\n self.logger.info(\"Total number of good dockets in this file was %s\", self.goodDocketsCounter)\n self.logger.info(\"Total number of bad dockets in this file was %s\", self.badDocketsCounter)\n self.logger.info(\n \"List of bad dockets' text starts on next line:\\n\" + \n '\\n'.join([\"Next bad docket is number %s:\\n\\t%s\" % (self.listOfBadDocketNumbers[index], badDocket) for index,badDocket in enumerate(self.listOfBadDockets)])\n# '\\n'.join(['Next bad docket is number ' + self.listOfBadDocketNumbers[index] + ':\\n\\t' + badDocket for index,badDocket in self.listOfBadDockets])\n )\n return ET.ElementTree(root)", "def get_player_data(player, battleTag, responce):\r\n # Convert responce to a \"soup\" object by passing it to the soup constructor, and specify lxml as encoder \r\n soup = BeautifulSoup(responce.text, 'lxml')\r\n # List to store Hero Names and Quick Scores \r\n heroes = []\r\n # Loop Through each HTML tag under '<div>' : class: 'name' and look for name contents\r\n # In children, decode and output contents \r\n for parent in soup.find_all('div', {'class': 'name' }): # Specify the parent classes name, type(bs4.element.Tag)\r\n for child in parent.findChildren('a', recursive = False): # Access all of its children, store inside child var type(bs4.element.Tag) \r\n heroes.append(child.decode_contents()) # Get the contents of the child, add to the heroes list type(str)\r\n \r\n quick_scores = [] # To Store the quickscores \r\n # Loop Through each HTML tag under 'div' : class: group special and look for name \r\n #contents In children, decode and output contents, \r\n for parent in soup.find_all('div', {'class': 'group special' }):\r\n children = parent.findChildren('div', recursive = False)\r\n if not 'padded' in children[1].get('class'):\r\n quick_scores.append(children[1].findChildren('div', {'class': 'value' }, recursive = False)[0].decode_contents())\r\n \r\n player_image_link =\"\" \r\n\r\n # Get the profile Icon of the player\r\n for link in soup.find_all('div', {'class': 'image-with-corner' }):\r\n images = link.find_all('img')\r\n for img in images:\r\n if \"image-player\" in img['class']: \r\n player_image_link = img['src']\r\n\r\n # Get the number of wins from each hero and overall number of wins by the player\r\n # This time using regex, because why not :>\r\n temp = re.findall(\"<span class=\\\"color-stat-win\\\">[0-9]+</span>\", responce.text)\r\n i = 0\r\n hero_wins = []\r\n for elt in temp: \r\n if i < len(quick_scores)+1:\r\n val = re.sub(\"[^0-9]\", \"\", elt)\r\n hero_wins.append(val)\r\n i = i+1\r\n \r\n player.total_wins = hero_wins[0] # First item is Overall wins by player so far\r\n hero_wins.pop(0) \r\n player.hero_wins = hero_wins # other elements are wins from heroes\r\n \r\n # Convert scores to numeric format i.e 11,534 to 11534\r\n numeric_scores = []\r\n for x in quick_scores:\r\n numeric_scores.append(int(x.replace(',', '')))\r\n \r\n player.battle_tag = battleTag\r\n player.heroes = heroes\r\n 
player.quick_scores = numeric_scores\r\n player.player_logo = player_image_link", "def parse_replay(replay):\n if replay is None:\n return None\n\n stats = GameStat(replay[\"num_players\"])\n stats.turns_total = len(replay['frames']) - 1\n for frame in replay[\"frames\"]:\n for event in frame.get(\"events\", []):\n player_tag = event[\"entity\"].get(\"owner\")\n if event[\"event\"] == \"spawned\":\n stats.ships_produced += 1\n stats.players[player_tag].ships_produced += 1\n elif event[\"event\"] == \"destroyed\":\n if event[\"entity\"][\"type\"] == \"ship\":\n stats.ships_destroyed += 1\n elif event[\"entity\"][\"type\"] == \"planet\":\n stats.planets_destroyed += 1\n if player_tag:\n stats.players[player_tag].planets_destroyed += 1\n elif event[\"event\"] == \"attack\":\n stats.players[player_tag].attacks_total += 1\n\n ships_alive_total = sum([len(ships) for ships in replay[\"frames\"][-1][\"ships\"].values()])\n for player_tag in stats.players.keys():\n stats.players[player_tag].ships_alive = len(replay[\"frames\"][-1][\"ships\"][str(player_tag)])\n # use max(1.0, ...) to avoid ZeroDivisionError\n stats.players[player_tag].ships_alive_ratio = 1.0 * stats.players[player_tag].ships_alive / max(1.0, stats.players[player_tag].ships_produced)\n stats.players[player_tag].ships_relative_ratio = 1.0 * stats.players[player_tag].ships_alive / max(1.0, ships_alive_total)\n\n for planet in replay[\"frames\"][-1][\"planets\"].values():\n if planet[\"owner\"] is not None:\n stats.players[planet[\"owner\"]].planets_controlled += 1\n\n return stats", "def upload_game():\n if (\"game_output\" not in flask.request.values or\n \"users\" not in flask.request.values):\n raise util.APIError(\n 400, message=\"Please provide both the game output and users.\")\n\n game_output = json.loads(flask.request.values[\"game_output\"])\n users = json.loads(flask.request.values[\"users\"])\n challenge = json.loads(flask.request.values.get(\"challenge\", \"null\"))\n\n replay_name = os.path.basename(game_output[\"replay\"])\n if replay_name not in flask.request.files:\n raise util.APIError(\n 400, message=\"Replay file not found in uploaded files.\")\n\n stats = parse_replay(decode_replay(flask.request.files[replay_name]))\n if stats is None:\n raise util.APIError(\n 400, message=\"Replay file cannot be parsed.\")\n\n # Store the replay and any error logs\n replay_key, bucket_class = store_game_artifacts(replay_name, users)\n\n with model.engine.begin() as conn:\n total_users = conn.execute(model.total_ranked_users).first()[0]\n # Sort the users to prevent deadlock in the stored_bot for update lock\n for user in sorted(users, key=lambda x: x['user_id']):\n stored_user = conn.execute(\n sqlalchemy.sql.select([\n model.users.c.id.label(\"user_id\"),\n model.users.c.on_email_list,\n model.users.c.github_email.label(\"email\"),\n model.users.c.player_level,\n model.users.c.creation_time,\n model.users.c.username,\n model.organizations.c.organization_name,\n ]).select_from(model.users.join(\n model.organizations,\n model.organizations.c.id == model.users.c.organization_id,\n isouter=True\n )).where(model.users.c.id == user[\"user_id\"])\n ).first()\n\n stored_bot = conn.execute(\n sqlalchemy.sql.select([\n model.bots.c.version_number,\n model.bots.c.language,\n model.bots.c.mu,\n model.bots.c.sigma,\n ], for_update=True).where(\n (model.bots.c.id == user[\"bot_id\"]) &\n (model.bots.c.user_id == user[\"user_id\"])\n )\n ).first()\n\n stored_rank = conn.execute(\n sqlalchemy.sql.select([\n model.ranked_bots_users.c.rank,\n 
]).where(\n (model.ranked_bots_users.c.bot_id == user[\"bot_id\"]) &\n (model.ranked_bots_users.c.user_id == user[\"user_id\"])\n )\n ).first()\n\n if not stored_user or not stored_bot:\n raise util.APIError(400, message=\"User or bot doesn't exist\")\n\n # If the user has submitted a new bot in the meanwhile,\n # ignore the game\n if stored_bot[\"version_number\"] != user[\"version_number\"]:\n return util.response_success({\n \"message\": \"User {} has uploaded a new bot, discarding \"\n \"match.\".format(user[\"user_id\"])\n })\n\n user.update(dict(stored_user))\n user.update(dict(stored_bot))\n if stored_rank:\n user[\"leaderboard_rank\"] = stored_rank[\"rank\"]\n user[\"tier\"] = util.tier(stored_rank[\"rank\"], total_users)\n else:\n user[\"leaderboard_rank\"] = total_users\n user[\"tier\"] = util.tier(total_users, total_users)\n\n # Store game results in database\n game_id = store_game_results(conn, game_output, stats,\n replay_key, bucket_class,\n users, challenge)\n # Store game stats in database\n store_game_stats(conn, game_output, stats, game_id, users)\n # Update rankings\n if not challenge:\n update_rankings(conn, users)\n\n return util.response_success()", "def build(filename: str) -> State:\n create_list_from_string = lambda a: [(int(a[i]), int(a[i + 1])) for i in range(0, int(len(a)), 2)]\n logger.info(f'Creating game for file: {filename}.')\n f = open(filename) # For whatever relevant file.\n\n line1 = f.readline().strip().split(' ')\n h_size, v_size = int(line1[0]), int(line1[1])\n\n line2 = f.readline()\n real_start = line2.find(' ') + 1\n wall_list = create_list_from_string(line2[real_start:-1].strip().split(' '))\n\n line3 = f.readline()\n real_start = line3.find(' ') + 1\n box_list = create_list_from_string(line3[real_start:-1].strip().split(' '))\n\n line4 = f.readline()\n real_start = line4.find(' ') + 1\n loc_list = create_list_from_string(line4[real_start:-1].strip().split(' '))\n\n line5 = f.readline().strip().split(' ')\n start_loc = (int(line5[0]), int(line5[1]))\n\n our_game = State(**{\n 'size_h': h_size,\n 'size_v': v_size,\n 'wall_squares': wall_list,\n 'boxes': box_list,\n 'storage_locations': loc_list,\n 'starting_location': start_loc,\n 'move_taken_from_parent': None\n })\n Visual.handle_state(our_game, 'NEW_GAME_FROM_FILE')\n return our_game", "def _create_games(self):\n\n ''''''", "def ingest(path_to_replays, proc):\n # So that pysc2 runs:\n from absl import flags\n import sys\n FLAGS = flags.FLAGS\n flags.DEFINE_integer(\"proc\", 1, \"Amount of processors you want to devote.\")\n FLAGS(sys.argv)\n\n path_to_replays = Path(path_to_replays)\n\n if path_to_replays.name.endswith(\".SC2Replay\"):\n # it's actually just a replay.\n replay_files = [str(path_to_replays)]\n else:\n replay_files = [replay for replay in path_to_replays.glob(\"*.SC2Replay\")]\n\n # If the database already exists, we check if we have already\n # processed some of the replays, and substract them from the\n # set we want to process. 
That way, we don't process replays twice.\n client = pymongo.MongoClient(address, port_num)\n if DB_NAME in client.list_database_names():\n db = client[DB_NAME]\n replays = db[\"replays\"]\n parsed_files = set([\n Path(doc['replay_name']) for doc in replays.find()\n ])\n print(f\"Found {len(parsed_files)} replays in the database already.\")\n\n replay_files = set(replay_files)\n replay_files -= parsed_files\n replay_files = list(replay_files)\n\n client.close()\n\n if len(replay_files) > 1:\n replay_files_chunks = utils.split(replay_files, proc)\n\n # Ingesting the replays\n with mp.Pool(proc) as p:\n p.map(_ingest, replay_files_chunks)\n \n elif len(replay_files) == 1:\n _ingest(replay_files)\n\n else:\n raise ValueError(\"Found no new replays in path. Do they end on SC2Replay?\")", "def __parseGameState(self, s):\n self.__camps=[]\n self.__armies=[]\n idCamp = 0\n lines = s.split(\"\\n\")[:-1] # letzte leeres ding nicht liefern.\n for line in lines:\n tokens = line.split(\" \")\n if( (len(tokens) == 6) or (len(tokens) == 7) ):\n if( \"C\" in tokens[0] ):\n if( len(tokens) == 6 ):\n x = int(tokens[1])\n y = int(tokens[2])\n owner = int(tokens[3])\n mancount = int(tokens[4])\n size = int(tokens[5])\n self.__camps.append(Camp(idCamp, owner, mancount, size, x, y))\n idCamp=idCamp+1\n elif( \"A\" in tokens[0] ):\n if( len(tokens) == 7):\n owner = int(tokens[1])\n mancount = int(tokens[2])\n source = int(tokens[3])\n destination = int(tokens[4])\n totalTripLength = int(tokens[5])\n turnsRemaining = int(tokens[6])\n self.__armies.append(Army(owner, mancount, source, destination, totalTripLength, turnsRemaining))", "def main():\n\n containerTextStyle = {\n 'color': '#ffffff',\n 'font': '10px',\n 'stroke': '#000000',\n 'strokeWidth': .15\n }\n spawnerRole = Spawner()\n # Clean up memory\n for creepName in Object.keys(Memory.creeps):\n if not Game.creeps[creepName]:\n if Memory.creeps[creepName].role == \"remoteHarvester\":\n print(\"Cleaning up remoteHarvester. 
It mined: \" + Memory.creeps[creepName].totalHarvested)\n del Memory.creeps[creepName]\n #print(\"Clearing non-existent creep memory: \" + creepName)\n\n if Game.cpu.bucket == 10000:\n Game.cpu.generatePixel()\n # Run each creep\n for name in Object.keys(Game.creeps):\n creep = Game.creeps[name]\n if creep.memory.role in Spawner.roles:\n Spawner.roles[creep.memory.role].run(creep)\n else:\n creep.say(\"No role\")\n\n # Run tower code\n homeRoom = Object.values(Game.spawns)[0].room\n towers = [struct for room in Object.values(Game.rooms) for struct in room.find(FIND_STRUCTURES) if struct.structureType == STRUCTURE_TOWER]\n hostiles = homeRoom.find(FIND_HOSTILE_CREEPS)\n for tower in towers:\n structures = sorted([struct for struct in tower.room.find(FIND_STRUCTURES) if struct.hits < struct.hitsMax], key=lambda struct: struct.hits)\n if len(hostiles) > 0:\n tower.attack(tower.pos.findClosestByPath(hostiles))\n continue\n\n for structure in structures:\n if structure.hits < structure.hitsMax and structure.hits < 100000:\n tower.repair(structure)\n break\n\n # Run visuals\n for room in Object.values(Game.rooms):\n for container in [struct for struct in room.find(FIND_STRUCTURES) if struct.structureType == STRUCTURE_CONTAINER or struct.structureType == STRUCTURE_STORAGE]:\n room.visual.text(Spawner.roles['harvester'].getStructureFutureEnergy(container), container.pos, containerTextStyle)\n\n # Run each spawn\n for name in Object.keys(Game.spawns)[0:1]:\n spawn = Game.spawns[name]\n spawnerRole.run(spawn)", "def parseGame(self, tree):\n if not isinstance(tree, Node):\n tree = indentTreeParser(tree).children[0]\n sclass, args = self._parseArgs(tree.content) \n self.game = sclass(**args) \n for c in tree.children:\n if c.content == \"SpriteSet\":\n self.parseSprites(c.children)\n if c.content == \"InteractionSet\":\n self.parseInteractions(c.children)\n if c.content == \"LevelMapping\":\n self.parseMappings(c.children)\n if c.content == \"TerminationSet\":\n self.parseTerminations(c.children)\n return self.game", "def test_get_game_content(self):\n pass", "def read_game_logs(file_path):\n\n if os.path.isfile(file_path):\n with open(file_path, \"r\") as read_file:\n log = json.load(read_file)\n # event_type = set([e[\"event\"] for e in log ])\n # the event types: command, text_message, set_attribute, join\n # print(\"event types\", event_type)\n\n # sort all messages chronologically\n log.sort(key=lambda x: x[\"date_modified\"])\n\n start = None\n end = None\n real_end = None # WHen The came master says COngrats or you die, because rest of the messages looks like bugs...\n episode_list = []\n length = len(log)\n game_finished = False\n # Episode are being searched between 2 starts commands\n # only the one where the command done has been issued is kept\n for i, l in enumerate(log):\n if \"command\" in l.keys():\n if l[\"command\"] == \"start\":\n if start == None:\n start = i\n elif end == None:\n end = i\n if l[\"command\"] == \"done\":\n game_finished = True\n\n if l[\"user\"][\"id\"] == 1 and l[\"event\"] == \"text_message\" and type(l[\"message\"]) is str and (\n l[\"message\"].startswith(\"Congrats\") or l[\"message\"].startswith(\n \"The rescue robot has not reached you\")):\n real_end = i + 1 # +1 because we want to include this message in the log slice...\n if start is not None and end is not None:\n if game_finished:\n episode_list.append(log[start:real_end])\n start = end\n end = None\n real_end = None\n game_finished = False\n\n if i + 1 == length:\n if start is not None and end is None 
and game_finished:\n episode_list.append(log[start:real_end])\n\n score_list = {}\n for i, e in enumerate(episode_list):\n # the number of answers the avatar utters gives us the number of question asked\n # num_questions = sum(\n # [1 for m in e if m[\"user\"][\"name\"] == \"Avatar\" and m[\"event\"] == \"text_message\"])\n\n # Just sum every messages ending with a question mark issueed by the user...\n num_questions = sum([1 for m in e if m[\"user\"][\"name\"] != \"Avatar\" and m[\"user\"][\"id\"] != 1 and m[\n \"event\"] == \"text_message\" and type(m[\"message\"]) is str and m[\"message\"].endswith(\"?\")])\n\n # user id 1 is alway the game master, we are looping here on the messages of the \"real\" player\n # when we tell the avatar to change location, we don't get an answer, this is why the substraction gives the number of orders\n # this does not include the order \"done\"\n # num_orders = sum(\n # [1 for m in e if m[\"user\"][\"name\"] != \"Avatar\" and m[\"user\"][\"id\"] != 1 and m[\n # \"event\"] == \"text_message\"]) - num_questions\n\n # Just sum every order of type \"go west\". Describe orders are not counted.\n num_orders = sum([1 for m in e if m[\"user\"][\"name\"] != \"Avatar\" and m[\"user\"][\"id\"] != 1 and m[\n \"event\"] == \"text_message\" and type(m[\"message\"]) is str and (\n \"east\" in m[\"message\"].lower() or \"north\" in m[\"message\"].lower() or \"west\" in m[\n \"message\"].lower() or \"south\" in m[\"message\"].lower() or \"back\" in m[\"message\"].lower())])\n\n game_won = sum([1 for m in e if m[\"user\"][\"id\"] == 1 and m[\n \"event\"] == \"text_message\" and type(m[\"message\"]) is str and m[\"message\"].startswith(\"Congrats\")]) > 0\n\n # Work-Around - the final reward giving +1.0 on success and -1.0 on loss happens after the messages\n # Saying \"congratulations\" or \"you die horribly\" just repeating the message when the game starts.\n # We had to exclude that message to segment finished games but this is why we have to add these rewards here manually...\n\n final_reward = -1.0\n if game_won:\n final_reward = 1.0\n score_list[i] = {\"score\": sum([m[\"message\"][\"observation\"][\"reward\"] for m in e if\n \"message\" in m.keys() and type(m[\"message\"]) is dict])+final_reward,\n \"num_questions\": num_questions, \"num_orders\": num_orders, \"game_session\": e,\n \"game_won\": game_won}\n\n return score_list\n\n else:\n raise Exception(f\"{file_path} is not a correct file path.\")", "def process_game(line):\n if not line:\n return None, None\n saved_game = json.loads(line)\n saved_game = add_cached_states_to_saved_game(saved_game)\n saved_game = add_possible_orders_to_saved_game(saved_game)\n saved_game_proto = dict_to_proto(saved_game, SavedGameProto)\n saved_game_proto = add_rewards_to_saved_game_proto(saved_game_proto, DefaultRewardFunction())\n saved_game_zlib = proto_to_zlib(saved_game_proto)\n return saved_game['id'], saved_game_zlib", "def processAndPickle(file_name, dimension = 'agency', first = 1, last = 140):\n\tclean_responses = []\n\tresponses = readnarratives.loadNarrativeData(dimension, first, last)\n\tprint responses [0]\n\n\t#print responses [0][1]\n\n\n\tfor narrative in responses:\n\t\tfor scene in narrative:\n\t\t\tscene_text = scene[0][0]\n\t\t\tscene_score = scene[0][1]\n\n\t\t\ttagged_texts = [text[0] for text in scene[1]]\n\t\t\ttagged_scores = [text[1] for text in scene[1]]\n\n\t\t\tscene_clean_text = processText([scene_text])[0]\n\t\t\ttagged_clean_texts = processText(tagged_texts)\n\n\t\t\tno_blank_texts, no_blank_scores = 
discardBlanks(tagged_clean_texts,\n\t\t\t\ttagged_scores)\n\n\t\t\tclean_examples = [(text,no_blank_scores[i]) \n\t\t\t\tfor i,text in enumerate(no_blank_texts)]\n\t\t\tclean_responses.append(((scene_clean_text,scene_score),\n\t\t\t\tclean_examples))\n\n\treadnarratives.makePickle(clean_responses, file_name)\n\n\tdata = readnarratives.readPickle(file_name)", "def _parse(self):\n adds_factor = 1\n pattern = re.compile(\"(\\d+)?([kKdDbB])(\\d+)?\")\n\n for chunk in self.chunks:\n if chunk == \"+\":\n adds_factor = 1\n elif chunk == \"-\":\n adds_factor = -1\n elif chunk == \">\":\n # next factor will be target difficulty!\n adds_factor = 0\n elif chunk.isdigit():\n self.bonus += int(chunk) * adds_factor # anything + 0 remains unchanged\n if adds_factor == 0:\n self.target = 0\n break # difficulty always be the last chunk!\n else:\n m = pattern.match(chunk)\n\n # This part is dubious\n if m is not None:\n t_die_letter = m.group(2).upper()\n t_dice = 0\n t_keep = 0\n if m.group(1) is not None:\n t_dice = int(m.group(1))\n if m.group(3) is not None:\n t_keep = int(m.group(3))\n\n if \"D\" == t_die_letter:\n if adds_factor == 1:\n self.dice += t_dice\n self.keeps += t_dice\n elif adds_factor == -1:\n self.keeps -= t_dice\n elif \"B\" == t_die_letter :\n if adds_factor == 1:\n self.dice += t_dice\n # you cannot do -xB\n else: #well, that's a Keep\n self.dice += t_dice * adds_factor\n self.keeps += t_keep * adds_factor\n\n if self.keeps > self.dice:\n self.keeps = self.dice", "def main():\n\n print_header()\n statistic = {}\n\n\n while True:\n\n \"\"\"\n System take input for opponent like friend or computer (computer is segregated into two types 'c1' (EasyAi) and 'c2' HarderAi)\n System also take input for player icon and provide only two options 'X' or 'O'\n \"\"\"\n\n opponent = input(\n \"Would you like to play against a friend or the computer? \\n\\t-friend (f)\\n\\t-computer level 1 (c1)\\n\\t-computer level 2 (c2)\")\n icon_coice = input(\"Would you like to play as (X) or (O)? \").upper()\n players = [EasyAi(icon_coice), HarderAi(flip_icon(icon_coice))]\n if opponent.lower() == \"f\":\n players = [Human(icon_coice), Human(flip_icon(icon_coice))]\n # start a game with friend\n if opponent.lower() == \"c1\":\n players = [Human(icon_coice), EasyAi(flip_icon(icon_coice))]\n # start a game with computer\n if opponent.lower() == \"c2\":\n players = [Human(icon_coice), HarderAi(flip_icon(icon_coice))]\n\n start_time = time.time()\n\n \"\"\"\n Load the Game by creating game class object and it takes the Players list\n call its play_game method to start game and return final results\n \"\"\"\n\n game = Game(players=players)\n result = game.play_game()\n ending_time = time.time()\n\n statistic[result] = statistic.get(result, 0) + 1\n\n # calculate game duration\n duration = int(ending_time - start_time)\n duration_string = get_duration_string(duration)\n\n # pass the Game states and it duration to below method\n write_result_to_file(duration_string, statistic)\n\n user_choice = input(\"Would you like to play a game again? 
[y/n]\")\n if user_choice.lower().startswith(\"n\"):\n break", "def read_pgn(filepath, maxGames = 100000, timecontrol = 300, type = \"Lichess Standard\"):\n # import pgn file\n pgn_file = open(filepath)\n \n # read all games from pgn_file\n all_games = import_multiple_games(pgn_file, maxGames = maxGames)\n \n # create lists for saving variables\n board_maps = []\n times_p1, remaining_times_p1 = [], []\n materials_p1, materials_p2 = [], []\n board_positions = []\n results = []\n\n # Counter variables for Summary\n valid_time_counter = 0\n valid_rem_time_counter = 0\n valid_outlier_counter = 0\n\n # Define end of iteration \n end = len(all_games)\n\n # Print checkpoint\n print(len(all_games), \" Games will now be read in as arrays\")\n\n # Set type of the game\n if(type == \"Lichess Standard\"):\n # iterate through every game\n for current_game in range(0, end):\n # if current_game == 1: continue\n\n # get result\n result = get_result_OHE(all_games[current_game].headers[\"Result\"])\n\n if result == 2: \n board_flip = True\n elif result == 0: \n board_flip = False \n elif result == 1: \n continue\n\n # create arrays for the current game\n curr_times_p1, curr_remaining_times_p1 = [], []\n \n # do moves\n curr_materials_p1, curr_materials_p2, curr_board_positions_p1, _, curr_board_maps = extract_move_information(all_games[current_game], board_flip = board_flip)\n\n # get all comments in the current board\n curr_comments = []\n for node in all_games[current_game].mainline():\n curr_comments.append(node.comment)\n \n if board_flip:\n for i in range(0, len(curr_comments) - 1, 2):\n swap_comm = curr_comments[i]\n curr_comments[i] = curr_comments[i+1]\n curr_comments[i+1] = swap_comm\n\n # extract time information from the comments\n curr_times_p1, _, curr_remaining_times_p1, _ = extract_time_from_comment(curr_comments, timecontrol)\n\n # In the following, there are some sanity checks for the data and the final summary of the pgn import\n valid_data = True\n rem_time_counter = 0\n # check if remaining times are valid, if not, skip the game \n for remaining_time in curr_remaining_times_p1:\n if remaining_time > timecontrol:\n #print(\"Invalid data found - the present game will be skipped.\")\n rem_time_counter += 1\n valid_data = False\n if rem_time_counter > 0: \n valid_rem_time_counter += 1\n continue\n\n time_counter = 0\n # check if times_p1 are valid, if not, skip the game\n for time in curr_times_p1:\n if time < 0:\n #print(\"Invalid data found - the present game will be skipped.\")\n time_counter += 1\n valid_data = False\n if time_counter > 0: \n valid_time_counter += 1\n continue\n\n outlier_counter = 0\n # check times for heavy outliers depending on the given timecontrol \n for time in curr_times_p1:\n if time > (timecontrol * 0.15):\n #print(\"Heavy outlier found - the present game will be skipped\")\n outlier_counter += 1\n valid_data = False\n if outlier_counter > 0: \n valid_outlier_counter +=1\n continue\n\n if valid_data:\n # Append all variables to final lists\n # Board Maps\n board_maps.extend(curr_board_maps)\n\n # Board Positions\n board_positions.extend(curr_board_positions_p1)\n\n # Materials W and B\n materials_p1.extend(curr_materials_p1)\n materials_p2.extend(curr_materials_p2)\n\n # Remaining Time W\n remaining_times_p1.extend(curr_remaining_times_p1)\n\n # Time W (prediction variable)\n times_p1.extend(np.array(curr_times_p1) / np.array(curr_remaining_times_p1))\n\n # Result\n curr_result = np.zeros_like(curr_times_p1)\n 
curr_result.fill(get_result_OHE(all_games[current_game].headers[\"Result\"]))\n results.extend(curr_result)\n\n # print actual read games\n if((current_game % 10000 == 0) and (current_game != 0)): \n print(\"Games already read: \", current_game, \"/\", end)\n\n # get number of plies\n plies = len(times_p1)\n # flat board positions for creating final numpy ndarray\n for i, boardpos in enumerate(board_positions):\n board_positions[i] = np.array([boardpos.flatten()])\n \n # Lists to Numpy Arrays\n board_positions = np.array(board_positions).reshape(plies, 768)\n remaining_times_p1 = np.array(remaining_times_p1).reshape(plies,1)\n materials_p1 = np.array(materials_p1).reshape(plies,1)\n # materials_p2 = np.array(materials_p2).reshape(plies,1)\n times_p1 = np.array(times_p1).reshape(plies,1)\n results = np.array(results).reshape(plies, 1)\n\n\n # create return compressed data\n input_variables = np.hstack((materials_p1, board_positions)) # remaining_times_p1, materials_p2, results\n prediction_variable = times_p1\n board_maps = np.array(board_maps)\n \n print(\"-\" * 30)\n print(\"Import Summmary\")\n print(\"Games skipped due to invalid remaining times: \", valid_rem_time_counter)\n print(\"Games skipped due to invalid ply times: \", valid_time_counter)\n print(\"Games skipped due to outliers: \", outlier_counter)\n print(\"Number of plies which are imported: \", plies)\n\n return prediction_variable, input_variables, board_maps" ]
[ "0.5883831", "0.5731385", "0.5504873", "0.543542", "0.54344803", "0.54229444", "0.54229164", "0.52950484", "0.52757853", "0.5214277", "0.5172111", "0.5169089", "0.5136626", "0.5134884", "0.50984156", "0.5005022", "0.49743158", "0.49637678", "0.4961036", "0.49550956", "0.4938498", "0.4928554", "0.4918484", "0.49132058", "0.49035773", "0.48979574", "0.48676556", "0.48584172", "0.48516622", "0.4832485" ]
0.63541466
0
simply redirect to dashboard
def index(self): log.debug('index()') return redirect_to('/admin/dashboard')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def go_to_dashboard():\n\treturn render_template(\"/dashboard.html\")", "def get(self):\n self.redirect('/admin')", "def get(self):\n self.redirect('/admin')", "def toLanding():\n return redirect(url_for('landingurl'))", "def dispatch(request):\n if request.user.is_admin:\n return redirect(reverse(\"admin-dashboard\"))\n else:\n return redirect(reverse(\"trainee-dashboard\"))", "def post(self) :\n self.redirect('/admin')", "def home_page():\n return redirect('/users')", "def test_dashboard_view(self):\n target_url = url_for('dashboard.dashboard_panel')\n redirect_url = url_for('users.login', next=target_url)\n response = self.client.get(target_url)\n self.assertEqual(response.status_code, 302)\n self.assertRedirects(response, redirect_url)", "def index():\n return redirect(auth_flow.get_authorization_url())", "def dashboard():", "def get(self, request):\n return redirect('start:home')", "def get(self, request):\n return redirect('start:home')", "def get(self, request):\n return redirect('start:home')", "def home_page():\n return redirect(url_for(_DEFAULT_ROUTE, _external=True))", "def root_redirect():\r\n return redirect(url_for(\"display_top\"))", "def home_view(request):\n return HttpResponseRedirect('/schedule/')", "def home(request):\n return redirect('commprod/')", "def homepage():\n if g.user:\n return redirect(f\"/user/{g.user.id}\")\n else:\n return redirect(\"/landing\")", "def landing():\n if g.user:\n return render_template('landing.html', user=g.user)\n return redirect(url_for('login'))", "def entry_page():\n return redirect(url_for('index'))", "def admin():\n return redirect(url_for(\"user\", name=\"Admin!\"))", "def home(request):\n assert isinstance(request, HttpRequest)\n return redirect('/departments')", "def homepage():\n return redirect('index.html')", "def redirectPage() -> Response:\n # pass in the function name\n return redirect(url_for('view.loadMainPage'))", "def idx(_request):\n return HttpResponseRedirect('/home')", "def index():\n return redirect(url_for(\"home\"))", "def goToAdmin(request):\n\n\ttemplate = '../admin'\n\treturn HttpResponseRedirect(template)", "def get(self):\n self.logout()\n self.redirect('/')", "def dashboard():\n return render_template('home/dashboard.html')", "def test_login_required_dashboard(self):\r\n response = self.client.get(reverse('dashboard'))\r\n self.assertEqual(response.status_code, 302)\r\n self.assertEqual(response['Location'], 'http://testserver/accounts/login?next=/dashboard')" ]
[ "0.7913281", "0.745907", "0.745907", "0.74100876", "0.73945457", "0.7383052", "0.7213817", "0.7201024", "0.7168231", "0.7139866", "0.70773005", "0.70773005", "0.70773005", "0.7049786", "0.6996893", "0.69752914", "0.6969216", "0.69509476", "0.6876307", "0.6867712", "0.682362", "0.6822801", "0.6817731", "0.68095946", "0.6789032", "0.67811567", "0.676805", "0.67632556", "0.6751757", "0.67514247" ]
0.7543102
1
switch server ON or OFF (no param is toggle)
def toggle_server(self): name = request.params.get('name', g.DEFAULT_SERVER) log.debug('toggle_server(%s)' % name) servers = model.Session.query(model.Server) server = servers.filter(model.Server.name == name).one() server.server_on = not server.server_on model.Session.update(server) model.Session.commit() redirect_to('/admin/dashboard')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _toggle_server(self):\r\n\t\t_logger.debug(\"Toggle server button is pressed.\")\r\n\r\n\t\tif not comm_server.is_running():\r\n\t\t\tserver_ip = self.children[\"entry_IP\"].get()\r\n\t\t\tserver_port = int(self.children[\"entry_port\"].get())\r\n\t\t\tif not comm_server.start_server(server_ip, server_port):\r\n\t\t\t\treturn\r\n\t\t\tself._save_server_config(server_ip, server_port)\r\n\r\n\t\t\tself.children[\"btn_toggle_server\"].config(text = \"關閉伺服器\")\r\n\t\t\tself._update_connection_num(\"\")\r\n\t\telse:\r\n\t\t\tcomm_server.stop_server()\r\n\t\t\tself.children[\"btn_toggle_server\"].config(text = \"啟動伺服器\")\r\n\t\t\tself.children[\"label_connections\"].config(text = \"連接數: -/-\")", "def change_status():\n if self.on:\n connect.SOCKET.sendall(bytes(\"OFF\\n\", \"utf-8\"))\n self.on = False\n else:\n connect.SOCKET.sendall(bytes(\"ON\\n\", \"utf-8\"))\n self.on = True", "def turn_on(self, **kwargs):\n set_sonoff_state(self._host, \"on\")\n self._state = True", "def toggle(self):\n self._state.is_on = not self._state.is_on\n self.send_command(Command.TOGGLE, [])", "async def toggle(self, ctx):\r\n serverid = ctx.message.server.id\r\n if self.adkillr[serverid]['toggle'] is True:\r\n self.adkillr[serverid]['toggle'] = False\r\n e = discord.Embed(description='**AntiAdv is now disabled.**')\r\n await self.bot.say(embed=e)\r\n elif self.adkillr[serverid]['toggle'] is False:\r\n self.adkillr[serverid]['toggle'] = True\r\n e = discord.Embed(description='**AntiAdv is now enabled.**')\r\n await self.bot.say(embed=e)\r\n dataIO.save_json(\"data/adkillr/adkillr.json\", self.adkillr)", "def turn_on(self, **kwargs):\n self._state = True\n if(self._device['type'] == '_DT-PLUG' or self._device['type'] == '_THIMR'):\n self._send_cmd(self._device,'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"op\":1 }', 5)\n if(self._device['type'] == '_REALY2' or self._device['type'] == '_REALY4'):\n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"'+ self._data_key +'\":1 }', 5)", "def openCircuit(srv):", "def force_switch_on(self):\n self.turn_on_modem()", "def affection_status_switch_on(self):\n self._affection_status_switch = False", "def switch(ind, status):\n print(\"Switching :\", ind, \">>\", status == 'on')\n GPIO.output(ind, status == 'on')", "async def async_turn_on(self, **kwargs):\n try:\n if await self._api.set_relay_state(self._dev_id, \"on\"):\n self._is_on = True\n self.async_write_ha_state()\n except Smile.PlugwiseError:\n _LOGGER.error(\"Error while communicating to device\")", "async def toggle(self, ctx):\r\n server = ctx.guild\r\n if self._logs[str(server.id)][\"toggle\"] == True:\r\n self._logs[str(server.id)][\"toggle\"] = False\r\n dataIO.save_json(self._logs_file, self._logs)\r\n await ctx.send(\"Modlogs are now disabled.\")\r\n return\r\n if self._logs[str(server.id)][\"toggle\"] == False:\r\n self._logs[str(server.id)][\"toggle\"] = True\r\n dataIO.save_json(self._logs_file, self._logs)\r\n await ctx.send(f\"Modlogs are now enabled {self.bot.get_emoji(470063310386233344)}\")\r\n return", "def affection_status_switch_on(self):\n self._affection_status_switch = True", "def onoff(boolean):\n if boolean:\n return \"on\"\n else:\n return \"off\"", "def run(self):\n self.network_ctrl.connect_with_remote_system()\n cmd = self.create_command(self.on_or_off, self.port)\n self.network_ctrl.send_command(cmd)\n\n check = self._port_status(self.port)\n result = self.network_ctrl.send_command(check)\n result = result[0]\n if 
self.on_or_off:\n if result == \"1\":\n self.router.mode = Mode.normal\n logging.info(\"[+] Successfully switched on port \" + str(self.port))\n else:\n self.router.mode = Mode.unknown\n logging.info(\"[-] Error switching on port \" + str(self.port))\n else:\n if result == \"0\":\n self.router.mode = Mode.off\n logging.info(\"[+] Successfully switched off port \" + str(self.port))\n else:\n self.router.mode = Mode.unknown\n logging.info(\"[-] Error switching off port \" + str(self.port))\n\n self.network_ctrl.exit()", "def turn_on(self):\n self._remote.power(1)", "def handle_switch(settings, methods=['GET', 'POST']):\n print(settings)\n message = ''\n if not ((settings['lora'] == \"True\") and (settings['sigfox'] == \"True\")):\n if settings['lora'] != SETTINGS['lora']:\n if settings['lora'] == \"True\":\n start_lora_session()\n settings['channel'] = 868100000\n settings['sf'] = 7\n message += 'LoRa receiver turned ON \\n'\n if settings['lora'] == \"False\":\n turn_off_lora()\n message += 'LoRa receiver turned OFF \\n'\n settings['channel'] = []\n settings['sf'] = []\n socketio.emit('log', message)\n if settings['sigfox'] != SETTINGS['sigfox']:\n if settings['sigfox'] == \"True\":\n start_sigfox()\n message += 'Sigfox receiver turned ON \\n'\n if settings['sigfox'] == 'False':\n turn_off_sigfox()\n message += 'Sigfox receiver turned OFF \\n'\n socketio.emit('log', message)\n update_local_settings(settings)\n else:\n socketio.emit('log', \"LoRa and Sigfox cannot be turned on at the same time\")\n socketio.emit('settings_update', SETTINGS)", "def jump_server(self, msg=\"Changing servers\"):\n if self.connection.is_connected():\n self.connection.disconnect(msg)\n self._connect()", "def setOn(self, command):\r\n self.setDriver('ST', 1)", "def bulb_toggle():\n tx = zb_explicit_command\n tx[\"dest_addr_long\"] = GE_LINK_BULB_MAC\n tx[\"cluster\"] = CLUSTER_A\n tx[\"data\"] = DATA_TOGGLE\n response = zb.Send(tx)", "def turn_off(self, **kwargs):\n _LOGGER.error(\"DALI TURN OFF\")\n self._state = False\n\n url = self.urlx + '/toggle'\n headers = {'x-ha-access': 'raspberry',\n 'content-type': 'application/json'}\n\n response = get(url, headers=headers)\n _LOGGER.error(response.text)\n\n json_data = json.loads(response.text)\n _LOGGER.error(json_data)\n\n state = json_data['state']\n\n self._dimmer = 0\n\n self._state = state == 'on'", "def server_activate(self):\n\t\tpass", "def turn_on(self, **kwargs):\n self.smartplug.turn_on()", "async def async_turn_on(self, **kwargs):\n try:\n state_on = await self._api.set_relay_state(\n self._dev_id, self._members, \"on\"\n )\n if state_on:\n self._is_on = True\n self.async_write_ha_state()\n except PlugwiseException:\n _LOGGER.error(\"Error while communicating to device\")", "def set_multiplex_mode(self, c, on):\n self.binding.set_switcher_mode(on)\n return True", "def state(config: dict):\n\n async def state_callback(device):\n if device.basic_info is not None:\n if device.available:\n print_device_details(device)\n\n device.shutdown_event_loop()\n\n logger.info(\"Initialising SonoffSwitch with host %s\" % config[\"host\"])\n SonoffSwitch(\n host=config[\"host\"],\n callback_after_update=state_callback,\n logger=logger,\n device_id=config[\"device_id\"],\n api_key=config[\"api_key\"],\n )", "def enable_server(self, server):\n log.info(\"Enabling %s in netscaler\", server)\n return self.post(\"server?action=enable\", {\"server\": {\"name\": server}}, content_type=self.content_type(\"server\"))", "def r2_on_off():\n \n r2_cmd_packet = 
b'\\x04\\x14\\x02\\x00\\x00\\xe6\\x0f'\n ser_relay.write(r2_cmd_packet)", "async def async_turn_on(self, **kwargs):\n if self.is_on == False:\n await self.async_call_service(self._cfg.get('turn_on_service')) \n self._state = True", "def toggle_lights(bridge):\n if check_any_light_on(bridge):\n turn_off_lights(bridge)\n else:\n turn_on_lights(bridge)" ]
[ "0.76402134", "0.71926904", "0.68771076", "0.663573", "0.655506", "0.64368844", "0.6404402", "0.63814354", "0.63383687", "0.6325942", "0.6312694", "0.6296537", "0.62616146", "0.62301064", "0.6228538", "0.61898726", "0.6149007", "0.61183274", "0.61173564", "0.6102371", "0.60952157", "0.60761505", "0.60696185", "0.6058112", "0.60438997", "0.603861", "0.6036663", "0.60213065", "0.6021279", "0.60155654" ]
0.78646547
0
edit the settings of a station, or create a new one
def edit_station(self): mac = request.params.get('mac', g.DEFAULT_MAC) log.debug('edit_station(%s)' % mac) # collect desired request params into dictionary # XXX need to do form validation here items = request.params stations = model.Session.query(model.Station) station = stations.filter(model.Station.mac == mac).first() if not station: station = model.Station(mac) model.Session.save(station) station.update(items) model.Session.update(station) model.Session.commit() redirect_to('/admin/dashboard')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_settings(path, server, station):\n db.save_data(path, server, station)", "def _edit_setting(self):\n settings = fileIO.load_json(\"settings.json\")\n self._list_settings(settings=settings)\n option = False\n while not option: #While loop until valid setting given\n option = input(\"Please type the setting you would like to change: \")\n if option not in settings:\n option = False\n newSetting = input(\"Please enter what you would like to change that setting to: \")\n command = \"edit_setting {0} {1}\".format(option, newSetting)\n return(command)", "def save(self):\n self.client._perform_empty(\n \"PUT\", \"/projects/%s/apiservices/%s/settings\" % (self.project_key, self.service_id),\n body = self.settings)", "def set_config(self): # called from button_set object \n self.settings['lights_on'] = self.lights_on.get()\n self.settings['lights_off'] = self.lights_off.get()\n self.settings['ambient_min'] = self.ambient_min.get()\n self.settings['soil_1'] = self.smc1.get()\n self.settings['soil_2'] = self.smc2.get()\n self.settings['soil_3'] = self.smc3.get()\n self.settings['soil_4'] = self.smc4.get()\n self.settings['overhead_level'] = self.overhead_level.get()\n\n # Save settings to config file in case of reboot / power-loss\n print \"UPDATING SETTINGS FILE\"\n with open(self.settings_path, 'w') as jsonfile:\n jsonfile.write(json.dumps(self.settings, indent=4))\n self.active_changes = True # (flag) changes are active!", "def setStation(self, isStation: bool) -> None:", "def save(self):\n return self.client._perform_empty(\"PUT\", \"/admin/general-settings\", body = self.settings)", "def writeSettings(self):\n for i in range(1,N_STATION+1):\n vol = f\"vol{i}\"\n self.settings.setValue(vol,self.param.vol[i-1])\n info = f\"info{i}\"\n self.settings.setValue(info,self.param.info[i-1])\n ip = f\"ip{i}\"\n self.settings.setValue(ip,self.param.ip[i-1])\n muted = f\"muted{i}\"\n self.settings.setValue(muted,self.param.muted[i-1])", "def _onSettings(self, event):\n dialog = sc.SettingsDialog(self)\n if dialog.ShowModal() == wx.ID_OK:\n dialog.saveSettings()\n dialog.Destroy()", "def save(self):\n self.workspace.client._perform_empty(\n \"PUT\", \"/workspaces/%s\" % self.workspace.workspace_key,\n body=self.settings)", "def edit_settings(request, form_class=EditSettings, template_name='rss/edit_settings'):\n if request.method == 'POST':\n form = form_class(request.POST)\n if form.is_valid():\n form.save()\n return redirect('feed_item_list')\n else:\n form = form_class()\n\n if request.is_ajax():\n template_name += '_ajax'\n template_name += '.html'\n return render_to_response(template_name, {'form': form}, context_instance=RequestContext(request))", "def save_settings(client_id, time_format, country, time_zone):\n\tsettings = Settings(user_id=client_id,\n\t\t\ttime_format=time_format,\n\t\t\tcountry=country,\n\t\t\ttime_zone=time_zone)\n\tsession.merge(settings)\n\tsession.commit()", "def save(self):\n self.client._perform_empty(\"PUT\", \"/project-folders/%s/settings\" % (self.project_folder_id), body = self.settings)", "def set_config(self, settings='settings.json'): # called from button_set object \n self.settings['lights_on'] = self.lights_on.get()\n self.settings['lights_off'] = self.lights_off.get()\n self.settings['photo1'] = self.photo1.get()\n self.settings['photo2'] = self.photo2.get()\n self.settings['smc1'] = self.smc1.get()\n self.settings['smc2'] = self.smc2.get()\n self.settings['smc3'] = self.smc3.get()\n self.settings['smc4'] = self.smc4.get()\n self.settings['watering'] = 
self.watering.get()\n self.settings['cycle'] = self.cycle.get()\n settings_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), settings)\n if os.path.exists(settings_path):\n with open(settings_path, 'w') as jsonfile:\n jsonfile.write(json.dumps(self.settings, indent=4))\n self.active_changes = True # (flag) changes are active!", "async def set_station(self: SimpleNWS, station: Optional[str] = None) -> None:\n if station:\n self.station = station\n if not self.stations:\n self.stations = [self.station]\n else:\n self.stations = await self.get_points_stations()\n self.station = self.stations[0]", "def save_settings(sender, instance, raw, using, update_fields, **kwargs):\n status = StatusModel.objects.get(pk=1)\n # See if periodic temperature update task exists\n try:\n periodic_response = PeriodicTask.objects.get(name=\"Get Response\")\n # Update period if different\n if periodic_response.interval.every != instance.t_sample:\n periodic_response.interval.every = instance.t_sample\n periodic_response.interval.save()\n except PeriodicTask.DoesNotExist:\n # Create periodic task if it doesn't exist\n periodic_interval = IntervalSchedule.objects.create(\n every=instance.t_sample,\n period='seconds'\n )\n periodic_response = PeriodicTask.objects.create(\n name=\"Get Response\",\n task=\"silviacontrol.tasks.async_comms_response\",\n enabled=status.on,\n interval=periodic_interval\n )\n # Send to Arduino (update settings, not status, current status values given)\n async_comms_update.delay(status.on, status.brew, status.mode)", "def save_setting(self):\n if self.is_checked.get():\n if \"Email\" not in s.alert:\n s.updateAlert(\"Email\")\n s.updateEmail(self.email_addr_entry.get())\n if not self.is_checked.get():\n if \"Email\" in s.alert:\n s.deleteAlert(\"Email\")\n s.deleteEmail()\n # Check the refresh interval\n if self.is_minimize_to_system_tray.get():\n s.updateMinimize(\"True\")\n else:\n s.updateMinimize(\"False\")\n\n if self.is_launch_at_start_up.get():\n s.updateLaunchAtStartup(\"True\")\n become_persistent(__file__)\n else:\n s.updateLaunchAtStartup(\"False\")\n remove_startup()\n\n s.updateSetting(self.interval_entry.get())\n Tracker.save_state(Tracker.FILENAME, s)", "def saveSettings(self):\n e = constrain.saveSettings(self)\n e.attrib['status'] = ('true' if self.status else 'false')\n return e", "def update(self):\n if self.name == \"Settings\":\n args = [\"NAME:Settings\"]\n else:\n args = [\"NAME:\" + self.name, \"Enable:=\", self.Enable]\n if self.UserSpecifiedSettings:\n args += self.manualsettings\n else:\n args += self.autosettings\n if self.name == \"Settings\":\n self.meshmodule.EditGlobalMeshRegion(args)\n else:\n self.meshmodule.EditMeshRegion(self.name, args)\n return True", "def change_settings(new_settings={}, file=None):\n gl = globals()\n if file is not None:\n execfile(file)\n gl.update(locals())\n gl.update(new_settings)\n # Here you can add some code to check that the new configuration\n # values are valid.", "def edit():", "def action_settings(self):\n\n cur_datadir = self.config.starbound_data_dir\n settings = SettingsDialog(self)\n settings.exec()\n new_datadir = self.config.starbound_data_dir\n if new_datadir:\n if cur_datadir != new_datadir:\n self.load_data()\n self.scene.refresh(self.data)\n else:\n self.close_world()\n\n # Make sure our menus are enabled/disabled as appropriate\n self.enforce_menu_state()\n\n # Re-focus the main window\n self.activateWindow()", "def update_settings(command):\n namespace = app.main(command)\n assert 
namespace.command == 'u' or namespace.command == \"updatesettings\"", "def storeSettings(request):\n c = Club(id=request.POST['id'], name=request.POST['name'], logo=request.POST['logo'], default_tournament_name=request.POST['default_tournament_name'], player_plugin__name=request.POST['player_plugin__name'], players_last_updated=request.POST['players_last_updated'], data=request.POST['data'])\n c.save()", "def manage_edit_save(self, REQUEST):\n self._config.update(ldap_config.read_form(REQUEST.form, edit=True))\n REQUEST.RESPONSE.redirect(self.absolute_url() + '/manage_edit')", "def testSaveSettings(self):\n \n self.waitForElement(\"link=Settings\")\n self.selenium.click(\"link=Settings\")\n self.selenium.wait_for_page_to_load(self.WAITTIME)\n self.selenium.click(\"name=zmanage_editProperties:method\")\n self.selenium.wait_for_page_to_load(self.WAITTIME)", "def update_settings(self):\n settings = {\n \"reference\": self,\n \"draw_tangents\": self.cbDrawTangents.isChecked(),\n }\n if self.cbShowSolarAngle.isChecked():\n settings[\"show_solar_angle\"] = self.cbSolarAngleType.currentText(), self.cbSolarBody.currentText()\n else:\n settings[\"show_solar_angle\"] = None\n\n self.view.set_remote_sensing_appearance(settings)", "def update_ionic_settings(self, key, value):\n if self._ionic_settings:\n if key in self._ionic_settings:\n self._ionic_settings[key] = value\n else:\n print(\"key does not exist!! keys include: {ediff ,nsw, ibrion ,isif, isym, nblock, kblock}\")\n else:\n print(\"magnetic settings not present!\")", "def test_crud_capabilities(self):\n\n station = TSStation.new('nl.test')\n station.names.append('Appingedam')\n self.assertEqual(station.id_, 'nl.test')\n self.assertEqual(station.code, 'test')\n self.assertEqual(station.country, 'nl')\n self.assertEqual(station.url, '/station/nl.test')\n station.put()\n\n # Read the entry\n station = TSStation.get('nl.test')\n self.assertEqual(station.id_, 'nl.test')\n self.assertEqual(station.name, 'Appingedam')\n\n # Update the entry\n station.names.append('Appingedam Centrum')\n station.put()\n station = TSStation.get('nl.test')\n self.assertEqual(station.names[1], 'Appingedam Centrum')\n\n # Update with dictionary\n dictionary = {\n 'id': 'nl.test',\n 'names': ['Amsterdam Muiderpoort', 'Muiderpoort'],\n 'displayIndex': 1,\n 'labelAngle': 180,\n 'importance': 3,\n 'wikiString': 'nl:Station Amsterdam Muiderpoort',\n 'openedString': '1896-05-18',\n 'positions': [\n {'km': 14.350, 'route': 'nl.os01', 'lat': 52.3605540, 'lon': 4.9311113},\n {'km': 0.398, 'route': 'nl.ssh01', 'lat': 52.3605540, 'lon': 4.9311113}\n ]}\n station.update_with_dictionary(dictionary)\n station = TSStation.get('nl.test')\n self.assertEqual(station.dictionary_from_object(), dictionary)\n self.assertEqual(station.name, 'Amsterdam Muiderpoort')\n self.assertEqual(station.display_name, 'Muiderpoort')\n self.assertEqual(station.wiki_link, 'http://nl.wikipedia.org/wiki/Station_Amsterdam_Muiderpoort')\n self.assertEqual(len(station.positions), 2)\n\n position1 = TSStationPosition.get('nl.test_os01')\n self.assertIsNotNone(position1)\n position2 = TSStationPosition.get('nl.test_ssh01')\n self.assertIsNotNone(position2)\n\n # Delete the entry\n station.delete()\n station = TSStation.get('nl.test')\n self.assertEqual(station, None)\n\n position1 = TSStationPosition.get('nl.test_os01')\n self.assertIsNone(position1)\n position2 = TSStationPosition.get('nl.test_ssh01')\n self.assertIsNone(position2)", "def save_changes(self):\n\n velib, autolib, subway = None, None, None\n for 
key, value in VELIB_SUBSCRIPTIONS.iteritems():\n if self._velib.get() == value:\n velib = key\n break\n for key, value in AUTOLIB_SUBSCRIPTIONS.iteritems():\n if self._autolib.get() == value:\n autolib = key\n break\n for key, value in SUBWAY_SUBSCRIPTIONS.iteritems():\n if self._subway.get() == value:\n subway = key\n break\n preferences = {\n FASTEST: self._fastest.get(),\n SHORTEST: self._shortest.get(),\n CHEAPEST: self._cheapest.get(),\n SIMPLEST: self._simplest.get(),\n WEATHER_IMPACT: self._weather_impact.get(),\n LESS_PAINFUL: self._less_painful.get(),\n LESS_WALKING: self._less_walking.get()\n }\n\n result = self._system.set_profile_settings(velib, autolib, subway, self._driving_licence.get(), preferences)\n if not result[\"success\"]:\n showerror('Erreur système', result[\"error\"])\n return\n\n # Redirection vers la page principale\n from settings import RideSettingsPage\n self.pack_forget()\n RideSettingsPage(self._window, self._system)", "def saveToolSettings(*args, **kwargs)->None:\n pass" ]
[ "0.6273995", "0.59488606", "0.5932659", "0.59095985", "0.5875198", "0.5861084", "0.58298385", "0.55824786", "0.55695635", "0.5492336", "0.5476819", "0.54746765", "0.54738426", "0.54638255", "0.5434654", "0.5421887", "0.5418526", "0.5418469", "0.5399601", "0.5392158", "0.53798693", "0.5358899", "0.5354391", "0.534365", "0.53412133", "0.5328243", "0.5311896", "0.52597934", "0.52490234", "0.5246434" ]
0.6632205
0
reset all stations to the default configuration
def reset_stations(self):
    # XXX currently everyone shares the default MAC
    mac = g.DEFAULT_MAC
    stations = model.Session.query(model.Station)
    station = stations.filter(model.Station.mac == mac).one()
    station.clone(model.Station())
    model.Session.update(station)
    model.Session.commit()
    # reset all config archives
    for type in ['station', 'user']:
        path = h.get_config_dir_for(type)
        src = os.path.join(path, '..', g.FACTORY_CONFIG)
        for f in os.listdir(path):
            if f.endswith('.tar.gz'):
                dst = os.path.join(path, f)
                log.debug('%s -> %s' % (src, dst))
                shutil.copyfile(src, dst)
    redirect_to('/admin/dashboard')
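The reset_stations action above leans on framework globals (g, h, model, redirect_to) defined elsewhere in the application. As a minimal, framework-free sketch of just the archive-reset step, assuming a plain config directory with the factory archive sitting one level above it (the function name, the config_dir argument and the factory.tar.gz default are stand-ins for h.get_config_dir_for() and g.FACTORY_CONFIG in the original):

import os
import shutil

def reset_archives(config_dir, factory_name='factory.tar.gz'):
    # Copy the factory archive over every existing per-station/per-user archive.
    src = os.path.join(config_dir, '..', factory_name)
    for name in os.listdir(config_dir):
        if name.endswith('.tar.gz'):
            dst = os.path.join(config_dir, name)
            shutil.copyfile(src, dst)  # overwrite with the factory default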
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset( self ):\n self.conf = self.defaults", "def reset_config():\r\n # TODO implement configuration reset\r\n pass", "def antenny_config_reset(self):\n return self.antenny_config.reset_default_config()", "def reset():\n Vessel.reset_instances()", "def reset(self):\n self.train_loss.reset_states()\n self.train_accuracy.reset_states()\n self.val_loss.reset_states()\n self.val_accuracy.reset_states()\n self.train_mIoU.reset_states()\n self.val_mIoU.reset_states()", "def reset_cfg():\n _C.merge_from_other_cfg(_CFG_DEFAULT)", "def reload(self):\n self.known_stations = {}\n self.read_noaa_stations()\n self.read_table_stations()\n self.last_reload_check_time = datetime.datetime.utcnow()\n LOGGER.info('Have %s known stations', len(self.known_stations.keys()))", "def reset_config():\n return _set_config(_gen_config())", "def reset(self):\n self.manager.delete_all()\n for name, val in DEFAULT_SETTINGS.items():\n val['name'] = name\n val['default_value'] = val['value']\n self.manager.from_dict(val)", "def reset(self):\n for provider in self.providers.values():\n provider.reset()\n\n for observation in self.observations.values():\n observation.reset()", "def reset(self):\n for gate in self.gates:\n gate.reset()", "def reset(self):\n for layer in self.network:\n layer.clean()", "def reset(self):\n for s in self.subsystems:\n s.uptime = 1", "def _reset(cls):\r\n cls._CONFIGURED = False\r\n cls._ENABLED = {}", "def reset(self):\n self.satisfiability = Satisfiability.UNTESTED\n self.model = None\n self.unsatCore = []", "def reset_config():\n\n Config.config().update({\"coerce\": True, \"debug\": True, \"active\": True})", "def reset_all(self):\n for i, stop in enumerate(self):\n stop._map = self\n stop.reset()", "def reset(self):\r\n # TODO: have reset flag such that it forces all the bottom changes\r\n self.pwm_freq = self._default[\"pwm_freq\"]\r\n self.gate_logic = self._default[\"gate_logic\"]\r\n self.max_pwm = self._default[\"max_pwm\"]\r\n self.lase_on_power_up = self._default[\"lase_on_power_up\"]\r\n\r\n self.mode = self._default[\"mode\"]\r\n self.lase = self._default[\"lase\"]\r\n self.percent = self._default[\"percent\"] # in percent\r", "def reset(self, config, **kwargs):\n pass", "def clearAllSettings(self) -> None:\n ...", "def reset(self):\n for tlight in self.trafficLights:\n self.trafficLights[tlight].reset()", "def reset(self):\n if self._key:\n self._lib.StObjectReset(self._key)\n os.chdir(self._cwd)\n self._layers.clear() # layer: index\n self._substrate = None\n self._experiments.clear() # analyzed experiments\n self._tmpstandards.clear()", "def reset(self):\n self.data = self._defaults", "def reset(self, initialization='default'):\n raise NotImplementedError()", "def reset(self):\n self.ship_list = self.backup_list", "def reset_values(self):\n self.parse_config_file()", "def resetDeviceStates(self):", "def reset():", "def reset():", "def reset():" ]
[ "0.6853071", "0.68103075", "0.6650491", "0.6463266", "0.64595515", "0.64433664", "0.642196", "0.6405763", "0.6370755", "0.6367848", "0.6355455", "0.6312466", "0.62638384", "0.6220435", "0.6203902", "0.6186515", "0.618514", "0.6183243", "0.61750066", "0.614511", "0.6129231", "0.61112756", "0.6098638", "0.6092727", "0.6090644", "0.6086672", "0.6084708", "0.6073663", "0.6073663", "0.6073663" ]
0.80534256
0
Takes a query string and returns up to n results from dfr.jstor.org.
def jstor(query_str, n):
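The document field of this record stores only the signature. The sketch below is not the author's implementation; it only illustrates one way such a wrapper could page through a search service until it has collected up to n results. The endpoint URL, the q/limit/offset parameter names and the 'results' key in the JSON response are all assumptions, since the real dfr.jstor.org interface is not described here.

import requests

def jstor_sketch(query_str, n, base_url='https://dfr.jstor.org/api/search'):
    # base_url and the parameter/response field names are hypothetical placeholders.
    results = []
    offset = 0
    while len(results) < n:
        resp = requests.get(
            base_url,
            params={'q': query_str, 'limit': min(25, n - len(results)), 'offset': offset},
            timeout=30,
        )
        resp.raise_for_status()
        page = resp.json().get('results', [])  # assumed response shape
        if not page:
            break  # no further hits available
        results.extend(page)
        offset += len(page)
    return results[:n]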
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _search(q: str, n: int):\n return search_client.retrieve([q], n)[0]", "def query(q, top_n=12):\n print('Query: ' + q + '; Top N: ' + str(top_n))\n\n driver = None\n bad_request = False\n urls = set()\n try:\n driver = Fetcher.get_selenium_driver()\n driver.get('https://api.duckduckgo.com/?q=' + q + '&kl=wt-wt')\n except:\n print('An error occurred while searching query: ' + q)\n bad_request = True\n finally:\n try:\n if not bad_request:\n results = driver.find_elements_by_class_name('result__a')\n result_size = len(results)\n print('Result Size: ' + str(result_size))\n while result_size > 0 and len(urls) < top_n:\n for element in results:\n new_url = element.get_attribute('href')\n # TODO: Filter URLs if required\n urls.add(new_url)\n if len(urls) == top_n:\n break\n\n # Infinite Scroll\n if len(urls) < top_n:\n driver.execute_script('window.scrollTo(0, document.body.scrollHeight);')\n results = driver.find_elements_by_class_name('result__a')\n results = results[result_size:]\n result_size = len(results)\n print('Moved to Next Page. Result Size: ' + str(result_size))\n except:\n print('An error occurred while searching query: ' + q + ' and fetching results')\n finally:\n if driver:\n Fetcher.close_selenium_driver(driver)\n print('Search Completed')\n return urls", "def query(url):", "def _make_paged_query(\n conn, search_base, search_scope, ad_query, attr_list, page_size\n):\n result = []\n page_result_control = SimplePagedResultsControl(\n size=page_size,\n cookie=''\n )\n\n msgid = conn.search_ext(\n search_base,\n search_scope,\n ad_query,\n attr_list,\n serverctrls=[page_result_control],\n )\n\n while True:\n r_type, r_data, r_msgid, serverctrls = conn.result3(msgid)\n result.extend(r_data)\n\n if serverctrls:\n if serverctrls[0].cookie:\n page_result_control.size = page_size\n page_result_control.cookie = serverctrls[0].cookie\n\n msgid = conn.search_ext(\n search_base,\n search_scope,\n ad_query,\n attr_list,\n serverctrls=[page_result_control],\n )\n else:\n break\n\n return result", "def solr_query(config, solr_host, fq, solr_collection_name):\n # solr_collection_name = config['solr_collection_name']\n\n getVars = {'q': '*:*',\n 'fq': fq,\n 'rows': 300000}\n\n url = f'{solr_host}{solr_collection_name}/select?'\n response = requests.get(url, params=getVars)\n return response.json()['response']['docs']", "def query_and_fetch(query, top_n=12):\n global url_details, url_text\n print('Query: ' + query + '; Top N: ' + str(top_n))\n url_details = []\n url_text = []\n driver = None\n bad_request = False\n try:\n driver = Fetcher.get_selenium_driver()\n driver.get('https://api.duckduckgo.com/?q=' + query + '&kl=wt-wt')\n except:\n print('An error occurred while searching query: ' + query)\n Fetcher.close_selenium_driver(driver)\n Fetcher.search_driver = None\n bad_request = True\n finally:\n try:\n if not bad_request:\n results = driver.find_elements_by_class_name('result__a')\n result_size = len(results)\n print('Result Size: ' + str(result_size))\n while result_size > 0 and len(url_details) < top_n:\n urls = []\n for element in results:\n new_url = element.get_attribute('href')\n # TODO: Filter URLs if required\n print(new_url)\n urls.append(new_url)\n\n fetched_result = Fetcher.fetch_multiple(urls, top_n)\n\n for fetched_data in fetched_result:\n if not fetched_data[1] or len(fetched_data[1].strip()) == 0:\n continue\n details = dict()\n details['url'] = fetched_data[0]\n details['html'] = fetched_data[1]\n details['title'] = fetched_data[2]\n details['label'] = 
predict(fetched_data[3])\n url_details.append(details)\n url_text.append(fetched_data[3])\n if len(url_details) == top_n:\n break\n\n # Infinite Scroll\n if len(url_details) < top_n:\n driver.execute_script('window.scrollTo(0, document.body.scrollHeight);')\n results = driver.find_elements_by_class_name('result__a')\n results = results[result_size:]\n result_size = len(results)\n print('Moved to Next Page. Result Size: ' + str(result_size))\n except:\n print('An error occurred while searching query: '+ query + ' and fetching results')\n #finally:\n # if driver is not None:\n # Fetcher.close_selenium_driver(driver)\n setattr(flask.current_app, 'url_text', url_text)\n print('Search Completed')\n return url_details", "def search(query_string):", "def get_dois(query_str, count=100):\n url = '%s/%s' % (elsevier_search_url, query_str)\n if api_key is None:\n logging.error('Missing API key at %s, could not perform search.' %\n api_key_file)\n return None\n params = {'APIKey': api_key,\n 'query': query_str,\n 'count': count,\n 'httpAccept': 'application/xml',\n 'sort': '-coverdate',\n 'field': 'doi'}\n res = urllib2.urlopen(url, data=urllib.urlencode(params))\n xml = res.read()\n et = ET.fromstring(xml)\n doi_tags = et.findall('atom:entry/prism:doi', elsevier_ns)\n dois = [dt.text for dt in doi_tags]\n return dois", "def get():\n data = []\n start = request.args.get('start', 0, type=int)\n rows = request.args.get('rows', 10, type=int)\n query = request.args.get('query', '')\n if not query:\n return jsonify(data), 200\n\n try:\n solr_query, nr_number, nr_name = SolrQueries.get_parsed_query_name_nr_search(query)\n condition = ''\n if nr_number:\n condition = f\"requests.nr_num ILIKE '%{nr_number}%'\"\n if nr_name:\n if condition:\n condition += ' OR '\n name_condition = \"requests.name_search ILIKE '%\"\n name_condition += \"%' AND requests.name_search ILIKE '%\".join(nr_name.split())\n name_condition += \"%'\"\n\n condition += f'({name_condition})'\n\n results = RequestDAO.query.filter(\n RequestDAO.stateCd.in_([State.DRAFT, State.INPROGRESS, State.REFUND_REQUESTED]),\n text(f'({condition})')\n ).options(\n lazyload('*'),\n eagerload(RequestDAO.names).load_only(Name.name),\n load_only(\n RequestDAO.id,\n RequestDAO.nrNum\n )\n ).order_by(RequestDAO.submittedDate.desc()).limit(rows).all()\n\n data.extend([{\n # 'id': nr.id,\n 'nrNum': nr.nrNum,\n 'names': [n.name for n in nr.names]\n } for nr in results])\n\n while len(data) < rows:\n nr_data, have_more_data = RequestSearch._get_next_set_from_solr(solr_query, start, rows)\n nr_data = nr_data[:(rows - len(data))]\n data.extend([{\n # 'id': nr.id,\n 'nrNum': nr.nrNum,\n 'names': [n.name for n in nr.names]\n } for nr in nr_data])\n\n if not have_more_data:\n break # no more data in solr\n start += rows\n\n return jsonify(data), 200\n except Exception:\n return jsonify({'message': 'Internal server error'}), 500", "def search(query, max: int = None):\n for post in client.search(query, max=max):\n print(json.dumps(post))", "def search_google(query, num_results=1):\n\n results = []\n num_ppg = min([num_results, 10])\n for i in search(\n query, # The query you want to run\n tld=\"com\", # The top level domain\n lang=\"en\", # The language\n num=num_ppg, # Number of results per page\n start=0, # First result to retrieve\n stop=num_results, # Last result to retrieve\n pause=3.0, # Lapse between HTTP requests\n ):\n\n results.append(i)\n\n if len(results) == 0:\n results = [\"n/a\"]\n\n return results", "def scrape_result_pages(n, r, s):\n h = list()\n for 
page in xrange(n):\n for result in create_search_results(page + 1, r, s):\n h.append(result)\n return h", "def query1():\n\n print(\"1. What are the most popular three articles of all time? Which \" +\n \"articles have been accessed the most?\\n\")\n\n query = \"\"\"\n SELECT articles.title, subq.hits FROM articles\n LEFT JOIN\n (SELECT COUNT(log.path) AS hits, log.path FROM log\n WHERE log.path LIKE '/article/%'\n AND log.status = '200 OK' AND log.method = 'GET'\n GROUP BY log.path) AS subq\n ON subq.path LIKE '/article/'||articles.slug\n ORDER BY subq.hits DESC LIMIT 3;\n \"\"\"\n\n response = db_query(query)\n\n for i, j in enumerate(response):\n # Convert tuple to list to allow writing. Format \"hits\" with comma\n # seperator. Print output.\n j = list(j)\n j[1] = str(format(j[1], ',d'))\n print(\" Title: '{}' - {} views\".format(*j))", "def get_first_n_crawled_chunks(self, number):\n try:\n conn = psycopg2.connect(\"dbname='{0}'\".format(DATABASE))\n cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n cur.execute(\"SELECT * FROM crawler WHERE c_task = 'crawled' ORDER BY index LIMIT %s;\", (number,))\n results = cur.fetchall()\n cur.close()\n return results\n except Exception as e:\n print(e)", "def search(self, query, maxhits=100):", "def search():\r\n\r\n # Ensure parameter is present\r\n if not request.args.get(\"q\"):\r\n raise RuntimeError(\"missing search string\")\r\n\r\n # Query db with the string LIMIT result to 10\r\n # TODO\r\n\r\n # send back as json\r\n # TODO\r\n\r\n return jsonify({})", "def query_serp(query:str, site:str, dates:list, num_results:int, paper_name:str) -> list:\n all_sites = []\n total_sites_count = 0\n\n for d in dates:\n try:\n # Get query dict and params dict\n query_r, params = make_params(query=query, site=site, date_start=d[0], date_end=d[1],\n num_results=num_results, paper=paper_name)\n # serpAPI query\n client = GoogleSearchResults(params)\n results = client.get_dict()\n news_results = results['news_results']\n\n count = 0\n sites_date = []\n # Loop through till end of search results or error encountered\n while (news_results and len(news_results)>0) or ('error' not in results):\n sites = [news['link'] for news in news_results]\n sites_date.extend(sites)\n count+=len(sites)\n\n params['start'] = count\n client = GoogleSearchResults(params)\n results = client.get_dict()\n news_results = results['news_results']\n\n print('Date Range: {}-{}\\tTotal Sites: {}'.format(d[0],d[1],len(sites_date)))\n\n # add list of sites to query dict\n query_r['sites'] = sites_date\n all_sites.append(query_r)\n total_sites_count += len(sites_date)\n except Exception as e:\n print(e)\n print(d)\n continue\n print('Total Sites: {}'.format(total_sites_count))\n return all_sites", "def request(query):", "def esearch_query(payload, retmax = 100, sleep=2):\n url = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi'\n payload['retmax'] = retmax\n payload['retstart'] = 0\n ids = list()\n count = 1\n while payload['retstart'] < count:\n response = requests.get(url, params=payload)\n xml = ET.fromstring(response.content)\n count = int(xml.findtext('Count'))\n ids += [xml_id.text for xml_id in xml.findall('IdList/Id')]\n payload['retstart'] += retmax\n time.sleep(sleep)\n return ids", "def esearch_query(payload, retmax = 100, sleep=2):\n url = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi'\n payload['retmax'] = retmax\n payload['retstart'] = 0\n ids = list()\n count = 1\n while payload['retstart'] < count:\n response = requests.get(url, 
params=payload)\n xml = ET.fromstring(response.content)\n count = int(xml.findtext('Count'))\n ids += [xml_id.text for xml_id in xml.findall('IdList/Id')]\n payload['retstart'] += retmax\n time.sleep(sleep)\n return ids", "def fetch_pages(query_val, page_num):\n \n for page_id in range(1 + page_num + 1):\n try:\n output = fetch_data(query_val, page_id)\n for j in output:\n print(str(j))\n \n except Exception as e:\n print(e)", "def page_query(q):\n\toffset = 0\n\twhile True:\n\t\tr = False\n\t\tfor elem in q.limit(1000).offset(offset):\n\t\t r = True\n\t\t yield elem\n\t\toffset += 1000\n\t\tif not r:\n\t\t\tbreak", "def _paginatedRequest(allPages, *args):\n data = []\n currentPage = 0\n while True:\n newData = Gw2Spidy._request(*(args + (str(currentPage),)))\n if not allPages:\n return newData['results']\n data.extend(newData['results'])\n currentPage = currentPage + 1\n if newData['page'] == newData['last_page']:\n break\n return data", "def fulfill_query(text, page_no, page_size):\n keywords = get_keywords(text)\n if not keywords:\n return 0, []\n else:\n n, results = db.get_profiles_by_keywords(keywords, page_no, page_size)\n profiles = tuple(zip(*results))\n if profiles:\n profiles = profiles[0]\n return n, profiles", "def query_all():\n all_nutrients = {}\n offset_counter = 0\n\n while offset_counter <= 7500:\n query_string = 'https://api.nal.usda.gov/ndb/nutrients/?format=json&api_key={}&nutrients=205&nutrients=204&nutrients=208&nutrients=269&offset={}'.format(api_key, offset_counter)\n results = requests.get(query_string)\n all_nutrients[offset_counter] = results.json()\n offset_counter += 150\n\n print all_nutrients", "def get_summaries(query, **kwargs):\n kwargs.update(stop=40)\n results = search(query, **kwargs)\n return results", "def _Get(self, count):\n if count > MAXIMUM_RESULTS:\n count = MAXIMUM_RESULTS\n entity_list = self._Next(count)\n while len(entity_list) < count and self.__more_results:\n next_results = self._Next(count - len(entity_list))\n if not next_results:\n break\n entity_list += next_results\n return entity_list;", "def fetch(self, **kwargs):\n page = kwargs.get('page', 1)\n if page is None:\n page = 1\n elif page > self.max_pages:\n page = self.max_pages\n\n start_index = ((page - 1) * self.max_results_per_page)\n\n end_index = kwargs.get('num', start_index + self.max_results_per_page)\n if end_index is None:\n end_index = start_index + self.max_results_per_page\n\n headers = {\n 'Username': self.engine_info['ISCAPE_SEARCH_USERNAME'],\n 'Userkey': self.engine_info['ISCAPE_SEARCH_USER_KEY']\n }\n\n data = {\n 'query': kwargs.pop('query', ''),\n 'installation_id': self.engine_info['INSTALLATION_ID'],\n 'page_start': start_index,\n 'page_end': end_index\n }\n\n if getattr(settings, 'USE_V2_API', False):\n query_endpoint = self.engine_info['QUERY_ENDPOINT'].format(\n data.get('installation_id', ''))\n else:\n query_endpoint = self.engine_info['QUERY_ENDPOINT']\n req = requests.Request(\n 'POST',\n query_endpoint,\n headers=headers,\n data=data)\n prepared_request = req.prepare()\n pretty_print_POST(prepared_request)\n session = requests.Session()\n session.verify = False\n\n try:\n response = session.send(prepared_request)\n logger.warning(\" RESPONSE: {0}\".format(response.content))\n response.raise_for_status()\n except Exception as e: # this might have to change for bad responses...\n logger.exception(str(e))\n else:\n return response.json()", "def test_paged_search(self):\n search_dn = \"ou=nerdherd,%s\" % self.basedn\n res = self.conn.search(search_dn, 1, 
page_size=2)\n for ent in res:\n self.assertIsInstance(ent, bonsai.LDAPEntry)\n page = 1 # First page already is acquired.\n while True:\n if len(res) > 2:\n self.fail(\"The size of the page is greater than expected.\")\n msgid = res.acquire_next_page()\n if msgid is None:\n break\n res = self.conn.get_result(msgid)\n page += 1\n self.assertEqual(page, 3)", "def parse_input():\n if len(sys.argv) == 1 or len(sys.argv) > 3:\n print(\"\"\"Usage: ./search.py \"query\" [k]\nReturns the top k results of the search. The second argument is optional, by default k = 10.\"\"\")\n sys.exit(1)\n elif len(sys.argv) == 2:\n query_string = sys.argv[1]\n k = 10\n else:\n query_string = sys.argv[1]\n k = int(sys.argv[2])\n if k < 1 or k > 100000:\n print(\"Error! k must be between 1 and 100000, setting k = 10\")\n k = 10\n return (query_string, k)" ]
[ "0.6514424", "0.64358556", "0.61324334", "0.5951311", "0.589267", "0.5877768", "0.5805753", "0.57447416", "0.57253385", "0.5724489", "0.57081157", "0.5680368", "0.5673939", "0.5651415", "0.5615148", "0.56129974", "0.557348", "0.5568963", "0.5501987", "0.5501987", "0.54856455", "0.54794943", "0.5464073", "0.5438427", "0.5428097", "0.5418772", "0.5407406", "0.53729916", "0.53686404", "0.5354822" ]
0.67318326
0
Parse the boundary coordinates from a GenBank-formatted flat file. The function takes a Biopython SeqFeature object containing data that was parsed from the feature in the flat file. Parsing these coordinates can be tricky. There can be more than one set of coordinates if it is a compound location. Only features with 1 or 2 open reading frames (parts) are correctly parsed. Also, the boundaries may not be precise; instead they may be open or fuzzy. Non-precise coordinates are converted to '-1'. If the strand is undefined, the coordinates are converted to '-1' and parts is set to '0'. If an incorrect data type is provided, coordinates are set to '-1' and parts is set to '0'.
def parse_coordinates(seqfeature):
    start_position = None
    stop_position = None
    start = -1
    stop = -1
    parts = 0
    if (isinstance(seqfeature.location, FeatureLocation) or \
        isinstance(seqfeature.location, CompoundLocation)):
        if seqfeature.strand is None:
            pass
        elif isinstance(seqfeature.location, FeatureLocation):
            parts = 1
            start_position = seqfeature.location.start
            stop_position = seqfeature.location.end
        elif isinstance(seqfeature.location, CompoundLocation):
            parts = len(seqfeature.location.parts)
            # Skip this compound seqfeature if it is comprised of more
            # than two features (tricky to parse).
            if parts == 2:
                # Retrieve compound seqfeature positions based on strand.
                if seqfeature.strand == 1:
                    start_position = seqfeature.location.parts[0].start
                    stop_position = seqfeature.location.parts[1].end
                elif seqfeature.strand == -1:
                    start_position = seqfeature.location.parts[1].start
                    stop_position = seqfeature.location.parts[0].end
                else:
                    pass
            else:
                pass
        else:
            pass
    else:
        pass
    if isinstance(start_position, ExactPosition):
        start = int(start_position)
    if isinstance(stop_position, ExactPosition):
        stop = int(stop_position)
    return (start, stop, parts)
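A short usage example makes the return convention concrete. It builds two Biopython features, a simple one and a two-part compound one, and passes them to parse_coordinates as defined above. It assumes a Biopython release in which SeqFeature.strand is still available, since the function relies on that attribute.

from Bio.SeqFeature import SeqFeature, FeatureLocation, CompoundLocation

# Simple feature on the forward strand: exact boundaries, a single part.
simple = SeqFeature(FeatureLocation(5, 20, strand=1), type='CDS')
print(parse_coordinates(simple))    # expected: (5, 20, 1)

# Two-part compound feature, e.g. a gene wrapping around the genome origin.
compound = SeqFeature(
    CompoundLocation([FeatureLocation(10, 40, strand=1),
                      FeatureLocation(50, 90, strand=1)]),
    type='CDS')
print(parse_coordinates(compound))  # expected: (10, 90, 2)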
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parseBoundaryField(fn):\n content = getFileContent(fn)\n if content is not None:\n return parseBoundaryContent(content)\n else:\n return None", "def get_bbox(fname):\r\n fname = fname.split('_') # fname -> list\r\n i = fname.index('bbox')\r\n return map(float, fname[i+1:i+5]) # m\r", "def read_mesh_boundary(sFilename_boundary_in):\n iReturn_code = 1\n if os.path.isfile(sFilename_boundary_in):\n pass\n else:\n print('This mesh file does not exist: ', sFilename_boundary_in )\n iReturn_code = 0\n return iReturn_code\n\n \n pDriver_json = ogr.GetDriverByName('GeoJSON') \n pDataset_mesh = pDriver_json.Open(sFilename_boundary_in, gdal.GA_ReadOnly)\n pLayer_mesh = pDataset_mesh.GetLayer(0)\n pSpatial_reference_out = pLayer_mesh.GetSpatialRef()\n ldefn = pLayer_mesh.GetLayerDefn() \n\n #we also need to spatial reference\n for pFeature_mesh in pLayer_mesh:\n pGeometry_mesh = pFeature_mesh.GetGeometryRef() \n pGeometrytype_boundary = pGeometry_mesh.GetGeometryName()\n if(pGeometrytype_boundary == 'POLYGON'): \n pBoundary_ogr = pGeometry_mesh \n else:\n if(pGeometrytype_boundary == 'MULTIPOLYGON'): \n nLine = pGeometry_mesh.GetGeometryCount()\n for i in range(nLine):\n pBoundary_ogr = pGeometry_mesh.GetGeometryRef(i)\n \n pass\n else:\n pass\n pass \n \n \n pBoundary_wkt = pBoundary_ogr.ExportToWkt()\n aExtent = pBoundary_ogr.GetEnvelope()\n min_x, max_x, min_y, max_y = aExtent\n \n return pBoundary_wkt, aExtent", "def cfdReadBoundaryFile(self):\r\n \r\n with open(self.boundaryFile,\"r\") as fpid:\r\n print('Reading boundary file ...')\r\n \r\n ## (dict) key for each boundary patch\r\n self.cfdBoundaryPatchesArray={}\r\n for linecount, tline in enumerate(fpid):\r\n \r\n if not io.cfdSkipEmptyLines(tline):\r\n continue\r\n \r\n if not io.cfdSkipMacroComments(tline):\r\n continue\r\n \r\n if \"FoamFile\" in tline:\r\n dictionary=io.cfdReadCfdDictionary(fpid)\r\n continue\r\n \r\n count=0\r\n if len(tline.split()) ==1:\r\n if \"(\" in tline:\r\n continue\r\n if \")\" in tline:\r\n continue\r\n \r\n if tline.strip().isdigit():\r\n \r\n self.numberOfBoundaryPatches = tline.split()[0]\r\n continue\r\n \r\n boundaryName=tline.split()[0]\r\n \r\n self.cfdBoundaryPatchesArray[boundaryName]=io.cfdReadCfdDictionary(fpid)\r\n ## number of faces for the boundary patch\r\n self.cfdBoundaryPatchesArray[boundaryName]['numberOfBFaces']= int(self.cfdBoundaryPatchesArray[boundaryName].pop('nFaces'))\r\n \r\n ## start face index of the boundary patch in the self.faceNodes\r\n self.cfdBoundaryPatchesArray[boundaryName]['startFaceIndex']= int(self.cfdBoundaryPatchesArray[boundaryName].pop('startFace'))\r\n count=count+1\r\n\r\n ## index for boundary face, used for reference\r\n self.cfdBoundaryPatchesArray[boundaryName]['index']= count", "def get_bbox(fname):\r\n fname = fname.split('_') # fname -> list\r\n i = fname.index('bbox')\r\n return list(map(float, fname[i+1:i+5])) # m\r", "def GetCoordinates(XSGeometry, Dbf):\n BedLevel = XSGeometry[0]\n BankLeftLevel = XSGeometry[1]\n BankRightLevel = XSGeometry[2]\n InterPLHeight = XSGeometry[3]\n InterPRHeight = XSGeometry[4]\n Bl = XSGeometry[5]\n Br = XSGeometry[6]\n xl = XSGeometry[7]\n yl = XSGeometry[8]\n xr = XSGeometry[9]\n yr = XSGeometry[10]\n B = XSGeometry[11]\n\n Xcoords = list()\n Ycoords = list()\n Zcoords = list()\n # point 1\n Xcoords.append(xl)\n Ycoords.append(yl)\n Zcoords.append(BankLeftLevel)\n # 8 points cross sections\n if Dbf != False:\n # point 2\n Xcoords.append(xl)\n Ycoords.append(yl)\n Zcoords.append(BedLevel + Dbf + InterPLHeight)\n # 
point 3\n Xcoords.append(xl + (Bl / (Bl + Br + B)) * (xr - xl))\n Ycoords.append(yl + (Bl / (Bl + Br + B)) * (yr - yl))\n Zcoords.append(BedLevel + Dbf)\n # point 4\n Xcoords.append(xl + (Bl / (Bl + Br + B)) * (xr - xl))\n Ycoords.append(yl + (Bl / (Bl + Br + B)) * (yr - yl))\n Zcoords.append(BedLevel)\n # point 5\n Xcoords.append(xl + ((Bl+B) / (Bl + Br + B)) * (xr - xl))\n Ycoords.append(yl + ((Bl+B) / (Bl + Br + B)) * (yr - yl))\n Zcoords.append(BedLevel)\n # point 6\n Xcoords.append(xl + ((Bl+B) / (Bl + Br + B)) * (xr - xl))\n Ycoords.append(yl + ((Bl+B) / (Bl + Br + B)) * (yr - yl))\n Zcoords.append(BedLevel + Dbf)\n # point 7\n Xcoords.append(xr)\n Ycoords.append(yr)\n Zcoords.append(BedLevel + Dbf + InterPRHeight)\n else:\n # point 2\n Xcoords.append(xl)\n Ycoords.append(yl)\n Zcoords.append(BedLevel + InterPLHeight)\n # point 3\n Xcoords.append(xl + (Bl / (Bl + Br + B)) * (xr - xl))\n Ycoords.append(yl + (Bl / (Bl + Br + B)) * (yr - yl))\n Zcoords.append(BedLevel)\n # point 4\n Xcoords.append(xl + ((Bl+B) / (Bl + Br + B)) * (xr - xl))\n Ycoords.append(yl + ((Bl+B) / (Bl + Br + B)) * (yr - yl))\n Zcoords.append(BedLevel)\n # point 5\n Xcoords.append(xr)\n Ycoords.append(yr)\n Zcoords.append(BedLevel + InterPRHeight)\n\n # point 8\n Xcoords.append(xr)\n Ycoords.append(yr)\n Zcoords.append(BankRightLevel)\n\n return Xcoords, Ycoords, Zcoords", "def get_bbox(fname):\r\n\r\n fname = fname.split('_') # fname -> list\r\n i = fname.index('bbox')\r\n return list(map(float, fname[i+1:i+5])) # m\r", "def parseBed(fname):\n \n handle=open(fname,'r')\n for line in handle:\n if line.startswith(\"#\"):\n continue\n if line.startswith(\"track\") or line.startswith(\"browser\"):\n continue\n vals=line.rstrip().split(\"\\t\")\n chr = vals[0]\n start = int(vals[1])\n end = int(vals[2])\n if len(vals)>=3:\n strand = vals[5]\n score = float(vals[4])\n name = vals[3]\n res = Interval(chr,start,end)\n if len(vals)>3:\n res.strand = strand\n res.score = score\n res.name = name\n res = Interval(chr,start,end,strand=strand,score=score,name=name)\n if len(vals)>6:\n res = SplicedInterval(res.chr,res.start,res.end,res.strand,score=res.score,name=res.name,exonLengths=vals[10],exonOffsets=vals[11])\n #res=dict(zip(bed_fields,vals))\n #res['start'],res['end'],res['score'] = int(res['start']),int(res['end']),int(res['score'])\n yield res", "def test_parses_map_2(self):\n p = GPBEC()\n p.parse(\"GPBEC,220516,5130.02,N,00046.34,W,213.8,T,218.0,M,0004.6,N,EGLM*11\")\n\n self.assertEquals(\"GPBEC\", p.sen_type)\n self.assertEquals(\"220516\", p.timestamp)\n self.assertEquals(\"5130.02\", p.waypoint_lat)\n self.assertEquals(\"N\", p.waypoint_lat_dir)\n self.assertEquals(\"00046.34\", p.waypoint_lon)\n self.assertEquals(\"W\", p.waypoint_lon_dir)\n self.assertEquals(\"213.8\", p.bearing_true)\n self.assertEquals(\"T\", p.bearing_true_sym)\n self.assertEquals(\"218.0\", p.bearing_mag)\n self.assertEquals(\"M\", p.bearing_mag_sym)\n self.assertEquals(\"0004.6\", p.nautical_miles)\n self.assertEquals(\"N\", p.nautical_miles_sym)\n self.assertEquals(\"EGLM\", p.waypoint_id)\n self.assertEquals(\"11\", p.checksum)", "def feature_to_bbox(feature: dict) -> List[float]:\n coords = feature[\"geometry\"][\"coordinates\"][0]\n if len(coords) == 5:\n assert coords[-1] == coords[0]\n del coords[-1]\n\n p0, p1 = coords[0], coords[1]\n w, s, e, n = p0[0], p0[1], p1[0], p1[1]\n # unpacking lon, lat, alt in single variable c to avoid issues when alt is\n # missing in GeoJSON lon = c[0], lat = c[1], alt = c[2]\n for c in coords[1:]:\n 
if c[0] < w:\n w = c[0]\n if c[0] > e:\n e = c[0]\n if c[1] < s:\n s = c[1]\n if c[1] > n:\n n = c[1]\n\n return [w, s, e, n]", "def calculate_boundaries(self, latmesh=False, mask=None, lat_mask=None,\n adot=None, U_mask=None, mark_divide=False):\n s = \"::: calculating boundaries :::\"\n print_text(s, cls=self)\n\n if lat_mask == None and mark_divide:\n s = \">>> IF PARAMETER <mark_divide> OF calculate_boundaries() IS \" + \\\n \"TRUE, PARAMETER <lat_mask> MUST BE AN EXPRESSION FOR THE LATERAL\" + \\\n \" BOUNDARIES <<<\"\n print_text(s, 'red', 1)\n sys.exit(1)\n \n # this function contains markers which may be applied to facets of the mesh\n self.ff = FacetFunction('size_t', self.mesh)\n self.ff_acc = FacetFunction('size_t', self.mesh)\n self.cf = CellFunction('size_t', self.mesh)\n dofmap = self.Q.dofmap()\n\n S = self.S\n B = self.B\n \n # default to all grounded ice :\n if mask == None:\n mask = Expression('1.0', element=self.Q.ufl_element())\n \n # default to all positive accumulation :\n if adot == None:\n adot = Expression('1.0', element=self.Q.ufl_element())\n \n # default to U observations everywhere :\n if U_mask == None:\n U_mask = Expression('1.0', element=self.Q.ufl_element())\n\n self.init_adot(adot)\n self.init_mask(mask)\n self.init_U_mask(U_mask)\n\n if mark_divide:\n s = \" - marking the interior facets for incomplete meshes -\"\n print_text(s, cls=self)\n self.init_lat_mask(lat_mask)\n\n self.S.set_allow_extrapolation(True)\n self.B.set_allow_extrapolation(True)\n self.mask.set_allow_extrapolation(True)\n self.adot.set_allow_extrapolation(True)\n self.U_mask.set_allow_extrapolation(True)\n self.lat_mask.set_allow_extrapolation(True)\n \n tol = 1e-6\n \n s = \" - marking boundaries - \"\n print_text(s, cls=self)\n\n if latmesh:\n s = \" - using a lateral surface mesh - \"\n print_text(s, cls=self)\n \n class GAMMA_S_GND(SubDomain):\n def inside(self, x, on_boundary):\n return mask(x[0], x[1], x[2]) <= 1.0 and on_boundary\n gamma_s_gnd = GAMMA_S_GND()\n\n class GAMMA_S_FLT(SubDomain):\n def inside(self, x, on_boundary):\n return mask(x[0], x[1], x[2]) > 1.0 and on_boundary\n gamma_s_flt = GAMMA_S_FLT()\n\n class GAMMA_U_GND(SubDomain):\n def inside(self, x, on_boundary):\n return abs(x[2] - S(x[0], x[1], x[2])) < tol \\\n and mask(x[0], x[1], x[2]) <= 1.0 \\\n and U_mask(x[0], x[1], x[2]) <= 0.0 and on_boundary\n gamma_u_gnd = GAMMA_U_GND()\n\n class GAMMA_U_FLT(SubDomain):\n def inside(self, x, on_boundary):\n return abs(x[2] - S(x[0], x[1], x[2])) < tol \\\n and mask(x[0], x[1], x[2]) > 1.0 \\\n and U_mask(x[0], x[1], x[2]) <= 0.0 and on_boundary\n gamma_u_flt = GAMMA_U_FLT()\n\n class GAMMA_B_GND(SubDomain):\n def inside(self, x, on_boundary):\n return abs(x[2] - B(x[0], x[1], x[2])) < tol \\\n and mask(x[0], x[1], x[2]) <= 1.0 and on_boundary\n gamma_b_gnd = GAMMA_B_GND()\n\n class GAMMA_B_FLT(SubDomain):\n def inside(self, x, on_boundary):\n return abs(x[2] - B(x[0], x[1], x[2])) < tol \\\n and mask(x[0], x[1], x[2]) > 1.0 and on_boundary\n gamma_b_flt = GAMMA_B_FLT()\n\n class GAMMA_L_OVR(SubDomain):\n def inside(self, x, on_boundary):\n return x[2] > -10 and x[2] < S(x[0], x[1], x[2]) - tol and on_boundary\n gamma_l_ovr = GAMMA_L_OVR()\n\n class GAMMA_L_UDR(SubDomain):\n def inside(self, x, on_boundary):\n return x[2] < 10 and x[2] < S(x[0], x[1], x[2]) - tol and on_boundary\n gamma_l_udr = GAMMA_L_UDR()\n\n class GAMMA_L_TRM(SubDomain):\n def inside(self, x, on_boundary):\n return lat_mask(x[0], x[1], x[2]) <= 0.0\n gamma_l_trm = GAMMA_L_TRM()\n\n 
gamma_s_flt.mark(self.ff, self.GAMMA_S_FLT)\n gamma_s_gnd.mark(self.ff, self.GAMMA_S_GND)\n gamma_l_ovr.mark(self.ff, self.GAMMA_L_OVR)\n gamma_l_udr.mark(self.ff, self.GAMMA_L_UDR)\n gamma_b_flt.mark(self.ff, self.GAMMA_B_FLT)\n gamma_b_gnd.mark(self.ff, self.GAMMA_B_GND)\n #gamma_u_flt.mark(self.ff, self.GAMMA_U_FLT)\n #gamma_u_gnd.mark(self.ff, self.GAMMA_U_GND)\n #if mark_divide: \n # gamma_l_trm.mark(self.cf, 1)\n \n else :\n s = \" - not using a lateral surface mesh - \"\n print_text(s, cls=self)\n\n class GAMMA_GND(SubDomain):\n def inside(self, x, on_boundary):\n return mask(x[0], x[1], x[2]) <= 1.0\n gamma_gnd = GAMMA_GND()\n\n class GAMMA_FLT(SubDomain):\n def inside(self, x, on_boundary):\n return mask(x[0], x[1], x[2]) > 1.0\n gamma_flt = GAMMA_FLT()\n\n class GAMMA_L_OVR(SubDomain):\n def inside(self, x, on_boundary):\n return x[2] > -10 and on_boundary\n gamma_l_ovr = GAMMA_L_OVR()\n\n class GAMMA_L_UDR(SubDomain):\n def inside(self, x, on_boundary):\n return x[2] < 10 and on_boundary\n gamma_l_udr = GAMMA_L_UDR()\n\n gamma_flt.mark(self.cf, 1)\n gamma_gnd.mark(self.cf, 0)\n gamma_l_ovr.mark(self.ff, self.GAMMA_L_OVR)\n gamma_l_udr.mark(self.ff, self.GAMMA_L_UDR)\n \n # mark the divide if desired : \n if mark_divide:\n class GAMMA_L_DVD(SubDomain):\n def inside(self, x, on_boundary):\n return lat_mask(x[0], x[1], x[2]) <= 0.0 and on_boundary\n gamma_l_dvd = GAMMA_L_DVD()\n gamma_l_dvd.mark(self.ff, self.GAMMA_L_DVD)\n \n s = \" - done - \"\n print_text(s, cls=self)\n \n #s = \" - iterating through %i cells - \" % self.num_cells\n #print_text(s, cls=self)\n #for c in cells(self.mesh):\n # x_m = c.midpoint().x()\n # y_m = c.midpoint().y()\n # z_m = c.midpoint().z()\n # mask_xy = mask(x_m, y_m, z_m)\n\n # if mask_xy > 1:\n # self.cf[c] = 1\n # else:\n # self.cf[c] = 0\n\n #s = \" - done - \"\n #print_text(s, cls=self)\n\n self.ds = Measure('ds')[self.ff]\n self.dx = Measure('dx')#[self.cf]\n \n self.dx_g = self.dx(0) # internal above grounded\n self.dx_f = self.dx(1) # internal above floating\n self.dBed_g = self.ds(3) # grounded bed\n self.dBed_f = self.ds(5) # floating bed\n self.dBed = self.ds(3) + self.ds(5) # bed\n self.dSrf_gu = self.ds(8) # grounded with U observations\n self.dSrf_fu = self.ds(9) # floating with U observations\n self.dSrf_u = self.ds(8) + self.ds(9) # surface with U observations\n self.dSrf_g = self.ds(2) + self.ds(8) # surface of grounded ice\n self.dSrf_f = self.ds(6) + self.ds(9) # surface of floating ice\n self.dSrf = self.ds(6) + self.ds(2) \\\n + self.ds(8) + self.ds(9) # surface\n self.dLat_d = self.ds(7) # lateral divide\n self.dLat_to = self.ds(4) # lateral terminus overwater\n self.dLat_tu = self.ds(10) # lateral terminus underwater\n self.dLat_t = self.ds(4) + self.ds(10) # lateral terminus\n self.dLat = self.ds(4) + self.ds(7) \\\n + self.ds(10) # lateral", "def openFullProfFile(self, filename):\n handle = open(filename)\n lines = handle.readlines()\n handle.close()\n atoms = []\n bonds = []\n conns = []\n for line in lines:\n if line[0:4] == \"CELL\":\n #format of line: CELL a b c alpha beta gamma\n vals = line.split()\n print vals\n a = float(vals[1])\n b = float(vals[2])\n c = float(vals[3])\n alpha = float(vals[4])\n gamma = float(vals[5])\n beta = float(vals[6])\n elif line[0:6] == \"SPACEG\":\n #this is the space group in Hermann-Mauguin notation.\n hm_spacegroup = (line[6:]).strip().upper()\n space_group = GetSpaceGroup(hm_spacegroup)\n elif line[0:3] == \"BOX\":\n #Format: xmin xmax ymin ymax zmin zmax\n #In this program, however, 
xmin, ymin, zmin = 0,0,0 always.\n vals = line.split()\n a_diff = float(vals[2]) - float(vals[1])\n b_diff = float(vals[4]) - float(vals[3])\n c_diff = float(vals[6]) - float(vals[5])\n a_cutoff = int(a_diff)\n b_cutoff = int(b_diff)\n c_cutoff = int(c_diff)\n if a_diff - a_cutoff > 0:\n a_cutoff += 1\n if b_diff - b_cutoff > 0:\n b_cutoff += 1\n if c_diff - c_cutoff > 0:\n c_cutoff += 1\n elif line[0:4] == \"ATOM\":\n vals = line.split()\n label = vals[1]\n symbol = vals[2]\n a_coord = float(vals[3])\n b_coord = float(vals[4])\n c_coord = float(vals[5])\n position = (a_coord, b_coord, c_coord)\n #Get the radius which is right after the word \"RADIUS\"\n for i in range(len(vals)):\n if vals[i] == \"RADIUS\":\n radius = float(vals[i+1])\n break\n else:\n radius = None\n #Get the color which is right after the word \"COLOR\"\n for i in range(len(vals)):\n if vals[i] == \"COLOR\":\n color = [float(vals[i+1]), float(vals[i+2]), float(vals[i+3])]\n break\n else:\n color = None\n #atomData format (each line):\n #label massNum aPos bPos cPos anisotropy_a anisotropy_b anistropy_c spin valence\n atoms.append([label, symbol, position, radius, color])\n elif line[0:4] == \"BOND\":\n #Format: BOND label1 label2 min_dist max_dist RADIUS rad COLOR r g b t\n #The color and radius need not be there and will be ignored for now since\n #the color and radius of bonds is hardcoded in right now.\n vals = line.split()\n bonds.append([vals[1], vals[2], vals[3], vals[4]])\n elif line[0:4] == \"CONN\":\n #Format: BOND symbol1 symbol2 min_dist max_dist RADIUS rad COLOR r g b t\n #The color and radius need not be there and will be ignored for now since\n #the color and radius of bonds is hardcoded in right now.\n vals = line.split()\n conns.append([vals[1], vals[2], vals[3], vals[4]])\n \n \n self.newCell(space_group.number, a, b, c, alpha, beta, gamma, 1, 1, 1,\n a_cutoff, b_cutoff, c_cutoff)\n \n for atom in atoms:\n #FPStudio does not seem to support isotopes\n massNum = None\n self.addAtom(atom[1], atom[2], massNum = massNum, radius = atom[3], rgb = atom[4])\n \n for bond in bonds:\n self.createBonds(label1 = bonds[0], label2 = bonds[1],\n minDist = bonds[2], maxDist = bonds[3])\n for conn in conns:\n self.createBonds(symbol1 = conns[0], symbol2 = conns[1],\n minDist = conns[2], maxDist = conns[3])\n \n self.refreshGUI()\n #self.cellChange(space_group.number, a, b, c, alpha, beta, gamma, magNa = 1, magNb = 1, magNc = 1, cutNa = a_cutoff, cutNb = b_cutoff, cutNc = c_cutoff, atomData = atoms)\n #self.updateCell(space_group.number, a, b, c, alpha, beta, gamma, magNa = 1, magNb = 1, magNc = 1, cutNa = a_cutoff, cutNb = b_cutoff, cutNc = c_cutoff, atomData = atoms)\n #self.refreshGUI()\n \n #send signal to the cell window to show the info that has been loaded and to vtkWindow to draw it\n send(signal = \"File Load\", sender = \"Session\",\n spaceGroup = space_group.number, a = a, b = b, c = c,\n alpha = alpha, beta = beta, gamma = gamma, magNa = a_cutoff,\n magNb = b_cutoff, magNc = c_cutoff, cutNa = a_cutoff,\n cutNb = b_cutoff, cutNc = c_cutoff)\n \n \n #TODO: use these values extracted. 
You could combine the three file opening functions.\n #Each function would have to extract values form it's format and then a single function\n #could be used for all three to construct the model from the extracted values.e", "def process_latlon(self):\n data = self.unixtext.replace(\"\\n\", \" \")\n search = LAT_LON_PREFIX.search(data)\n if search is None:\n return None\n pos = search.start()\n newdata = data[pos+9:]\n # Go find our next non-digit, non-space character, if we find it, we\n # should truncate our string, this could be improved, I suspect\n search = re.search(r\"[^\\s0-9]\", newdata)\n if search is not None:\n pos2 = search.start()\n newdata = newdata[:pos2]\n\n poly = str2polygon(newdata)\n if poly is None:\n return None\n\n # check 0, PGUM polygons are east longitude akrherz/pyIEM#74\n if self.tp.source == 'PGUM':\n newpts = [[0 - pt[0], pt[1]] for pt in poly.exterior.coords]\n poly = Polygon(newpts)\n\n # check 1, is the polygon valid?\n if not poly.is_valid:\n self.tp.warnings.append(\n (\"LAT...LON polygon is invalid!\\n%s\") % (poly.exterior.xy,))\n return\n # check 2, is the exterior ring of the polygon clockwise?\n if poly.exterior.is_ccw:\n self.tp.warnings.append(\n (\"LAT...LON polygon exterior is CCW, reversing\\n%s\"\n ) % (poly.exterior.xy,))\n poly = Polygon(zip(poly.exterior.xy[0][::-1],\n poly.exterior.xy[1][::-1]))\n self.giswkt = 'SRID=4326;%s' % (dumps(MultiPolygon([poly]),\n rounding_precision=6),)\n return poly", "def readSurfaceGeo(b18path):\n if not os.path.isfile(b18path):\n print(\"b18 building file not found! Please check!\")\n pass\n else:\n b18file = open(b18path,\"r\")\n b18data = b18file.readlines()\n srfGeoBlock = getDataParagraph(\"_EXTENSION_BuildingGeometry_START_\", \"_EXTENSION_BuildingGeometry_END_\", b18data)\n #now get vertex's coordinate xyz\n vertexdict = dict() #{vertexID:[x,y,z]}\n srfbasicinfo = dict() #{surfaceID:[vertexID]}\n srfInfo = dict() #{surfaceID:[vertices coordinate]}\n for line in srfGeoBlock:\n dline = line.split()\n if \"vertex\" in dline:\n vertexdict[int(dline[1])] = [float(xyz) for xyz in dline[2:]] #{vertexID:[x,y,z]}\n if \"wall\" in dline or \"window\" in dline or \"floor\" in dline or \"ceiling\" in dline or \"roof\" in dline:\n srfbasicinfo[int(dline[1])] = [[int(nrID) for nrID in dline[2:]],dline[0]] #{surfaceID:[[vertexID],construction]}\n #print srfbasicinfo[int(dline[1])]\n for key in srfbasicinfo.keys():\n srfInfo[key] = []\n for vertices in srfbasicinfo[key][0]:\n srfInfo[key].append(vertexdict[vertices])\n b18file.close()\n return srfInfo,vertexdict,srfbasicinfo\n #actually only need srfInfo\n #just getting everything out for now, incase will need to use those", "def read_processed_bills(file, multi_index=True, dtype=None):\n if multi_index:\n header = [0, 1]\n else:\n header = None\n\n # Define dtypes for all possible (level 0) columns\n dtype = {'cis': str,\n 'kWh': np.float64,\n 'kWhOn': np.float64,\n 'kWhSemi': np.float64,\n 'kWhOff': np.float64,\n 'kW': np.float64,\n 'kWOn': np.float64,\n 'kWSemi': np.float64,\n 'billAmnt': np.float64,\n 'Therms': np.float64,\n 'EUI_elec': np.float64,\n 'EUI_gas': np.float64,\n 'EUI_tot': np.float64,\n 'EUI_tot_mo_avg_2009_2015': np.float64,\n 'EUI_tot_mo_avg_2013_2015': np.float64,\n 'EUI_elec_mo_avg_2009_2015': np.float64,\n 'EUI_elec_mo_avg_2013_2015': np.float64,\n 'EUI_gas_mo_avg_2009_2015': np.float64,\n 'EUI_gas_mo_avg_2013_2015': np.float64,\n 'summary': np.float64}\n # Define all possible (level 1) columns under cis to be converted to float\n col_to_float 
= ['Longitude', 'Latitude',\n 'year_built', 'year_renovated',\n 'Vacancy %', 'Number Of Stories',\n 'building_area', 'land_area']\n # Define all possible (level 1) columns under cis to be converted to\n # datetime\n col_to_time = ['date_transfer']\n # Define all possible (level 1) columns under cis to be converted to\n # boolean\n col_to_bool = ['range_address_ind']\n\n # Read file\n df = pd.read_csv(file, header=header, dtype=dtype)\n\n # Convert (level 1) columns to float\n for col in col_to_float:\n full_col = ('cis', col)\n if full_col in df:\n df.loc[:, full_col] = df.loc[:, full_col].astype(np.float64)\n # Convert (level 1) columns to datetime\n for col in col_to_time:\n full_col = ('cis', col)\n if full_col in df:\n df.loc[:, full_col] = pd.to_datetime(df.loc[:, full_col],\n format='%Y-%m-%d')\n # Convert (level 1) columns to boolean\n for col in col_to_bool:\n full_col = ('cis', col)\n if full_col in df:\n df.loc[:, full_col] = df.loc[:, full_col].astype(bool)\n\n return df", "def calculate_bin_edges(n_bins, geo):\n #Gefittete offsets: x,y,factor: factor*(x+x_off)\n #[6.19, 0.064, 1.0128]\n \n #print \"Reading detector geometry in order to calculate the detector dimensions from file \" + fname_geo_limits\n #geo = np.loadtxt(fname_geo_limits)\n\n # derive maximum and minimum x,y,z coordinates of the geometry input [[first_OM_id, xmin, ymin, zmin], [last_OM_id, xmax, ymax, zmax]]\n geo_limits = np.nanmin(geo, axis = 0), np.nanmax(geo, axis = 0)\n #print ('Detector dimensions [[first_OM_id, xmin, ymin, zmin], [last_OM_id, xmax, ymax, zmax]]: ' + str(geo_limits))\n\n x_bin_edges = np.linspace(geo_limits[0][1] - 9.95, geo_limits[1][1] + 9.95, num=n_bins[0] + 1) #try to get the lines in the bin center 9.95*2 = average x-separation of two lines\n y_bin_edges = np.linspace(geo_limits[0][2] - 9.75, geo_limits[1][2] + 9.75, num=n_bins[1] + 1) # Delta y = 19.483\n z_bin_edges = np.linspace(geo_limits[0][3] - 4.665, geo_limits[1][3] + 4.665, num=n_bins[2] + 1) # Delta z = 9.329\n\n #offset_x, offset_y, scale = [6.19, 0.064, 1.0128]\n #x_bin_edges = (x_bin_edges + offset_x )*scale\n #y_bin_edges = (y_bin_edges + offset_y )*scale\n\n #calculate_bin_edges_test(geo, y_bin_edges, z_bin_edges) # test disabled by default. 
Activate it, if you change the offsets in x/y/z-bin-edges\n\n return x_bin_edges, y_bin_edges, z_bin_edges", "def __init__(\n self,\n feature_type: str,\n location: Union[CompoundLocation, FeatureLocation] = None,\n left_right: Tuple[int, int] = None,\n strand: int = None,\n reference_sequence: Seq = None,\n name: str = None\n ):\n\n # make sure that EITHER range OR location was used, NOT both\n if left_right is not None:\n if not isinstance(left_right, tuple):\n raise TypeError(f'Range parameter must be a tuple of 2 integers for {name}')\n elif len(left_right) != 2:\n raise ValueError(f'Range parameter must be a tuple of length 2 for {name}')\n elif not is_int(left_right[0]) or not is_int(left_right[1]):\n raise TypeError(f'Start and end values for range parameter must be integers for {name}')\n elif left_right[0] > left_right[1]:\n raise ValueError(f'First position of range tuple must be <= to second position for {name}')\n elif location is not None:\n raise ValueError(f'Use either the range + strand parameters, or the location parameter for {name}')\n else:\n start, end = left_right\n # FeatureLocation uses 0-indexing; also wants base ints; FeatureLocation will throw an error if strand\n # is not +1, -1, or None, let it do that\n start, end = int(start), int(end)\n self.location = FeatureLocation(start - 1, end, strand)\n elif location is not None:\n if not isinstance(location, FeatureLocation) and not isinstance(location, CompoundLocation):\n raise TypeError(f'Location parameter must be Biopython object for {name}')\n else:\n self.location = location\n else:\n self.location = None\n\n self.type = feature_type\n self.name = name\n\n self._set_sequence(reference_sequence=reference_sequence)", "def compute_bb(self) -> List[float]:\n all_shapes = list(self.parts.values())\n bbox_vertices = unary_union(all_shapes).envelope.exterior.coords.xy\n min_x = min(bbox_vertices[0])\n max_x = max(bbox_vertices[0])\n min_y = min(bbox_vertices[1])\n max_y = max(bbox_vertices[1])\n return [min_x, max_x, min_y, max_y]", "def _detect_branching(\n self,\n Dseg: np.ndarray,\n tips: np.ndarray,\n seg_reference=None,\n ) -> Tuple[\n List[np.ndarray],\n List[np.ndarray],\n List[List[int]],\n List[List[int]],\n int,\n ]:\n if self.flavor == 'haghverdi16':\n ssegs = self._detect_branching_single_haghverdi16(Dseg, tips)\n elif self.flavor == 'wolf17_tri':\n ssegs = self._detect_branching_single_wolf17_tri(Dseg, tips)\n elif self.flavor == 'wolf17_bi' or self.flavor == 'wolf17_bi_un':\n ssegs = self._detect_branching_single_wolf17_bi(Dseg, tips)\n else:\n raise ValueError(\n '`flavor` needs to be in {\"haghverdi16\", \"wolf17_tri\", \"wolf17_bi\"}.'\n )\n # make sure that each data point has a unique association with a segment\n masks = np.zeros((len(ssegs), Dseg.shape[0]), dtype=bool)\n for iseg, seg in enumerate(ssegs):\n masks[iseg][seg] = True\n nonunique = np.sum(masks, axis=0) > 1\n ssegs = []\n for iseg, mask in enumerate(masks):\n mask[nonunique] = False\n ssegs.append(np.arange(Dseg.shape[0], dtype=int)[mask])\n # compute new tips within new segments\n ssegs_tips = []\n for inewseg, newseg in enumerate(ssegs):\n if len(np.flatnonzero(newseg)) <= 1:\n logg.warning(f'detected group with only {np.flatnonzero(newseg)} cells')\n secondtip = newseg[np.argmax(Dseg[tips[inewseg]][newseg])]\n ssegs_tips.append([tips[inewseg], secondtip])\n undecided_cells = np.arange(Dseg.shape[0], dtype=int)[nonunique]\n if len(undecided_cells) > 0:\n ssegs.append(undecided_cells)\n # establish the connecting points with the 
other segments\n ssegs_connects = [[], [], [], []]\n for inewseg, newseg_tips in enumerate(ssegs_tips):\n reference_point = newseg_tips[0]\n # closest cell to the new segment within undecided cells\n closest_cell = undecided_cells[\n np.argmin(Dseg[reference_point][undecided_cells])\n ]\n ssegs_connects[inewseg].append(closest_cell)\n # closest cell to the undecided cells within new segment\n closest_cell = ssegs[inewseg][\n np.argmin(Dseg[closest_cell][ssegs[inewseg]])\n ]\n ssegs_connects[-1].append(closest_cell)\n # also compute tips for the undecided cells\n tip_0 = undecided_cells[\n np.argmax(Dseg[undecided_cells[0]][undecided_cells])\n ]\n tip_1 = undecided_cells[np.argmax(Dseg[tip_0][undecided_cells])]\n ssegs_tips.append([tip_0, tip_1])\n ssegs_adjacency = [[3], [3], [3], [0, 1, 2]]\n trunk = 3\n elif len(ssegs) == 3:\n reference_point = np.zeros(3, dtype=int)\n reference_point[0] = ssegs_tips[0][0]\n reference_point[1] = ssegs_tips[1][0]\n reference_point[2] = ssegs_tips[2][0]\n closest_points = np.zeros((3, 3), dtype=int)\n # this is another strategy than for the undecided_cells\n # here it's possible to use the more symmetric procedure\n # shouldn't make much of a difference\n closest_points[0, 1] = ssegs[1][\n np.argmin(Dseg[reference_point[0]][ssegs[1]])\n ]\n closest_points[1, 0] = ssegs[0][\n np.argmin(Dseg[reference_point[1]][ssegs[0]])\n ]\n closest_points[0, 2] = ssegs[2][\n np.argmin(Dseg[reference_point[0]][ssegs[2]])\n ]\n closest_points[2, 0] = ssegs[0][\n np.argmin(Dseg[reference_point[2]][ssegs[0]])\n ]\n closest_points[1, 2] = ssegs[2][\n np.argmin(Dseg[reference_point[1]][ssegs[2]])\n ]\n closest_points[2, 1] = ssegs[1][\n np.argmin(Dseg[reference_point[2]][ssegs[1]])\n ]\n added_dist = np.zeros(3)\n added_dist[0] = (\n Dseg[closest_points[1, 0], closest_points[0, 1]]\n + Dseg[closest_points[2, 0], closest_points[0, 2]]\n )\n added_dist[1] = (\n Dseg[closest_points[0, 1], closest_points[1, 0]]\n + Dseg[closest_points[2, 1], closest_points[1, 2]]\n )\n added_dist[2] = (\n Dseg[closest_points[1, 2], closest_points[2, 1]]\n + Dseg[closest_points[0, 2], closest_points[2, 0]]\n )\n trunk = np.argmin(added_dist)\n ssegs_adjacency = [\n [trunk] if i != trunk else [j for j in range(3) if j != trunk]\n for i in range(3)\n ]\n ssegs_connects = [\n [closest_points[i, trunk]]\n if i != trunk\n else [closest_points[trunk, j] for j in range(3) if j != trunk]\n for i in range(3)\n ]\n else:\n trunk = 0\n ssegs_adjacency = [[1], [0]]\n reference_point_in_0 = ssegs_tips[0][0]\n closest_point_in_1 = ssegs[1][\n np.argmin(Dseg[reference_point_in_0][ssegs[1]])\n ]\n reference_point_in_1 = closest_point_in_1 # ssegs_tips[1][0]\n closest_point_in_0 = ssegs[0][\n np.argmin(Dseg[reference_point_in_1][ssegs[0]])\n ]\n ssegs_connects = [[closest_point_in_1], [closest_point_in_0]]\n return ssegs, ssegs_tips, ssegs_adjacency, ssegs_connects, trunk", "def parse(self, conf):\n boundaries = process_args(conf,\n factory=self.factory,\n str_keys=['type', 'boundary_type'])\n\n for b in boundaries.values():\n for k, v in b.items():\n if isinstance(v, dict) and 'type' in v:\n f_type = v.pop('type')\n func = self.factory.create_function(f_type, **v)\n b[k] = func\n\n self.bcs = list(boundaries.values())", "def PrepareBoundaries(self):\r\n \r\n self.maxleft = self.currentpoint[0]\r\n self.maxright = self.currentpoint[0]\r\n self.maxtop = self.currentpoint[1]\r\n self.maxbottom = self.currentpoint[1]\r\n for element in self.finalstring:\r\n if element == '+':\r\n self.currentheading += self.angle\r\n 
elif element == '-':\r\n self.currentheading -= self.angle\r\n elif element == 'F':\r\n if self.maxleft > self.currentpoint[0]:\r\n self.maxleft = self.currentpoint[0]\r\n if self.maxright < self.currentpoint[0]:\r\n self.maxright = self.currentpoint[0]\r\n if self.maxbottom > self.currentpoint[1]:\r\n self.maxbottom = self.currentpoint[1]\r\n if self.maxtop < self.currentpoint[1]:\r\n self.maxtop = self.currentpoint[1]\r\n \r\n \r\n self.currentpoint = self.NextPoint(self.currentpoint, self.length, self.currentheading)\r\n \r\n \r\n \r\n elif element == '[':\r\n self.stack.append([self.currentpoint[0], self.currentpoint[1], self.currentheading])\r\n elif element == ']':\r\n popped = self.stack.pop()\r\n self.currentheading = popped.pop()\r\n self.currentpoint = popped\r\n \r\n #Yes, for the special case where the last point is actually a boundary, we need to do this post-check\r\n if self.maxleft > self.currentpoint[0]:\r\n self.maxleft = self.currentpoint[0]\r\n if self.maxright < self.currentpoint[0]:\r\n self.maxright = self.currentpoint[0]\r\n if self.maxbottom > self.currentpoint[1]:\r\n self.maxbottom = self.currentpoint[1]\r\n if self.maxtop < self.currentpoint[1]:\r\n self.maxtop = self.currentpoint[1] \r\n \r\n #After parsing the string, we set the heading and currentpoint back to their original values.\r\n self.currentheading = 0\r\n self.currentpoint = self.startingpoint", "def _add_boundaries(self):\n logger.info(\"add_boundaries: start of method\")\n\n import scipy.spatial as sptl\n import scipy.sparse as sprs\n Lx = self._Lx\n Ly = self._Ly\n Lz = self._Lz\n Np = self.num_pores()\n btype = self._btype\n boffset = 0.05\n\n #Translate internal pores to each face of domain\n poffset = np.zeros((7,3))\n poffset[[2,5],0] = [-Lx, Lx]\n poffset[[3,4],1] = [-Ly, Ly]\n poffset[[1,6],2] = [-Lz, Lz]\n pcoords = pcoords0 = self['pore.coords']\n for i in np.r_[1:7]:\n pcoords = np.concatenate((pcoords,pcoords0 + poffset[i,:]),axis=0)\n\n #Use some twisted logic to get bval list of + for boundary and - for periodic faces\n bval = [0, 1, 2, 3, 4, 5, 6]*(np.array([0, btype[2], btype[0], btype[1], btype[1], btype[0], btype[2]])*-2+1)\n ptype = np.zeros((Np,),dtype=int)\n for i in np.r_[1:7]:\n ptype = np.concatenate((ptype,np.ones((Np,),dtype=int)*bval[i]),axis=0)\n\n #pnum contains the internal ID number of the boundary pores (for connecting periodic points)\n pnum = self.pores()\n pnum = np.tile(pnum,7)\n\n Tri = sptl.Delaunay(pcoords)\n adjmat = sprs.lil_matrix((np.shape(pcoords)[0],np.shape(pcoords)[0]),dtype=int)\n for i in np.arange(0,np.shape(Tri.simplices)[0]):\n #Keep only simplices that are fully in real domain\n adjmat[Tri.simplices[i],Tri.simplices[i]] = 1\n adjmat = sprs.triu(adjmat,k=1,format=\"lil\")\n for i in np.arange(0,Np):\n #Add periodic throats to the netowrk (if any)\n tpore2 = pnum[adjmat.rows[i]][ptype[adjmat.rows[i]]<0]\n tpore1 = np.ones_like(tpore2,dtype=int)*i\n conns = self['throat.conns']\n conns = np.concatenate((conns,np.vstack((tpore1,tpore2)).T),axis=0)\n #Add boundary pores and throats to the network\n newporetyps = np.unique(ptype[adjmat.rows[i]][ptype[adjmat.rows[i]]>0])\n newporenums = np.r_[self.num_pores():self.num_pores()+np.size(newporetyps)]\n tpore2 = newporenums\n tpore1 = np.ones_like(tpore2,dtype=int)*i\n conns = np.concatenate((conns,np.vstack((tpore1,tpore2)).T),axis=0)\n self['throat.conns'] = conns\n bcoords = np.zeros((7,3),dtype=float)\n coords = self['pore.coords']\n bcoords[1,:] = [coords[i,0], coords[i,1], 0-Lz*boffset]\n bcoords[2,:] 
= [0-Lx*boffset, coords[i,1], coords[i,2]]\n bcoords[3,:] = [coords[i,0], -Ly*boffset, coords[i,2]]\n bcoords[4,:] = [coords[i,0], Ly+Ly*boffset, coords[i,2]]\n bcoords[5,:] = [Lx+Lx*boffset, coords[i,1], coords[i,2]]\n bcoords[6,:] = [coords[i,0], coords[i,1], Lz+Lz*boffset]\n newporecoords = bcoords[newporetyps,:]\n coords = np.concatenate((coords,newporecoords),axis=0)\n self['pore.coords'] = coords\n #Reset number of pores and throats (easier than tracking it)\n nums = np.r_[0:np.shape(coords)[0]]\n self['pore.numbering'] = nums\n self['pore.numbering'] = np.ones((nums[-1]+1,),dtype=bool)\n nums = np.r_[0:np.shape(conns)[0]]\n self['throat.numbering'] = nums\n self['throat.numbering'] = np.ones((nums[-1]+1,),dtype=bool)\n logger.debug(\"add_boundaries: end of method\")", "def compute_bb(self):\n all_shapes = list(self.parts.values()) + list(self.edges.values())\n bbox_vertices = cascaded_union(all_shapes).envelope.exterior.coords.xy\n min_x = min(bbox_vertices[0])\n max_x = max(bbox_vertices[0])\n min_y = min(bbox_vertices[1])\n max_y = max(bbox_vertices[1])\n return [min_x, max_x,min_y, max_y]", "def read_cbf_file(inputfilename):\n \n \n with open(inputfilename, 'rb') as fid:\n BD = np.fromfile(fid, np.float64)\n \n # https://www.mathworks.com/help/matlab/ref/fwrite.html\n # https://www.mathworks.com/help/matlab/numeric-types.html\n \n k=0;\n # Static data (100 places)\n SD=BD[k:k+100]\n k=k+100\n # Priors (50 places)\n PR=BD[k:k+50];\n k=k+50;\n # Priorunc (50 places)\n PRU=BD[k:k+50]\n k=k+50 \n \n # O. Priors (50 places)\n OPR=BD[k:k+50]\n k=k+50\n # O. Priorunc (50 places)\n OPRU=BD[k:k+50]\n k=k+50\n \n CBF = {}\n CBF['PARPRIORS'] = np.expand_dims(PR,axis=1)\n CBF['PARPRIORUNC'] = np.expand_dims(PRU,axis=1)\n CBF=read_other_obs_constraints(CBF,OPR,OPRU)\n \n CBF['ID'] = SD[0] # ID (not used)\n CBF['LAT'] = SD[1] # Latitude\n CBF['nodays'] = int(SD[2]) # Number of days\n CBF['nomet'] = int(SD[3])\n CBF['noobs'] =int(SD[4])\n CBF['EDC'] = SD[5]\n CBF['EDCDIAG'] = SD[6]\n# CBF = {'PARPRIORS':np.expand_dims(PR,axis=1), \n# 'PARPRIORUNC':np.expand_dims(PRU,axis=1), \n# 'OTHERPRIORS':np.expand_dims(OPR,axis=1), #\n# 'OTHERPRIORSUNC':np.expand_dims(OPRU,axis=1),\n# 'ID':SD[0], # ID (not used)\n# 'LAT':SD[1], # Latitude\n# 'nodays':int(SD[2]), # Number of days\n# 'nomet':int(SD[3]), \n# 'noobs':int(SD[4]),\n# 'EDC':SD[5],\n# 'EDCDIAG':SD[6],\n# 'gppabs':SD[7],\n# 'rc_random_search':SD[10]==1,\n# 'nbe_annual_unc':SD[13],\n# 'etiav':SD[14],\n# 'nbe_seasonal_unc':SD[15]}\n \n #MCMC start searching EDCs from anywhere (1) or from prescribed starting\n #point(0). 
this is outdated - consider deleting\n CBF['rc_random_search'] = SD[10]==1\n \n #NEE IAV options\n CBF=read_obs_uncertainty_fields(CBF,SD,OPRU)\n \n \n TEMPDATA=BD[k:k+(CBF['nomet']+CBF['noobs'])*CBF['nodays']].reshape(CBF['nodays'],\n (CBF['nomet']+CBF['noobs']))\n #All met data\n CBF['MET'] = TEMPDATA[0:CBF['nodays'],0:CBF['nomet']] # Add in new meteorology here\n# CBF['OBS'] = TEMPDATA[0:CBF['nodays'],CBF['nomet']:]\n CBFOBS = TEMPDATA[0:CBF['nodays'],CBF['nomet']:]\n CBF=define_cbf_obs_fields(CBF,CBFOBS)\n \n #Removing redundant fields\n# CBF=rmfield(CBF,'noobs');\n# # CBF=rmfield(CBF,'nomet');\n# # CBF=rmfield(CBF,'nodays');\n \n \n # Read prescribed mean meteorology\n \n if len(BD) - (k+(CBF['nomet']+CBF['noobs'])*CBF['nodays']) == CBF['nomet'] + CBF['noobs']:\n \n kmmet= k+(CBF['nomet']+CBF['noobs'])*CBF['nodays']\n CBF['mmet'] = BD[kmmet:kmmet+CBF['nomet']]\n \n \n #Retaining \"OTHERPRIORS\" for now\n CBF['RAW'] = {}\n CBF['RAW']['OTHERPRIORS']=OPR;\n CBF['RAW']['OTHERPRIORSUNC']=OPRU;\n CBF['RAW']['info']='Raw inputs/outputs as stored in CBF binary structure';\n CBF['RAW']['details']='For completeness & development purpose only; When re-writing CBF to file, these are over-written by CBF.OBS, etc.';\n\n \n \n \n \n return CBF\n #disp(sprintf('CHECK: .cbf file \"%s\" successfully read into matlab.',filename)) ", "def test_parses_map_3(self):\n p = GPBEC()\n p.parse(\"GPBEC,220516,5130.02,N,00046.34,W,213.8,T,218.0,M,0004.6,N,EGLM,X*11\")\n\n self.assertEquals(\"GPBEC\", p.sen_type)\n self.assertEquals(\"220516\", p.timestamp)\n self.assertEquals(\"5130.02\", p.waypoint_lat)\n self.assertEquals(\"N\", p.waypoint_lat_dir)\n self.assertEquals(\"00046.34\", p.waypoint_lon)\n self.assertEquals(\"W\", p.waypoint_lon_dir)\n self.assertEquals(\"213.8\", p.bearing_true)\n self.assertEquals(\"T\", p.bearing_true_sym)\n self.assertEquals(\"218.0\", p.bearing_mag)\n self.assertEquals(\"M\", p.bearing_mag_sym)\n self.assertEquals(\"0004.6\", p.nautical_miles)\n self.assertEquals(\"N\", p.nautical_miles_sym)\n self.assertEquals(\"EGLM\", p.waypoint_id)\n self.assertEquals(\"X\", p.faa_mode)\n self.assertEquals(\"11\", p.checksum)", "def read_bonedata(self, fid):\r\n\r\n bone_count = 0\r\n lin = self.read_line(fid)\r\n while lin[0]!=':':\r\n parts = lin.split()\r\n if parts[0] == 'begin':\r\n bone_count += 1\r\n self.vertices.append(vertex(name = '', id=np.NaN,\r\n meta={'name': [],\r\n 'id': [], \r\n 'offset': [], \r\n 'orientation': [], \r\n 'axis': [0., 0., 0.], \r\n 'axis_order': [], \r\n 'C': np.eye(3), \r\n 'Cinv': np.eye(3), \r\n 'channels': [], \r\n 'bodymass': [], \r\n 'confmass': [], \r\n 'order': [], \r\n 'rot_ind': [], \r\n 'pos_ind': [], \r\n 'limits': [],\r\n 'xyz': np.array([0., 0., 0.]),\r\n 'rot': np.eye(3)}))\r\n lin = self.read_line(fid)\r\n\r\n\r\n elif parts[0]=='id':\r\n self.vertices[bone_count].id = int(parts[1])\r\n lin = self.read_line(fid)\r\n\r\n self.vertices[bone_count].children = []\r\n\r\n elif parts[0]=='name':\r\n self.vertices[bone_count].name = parts[1]\r\n lin = self.read_line(fid)\r\n\r\n\r\n elif parts[0]=='direction':\r\n direction = np.array([float(parts[1]), float(parts[2]), float(parts[3])])\r\n lin = self.read_line(fid)\r\n\r\n\r\n elif parts[0]=='length':\r\n lgth = float(parts[1])\r\n lin = self.read_line(fid)\r\n\r\n\r\n elif parts[0]=='axis':\r\n self.vertices[bone_count].meta['axis'] = np.array([float(parts[1]),\r\n float(parts[2]),\r\n float(parts[3])])\r\n # order is reversed compared to bvh\r\n self.vertices[bone_count].meta['axis_order'] = 
parts[-1][::-1].lower()\r\n lin = self.read_line(fid)\r\n\r\n elif parts[0]=='dof':\r\n order = []\r\n for i in range(1, len(parts)):\r\n if parts[i]== 'rx':\r\n chan = 'Xrotation'\r\n order.append('x')\r\n elif parts[i] =='ry':\r\n chan = 'Yrotation'\r\n order.append('y')\r\n elif parts[i] == 'rz':\r\n chan = 'Zrotation'\r\n order.append('z')\r\n elif parts[i] == 'tx':\r\n chan = 'Xposition'\r\n elif parts[i] == 'ty':\r\n chan = 'Yposition'\r\n elif parts[i] == 'tz':\r\n chan = 'Zposition'\r\n elif parts[i] == 'l':\r\n chan = 'length'\r\n self.vertices[bone_count].meta['channels'].append(chan)\r\n # order is reversed compared to bvh\r\n self.vertices[bone_count].meta['order'] = order[::-1]\r\n lin = self.read_line(fid)\r\n\r\n elif parts[0]=='limits':\r\n self.vertices[bone_count].meta['limits'] = [[float(parts[1][1:]), float(parts[2][:-1])]]\r\n\r\n lin = self.read_line(fid)\r\n\r\n while lin !='end':\r\n parts = lin.split()\r\n\r\n self.vertices[bone_count].meta['limits'].append([float(parts[0][1:]), float(parts[1][:-1])])\r\n lin = self.read_line(fid)\r\n self.vertices[bone_count].meta['limits'] = np.array(self.vertices[bone_count].meta['limits'])\r\n\r\n elif parts[0]=='end':\r\n self.vertices[bone_count].meta['offset'] = direction*lgth\r\n lin = self.read_line(fid)\r\n\r\n return lin", "def get_bbox(self):\n dimsizes = self.get_full_dimensions('lon').values()\n slices = [slice(None, None, dimsizes[0] - 1),\n slice(None, None, dimsizes[1] - 1)]\n lon = self.read_values('lon', slices=slices)\n lat = self.read_values('lat', slices=slices)\n return (lon.min(), lat.min(), lon.max(), lat.max())", "def test_parses_map_1(self):\n p = GPBEC()\n p.parse(\"$GPBEC,081837,,,,,,T,,M,,N,*13\")\n\n self.assertEquals(\"GPBEC\", p.sen_type)\n self.assertEquals(\"081837\", p.timestamp)\n self.assertEquals(\"\", p.waypoint_lat)\n self.assertEquals(\"\", p.waypoint_lat_dir)\n self.assertEquals(\"\", p.waypoint_lon)\n self.assertEquals(\"\", p.waypoint_lon_dir)\n self.assertEquals(\"\", p.bearing_true)\n self.assertEquals(\"T\", p.bearing_true_sym)\n self.assertEquals(\"\", p.bearing_mag)\n self.assertEquals(\"M\", p.bearing_mag_sym)\n self.assertEquals(\"\", p.nautical_miles)\n self.assertEquals(\"N\", p.nautical_miles_sym)\n self.assertEquals(\"\", p.waypoint_id)\n self.assertEquals(\"13\", p.checksum)", "def deserialize(self, str):\n try:\n end = 0\n _x = self\n start = end\n end += 24\n (_x.major_ax, _x.minor_ax, _x.coup_strength, _x.limit_cycle, _x.forward_velocity, _x.curvature,) = _struct_6f.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.x_offset = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.y_offset = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%si'%length\n start = end\n end += struct.calcsize(pattern)\n self.coupling_1 = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%si'%length\n start = end\n end += struct.calcsize(pattern)\n self.coupling_2 = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%si'%length\n start = end\n end += struct.calcsize(pattern)\n self.coupling_3 = 
struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%si'%length\n start = end\n end += struct.calcsize(pattern)\n self.coupling_4 = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%si'%length\n start = end\n end += struct.calcsize(pattern)\n self.coupling_5 = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%si'%length\n start = end\n end += struct.calcsize(pattern)\n self.coupling_6 = struct.unpack(pattern, str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def _readBTS(self,fname):\n with BinaryFile(fname) as f:\n #\n # read header info\n #\n if self.verbose: print('Reading header information from',fname)\n\n ID = f.read_int2()\n assert( ID==7 or ID==8 )\n if ID==7: filetype = 'non-periodic'\n elif ID==8: filetype = 'periodic'\n else: filetype = 'UNKNOWN'\n if self.verbose:\n print(' id= {:d} ({:s})'.format(ID,filetype))\n\n # - read resolution settings\n self.NZ = f.read_int4()\n self.NY = f.read_int4()\n self.Ntower = f.read_int4()\n if self.verbose:\n print(' NumGrid_Z,_Y=',self.NZ,self.NY)\n print(' ntower=',self.Ntower)\n self.N = f.read_int4()\n self.dz = f.read_float(dtype=self.realtype)\n self.dy = f.read_float(dtype=self.realtype)\n self.dt = f.read_float(dtype=self.realtype)\n self.period = self.realtype(self.N * self.dt)\n self.Nsize = 3*self.NY*self.NZ*self.N\n if self.verbose:\n print(' nt=',self.N)\n print(' (problem size: {:d} points)'.format(self.Nsize))\n print(' dz,dy=',self.dz,self.dy)\n print(' TimeStep=',self.dt)\n print(' Period=',self.period)\n\n # - read reference values\n self.uhub = f.read_float(dtype=self.realtype)\n self.zhub = f.read_float(dtype=self.realtype) # NOT USED\n self.zbot = f.read_float(dtype=self.realtype)\n if self.Umean is None:\n self.Umean = self.uhub\n if self.verbose:\n print(' Umean = uhub =',self.Umean,\n '(for calculating fluctuations)')\n else: # user-specified Umean\n if self.verbose:\n print(' Umean =',self.Umean,\n '(for calculating fluctuations)')\n print(' uhub=',self.uhub,' (NOT USED)')\n if self.verbose:\n print(' HubHt=',self.zhub,' (NOT USED)')\n print(' Zbottom=',self.zbot)\n\n # - read scaling factors\n self.Vslope = np.zeros(3,dtype=self.realtype)\n self.Vintercept = np.zeros(3,dtype=self.realtype)\n for i in range(3):\n self.Vslope[i] = f.read_float(dtype=self.realtype)\n self.Vintercept[i] = f.read_float(dtype=self.realtype)\n if self.verbose:\n # output is float64 precision by default...\n print(' Vslope=',self.Vslope)\n print(' Vintercept=',self.Vintercept)\n\n # - read turbsim info string\n nchar = f.read_int4()\n version = f.read(N=nchar)\n if self.verbose: print(version)\n\n #\n # read normalized data\n #\n # note: need to specify Fortran-order to properly read data using np.nditer\n t0 = time.process_time()\n if self.verbose: print('Reading normalized grid data')\n\n self.U = np.zeros((3,self.NY,self.NZ,self.N),order='F',dtype=self.realtype)\n self.T = np.zeros((self.N,self.NY,self.NZ))\n if self.verbose:\n print(' U size :',self.U.nbytes/1024.**2,'MB')\n\n for val in np.nditer(self.U, op_flags=['writeonly']):\n val[...] 
= f.read_int2()\n self.U = self.U.swapaxes(3,2).swapaxes(2,1) # new shape: (3,self.N,self.NY,self.NZ)\n\n if self.Ntower > 0:\n if self.verbose:\n print('Reading normalized tower data')\n self.Utow = np.zeros((3,self.Ntower,self.N),\n order='F',dtype=self.realtype)\n if self.verbose:\n print(' Utow size :',self.Utow.nbytes/1024.**2,'MB')\n for val in np.nditer(self.Utow, op_flags=['writeonly']):\n val[...] = f.read_int2()\n\n if self.verbose:\n print(' Read velocitiy fields in',time.process_time()-t0,'s')\n \n #\n # calculate dimensional velocity\n #\n if self.verbose:\n print('Calculating velocities from normalized data')\n for i in range(3):\n self.U[i,:,:,:] -= self.Vintercept[i]\n self.U[i,:,:,:] /= self.Vslope[i]\n if self.Ntower > 0:\n self.Utow[i,:,:] -= self.Vintercept[i]\n self.Utow[i,:,:] /= self.Vslope[i]\n self.U[0,:,:,:] -= self.Umean # uniform inflow w/ no shear assumed\n\n print(' u min/max [',np.min(self.U[0,:,:,:]),\n np.max(self.U[0,:,:,:]),']')\n print(' v min/max [',np.min(self.U[1,:,:,:]),\n np.max(self.U[1,:,:,:]),']')\n print(' w min/max [',np.min(self.U[2,:,:,:]),\n np.max(self.U[2,:,:,:]),']')\n\n self.scaling = np.ones((3,self.NZ))\n\n #\n # calculate coordinates\n #\n if self.verbose:\n print('Calculating coordinates')\n #self.y = -0.5*(self.NY-1)*self.dy + np.arange(self.NY,dtype=self.realtype)*self.dy\n self.y = np.arange(self.NY,dtype=self.realtype)*self.dy\n self.z = self.zbot + np.arange(self.NZ,dtype=self.realtype)*self.dz\n #self.ztow = self.zbot - np.arange(self.NZ,dtype=self.realtype)*self.dz #--NOT USED\n\n self.t = np.arange(self.N,dtype=self.realtype)*self.dt\n if self.verbose:\n print('Read times [',self.t[0],self.t[1],'...',self.t[-1],']')" ]
[ "0.52373254", "0.50964826", "0.5067136", "0.5046825", "0.5045167", "0.5020805", "0.50203526", "0.4937692", "0.49202532", "0.4911355", "0.48771298", "0.4874154", "0.48680946", "0.48605794", "0.4844378", "0.4825628", "0.47997165", "0.479658", "0.47905588", "0.4760451", "0.47603858", "0.47366378", "0.4735722", "0.4735001", "0.47173226", "0.47171032", "0.4715557", "0.470604", "0.46996891", "0.4683101" ]
0.5479175
0
Parse data from a Biopython CDS SeqFeature object into a Cds object.
def parse_cds_seqfeature(seqfeature): cds_ftr = cds.Cds() cds_ftr.seqfeature = seqfeature try: locus_tag = seqfeature.qualifiers["locus_tag"][0] except: locus_tag = "" finally: cds_ftr.set_locus_tag(locus_tag, delimiter=None) cds_ftr.set_orientation(seqfeature.strand, "fr_short", case = True) cds_ftr.start, cds_ftr.stop, cds_ftr.parts = parse_coordinates(seqfeature) # Coordinate format for GenBank flat file features parsed by Biopython # are 0-based half open intervals. cds_ftr.coordinate_format = "0_half_open" # For translation, convert it to a Biopython Seq object. try: translation = seqfeature.qualifiers["translation"][0] except: translation = "" finally: translation = Seq(translation, Alphabet.IUPAC.protein) cds_ftr.set_translation(translation) cds_ftr.set_nucleotide_length(translation=True) try: translation_table = seqfeature.qualifiers["transl_table"][0] except: translation_table = 0 finally: cds_ftr.set_translation_table(translation_table) try: product = seqfeature.qualifiers["product"][0] except: product = "" finally: cds_ftr.set_description_field("product", product) try: function = seqfeature.qualifiers["function"][0] except: function = "" finally: cds_ftr.set_description_field("function", function) try: note = seqfeature.qualifiers["note"][0] except: note = "" finally: cds_ftr.set_description_field("note", note) try: gene = seqfeature.qualifiers["gene"][0] except: gene = "" finally: cds_ftr.set_gene(gene) cds_ftr.set_name() return cds_ftr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_cds_features(self, handle, alphabet=..., tags2id=...):\n ...", "def extract_cds(seq_record):\n return [f for f in seq_record.features if f.type == \"CDS\"]", "def process_cds(cfs, ref):\n # unpack the tuple\n feat, scaffold, phase = cfs\n # First, extract the sequence of the CDS from the scaffold. This should\n # respect the strand, so we won't have to reverse-complement\n featseq = feat.extract(ref[scaffold])\n return featseq", "def to_seqfeature(feature):\n if isinstance(feature, six.string_types):\n feature = feature_from_line(feature)\n\n qualifiers = {\n \"source\": [feature.source],\n \"score\": [feature.score],\n \"seqid\": [feature.seqid],\n \"frame\": [feature.frame],\n }\n qualifiers.update(feature.attributes)\n return SeqFeature(\n # Convert from GFF 1-based to standard Python 0-based indexing used by\n # BioPython\n FeatureLocation(feature.start - 1, feature.stop),\n id=feature.id,\n type=feature.featuretype,\n strand=_biopython_strand[feature.strand],\n qualifiers=qualifiers,\n )", "def from_seqfeature(s, **kwargs):\n source = s.qualifiers.get(\"source\", \".\")[0]\n score = s.qualifiers.get(\"score\", \".\")[0]\n seqid = s.qualifiers.get(\"seqid\", \".\")[0]\n frame = s.qualifiers.get(\"frame\", \".\")[0]\n strand = _feature_strand[s.strand]\n\n # BioPython parses 1-based GenBank positions into 0-based for use within\n # Python. We need to convert back to 1-based GFF format here.\n start = s.location.start.position + 1\n stop = s.location.end.position\n featuretype = s.type\n id = s.id\n attributes = dict(s.qualifiers)\n attributes.pop(\"source\", \".\")\n attributes.pop(\"score\", \".\")\n attributes.pop(\"seqid\", \".\")\n attributes.pop(\"frame\", \".\")\n return Feature(\n seqid,\n source,\n featuretype,\n start,\n stop,\n score,\n strand,\n frame,\n attributes,\n id=id,\n **kwargs\n )", "def cds_desc(gff3, fasta):\n seqs = {}\n for defline, seq in LocusPocus.fasta.parse(fasta):\n seqid = defline[1:].split(' ')[0]\n if seqid not in seqs:\n seqs[seqid] = seq\n\n accession = ''\n cdslen = 0\n for entry in gff3:\n if '\\tCDS\\t' in entry:\n fields = entry.rstrip().split('\\t')\n assert len(fields) == 9\n accession = re.search(r'accession=([^;\\n]+)', fields[8]).group(1)\n cdslen += int(fields[4]) - int(fields[3]) + 1\n elif entry.startswith('###'):\n if accession:\n cdsseq = seqs[accession]\n if len(cdsseq) != cdslen:\n message = 'CDS for \"%s\": length mismatch' % accession\n message += ' (gff3=%d, fa=%d)' % (cdslen, len(cdsseq))\n message += '; most likely a duplicated accession'\n message += ', discarding'\n print(message, file=sys.stderr)\n else:\n gccontent = gc_content(cdsseq)\n gcskew = gc_skew(cdsseq)\n ncontent = n_content(cdsseq)\n values = '%s %d %.3f %.3f %.3f' % (\n accession, cdslen, gccontent, gcskew, ncontent)\n yield values.split(' ')\n accession = ''\n cdslen = 0", "def parse_source_seqfeature(seqfeature):\n src_ftr = source.Source()\n src_ftr.seqfeature = seqfeature\n start, stop, parts = parse_coordinates(seqfeature)\n src_ftr.start = start\n src_ftr.stop = stop\n\n try:\n src_ftr.organism = str(seqfeature.qualifiers[\"organism\"][0])\n except:\n src_ftr.organism = \"\"\n\n try:\n src_ftr.host = str(seqfeature.qualifiers[\"host\"][0])\n except:\n src_ftr.host = \"\"\n\n try:\n src_ftr.lab_host = str(seqfeature.qualifiers[\"lab_host\"][0])\n except:\n src_ftr.lab_host = \"\"\n\n src_ftr.parse_organism()\n src_ftr.parse_host()\n src_ftr.parse_lab_host()\n\n return src_ftr", "def parse_gff(g):\n # We also want to store the mRNA->gene 
information!\n mrna_par = {}\n # And the CDS->mRNA information\n cds_dat = {}\n with open(g, 'r') as f:\n for line in f:\n # if the line is empty or starts with a #, we will skip it\n if line.startswith('#') or line == '\\n':\n continue\n else:\n tmp = line.strip().split('\\t')\n feat_type = tmp[2]\n if feat_type == 'mRNA':\n meta = tmp[8].split(';')\n for m in meta:\n if m.startswith('ID='):\n tx_id = m.split('=')[1]\n if m.startswith('Parent='):\n tx_par = m.split('=')[1]\n mrna_par[tx_id] = tx_par\n elif feat_type == 'CDS':\n scaf = tmp[0]\n start = tmp[3]\n end = tmp[4]\n strand = tmp[6]\n phase = tmp[7]\n meta = tmp[8].split(';')\n for m in meta:\n if m.startswith('ID='):\n cds_id = m.split('=')[1]\n if m.startswith('Parent='):\n cds_par = m.split('=')[1]\n if strand == '-':\n strand = -1\n else:\n strand = 1\n # Watch out for transcripts where there are multiple CDS.\n # This will require a nested dictionary of lists.\n if cds_par in cds_dat:\n pass\n else:\n cds_dat[cds_par] = {}\n if cds_id in cds_dat[cds_par]:\n pass\n else:\n cds_dat[cds_par][cds_id] = []\n # We want to make a SequenceFeature for each CDS chunk\n # Keep in mind that GFF is 1-based, so we have to adjust\n # the start position!\n cds_feat = SeqFeature(\n FeatureLocation(int(start)-1, int(end), strand=strand),\n type=\"CDS\",\n id=cds_id)\n # Add some qualifiers to modify the behavior\n # Use the \"standard\" genetic code from NCBI\n cds_feat.qualifiers['transl_tabl'] = [1]\n # Then, append it into the corresponding dictionary item\n # keeping the chromosome (scaffold) name and phase with it\n cds_dat[cds_par][cds_id].append((cds_feat, scaf, phase))\n else:\n continue\n return (mrna_par, cds_dat)", "def points_to_cf(pts: xr.DataArray | Sequence):\n from shapely.geometry import MultiPoint\n\n if isinstance(pts, xr.DataArray):\n dim = pts.dims[0]\n coord = pts[dim] if dim in pts.coords else None\n pts_ = pts.values.tolist()\n else:\n dim = \"features\"\n coord = None\n pts_ = pts\n\n x, y, node_count, crdX, crdY = [], [], [], [], []\n for pt in pts_:\n if isinstance(pt, MultiPoint):\n xy = np.concatenate([p.coords for p in pt.geoms])\n else:\n xy = np.atleast_2d(pt.coords)\n x.extend(xy[:, 0])\n y.extend(xy[:, 1])\n node_count.append(xy.shape[0])\n crdX.append(xy[0, 0])\n crdY.append(xy[0, 1])\n\n ds = xr.Dataset(\n data_vars={\n \"node_count\": xr.DataArray(node_count, dims=(dim,)),\n \"geometry_container\": xr.DataArray(\n attrs={\n \"geometry_type\": \"point\",\n \"node_count\": \"node_count\",\n \"node_coordinates\": \"x y\",\n \"coordinates\": \"crd_x crd_y\",\n }\n ),\n },\n coords={\n \"x\": xr.DataArray(x, dims=(\"node\",), attrs={\"axis\": \"X\"}),\n \"y\": xr.DataArray(y, dims=(\"node\",), attrs={\"axis\": \"Y\"}),\n \"crd_x\": xr.DataArray(crdX, dims=(dim,), attrs={\"nodes\": \"x\"}),\n \"crd_y\": xr.DataArray(crdY, dims=(dim,), attrs={\"nodes\": \"y\"}),\n },\n )\n\n if coord is not None:\n ds = ds.assign_coords({dim: coord})\n\n # Special case when we have no MultiPoints\n if (ds.node_count == 1).all():\n ds = ds.drop_vars(\"node_count\")\n del ds.geometry_container.attrs[\"node_count\"]\n return ds", "def get_cds(geneid, seqdict):\n nuc_seq = seqdict[geneid]\n # Translate it\n aa_seq = nuc_seq.seq.translate()\n # Decorate it like you would a full SeqRecord object\n aa_seq_rec = SeqRecord.SeqRecord(\n aa_seq,\n id=geneid,\n description='')\n return aa_seq_rec", "def get_seqrecord_features(phage_genome):\n\n features = []\n for phage_cds in phage_genome.cds_features:\n 
features.append(phage_cds.seqfeature)\n\n return features", "def parse_gff(path):\n fasta = find_fasta(path)\n if not fasta:\n raise FileNotFoundError(f\"Could not find partner FASTA file for {path}\")\n\n # Parse FASTA and create GFFUtils database\n fasta = parse_infile(fasta, \"fasta\")\n gff = gffutils.create_db(\n str(path),\n \":memory:\",\n force=True,\n merge_strategy=\"create_unique\",\n sort_attribute_values=True\n )\n regions = find_regions(gff.directives)\n\n for record in fasta:\n # Normalise Feature location based on ##sequence-region directive.\n # Necessary for extracted GFF3 files that still store coordinates\n # relative to the entire region, not to the extracted FASTA.\n # If no sequence-region directive is found, assumes 1 (i.e. sequence start).\n cds, gene = parse_cds_features(\n gff.region(seqid=record.id, featuretype=[\"gene\", \"CDS\"]),\n regions[record.id][0] - 1 if record.id in regions else 0\n )\n if not cds:\n LOG.warning(\"Found no CDS features in %s [%s]\", record.id, path)\n record.features = sorted(\n [*gene, *merge_cds_features(cds)],\n key=lambda f: f.location.start\n )\n\n return fasta", "def combine_features(c_dat):\n # They are keyed on transcript ID\n for tx in c_dat:\n for cds in c_dat[tx]:\n cds_pieces = c_dat[tx][cds]\n # If there fewer than 2 CDS chunks, then pull the tuple out of the\n # list.\n if len(cds_pieces) < 2:\n c_dat[tx][cds] = cds_pieces[0]\n else:\n # Join pieces\n locs = []\n ph = []\n for chunk in cds_pieces:\n c_loc = FeatureLocation(\n chunk[0].location.start,\n chunk[0].location.end,\n strand=chunk[0].strand)\n locs.append(c_loc)\n ph.append(chunk[2])\n # Sort them, according to strand. We assume that a CDS is not a\n # mixed-strand feature\n if cds_pieces[0][0].strand == 1:\n locs.sort(key=lambda x: x.start)\n else:\n locs.sort(key=lambda x: x.end, reverse=True)\n # Join them into a CompoundLocation\n full_loc = CompoundLocation(locs)\n # And then overwrite the input dictionary values\n full_feat = SeqFeature(full_loc, type='CDS',\n id=cds_pieces[0][0].id)\n full_feat.qualifiers['transl_tabl'] = [1]\n # Keep the phases!\n c_dat[tx][cds] = (full_feat, cds_pieces[0][1], ph)\n return c_dat", "def parse(self, handle, do_features=...): # -> SeqRecord | None:\n ...", "def collect_data(self):\n categories = []\n list_of_feature_lists = []\n feature_sets = set()\n with open(self.csv_path, \"r\") as f:\n reader = csv.DictReader(f)\n # collecting some stats for FDR calculation:\n self.PSM_count = 0\n self.decoy_count = 0\n\n if self[\"dump_svm_matrix\"]:\n self.init_svm_matrix_dump()\n additional_matrix_info = []\n\n for i, row in enumerate(\n sorted(\n reader,\n reverse=self[\"bigger_scores_better\"],\n key=lambda d: float(d[self.col_for_sorting]),\n )\n ):\n\n features = self.row_to_features(row)\n\n if tuple(features) in feature_sets:\n continue\n feature_sets.add(tuple(features))\n\n category, psm_FDR = self.get_psm_category(row)\n\n list_of_feature_lists.append(features)\n categories.append(category)\n\n if self[\"dump_svm_matrix\"]:\n label = -1 if row_is_decoy(row) else 1\n sequence = \"{0}.{1}#{2}.{3}\".format(\n row[\"Sequence Pre AA\"].strip(),\n row[\"Sequence\"].strip(),\n row[\"Modifications\"].strip(),\n row[\"Sequence Post AA\"].strip(),\n )\n additional_matrix_info.append(\n {\n \"psm_id\": row[\"Spectrum Title\"].strip(),\n \"label\": label,\n \"scannr\": row[\"Spectrum Title\"].strip().split(\".\")[-2],\n \"peptide\": sequence,\n \"proteins\": self.parse_protein_ids(row[\"Protein ID\"]),\n }\n )\n\n if i % 1000 == 0:\n 
score_val = float(row[self.col_for_sorting])\n msg = (\n \"Generating feature matrix from input csv \"\n \"(line ~{0}) with score {1} and FDR \"\n \"{2}\".format(i, score_val, psm_FDR)\n )\n print(msg, end=\"\\r\")\n\n # All data points are collected in one big matrix, to make standardization possible\n print(\"\\nConverting feature matrix to NumPy array...\")\n X_raw = np.array(list_of_feature_lists, dtype=float)\n\n print(\"Replacing empty/NaN values with the mean of each column...\")\n self.nan_replacer = Imputer()\n self.nan_replacer.fit(X_raw)\n X_raw = self.nan_replacer.transform(X_raw)\n # Standardize input matrix to ease machine learning! Scaled data has zero mean and unit variance\n print(\"Standardizing input matrix...\")\n self.scaler = SCALER.fit(X_raw)\n self.X = self.scaler.transform(X_raw)\n self.categories = np.array(categories)\n print()\n\n if self[\"dump_svm_matrix\"]:\n print(\"Dumping SVM matrix to\", self[\"dump_svm_matrix\"])\n\n for i, matrix_row in enumerate(self.X):\n matrix_row_info = additional_matrix_info[i]\n self.dump_svm_matrix_row(\n row=list(matrix_row),\n psm_id=matrix_row_info[\"psm_id\"],\n label=matrix_row_info[\"label\"],\n scannr=matrix_row_info[\"scannr\"],\n peptide=matrix_row_info[\"peptide\"],\n proteins=matrix_row_info[\"proteins\"],\n )\n\n print(\"Dumped SVM matrix to\", self[\"dump_svm_matrix\"])\n return", "def parse_coordinates(seqfeature):\n start_position = None\n stop_position = None\n start = -1\n stop = -1\n parts = 0\n\n if (isinstance(seqfeature.location, FeatureLocation) or \\\n isinstance(seqfeature.location, CompoundLocation)):\n\n if seqfeature.strand is None:\n pass\n elif isinstance(seqfeature.location, FeatureLocation):\n parts = 1\n start_position = seqfeature.location.start\n stop_position = seqfeature.location.end\n elif isinstance(seqfeature.location, CompoundLocation):\n parts = len(seqfeature.location.parts)\n\n # Skip this compound seqfeature if it is comprised of more\n # than two features (tricky to parse).\n if parts == 2:\n\n # Retrieve compound seqfeature positions based on strand.\n if seqfeature.strand == 1:\n start_position = seqfeature.location.parts[0].start\n stop_position = seqfeature.location.parts[1].end\n elif seqfeature.strand == -1:\n start_position = seqfeature.location.parts[1].start\n stop_position = seqfeature.location.parts[0].end\n else:\n pass\n else:\n pass\n else:\n pass\n else:\n pass\n if isinstance(start_position, ExactPosition):\n start = int(start_position)\n if isinstance(stop_position, ExactPosition):\n stop = int(stop_position)\n return (start, stop, parts)", "def parser(raw_seq, date):\n taxon_id = int(raw_seq.features[0].qualifiers['db_xref'][0][6:])\n organism = raw_seq.annotations['organism']\n accession = raw_seq.annotations['accessions'][0]\n gene = []\n records = []\n frag_type = 'whole'\n begin = 1\n end = len(raw_seq)\n sequence = str(raw_seq.seq)\n name = organism\n strand = 1\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n records.append(rec)\n for i in raw_seq.features:\n if i.type == 'gene' and 'gene' in i.qualifiers:\n if i.location_operator != 'join':\n frag_type = 'gene'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n name = str(i.qualifiers['gene'][0])\n strand = str(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.location_operator == 'join':\n frag_type = 'gene'\n begin = 
int(i.sub_features[0].location.start)\n end = int(i.sub_features[0].location.end)\n name = str(i.qualifiers['gene'][0])\n strand = str(i.location.strand)\n sequence = ''\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n gene.append(rec)\n begin = int(i.sub_features[1].location.start)\n end = int(i.sub_features[1].location.end)\n sequence = ''.join([str(raw_seq.seq[begin:end]), str(raw_seq.seq[begin:end])])\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'CDS' and 'gene' in i.qualifiers:\n frag_type = 'cds'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n name = str(i.qualifiers['gene'][0]).replace(' ', '_')\n strand = str(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'tRNA' and 'gene' in i.qualifiers:\n frag_type = 'tRNA'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n if len(sequence) >= 100:\n sequence = ''\n name = str(i.qualifiers['gene'][0]).replace(' ', '_')\n strand = str(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'rRNA':\n frag_type = 'rRNA'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n name = str(i.qualifiers['product'][0]).replace(' ', '_')\n strand = str(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'exon' and 'gene' in i.qualifiers:\n frag_type = 'exon'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n if 'number' in i.qualifiers:\n name = '{0}_exon_{1}'.format(i.qualifiers['gene'][0],\n i.qualifiers['number'][0])\n else:\n name = '{0}_exon'.format(i.qualifiers['gene'][0])\n strand = int(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'intron' and 'gene' in i.qualifiers:\n frag_type = 'intron'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n strand = str(i.location.strand)\n if 'number' in i.qualifiers:\n name = '{0}_{1}_intron'.format(i.qualifiers['gene'][0],\n i.qualifiers['number'][0])\n else:\n name = '{0}_intron'.format(i.qualifiers['gene'][0])\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n\n records.append(rec)\n gene.sort(key=lambda x: x[5])\n\n for i in range(len(gene) - 1):\n frag_type = 'spacer'\n now = gene[i]\n then = gene[i + 1]\n tail = now[6] + 1\n head = then[5] - 1\n sequence = str(raw_seq.seq[tail:head])\n name = '{0}-{1}_spacer'.format(now[3], then[3])\n strand = 0\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n records.append(rec)\n records.extend(gene)\n\n database.extend(records)", "def parse_feature(self, feature_key, lines):\n ...", "def cf_to_points(ds: xr.Dataset):\n from shapely.geometry import MultiPoint, Point\n\n # Shorthand for convenience\n geo = ds.geometry_container.attrs\n\n # The features dimension name, defaults to the one of 'node_count' or the dimension of the coordinates, if present.\n feat_dim = None\n if \"coordinates\" in geo and feat_dim is None:\n xcoord_name, _ = geo[\"coordinates\"].split(\" \")\n (feat_dim,) = ds[xcoord_name].dims\n\n 
x_name, y_name = ds.geometry_container.attrs[\"node_coordinates\"].split(\" \")\n xy = np.stack([ds[x_name].values, ds[y_name].values], axis=-1)\n\n node_count_name = ds.geometry_container.attrs.get(\"node_count\")\n if node_count_name is None:\n # No node_count means all geometries are single points (node_count = 1)\n # And if we had no coordinates, then the dimension defaults to \"features\"\n feat_dim = feat_dim or \"features\"\n node_count = xr.DataArray([1] * xy.shape[0], dims=(feat_dim,))\n if feat_dim in ds.coords:\n node_count = node_count.assign_coords({feat_dim: ds[feat_dim]})\n else:\n node_count = ds[node_count_name]\n\n j = 0 # The index of the first node.\n geoms = np.empty(node_count.shape, dtype=object)\n # i is the feature index, n its number of nodes\n for i, n in enumerate(node_count.values):\n if n == 1:\n geoms[i] = Point(xy[j, :])\n else:\n geoms[i] = MultiPoint(xy[j : j + n, :])\n j += n\n\n return xr.DataArray(geoms, dims=node_count.dims, coords=node_count.coords)", "def feat_collect(infile, feat_mode):\n from analysis.seqfile_ops import load_genbank\n gb_record = load_genbank(infile)\n feat_list = gb_record.features\n collected = []\n # establish collection parameters\n types_list = feat_mode['types'] # default entry is ('CDS')\n tags_dict = feat_mode['tags'] # default is an empty dictionary\n # start collecting features\n for feature in feat_list:\n if feature.type in types_list:\n if len(tags_dict.keys()) is 0:\n collected.append(feature)\n else:\n for tag_key in tags_dict.keys():\n if tag_key in feature.qualifiers:\n feat_value = feature.qualifiers.get(tag_key)\n if feat_value[0] in tags_dict[tag_key]:\n collected.append(feature)\n else: pass\n else: pass\n else: pass\n ## consider adding some info to the log\n return collected", "def parse_dataset(self, data):\n pass", "def parse_prodigal(prodigal_record):\n\tfor rec in prodigal_record:\n\t\t# each one of these records is a feature\n\t\tm = re.match(\">?(\\S+)_(\\d+) # (\\d+) # (\\d+) # (-?\\d+) # ID=([^;]+);\", rec.description)\n\t\tif m:\n\t\t\tname, id_number, start, end, strand, prod_id = m.groups()\n\t\t\tstart = int(start)\n\t\t\tend = int(end)\n\t\t\tstrand = int(strand)\n\t\t\tlocation = SeqFeature.FeatureLocation(start, end, strand)\n\t\t\tsequence = str(rec.seq)\n\t\t\tqualifiers = {'translation': [sequence], 'prodigal_id': prod_id}\n\t\t\t# multiple features go on the same record. 
This returns the name to keep track of what goes where.\n\t\t\tfeature = SeqFeature.SeqFeature(location=location,\n\t\t\t type=\"CDS\",\n\t\t\t strand=strand,\n\t\t\t id=id_number,\n\t\t\t qualifiers=qualifiers)\n\t\t\tyield name, feature", "def parse_and_construct_graphic_annotation_sequence(ds):\r\n # Initiate output\r\n graphic_annotation_sequence = list()\r\n # Get segmentation data and flip axes\r\n pixel_array = ds.pixel_array\r\n segmentation_volume = np.transpose(pixel_array, (2, 1, 0))\r\n frame_no = 0\r\n # For each frame\r\n for frame in ds.PerFrameFunctionalGroupsSequence:\r\n # Get segmentation map\r\n segmentation_map = segmentation_volume[:,:,frame_no]\r\n frame_no += 1\r\n # Extract contours\r\n contours = measure.find_contours(segmentation_map, 0.5)\r\n if len(contours) < 1:\r\n continue\r\n graphic_object_sequence = list()\r\n # For each contour\r\n for contour in contours:\r\n # Create graphic object\r\n graphic_object = {\r\n \"GraphicAnnotationUnits\": \"PIXEL\",\r\n \"GraphicDimensions\": 2,\r\n \"NumberOfGraphicPoints\": len(contour),\r\n \"GraphicData\": contour.ravel().tolist(),\r\n \"GraphicType\": \"POLYLINE\" \r\n }\r\n graphic_object_sequence.append(graphic_object)\r\n # Create graphic annotation\r\n graphic_annotation = {\r\n \"ReferencedImageSequence\": [{\r\n \"ReferencedSOPClassUID\": frame.DerivationImageSequence[0].SourceImageSequence[0].ReferencedSOPClassUID,\r\n \"ReferencedSOPInstanceUID\": frame.DerivationImageSequence[0].SourceImageSequence[0].ReferencedSOPInstanceUID,\r\n }],\r\n \"GraphicLayer\": str(ds.SegmentSequence[frame.SegmentIdentificationSequence[0].ReferencedSegmentNumber - 1].SegmentDescription).upper(),\r\n \"GraphicObjectSequence\": graphic_object_sequence\r\n }\r\n graphic_annotation_sequence.append(graphic_annotation)\r\n return graphic_annotation_sequence", "def _process_cdss(self, prot_fasta_path):\n if self.is_metagenome:\n prot_fasta = {} # type: dict\n untranslatable_prot = set()\n for cds_id in self.cdss:\n cds = self.feature_dict[cds_id]\n try:\n prot_seq = str(Seq(cds['dna_sequence']).translate(\n self.code_table, cds=True).strip(\"*\"))\n except TranslationError as e:\n cds['warnings'] = cds.get('warnings', []) + [str(e)]\n # NOTE: we may need a different way of handling this for metagenomes.\n prot_seq = \"\"\n if self.is_metagenome:\n untranslatable_prot.add(cds_id)\n\n if self.is_metagenome:\n if prot_seq != \"\":\n protein_id = \"\"\n if cds.get(\"aliases\"):\n aliases = cds['aliases']\n for key, val in aliases:\n if key == \"protein_id\":\n protein_id = val\n if not protein_id:\n protein_id = cds['id'] # assign to some default\n else:\n # log a warning here?\n pass\n # TODO: update header to reflect what we actually want people\n # to see.\n if protein_id in prot_fasta:\n prot_fasta[protein_id][0] += \"|\" + cds['id']\n else:\n fasta_seq_data = \">\" + protein_id + \" cds_ids:\" + cds['id']\n prot_fasta[protein_id] = [fasta_seq_data, prot_seq]\n else:\n pass\n\n else:\n cds.update({\n \"protein_translation\": prot_seq,\n \"protein_md5\": hashlib.md5(prot_seq.encode('utf8')).hexdigest(),\n \"protein_translation_length\": len(prot_seq),\n })\n\n if 'parent_gene' in cds:\n parent_gene = self.feature_dict[cds['parent_gene']]\n # no propigation for now\n propagate_cds_props_to_gene(cds, parent_gene, self.is_metagenome)\n elif self.generate_genes:\n spoof = copy.copy(cds)\n spoof['type'] = 'gene'\n spoof['id'] = cds['id']+\"_gene\"\n spoof['cdss'] = [cds['id']]\n spoof['warnings'] = 
[warnings['spoofed_gene'].format(cds['id'])]\n self.feature_dict[spoof['id']] = spoof\n cds['parent_gene'] = spoof['id']\n self.spoof_gene_count += 1\n else:\n raise ValueError(warnings['no_spoof'])\n\n self.feature_dict[cds['id']] = cds\n\n if self.is_metagenome:\n with open(prot_fasta_path, 'w') as fid:\n for key, line in prot_fasta.items():\n fid.write('\\n'.join(line))\n # do something with 'untranslatable_prot'", "def collect(self, vcfname, tag):\n if tag not in [\"TP\", \"FN\"]:\n return extractPiscesIndelFeatures(vcfname, tag, self.chr_depth)\n else:\n features = [\"CHROM\", \"POS\", \"REF\", \"ALT\", \"QUAL\", \"S.1.VT\",\n \"I.T_ALT_RATE\", \"I.DP_normal\", \"I.DP_tumor\", \"I.tag\", \"I.count\"]\n return GenericFeatures.collectFeatures(vcfname, tag, features, processor=StrelkaAdmixIndelFeatures.processValue)", "def load_feature(feature_name, caf_dose, features_path):\n # gets the paths to the folders where the specified feature is stored\n subject_paths = glob.glob(os.path.join(features_path, \"*\", feature_name))\n\n feature = {}\n for path in subject_paths:\n # extract the subject id from the current path (second to last element in the path)\n subject_id = path.split(os.sep)[-2]\n\n # get all stages for the current subject\n stages = set(\n [\n p.split(os.sep)[-1].split(\"_\")[-1].split(\".\")[0]\n for p in glob.glob(os.path.join(path, \"*.npy\"))\n ]\n )\n if len(stages) == 0:\n print(\n f\"The following directory doesn't contain features: {path}. \"\n \"This will likely cause an error down the line\"\n )\n for stage in stages:\n if stage not in feature:\n feature[stage] = {}\n # load the file containing the data for the current stage and subject\n feature[stage][subject_id] = np.load(\n os.path.join(path, f\"{feature_name}_{stage}.npy\"), allow_pickle=True\n )\n return feature", "def sequence_to_biopython_record(\n sequence, id=\"<unknown id>\", name=\"<unknown name>\", features=()\n):\n if has_dna_alphabet:\n seq = Seq(sequence, alphabet=DNAAlphabet())\n else:\n seq = Seq(sequence)\n\n return SeqRecord(\n seq=seq,\n id=id,\n name=name,\n features=list(features),\n annotations={\"molecule_type\": \"DNA\"},\n )", "def _proc_dataset(d):\n # merge 2dseq complex frame group if present\n if d.is_complex and d.type == '2dseq':\n d = FrameGroupMerger().merge(d, 'FG_COMPLEX')\n\n # prepare the data array\n if d.is_svs:\n data = _prep_data_svs(d)\n elif d.is_mrsi:\n data = _prep_data_mrsi(d)\n else:\n data = d.data\n\n # get properties\n properties = d.to_dict()\n\n # some Bruker datasets do not have affine property\n if d.type == 'fid': if not 'affine' in properties: properties.update({'affine':np.identity(4)})\n \n yield data, properties", "def parse_df(data):\n\n # First column should be the ids\n ids = list(data.iloc[:, 0])\n\n # Second column should hold the labels\n labels = list(data.iloc[:, 1])\n\n # From third columns, we should have the features\n features = list(data.iloc[:, 2:].values)\n\n return ids, labels, features", "def extractFeatures(self, datum):\n abstract" ]
[ "0.6618724", "0.6388655", "0.62729174", "0.571681", "0.56512153", "0.5595933", "0.55470765", "0.55052245", "0.5452013", "0.5344809", "0.53293115", "0.53277576", "0.5323969", "0.5287743", "0.51737297", "0.50894594", "0.50693136", "0.5034963", "0.5032553", "0.5020525", "0.5016843", "0.50130093", "0.5006117", "0.49967706", "0.4967874", "0.49458304", "0.49446902", "0.49388698", "0.49386206", "0.4906504" ]
0.7385726
0
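A minimal usage sketch for the parse_cds_seqfeature document in the row above, assuming the function is importable (its real module path is not stated in the row) and that a GenBank flat file is available; the import path and the file name "phage.gb" are placeholders, and only Biopython calls that exist (SeqIO.read, record.features, SeqFeature.type) are used.

from Bio import SeqIO

# Hypothetical import path; the row above defines parse_cds_seqfeature but does not name its module.
from flat_file_parser import parse_cds_seqfeature

record = SeqIO.read("phage.gb", "genbank")   # "phage.gb" is a placeholder GenBank flat file
cds_features = []
for seqfeature in record.features:
    if seqfeature.type == "CDS":
        # Per the parser above, the returned Cds object stores 0-based half-open coordinates.
        cds_ftr = parse_cds_seqfeature(seqfeature)
        cds_features.append(cds_ftr)
print(len(cds_features), "CDS features parsed")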
Parse data from a Biopython tRNA SeqFeature object into a Trna object.
def parse_trna_seqfeature(seqfeature): trna_ftr = trna.Trna() trna_ftr.seqfeature = seqfeature try: locus_tag = seqfeature.qualifiers["locus_tag"][0] except (KeyError, IndexError): locus_tag = "" finally: trna_ftr.set_locus_tag(locus_tag, delimiter=None) trna_ftr.set_orientation(seqfeature.strand, "fr_short", True) trna_ftr.start, trna_ftr.stop, trna_ftr.parts = parse_coordinates(seqfeature) # Coordinate format for GenBank flat file features parsed by Biopython # are 0-based half open intervals. trna_ftr.coordinate_format = "0_half_open" trna_ftr.set_nucleotide_length(use_seq=True) try: product = seqfeature.qualifiers["product"][0] except (KeyError, IndexError): product = "" finally: trna_ftr.product = product try: note = seqfeature.qualifiers["note"][0] except (KeyError, IndexError): note = "" finally: trna_ftr.note = note try: gene = seqfeature.qualifiers["gene"][0] except (KeyError, IndexError): gene = "" finally: trna_ftr.gene = gene trna_ftr.set_name() return trna_ftr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_tmrna_seqfeature(seqfeature):\n tmrna_ftr = tmrna.Tmrna()\n tmrna_ftr.seqfeature = seqfeature\n\n try:\n locus_tag = seqfeature.qualifiers[\"locus_tag\"][0]\n except (KeyError, IndexError):\n locus_tag = \"\"\n finally:\n tmrna_ftr.set_locus_tag(locus_tag, delimiter=None)\n\n tmrna_ftr.set_orientation(seqfeature.strand, \"fr_short\", True)\n tmrna_ftr.start, tmrna_ftr.stop, tmrna_ftr.parts = parse_coordinates(seqfeature)\n\n # Coordinate format for GenBank flat file features parsed by Biopython\n # are 0-based half open intervals.\n tmrna_ftr.coordinate_format = \"0_half_open\"\n\n tmrna_ftr.set_nucleotide_length(use_seq=True)\n\n try:\n note = seqfeature.qualifiers[\"note\"][0]\n except (KeyError, IndexError):\n note = \"\"\n finally:\n tmrna_ftr.note = note\n\n try:\n gene = seqfeature.qualifiers[\"gene\"][0]\n except (KeyError, IndexError):\n gene = \"\"\n finally:\n tmrna_ftr.gene = gene\n\n tmrna_ftr.set_name()\n return tmrna_ftr", "def make_tRNA_fasta_dict(tRNAdf):\n\n\n\ttRNA_fasta_outdict = OrderedDict()\n\n\tfor i in tRNAdf.index:\n\n\t\tif tRNAdf.loc[i,'feature'] == 'tRNA':\n\t\t\tchrom = tRNAdf.loc[i,'#chrom']\n\t\t\tchrStart = int(tRNAdf.loc[i,'chromStart'])\n\t\t\tchrEnd = int(tRNAdf.loc[i,'chromEnd'])\n\t\t\tstrand = tRNAdf.loc[i,'strand']\n\t\t\t\n\t\t\tif strand == \"+\":\n\t\t\t\tchrStart = chrStart-1 ### gtf files are 1-based, convert to 0-based\n\t\t\t\ttrSeq = SeqIO.Seq(genome[chrom][chrStart:chrEnd])\n\t\t\t\ttrdict = parse_entry(tRNAdf.loc[i,'transcript_id'])\n\t\t\t\n\t\t\telse: # for neg strand\n\t\t\t\tchrStart = chrStart-1\n\t\t\t\ttrSeq = SeqIO.Seq(genome[chrom][chrStart:chrEnd])\n\t\t\t\ttrSeq = trSeq.reverse_complement()\n\t\t\t\ttrdict = parse_entry(tRNAdf.loc[i,'transcript_id'])\n\n\t\t\ttrID = \"tRNA_\"+trdict['gene_id'][0]\n\t\t\tdesc = \"| tRNA | \"+trdict['gene_type'][0] + \" | %s; %s; %s:%s\" % (chrom, strand, chrStart, chrEnd)\n\n\t\t\ttrSeqRec = SeqRecord(trSeq, id=trID, name=trdict['gene_name'][0], description=desc)\n\t\t\ttRNA_fasta_outdict[trID] = trSeqRec\n\t\n\treturn tRNA_fasta_outdict", "def parse_cds_seqfeature(seqfeature):\n cds_ftr = cds.Cds()\n cds_ftr.seqfeature = seqfeature\n\n try:\n locus_tag = seqfeature.qualifiers[\"locus_tag\"][0]\n except:\n locus_tag = \"\"\n finally:\n cds_ftr.set_locus_tag(locus_tag, delimiter=None)\n\n cds_ftr.set_orientation(seqfeature.strand, \"fr_short\", case = True)\n cds_ftr.start, cds_ftr.stop, cds_ftr.parts = parse_coordinates(seqfeature)\n\n # Coordinate format for GenBank flat file features parsed by Biopython\n # are 0-based half open intervals.\n cds_ftr.coordinate_format = \"0_half_open\"\n\n # For translation, convert it to a Biopython Seq object.\n try:\n translation = seqfeature.qualifiers[\"translation\"][0]\n except:\n translation = \"\"\n finally:\n translation = Seq(translation, Alphabet.IUPAC.protein)\n cds_ftr.set_translation(translation)\n\n cds_ftr.set_nucleotide_length(translation=True)\n\n try:\n translation_table = seqfeature.qualifiers[\"transl_table\"][0]\n except:\n translation_table = 0\n finally:\n cds_ftr.set_translation_table(translation_table)\n\n try:\n product = seqfeature.qualifiers[\"product\"][0]\n except:\n product = \"\"\n finally:\n cds_ftr.set_description_field(\"product\", product)\n\n try:\n function = seqfeature.qualifiers[\"function\"][0]\n except:\n function = \"\"\n finally:\n cds_ftr.set_description_field(\"function\", function)\n\n try:\n note = seqfeature.qualifiers[\"note\"][0]\n except:\n note = \"\"\n finally:\n 
cds_ftr.set_description_field(\"note\", note)\n\n try:\n gene = seqfeature.qualifiers[\"gene\"][0]\n except:\n gene = \"\"\n finally:\n cds_ftr.set_gene(gene)\n\n cds_ftr.set_name()\n return cds_ftr", "def parse_source_seqfeature(seqfeature):\n src_ftr = source.Source()\n src_ftr.seqfeature = seqfeature\n start, stop, parts = parse_coordinates(seqfeature)\n src_ftr.start = start\n src_ftr.stop = stop\n\n try:\n src_ftr.organism = str(seqfeature.qualifiers[\"organism\"][0])\n except:\n src_ftr.organism = \"\"\n\n try:\n src_ftr.host = str(seqfeature.qualifiers[\"host\"][0])\n except:\n src_ftr.host = \"\"\n\n try:\n src_ftr.lab_host = str(seqfeature.qualifiers[\"lab_host\"][0])\n except:\n src_ftr.lab_host = \"\"\n\n src_ftr.parse_organism()\n src_ftr.parse_host()\n src_ftr.parse_lab_host()\n\n return src_ftr", "def GFFParse(gff_file):\n genes, utr5, exons=dict(), dict(), dict()\n transcripts, utr3, cds=dict(), dict(), dict()\n # TODO Include growing key words of different non-coding/coding transcripts \n features=['mrna', 'transcript', 'ncrna', 'mirna', 'pseudogenic_transcript', 'rrna', 'snorna', 'snrna', 'trna', 'scrna', 'mrna_te_gene']\n gff_handle=open(gff_file, \"rU\")\n for gff_line in gff_handle:\n gff_line=gff_line.strip('\\n\\r').split('\\t')\n if re.match(r'#|>', gff_line[0]): # skip commented line or fasta identifier line \n continue\n if len(gff_line)==1: # skip fasta sequence/empty line if present \n continue \n assert len(gff_line)==9, '\\t'.join(gff_line) # not found 9 tab-delimited fields in this line \n if '' in gff_line: # skip this line if there any field with an empty value\n print 'Skipping..', '\\t'.join(gff_line)\n continue\n if gff_line[-1][-1]==';': # trim the last ';' character \n gff_line[-1]=gff_line[-1].strip(';')\n if gff_line[2].lower() in ['gene', 'pseudogene', 'transposable_element_gene']:\n gid, gene_info=None, dict()\n gene_info['start']=int(gff_line[3])\n gene_info['stop']=int(gff_line[4])\n gene_info['chr']=gff_line[0]\n gene_info['source']=gff_line[1]\n gene_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=') # gff attributes are separated by key=value pair \n if attb[0]=='ID':\n gid=attb[1]\n break\n genes[(gff_line[0], gid)]=gene_info # store gene information based on the chromosome and gene symbol.\n elif gff_line[2].lower() in features: \n gid, mrna_info=None, dict() \n mrna_info['start']=int(gff_line[3])\n mrna_info['stop']=int(gff_line[4])\n mrna_info['chr']=gff_line[0]\n mrna_info['strand']=gff_line[6]\n mrna_info['type'] = gff_line[2]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n gid=attb[1]\n elif attb[0]=='ID':\n mrna_info[attb[0]]=attb[1]\n for fid in gid.split(','): # child may be mapped to multiple parents ex: Parent=AT01,AT01-1-Protein \n if (gff_line[0], fid) in transcripts:\n transcripts[(gff_line[0], fid)].append(mrna_info)\n else:\n transcripts[(gff_line[0], fid)]=[mrna_info]\n elif gff_line[2].lower() in ['exon', 'pseudogenic_exon']:\n tids, exon_info=None, dict()\n exon_info['start']=int(gff_line[3])\n exon_info['stop']=int(gff_line[4])\n exon_info['chr']=gff_line[0]\n exon_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in exons:\n exons[(gff_line[0], tid)].append(exon_info)\n else:\n exons[(gff_line[0], tid)]=[exon_info]\n elif gff_line[2].lower() in ['five_prime_utr']:\n utr5_info, tids=dict(), None\n 
utr5_info['start']=int(gff_line[3])\n utr5_info['stop']=int(gff_line[4])\n utr5_info['chr']=gff_line[0]\n utr5_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in utr5:\n utr5[(gff_line[0], tid)].append(utr5_info)\n else:\n utr5[(gff_line[0], tid)]=[utr5_info]\n elif gff_line[2].lower() in ['cds']:\n cds_info, tids=dict(), None\n cds_info['start']=int(gff_line[3])\n cds_info['stop']=int(gff_line[4])\n cds_info['chr']=gff_line[0]\n cds_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in cds:\n cds[(gff_line[0], tid)].append(cds_info)\n else:\n cds[(gff_line[0], tid)]=[cds_info]\n elif gff_line[2].lower() in ['three_prime_utr']:\n utr3_info, tids=dict(), None\n utr3_info['start']=int(gff_line[3])\n utr3_info['stop']=int(gff_line[4])\n utr3_info['chr']=gff_line[0]\n utr3_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in utr3:\n utr3[(gff_line[0], tid)].append(utr3_info)\n else:\n utr3[(gff_line[0], tid)]=[utr3_info]\n gff_handle.close()\n return genes, transcripts, exons, utr3, utr5, cds", "def parser(raw_seq, date):\n taxon_id = int(raw_seq.features[0].qualifiers['db_xref'][0][6:])\n organism = raw_seq.annotations['organism']\n accession = raw_seq.annotations['accessions'][0]\n gene = []\n records = []\n frag_type = 'whole'\n begin = 1\n end = len(raw_seq)\n sequence = str(raw_seq.seq)\n name = organism\n strand = 1\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n records.append(rec)\n for i in raw_seq.features:\n if i.type == 'gene' and 'gene' in i.qualifiers:\n if i.location_operator != 'join':\n frag_type = 'gene'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n name = str(i.qualifiers['gene'][0])\n strand = str(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.location_operator == 'join':\n frag_type = 'gene'\n begin = int(i.sub_features[0].location.start)\n end = int(i.sub_features[0].location.end)\n name = str(i.qualifiers['gene'][0])\n strand = str(i.location.strand)\n sequence = ''\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n gene.append(rec)\n begin = int(i.sub_features[1].location.start)\n end = int(i.sub_features[1].location.end)\n sequence = ''.join([str(raw_seq.seq[begin:end]), str(raw_seq.seq[begin:end])])\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'CDS' and 'gene' in i.qualifiers:\n frag_type = 'cds'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n name = str(i.qualifiers['gene'][0]).replace(' ', '_')\n strand = str(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'tRNA' and 'gene' in i.qualifiers:\n frag_type = 'tRNA'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n if len(sequence) >= 100:\n sequence = ''\n name = str(i.qualifiers['gene'][0]).replace(' ', '_')\n strand = 
str(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'rRNA':\n frag_type = 'rRNA'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n name = str(i.qualifiers['product'][0]).replace(' ', '_')\n strand = str(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'exon' and 'gene' in i.qualifiers:\n frag_type = 'exon'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n if 'number' in i.qualifiers:\n name = '{0}_exon_{1}'.format(i.qualifiers['gene'][0],\n i.qualifiers['number'][0])\n else:\n name = '{0}_exon'.format(i.qualifiers['gene'][0])\n strand = int(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'intron' and 'gene' in i.qualifiers:\n frag_type = 'intron'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n strand = str(i.location.strand)\n if 'number' in i.qualifiers:\n name = '{0}_{1}_intron'.format(i.qualifiers['gene'][0],\n i.qualifiers['number'][0])\n else:\n name = '{0}_intron'.format(i.qualifiers['gene'][0])\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n\n records.append(rec)\n gene.sort(key=lambda x: x[5])\n\n for i in range(len(gene) - 1):\n frag_type = 'spacer'\n now = gene[i]\n then = gene[i + 1]\n tail = now[6] + 1\n head = then[5] - 1\n sequence = str(raw_seq.seq[tail:head])\n name = '{0}-{1}_spacer'.format(now[3], then[3])\n strand = 0\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n records.append(rec)\n records.extend(gene)\n\n database.extend(records)", "def parse_rttm_multiscale(self, sample):\n if sample.rttm_file is None:\n raise ValueError(f\"RTTM file is not provided for this sample {sample}\")\n rttm_lines = open(sample.rttm_file).readlines()\n uniq_id = os.path.splitext(os.path.basename(sample.rttm_file))[0]\n mapping_dict = self.emb_dict[max(self.emb_dict.keys())][uniq_id]['mapping']\n rttm_timestamps = extract_seg_info_from_rttm(uniq_id, rttm_lines, mapping_dict, sample.target_spks)\n fr_level_target = assign_frame_level_spk_vector(\n rttm_timestamps, self.round_digits, self.frame_per_sec, sample.target_spks\n )\n seg_target = self.get_diar_target_labels_from_fr_target(uniq_id, fr_level_target)\n return seg_target", "def parse(tx_meta):\n return TranscriptIDInfo(*tx_meta.split('|'))", "def to_seqfeature(feature):\n if isinstance(feature, six.string_types):\n feature = feature_from_line(feature)\n\n qualifiers = {\n \"source\": [feature.source],\n \"score\": [feature.score],\n \"seqid\": [feature.seqid],\n \"frame\": [feature.frame],\n }\n qualifiers.update(feature.attributes)\n return SeqFeature(\n # Convert from GFF 1-based to standard Python 0-based indexing used by\n # BioPython\n FeatureLocation(feature.start - 1, feature.stop),\n id=feature.id,\n type=feature.featuretype,\n strand=_biopython_strand[feature.strand],\n qualifiers=qualifiers,\n )", "def _handle_feature(fea):\n if len(fea.shape) == 1:\n fea = np.array([fea]).T\n\n return fea", "def _trna_annotation(data):\n trna_ref = op.join(dd.get_srna_trna_file(data))\n name = dd.get_sample_name(data)\n work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), \"trna\", name))\n in_file = op.basename(data[\"clean_fastq\"])\n 
tdrmapper = os.path.join(os.path.dirname(sys.executable), \"TdrMappingScripts.pl\")\n perl_export = utils.get_perl_exports()\n if not file_exists(trna_ref) or not file_exists(tdrmapper):\n logger.info(\"There is no tRNA annotation to run TdrMapper.\")\n return work_dir\n out_file = op.join(work_dir, in_file + \".hq_cs.mapped\")\n if not file_exists(out_file):\n with tx_tmpdir(data) as txdir:\n with utils.chdir(txdir):\n utils.symlink_plus(data[\"clean_fastq\"], op.join(txdir, in_file))\n cmd = (\"{perl_export} && perl {tdrmapper} {trna_ref} {in_file}\").format(**locals())\n do.run(cmd, \"tRNA for %s\" % name)\n for filename in glob.glob(\"*mapped*\"):\n shutil.move(filename, work_dir)\n return work_dir", "def convert_tcr(self):\n\n def read_text(file_name, event_a_id, event_b_id):\n idx_val = {\"span1\": [], \"span2\": [], \"signal\": []}\n parsed_doc = minidom.parse(self.dir_path + \"tcr/TemporalPart/{}\".format(file_name))\n elements = parsed_doc.getElementsByTagName('TEXT')\n text = \"\"\n token_index = 0\n tagxid = {\"EVENT\": \"eid\", \"TIMEX3\": \"tid\"}\n for element in elements:\n if element.tagName == \"TEXT\":\n for item in element.childNodes:\n if item.nodeName == \"#text\":\n text += item.wholeText\n token_index += len(item.wholeText)\n elif item.nodeName == \"EVENT\" or item.nodeName == \"TIMEX3\":\n item_text = ' '.join([child_node.wholeText for child_node in item.childNodes])\n text += item_text\n start_end = [token_index, token_index + len(item_text)]\n token_index += len(item_text)\n\n if item.attributes[tagxid[item.nodeName]].value == event_a_id:\n idx_val[\"span1\"].append(start_end)\n event_a_text = item_text\n elif item.attributes[tagxid[item.nodeName]].value == event_b_id:\n idx_val[\"span2\"].append(start_end)\n event_b_text = item_text\n return text, idx_val, [event_a_text, event_b_text]\n\n mismatch = 0\n data = pd.DataFrame(columns=self.scheme_columns)\n\n test_files = [\"2010.01.08.facebook.bra.color\", \"2010.01.12.haiti.earthquake\", \"2010.01.12.turkey.israel\",\n \"2010.01.13.google.china.exit\", \"2010.01.13.mexico.human.traffic.drug\"]\n\n with open(self.dir_path + \"tcr/CausalPart/allClinks.txt\", 'r') as in_file:\n lines = in_file.readlines()\n\n annotations = [line.strip().split('\\t') for line in lines]\n\n for annotation in annotations:\n file_path = annotation[0] + \".tml\"\n text, idx_val, events_text = read_text(file_path, annotation[1], annotation[2])\n direction = 1 if annotation[3] == \"caused_by\" else 0\n\n split = 2 if annotation[0] in test_files else 1\n\n # saving the sample\n new_row = {\"original_id\": '', \"span1\": [events_text[0]], \"span2\": [events_text[1]], \"signal\": [],\n \"context\": text,\n \"idx\": idx_val, \"label\": 1, \"direction\": direction,\n \"source\": self.namexid[\"tcr\"],\n \"ann_file\": file_path,\n \"split\": split}\n\n if self.check_span_indexes(new_row):\n data = data.append(new_row, ignore_index=True)\n else:\n mismatch += 1\n return data, mismatch", "def _parse_tx_infos(self, gtf_path):\n if os.path.exists('_tx_cache.bin'):\n with open('_tx_cache.bin', 'rb') as f:\n return pickle.load(f)\n result = []\n with gzip.open(gtf_path, 'rt') as f:\n for i, line in enumerate(f):\n if i % 1000 == 0:\n print('processed {}'.format(i), file=sys.stderr)\n if line.startswith('#'):\n continue\n if line.split('\\t', 3)[2] != 'transcript':\n continue\n record = GTFFeature.parse(line)\n if record.feature != 'transcript':\n continue\n result.append(\n TranscriptInfo(record.attrs['gene_id'],\n record.attrs['transcript_id'],\n 
record.attrs['transcript_type'],\n record.seqname,\n record.start,\n record.end))\n with open('_tx_cache.bin', 'wb') as g:\n pickle.dump(result, g)\n print(len(result), file=sys.stderr)\n return result", "def test_parse_trflp(self):\r\n\r\n data = \\\r\n \"\"\"\tBin (10bp)\tBin (20bp)\tBin (30bp)\tBin (40 bp)\r\nSamp-le 1\t1000\t2000\t3000\t4000\r\nSample 2\t\t2000\t3000\t4000\r\nSample 3\t\t\t3000\t4000\r\nSample 4\t\t\t\t4000\r\nSample 5\t25\t\t\t\"\"\"\r\n samples, otus, data = parse_trflp(data.split('\\n'))\r\n\r\n samples_exp = [\r\n 'Samp.le.1',\r\n 'Sample.2',\r\n 'Sample.3',\r\n 'Sample.4',\r\n 'Sample.5']\r\n otus_exp = ['Bin__10bp_', 'Bin__20bp_', 'Bin__30bp_', 'Bin__40_bp_']\r\n data_exp = array([[1000, 0, 0, 0, 25],\r\n [2000, 2000, 0, 0, 0],\r\n [3000, 3000, 3000, 0, 0],\r\n [4000, 4000, 4000, 4000, 0]])\r\n\r\n self.assertEqual(samples, samples_exp)\r\n self.assertEqual(otus, otus_exp)\r\n assert_almost_equal(data, data_exp)", "def read(self, featureType='miRNA_primary_transcript'):\n logger.info('Reading %s' % self.fileName)\n self.fileHandle = open(self.fileName, 'r+b')\n bytePosition = self.fileHandle.tell()\n for line in self.fileHandle:\n row = line.decode('utf-8').rstrip().split(\"\\t\")\n if not row[0].startswith(\"#\") and row[2] == featureType:\n attributes = row[-1].split(\";\")\n for attribute in attributes:\n if attribute.startswith('Name'):\n mirbase_name = attribute.split(\"=\")[-1]\n self.features[mirbase_name] = bytePosition\n bytePosition = self.fileHandle.tell()\n self.fileHandle.close()\n logger.debug('Reading %s finished' % self.fileName)", "def parseFasta(self, fastaRef):\n\n seq = \"\"\n prevId = \"\"\n with open(fastaRef, 'r') as f:\n\n for line in f:\n if \">\" == line[0]:\n # asserting the regex don't fail...\n found = GENEIDRULE.search(line)\n if(found):\n alternate = found.group(1)\n geneName = found.group(2)\n self._transcripts[alternate] = geneName\n else:\n print(\"EnsemblFasta: NOT FOUND\")\n print(line)\n exit()\n\n if(prevId and seq):\n geneName = self._transcripts[prevId]\n if geneName in self._genes:\n gene = self._genes[geneName]\n else:\n gene = Gene(geneName)\n self._genes[geneName] = gene\n\n gene.addTranscripts(prevId, seq)\n seq = \"\"\n prevId = alternate\n else:\n seq += line.rstrip(\"\\n\")\n gene.addTranscripts(prevId, seq)", "def _transform_feature(self, contig, in_feature):\n def _aliases(feat):\n keys = ('locus_tag', 'old_locus_tag', 'protein_id',\n 'transcript_id', 'gene', 'ec_number', 'gene_synonym')\n alias_list = []\n for key in keys:\n if key in feat['attributes']:\n alias_list.extend([(key, val) for val in feat['attributes'][key]])\n return alias_list\n\n if in_feature['start'] < 1 or in_feature['end'] > len(contig):\n self.warn(f\"Feature with invalid location for specified contig: {in_feature}\")\n if self.strict:\n raise ValueError(\"Features must be completely contained within the Contig in the \"\n f\"Fasta file. 
Feature: in_feature\")\n return\n\n feat_seq = contig.seq[in_feature['start']-1:in_feature['end']].upper()\n if in_feature['strand'] in {'-', '-1'}:\n feat_seq = feat_seq.reverse_complement()\n\n # if the feature ID is duplicated (CDS or transpliced gene) we only\n # need to update the location and dna_sequence\n if in_feature.get('ID') in self.feature_dict:\n existing = self.feature_dict[in_feature['ID']]\n existing['location'].append(self._location(in_feature))\n existing['dna_sequence'] = existing.get('dna_sequence', '') + str(feat_seq)\n existing['dna_sequence_length'] = len(existing['dna_sequence'])\n return\n\n # The following is common to all the feature types\n out_feat = {\n \"id\": in_feature.get('ID'),\n \"type\": in_feature['type'],\n \"location\": [self._location(in_feature)],\n \"dna_sequence\": str(feat_seq),\n \"dna_sequence_length\": len(feat_seq),\n \"md5\": hashlib.md5(str(feat_seq).encode('utf8')).hexdigest(),\n \"warnings\": [],\n \"flags\": [],\n }\n\n # add optional fields\n if 'note' in in_feature['attributes']:\n out_feat['note'] = in_feature['attributes'][\"note\"][0]\n ont, db_xrefs = self._get_ontology_db_xrefs(in_feature['attributes'])\n if ont:\n out_feat['ontology_terms'] = ont\n aliases = _aliases(in_feature)\n if aliases:\n out_feat['aliases'] = aliases\n if db_xrefs:\n out_feat['db_xrefs'] = db_xrefs\n if 'product' in in_feature['attributes']:\n out_feat['functions'] = in_feature['attributes'][\"product\"]\n if 'product_name' in in_feature['attributes']:\n if \"functions\" in out_feat:\n out_feat['functions'].extend(in_feature['attributes'][\"product_name\"])\n else:\n out_feat['functions'] = in_feature['attributes'][\"product_name\"]\n if 'function' in in_feature['attributes']:\n out_feat['functional_descriptions'] = in_feature['attributes'][\"function\"]\n if 'inference' in in_feature['attributes']:\n GenomeUtils.parse_inferences(in_feature['attributes']['inference'])\n if 'trans-splicing' in in_feature['attributes'].get('exception', []):\n out_feat['flags'].append('trans_splicing')\n if 'pseudo' in in_feature['attributes'].get('exception', []):\n out_feat['flags'].append('pseudo')\n if 'ribosomal-slippage' in in_feature['attributes'].get('exception', []):\n out_feat['flags'].append('ribosomal_slippage')\n parent_id = in_feature.get('Parent', '')\n if parent_id and parent_id not in self.feature_dict:\n raise ValueError(f\"Parent ID: {parent_id} was not found in feature ID list.\")\n\n # if the feature is a exon or UTR, it will only be used to update the\n # location and sequence of it's parent, we add the info to it parent\n # feature but not the feature dict\n if in_feature['type'] in self.skip_types:\n if parent_id and in_feature['type'] in {'exon', 'five_prime_UTR', 'three_prime_UTR'}:\n parent = self.feature_dict[parent_id]\n if in_feature['type'] not in parent:\n parent[in_feature['type']] = []\n parent[in_feature['type']].append(out_feat)\n return\n\n # add type specific features\n elif 'gene' in in_feature['type']:\n out_feat['protein_translation_length'] = 0\n out_feat['cdss'] = []\n\n elif in_feature['type'] == 'CDS':\n if parent_id:\n parent = self.feature_dict[parent_id]\n if 'cdss' in parent: # parent must be a gene\n if not is_parent(parent, out_feat):\n parent[\"warnings\"] = parent.get('warnings', []) + [\n warnings[\"genes_CDS_child_fails_location_validation\"].format(out_feat[\"id\"])]\n out_feat[\"warnings\"].append(\n warnings[\"CDS_fail_child_of_gene_coordinate_validation\"].format(parent_id))\n 
parent['cdss'].append(in_feature['ID'])\n out_feat['parent_gene'] = parent_id\n else: # parent must be mRNA\n if not is_parent(parent, out_feat):\n parent[\"warnings\"] = parent.get('warnings', []) + [\n warnings[\"mRNA_fail_parent_coordinate_validation\"].format(out_feat[\"id\"])]\n out_feat[\"warnings\"].append(\n warnings[\"CDS_fail_child_of_mRNA_coordinate_validation\"].format(parent_id))\n parent['cds'] = in_feature['ID']\n out_feat['parent_mrna'] = parent_id\n parent_gene = self.feature_dict[parent['parent_gene']]\n parent_gene['cdss'].append(in_feature['ID'])\n out_feat['parent_gene'] = parent['parent_gene']\n # keep track of CDSs for post processing\n self.cdss.add(out_feat['id'])\n\n elif in_feature['type'] == 'mRNA':\n if parent_id:\n parent = self.feature_dict[parent_id]\n if 'mrnas' not in parent:\n parent['mrnas'] = []\n if 'cdss' in parent: # parent must be a gene\n parent['mrnas'].append(in_feature['ID'])\n out_feat['parent_gene'] = parent_id\n if not is_parent(parent, out_feat):\n parent[\"warnings\"] = parent.get('warnings', []) + [\n warnings[\"genes_mRNA_child_fails_location_validation\"].format(out_feat[\"id\"])]\n out_feat[\"warnings\"].append(\n warnings[\"mRNAs_parent_gene_fails_location_validation\"].format(parent_id))\n\n else:\n out_feat[\"type\"] = in_feature['type']\n # this prevents big misc_features from blowing up the genome size\n if out_feat['dna_sequence_length'] > MAX_MISC_FEATURE_SIZE:\n del out_feat['dna_sequence']\n if parent_id:\n parent = self.feature_dict[parent_id]\n if 'children' not in parent:\n parent['children'] = []\n parent['children'].append(out_feat['id'])\n out_feat['parent_gene'] = parent_id\n if not is_parent(parent, out_feat):\n parent[\"warnings\"] = parent.get('warnings', []) + [\n warnings[\"generic_parents_child_fails_location_validation\"].format(out_feat[\"id\"])]\n out_feat[\"warnings\"].append(\n warnings[\"generic_childs_parent_fails_location_validation\"].format(parent_id))\n\n # cleanup empty optional arrays\n for key in ['warnings', 'flags']:\n if not out_feat[key]:\n del out_feat[key]\n\n self.feature_dict[out_feat['id']] = out_feat", "def parse_transcript(gene_obj, tx_obj, build=None):\n build = build or 37\n add_tx_links(tx_obj, build)\n\n if tx_obj.get('refseq_id'):\n gene_name = (gene_obj['common']['hgnc_symbol'] if gene_obj['common'] else\n gene_obj['hgnc_id'])\n tx_obj['change_str'] = transcript_str(tx_obj, gene_name)", "def toLingDataToken(token):\n\n t = Token()\n\n t.set(\n id=token.i,\n word=token.orth_,\n lemma=token.lemma_,\n POS=token.tag_,\n SPOS=token.pos_,\n depID=token.dep,\n depStr=token.dep_,\n NE=token.ent_type_,\n foreign=token.is_oov\n )\n\n # setting features\n '''\n t.features = {}\n #print(t.POS)\n featureStr = translate(t.POS)\n # save string form of feature translation\n t.features['str'] = featureStr\n\n featureArr = featureStr.split(\"+\")\n #print(featureArr)\n # find the first feature\n i = 0\n while len(featureArr[i]) < 1:\n i += 1\n\n t.features['type'] = featureArr[i]\n if t.features['type'] in [\"N\"]:\n # look for number\n i += 1\n while i < len(featureArr):\n # this means it's probably a number declaration\n if len(featureArr[i]) < 4:\n t.features['number'] = featureArr[i]\n # and next feature could be type of noun\n if i + 1 < len(featureArr):\n t.features['isProper'] = featureArr[i + 1]\n break\n i += 1\n\n if t.features['type'] in [\"V\"]:\n # look for person and number\n i += 1\n while i < len(featureArr):\n # this means it's probably a person declaration\n if len(featureArr[i]) < 
4:\n t.features['person'] = featureArr[i]\n # and next feature could be number\n if i + 1 < len(featureArr):\n t.features['number'] = featureArr[i + 1]\n break\n else:\n # probably a tense\n t.features['tense'] = featureArr[i]\n t.features['isParticiple'] = (\"Part\" in featureArr[i])\n\n i += 1\n #print(t.features)\n '''\n\n # setting wordType\n if token.tag_ == \"BES\": # copula\n t.set(wordType=4)\n elif token.pos_ == \"VERB\":\n t.set(wordType=1)\n elif token.pos_ == \"NOUN\" or token.pos_ == \"PROPN\":\n t.set(wordType=2)\n elif token.pos_ == \"PRON\":\n t.set(wordType=3)\n else:\n t.set(wordType=5)\n\n # spaCy does not have coreferencing...\n\n return t", "def parse_transcripts(transcript_lines):\n LOG.info(\"Parsing transcripts\")\n transcripts = parse_ensembl_transcripts(transcript_lines)\n\n # Since there can be multiple lines with information about the same transcript\n # we store transcript information in a dictionary for now\n parsed_transcripts = {}\n # Loop over the parsed transcripts\n for tx in transcripts:\n tx_id = tx[\"ensembl_transcript_id\"]\n ens_gene_id = tx[\"ensembl_gene_id\"]\n\n # Check if the transcript has been added\n # If not, create a new transcript\n if not tx_id in parsed_transcripts:\n tx_info = {\n \"chrom\": tx[\"chrom\"],\n \"transcript_start\": tx[\"transcript_start\"],\n \"transcript_end\": tx[\"transcript_end\"],\n \"mrna\": set(),\n \"mrna_predicted\": set(),\n \"nc_rna\": set(),\n \"ensembl_gene_id\": ens_gene_id,\n \"ensembl_transcript_id\": tx_id,\n }\n parsed_transcripts[tx_id] = tx_info\n\n tx_info = parsed_transcripts[tx_id]\n # Add the ref seq information\n if tx.get(\"refseq_mrna_predicted\"):\n tx_info[\"mrna_predicted\"].add(tx[\"refseq_mrna_predicted\"])\n if tx.get(\"refseq_mrna\"):\n tx_info[\"mrna\"].add(tx[\"refseq_mrna\"])\n if tx.get(\"refseq_ncrna\"):\n tx_info[\"nc_rna\"].add(tx[\"refseq_ncrna\"])\n\n return parsed_transcripts", "def _parse_record(line):\n fields = line.split()\n if len(fields) < 6:\n raise ParseError(\"Less than six fields found in PED/FAM file\")\n individual_id, paternal_id, maternal_id = fields[1:4]\n if paternal_id == \"0\":\n paternal_id = None\n if maternal_id == \"0\":\n maternal_id = None\n return Trio(child=individual_id, father=paternal_id, mother=maternal_id)", "def read_fasta_file(fasta):\n\n ptn_list = []\n fasta_content = open(fasta, \"r\")\n new_ptn = None\n for line in fasta_content:\n if \">sp\" in line or \">tr\" in line:\n if new_ptn != None:\n new_ptn[\"seq\"] = sequence\n ptn_list.append(new_ptn)\n tokens = line.split()\n new_ptn = {\"id\": tokens[0] }\n sequence = \"\"\n else:\n sequence += line[:-1]\n new_ptn[\"seq\"] = sequence\n ptn_list.append(new_ptn)\n\n return ptn_list", "def read_transcript_data(fn):\n\n def _read_lines(fn):\n # NC_000007.13\tRefSeq\tcDNA_match\t50344265\t50344518\t254\t+\t.\tID=aln58042;Target=NM_001220765.2 1 254 +;gap_count=0;identity=0.0691326;idty=1;num_ident=428;num_mismatch=0;pct_coverage=6.91326;pct_identity_gap=100;pct_identity_ungap=100;score=254\n # NC_000002.11 RefSeq cDNA_match 179671939 179672150 212 - . 
ID=ed951d46-194c-477a-a480-4bc64530c5ba;Target=NM_001267550.2 1 212 +;gap_count=0;identity=0.999991;idty=1;num_ident=109223;num_mismatch=1;pct_coverage=100;pct_identity_gap=99.9991;pct_identity_ungap=99.9991\n line_re = re.compile(\n \"(?P<ref_ac>\\S+)\\s+(?P<origin>\\S+)\\s+(?P<match_type>\\S+)\\s+\"\n \"(?P<g_start>\\d+)\\s+(?P<g_end>\\d+)\\s+(?P<score>\\S+)\\s+\"\n \"(?P<strand>[-+])\\s+\\.\\s+ID=(?P<aln>[^;]+);Target=(?P<tx_ac>\\S+)\"\n \"\\s+(?P<tx_start>\\d+)\\s+(?P<tx_end>\\d+).+?\"\n \"pct_coverage=(?P<pct_coverage>[^;]+);\"\n \"pct_identity_gap=(?P<pct_identity_gap>[^;]+);\"\n \"pct_identity_ungap=(?P<pct_identity_ungap>[^;]+)\"\n )\n fh = io.open(fn, \"rb\")\n while fh.peek(1)[0] == \"#\":\n fh.readline()\n while fh.peek(3)[0:3] != \"###\":\n line = fh.readline()\n try:\n yield line_re.match(line).groupdict()\n except AttributeError:\n raise Exception(\"Failed at\", line)\n raise StopIteration\n def _key(e):\n return (e[\"tx_ac\"], not e[\"ref_ac\"].startswith(\"NC_\"), e[\"ref_ac\"], e[\"aln\"])\n return itertools.groupby(sorted(_read_lines(fn), key=_key), key=_key)", "def parse(self, handle, do_features=...): # -> SeqRecord | None:\n ...", "def load_tgas():\n tgas = GaiaData(gload.tgas())\n return tgas", "def _read_gtf(gtf):\n if not gtf:\n return gtf\n db = defaultdict(list)\n with open(gtf) as in_handle:\n for line in in_handle:\n if line.startswith(\"#\"):\n continue\n cols = line.strip().split(\"\\t\")\n name = [n.split(\"=\")[1] for n in cols[-1].split(\";\") if n.startswith(\"Name\")]\n chrom, start, end, strand = cols[0], cols[3], cols[4], cols[6]\n if cols[2] == \"miRNA_primary_transcript\":\n db[name[0]].append([chrom, int(start), int(end), strand])\n return db", "def parse(record):\n\n #Extract individual parts of the FASTA record\n\n identifier = record.id #The sequence's Id\n sequence = record.seq #The sequence itself\n sequence = sequence.upper() #Turns all the nucleotides to upper case\n\n return identifier, sequence", "def get_tica_trajs(self):\n # Assume order of steps in model\n # Then I try to check as best as I know that it's correct\n featurizer = retrieve_feat(self.model)\n scaler = retrieve_scaler(self.model)\n decomposer = retrieve_decomposer(self.model)\n\n logger.info('Featurizing traj_dict')\n ftrajs = get_ftrajs(self.traj_dict, featurizer)\n\n logger.info('Scaling ftrajs')\n sctrajs = get_sctrajs(ftrajs, scaler)\n\n logger.info('Getting output of tICA')\n ttrajs = get_ttrajs(sctrajs, decomposer)\n\n return ttrajs", "def parse(self,gff3_line):\r\n split_line = gff3_line.strip().split('\\t')\r\n self.seqid = split_line[0]\r\n self.source = split_line[1]\r\n self.type = split_line[2]\r\n self.start = int(split_line[3])\r\n self.end = int(split_line[4])\r\n self.score = split_line[5]\r\n self.strand = split_line[6]\r\n self.phase = split_line[7]\r\n self.attributes.parse(split_line[8])\r\n return self", "def read_gtf_file(gtf_file):\n genes = {}\n transcripts = {}\n exons = {}\n\n with open(gtf_file) as gtf:\n for line in gtf:\n line = line.strip()\n\n # Ignore header\n if line.startswith(\"#\"):\n continue\n\n # Split into constitutive fields on tab\n tab_fields = line.split(\"\\t\")\n chrom = tab_fields[0]\n entry_type = tab_fields[2]\n\n # Entry is a gene\n if entry_type == \"gene\":\n gene = Gene.get_gene_from_gtf(tab_fields)\n native_id = gene.identifier\n genes[native_id] = gene\n\n # Entry is a transcript\n elif entry_type == \"transcript\":\n transcript = Transcript.get_transcript_from_gtf(tab_fields)\n gene_id = transcript.gene_id\n if gene_id in 
genes:\n genes[gene_id].add_transcript(transcript)\n native_id = transcript.identifier\n transcripts[native_id] = transcript\n \n # Entry is an edge\n elif entry_type == \"exon\":\n exon = Edge.create_edge_from_gtf(tab_fields)\n # This ID is used because of a rare GENCODE bug\n location_exon_id = exon.identifier\n exons[location_exon_id] = exon \n\n transcript_id = list(exon.transcript_ids)[0]\n gene_id = exon.annotations[\"gene_id\"]\n \n if location_exon_id not in exons:\n # Add the new edge to the data structure\n exons[location_exon_id] = exon\n else:\n # Update existing exon entry, including its transcript set\n exon = exons[location_exon_id]\n exon.transcript_ids.add(transcript_id)\n \n if transcript_id in transcripts: \n currTranscript = transcripts[transcript_id]\n currTranscript.add_exon(exon)\n\n return genes, transcripts, exons" ]
[ "0.7088515", "0.5894162", "0.54035914", "0.53238535", "0.5313058", "0.53036654", "0.528235", "0.52794975", "0.5255988", "0.52157354", "0.5066412", "0.50403583", "0.5033416", "0.501589", "0.5011236", "0.50101614", "0.50068706", "0.5001515", "0.49953058", "0.49833107", "0.49733436", "0.49542463", "0.49428594", "0.49414036", "0.49196687", "0.4912215", "0.48937404", "0.4888606", "0.48806414", "0.48558718" ]
0.75705236
0
Parses data from a BioPython tmRNA SeqFeature object into a Tmrna object.
def parse_tmrna_seqfeature(seqfeature): tmrna_ftr = tmrna.Tmrna() tmrna_ftr.seqfeature = seqfeature try: locus_tag = seqfeature.qualifiers["locus_tag"][0] except (KeyError, IndexError): locus_tag = "" finally: tmrna_ftr.set_locus_tag(locus_tag, delimiter=None) tmrna_ftr.set_orientation(seqfeature.strand, "fr_short", True) tmrna_ftr.start, tmrna_ftr.stop, tmrna_ftr.parts = parse_coordinates(seqfeature) # Coordinate format for GenBank flat file features parsed by Biopython # are 0-based half open intervals. tmrna_ftr.coordinate_format = "0_half_open" tmrna_ftr.set_nucleotide_length(use_seq=True) try: note = seqfeature.qualifiers["note"][0] except (KeyError, IndexError): note = "" finally: tmrna_ftr.note = note try: gene = seqfeature.qualifiers["gene"][0] except (KeyError, IndexError): gene = "" finally: tmrna_ftr.gene = gene tmrna_ftr.set_name() return tmrna_ftr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_trna_seqfeature(seqfeature):\n trna_ftr = trna.Trna()\n trna_ftr.seqfeature = seqfeature\n\n try:\n locus_tag = seqfeature.qualifiers[\"locus_tag\"][0]\n except (KeyError, IndexError):\n locus_tag = \"\"\n finally:\n trna_ftr.set_locus_tag(locus_tag, delimiter=None)\n\n trna_ftr.set_orientation(seqfeature.strand, \"fr_short\", True)\n trna_ftr.start, trna_ftr.stop, trna_ftr.parts = parse_coordinates(seqfeature)\n\n # Coordinate format for GenBank flat file features parsed by Biopython\n # are 0-based half open intervals.\n trna_ftr.coordinate_format = \"0_half_open\"\n\n trna_ftr.set_nucleotide_length(use_seq=True)\n\n try:\n product = seqfeature.qualifiers[\"product\"][0]\n except (KeyError, IndexError):\n product = \"\"\n finally:\n trna_ftr.product = product\n\n try:\n note = seqfeature.qualifiers[\"note\"][0]\n except (KeyError, IndexError):\n note = \"\"\n finally:\n trna_ftr.note = note\n\n try:\n gene = seqfeature.qualifiers[\"gene\"][0]\n except (KeyError, IndexError):\n gene = \"\"\n finally:\n trna_ftr.gene = gene\n\n trna_ftr.set_name()\n return trna_ftr", "def make_tRNA_fasta_dict(tRNAdf):\n\n\n\ttRNA_fasta_outdict = OrderedDict()\n\n\tfor i in tRNAdf.index:\n\n\t\tif tRNAdf.loc[i,'feature'] == 'tRNA':\n\t\t\tchrom = tRNAdf.loc[i,'#chrom']\n\t\t\tchrStart = int(tRNAdf.loc[i,'chromStart'])\n\t\t\tchrEnd = int(tRNAdf.loc[i,'chromEnd'])\n\t\t\tstrand = tRNAdf.loc[i,'strand']\n\t\t\t\n\t\t\tif strand == \"+\":\n\t\t\t\tchrStart = chrStart-1 ### gtf files are 1-based, convert to 0-based\n\t\t\t\ttrSeq = SeqIO.Seq(genome[chrom][chrStart:chrEnd])\n\t\t\t\ttrdict = parse_entry(tRNAdf.loc[i,'transcript_id'])\n\t\t\t\n\t\t\telse: # for neg strand\n\t\t\t\tchrStart = chrStart-1\n\t\t\t\ttrSeq = SeqIO.Seq(genome[chrom][chrStart:chrEnd])\n\t\t\t\ttrSeq = trSeq.reverse_complement()\n\t\t\t\ttrdict = parse_entry(tRNAdf.loc[i,'transcript_id'])\n\n\t\t\ttrID = \"tRNA_\"+trdict['gene_id'][0]\n\t\t\tdesc = \"| tRNA | \"+trdict['gene_type'][0] + \" | %s; %s; %s:%s\" % (chrom, strand, chrStart, chrEnd)\n\n\t\t\ttrSeqRec = SeqRecord(trSeq, id=trID, name=trdict['gene_name'][0], description=desc)\n\t\t\ttRNA_fasta_outdict[trID] = trSeqRec\n\t\n\treturn tRNA_fasta_outdict", "def _trna_annotation(data):\n trna_ref = op.join(dd.get_srna_trna_file(data))\n name = dd.get_sample_name(data)\n work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), \"trna\", name))\n in_file = op.basename(data[\"clean_fastq\"])\n tdrmapper = os.path.join(os.path.dirname(sys.executable), \"TdrMappingScripts.pl\")\n perl_export = utils.get_perl_exports()\n if not file_exists(trna_ref) or not file_exists(tdrmapper):\n logger.info(\"There is no tRNA annotation to run TdrMapper.\")\n return work_dir\n out_file = op.join(work_dir, in_file + \".hq_cs.mapped\")\n if not file_exists(out_file):\n with tx_tmpdir(data) as txdir:\n with utils.chdir(txdir):\n utils.symlink_plus(data[\"clean_fastq\"], op.join(txdir, in_file))\n cmd = (\"{perl_export} && perl {tdrmapper} {trna_ref} {in_file}\").format(**locals())\n do.run(cmd, \"tRNA for %s\" % name)\n for filename in glob.glob(\"*mapped*\"):\n shutil.move(filename, work_dir)\n return work_dir", "def parser(raw_seq, date):\n taxon_id = int(raw_seq.features[0].qualifiers['db_xref'][0][6:])\n organism = raw_seq.annotations['organism']\n accession = raw_seq.annotations['accessions'][0]\n gene = []\n records = []\n frag_type = 'whole'\n begin = 1\n end = len(raw_seq)\n sequence = str(raw_seq.seq)\n name = organism\n strand = 1\n rec = [taxon_id, organism, accession, name, 
frag_type, begin, end, strand, sequence, date]\n records.append(rec)\n for i in raw_seq.features:\n if i.type == 'gene' and 'gene' in i.qualifiers:\n if i.location_operator != 'join':\n frag_type = 'gene'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n name = str(i.qualifiers['gene'][0])\n strand = str(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.location_operator == 'join':\n frag_type = 'gene'\n begin = int(i.sub_features[0].location.start)\n end = int(i.sub_features[0].location.end)\n name = str(i.qualifiers['gene'][0])\n strand = str(i.location.strand)\n sequence = ''\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n gene.append(rec)\n begin = int(i.sub_features[1].location.start)\n end = int(i.sub_features[1].location.end)\n sequence = ''.join([str(raw_seq.seq[begin:end]), str(raw_seq.seq[begin:end])])\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'CDS' and 'gene' in i.qualifiers:\n frag_type = 'cds'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n name = str(i.qualifiers['gene'][0]).replace(' ', '_')\n strand = str(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'tRNA' and 'gene' in i.qualifiers:\n frag_type = 'tRNA'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n if len(sequence) >= 100:\n sequence = ''\n name = str(i.qualifiers['gene'][0]).replace(' ', '_')\n strand = str(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'rRNA':\n frag_type = 'rRNA'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n name = str(i.qualifiers['product'][0]).replace(' ', '_')\n strand = str(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'exon' and 'gene' in i.qualifiers:\n frag_type = 'exon'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n if 'number' in i.qualifiers:\n name = '{0}_exon_{1}'.format(i.qualifiers['gene'][0],\n i.qualifiers['number'][0])\n else:\n name = '{0}_exon'.format(i.qualifiers['gene'][0])\n strand = int(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'intron' and 'gene' in i.qualifiers:\n frag_type = 'intron'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n strand = str(i.location.strand)\n if 'number' in i.qualifiers:\n name = '{0}_{1}_intron'.format(i.qualifiers['gene'][0],\n i.qualifiers['number'][0])\n else:\n name = '{0}_intron'.format(i.qualifiers['gene'][0])\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n\n records.append(rec)\n gene.sort(key=lambda x: x[5])\n\n for i in range(len(gene) - 1):\n frag_type = 'spacer'\n now = gene[i]\n then = gene[i + 1]\n tail = now[6] + 1\n head = then[5] - 1\n sequence = str(raw_seq.seq[tail:head])\n name = '{0}-{1}_spacer'.format(now[3], then[3])\n strand = 0\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n records.append(rec)\n 
records.extend(gene)\n\n database.extend(records)", "def parse_rttm_multiscale(self, sample):\n if sample.rttm_file is None:\n raise ValueError(f\"RTTM file is not provided for this sample {sample}\")\n rttm_lines = open(sample.rttm_file).readlines()\n uniq_id = os.path.splitext(os.path.basename(sample.rttm_file))[0]\n mapping_dict = self.emb_dict[max(self.emb_dict.keys())][uniq_id]['mapping']\n rttm_timestamps = extract_seg_info_from_rttm(uniq_id, rttm_lines, mapping_dict, sample.target_spks)\n fr_level_target = assign_frame_level_spk_vector(\n rttm_timestamps, self.round_digits, self.frame_per_sec, sample.target_spks\n )\n seg_target = self.get_diar_target_labels_from_fr_target(uniq_id, fr_level_target)\n return seg_target", "def parse_source_seqfeature(seqfeature):\n src_ftr = source.Source()\n src_ftr.seqfeature = seqfeature\n start, stop, parts = parse_coordinates(seqfeature)\n src_ftr.start = start\n src_ftr.stop = stop\n\n try:\n src_ftr.organism = str(seqfeature.qualifiers[\"organism\"][0])\n except:\n src_ftr.organism = \"\"\n\n try:\n src_ftr.host = str(seqfeature.qualifiers[\"host\"][0])\n except:\n src_ftr.host = \"\"\n\n try:\n src_ftr.lab_host = str(seqfeature.qualifiers[\"lab_host\"][0])\n except:\n src_ftr.lab_host = \"\"\n\n src_ftr.parse_organism()\n src_ftr.parse_host()\n src_ftr.parse_lab_host()\n\n return src_ftr", "def mrna_desc(gff3, fasta):\n seqs = {}\n for defline, seq in LocusPocus.fasta.parse(fasta):\n seqid = defline[1:].split(' ')[0]\n if seqid not in seqs:\n seqs[seqid] = seq\n\n mrnaacc = ''\n mrnalen = 0\n for entry in gff3:\n if '\\tmRNA\\t' in entry:\n fields = entry.rstrip().split('\\t')\n assert len(fields) == 9\n mrnalen += int(fields[4]) - int(fields[3]) + 1\n accmatch = re.search(r'accession=([^;\\n]+)', fields[8])\n assert accmatch, 'Unable to parse mRNA accession: %s' % fields[8]\n mrnaacc = accmatch.group(1)\n elif entry.startswith('###'):\n mrnaseq = seqs[mrnaacc]\n if len(mrnaseq) != mrnalen:\n message = 'mature mRNA \"%s\": length mismatch' % mrnaacc\n message += ' (gff3=%d, fa=%d)' % (mrnalen, len(mrnaseq))\n message += '; most likely a duplicated accession, discarding'\n print(message, file=sys.stderr)\n else:\n gccontent = gc_content(mrnaseq)\n gcskew = gc_skew(mrnaseq)\n ncontent = n_content(mrnaseq)\n values = '%s %d %.3f %.3f %.3f' % (\n mrnaacc, mrnalen, gccontent, gcskew, ncontent)\n yield values.split(' ')\n mrnaacc = ''\n mrnalen = 0", "def GFFParse(gff_file):\n genes, utr5, exons=dict(), dict(), dict()\n transcripts, utr3, cds=dict(), dict(), dict()\n # TODO Include growing key words of different non-coding/coding transcripts \n features=['mrna', 'transcript', 'ncrna', 'mirna', 'pseudogenic_transcript', 'rrna', 'snorna', 'snrna', 'trna', 'scrna', 'mrna_te_gene']\n gff_handle=open(gff_file, \"rU\")\n for gff_line in gff_handle:\n gff_line=gff_line.strip('\\n\\r').split('\\t')\n if re.match(r'#|>', gff_line[0]): # skip commented line or fasta identifier line \n continue\n if len(gff_line)==1: # skip fasta sequence/empty line if present \n continue \n assert len(gff_line)==9, '\\t'.join(gff_line) # not found 9 tab-delimited fields in this line \n if '' in gff_line: # skip this line if there any field with an empty value\n print 'Skipping..', '\\t'.join(gff_line)\n continue\n if gff_line[-1][-1]==';': # trim the last ';' character \n gff_line[-1]=gff_line[-1].strip(';')\n if gff_line[2].lower() in ['gene', 'pseudogene', 'transposable_element_gene']:\n gid, gene_info=None, dict()\n gene_info['start']=int(gff_line[3])\n 
gene_info['stop']=int(gff_line[4])\n gene_info['chr']=gff_line[0]\n gene_info['source']=gff_line[1]\n gene_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=') # gff attributes are separated by key=value pair \n if attb[0]=='ID':\n gid=attb[1]\n break\n genes[(gff_line[0], gid)]=gene_info # store gene information based on the chromosome and gene symbol.\n elif gff_line[2].lower() in features: \n gid, mrna_info=None, dict() \n mrna_info['start']=int(gff_line[3])\n mrna_info['stop']=int(gff_line[4])\n mrna_info['chr']=gff_line[0]\n mrna_info['strand']=gff_line[6]\n mrna_info['type'] = gff_line[2]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n gid=attb[1]\n elif attb[0]=='ID':\n mrna_info[attb[0]]=attb[1]\n for fid in gid.split(','): # child may be mapped to multiple parents ex: Parent=AT01,AT01-1-Protein \n if (gff_line[0], fid) in transcripts:\n transcripts[(gff_line[0], fid)].append(mrna_info)\n else:\n transcripts[(gff_line[0], fid)]=[mrna_info]\n elif gff_line[2].lower() in ['exon', 'pseudogenic_exon']:\n tids, exon_info=None, dict()\n exon_info['start']=int(gff_line[3])\n exon_info['stop']=int(gff_line[4])\n exon_info['chr']=gff_line[0]\n exon_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in exons:\n exons[(gff_line[0], tid)].append(exon_info)\n else:\n exons[(gff_line[0], tid)]=[exon_info]\n elif gff_line[2].lower() in ['five_prime_utr']:\n utr5_info, tids=dict(), None\n utr5_info['start']=int(gff_line[3])\n utr5_info['stop']=int(gff_line[4])\n utr5_info['chr']=gff_line[0]\n utr5_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in utr5:\n utr5[(gff_line[0], tid)].append(utr5_info)\n else:\n utr5[(gff_line[0], tid)]=[utr5_info]\n elif gff_line[2].lower() in ['cds']:\n cds_info, tids=dict(), None\n cds_info['start']=int(gff_line[3])\n cds_info['stop']=int(gff_line[4])\n cds_info['chr']=gff_line[0]\n cds_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in cds:\n cds[(gff_line[0], tid)].append(cds_info)\n else:\n cds[(gff_line[0], tid)]=[cds_info]\n elif gff_line[2].lower() in ['three_prime_utr']:\n utr3_info, tids=dict(), None\n utr3_info['start']=int(gff_line[3])\n utr3_info['stop']=int(gff_line[4])\n utr3_info['chr']=gff_line[0]\n utr3_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in utr3:\n utr3[(gff_line[0], tid)].append(utr3_info)\n else:\n utr3[(gff_line[0], tid)]=[utr3_info]\n gff_handle.close()\n return genes, transcripts, exons, utr3, utr5, cds", "def to_seqfeature(feature):\n if isinstance(feature, six.string_types):\n feature = feature_from_line(feature)\n\n qualifiers = {\n \"source\": [feature.source],\n \"score\": [feature.score],\n \"seqid\": [feature.seqid],\n \"frame\": [feature.frame],\n }\n qualifiers.update(feature.attributes)\n return SeqFeature(\n # Convert from GFF 1-based to standard Python 0-based indexing used by\n # BioPython\n FeatureLocation(feature.start - 1, feature.stop),\n id=feature.id,\n type=feature.featuretype,\n 
strand=_biopython_strand[feature.strand],\n qualifiers=qualifiers,\n )", "def load(self, adrmine_tweets, adrmine_annotations):\n\n print(\"Loading ADRMine data from {}...\".format(adrmine_annotations))\n\n self._adrmine_tweets = adrmine_tweets\n self._adrmine_annotations = adrmine_annotations\n\n num_missing_tweets = 0\n self._tweets_dict = {}\n with open(self._adrmine_tweets) as f:\n for line in f:\n # each line contains 4 fields, tab-separated:\n # tweet ID, user ID, text ID and Tweet text\n (tweetID, userID, textID, tweetText) = line.rstrip().split('\\t')\n self._tweets_dict[textID] = tweetText\n\n self._annotations_dict = {}\n adrmine_orig_annotations = 0\n num_usable_annotations = 0\n with open(self._adrmine_annotations) as f:\n for line in f:\n # each line contains 5 fields, tab-separated:\n # text ID, start offset, end offset, semantic type, annotated text, related drug and target drug.\n (textID, startOffset, endOffset, semanticType, annotatedText, relatedDrug, targetDrug) = line.rstrip().split('\\t')\n\n if textID in self._tweets_dict:\n if textID not in self._annotations_dict:\n self._annotations_dict[textID] = []\n\n self._annotations_dict[textID].append({'semanticType': semanticType,\n 'startOffset': startOffset,\n 'endOffset': endOffset,\n 'annotatedText': annotatedText})\n num_usable_annotations += 1\n else:\n #print(\"TextID {} does not have a corresponding tweet\".format(textID))\n num_missing_tweets += 1\n\n adrmine_orig_annotations += 1\n\n self._validate_annotations()\n\n print(\" Number of original annotations: {}\".format(adrmine_orig_annotations))\n print(\" Number of missing tweets: {}\".format(num_missing_tweets))\n print(\" Number of usable annotations: {}\".format(num_usable_annotations))\n\n return (self._annotations_dict, self._tweets_dict)", "def parse(record):\n\n #Extract individual parts of the FASTA record\n\n identifier = record.id #The sequence's Id\n sequence = record.seq #The sequence itself\n sequence = sequence.upper() #Turns all the nucleotides to upper case\n\n return identifier, sequence", "def premrna_desc(gff3, fasta):\n seqs = {}\n for defline, seq in LocusPocus.fasta.parse(fasta):\n seqid = defline[1:].split(' ')[0]\n if seqid not in seqs:\n seqs[seqid] = seq\n\n mrnaacc = ''\n mrnalen = 0\n gccontent = 0.0\n gcskew = 0.0\n ncontent = 0.0\n exoncount = 0\n introncount = 0\n utr5plen = 0\n utr3plen = 0\n for entry in gff3:\n if '\\tmRNA\\t' in entry:\n fields = entry.rstrip().split('\\t')\n assert len(fields) == 9\n mrnaacc = re.search(r'accession=([^;\\n]+)', fields[8]).group(1)\n mrnalen = int(fields[4]) - int(fields[3]) + 1\n mrnaseq = seqs[mrnaacc]\n if len(mrnaseq) != mrnalen:\n message = 'pre-mRNA \"%s\": length mismatch' % mrnaacc\n message += ' (gff3=%d, fa=%d)' % (mrnalen, len(mrnaseq))\n message += '; most likely a duplicated accession, discarding'\n print(message, file=sys.stderr)\n mrnaacc = ''\n gccontent = gc_content(mrnaseq)\n gcskew = gc_skew(mrnaseq)\n ncontent = n_content(mrnaseq)\n elif '\\texon\\t' in entry:\n exoncount += 1\n elif '\\tintron\\t' in entry:\n introncount += 1\n elif '\\tfive_prime_UTR\\t' in entry:\n fields = entry.rstrip().split('\\t')\n assert len(fields) == 9\n utr5plen += int(fields[4]) - int(fields[3]) + 1\n elif '\\tthree_prime_UTR\\t' in entry:\n fields = entry.rstrip().split('\\t')\n assert len(fields) == 9\n utr3plen += int(fields[4]) - int(fields[3]) + 1\n elif entry.startswith('###'):\n if mrnaacc != '':\n values = '%s %d %.3f %.3f %.3f %d %d %d %d' % (\n mrnaacc, mrnalen, gccontent, gcskew, 
ncontent,\n exoncount, introncount, utr5plen, utr3plen)\n yield values.split(' ')\n mrnaacc = ''\n mrnalen = 0\n gccontent = 0.0\n gcskew = 0.0\n ncontent = 0.0\n exoncount = 0\n exonlen = 0\n introncount = 0\n utr5plen = 0\n utr3plen = 0", "def parse_cds_seqfeature(seqfeature):\n cds_ftr = cds.Cds()\n cds_ftr.seqfeature = seqfeature\n\n try:\n locus_tag = seqfeature.qualifiers[\"locus_tag\"][0]\n except:\n locus_tag = \"\"\n finally:\n cds_ftr.set_locus_tag(locus_tag, delimiter=None)\n\n cds_ftr.set_orientation(seqfeature.strand, \"fr_short\", case = True)\n cds_ftr.start, cds_ftr.stop, cds_ftr.parts = parse_coordinates(seqfeature)\n\n # Coordinate format for GenBank flat file features parsed by Biopython\n # are 0-based half open intervals.\n cds_ftr.coordinate_format = \"0_half_open\"\n\n # For translation, convert it to a Biopython Seq object.\n try:\n translation = seqfeature.qualifiers[\"translation\"][0]\n except:\n translation = \"\"\n finally:\n translation = Seq(translation, Alphabet.IUPAC.protein)\n cds_ftr.set_translation(translation)\n\n cds_ftr.set_nucleotide_length(translation=True)\n\n try:\n translation_table = seqfeature.qualifiers[\"transl_table\"][0]\n except:\n translation_table = 0\n finally:\n cds_ftr.set_translation_table(translation_table)\n\n try:\n product = seqfeature.qualifiers[\"product\"][0]\n except:\n product = \"\"\n finally:\n cds_ftr.set_description_field(\"product\", product)\n\n try:\n function = seqfeature.qualifiers[\"function\"][0]\n except:\n function = \"\"\n finally:\n cds_ftr.set_description_field(\"function\", function)\n\n try:\n note = seqfeature.qualifiers[\"note\"][0]\n except:\n note = \"\"\n finally:\n cds_ftr.set_description_field(\"note\", note)\n\n try:\n gene = seqfeature.qualifiers[\"gene\"][0]\n except:\n gene = \"\"\n finally:\n cds_ftr.set_gene(gene)\n\n cds_ftr.set_name()\n return cds_ftr", "def read_transcript_data(fn):\n\n def _read_lines(fn):\n # NC_000007.13\tRefSeq\tcDNA_match\t50344265\t50344518\t254\t+\t.\tID=aln58042;Target=NM_001220765.2 1 254 +;gap_count=0;identity=0.0691326;idty=1;num_ident=428;num_mismatch=0;pct_coverage=6.91326;pct_identity_gap=100;pct_identity_ungap=100;score=254\n # NC_000002.11 RefSeq cDNA_match 179671939 179672150 212 - . 
ID=ed951d46-194c-477a-a480-4bc64530c5ba;Target=NM_001267550.2 1 212 +;gap_count=0;identity=0.999991;idty=1;num_ident=109223;num_mismatch=1;pct_coverage=100;pct_identity_gap=99.9991;pct_identity_ungap=99.9991\n line_re = re.compile(\n \"(?P<ref_ac>\\S+)\\s+(?P<origin>\\S+)\\s+(?P<match_type>\\S+)\\s+\"\n \"(?P<g_start>\\d+)\\s+(?P<g_end>\\d+)\\s+(?P<score>\\S+)\\s+\"\n \"(?P<strand>[-+])\\s+\\.\\s+ID=(?P<aln>[^;]+);Target=(?P<tx_ac>\\S+)\"\n \"\\s+(?P<tx_start>\\d+)\\s+(?P<tx_end>\\d+).+?\"\n \"pct_coverage=(?P<pct_coverage>[^;]+);\"\n \"pct_identity_gap=(?P<pct_identity_gap>[^;]+);\"\n \"pct_identity_ungap=(?P<pct_identity_ungap>[^;]+)\"\n )\n fh = io.open(fn, \"rb\")\n while fh.peek(1)[0] == \"#\":\n fh.readline()\n while fh.peek(3)[0:3] != \"###\":\n line = fh.readline()\n try:\n yield line_re.match(line).groupdict()\n except AttributeError:\n raise Exception(\"Failed at\", line)\n raise StopIteration\n def _key(e):\n return (e[\"tx_ac\"], not e[\"ref_ac\"].startswith(\"NC_\"), e[\"ref_ac\"], e[\"aln\"])\n return itertools.groupby(sorted(_read_lines(fn), key=_key), key=_key)", "def read(self, featureType='miRNA_primary_transcript'):\n logger.info('Reading %s' % self.fileName)\n self.fileHandle = open(self.fileName, 'r+b')\n bytePosition = self.fileHandle.tell()\n for line in self.fileHandle:\n row = line.decode('utf-8').rstrip().split(\"\\t\")\n if not row[0].startswith(\"#\") and row[2] == featureType:\n attributes = row[-1].split(\";\")\n for attribute in attributes:\n if attribute.startswith('Name'):\n mirbase_name = attribute.split(\"=\")[-1]\n self.features[mirbase_name] = bytePosition\n bytePosition = self.fileHandle.tell()\n self.fileHandle.close()\n logger.debug('Reading %s finished' % self.fileName)", "def parse_rosalind(filename):\n print \"parse_rosalind should be called parse_fasta\"\n return parse_fasta(filename)", "def parse(self, handle, do_features=...): # -> SeqRecord | None:\n ...", "def parse_rttm_for_ms_targets(self, sample):\n rttm_lines = open(sample.rttm_file).readlines()\n uniq_id = self.get_uniq_id_with_range(sample)\n rttm_timestamps = extract_seg_info_from_rttm(uniq_id, rttm_lines)\n fr_level_target = assign_frame_level_spk_vector(\n rttm_timestamps, self.round_digits, self.frame_per_sec, target_spks=sample.target_spks\n )\n seg_target, base_clus_label = self.get_diar_target_labels(uniq_id, sample, fr_level_target)\n clus_label_index, scale_mapping = self.assign_labels_to_longer_segs(uniq_id, base_clus_label)\n return clus_label_index, seg_target, scale_mapping", "def read_fasta_file(fasta):\n\n ptn_list = []\n fasta_content = open(fasta, \"r\")\n new_ptn = None\n for line in fasta_content:\n if \">sp\" in line or \">tr\" in line:\n if new_ptn != None:\n new_ptn[\"seq\"] = sequence\n ptn_list.append(new_ptn)\n tokens = line.split()\n new_ptn = {\"id\": tokens[0] }\n sequence = \"\"\n else:\n sequence += line[:-1]\n new_ptn[\"seq\"] = sequence\n ptn_list.append(new_ptn)\n\n return ptn_list", "def get_aa (tRNA):\n\tpass", "def parse_sequence(sequence):\n return FastaEntry.from_text(sequence)", "def RNAorDNA ( seq ) :\n\tif dna_regex . search ( seq ):\n\t\treturn RNA ( seq )\n\n\tif rna_regex . 
search ( seq ):\n\t\treturn DNA ( seq )", "def _parse_alignment( alignment ):\n log.info(\"Parsing subread locations from alignment data\")\n locations = {}\n for entry in BlasrReader( alignment ):\n if entry.tstrand == '1':\n start = int(entry.tlength) - int(entry.tend)\n end = int(entry.tlength) - int(entry.tstart)\n else:\n start = int(entry.tstart)\n end = int(entry.tend)\n locations[entry.qname] = (start, end)\n return locations", "def RNAparser(text):\n upper_text = text.upper()\n RNAsequence = \"\"\n # Splits text into an array of no whitespace text\n no_space_text_array = upper_text.split()\n # Parse through all the text in the text within the array, adding nucleotide letters to RNA sequence\n for no_space_sequence in no_space_text_array:\n for letter in no_space_sequence:\n if isRNANucleotide(letter):\n RNAsequence += letter\n # If there exists invalid RNA nucleotide, then the text file must not be a RNA sequence\n if not isRNANucleotide(letter):\n return \"\"\n # Otherwise return a RNA sequence with no blank spaces\n return RNAsequence", "def ref_lamanno(\n fasta_path,\n gtf_path,\n cdna_path,\n intron_path,\n index_path,\n t2g_path,\n cdna_t2c_path,\n intron_t2c_path,\n temp_dir='tmp',\n overwrite=False,\n):\n results = {}\n if not os.path.exists(index_path) or overwrite:\n fasta_path = decompress_file(fasta_path, temp_dir=temp_dir)\n sorted_fasta_path, fasta_chromosomes = sort_fasta(\n fasta_path, os.path.join(temp_dir, SORTED_FASTA_FILENAME)\n )\n gtf_path = decompress_file(gtf_path, temp_dir=temp_dir)\n sorted_gtf_path, gtf_chromosomes = sort_gtf(\n gtf_path, os.path.join(temp_dir, SORTED_GTF_FILENAME)\n )\n logger.info('Splitting genome into cDNA at {}'.format(cdna_path))\n chromosomes = check_chromosomes(fasta_chromosomes, gtf_chromosomes)\n cdna_fasta_path = generate_cdna_fasta(\n sorted_fasta_path,\n sorted_gtf_path,\n cdna_path,\n chromosomes=chromosomes\n )\n results.update({'cdna_fasta': cdna_fasta_path})\n logger.info(\n 'Creating cDNA transcripts-to-capture at {}'.format(cdna_t2c_path)\n )\n cdna_t2c_result = create_t2c(cdna_fasta_path, cdna_t2c_path)\n results.update({'cdna_t2c': cdna_t2c_result['t2c']})\n logger.info('Splitting genome into introns at {}'.format(intron_path))\n intron_fasta_path = generate_intron_fasta(\n sorted_fasta_path,\n sorted_gtf_path,\n intron_path,\n chromosomes=chromosomes\n )\n results.update({'intron_fasta': intron_fasta_path})\n logger.info(\n 'Creating intron transcripts-to-capture at {}'.\n format(cdna_t2c_path)\n )\n intron_t2c_result = create_t2c(intron_fasta_path, intron_t2c_path)\n results.update({'intron_t2c': intron_t2c_result['t2c']})\n logger.info('Concatenating cDNA and intron FASTAs')\n combined_path = concatenate_files(\n cdna_fasta_path,\n intron_fasta_path,\n out_path=os.path.join(temp_dir, COMBINED_FILENAME),\n temp_dir=temp_dir\n )\n t2g_result = create_t2g_from_fasta(combined_path, t2g_path)\n results.update(t2g_result)\n index_result = kallisto_index(combined_path, index_path)\n results.update(index_result)\n else:\n logger.info(\n 'Skipping kallisto index because {} already exists. 
Use the --overwrite flag to overwrite.'\n .format(index_path)\n )\n\n return results", "def read_tcr_data(\n organism,\n contig_annotations_csvfile,\n consensus_annotations_csvfile,\n include_gammadelta = False,\n allow_unknown_genes = False,\n verbose = False\n):\n from all_genes import all_genes\n\n expected_gene_names = all_genes[organism].keys()\n\n #from cdr3s_human import all_align_fasta\n\n gene_suffix = '*01' # may not be used\n\n\n # read the contig annotations-- map from clonotypes to barcodes\n # barcode,is_cell,contig_id,high_confidence,length,chain,v_gene,d_gene,j_gene,c_gene,full_length,productive,cdr3,cdr3_nt,reads,umis,raw_clonotype_id,raw_consensus_id\n # AAAGATGGTCTTCTCG-1,True,AAAGATGGTCTTCTCG-1_contig_1,True,695,TRB,TRBV5-1*01,TRBD2*02,TRBJ2-3*01,TRBC2*01,True,True,CASSPLAGYAADTQYF,TGCGCCAGCAGCCCCCTAGCGGGATACGCAGCAGATACGCAGTATTTT,9427,9,clonotype14,clonotype14_consensus_1\n assert exists( contig_annotations_csvfile )\n\n _, lines = parse_csv_file(contig_annotations_csvfile)\n clonotype2barcodes = {}\n clonotype2tcrs_backup = {} ## in case we dont have a consensus_annotations_csvfile\n for l in lines:\n bc = l['barcode']\n clonotype = l['raw_clonotype_id']\n if clonotype =='None':\n if l['productive'] not in [ 'None','False' ]:\n assert l['productive'] == 'True'\n #print 'clonotype==None: unproductive?',l['productive']\n continue\n if clonotype not in clonotype2barcodes:\n clonotype2barcodes[clonotype] = []\n if bc in clonotype2barcodes[clonotype]:\n pass\n #print 'repeat barcode'\n else:\n clonotype2barcodes[clonotype].append( bc )\n\n if not clonotype:\n print 'empty clonotype id:', l\n continue\n assert clonotype\n ## experimenting here ########################################3\n if l['productive'].lower() != 'true':\n continue\n if l['cdr3'].lower() == 'none' or l['cdr3_nt'].lower() == 'none':\n continue\n\n chain = l['chain']\n if chain not in ['TRA','TRB']:\n continue\n ab = chain[2]\n if clonotype not in clonotype2tcrs_backup:\n clonotype2tcrs_backup[ clonotype ] = {'A':Counter(), 'B':Counter() }\n # stolen from below\n vg = l['v_gene']\n if '*' not in vg:\n vg += gene_suffix\n if 'DV' in vg and vg not in expected_gene_names:\n #print 'DV?',vg\n vg = vg[:vg.index('DV')]+'/'+vg[vg.index('DV'):]\n jg = l['j_gene']\n if '*' not in jg:\n jg += gene_suffix\n\n if vg not in expected_gene_names:\n print 'unrecognized V gene:', organism, vg\n if not allow_unknown_genes:\n continue\n if jg not in expected_gene_names:\n print 'unrecognized J gene:', organism, jg\n if not allow_unknown_genes:\n continue\n #assert vg in all_align_fasta[organism]\n #assert jg in all_align_fasta[organism]\n\n tcr_chain = ( vg, jg, l['cdr3'], l['cdr3_nt'].lower() )\n\n clonotype2tcrs_backup[clonotype][ab][tcr_chain] += int(l['umis'])\n\n for id in clonotype2tcrs_backup:\n for ab in 'AB':\n for t1,count1 in clonotype2tcrs_backup[id][ab].iteritems():\n for t2, count2 in clonotype2tcrs_backup[id][ab].iteritems():\n if t2<=t1:continue\n if t1[3] == t2[3]:\n print 'repeat??', count1, count2, t1, t2\n\n\n\n if consensus_annotations_csvfile is None:\n clonotype2tcrs = clonotype2tcrs_backup\n else:\n\n ## now read details on the individual chains for each clonotype\n # ==> tcr/human/JCC176_TX2_TCR_consensus_annotations.csv <==\n # clonotype_id,consensus_id,length,chain,v_gene,d_gene,j_gene,c_gene,full_length,productive,cdr3,cdr3_nt,reads,umis\n # 
clonotype100,clonotype100_consensus_1,550,TRB,TRBV24-1*01,TRBD1*01,TRBJ2-7*01,TRBC2*01,True,True,CATSDPGQGGYEQYF,TGTGCCACCAGTGACCCCGGACAGGGAGGATACGAGCAGTACTTC,8957,9\n\n assert exists(consensus_annotations_csvfile)\n _, lines = parse_csv_file( consensus_annotations_csvfile )\n\n\n ## first get clonotypes with one alpha and one beta\n clonotype2tcrs = {}\n\n for l in lines:\n if l['productive'] == 'True':\n id = l['clonotype_id']\n if id not in clonotype2tcrs:\n # dictionaries mapping from tcr to umi-count\n clonotype2tcrs[id] = { 'A':Counter(), 'B':Counter() } #, 'G':[], 'D': [] }\n assert id in clonotype2barcodes\n\n ch = l['chain']\n if not ch.startswith('TR'):\n print 'skipline:', consensus_annotations_csvfile, ch, l['v_gene'], l['j_gene']\n continue\n ab = ch[2]\n if ab not in 'AB':\n print 'skipline:', consensus_annotations_csvfile, ch, l['v_gene'], l['j_gene']\n continue\n vg = l['v_gene']\n if '*' not in vg:\n vg += gene_suffix\n if 'DV' in vg and vg not in expected_gene_names:\n #print 'DV?',vg\n vg = vg[:vg.index('DV')]+'/'+vg[vg.index('DV'):]\n jg = l['j_gene']\n if '*' not in jg:\n jg += gene_suffix\n # if vg in tcr_gene_remap[organism]:\n # vg = tcr_gene_remap[organism][vg]\n # if jg in tcr_gene_remap[organism]:\n # jg = tcr_gene_remap[organism][jg]\n\n if vg not in expected_gene_names:\n print 'unrecognized V gene:', organism, vg\n if not allow_unknown_genes:\n continue\n if jg not in expected_gene_names:\n print 'unrecognized J gene:', organism, jg\n if not allow_unknown_genes:\n continue\n #assert vg in all_align_fasta[organism]\n #assert jg in all_align_fasta[organism]\n tcr_chain = ( vg, jg, l['cdr3'], l['cdr3_nt'].lower() )\n\n if tcr_chain not in clonotype2tcrs[id][ab]:\n umis = int( l['umis'] )\n clonotype2tcrs[id][ab][ tcr_chain ] = umis\n old_umis = clonotype2tcrs_backup[id][ab][tcr_chain]\n if umis != old_umis:\n print 'diff_umis:',umis, old_umis, id,ab,tcr_chain\n else:\n print 'repeat?',id,ab,tcr_chain\n else:\n if l['productive'] not in [ 'None','False' ]:\n print 'unproductive?',l['productive']\n\n\n if verbose:\n idl1 = sorted( clonotype2tcrs_backup.keys())\n idl2 = sorted( clonotype2tcrs.keys())\n print 'same ids:', len(idl1), len(idl2), idl1==idl2\n for id in clonotype2tcrs_backup:\n if id in clonotype2tcrs:\n for ab in 'AB':\n tl1 = sorted(clonotype2tcrs_backup[id][ab].keys())\n tl2 = sorted(clonotype2tcrs[id][ab].keys())\n if tl1 != tl2:\n print 'diffids:',id,ab,tl1,tl2\n\n\n return clonotype2tcrs, clonotype2barcodes", "def _read_gtf(gtf):\n if not gtf:\n return gtf\n db = defaultdict(list)\n with open(gtf) as in_handle:\n for line in in_handle:\n if line.startswith(\"#\"):\n continue\n cols = line.strip().split(\"\\t\")\n name = [n.split(\"=\")[1] for n in cols[-1].split(\";\") if n.startswith(\"Name\")]\n chrom, start, end, strand = cols[0], cols[3], cols[4], cols[6]\n if cols[2] == \"miRNA_primary_transcript\":\n db[name[0]].append([chrom, int(start), int(end), strand])\n return db", "def parseFasta(self, fastaRef):\n\n seq = \"\"\n prevId = \"\"\n with open(fastaRef, 'r') as f:\n\n for line in f:\n if \">\" == line[0]:\n # asserting the regex don't fail...\n found = GENEIDRULE.search(line)\n if(found):\n alternate = found.group(1)\n geneName = found.group(2)\n self._transcripts[alternate] = geneName\n else:\n print(\"EnsemblFasta: NOT FOUND\")\n print(line)\n exit()\n\n if(prevId and seq):\n geneName = self._transcripts[prevId]\n if geneName in self._genes:\n gene = self._genes[geneName]\n else:\n gene = Gene(geneName)\n self._genes[geneName] = gene\n\n 
gene.addTranscripts(prevId, seq)\n seq = \"\"\n prevId = alternate\n else:\n seq += line.rstrip(\"\\n\")\n gene.addTranscripts(prevId, seq)", "def _mint_trna_annotation(data):\n trna_lookup = op.join(dd.get_srna_mint_lookup(data))\n trna_space = op.join(dd.get_srna_mint_space(data))\n trna_other = op.join(dd.get_srna_mint_other(data))\n name = dd.get_sample_name(data)\n work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), \"trna_mint\", name))\n in_file = op.basename(data[\"clean_fastq\"])\n mintmap = os.path.realpath(os.path.join(os.path.dirname(sys.executable), \"MINTmap.pl\"))\n perl_export = utils.get_perl_exports()\n if not file_exists(trna_lookup) or not file_exists(mintmap):\n logger.info(\"There is no tRNA annotation to run MINTmap.\")\n return work_dir\n jar_folder = os.path.join(os.path.dirname(mintmap), \"MINTplates\")\n out_file = op.join(work_dir, name + \"-MINTmap_v1-exclusive-tRFs.expression.txt\")\n if not file_exists(out_file):\n with tx_tmpdir(data) as txdir:\n with utils.chdir(txdir):\n utils.symlink_plus(data[\"clean_fastq\"], op.join(txdir, in_file))\n cmd = (\"{perl_export} && {mintmap} -f {in_file} -p {name} \"\n \"-l {trna_lookup} -s {trna_space} -j {jar_folder} \"\n \"-o {trna_other}\").format(**locals())\n do.run(cmd, \"tRNA for %s\" % name)\n for filename in glob.glob(\"*MINTmap*\"):\n shutil.move(filename, work_dir)\n return work_dir", "def convert(self) -> dict:\n def get_triples_linker():\n \"\"\"\n Retrieve all of the triples linking phrases from the AMR object.\n We use the Framenet words that are found in the AMR Object as the linker.\n \"\"\"\n triples_linkers = []\n concepts = list(self.amr_obj.concepts())\n\n # Retrieve all concept that has the word ARG in it\n for concept in concepts:\n triple = self.amr_obj.triples(head=concept[0])\n items = [item for item in triple if 'ARG' in item[1]]\n if len(items) > 0:\n triples_linkers.append(triple)\n return triples_linkers\n\n def generate_triples():\n\n def fixing_annotation(key, n):\n \"\"\"\n Fixing some inconsistency in the annotation\n \"\"\"\n if key + '.' + n not in self.propbank:\n key = key.replace('-', '_')\n return key + '.' 
+ n\n\n def is_agent(f_rel, rel_var):\n \"\"\"\n Checking whether the role is an agent (denoted by 'pag') or not\n \"\"\"\n # TODO: beside 'pag' is there any other role?\n m = re.match(r'(.*)-(\\d*)$', rel_var)\n key = m.group(1)\n n = m.group(2)\n\n # some annotation does not have the correspondence frameset, just put false if found\n if n == '00':\n return False\n\n concept = fixing_annotation(key, n)\n roleset = self.propbank[concept]\n\n m = re.match(r':ARG(.).*', f_rel[1])\n n = int(m.group(1))\n roles = roleset.getElementsByTagName('role')\n\n for role in roles:\n if dict(role.attributes)['n'].value == str(n) and dict(role.attributes)['f'].value.lower() == 'pag':\n return True\n return False\n\n # Case 1: ARG\n for triple_linker in self.triples_linkers:\n triple = [None, triple_linker[0][0], []]\n for rel in triple_linker:\n if 'ARG' in rel[1] and 'of' not in rel[1]:\n # check whether the propbank verb rel[0] and its argument rel[2] is an agent or not\n if is_agent(rel, self.var2c[rel[0]].__str__()):\n triple[0] = rel[2]\n else:\n triple[2].append(rel[2])\n if not (triple[0] is None and triple[2] == []):\n self.triples[triple[1]] = triple\n\n # Case 2: ARG-of\n for triple_linker in self.triples_linkers:\n for rel in triple_linker:\n if 'ARG' in rel[1] and 'of' in rel[1]:\n if rel[2] not in self.triples:\n self.triples[rel[2]] = [None, rel[2], []]\n if is_agent(rel, self.var2c[rel[2]].__str__()):\n self.triples[rel[2]][0] = rel[0]\n else:\n self.triples[rel[2]][2].append(rel[0])\n return self.triples\n\n self.triples_linkers = get_triples_linker()\n return generate_triples()" ]
[ "0.71053255", "0.57508117", "0.56290597", "0.55088407", "0.5436147", "0.53689307", "0.53286886", "0.52768457", "0.52746665", "0.5266213", "0.5193937", "0.5161827", "0.5149462", "0.51487356", "0.5133959", "0.50579363", "0.5048437", "0.5045914", "0.50262564", "0.5013615", "0.50037795", "0.4997408", "0.49923876", "0.497598", "0.49742824", "0.49639753", "0.49437067", "0.49329597", "0.4926145", "0.48897314" ]
0.7405606
0
Parses a Biopython Source SeqFeature.
def parse_source_seqfeature(seqfeature): src_ftr = source.Source() src_ftr.seqfeature = seqfeature start, stop, parts = parse_coordinates(seqfeature) src_ftr.start = start src_ftr.stop = stop try: src_ftr.organism = str(seqfeature.qualifiers["organism"][0]) except: src_ftr.organism = "" try: src_ftr.host = str(seqfeature.qualifiers["host"][0]) except: src_ftr.host = "" try: src_ftr.lab_host = str(seqfeature.qualifiers["lab_host"][0]) except: src_ftr.lab_host = "" src_ftr.parse_organism() src_ftr.parse_host() src_ftr.parse_lab_host() return src_ftr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_seqfeature(feature):\n if isinstance(feature, six.string_types):\n feature = feature_from_line(feature)\n\n qualifiers = {\n \"source\": [feature.source],\n \"score\": [feature.score],\n \"seqid\": [feature.seqid],\n \"frame\": [feature.frame],\n }\n qualifiers.update(feature.attributes)\n return SeqFeature(\n # Convert from GFF 1-based to standard Python 0-based indexing used by\n # BioPython\n FeatureLocation(feature.start - 1, feature.stop),\n id=feature.id,\n type=feature.featuretype,\n strand=_biopython_strand[feature.strand],\n qualifiers=qualifiers,\n )", "def from_seqfeature(s, **kwargs):\n source = s.qualifiers.get(\"source\", \".\")[0]\n score = s.qualifiers.get(\"score\", \".\")[0]\n seqid = s.qualifiers.get(\"seqid\", \".\")[0]\n frame = s.qualifiers.get(\"frame\", \".\")[0]\n strand = _feature_strand[s.strand]\n\n # BioPython parses 1-based GenBank positions into 0-based for use within\n # Python. We need to convert back to 1-based GFF format here.\n start = s.location.start.position + 1\n stop = s.location.end.position\n featuretype = s.type\n id = s.id\n attributes = dict(s.qualifiers)\n attributes.pop(\"source\", \".\")\n attributes.pop(\"score\", \".\")\n attributes.pop(\"seqid\", \".\")\n attributes.pop(\"frame\", \".\")\n return Feature(\n seqid,\n source,\n featuretype,\n start,\n stop,\n score,\n strand,\n frame,\n attributes,\n id=id,\n **kwargs\n )", "def parse_cds_seqfeature(seqfeature):\n cds_ftr = cds.Cds()\n cds_ftr.seqfeature = seqfeature\n\n try:\n locus_tag = seqfeature.qualifiers[\"locus_tag\"][0]\n except:\n locus_tag = \"\"\n finally:\n cds_ftr.set_locus_tag(locus_tag, delimiter=None)\n\n cds_ftr.set_orientation(seqfeature.strand, \"fr_short\", case = True)\n cds_ftr.start, cds_ftr.stop, cds_ftr.parts = parse_coordinates(seqfeature)\n\n # Coordinate format for GenBank flat file features parsed by Biopython\n # are 0-based half open intervals.\n cds_ftr.coordinate_format = \"0_half_open\"\n\n # For translation, convert it to a Biopython Seq object.\n try:\n translation = seqfeature.qualifiers[\"translation\"][0]\n except:\n translation = \"\"\n finally:\n translation = Seq(translation, Alphabet.IUPAC.protein)\n cds_ftr.set_translation(translation)\n\n cds_ftr.set_nucleotide_length(translation=True)\n\n try:\n translation_table = seqfeature.qualifiers[\"transl_table\"][0]\n except:\n translation_table = 0\n finally:\n cds_ftr.set_translation_table(translation_table)\n\n try:\n product = seqfeature.qualifiers[\"product\"][0]\n except:\n product = \"\"\n finally:\n cds_ftr.set_description_field(\"product\", product)\n\n try:\n function = seqfeature.qualifiers[\"function\"][0]\n except:\n function = \"\"\n finally:\n cds_ftr.set_description_field(\"function\", function)\n\n try:\n note = seqfeature.qualifiers[\"note\"][0]\n except:\n note = \"\"\n finally:\n cds_ftr.set_description_field(\"note\", note)\n\n try:\n gene = seqfeature.qualifiers[\"gene\"][0]\n except:\n gene = \"\"\n finally:\n cds_ftr.set_gene(gene)\n\n cds_ftr.set_name()\n return cds_ftr", "def parser(raw_seq, date):\n taxon_id = int(raw_seq.features[0].qualifiers['db_xref'][0][6:])\n organism = raw_seq.annotations['organism']\n accession = raw_seq.annotations['accessions'][0]\n gene = []\n records = []\n frag_type = 'whole'\n begin = 1\n end = len(raw_seq)\n sequence = str(raw_seq.seq)\n name = organism\n strand = 1\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n records.append(rec)\n for i in raw_seq.features:\n if i.type == 'gene' and 
'gene' in i.qualifiers:\n if i.location_operator != 'join':\n frag_type = 'gene'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n name = str(i.qualifiers['gene'][0])\n strand = str(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.location_operator == 'join':\n frag_type = 'gene'\n begin = int(i.sub_features[0].location.start)\n end = int(i.sub_features[0].location.end)\n name = str(i.qualifiers['gene'][0])\n strand = str(i.location.strand)\n sequence = ''\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n gene.append(rec)\n begin = int(i.sub_features[1].location.start)\n end = int(i.sub_features[1].location.end)\n sequence = ''.join([str(raw_seq.seq[begin:end]), str(raw_seq.seq[begin:end])])\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'CDS' and 'gene' in i.qualifiers:\n frag_type = 'cds'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n name = str(i.qualifiers['gene'][0]).replace(' ', '_')\n strand = str(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'tRNA' and 'gene' in i.qualifiers:\n frag_type = 'tRNA'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n if len(sequence) >= 100:\n sequence = ''\n name = str(i.qualifiers['gene'][0]).replace(' ', '_')\n strand = str(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'rRNA':\n frag_type = 'rRNA'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n name = str(i.qualifiers['product'][0]).replace(' ', '_')\n strand = str(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'exon' and 'gene' in i.qualifiers:\n frag_type = 'exon'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n if 'number' in i.qualifiers:\n name = '{0}_exon_{1}'.format(i.qualifiers['gene'][0],\n i.qualifiers['number'][0])\n else:\n name = '{0}_exon'.format(i.qualifiers['gene'][0])\n strand = int(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'intron' and 'gene' in i.qualifiers:\n frag_type = 'intron'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n strand = str(i.location.strand)\n if 'number' in i.qualifiers:\n name = '{0}_{1}_intron'.format(i.qualifiers['gene'][0],\n i.qualifiers['number'][0])\n else:\n name = '{0}_intron'.format(i.qualifiers['gene'][0])\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n\n records.append(rec)\n gene.sort(key=lambda x: x[5])\n\n for i in range(len(gene) - 1):\n frag_type = 'spacer'\n now = gene[i]\n then = gene[i + 1]\n tail = now[6] + 1\n head = then[5] - 1\n sequence = str(raw_seq.seq[tail:head])\n name = '{0}-{1}_spacer'.format(now[3], then[3])\n strand = 0\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n records.append(rec)\n records.extend(gene)\n\n database.extend(records)", "def parse_feature(self, feature_key, lines):\n ...", "def 
parse_trna_seqfeature(seqfeature):\n trna_ftr = trna.Trna()\n trna_ftr.seqfeature = seqfeature\n\n try:\n locus_tag = seqfeature.qualifiers[\"locus_tag\"][0]\n except (KeyError, IndexError):\n locus_tag = \"\"\n finally:\n trna_ftr.set_locus_tag(locus_tag, delimiter=None)\n\n trna_ftr.set_orientation(seqfeature.strand, \"fr_short\", True)\n trna_ftr.start, trna_ftr.stop, trna_ftr.parts = parse_coordinates(seqfeature)\n\n # Coordinate format for GenBank flat file features parsed by Biopython\n # are 0-based half open intervals.\n trna_ftr.coordinate_format = \"0_half_open\"\n\n trna_ftr.set_nucleotide_length(use_seq=True)\n\n try:\n product = seqfeature.qualifiers[\"product\"][0]\n except (KeyError, IndexError):\n product = \"\"\n finally:\n trna_ftr.product = product\n\n try:\n note = seqfeature.qualifiers[\"note\"][0]\n except (KeyError, IndexError):\n note = \"\"\n finally:\n trna_ftr.note = note\n\n try:\n gene = seqfeature.qualifiers[\"gene\"][0]\n except (KeyError, IndexError):\n gene = \"\"\n finally:\n trna_ftr.gene = gene\n\n trna_ftr.set_name()\n return trna_ftr", "def parse_cds_features(self, handle, alphabet=..., tags2id=...):\n ...", "def read_fasta(src, remove_gaps=False):\n file_obj = None\n if isinstance(src, str):\n try:\n file_obj = open(src, \"r\")\n except IOError:\n print((\"The file `%s` does not exist, exiting gracefully\" % src))\n elif isinstance(src, filetypes):\n file_obj = src\n else:\n raise TypeError('FASTA reader cannot recognize the source of %s, %s, %s' % (src,type(src),isinstance(src, filetypes)))\n name = None\n seq_list = list()\n for line_number, i in enumerate(file_obj):\n if i.startswith('>'):\n if name:\n if remove_gaps:\n yield name, ''.join(seq_list).replace('-', '')\n else:\n yield name, ''.join(seq_list)\n seq_list = list()\n name = i[1:].strip()\n else:\n #seq = ''.join(i.strip().upper().split())\n seq = ''.join(i.strip().split())\n #if not is_sequence_legal(seq):\n # raise Exception(\"Error: illegal characeters in sequence at line %d\" % line_number)\n seq_list.append(seq)\n if name:\n if remove_gaps:\n yield name, ''.join(seq_list).replace('-', '')\n else:\n yield name, ''.join(seq_list)\n if isinstance(src, str):\n file_obj.close()", "def parse_coordinates(seqfeature):\n start_position = None\n stop_position = None\n start = -1\n stop = -1\n parts = 0\n\n if (isinstance(seqfeature.location, FeatureLocation) or \\\n isinstance(seqfeature.location, CompoundLocation)):\n\n if seqfeature.strand is None:\n pass\n elif isinstance(seqfeature.location, FeatureLocation):\n parts = 1\n start_position = seqfeature.location.start\n stop_position = seqfeature.location.end\n elif isinstance(seqfeature.location, CompoundLocation):\n parts = len(seqfeature.location.parts)\n\n # Skip this compound seqfeature if it is comprised of more\n # than two features (tricky to parse).\n if parts == 2:\n\n # Retrieve compound seqfeature positions based on strand.\n if seqfeature.strand == 1:\n start_position = seqfeature.location.parts[0].start\n stop_position = seqfeature.location.parts[1].end\n elif seqfeature.strand == -1:\n start_position = seqfeature.location.parts[1].start\n stop_position = seqfeature.location.parts[0].end\n else:\n pass\n else:\n pass\n else:\n pass\n else:\n pass\n if isinstance(start_position, ExactPosition):\n start = int(start_position)\n if isinstance(stop_position, ExactPosition):\n stop = int(stop_position)\n return (start, stop, parts)", "def parse(self, handle, do_features=...): # -> SeqRecord | None:\n ...", "def 
sequence_to_biopython_record(\n sequence, id=\"<unknown id>\", name=\"<unknown name>\", features=()\n):\n if has_dna_alphabet:\n seq = Seq(sequence, alphabet=DNAAlphabet())\n else:\n seq = Seq(sequence)\n\n return SeqRecord(\n seq=seq,\n id=id,\n name=name,\n features=list(features),\n annotations={\"molecule_type\": \"DNA\"},\n )", "def mapper(line): \n feats = line.strip().split(\",\") \n # labels must be at the beginning for LRSGD\n label = feats[len(feats) - 1] \n feats = feats[: len(feats) - 1]\n feats.insert(0,label)\n features = [ float(feature) for feature in feats ] # need floats\n return np.array(features)", "def gff2FA(annotation, sequence, windows, output):\n df_gff = pd.read_csv(annotation, index_col=False, sep='\\t', header=None, comment=\"#\")\n df_gff.columns = ['seqname', 'source', 'feature', 'start', 'end', 'score', 'strand', 'frame', 'attribute']\n fasta_seq = SeqIO.parse(sequence, 'fasta')\n buffer_seqs = []\n cont = 0\n for record in fasta_seq:\n print(record.id)\n dff_extract = df_gff[df_gff.seqname == record.id]\n for key,val in dff_extract.iterrows():\n clean_seq = ''.join(str(record.seq).splitlines())\n if int(val.start) - windows < 0:\n start = 0\n else:\n start = int(val.start) - windows\n if int(val.end) + windows > len(clean_seq):\n end = len(clean_seq)\n else:\n end = int(val.end) + windows\n new_seq = clean_seq[start:end]\n att = val.attribute\n id = record.id + '_' + str(start) + '_' + str(end)\n desc = \"seq_id:\" + str(record.id)\n desc += \" feature_start:\" + str(val.start)\n desc += \" feature_end:\" + str(val.end)\n desc += \" genome_start:\" + str(start)\n desc += \" genome_end:\" + str(end)\n desc += \" feature:\" + str(val.feature)\n desc += \" attributes:\" + val.attribute\n seq = SeqRecord(Seq(new_seq), id=id, description=desc)\n buffer_seqs.append(seq)\n cont += 1\n if output:\n print('Saving...')\n SeqIO.write(buffer_seqs, output, \"fasta\")\n else:\n return buffer_seqs", "def fasta(path):\n label = None\n sequence = None\n with open(path, 'r') as data:\n for line in data:\n line = line.strip()\n if line.startswith('>'):\n if label and sequence:\n yield (label, sequence)\n label = line[1:]\n sequence = \"\"\n else:\n sequence += line\n\n if label and sequence:\n yield (label, sequence)", "def parse_gff(path):\n fasta = find_fasta(path)\n if not fasta:\n raise FileNotFoundError(f\"Could not find partner FASTA file for {path}\")\n\n # Parse FASTA and create GFFUtils database\n fasta = parse_infile(fasta, \"fasta\")\n gff = gffutils.create_db(\n str(path),\n \":memory:\",\n force=True,\n merge_strategy=\"create_unique\",\n sort_attribute_values=True\n )\n regions = find_regions(gff.directives)\n\n for record in fasta:\n # Normalise Feature location based on ##sequence-region directive.\n # Necessary for extracted GFF3 files that still store coordinates\n # relative to the entire region, not to the extracted FASTA.\n # If no sequence-region directive is found, assumes 1 (i.e. 
sequence start).\n cds, gene = parse_cds_features(\n gff.region(seqid=record.id, featuretype=[\"gene\", \"CDS\"]),\n regions[record.id][0] - 1 if record.id in regions else 0\n )\n if not cds:\n LOG.warning(\"Found no CDS features in %s [%s]\", record.id, path)\n record.features = sorted(\n [*gene, *merge_cds_features(cds)],\n key=lambda f: f.location.start\n )\n\n return fasta", "def parse_tmrna_seqfeature(seqfeature):\n tmrna_ftr = tmrna.Tmrna()\n tmrna_ftr.seqfeature = seqfeature\n\n try:\n locus_tag = seqfeature.qualifiers[\"locus_tag\"][0]\n except (KeyError, IndexError):\n locus_tag = \"\"\n finally:\n tmrna_ftr.set_locus_tag(locus_tag, delimiter=None)\n\n tmrna_ftr.set_orientation(seqfeature.strand, \"fr_short\", True)\n tmrna_ftr.start, tmrna_ftr.stop, tmrna_ftr.parts = parse_coordinates(seqfeature)\n\n # Coordinate format for GenBank flat file features parsed by Biopython\n # are 0-based half open intervals.\n tmrna_ftr.coordinate_format = \"0_half_open\"\n\n tmrna_ftr.set_nucleotide_length(use_seq=True)\n\n try:\n note = seqfeature.qualifiers[\"note\"][0]\n except (KeyError, IndexError):\n note = \"\"\n finally:\n tmrna_ftr.note = note\n\n try:\n gene = seqfeature.qualifiers[\"gene\"][0]\n except (KeyError, IndexError):\n gene = \"\"\n finally:\n tmrna_ftr.gene = gene\n\n tmrna_ftr.set_name()\n return tmrna_ftr", "def loadSeq(self):\n fileName = QtGui.QFileDialog.getOpenFileName( self, self.tr('Open Sequence'), '', \n self.tr('Sequence possibly with SSE predictions (*.seq)') )\n fileName = str(fileName)\n if fileName:\n self.structPred = StructurePrediction.load(fileName, self.app)\n return True\n else : \n return False", "def get_seqrecord_features(phage_genome):\n\n features = []\n for phage_cds in phage_genome.cds_features:\n features.append(phage_cds.seqfeature)\n\n return features", "def numpy_read_features(path):\n import numpy\n # read table as a structured array (each row is a tuple)\n feature_array = numpy.genfromtxt(path, delimiter='\\t', names=True, dtype=None)\n source = feature_array['source']\n target = feature_array['target']\n status = feature_array['status']\n feature_names = numpy.array(feature_array.dtype.names[3: ])\n features = feature_array[feature_names]\n # convert from structured array to normal ndarray\n features = features.view((numpy.float, len(features.dtype.names)))\n return source, target, status, features, feature_names", "def _read(self, source, label_source=None):\n filename = os.path.join(\"data\", \"sources\", source+\".csv\")\n if label_source is None:\n label_filename = os.path.join(\"data\", \"labels\", source + \".columnmap.txt\")\n else:\n label_filename = os.path.join(\"data\", \"labels\", label_source)\n df = pd.read_csv(filename, dtype=str) # read the data source as a DataFrame\n # labels = pd.read_csv(label_filename) # read the semantic labels of columns in df\n\n labels_frame = pd.read_csv(label_filename, na_values=[\"\"], dtype={'column_name': 'str'})\n # dictionary (column_name, class_label)\n labels = labels_frame[['column_name', 'semantic_type']].dropna().set_index('column_name')['semantic_type'].to_dict()\n # logging.info(\"labels:{}\".format(labels))\n\n source_cols = []\n for c in df.columns:\n # label = str(labels.get_value(labels[labels['column_name'] == c].index[0], 'semantic_type')) # extract semantic label of column c\n if c in labels:\n label = labels[c]\n else:\n label = 'unknown'\n col = Column(filename=filename, colname=c, title=label, lines=list(df[c]))\n source_cols.append(col)\n\n return source_cols", "def 
parse_sequence(sequence):\n return FastaEntry.from_text(sequence)", "def parse_prodigal(prodigal_record):\n\tfor rec in prodigal_record:\n\t\t# each one of these records is a feature\n\t\tm = re.match(\">?(\\S+)_(\\d+) # (\\d+) # (\\d+) # (-?\\d+) # ID=([^;]+);\", rec.description)\n\t\tif m:\n\t\t\tname, id_number, start, end, strand, prod_id = m.groups()\n\t\t\tstart = int(start)\n\t\t\tend = int(end)\n\t\t\tstrand = int(strand)\n\t\t\tlocation = SeqFeature.FeatureLocation(start, end, strand)\n\t\t\tsequence = str(rec.seq)\n\t\t\tqualifiers = {'translation': [sequence], 'prodigal_id': prod_id}\n\t\t\t# multiple features go on the same record. This returns the name to keep track of what goes where.\n\t\t\tfeature = SeqFeature.SeqFeature(location=location,\n\t\t\t type=\"CDS\",\n\t\t\t strand=strand,\n\t\t\t id=id_number,\n\t\t\t qualifiers=qualifiers)\n\t\t\tyield name, feature", "def features_from_label(audio_file, segment):\n duration = segment['end'] - segment['start']\n audio, sample_rate = librosa.core.load(\n audio_file,\n duration=duration,\n offset=segment['start']\n )\n features = fe.get_features(audio, sample_rate)\n return features", "def parse_features(self, skip=...):\n ...", "def parse_features(self, skip=...):\n ...", "def _parse_source_model_file(source_model_file):\n parse_args = dict(source=source_model_file)\n\n srcs = []\n for _, element in etree.iterparse(**parse_args):\n if element.tag == NRML04_POINT_SOURCE.text:\n srcs.append(_parse_point_source(element))\n if element.tag == NRML04_AREA_SOURCE.text:\n srcs.append(_parse_area_source(element))\n\n return srcs", "def load_pts_features(path):\n\n #\n # Your code here\n #\n\n pts = [np.empty((123, 2)), np.empty((123, 2))]\n feats = [np.empty((123, 128)), np.empty((123, 128))]\n\n return pts, feats", "def structure_parse(source):\r\n return structure_grammar().parseString(source)", "def read_ptbtagged(ptbtagged_path: str) -> Iterator[Tuple[TokenSeq, PosSeq]]:\n #do this immediately (first)\n #start generating feature matrices\n \n #read file into an array \n with open(ptbtagged_path) as f:\n file_array = f.readlines()\n file_array.append(\"\\n\")\n array_of_tuples = create_tuples(file_array)\n\n return generator(array_of_tuples)", "def parse(self,gff3_line):\r\n split_line = gff3_line.strip().split('\\t')\r\n self.seqid = split_line[0]\r\n self.source = split_line[1]\r\n self.type = split_line[2]\r\n self.start = int(split_line[3])\r\n self.end = int(split_line[4])\r\n self.score = split_line[5]\r\n self.strand = split_line[6]\r\n self.phase = split_line[7]\r\n self.attributes.parse(split_line[8])\r\n return self" ]
[ "0.6938026", "0.67124647", "0.64333224", "0.5949653", "0.580537", "0.57997847", "0.5682999", "0.56635153", "0.5602488", "0.55622095", "0.5510677", "0.5465436", "0.54645", "0.534558", "0.53260916", "0.52932435", "0.52316695", "0.52245367", "0.5222536", "0.5202381", "0.51751524", "0.51722723", "0.516516", "0.5144909", "0.5144909", "0.5106369", "0.5070028", "0.50668764", "0.5053531", "0.502268" ]
0.7324228
0
Create a dictionary of Biopython SeqFeature objects based on their type. From a list of all Biopython SeqFeatures derived from a GenBankformatted flat file, create a dictionary of SeqFeatures based on their 'type' attribute.
def create_seqfeature_dictionary(seqfeature_list): seqfeature_type_set = set() seqfeature_dict = {} for seqfeature in seqfeature_list: seqfeature_type_set.add(seqfeature.type) for type in seqfeature_type_set: sublist = [] for index in range(len(seqfeature_list)): seqfeature = seqfeature_list[index] if seqfeature.type == type: sublist.append(seqfeature) seqfeature_dict[type] = sublist return seqfeature_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cbf_file_to_basis_dict(path):\n import dxtbx.format.Registry\n reader = dxtbx.format.Registry.get_format_class_for_file(path)\n instance = reader(path)\n return map_detector_to_basis_dict(instance.get_detector())", "def _create_feature_dict(feature_table_file) -> dict:\n feature_dict = dict()\n with open(feature_table_file, \"r\") as feature_table:\n csv_in = csv.reader(feature_table, delimiter=\"\\t\")\n\n header = [x.lower() for x in next(csv_in)]\n accession_idx = header.index(\"accession\")\n type_idx = header.index(\"type\")\n type_specific_idx = header.index(\"type_specific\")\n description_idx = header.index(\"description\")\n identifier = 2\n for line in csv_in:\n if line[accession_idx] not in feature_dict:\n feature_dict[line[accession_idx]] = dict()\n\n if line[1] not in feature_dict[line[accession_idx]]:\n feature_dict[line[accession_idx]][line[type_idx]] = []\n\n # Insert feature entry\n feature_dict[line[0]][line[1]].append(\n (line[type_specific_idx].split(\",\"), line[description_idx], str(identifier))\n )\n identifier += 1\n\n return feature_dict", "def file_reader(fname, variant_type):\n type_dict = {}\n with open(fname, \"r\") as file:\n for line in file:\n variants = []\n # len(type_dict) is there to ensure there are only 10 results\n if not line.startswith(\"#\") and len(type_dict) < 10:\n info = line.split(\"\\t\")\n # info[7] contains the annotation string\n info_split = info[7].split(\"|\")\n # checks which variant it has and goes through the annotation where the missense is\n if variant_type == \"missense_variant\":\n indices = [i for i, x in enumerate(info_split) if x == \"missense_variant\"]\n if variant_type == \"frame_shift_variant\":\n indices = [i for i, x in enumerate(info_split) if x == \"frame_shift_variant\"]\n # pares the info from the variant string\n for index in indices:\n # the variant is always 8 indexes further from the wordt missense/frame_shift_variant\n variant = info_split[index + 8]\n variants.append(variant)\n gene_name = info_split[3]\n type_dict[gene_name] = variants\n return type_dict", "def getFeatureDicts(self):\n feature_dicts = super().getFeatureDicts()\n feature_dicts.extend([self.__suffixes, self.__prefixes, self.__tags, self.__numbers, self.__caps, self.__caps_no_start])\n return feature_dicts", "def abberationType(self, abbs):\n # Super slow and broken! 
May not be worth the extra work to fix...\n results = []\n abbs_proc = [] # For tracking processed abbs\n query = \"SELECT f.uniquename AS fbid, db.name AS db,\" \\\n \"dbx.accession AS acc \" \\\n \"FROM feature f \" \\\n \"JOIN cvterm gross_type ON gross_type.cvterm_id=f.type_id \" \\\n \"JOIN feature_cvterm fc ON fc.feature_id = f.feature_id \" \\\n \"JOIN cvterm fine_type ON fine_type.cvterm_id = fc.cvterm_id \" \\\n \"JOIN feature_cvtermprop fctp ON fctp.feature_cvterm_id = fc.feature_cvterm_id \" \\\n \"JOIN cvterm meta ON meta.cvterm_id = fctp.type_id \" \\\n \"JOIN cvterm gtyp ON gtyp.cvterm_id = f.type_id \" \\\n \"JOIN dbxref dbx ON fine_type.dbxref_id = dbx.dbxref_id \" \\\n \"JOIN db ON dbx.db_id = db.db_id \" \\\n \"WHERE gross_type.name = 'chromosome_structure_variation' -- double checks input gross type\" \\\n \"AND meta.name = 'wt_class'\" \\\n \"AND f.uniquename in (%s)\" % (\"'\" + \"'.'\".join(abbs))\n dc = self.query_fb(query)\n for d in dc:\n results.append((d['fbid'], d['db'] + '_' + d['acc']))\n abbs_proc.append(d['fbid'])\n [results.append((a, 'SO_0000110')) for a in abbs if\n a not in abbs_proc] # Defaulting to generic feature id not abb\n return results", "def feat_collect(infile, feat_mode):\n from analysis.seqfile_ops import load_genbank\n gb_record = load_genbank(infile)\n feat_list = gb_record.features\n collected = []\n # establish collection parameters\n types_list = feat_mode['types'] # default entry is ('CDS')\n tags_dict = feat_mode['tags'] # default is an empty dictionary\n # start collecting features\n for feature in feat_list:\n if feature.type in types_list:\n if len(tags_dict.keys()) is 0:\n collected.append(feature)\n else:\n for tag_key in tags_dict.keys():\n if tag_key in feature.qualifiers:\n feat_value = feature.qualifiers.get(tag_key)\n if feat_value[0] in tags_dict[tag_key]:\n collected.append(feature)\n else: pass\n else: pass\n else: pass\n ## consider adding some info to the log\n return collected", "def readTypes(self):\r\n types = {}\r\n for m in re.finditer(\"TYPE (.*) = (.*);\", self.data):\r\n typename, typetype = m.groups() \r\n if typetype in self.SIMPLETYPES:\r\n types[typename] = typetype\r\n else:\r\n types[typename] = \"#\" + typetype\r\n \r\n return types", "def codebook_json_data_factory() -> List[Dict[str, Any]]:\n codebook_data = [\n {\n Features.CODEWORD: [\n {Indices.ROUND.value: 0, Indices.CH.value: 0, Features.CODE_VALUE: 1},\n {Indices.ROUND.value: 1, Indices.CH.value: 1, Features.CODE_VALUE: 1}\n ],\n Features.TARGET: \"GENE_A\"\n },\n {\n Features.CODEWORD: [\n {Indices.ROUND.value: 0, Indices.CH.value: 2, Features.CODE_VALUE: 1},\n {Indices.ROUND.value: 1, Indices.CH.value: 1, Features.CODE_VALUE: 1}\n ],\n Features.TARGET: \"GENE_B\"\n },\n ]\n return codebook_data", "def _extract_features(self):\n # print(os.getpid())\n return {n:self._extract_feature(f) for (n,f) in self.features.items()}", "def to_dict(self) -> Dict[str, Any]:\n fields = [\n k\n for row in self.field_values\n for k, _ in row.statuses.items()\n if k != DUMMY_ENTITY_ID\n ]\n features_dict: Dict[str, List[Any]] = {k: list() for k in fields}\n\n for row in self.field_values:\n for feature in features_dict.keys():\n native_type_value = feast_value_type_to_python_type(row.fields[feature])\n features_dict[feature].append(native_type_value)\n\n return features_dict", "def load_gene_dict(reference_genbank_name=\"data/covid-19-genbank.gb\"):\n recs = [rec for rec in SeqIO.parse(reference_genbank_name, \"genbank\")]\n gene_dict = {}\n for rec in recs:\n 
feats = [feat for feat in rec.features if feat.type == \"CDS\"]\n for feat in feats:\n content = '{}: {}'.format(feat.qualifiers['protein_id'][0], feat.qualifiers['product'][0])\n if feat.qualifiers['product'][0] == 'ORF1a polyprotein':\n continue\n if feat.location_operator == 'join':\n for item in feat.location.parts:\n key = (item.start.position, item.end.position)\n if 'translation' in feat.qualifiers:\n seq = feat.qualifiers['translation']\n if len(seq) == 1:\n amino_acid_seq = seq[0]\n gene_dict[key] = (content, amino_acid_seq)\n else:\n key = (feat.location.start.position, feat.location.end.position)\n if 'translation' in feat.qualifiers:\n seq = feat.qualifiers['translation']\n if len(seq) == 1:\n amino_acid_seq = seq[0]\n gene_dict[key] = (content, amino_acid_seq)\n return gene_dict", "def _parse_features(cls, node: OMNode) -> Dict[str, Dict[str, bool]]:\n features = {}\n for sectname in node:\n section = node[sectname]\n if not isinstance(section, dict) or '_types' not in section:\n continue\n features[sectname] = {}\n for opt in section:\n if not opt.startswith('has'):\n continue\n value = section[opt]\n if not isinstance(value, bool):\n continue\n option = opt[3:]\n features[sectname][option] = value\n return features", "def to_features(self):\n to_return = dict()\n\n to_return['bias'] = 1.0\n to_return['user:' + self.user] = 1.0\n to_return['format:' + self.format] = 1.0\n to_return['token:' + self.token.lower()] = 1.0\n\n to_return['part_of_speech:' + self.part_of_speech] = 1.0\n for morphological_feature in self.morphological_features:\n to_return['morphological_feature:' + morphological_feature] = 1.0\n to_return['dependency_label:' + self.dependency_label] = 1.0\n\n return to_return", "def _parse_features(chrom: str, db: FeatureDB, feature_types: List[str]) -> List[Dict]:\n feature_collections = []\n for top_level_feature in _find_all_top_level_non_gene_features(chrom, db, feature_types):\n children = list(db.children(top_level_feature, level=1))\n\n # extract parent locus tag to compare to children\n locus_tag = None\n if BioCantorQualifiers.LOCUS_TAG.value in top_level_feature.attributes:\n locus_tag = top_level_feature.attributes[BioCantorQualifiers.LOCUS_TAG.value][0]\n\n if not children:\n # treat this isolated feature as both FeatureIntervalCollection and FeatureInterval\n feature = _parse_child_features_to_feature_interval([top_level_feature])\n # infer a FeatureCollection from the information on the FeatureInterval\n feature_collection = dict(\n feature_intervals=[feature],\n feature_collection_name=feature[\"feature_name\"],\n feature_collection_id=feature[\"feature_id\"],\n feature_collection_type=top_level_feature.featuretype,\n locus_tag=locus_tag,\n sequence_name=chrom,\n qualifiers=feature[\"qualifiers\"],\n )\n # remove qualifiers from feature\n del feature[\"qualifiers\"]\n else:\n # combine all children into a FeatureInterval\n feature = _parse_child_features_to_feature_interval(children, locus_tag=locus_tag)\n feature_collection_name, feature_collection_id = extract_feature_name_id(top_level_feature.attributes)\n\n feature_collection = dict(\n feature_intervals=[feature],\n feature_collection_name=feature_collection_name,\n feature_collection_id=feature_collection_id,\n feature_collection_type=top_level_feature.featuretype,\n locus_tag=locus_tag,\n sequence_name=chrom,\n qualifiers=filter_and_sort_qualifiers(top_level_feature.attributes),\n )\n\n feature_collections.append(feature_collection)\n return feature_collections", "def _get_type_seq(self):\n for 
partition, filepath in self.directory.items():\n if filepath is not None:\n conll_file = os.path.basename(filepath) # get name of conll file\n\n # collect sequence data\n sents = list(self.conll_parser.sents(conll_file))\n tagged_sents = list(self.conll_parser.tagged_sents(conll_file))\n\n word_seq = Preprocessor.replace_rare_tokens(sents) if self.replace_rare_tokens else sents\n char_seq = [[[c for c in w] for w in s] for s in sents]\n tag_seq = [[t[-1] for t in s] for s in tagged_sents]\n\n # update the class attributes\n self.type_seq[partition] = {'word': word_seq, 'char': char_seq, 'tag': tag_seq}", "def _cim_feature_type():\n return {\n 'name' : 'cim_feature_type',\n 'is_open' : False,\n 'doc' : None,\n 'members' : [\n ('file', None),\n ('diagnostic', None),\n ],\n }", "def load_ab_features(mbids, feature_type, project_home='.'):\n if feature_type in ['hl', 'highlevel']:\n for mbid in mbids:\n yield _get_ab_features(\n mbid=mbid,\n project_home=project_home,\n path_type='highlevel',\n )\n elif feature_type in ['ll', 'lowlevel']:\n for mbid in mbids:\n yield _get_ab_features(\n mbid=mbid,\n project_home=project_home,\n path_type='lowlevel',\n )\n else:\n raise ValueError('feature_type unknown.')", "def gbk_parse(fname):\n fhand = _open_file(gbkfname)\n unk = 1 \n\n for record in SeqIO.parse(fhand, \"genbank\"):\n\n gene_tags = dict()\n tx_tags = collections.defaultdict(list) \n exon = collections.defaultdict(list) \n cds = collections.defaultdict(list) \n mol_type, chr_id = None, None \n\n for rec in record.features:\n\n if rec.type == 'source':\n mol_type = rec.qualifiers['mol_type'][0]\n try:\n chr_id = rec.qualifiers['chromosome'][0]\n except:\n chr_id = record.name \n continue \n\n strand='-'\n strand='+' if rec.strand>0 else strand\n \n fid = None \n try:\n fid = rec.qualifiers['gene'][0]\n except:\n pass\n\n transcript_id = None\n try:\n transcript_id = rec.qualifiers['transcript_id'][0]\n except:\n pass \n\n if re.search(r'gene', rec.type):\n gene_tags[fid] = (rec.location._start.position+1, \n rec.location._end.position, \n strand,\n rec.type,\n rec.qualifiers['note'][0])\n elif rec.type == 'exon':\n exon[fid].append((rec.location._start.position+1, \n rec.location._end.position))\n elif rec.type=='CDS':\n cds[fid].append((rec.location._start.position+1, \n rec.location._end.position))\n else: \n # get all transcripts \n if transcript_id: \n tx_tags[fid].append((rec.location._start.position+1,\n rec.location._end.position, \n transcript_id,\n rec.type))\n # record extracted, generate feature table\n unk = feature_table(chr_id, mol_type, strand, gene_tags, tx_tags, cds, exon, unk)\n \n #break\n fhand.close()", "def load_fragGC_pickle(inFH):\n fojb = pickle.load(inFH)\n\n d = dict()\n for x in fojb:\n taxon_name = x[0]\n d[taxon_name] = dict()\n d[taxon_name]['fragLength'] = []\n d[taxon_name]['fragGC'] = []\n \n for scaf,v in x[1].items(): \n for z in v:\n # fragStart, fragLength, fragGC\n d[taxon_name]['fragLength'].append(z[1])\n d[taxon_name]['fragGC'].append(z[2]) \n return d", "def get_features(self, feature_type=\"all\"):\n # if exists(path=\"data.csv\"):\n # return pd.read_csv(\"data.csv\")\n # else:\n # reading through directory\n for file_path in self.list_news_path:\n with open(file_path, 'r') as f:\n\n # open document to read and assign to doc\n doc = json.load(f)\n # skip the empty title or body\n if doc['title'] == \"\" or doc['text'] == \"\":\n pass\n else:\n # to extract all data from news content\n if feature_type == \"all\":\n news = doc['title'] + 
doc['text']\n\n # preprocesses news content\n words = preprocess(news)\n yield words\n\n # to extract title and text as a pair\n elif feature_type == \"pair\":\n title = preprocess(doc[\"title\"])\n body = preprocess(doc['text'])\n yield title, body\n # if not title or not body:\n # pass\n # else:\n # yield title, body\n\n # else you only need either title or body\n else:\n assert feature_type in doc.keys(), \"feature not in the document: \" + file_path\n # without stemming\n # CUSTOM_FILTERS = [lambda x: x.lower(), strip_tags, strip_punctuation, strip_multiple_whitespaces,\n # strip_numeric, remove_stopwords]\n\n feature = doc[feature_type]\n words = preprocess(feature)\n # using alternative preprocessing function\n # words = preprocess_string(words, filters=CUSTOM_FILTERS)\n yield words", "def getFeatureDicts(self):\n return [self.data.getWordTagDict(), self.data.tags_trigrams, self.data.tags_bigrams]", "def sgd_features(filepath=None):\n\n if filepath == None:\n filepath=load_sgd_tab()\n\n arabic_to_roman_dict=chromosomename_roman_to_arabic()[0]\n \n with open(filepath) as f:\n lines = f.readlines()\n\n\n feature_list = []\n feature_orf_dict = {}\n feature_ars_dict = {}\n feature_telomere_dict = {}\n feature_ltr_dict = {}\n feature_centromere_dict = {}\n feature_Xelement_dict = {}\n feature_intron_dict = {}\n feature_ncrna_dict = {}\n feature_ncexon_dict = {}\n feature_trna_dict = {}\n feature_snorna_dict = {}\n feature_teg_dict = {}\n feature_5p_utrintron_dict = {}\n feature_mas_dict = {}\n feature_snrna_dict = {}\n feature_rrna_dict = {}\n feature_ets_dict = {}\n feature_its_dict = {}\n feature_oor_dict = {}\n feature_telrna_dict = {}\n \n for line in lines:\n l = line.strip('\\n').split('\\t')\n if not l[1] in feature_list:\n feature_list.append(l[1])\n\n if not l[8].endswith('micron') and not l[8] == '':\n chromosome = arabic_to_roman_dict.get(int(l[8]))\n if l[1] == 'ORF':\n feature_orf_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'ARS':\n feature_ars_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'telomere':\n feature_telomere_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'long_terminal_repeat':\n feature_ltr_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'centromere':\n feature_centromere_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'X_element':\n feature_Xelement_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'intron':\n feature_intron_dict[l[6]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'ncRNA_gene':\n feature_ncrna_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'noncoding_exon':\n feature_ncexon_dict[l[6]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'tRNA_gene':\n feature_trna_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'snoRNA_gene':\n feature_snorna_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'transposable_element_gene':\n feature_teg_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'five_prime_UTR_intron':\n feature_5p_utrintron_dict[l[6]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'matrix_attachment_site':\n feature_mas_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'snRNA_gene':\n 
feature_snrna_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'rRNA_gene':\n feature_rrna_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'external_transcribed_spacer_region':\n feature_ets_dict[l[6]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'internal_transcribed_spacer_region':\n feature_its_dict[l[6]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'origin_of_replication':\n feature_oor_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'telomerase_RNA_gene':\n feature_telrna_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n\n\n \n\n\n genomicregions_list = ['ORF', 'ARS', 'Telomere', 'long_terminal_repeat',\n 'Centromere', 'X_element', 'Intron', 'ncRNA_gene',\n 'Noncoding_exon', 'tRNA_gene', 'snoRNA_gene',\n 'transposable_element_gene', 'five_prime_UTR_intron',\n 'matrix_attachment_site', 'snRNA_gene', 'rRNA_gene',\n 'external_transcribed_spacer_region',\n 'internal_transcribed_spacer_region',\n 'origin_of_replication', 'telomerase_RNA_gene']\n\n\n return(genomicregions_list, feature_orf_dict, feature_ars_dict, feature_telomere_dict,\n feature_ltr_dict, feature_centromere_dict, feature_Xelement_dict, feature_intron_dict,\n feature_ncrna_dict, feature_ncexon_dict, feature_trna_dict,\n feature_snorna_dict, feature_teg_dict, feature_5p_utrintron_dict,\n feature_mas_dict, feature_snrna_dict, feature_rrna_dict,\n feature_ets_dict, feature_its_dict, feature_oor_dict,\n feature_telrna_dict)", "def read_features_dict(path):\n # type_dict specifies the type conversion to be applied. Each key denotes\n # a column name and the value is the conversion. Columns not included are\n # converted to floats.\n type_dict = {'source': str, 'target': str, 'status': int}\n with open(path) as feature_file:\n reader = csv.DictReader(feature_file, delimiter='\\t')\n for row in reader:\n yield {key: type_dict.get(key, float)(value) for key, value in row.items()}", "def feat_dict(pos_feat,text):\n dict = {}\n bigrams = ngrams(word_tokenize(text),2)\n trigrams = ngrams(word_tokenize(text),3)\n \n for feat in pos_feat:\n dict[feat]=features(feat,text,bigrams,[],[])\n return dict", "def to_record(\n self,\n filepath=None,\n features_type=\"misc_feature\",\n with_original_features=True,\n with_original_spec_features=False,\n with_constraints=True,\n with_objectives=True,\n with_sequence_edits=False,\n colors_dict=None,\n use_short_labels=True,\n record_id = None\n ):\n record = sequence_to_biopython_record(self.sequence)\n if record_id is not None:\n record.id = record_id\n\n record.features = []\n if with_constraints:\n record.features += [\n cst.to_biopython_feature(\n role=\"constraint\",\n feature_type=features_type,\n colors_dict=colors_dict,\n use_short_label=use_short_labels,\n )\n for cst in self.constraints\n if cst.__dict__.get(\"location\", False)\n ]\n if with_objectives:\n record.features += [\n obj.to_biopython_feature(\n role=\"objective\",\n feature_type=features_type,\n colors_dict=colors_dict,\n use_short_label=use_short_labels,\n )\n for obj in self.objectives\n ]\n if with_original_features and (self.record is not None):\n record.features += [\n f\n for f in self.record.features\n if with_original_spec_features\n or not find_specification_label_in_feature(f)\n ]\n if with_sequence_edits:\n record.features += self.sequence_edits_as_features()\n\n if filepath is not None:\n write_record(record=record, target=filepath, 
file_format=\"genbank\")\n else:\n return record", "def create_from_feature_list(self, features): \n for f in features:\n featuretype = f.pop('featuretype', None)\n if featuretype is None:\n raise LoopException\n if featuretype == 'strati':\n self.create_and_add_foliation(f)\n # if featuretype == 'fault':\n # self.create_and_add_fault(f)\n if featuretype == 'folded_strati':\n self.create_and_add_folded_foliation(f)", "def feature_extract(self, CT_pairs):\n instances = []\n for pair in CT_pairs:\n config = pair[0]\n label = pair[1]\n data = []\n featureset = {}\n \n # for nltk NaiveBayes feature selection stuff when doing MaxEnt decoding parser commit this\n# featureset[\"topOfBuffer\"] = self.token_dict[config.beta.top()]\n# featureset[\"topOfStack\"] = self.token_dict[config.sigma.top()]\n# featureset[\"bufferStackPair\"] = (self.token_dict[config.sigma.top()], self.token_dict[config.beta.top()])\n# featureset[\"topOfBuffer\"] = self.POS_dict[config.beta.top()]\n# featureset[\"topOfStack\"] = self.POS_dict[config.sigma.top()]\n# featureset[\"bufferStackPair\"] = tuple((self.POS_dict[config.sigma.top()], self.POS_dict[config.beta.top()]))\n \n # add the (StackTopPOS,BufferTopPOS,bufferchildren_POS) feature\n #value_set = tuple([self.POS_dict[config.sigma.top()], self.POS_dict[config.beta.top()]] + [self.POS_dict[child] for child in self.getBufferChildren(config.beta.top())])\n #featureset[\"bufferStackbufferChildrenPair\"] = value_set\n \n # for MaxEnt decoding stuff\n # token variants\n data.append((\"topOfBuffer\",self.token_dict[config.beta.top()]))\n data.append((\"topOfStack\",self.token_dict[config.sigma.top()]))\n data.append((\"bufferStackPair\",self.token_dict[config.sigma.top()],self.token_dict[config.beta.top()]))\n #POS variants\n data.append((\"topOfBuffer\",self.POS_dict[config.beta.top()]))\n data.append((\"topOfStack\",self.POS_dict[config.sigma.top()]))\n data.append((\"bufferStackPair\",self.POS_dict[config.sigma.top()],self.POS_dict[config.beta.top()]))\n ins = Instance(label=label, data=data)\n #ins = Instance(label=label, data=featureset)\n instances.append(ins)\n \n return instances", "def initialize_from_files(self, feature_type, id_vector_file, bg_flags_file,\n feature_mat_file, kernel_mat_file, rw_lock=None):\n with self._map_lock.write_lock():\n # even though this happens in the initialize() call we make here,\n # we would like to short circuit before loading data if we can.\n if feature_type in self._feature2memory:\n raise KeyError(\"Key '%s' already present in our mapping. 
\"\n \"Please remove first before initializing.\")\n\n self._feature2memory[feature_type] = \\\n FeatureMemory.construct_from_files(id_vector_file,\n bg_flags_file,\n feature_mat_file,\n kernel_mat_file,\n rw_lock)", "def create_dicts(self):\n \n # remove this string from filename to make output file names more manageable\n pre_output1 = self.file1.replace(\"_Guys121919_CGH_1100_Jul11\", '')\n pre_output2 = self.file2.replace(\"_Guys121919_CGH_1100_Jul11\", '')\n \n # Build the output file name.\n # if prefix is present add it\n if self.out_file_prefix is not None:\n # concatenate prefix, filenames and dyes into output filename file1_file1_dye_file2_file2_dye.txt\n self.outputfilename = self.out_file_prefix+pre_output1.replace(\".txt\", '') + \"_\" + self.file1_dye + \"_\" + pre_output2.replace(\".txt\", '') + \"_\" + self.file2_dye + \".txt\"\n # if no prefix don't add it!\n else:\n # concatenate filenames and dyes into output filename file1_file1_dye_file2_file2_dye.txt\n self.outputfilename = pre_output1.replace(\".txt\", '') + \"_\" + self.file1_dye + \"_\" + pre_output2.replace(\".txt\", '') + \"_\" + self.file2_dye + \".txt\"\n\n # add temp to end of file name to create a temporary output filename\n self.tempoutputfilename = self.outputfilename.replace(\".txt\", '') + \"temp.txt\"\n\n # open temp output file\n self.tempoutputfile = open(self.outputfolder + self.tempoutputfilename, 'w')\n\n \n # open FE files\n file1_open = open(self.chosenfolder + self.file1, 'r')\n file2_open = open(self.chosenfolder + self.file2, 'r')\n\n # open file1 and create a dict of the features.\n for linenumber, line in enumerate(file1_open):\n if linenumber >= 10:\n splitline = line.split('\\t')\n self.file1_dict[int(splitline[1])] = line\n # get n of rows in file1 (take the linenumber of the last line)\n self.file1_len = linenumber\n\n # repeat for features in second file but first writing the feparam and stats to temp file - when pairing with control this ensures the \"header\" comes from the test (file2) not control (file1), NB NEITHER ARE ACCURATE!!!!\n for linenumber, line in enumerate(file2_open):\n if linenumber < 10:\n self.tempoutputfile.write(line)\n # then add all features to a dictionary, with the unique feature number as a key\n if linenumber >= 10:\n splitline = line.split('\\t')\n self.file2_dict[int(splitline[1])] = line\n # get n of rows in file2\n self.file2_len = linenumber\n\n # close files\n file1_open.close()\n file2_open.close()", "def separate_types(obj_list):\n\n obj_dict = {\n 'R':[],\n 'L':[],\n 'C':[],\n 'V':[],\n 'I':[],\n 'E':[],\n 'G':[],\n 'H':[],\n 'F':[]\n }\n\n for obj in obj_list:\n obj_dict[obj.el_type].append(obj)\n\n return obj_dict" ]
[ "0.59509116", "0.5866959", "0.5769676", "0.568471", "0.56685895", "0.5634546", "0.5574579", "0.5568446", "0.55519086", "0.554404", "0.5521767", "0.55096895", "0.55072814", "0.5497107", "0.54693377", "0.54522717", "0.5439587", "0.54240304", "0.540727", "0.537465", "0.5333679", "0.5322289", "0.53209263", "0.5316841", "0.5279893", "0.5277998", "0.52716595", "0.52619773", "0.52519166", "0.5244419" ]
0.7171406
0
Parse data from a Biopython SeqRecord object into a Genome object. All Source, CDS, tRNA, and tmRNA features are parsed into their associated Source, Cds, Trna, and Tmrna objects.
def parse_genome_data(seqrecord, filepath=pathlib.Path(), translation_table=11, genome_id_field="_organism_name", gnm_type="", host_genus_field="_organism_host_genus"): # Keep track of the file from which the record is derived. gnm = genome.Genome() gnm.set_filename(filepath) gnm.type = gnm_type try: gnm.organism = seqrecord.annotations["organism"] except: gnm.organism = "" finally: # Identifies host and phage name from organism field. gnm.parse_organism() try: # Since accessions are stored in a list, there may be more than # one accessions associated with this file. # The first accession in the list is assumed to be the most recent. accession = seqrecord.annotations["accessions"][0] except: accession = "" finally: gnm.set_accession(accession) try: gnm.description = seqrecord.description # It appears that if description is not present, Biopython # auto-populates this attribute as "<unknown description>" if gnm.description == "<unknown description>": gnm.description = "" except: gnm.description = "" finally: # Identifies host and phage name from description field. gnm.parse_description() try: gnm.source = seqrecord.annotations["source"] except: gnm.source = "" finally: # Identifies host and phage name from record source field. gnm.parse_source() try: # The retrieved authors can be stored in multiple Reference elements. refs = seqrecord.annotations["references"] authors_list = [] for ref in refs: # Note: Reference objects are instantiated with an empty # authors attribute. So if no authors are present in a Reference, # it will still concatenate an empty string, resulting in an # author_string = ";;;" etc. So only add the authors info if # it is not an empty string. if ref.authors != "": authors_list.append(ref.authors) authors_string = ";".join(authors_list) gnm.authors = authors_string except: gnm.authors = "" # Biopython requires the parsed record contains a sequence, so # no need to test whether the seq attribute is present or not. # Nucleotide sequence, length, and % GC. gnm.set_sequence(seqrecord.seq) try: date = seqrecord.annotations["date"] gnm.date = datetime.strptime(date, "%d-%b-%Y") except: gnm.date = constants.EMPTY_DATE # # Now that record fields are parsed, set the genome name, id, # # and host_genus. if genome_id_field != "": gnm.name = getattr(gnm, genome_id_field) gnm.set_id(value=gnm.name) else: # The seqrecord name and id are used if genome_id_field is empty. try: gnm.name = seqrecord.name # It appears that if name is not present, Biopython auto-populates # this attribute as "<unknown name>" if gnm.name == "<unknown name>": gnm.name = "" except: gnm.name = "" try: gnm.id = seqrecord.id # It appears that if id is not present, Biopython auto-populates # this attribute as "<unknown id>" if gnm.id == "<unknown id>": gnm.id = "" except: gnm.id = "" gnm.set_host_genus(attribute=host_genus_field) # Create lists of parsed features. # Note: Biopython instantiates the features attribute with # an empty list, so no need to test if features attribute is # present or not. 
seqfeature_dict = create_seqfeature_dictionary(seqrecord.features) cds_list = [] if "CDS" in seqfeature_dict.keys(): for seqfeature in seqfeature_dict["CDS"]: cds_ftr = parse_cds_seqfeature(seqfeature) cds_ftr.genome_id = gnm.id cds_ftr.genome_length = gnm.length cds_ftr.set_nucleotide_sequence(parent_genome_seq=gnm.seq) cds_list.append(cds_ftr) source_list = [] if "source" in seqfeature_dict.keys(): for seqfeature in seqfeature_dict["source"]: src_ftr = parse_source_seqfeature(seqfeature) src_ftr.genome_id = gnm.id source_list.append(src_ftr) trna_list = [] if "tRNA" in seqfeature_dict.keys(): for seqfeature in seqfeature_dict["tRNA"]: trna_ftr = parse_trna_seqfeature(seqfeature) trna_ftr.genome_id = gnm.id trna_ftr.genome_length = gnm.length trna_ftr.set_nucleotide_sequence(parent_genome_seq=gnm.seq) trna_ftr.set_nucleotide_length(use_seq=True) trna_ftr.parse_amino_acid() trna_ftr.parse_anticodon() trna_list.append(trna_ftr) tmrna_list = [] if "tmRNA" in seqfeature_dict.keys(): for seqfeature in seqfeature_dict["tmRNA"]: tmrna_ftr = parse_tmrna_seqfeature(seqfeature) tmrna_ftr.genome_id = gnm.id tmrna_ftr.genome_length = gnm.length tmrna_ftr.set_nucleotide_sequence(parent_genome_seq=gnm.seq) tmrna_ftr.set_nucleotide_length(use_seq=True) tmrna_ftr.parse_peptide_tag() tmrna_ftr.run_aragorn() tmrna_list.append(tmrna_ftr) gnm.translation_table = translation_table gnm.set_cds_features(cds_list) gnm.set_source_features(source_list) gnm.set_trna_features(trna_list) gnm.set_tmrna_features(tmrna_list) # The feature.id is constructed from the Genome.id and the feature order. gnm.set_feature_ids(use_type=True, use_cds=True) gnm.set_feature_ids(use_type=True, use_source=True) gnm.set_feature_ids(use_type=True, use_trna=True) gnm.set_feature_ids(use_type=True, use_tmrna=True) return gnm
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parser(raw_seq, date):\n taxon_id = int(raw_seq.features[0].qualifiers['db_xref'][0][6:])\n organism = raw_seq.annotations['organism']\n accession = raw_seq.annotations['accessions'][0]\n gene = []\n records = []\n frag_type = 'whole'\n begin = 1\n end = len(raw_seq)\n sequence = str(raw_seq.seq)\n name = organism\n strand = 1\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n records.append(rec)\n for i in raw_seq.features:\n if i.type == 'gene' and 'gene' in i.qualifiers:\n if i.location_operator != 'join':\n frag_type = 'gene'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n name = str(i.qualifiers['gene'][0])\n strand = str(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.location_operator == 'join':\n frag_type = 'gene'\n begin = int(i.sub_features[0].location.start)\n end = int(i.sub_features[0].location.end)\n name = str(i.qualifiers['gene'][0])\n strand = str(i.location.strand)\n sequence = ''\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n gene.append(rec)\n begin = int(i.sub_features[1].location.start)\n end = int(i.sub_features[1].location.end)\n sequence = ''.join([str(raw_seq.seq[begin:end]), str(raw_seq.seq[begin:end])])\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'CDS' and 'gene' in i.qualifiers:\n frag_type = 'cds'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n name = str(i.qualifiers['gene'][0]).replace(' ', '_')\n strand = str(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'tRNA' and 'gene' in i.qualifiers:\n frag_type = 'tRNA'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n if len(sequence) >= 100:\n sequence = ''\n name = str(i.qualifiers['gene'][0]).replace(' ', '_')\n strand = str(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'rRNA':\n frag_type = 'rRNA'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n name = str(i.qualifiers['product'][0]).replace(' ', '_')\n strand = str(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'exon' and 'gene' in i.qualifiers:\n frag_type = 'exon'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n if 'number' in i.qualifiers:\n name = '{0}_exon_{1}'.format(i.qualifiers['gene'][0],\n i.qualifiers['number'][0])\n else:\n name = '{0}_exon'.format(i.qualifiers['gene'][0])\n strand = int(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'intron' and 'gene' in i.qualifiers:\n frag_type = 'intron'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n strand = str(i.location.strand)\n if 'number' in i.qualifiers:\n name = '{0}_{1}_intron'.format(i.qualifiers['gene'][0],\n i.qualifiers['number'][0])\n else:\n name = '{0}_intron'.format(i.qualifiers['gene'][0])\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n\n records.append(rec)\n 
gene.sort(key=lambda x: x[5])\n\n for i in range(len(gene) - 1):\n frag_type = 'spacer'\n now = gene[i]\n then = gene[i + 1]\n tail = now[6] + 1\n head = then[5] - 1\n sequence = str(raw_seq.seq[tail:head])\n name = '{0}-{1}_spacer'.format(now[3], then[3])\n strand = 0\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n records.append(rec)\n records.extend(gene)\n\n database.extend(records)", "def parse_records(self, handle, do_features=...): # -> Generator[SeqRecord, None, None]:\n ...", "def parse_records(self):\n for record in sp.parse(gzip.open(\n \"./human_uniprot_04_07_20.gz\", 'rt')):\n # print(record.taxonomy_id)\n # if record.organism != \"Homo sapiens\":\n # continue\n # print(record.features[0])\n # for comment in record.comments:\n # if comment.startswith(\"SUBCELLULAR LOCATION\"):\n # print(comment)\n self.extract_features_to_dict(record)\n self.extract_localization(record)", "def parse(self, handle, do_features=...): # -> SeqRecord | None:\n ...", "def parse(self,gff3_line):\r\n split_line = gff3_line.strip().split('\\t')\r\n self.seqid = split_line[0]\r\n self.source = split_line[1]\r\n self.type = split_line[2]\r\n self.start = int(split_line[3])\r\n self.end = int(split_line[4])\r\n self.score = split_line[5]\r\n self.strand = split_line[6]\r\n self.phase = split_line[7]\r\n self.attributes.parse(split_line[8])\r\n return self", "def parse(record):\n\n #Extract individual parts of the FASTA record\n\n identifier = record.id #The sequence's Id\n sequence = record.seq #The sequence itself\n sequence = sequence.upper() #Turns all the nucleotides to upper case\n\n return identifier, sequence", "def __init__(self, featureType, chrom, start, end, strand, meta):\n\n self.meta = meta\n self.featureType = featureType\n self.chrom = chrom.replace('chr', '')\n self.start = int(start)\n self.end = int(end)\n self.strand = strand\n self.geneId = meta['gene_id']\n self.exonId = None\n self.exonNum = None\n self.transcriptId = None\n self.geneName = meta['gene_name']\n self.transcriptName = None\n self.transcriptBioType = None\n self.source = meta['gene_source']\n self.geneBioType = meta['gene_biotype']\n self.set_values()", "def parse_prodigal(prodigal_record):\n\tfor rec in prodigal_record:\n\t\t# each one of these records is a feature\n\t\tm = re.match(\">?(\\S+)_(\\d+) # (\\d+) # (\\d+) # (-?\\d+) # ID=([^;]+);\", rec.description)\n\t\tif m:\n\t\t\tname, id_number, start, end, strand, prod_id = m.groups()\n\t\t\tstart = int(start)\n\t\t\tend = int(end)\n\t\t\tstrand = int(strand)\n\t\t\tlocation = SeqFeature.FeatureLocation(start, end, strand)\n\t\t\tsequence = str(rec.seq)\n\t\t\tqualifiers = {'translation': [sequence], 'prodigal_id': prod_id}\n\t\t\t# multiple features go on the same record. 
This returns the name to keep track of what goes where.\n\t\t\tfeature = SeqFeature.SeqFeature(location=location,\n\t\t\t type=\"CDS\",\n\t\t\t strand=strand,\n\t\t\t id=id_number,\n\t\t\t qualifiers=qualifiers)\n\t\t\tyield name, feature", "def GFFParse(gff_file):\n genes, utr5, exons=dict(), dict(), dict()\n transcripts, utr3, cds=dict(), dict(), dict()\n # TODO Include growing key words of different non-coding/coding transcripts \n features=['mrna', 'transcript', 'ncrna', 'mirna', 'pseudogenic_transcript', 'rrna', 'snorna', 'snrna', 'trna', 'scrna', 'mrna_te_gene']\n gff_handle=open(gff_file, \"rU\")\n for gff_line in gff_handle:\n gff_line=gff_line.strip('\\n\\r').split('\\t')\n if re.match(r'#|>', gff_line[0]): # skip commented line or fasta identifier line \n continue\n if len(gff_line)==1: # skip fasta sequence/empty line if present \n continue \n assert len(gff_line)==9, '\\t'.join(gff_line) # not found 9 tab-delimited fields in this line \n if '' in gff_line: # skip this line if there any field with an empty value\n print 'Skipping..', '\\t'.join(gff_line)\n continue\n if gff_line[-1][-1]==';': # trim the last ';' character \n gff_line[-1]=gff_line[-1].strip(';')\n if gff_line[2].lower() in ['gene', 'pseudogene', 'transposable_element_gene']:\n gid, gene_info=None, dict()\n gene_info['start']=int(gff_line[3])\n gene_info['stop']=int(gff_line[4])\n gene_info['chr']=gff_line[0]\n gene_info['source']=gff_line[1]\n gene_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=') # gff attributes are separated by key=value pair \n if attb[0]=='ID':\n gid=attb[1]\n break\n genes[(gff_line[0], gid)]=gene_info # store gene information based on the chromosome and gene symbol.\n elif gff_line[2].lower() in features: \n gid, mrna_info=None, dict() \n mrna_info['start']=int(gff_line[3])\n mrna_info['stop']=int(gff_line[4])\n mrna_info['chr']=gff_line[0]\n mrna_info['strand']=gff_line[6]\n mrna_info['type'] = gff_line[2]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n gid=attb[1]\n elif attb[0]=='ID':\n mrna_info[attb[0]]=attb[1]\n for fid in gid.split(','): # child may be mapped to multiple parents ex: Parent=AT01,AT01-1-Protein \n if (gff_line[0], fid) in transcripts:\n transcripts[(gff_line[0], fid)].append(mrna_info)\n else:\n transcripts[(gff_line[0], fid)]=[mrna_info]\n elif gff_line[2].lower() in ['exon', 'pseudogenic_exon']:\n tids, exon_info=None, dict()\n exon_info['start']=int(gff_line[3])\n exon_info['stop']=int(gff_line[4])\n exon_info['chr']=gff_line[0]\n exon_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in exons:\n exons[(gff_line[0], tid)].append(exon_info)\n else:\n exons[(gff_line[0], tid)]=[exon_info]\n elif gff_line[2].lower() in ['five_prime_utr']:\n utr5_info, tids=dict(), None\n utr5_info['start']=int(gff_line[3])\n utr5_info['stop']=int(gff_line[4])\n utr5_info['chr']=gff_line[0]\n utr5_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in utr5:\n utr5[(gff_line[0], tid)].append(utr5_info)\n else:\n utr5[(gff_line[0], tid)]=[utr5_info]\n elif gff_line[2].lower() in ['cds']:\n cds_info, tids=dict(), None\n cds_info['start']=int(gff_line[3])\n cds_info['stop']=int(gff_line[4])\n cds_info['chr']=gff_line[0]\n 
cds_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in cds:\n cds[(gff_line[0], tid)].append(cds_info)\n else:\n cds[(gff_line[0], tid)]=[cds_info]\n elif gff_line[2].lower() in ['three_prime_utr']:\n utr3_info, tids=dict(), None\n utr3_info['start']=int(gff_line[3])\n utr3_info['stop']=int(gff_line[4])\n utr3_info['chr']=gff_line[0]\n utr3_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in utr3:\n utr3[(gff_line[0], tid)].append(utr3_info)\n else:\n utr3[(gff_line[0], tid)]=[utr3_info]\n gff_handle.close()\n return genes, transcripts, exons, utr3, utr5, cds", "def parse_gff(g):\n # We also want to store the mRNA->gene information!\n mrna_par = {}\n # And the CDS->mRNA information\n cds_dat = {}\n with open(g, 'r') as f:\n for line in f:\n # if the line is empty or starts with a #, we will skip it\n if line.startswith('#') or line == '\\n':\n continue\n else:\n tmp = line.strip().split('\\t')\n feat_type = tmp[2]\n if feat_type == 'mRNA':\n meta = tmp[8].split(';')\n for m in meta:\n if m.startswith('ID='):\n tx_id = m.split('=')[1]\n if m.startswith('Parent='):\n tx_par = m.split('=')[1]\n mrna_par[tx_id] = tx_par\n elif feat_type == 'CDS':\n scaf = tmp[0]\n start = tmp[3]\n end = tmp[4]\n strand = tmp[6]\n phase = tmp[7]\n meta = tmp[8].split(';')\n for m in meta:\n if m.startswith('ID='):\n cds_id = m.split('=')[1]\n if m.startswith('Parent='):\n cds_par = m.split('=')[1]\n if strand == '-':\n strand = -1\n else:\n strand = 1\n # Watch out for transcripts where there are multiple CDS.\n # This will require a nested dictionary of lists.\n if cds_par in cds_dat:\n pass\n else:\n cds_dat[cds_par] = {}\n if cds_id in cds_dat[cds_par]:\n pass\n else:\n cds_dat[cds_par][cds_id] = []\n # We want to make a SequenceFeature for each CDS chunk\n # Keep in mind that GFF is 1-based, so we have to adjust\n # the start position!\n cds_feat = SeqFeature(\n FeatureLocation(int(start)-1, int(end), strand=strand),\n type=\"CDS\",\n id=cds_id)\n # Add some qualifiers to modify the behavior\n # Use the \"standard\" genetic code from NCBI\n cds_feat.qualifiers['transl_tabl'] = [1]\n # Then, append it into the corresponding dictionary item\n # keeping the chromosome (scaffold) name and phase with it\n cds_dat[cds_par][cds_id].append((cds_feat, scaf, phase))\n else:\n continue\n return (mrna_par, cds_dat)", "def parse_record(self, record):\n raise NotImplementedError()", "def genome_to_seqrecord(phage_genome):\n\n assert phage_genome != None,\\\n \"Genome object passed is None and not initialized\"\n try:\n record = SeqRecord(phage_genome.seq)\n record.seq.alphabet = IUPAC.IUPACAmbiguousDNA()\n except AttributeError:\n print(\"Genome object failed to be converted to SeqRecord.\",\n \"Genome valid attribute 'seq' is required to\",\n \"convert to SeqRecord object.\")\n raise\n record.name = phage_genome.name\n if phage_genome.accession != \"\":\n record.id = phage_genome.accession\n record.features = get_seqrecord_features(phage_genome)\n record.description = get_seqrecord_description(phage_genome)\n record.annotations=\\\n get_seqrecord_annotations(phage_genome)\n\n return record", "def handle(self, *args, **opts):\n # Open GenBank for reading\n phases = {}\n features = [\"CDS\", \"rRNA\", \"tRNA\", \"ncRNA\", \"repeat_region\",\n 
\"misc_feature\"]\n gb = SeqIO.read(open(opts['input'], 'r'), 'genbank')\n for feature in gb.features:\n if feature.type in features:\n if 'locus_tag' in feature.qualifiers:\n phases[feature.qualifiers['locus_tag'][0]] = feature.strand\n\n # Example Reference Name: gi|29165615|ref|NC_002745.2|\n ref_name = \"gi|{0}|ref|{1}.{2}|\".format(\n gb.annotations['gi'],\n gb.name,\n gb.annotations['sequence_version']\n )\n reference = Reference.objects.get(name=ref_name)\n\n # Loop through Annotation objects\n for obj in Annotation.objects.filter(reference=reference):\n if obj.locus_tag in phases:\n print('{0} --> {1}'.format(\n obj.locus_tag, phases[obj.locus_tag]\n ))\n obj.strand = phases[obj.locus_tag]\n obj.save()", "def parse_gff(path):\n fasta = find_fasta(path)\n if not fasta:\n raise FileNotFoundError(f\"Could not find partner FASTA file for {path}\")\n\n # Parse FASTA and create GFFUtils database\n fasta = parse_infile(fasta, \"fasta\")\n gff = gffutils.create_db(\n str(path),\n \":memory:\",\n force=True,\n merge_strategy=\"create_unique\",\n sort_attribute_values=True\n )\n regions = find_regions(gff.directives)\n\n for record in fasta:\n # Normalise Feature location based on ##sequence-region directive.\n # Necessary for extracted GFF3 files that still store coordinates\n # relative to the entire region, not to the extracted FASTA.\n # If no sequence-region directive is found, assumes 1 (i.e. sequence start).\n cds, gene = parse_cds_features(\n gff.region(seqid=record.id, featuretype=[\"gene\", \"CDS\"]),\n regions[record.id][0] - 1 if record.id in regions else 0\n )\n if not cds:\n LOG.warning(\"Found no CDS features in %s [%s]\", record.id, path)\n record.features = sorted(\n [*gene, *merge_cds_features(cds)],\n key=lambda f: f.location.start\n )\n\n return fasta", "def parse_file(self):\n # the header was already read in the init, start at the first sample line\n\n for line in self._stream_handle:\n\n # create the dictionary of key/value pairs composed of the labels and the values from the\n # record being parsed\n # ex: data_dict = {'sci_bsipar_temp':10.67, n1, n2, nn}\n data_dict = self._read_data(line)\n\n if GliderParser._has_science_data(data_dict, self._particle_class):\n # create the timestamp\n timestamp = ntplib.system_to_ntp_time(float(data_dict[GliderParticleKey.M_PRESENT_TIME]))\n # create the particle\n self._record_buffer.append(self._extract_sample(\n self._particle_class, None, data_dict, internal_timestamp=timestamp))", "def parse_file(self):\n\n # Create the gps position interpolator\n gps_interpolator = GpsInterpolator()\n\n # the header was already read in the init, start at the samples\n for data_record in self._stream_handle:\n # create the dictionary of key/value pairs composed of the labels and the values from the\n # record being parsed\n data_dict = self._read_data(data_record)\n timestamp = ntplib.system_to_ntp_time(float(data_dict[GliderParticleKey.M_PRESENT_TIME]))\n\n # handle this particle if it is an engineering metadata particle\n # this is the glider_eng_metadata* particle\n if not self._metadata_sent:\n self._record_buffer.append(self.handle_metadata_particle(timestamp))\n\n # check for the presence of engineering data in the raw data row before continuing\n # This is the glider_eng* particle\n if GliderParser._has_science_data(data_dict, self._particle_class):\n self._record_buffer.append(self._extract_sample(\n self._particle_class, None, data_dict, internal_timestamp=timestamp))\n\n # check for the presence of GPS data in the raw data row before 
continuing\n # This is the glider_gps_position particle\n if GliderParser._has_science_data(data_dict, self._gps_class):\n gps_interpolator.append_to_buffer(\n self._extract_sample(self._gps_class, None, data_dict, internal_timestamp=timestamp))\n else:\n log.info(\"GPS data no-find: \")\n\n # check for the presence of science particle data in the raw data row before continuing\n # This is the glider_eng_sci* particle\n if GliderParser._has_science_data(data_dict, self._science_class):\n self._record_buffer.append(self._extract_sample(\n self._science_class, None, data_dict, internal_timestamp=timestamp))\n\n # If there are GPS entries, interpolate them if they contain gps lat/lon values\n if gps_interpolator.get_size() > 0:\n self._record_buffer.extend(gps_interpolator.process_and_get_objects())", "def extract_genes(seq_record):\n return [f for f in seq_record.features if f.type == \"gene\"]", "def parse_record(self, in_rec):\n \n geo_util = geo.Geo()\n \n self.metadata = {}\n for k, v in in_rec.items():\n if k == 'metadata2': continue\n elif k == 'geometry':\n self.metadata['geometry'] = v\n coords = v['coordinates']\n self.metadata['wkt'] = geo_util.convert_imageGeom(\\\n coords, 'wkt')\n elif k == 'metadata':\n for m in v:\n key = to_camelCase(m[0])\n self.metadata[key] = m[1]\n else:\n self.metadata[k] = v", "def __init__(self, line):\n (self.seqid, \n self.source, \n self.type, \n self.start, \n self.end, \n self.score, \n self.strand, \n self.phase, \n self.attributes_str) = line.strip().split('\\t')\n # preserve attribute order as a list of keys (attributes_order)\n attributes_list = self.attributes_str.split(';')\n self.attributes_order = [attr.split('=')[0] for attr in \n attributes_list]\n # store attribute keys and their values in a dictionary\n self.attributes = {attr.split('=')[0]:attr.split('=')[1] for attr in \n attributes_list}\n # rename the name attribute key to Name so it conforms to the\n # GFF3 specification, where Name is a reserved attribute key\n if 'name' in self.attributes:\n self.attributes['Name'] = self.attributes.pop('name')\n self.attributes_order[self.attributes_order.index('name')] = 'Name'", "def read_gff3_line(line):\n cols = line.strip().split('\\t')\n if ASSUME_OFFBYONE:\n cols[3] = str(int(cols[3]) - 1)\n known_fields = set()\n fields = {'seqid': cols[0], 'source': cols[1], 'type': cols[2],\n 'start': cols[3], 'end': cols[4], 'score': cols[5],\n 'strand': cols[6], 'phase': cols[7]}\n known_fields.update(fields.keys())\n attrlist = cols[8]\n attributes = dict()\n for attr in attrlist.split(';'):\n if not attr.strip():\n continue\n k, v = attr.strip().split('=')\n if k.lower() == 'dbxref':\n try:\n subkey, subvalue = v.split(':')\n except ValueError:\n if SHOW_PARSER_WARNING:\n sys.stderr.write('\\nWarning: skipping Dbxref value {} - no key! 
Line: {}'.format(v, line.strip()))\n continue\n assert subkey not in attributes, 'Sub-key already in attributes list: {} in line {}'.format(subkey, line)\n attributes[subkey] = subvalue.strip()\n known_fields.add(subkey)\n continue\n elif ',' in v:\n raise ValueError('List of values for key {}: {} in line {}'.format(k, v, line))\n else:\n # who knows what crazy stuff could be here...\n pass\n attributes[k] = v.strip()\n known_fields.add(k)\n fields.update(attributes)\n return fields, known_fields", "def parseGFF3(filename):\n\n # Parse with transparent decompression\n openFunc = gzip.open if filename.endswith(\".gz\") else open\n with openFunc(filename) as infile:\n for line in infile:\n if line.startswith(\"#\"):\n continue\n parts = line.strip().split(\"\\t\")\n # If this fails, the file format is not standard-compatible\n assert len(parts) == len(gffInfoFields)\n # Normalize data\n normalizedInfo = {\n \"seqid\": None if parts[0] == \".\" else urllib.unquote(parts[0]),\n \"source\": None if parts[1] == \".\" else urllib.unquote(parts[1]),\n \"type\": None if parts[2] == \".\" else urllib.unquote(parts[2]),\n \"start\": None if parts[3] == \".\" else int(parts[3]),\n \"end\": None if parts[4] == \".\" else int(parts[4]),\n \"score\": None if parts[5] == \".\" else float(parts[5]),\n \"strand\": None if parts[6] == \".\" else urllib.unquote(parts[6]),\n \"phase\": None if parts[7] == \".\" else urllib.unquote(parts[7]),\n \"attributes\": parseGFFAttributes(parts[8])\n }\n # Alternatively, you can emit the dictionary here, if you need\n # mutability:\n # yield normalizedInfo\n yield GFFRecord(**normalizedInfo)", "def load_gff(filepath):\n # GFF fields\n colnames = ['seqid', 'source', 'type', 'start', 'end', 'score', 'strand',\n 'phase', 'attributes']\n\n # get lines from file\n with open(filepath, 'r') as fp:\n lines = fp.readlines()\n\n # filter out non-gene entries\n gene_rows = [ x for x in lines if 'gene\\t' in x]\n\n # Next, let's create a StringIO buffer -- this is similar to the file\n # and url handles we have seen so far. 
We can then pass this to a csv\n # reader instance in the same way we have seen for actual files\n\n # First though, let's collapse the rows back into a single string\n csv_str = \"\".join(gene_rows)\n str_buffer = StringIO.StringIO(csv_str)\n\n return csv.DictReader(str_buffer, fieldnames=colnames, delimiter='\\t')", "def _parseRecords(self):\n # dict of parse methods for most common records that will be stored in structured arrays\n FLAG2METHOD = {'PS' : self.parseHighPassRecord,\n 'PC' : self.parseLowPassRecord,\n 'VD' : self.parseDigitalSValRecord}\n # dict of (record type, listname to store it in) tuples\n FLAG2REC = {'L' : (LayoutRecord, 'layoutrecords'),\n 'MS' : (SurfMessageRecord, 'messagerecords'),\n 'MU' : (UserMessageRecord, 'messagerecords'),\n 'PE' : (EpochRecord, 'epochrecords'),\n 'D' : (DisplayRecord, 'displayrecords'),\n 'VA' : (AnalogSValRecord, 'analogsvalrecords')}\n f = self.f\n while True:\n # returns an empty string when EOF is reached\n flag = f.read(2).rstrip(NULL).decode() # TODO: should this strip NULL?\n if flag == '':\n break\n # put file pointer back to start of flag\n f.seek(-2, 1) # TODO: unnecessary - doesn't this slow down parsing quite a bit?\n if flag in FLAG2METHOD: # these are the most common\n FLAG2METHOD[flag](f) # call the method\n elif flag in FLAG2REC:\n rectype, reclistname = FLAG2REC[flag]\n rec = rectype()\n rec.parse(f)\n #wx.Yield() # allow wx GUI event processing during parsing\n self._appendRecord(rec, reclistname)\n else:\n raise ValueError('Unexpected flag %r at offset %d' % (flag, f.tell()))\n #self.percentParsed = f.tell() / self.filesize * 100", "def loadContinuousRecord(self, record):\n # TODO: add chans arg to pull out only certain chans, and maybe a ti arg\n # to pull out less than the full set of sample points for this record\n self.f.seek(record['dataoffset'])\n # {ADC Waveform type; dynamic array of SHRT (signed 16 bit)} - converted to an ndarray\n # stuct.unpack for this is very slow, load directly using numpy\n data = np.fromfile(self.f, dtype=np.int16, count=record['NumSamples'])\n data -= 2048 # offset 12 bit unsigned data to be centered around 0\n nchans = self.layoutrecords[record['Probe']].nchans\n data.shape = (nchans, -1) # reshape to have nchans rows, as indicated in layout\n return data", "def process(\n self,\n bed_record: bed_pb2.BedRecord) -> Iterable[Tuple[str, bed_pb2.BedRecord]]:\n\n pacbio_molecule_name = preprocess_utils.get_pacbio_molecule_name(\n bed_record.name)\n if pacbio_molecule_name is not None:\n yield pacbio_molecule_name, bed_record", "def import_gff(file, genome_version, verbose = False):\n \n from tridentdb import models\n import re\n from django.db.utils import DatabaseError\n \n if genome_version == None:\n print(\"Genome Version is needed for loading a gff file.\")\n return\n\n genomes = models.Genome.objects.filter(genome_ver = genome_version)\n if len(genomes) == 0:\n print(\"Unknown Genome Version: %s\" % genome_version)\n return\n \n lineno = 1\n for line in file:\n if verbose:\n print(\"Line Number: %d\" % lineno)\n lineno += 1\n if not line:\n continue\n line = line.strip()\n if line[0] == '#':\n continue\n info = line.split('\\t')\n chromosome = info[0].replace(\"chr\", \"\")\n is_primary_transcript = (info[2] == 'miRNA_primary_transcript')\n genomic_mir_start = info[3]\n genomic_mir_end = info[4]\n is_on_positive_strand = (info[6] == '+')\n \n mirbase_id = mirbase_acc = mirbase_name = mirbase_derives_from = None\n mirbase = info[8].split(';')\n for tag in mirbase:\n (name, val) = 
tag.split('=')\n if name == \"ID\":\n mirbase_id = val\n elif name == \"accession_number\":\n mirbase_acc = val\n elif name == \"Alias\":\n # Use alias for accession_number IFF accession_number\n # is not used\n if not mirbase_acc:\n mirbase_acc = val\n elif name == \"Name\":\n mirbase_name = val\n elif name == \"derives_from\":\n mirbase_derives_from = val\n else:\n print(\"Unknown Mirbase tag: \\\"%s\\\"\" % name)\n continue\n\n mirna = models.MicroRNA(chromosome=chromosome, is_primary_transcript = is_primary_transcript, genomic_mir_start = genomic_mir_start, genomic_mir_end = genomic_mir_end, is_on_positive_strand = is_on_positive_strand, mirbase_id = mirbase_id, mirbase_acc = mirbase_acc, mirbase_name = mirbase_name, mirbase_derives_from = mirbase_derives_from, genome = genomes[0] )\n \n try:\n mirna.save()\n except DatabaseError as de:\n from sys import stderr\n stderr.write(\"Error loading GFF line: {0}\\n\".format(line))\n raise de\n ##end of import_gff", "def __init__(\n self,\n locus_tag: str,\n gene_type: str,\n location: Union[FeatureLocation, CompoundLocation],\n name: str,\n reference_sequence: Seq,\n cog: str = None,\n y_ome: str = None,\n essential: bool = False,\n replication_strand: str = None,\n origin_distance: int = None,\n terminus_distance: int = None\n ):\n\n super().__init__('gene', location=location, reference_sequence=reference_sequence, name=name)\n self.reading_frame = get_reading_frame(self.location, len(reference_sequence))\n\n # if the gene is a coding sequence, it should have a multiple of 3 length; sequence is set by super init\n if gene_type == 'CDS' and len(self.location) % 3 != 0:\n raise ValueError(locus_tag + ': sequence should have multiple of 3 length if gene is coding')\n\n self.locus_tag = locus_tag\n self.gene_type = gene_type\n self.cog = cog\n self.y_ome = y_ome\n self.essential = essential\n self.replication_strand = replication_strand\n self.origin_distance = origin_distance\n self.terminus_distance = terminus_distance\n \n # only set by add_regulon_db_gene_ids\n self.id = None\n\n # only set after calculate_and_add_cai is run\n self.cai = None\n\n # only set after the appropriate linking functions are run\n self.protein = None\n self.trna = None\n self.transcription_units = []\n self.attenuators = []\n self.riboswitches = []\n self.shine_dalgarno = None\n self.i_modulons = []", "def parseFastaHeader(self, seqRecord):\n [modbaseId, uniprotId] = seqRecord.id.split('|')\n seq = pcssPeptide.PcssProtein(modbaseId, self.pcssRunner)\n seq.setUniprotId(uniprotId)\n return seq", "def parse_transcript(gene_obj, tx_obj, build=None):\n build = build or 37\n add_tx_links(tx_obj, build)\n\n if tx_obj.get('refseq_id'):\n gene_name = (gene_obj['common']['hgnc_symbol'] if gene_obj['common'] else\n gene_obj['hgnc_id'])\n tx_obj['change_str'] = transcript_str(tx_obj, gene_name)", "def _parse_records(self, customization=None):\n def _add_parsed_record(record, records):\n \"\"\"\n Atomic function to parse a record\n and append the result in records\n \"\"\"\n if record != \"\":\n logger.debug('The record is not empty. 
Let\\'s parse it.')\n parsed = self._parse_record(record, customization=customization)\n if parsed:\n logger.debug('Store the result of the parsed record')\n records.append(parsed)\n else:\n logger.debug('Nothing returned from the parsed record!')\n else:\n logger.debug('The record is empty')\n\n records = []\n record = \"\"\n # read each line, bundle them up until they form an object, then send for parsing\n for linenumber, line in enumerate(self.bibtex_file_obj):\n logger.debug('Inspect line %s', linenumber)\n if line.strip().startswith('@'):\n # Remove leading whitespaces\n line = line.lstrip()\n logger.debug('Line starts with @')\n # Parse previous record\n _add_parsed_record(record, records)\n # Start new record\n logger.debug('The record is set to empty')\n record = \"\"\n # Keep adding lines to the record\n record += line\n\n # catch any remaining record and send it for parsing\n _add_parsed_record(record, records)\n logger.debug('Set the list of entries')\n self.bib_database.entries = records" ]
[ "0.65917826", "0.5944873", "0.5835161", "0.5765218", "0.5681081", "0.5671689", "0.55402595", "0.54088056", "0.5357843", "0.53517663", "0.5294333", "0.5235795", "0.52263", "0.52156526", "0.5198588", "0.5175581", "0.51186824", "0.51084155", "0.50481284", "0.5037028", "0.5034998", "0.50079185", "0.49726814", "0.49466294", "0.49367297", "0.49347782", "0.4909631", "0.489418", "0.48893008", "0.4868567" ]
0.6747453
0
Creates a SeqRecord object from a pdm_utils Genome object.
def genome_to_seqrecord(phage_genome): assert phage_genome != None,\ "Genome object passed is None and not initialized" try: record = SeqRecord(phage_genome.seq) record.seq.alphabet = IUPAC.IUPACAmbiguousDNA() except AttributeError: print("Genome object failed to be converted to SeqRecord.", "Genome valid attribute 'seq' is required to", "convert to SeqRecord object.") raise record.name = phage_genome.name if phage_genome.accession != "": record.id = phage_genome.accession record.features = get_seqrecord_features(phage_genome) record.description = get_seqrecord_description(phage_genome) record.annotations=\ get_seqrecord_annotations(phage_genome) return record
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_seq_record(self):\n\t\t#create the anotations in a pythonic manner\n\t\texempt = ['name', 'description', 'features', 'sequence'] #things which aren't annotations\n\t\tannotations = { }\n\t\tfor key, value in self.__dict__.iteritems():\n\t\t\tif key.lower() not in exempt:\n\t\t\t\tannotations[key] = value\n\t\t\n\t\t#create the features\n\t\tfeatures = []\n\t\tfor feat in self.features:\n\t\t\tfeatures.append( SeqFeature( \n\t\t\t\tlocation = FeatureLocation(feat['startpos'] - 1, feat['endpos']), #NB partsregistry uses 1-offset, and inclusive.\n\t\t\t\ttype = feat['type'],\n\t\t\t\tstrand = feat['strand'],\n\t\t\t\tqualifiers = {'title': feat['name'],}))\n\t\t\n\t\treturn SeqRecord(\tself.sequence, \n\t\t\t\t\t\t\tid=self.name,\n\t\t\t\t\t\t\tname=self.name,\n\t\t\t\t\t\t\tdescription=self.description,\n\t\t\t\t\t\t\tfeatures=features,\n\t\t\t\t\t\t\tannotations=annotations)", "def build_seq_obj(self, code, gene_code, our_taxon_names, all_seqs):\n this_voucher_seqs = self.extract_sequence_from_all_seqs_in_db(all_seqs, code, gene_code)\n\n if this_voucher_seqs == '?':\n seq = '?' * self.gene_codes_metadata[gene_code]['length']\n else:\n seq = self.create_seq_record(this_voucher_seqs)\n\n seq_record = SeqRecordExpanded(seq)\n\n if code in our_taxon_names:\n seq_record.voucher_code = code\n seq_record.taxonomy = our_taxon_names[code]\n seq_record.gene_code = gene_code\n seq_record.reading_frame = self.gene_codes_metadata[gene_code]['reading_frame']\n seq_record.table = self.gene_codes_metadata[gene_code]['genetic_code']\n return seq_record\n else:\n return None", "def sequence_to_biopython_record(\n sequence, id=\"<unknown id>\", name=\"<unknown name>\", features=()\n):\n if has_dna_alphabet:\n seq = Seq(sequence, alphabet=DNAAlphabet())\n else:\n seq = Seq(sequence)\n\n return SeqRecord(\n seq=seq,\n id=id,\n name=name,\n features=list(features),\n annotations={\"molecule_type\": \"DNA\"},\n )", "def from_genbank(cls, filename):\n\t\tseq_record = SeqIO.read(filename, 'genbank')\n\t\trec = cls(seq_record=seq_record)\n\t\treturn rec", "def __init__(self, seq_record=None):\n\t\tself._record = seq_record", "def make_protein_record(nuc_record):\n return SeqRecord(seq = nuc_record.seq.translate(to_stop=True), \\\n id = \"trans_\" + nuc_record.id, \\\n description = \"translation of CDS, using default table\")", "def geneSpecificRecord (self, orfList, headList, num):\n sequenceInfo = []\n for gene in orfList: # Finds target gene in each genome\n sequenceInfo.append(gene[num]) # ***any gene can be utilized***\n longestLength = max(len(s) for s in sequenceInfo) # gets longest seq to match length with gap characters\n paddedSequences = [s.ljust(longestLength, '-') for s in sequenceInfo] # Adds gap characters\n \n records = (SeqRecord(Seq(s), id = str(paddedSequences.index(s))) for s in paddedSequences) #creating a SeqRecord\n return(records)", "def change_biopython_record_sequence(record, new_seq):\n new_record = deepcopy(record)\n\n if has_dna_alphabet:\n seq = Seq(new_seq, alphabet=DNAAlphabet())\n else:\n seq = Seq(new_seq)\n\n new_record.seq = seq\n return new_record", "def create_fasta_seqrecord(header, sequence_string):\n seq = Seq(sequence_string, alphabet=IUPAC.unambiguous_dna)\n seqrecord = SeqRecord(seq, description=header)\n return seqrecord", "def parse_genome_data(seqrecord, filepath=pathlib.Path(),\n translation_table=11, genome_id_field=\"_organism_name\", gnm_type=\"\",\n host_genus_field=\"_organism_host_genus\"):\n\n # Keep track of the file from which the record is 
derived.\n gnm = genome.Genome()\n gnm.set_filename(filepath)\n gnm.type = gnm_type\n\n try:\n gnm.organism = seqrecord.annotations[\"organism\"]\n except:\n gnm.organism = \"\"\n finally:\n # Identifies host and phage name from organism field.\n gnm.parse_organism()\n\n try:\n # Since accessions are stored in a list, there may be more than\n # one accessions associated with this file.\n # The first accession in the list is assumed to be the most recent.\n accession = seqrecord.annotations[\"accessions\"][0]\n except:\n accession = \"\"\n finally:\n gnm.set_accession(accession)\n\n try:\n gnm.description = seqrecord.description\n # It appears that if description is not present, Biopython\n # auto-populates this attribute as \"<unknown description>\"\n if gnm.description == \"<unknown description>\":\n gnm.description = \"\"\n except:\n gnm.description = \"\"\n finally:\n # Identifies host and phage name from description field.\n gnm.parse_description()\n\n try:\n gnm.source = seqrecord.annotations[\"source\"]\n except:\n gnm.source = \"\"\n finally:\n # Identifies host and phage name from record source field.\n gnm.parse_source()\n\n try:\n # The retrieved authors can be stored in multiple Reference elements.\n refs = seqrecord.annotations[\"references\"]\n authors_list = []\n for ref in refs:\n # Note: Reference objects are instantiated with an empty\n # authors attribute. So if no authors are present in a Reference,\n # it will still concatenate an empty string, resulting in an\n # author_string = \";;;\" etc. So only add the authors info if\n # it is not an empty string.\n if ref.authors != \"\":\n authors_list.append(ref.authors)\n authors_string = \";\".join(authors_list)\n gnm.authors = authors_string\n except:\n gnm.authors = \"\"\n\n # Biopython requires the parsed record contains a sequence, so\n # no need to test whether the seq attribute is present or not.\n # Nucleotide sequence, length, and % GC.\n gnm.set_sequence(seqrecord.seq)\n\n try:\n date = seqrecord.annotations[\"date\"]\n gnm.date = datetime.strptime(date, \"%d-%b-%Y\")\n except:\n gnm.date = constants.EMPTY_DATE\n\n\n # # Now that record fields are parsed, set the genome name, id,\n # # and host_genus.\n if genome_id_field != \"\":\n gnm.name = getattr(gnm, genome_id_field)\n gnm.set_id(value=gnm.name)\n else:\n # The seqrecord name and id are used if genome_id_field is empty.\n try:\n gnm.name = seqrecord.name\n # It appears that if name is not present, Biopython auto-populates\n # this attribute as \"<unknown name>\"\n if gnm.name == \"<unknown name>\":\n gnm.name = \"\"\n except:\n gnm.name = \"\"\n\n try:\n gnm.id = seqrecord.id\n # It appears that if id is not present, Biopython auto-populates\n # this attribute as \"<unknown id>\"\n if gnm.id == \"<unknown id>\":\n gnm.id = \"\"\n except:\n gnm.id = \"\"\n\n gnm.set_host_genus(attribute=host_genus_field)\n\n # Create lists of parsed features.\n # Note: Biopython instantiates the features attribute with\n # an empty list, so no need to test if features attribute is\n # present or not.\n seqfeature_dict = create_seqfeature_dictionary(seqrecord.features)\n\n cds_list = []\n if \"CDS\" in seqfeature_dict.keys():\n for seqfeature in seqfeature_dict[\"CDS\"]:\n cds_ftr = parse_cds_seqfeature(seqfeature)\n cds_ftr.genome_id = gnm.id\n cds_ftr.genome_length = gnm.length\n cds_ftr.set_nucleotide_sequence(parent_genome_seq=gnm.seq)\n cds_list.append(cds_ftr)\n\n source_list = []\n if \"source\" in seqfeature_dict.keys():\n for seqfeature in seqfeature_dict[\"source\"]:\n 
src_ftr = parse_source_seqfeature(seqfeature)\n src_ftr.genome_id = gnm.id\n source_list.append(src_ftr)\n\n trna_list = []\n if \"tRNA\" in seqfeature_dict.keys():\n for seqfeature in seqfeature_dict[\"tRNA\"]:\n trna_ftr = parse_trna_seqfeature(seqfeature)\n trna_ftr.genome_id = gnm.id\n trna_ftr.genome_length = gnm.length\n trna_ftr.set_nucleotide_sequence(parent_genome_seq=gnm.seq)\n trna_ftr.set_nucleotide_length(use_seq=True)\n trna_ftr.parse_amino_acid()\n trna_ftr.parse_anticodon()\n trna_list.append(trna_ftr)\n\n tmrna_list = []\n if \"tmRNA\" in seqfeature_dict.keys():\n for seqfeature in seqfeature_dict[\"tmRNA\"]:\n tmrna_ftr = parse_tmrna_seqfeature(seqfeature)\n tmrna_ftr.genome_id = gnm.id\n tmrna_ftr.genome_length = gnm.length\n tmrna_ftr.set_nucleotide_sequence(parent_genome_seq=gnm.seq)\n tmrna_ftr.set_nucleotide_length(use_seq=True)\n tmrna_ftr.parse_peptide_tag()\n tmrna_ftr.run_aragorn()\n tmrna_list.append(tmrna_ftr)\n\n gnm.translation_table = translation_table\n gnm.set_cds_features(cds_list)\n gnm.set_source_features(source_list)\n gnm.set_trna_features(trna_list)\n gnm.set_tmrna_features(tmrna_list)\n\n # The feature.id is constructed from the Genome.id and the feature order.\n gnm.set_feature_ids(use_type=True, use_cds=True)\n gnm.set_feature_ids(use_type=True, use_source=True)\n gnm.set_feature_ids(use_type=True, use_trna=True)\n gnm.set_feature_ids(use_type=True, use_tmrna=True)\n return gnm", "def create_seq_record(self, s):\n gene_code = s['gene_code']\n length = self.gene_codes_metadata[gene_code]['length']\n sequence = s['sequences']\n length_difference = length - len(sequence)\n\n sequence += '?' * length_difference\n return sequence", "def parser(raw_seq, date):\n taxon_id = int(raw_seq.features[0].qualifiers['db_xref'][0][6:])\n organism = raw_seq.annotations['organism']\n accession = raw_seq.annotations['accessions'][0]\n gene = []\n records = []\n frag_type = 'whole'\n begin = 1\n end = len(raw_seq)\n sequence = str(raw_seq.seq)\n name = organism\n strand = 1\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n records.append(rec)\n for i in raw_seq.features:\n if i.type == 'gene' and 'gene' in i.qualifiers:\n if i.location_operator != 'join':\n frag_type = 'gene'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n name = str(i.qualifiers['gene'][0])\n strand = str(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.location_operator == 'join':\n frag_type = 'gene'\n begin = int(i.sub_features[0].location.start)\n end = int(i.sub_features[0].location.end)\n name = str(i.qualifiers['gene'][0])\n strand = str(i.location.strand)\n sequence = ''\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n gene.append(rec)\n begin = int(i.sub_features[1].location.start)\n end = int(i.sub_features[1].location.end)\n sequence = ''.join([str(raw_seq.seq[begin:end]), str(raw_seq.seq[begin:end])])\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'CDS' and 'gene' in i.qualifiers:\n frag_type = 'cds'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n name = str(i.qualifiers['gene'][0]).replace(' ', '_')\n strand = str(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'tRNA' and 
'gene' in i.qualifiers:\n frag_type = 'tRNA'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n if len(sequence) >= 100:\n sequence = ''\n name = str(i.qualifiers['gene'][0]).replace(' ', '_')\n strand = str(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'rRNA':\n frag_type = 'rRNA'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n name = str(i.qualifiers['product'][0]).replace(' ', '_')\n strand = str(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'exon' and 'gene' in i.qualifiers:\n frag_type = 'exon'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n if 'number' in i.qualifiers:\n name = '{0}_exon_{1}'.format(i.qualifiers['gene'][0],\n i.qualifiers['number'][0])\n else:\n name = '{0}_exon'.format(i.qualifiers['gene'][0])\n strand = int(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'intron' and 'gene' in i.qualifiers:\n frag_type = 'intron'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n strand = str(i.location.strand)\n if 'number' in i.qualifiers:\n name = '{0}_{1}_intron'.format(i.qualifiers['gene'][0],\n i.qualifiers['number'][0])\n else:\n name = '{0}_intron'.format(i.qualifiers['gene'][0])\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n\n records.append(rec)\n gene.sort(key=lambda x: x[5])\n\n for i in range(len(gene) - 1):\n frag_type = 'spacer'\n now = gene[i]\n then = gene[i + 1]\n tail = now[6] + 1\n head = then[5] - 1\n sequence = str(raw_seq.seq[tail:head])\n name = '{0}-{1}_spacer'.format(now[3], then[3])\n strand = 0\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n records.append(rec)\n records.extend(gene)\n\n database.extend(records)", "def get_cds(geneid, seqdict):\n nuc_seq = seqdict[geneid]\n # Translate it\n aa_seq = nuc_seq.seq.translate()\n # Decorate it like you would a full SeqRecord object\n aa_seq_rec = SeqRecord.SeqRecord(\n aa_seq,\n id=geneid,\n description='')\n return aa_seq_rec", "def from_record(\n cls,\n record,\n specifications_dict=\"default\",\n logger=\"bar\",\n extra_constraints=(),\n extra_objectives=(),\n ):\n # unfortunately the local import below is the most elegant found so\n # far. 
builtin_specifications cannot be imported at the top of this\n # file as some built-in specifications use DnaOptimizationProblem\n # internally to resolve constructs (see EnforcePatternOccurences)\n if isinstance(record, str):\n record = load_record(record)\n parameters = dict(\n sequence=record,\n constraints=[] + list(extra_constraints), # shallow copy\n objectives=[] + list(extra_objectives), # shallow copy\n logger=logger,\n )\n for feature in record.features:\n if feature.type != \"misc_feature\":\n continue\n label = find_specification_label_in_feature(feature)\n if label is None:\n continue\n specs = Specification.list_from_biopython_feature(\n feature, specifications_dict=specifications_dict\n )\n for role, specification in specs:\n parameters[role + \"s\"].append(specification)\n return cls(**parameters)", "def get_seqrecord_annotations(phage_genome):\n\n annotations = {\"molecule type\": \"DNA\",\\\n \"topology\" : \"linear\",\\\n \"data_file_division\" : \"PHG\",\\\n \"date\" : \"\",\\\n \"accessions\" : [],\\\n \"sequence_version\" : \"1\",\\\n \"keyword\" : [],\\\n \"source\" : \"\",\\\n \"organism\" : \"\",\\\n \"taxonomy\" : [],\\\n \"comment\": ()}\n annotations[\"date\"] = phage_genome.date\n annotations[\"source\"] =\\\n \"{} phage {}\".format\\\n (phage_genome.host_genus, phage_genome.id)\n annotations[\"organism\"] =\\\n \"{} phage {}\".format\\\n (phage_genome.host_genus, phage_genome.name)\n annotations[\"taxonomy\"].append(\"Viruses\")\n annotations[\"taxonomy\"].append(\"dsDNA Viruses\")\n annotations[\"taxonomy\"].append(\"Caudovirales\")\n annotations[\"comment\"] =\\\n get_seqrecord_annotations_comments(phage_genome)\n return annotations", "def parse_sequence(fasta_seq, length):\n # extract name and sequence\n name, seq = fasta_seq.id, fasta_seq.seq\n\n # Cannot create a larger sequence than the original\n length = min(length, len(seq))\n\n # find the maximum starting index that generates a full\n # length subsequence\n max_start = len(seq) - length\n start = random.randint(0, max_start)\n \n # generate a new sequence\n gen_seq = seq[start:(start + length)]\n\n # return the generated sequence\n return SeqRecord(gen_seq, name, '', '')", "def _sequences_to_new_records(sequences):\n if isinstance(sequences, dict):\n sequences = list(sequences.items())\n records = []\n for seq in sequences:\n if hasattr(seq, \"id\"):\n records.append(deepcopy(seq))\n else:\n name, seq = seq\n records.append(\n sequence_to_biopython_record(seq, id=name, name=name)\n )\n return records", "def gen_record(document_id, primary_doc, gen_links):\n paper_id = document_id.split(\"abs/\")[-1]\n search = arxiv.Search(id_list=[paper_id])\n result = next(search.get())\n record = gen_arxiv_record_from_result(result, primary_doc=primary_doc)\n return record", "def __init__(self, featureType, chrom, start, end, strand, meta):\n\n self.meta = meta\n self.featureType = featureType\n self.chrom = chrom.replace('chr', '')\n self.start = int(start)\n self.end = int(end)\n self.strand = strand\n self.geneId = meta['gene_id']\n self.exonId = None\n self.exonNum = None\n self.transcriptId = None\n self.geneName = meta['gene_name']\n self.transcriptName = None\n self.transcriptBioType = None\n self.source = meta['gene_source']\n self.geneBioType = meta['gene_biotype']\n self.set_values()", "def __init__(self, source_sig_path=None, audio_key=None,\n signal_length=8192, random_speech_samples=True, **kwargs):\n self.speech_set = None\n if audio_key is None:\n msg = \"To generate the audio data, a path to a 
source signal\" \\\n \" database is needed\"\n assert source_sig_path is not None, msg\n speech_db_part = kwargs.get('speech_db_part', None)\n msg = \"To generate the audio data, a name of a dataset with \" \\\n \"source signals is needed.\"\n assert speech_db_part is not None, msg\n self.speech_set = \\\n JsonDatabase(source_sig_path).get_dataset(speech_db_part)\n self.random_speech_samples = random_speech_samples\n if source_sig_path is None:\n msg = \"The key under which the audio recording can be found in \" \\\n \"the JSON of the database is required.\"\n assert audio_key is not None, msg\n self.signal_length = signal_length\n self.mic_pair = kwargs.get('mic_pair')\n self.audio_key = audio_key", "def snapgene_file_to_seqrecord(*a, **k):\n raise ImportError(\n \"Please install snapgene_reader to import Snapgene .dna files\"\n )", "def parseFastaHeader(self, seqRecord):\n [modbaseId, uniprotId] = seqRecord.id.split('|')\n seq = pcssPeptide.PcssProtein(modbaseId, self.pcssRunner)\n seq.setUniprotId(uniprotId)\n return seq", "def test_make_fasta_rec(self):\r\n header = '>E2_1 FYI2DSB01B17QJ orig_bc=ATCACTAGTCAC new_bc=ATCACTAGTCAC bc_diffs=0'\r\n seq = 'CTGGTC'\r\n qual = map(int, '32 32 32 19 19 19'.split())\r\n self.assertEqual(make_fastq_rec(header, seq, qual),\r\n \"\"\"@E2_1 FYI2DSB01B17QJ orig_bc=ATCACTAGTCAC new_bc=ATCACTAGTCAC bc_diffs=0\r\nCTGGTC\r\n+E2_1 FYI2DSB01B17QJ orig_bc=ATCACTAGTCAC new_bc=ATCACTAGTCAC bc_diffs=0\r\nAAA444\"\"\")", "def fromgenotype(self):\n\t\tpass", "def __init__(self, seq_id, gene_id, name, start, end, strand, \n feature=None, extra=None):\n self.seq_id = seq_id \n self.gene_id = gene_id\n self.name = name\n self.start, self.end = sorted([int(start), int(end)])\n self.strand = strand\n if not feature is None:\n self.feature = feature\n if not extra is None:\n self.extra = extra", "def MakeSeq(self,content):\n return self.register(Seq(content,reg=self))", "def get_seqrecord_description(phage_genome):\n\n description = (f\"{phage_genome.host_genus} phage {phage_genome.id}\"\n \", complete genome\")\n return description", "def generate_genome(genbank):\n row = {\n '_key': genbank.id,\n 'name': genbank.name,\n 'description': genbank.description,\n 'molecule_type': genbank.annotations.get('molecule_type', ''),\n 'topology': genbank.annotations.get('topology', ''),\n 'data_file_division': genbank.annotations.get('data_file_division', ''),\n 'date': genbank.annotations.get('date', ''),\n 'accessions': genbank.annotations.get('accessions', []),\n 'sequence_version': genbank.annotations.get('sequence_version', ''),\n 'source': genbank.annotations.get('source', ''),\n 'dbxrefs': genbank.dbxrefs,\n 'organism_name': genbank.annotations.get('organism', ''),\n 'taxonomy': ', '.join(genbank.annotations.get('taxonomy', '')),\n 'comment': genbank.annotations.get('comment', ''),\n 'annotation_data': {}\n }\n annot_data = genbank.annotations.get('structured_comment', {}).get('Genome-Annotation-Data', {})\n for (key, val) in annot_data.items():\n row['annotation_data'][key] = val\n yield row", "def create(cls: Type[Sequence], sequence: bytes, alphabet: Alphabet) -> Sequence:\n return cls(lib.imm_seq_create(sequence, alphabet.imm_abc), alphabet)", "def __init__(self, seq):\n # Check the type of seq. Only strings are accepted\n if type(seq) == type(\"string\"):\n self.sequence = seq.upper()\n else:\n raise Exception(\"Invalid typesequence of nucleotides for Sequence class.\")" ]
[ "0.666563", "0.63349533", "0.61896443", "0.59928375", "0.59649557", "0.58573574", "0.57353216", "0.57348984", "0.56775516", "0.5654107", "0.56354314", "0.5570128", "0.5517305", "0.54365057", "0.54361284", "0.54074484", "0.53710055", "0.5333639", "0.52329975", "0.5215455", "0.5215373", "0.5175434", "0.51707476", "0.51675296", "0.51502305", "0.50952846", "0.5080425", "0.507123", "0.50613683", "0.5025099" ]
0.71668196
0
Helper function that uses Genome data to populate the annotations SeqRecord attribute
def get_seqrecord_annotations(phage_genome): annotations = {"molecule type": "DNA",\ "topology" : "linear",\ "data_file_division" : "PHG",\ "date" : "",\ "accessions" : [],\ "sequence_version" : "1",\ "keyword" : [],\ "source" : "",\ "organism" : "",\ "taxonomy" : [],\ "comment": ()} annotations["date"] = phage_genome.date annotations["source"] =\ "{} phage {}".format\ (phage_genome.host_genus, phage_genome.id) annotations["organism"] =\ "{} phage {}".format\ (phage_genome.host_genus, phage_genome.name) annotations["taxonomy"].append("Viruses") annotations["taxonomy"].append("dsDNA Viruses") annotations["taxonomy"].append("Caudovirales") annotations["comment"] =\ get_seqrecord_annotations_comments(phage_genome) return annotations
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_seq_record(self):\n\t\t#create the anotations in a pythonic manner\n\t\texempt = ['name', 'description', 'features', 'sequence'] #things which aren't annotations\n\t\tannotations = { }\n\t\tfor key, value in self.__dict__.iteritems():\n\t\t\tif key.lower() not in exempt:\n\t\t\t\tannotations[key] = value\n\t\t\n\t\t#create the features\n\t\tfeatures = []\n\t\tfor feat in self.features:\n\t\t\tfeatures.append( SeqFeature( \n\t\t\t\tlocation = FeatureLocation(feat['startpos'] - 1, feat['endpos']), #NB partsregistry uses 1-offset, and inclusive.\n\t\t\t\ttype = feat['type'],\n\t\t\t\tstrand = feat['strand'],\n\t\t\t\tqualifiers = {'title': feat['name'],}))\n\t\t\n\t\treturn SeqRecord(\tself.sequence, \n\t\t\t\t\t\t\tid=self.name,\n\t\t\t\t\t\t\tname=self.name,\n\t\t\t\t\t\t\tdescription=self.description,\n\t\t\t\t\t\t\tfeatures=features,\n\t\t\t\t\t\t\tannotations=annotations)", "def copy_annotations_from_unaligned(aligned_seqrec: SeqRecord, unaligned_seqrec: SeqRecord):\n # NCBI Blast id includes description, whereas alignment does not\n assert aligned_seqrec.id in unaligned_seqrec.id, f\"{aligned_seqrec.id} <> {unaligned_seqrec.id}\"\n # copy annotations from previous\n newrec = deepcopy(aligned_seqrec)\n newrec.annotations = unaligned_seqrec.annotations\n # clear any letter annotations added during deepcopy\n newrec.letter_annotations = dict()\n # original sequence and letter annotations\n seq = unaligned_seqrec.seq\n letter_annotations = unaligned_seqrec.letter_annotations\n # index to track position in original sequence\n i = 0\n for j, letter in enumerate(aligned_seqrec.seq):\n if letter in [gap_letter, stop_letter]:\n for key, values in letter_annotations.items():\n # convert strings into lists of characters,\n # then combine into string at end of loop\n if key == \"seqnums\":\n letter_annotation = None\n elif all(isinstance(value, str) for value in values):\n letter_annotation = gap_letter\n else:\n letter_annotation = None\n newrec.letter_annotations.setdefault(key, list()).append(letter_annotation)\n else:\n while seq[i] in [gap_letter, stop_letter]:\n i += 1\n assert letter == seq[i], f\"letter {letter} at {j} <> seq {seq[i]} at {i}\"\n for key in letter_annotations.keys():\n newrec.letter_annotations.setdefault(key, list()).append(letter_annotations[key][i])\n i += 1\n # convert list of chars into string\n for key, values in letter_annotations.items():\n if isinstance(values, str):\n newrec.letter_annotations[key] = \"\".join(newrec.letter_annotations[key])\n return newrec", "def main():\n args = get_args()\n FILE = args.FILE\n annotations = args.annotations\n outfile = args.outfile\n \n \n if not os.path.isfile(FILE):\n die('\"{}\" is not a file'.format(FILE))\n if not os.path.isfile(annotations):\n die('\"{}\" is not a file'.format(annotations))\n if os.path.isfile(FILE) and os.path.isfile(annotations):\n reader = csv.DictReader(open(FILE), delimiter = '\\t', fieldnames = (\"qseqid\", \"sseqid\", \"pident\", \"length\", \"mismatch\", \"gapopen\", \"qstart\", \"qend\", \"sstart\", \"send\", \"evalue\", \"bitscore\"))\n reader_a = csv.DictReader(open(annotations), fieldnames = (\"centroid\", \"domain\", \"kingdom\", \"phylum\", \"class\", \"order\", \"genus\", \"species\"))\n reader_b = csv.reader(open(annotations, 'r'))\n anno_dict = {}\n for row in reader_b:\n key1 = row[0]\n anno_dict[key1] = row[1:]\n\n #print(anno_dict)\n \n \"\"\"for dct in map(dict, reader_a):\n genus = (f\"{dct['genus']}\")\n species = (f\"{dct['species']}\")\n if genus == \"\": \n 
print(\"NA\")\n else:\n print(genus)\n if species == \"\":\n print(\"NA\")\n else:\n print(species)\"\"\"\n for dct in map(dict, reader):\n seq_id = (f\"{dct['sseqid']}\") \n pident = (f\"{dct['pident']}\")\n #print(seq_id)\n for dct_a in map(dict, reader_a):\n genus = (f\"{dct_a['genus']}\")\n species = (f\"{dct_a['species']}\")\n if any(seq_id == key for key in anno_dict): \n \"\"\"print(seq_id)\n print(pident)\n print(genus)\n print(species)\n #find a way to print genus and species of seq_id\n \"\"\"\n \n else:\n warn('Cannot find seq \"{}\" in lookup'.format(seq_id))\n \"\"\"for line_a in reader_a:\n an_id = (line_a['centroid']) \n print('\"{}\" is an_id'.format(an_id)) \n for line in reader:\n seq_id = (line['sseqid'])\n print('\"{}\" is seq_id'.format(seq_id))\n if seq_id == an_id:\n print(\"hi\")\n else:\n warn('Cannot find seq \"{}\" in lookup'.format(seq_id))\n \"\"\"\n #pprint.pprint(dict_list)\n #pprint.pprint(dict_list_a)\n #for key, value in d1.items():\n #if key is 'sseqid':\n #print(value)\n #print(dict_list_a['centroid']) ", "def parser(raw_seq, date):\n taxon_id = int(raw_seq.features[0].qualifiers['db_xref'][0][6:])\n organism = raw_seq.annotations['organism']\n accession = raw_seq.annotations['accessions'][0]\n gene = []\n records = []\n frag_type = 'whole'\n begin = 1\n end = len(raw_seq)\n sequence = str(raw_seq.seq)\n name = organism\n strand = 1\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n records.append(rec)\n for i in raw_seq.features:\n if i.type == 'gene' and 'gene' in i.qualifiers:\n if i.location_operator != 'join':\n frag_type = 'gene'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n name = str(i.qualifiers['gene'][0])\n strand = str(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.location_operator == 'join':\n frag_type = 'gene'\n begin = int(i.sub_features[0].location.start)\n end = int(i.sub_features[0].location.end)\n name = str(i.qualifiers['gene'][0])\n strand = str(i.location.strand)\n sequence = ''\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n gene.append(rec)\n begin = int(i.sub_features[1].location.start)\n end = int(i.sub_features[1].location.end)\n sequence = ''.join([str(raw_seq.seq[begin:end]), str(raw_seq.seq[begin:end])])\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'CDS' and 'gene' in i.qualifiers:\n frag_type = 'cds'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n name = str(i.qualifiers['gene'][0]).replace(' ', '_')\n strand = str(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'tRNA' and 'gene' in i.qualifiers:\n frag_type = 'tRNA'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n if len(sequence) >= 100:\n sequence = ''\n name = str(i.qualifiers['gene'][0]).replace(' ', '_')\n strand = str(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'rRNA':\n frag_type = 'rRNA'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n name = str(i.qualifiers['product'][0]).replace(' ', '_')\n strand = str(i.location.strand)\n rec = [taxon_id, organism, 
accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'exon' and 'gene' in i.qualifiers:\n frag_type = 'exon'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n if 'number' in i.qualifiers:\n name = '{0}_exon_{1}'.format(i.qualifiers['gene'][0],\n i.qualifiers['number'][0])\n else:\n name = '{0}_exon'.format(i.qualifiers['gene'][0])\n strand = int(i.location.strand)\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n elif i.type == 'intron' and 'gene' in i.qualifiers:\n frag_type = 'intron'\n begin = int(i.location.start)\n end = int(i.location.end)\n sequence = str(raw_seq.seq[begin:end])\n strand = str(i.location.strand)\n if 'number' in i.qualifiers:\n name = '{0}_{1}_intron'.format(i.qualifiers['gene'][0],\n i.qualifiers['number'][0])\n else:\n name = '{0}_intron'.format(i.qualifiers['gene'][0])\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n\n records.append(rec)\n gene.sort(key=lambda x: x[5])\n\n for i in range(len(gene) - 1):\n frag_type = 'spacer'\n now = gene[i]\n then = gene[i + 1]\n tail = now[6] + 1\n head = then[5] - 1\n sequence = str(raw_seq.seq[tail:head])\n name = '{0}-{1}_spacer'.format(now[3], then[3])\n strand = 0\n rec = [taxon_id, organism, accession, name, frag_type, begin, end, strand, sequence, date]\n records.append(rec)\n records.extend(gene)\n\n database.extend(records)", "def _set_joint_genome_info(self, gene_annotation_header_values, total_genes_in_genome_values):\n # Merge any pieces of global information that's not per-dataset\n self.gene_annotation_header = merge_values_to_unique(gene_annotation_header_values, blank_value=[], convert_for_set=tuple, \n value_name='gene_annotation_header', context='datasets in multi-dataset')\n self.total_genes_in_genome = merge_values_to_unique(total_genes_in_genome_values, blank_value=0, \n value_name='total_genes_in_genome', context='datasets in multi-dataset')", "def parse_genome_data(seqrecord, filepath=pathlib.Path(),\n translation_table=11, genome_id_field=\"_organism_name\", gnm_type=\"\",\n host_genus_field=\"_organism_host_genus\"):\n\n # Keep track of the file from which the record is derived.\n gnm = genome.Genome()\n gnm.set_filename(filepath)\n gnm.type = gnm_type\n\n try:\n gnm.organism = seqrecord.annotations[\"organism\"]\n except:\n gnm.organism = \"\"\n finally:\n # Identifies host and phage name from organism field.\n gnm.parse_organism()\n\n try:\n # Since accessions are stored in a list, there may be more than\n # one accessions associated with this file.\n # The first accession in the list is assumed to be the most recent.\n accession = seqrecord.annotations[\"accessions\"][0]\n except:\n accession = \"\"\n finally:\n gnm.set_accession(accession)\n\n try:\n gnm.description = seqrecord.description\n # It appears that if description is not present, Biopython\n # auto-populates this attribute as \"<unknown description>\"\n if gnm.description == \"<unknown description>\":\n gnm.description = \"\"\n except:\n gnm.description = \"\"\n finally:\n # Identifies host and phage name from description field.\n gnm.parse_description()\n\n try:\n gnm.source = seqrecord.annotations[\"source\"]\n except:\n gnm.source = \"\"\n finally:\n # Identifies host and phage name from record source field.\n gnm.parse_source()\n\n try:\n # The retrieved authors can be stored in multiple Reference elements.\n refs = seqrecord.annotations[\"references\"]\n 
authors_list = []\n for ref in refs:\n # Note: Reference objects are instantiated with an empty\n # authors attribute. So if no authors are present in a Reference,\n # it will still concatenate an empty string, resulting in an\n # author_string = \";;;\" etc. So only add the authors info if\n # it is not an empty string.\n if ref.authors != \"\":\n authors_list.append(ref.authors)\n authors_string = \";\".join(authors_list)\n gnm.authors = authors_string\n except:\n gnm.authors = \"\"\n\n # Biopython requires the parsed record contains a sequence, so\n # no need to test whether the seq attribute is present or not.\n # Nucleotide sequence, length, and % GC.\n gnm.set_sequence(seqrecord.seq)\n\n try:\n date = seqrecord.annotations[\"date\"]\n gnm.date = datetime.strptime(date, \"%d-%b-%Y\")\n except:\n gnm.date = constants.EMPTY_DATE\n\n\n # # Now that record fields are parsed, set the genome name, id,\n # # and host_genus.\n if genome_id_field != \"\":\n gnm.name = getattr(gnm, genome_id_field)\n gnm.set_id(value=gnm.name)\n else:\n # The seqrecord name and id are used if genome_id_field is empty.\n try:\n gnm.name = seqrecord.name\n # It appears that if name is not present, Biopython auto-populates\n # this attribute as \"<unknown name>\"\n if gnm.name == \"<unknown name>\":\n gnm.name = \"\"\n except:\n gnm.name = \"\"\n\n try:\n gnm.id = seqrecord.id\n # It appears that if id is not present, Biopython auto-populates\n # this attribute as \"<unknown id>\"\n if gnm.id == \"<unknown id>\":\n gnm.id = \"\"\n except:\n gnm.id = \"\"\n\n gnm.set_host_genus(attribute=host_genus_field)\n\n # Create lists of parsed features.\n # Note: Biopython instantiates the features attribute with\n # an empty list, so no need to test if features attribute is\n # present or not.\n seqfeature_dict = create_seqfeature_dictionary(seqrecord.features)\n\n cds_list = []\n if \"CDS\" in seqfeature_dict.keys():\n for seqfeature in seqfeature_dict[\"CDS\"]:\n cds_ftr = parse_cds_seqfeature(seqfeature)\n cds_ftr.genome_id = gnm.id\n cds_ftr.genome_length = gnm.length\n cds_ftr.set_nucleotide_sequence(parent_genome_seq=gnm.seq)\n cds_list.append(cds_ftr)\n\n source_list = []\n if \"source\" in seqfeature_dict.keys():\n for seqfeature in seqfeature_dict[\"source\"]:\n src_ftr = parse_source_seqfeature(seqfeature)\n src_ftr.genome_id = gnm.id\n source_list.append(src_ftr)\n\n trna_list = []\n if \"tRNA\" in seqfeature_dict.keys():\n for seqfeature in seqfeature_dict[\"tRNA\"]:\n trna_ftr = parse_trna_seqfeature(seqfeature)\n trna_ftr.genome_id = gnm.id\n trna_ftr.genome_length = gnm.length\n trna_ftr.set_nucleotide_sequence(parent_genome_seq=gnm.seq)\n trna_ftr.set_nucleotide_length(use_seq=True)\n trna_ftr.parse_amino_acid()\n trna_ftr.parse_anticodon()\n trna_list.append(trna_ftr)\n\n tmrna_list = []\n if \"tmRNA\" in seqfeature_dict.keys():\n for seqfeature in seqfeature_dict[\"tmRNA\"]:\n tmrna_ftr = parse_tmrna_seqfeature(seqfeature)\n tmrna_ftr.genome_id = gnm.id\n tmrna_ftr.genome_length = gnm.length\n tmrna_ftr.set_nucleotide_sequence(parent_genome_seq=gnm.seq)\n tmrna_ftr.set_nucleotide_length(use_seq=True)\n tmrna_ftr.parse_peptide_tag()\n tmrna_ftr.run_aragorn()\n tmrna_list.append(tmrna_ftr)\n\n gnm.translation_table = translation_table\n gnm.set_cds_features(cds_list)\n gnm.set_source_features(source_list)\n gnm.set_trna_features(trna_list)\n gnm.set_tmrna_features(tmrna_list)\n\n # The feature.id is constructed from the Genome.id and the feature order.\n gnm.set_feature_ids(use_type=True, use_cds=True)\n 
gnm.set_feature_ids(use_type=True, use_source=True)\n gnm.set_feature_ids(use_type=True, use_trna=True)\n gnm.set_feature_ids(use_type=True, use_tmrna=True)\n return gnm", "def sample_annotation(data):\n names = data[\"rgnames\"]['sample']\n tools = dd.get_expression_caller(data)\n work_dir = os.path.join(dd.get_work_dir(data), \"mirbase\")\n out_dir = os.path.join(work_dir, names)\n utils.safe_makedir(out_dir)\n out_file = op.join(out_dir, names)\n if dd.get_mirbase_hairpin(data):\n mirbase = op.abspath(op.dirname(dd.get_mirbase_hairpin(data)))\n if utils.file_exists(data[\"collapse\"]):\n data['transcriptome_bam'] = _align(data[\"collapse\"], dd.get_mirbase_hairpin(data), out_file, data)\n data['seqbuster'] = _miraligner(data[\"collapse\"], out_file, dd.get_species(data), mirbase, data['config'])\n else:\n logger.debug(\"Trimmed collapsed file is empty for %s.\" % names)\n else:\n logger.debug(\"No annotation file from miRBase.\")\n\n sps = dd.get_species(data) if dd.get_species(data) else \"None\"\n logger.debug(\"Looking for mirdeep2 database for %s\" % names)\n if file_exists(op.join(dd.get_work_dir(data), \"mirdeep2\", \"novel\", \"hairpin.fa\")):\n data['seqbuster_novel'] = _miraligner(data[\"collapse\"], \"%s_novel\" % out_file, sps, op.join(dd.get_work_dir(data), \"mirdeep2\", \"novel\"), data['config'])\n\n if \"trna\" in tools:\n data['trna'] = _mint_trna_annotation(data)\n\n data = spikein.counts_spikein(data)\n return [[data]]", "def copy_annotations(from_data, to_data, annot_type):\n\n for annot in from_data.annotations.select_type(annot_type):\n entity = anafora.AnaforaEntity()\n entity.id = annot.id\n entity.spans = annot.spans\n entity.type = annot.type\n to_data.annotations.append(entity)", "def add_annotations(annot_tuples, ref_data, annot_type):\n\n for annot in ref_data.annotations.select_type(annot_type):\n annot_begin, annot_end = annot.spans[0]\n annot_tuples.append((annot_begin, annot_end, annot.id))", "def annotate_record(\n seqrecord, location=\"full\", feature_type=\"misc_feature\", margin=0, **qualifiers\n):\n if location == \"full\":\n location = (margin, len(seqrecord) - margin)\n\n strand = location[2] if len(location) == 3 else 1\n seqrecord.features.append(\n SeqFeature(\n FeatureLocation(location[0], location[1], strand),\n qualifiers=qualifiers,\n type=feature_type,\n )\n )", "def geneSpecificRecord (self, orfList, headList, num):\n sequenceInfo = []\n for gene in orfList: # Finds target gene in each genome\n sequenceInfo.append(gene[num]) # ***any gene can be utilized***\n longestLength = max(len(s) for s in sequenceInfo) # gets longest seq to match length with gap characters\n paddedSequences = [s.ljust(longestLength, '-') for s in sequenceInfo] # Adds gap characters\n \n records = (SeqRecord(Seq(s), id = str(paddedSequences.index(s))) for s in paddedSequences) #creating a SeqRecord\n return(records)", "def _init_annotation(self):\n annotations = []\n for frame in self.frames:\n coordinates, sources, targets, ids = [], [], [], []\n frame_id = set()\n for spot_id, spot_annot in frame.items():\n coordinates.append((spot_annot[\"x\"], spot_annot[\"y\"]))\n sources.append(spot_annot[\"source\"])\n targets.append(spot_annot[\"target\"])\n ids.append(spot_id)\n frame_id.add(spot_annot[\"frame\"])\n if len(frame_id) != 1:\n raise ValueError(f\"Invalid frame number found in spot: {spot_id}\")\n annotations.append((\n np.array(coordinates, dtype=np.float),\n np.array(sources, dtype=np.str),\n targets,\n np.array(ids, dtype=np.str),\n frame_id.pop()))\n 
self.annotations = annotations", "def annotate(m, ss_seq): # -> None:\n ...", "def add_gene_annotation(self, genome_version, include_RISCC_reads=False, print_info=False):\n # add the annotation info to each mutant (or nothing, if gene has no annotation)\n # MAYBE-TODO should I even store gene annotation in each mutant (AND in each genome-side LEAPseq read), or just keep a separate per-gene dictionary to save space?\n gene_annotation_dict, gene_annotation_header = get_all_gene_annotation(genome_version, print_info=False)\n if gene_annotation_header: self.gene_annotation_header = gene_annotation_header\n else: self.gene_annotation_header = 'GENE_ANNOTATION_DATA'\n # add the annotation info to each mutant (or nothing, if gene has no annotation) \n N_annotated = 0\n for mutant in self:\n annotation = self._get_annotation_for_gene(mutant.gene, gene_annotation_dict)\n mutant.gene_annotation = annotation\n if annotation: N_annotated += 1\n if include_RISCC_reads:\n for RISCC_data in mutant.RISCC_genome_side_aligned_reads.values():\n annotation = self._get_annotation_for_gene(RISCC_data[3], gene_annotation_dict)\n RISCC_data[7:] = annotation\n if annotation: N_annotated += 1\n if print_info: print(\"Added %s annotations\"%N_annotated)\n elif not N_annotated: print(\"Warning: No gene annotations found!\")\n # LATER-TODO add this to the gene-info run-test case! But the get_all_gene_annotation method has tests.", "def _dataset_fields(geno):\n return {'title': geno['title'], 'notes': geno.get('notes', '')}", "def annotate(self, annotation):\n self._data = self._data.annotate(**annotation)", "def load_gene_annotation(self, file_path):\n\t\tpass", "def get_seqrecord_annotations_comments(phage_genome):\n if phage_genome.subcluster == \"\":\n cluster_comment = \"Cluster: {}; Subcluster: None\".format\\\n (phage_genome.cluster)\n else:\n cluster_comment = \"Cluster: {}; Subcluster: {}\".format\\\n (phage_genome.cluster, phage_genome.subcluster)\n auto_generated_comment =\\\n \"Auto-generated genome record from the MySQL database\"\n annotation_status_comment =\\\n \"Annotation Status: {}; Annotation Author: {}\".format\\\n (phage_genome.annotation_status,\\\n phage_genome.annotation_author)\n retrieval_value = \\\n \"RetrieveRecord: {}\".format(phage_genome.retrieve_record)\n\n return (cluster_comment, auto_generated_comment,\\\n annotation_status_comment, retrieval_value)", "def set_values(self):\n\n if self.featureType != \"gene\":\n self.transcriptId = self.meta['transcript_id']\n self.transcriptName = self.meta['transcript_name']\n self.transcriptBioType = self.meta['transcript_biotype']\n if self.featureType == 'exon':\n self.exonNum = self.meta['exon_number']\n self.exonId = self.meta['exon_id']\n elif self.featureType == 'CDS' or self.featureType == 'intron':\n self.exonNum = self.meta['exon_number']", "def bulk_update_gene_annotations(c, bulk_annotations):\n\n cols = \" (\" + \", \".join([str_wrap_double(x) for x in [\"ID\",\"annot_name\",\n \"source\", \"attribute\", \"value\"]]) + \") \"\n command = 'INSERT INTO \"gene_annotations\"' + cols + \"VALUES \" + \\\n '(?,?,?,?,?)'\n c.executemany(command, bulk_annotations)\n\n return", "def updateAnnotations(self):\n self.backupDatafiles()\n print(\"Updating annotation files \", self.field(\"trainDir\"))\n listOfDataFiles = QDir(self.field(\"trainDir\")).entryList(['*.data'])\n for file in listOfDataFiles:\n # Read the annotation\n segments = Segment.SegmentList()\n newsegments = Segment.SegmentList()\n 
segments.parseJSON(os.path.join(self.field(\"trainDir\"), file))\n allSpSegs = np.arange(len(segments)).tolist()\n newsegments.metadata = segments.metadata\n for segix in allSpSegs:\n seg = segments[segix]\n if self.field(\"species\") not in [fil[\"species\"] for fil in seg[4]]:\n newsegments.addSegment(seg) # leave non-target segments unchanged\n else:\n for seg2 in self.segments:\n if seg2[1] == seg:\n # find the index of target sp and update call type\n seg[4][[fil[\"species\"] for fil in seg[4]].index(self.field(\"species\"))][\"calltype\"] = self.clusters[seg2[-1]]\n newsegments.addSegment(seg)\n newsegments.saveJSON(os.path.join(self.field(\"trainDir\"), file))", "def load_annots(annot_file):\n with open(annot_file, 'r') as annot:\n data = annot.read().split('\\n')\n for line in data:\n temp = line.split(',')\n db_annot.setdefault(temp[0], temp[1:4])", "def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n frame_dir = video_info['filename']\n video_info['filename'] = osp.join(self.data_prefix, video_info['filename'])\n video_info['frame_dir'] = frame_dir\n video_info['index'] = i\n \n video_info['text'] = [video_info['text']] \n video_infos.append(video_info) \n del ann_info\n\n return video_infos", "def __init__(self, seq, annotation=False):\n self.seq = seq\n self.length = len(seq)\n self.annotation = annotation", "def _get_annotation_for_gene(gene, gene_annotation_dict):\n # grab annotations for each gene\n annotations = []\n for gene in gene.split(MULTIPLE_GENE_JOIN):\n try: annotations.append(gene_annotation_dict[gene])\n except KeyError: pass\n # make joint annotation (each field for all genes); \n # make this look better by dealing with empty data specially - turn \" & \" into \"\" and \" & x\" into \"- & x\", \n joint_annotations = []\n for ann in zip(*annotations):\n if any(ann):\n ann = [a if a else '-' for a in ann]\n joint_annotations.append(MULTIPLE_GENE_JOIN.join(ann))\n else:\n joint_annotations.append('')\n # MAYBE-TODO do duplicate-removal etc? 
But that might just make things confusing - not obvious what goes with which gene.\n return joint_annotations\n # TODO unit-test!", "def gff2FA(annotation, sequence, windows, output):\n df_gff = pd.read_csv(annotation, index_col=False, sep='\\t', header=None, comment=\"#\")\n df_gff.columns = ['seqname', 'source', 'feature', 'start', 'end', 'score', 'strand', 'frame', 'attribute']\n fasta_seq = SeqIO.parse(sequence, 'fasta')\n buffer_seqs = []\n cont = 0\n for record in fasta_seq:\n print(record.id)\n dff_extract = df_gff[df_gff.seqname == record.id]\n for key,val in dff_extract.iterrows():\n clean_seq = ''.join(str(record.seq).splitlines())\n if int(val.start) - windows < 0:\n start = 0\n else:\n start = int(val.start) - windows\n if int(val.end) + windows > len(clean_seq):\n end = len(clean_seq)\n else:\n end = int(val.end) + windows\n new_seq = clean_seq[start:end]\n att = val.attribute\n id = record.id + '_' + str(start) + '_' + str(end)\n desc = \"seq_id:\" + str(record.id)\n desc += \" feature_start:\" + str(val.start)\n desc += \" feature_end:\" + str(val.end)\n desc += \" genome_start:\" + str(start)\n desc += \" genome_end:\" + str(end)\n desc += \" feature:\" + str(val.feature)\n desc += \" attributes:\" + val.attribute\n seq = SeqRecord(Seq(new_seq), id=id, description=desc)\n buffer_seqs.append(seq)\n cont += 1\n if output:\n print('Saving...')\n SeqIO.write(buffer_seqs, output, \"fasta\")\n else:\n return buffer_seqs", "def annotate(self, **annotations):\n _check_annotations(annotations)\n self.annotations.update(annotations)", "def build_seq_obj(self, code, gene_code, our_taxon_names, all_seqs):\n this_voucher_seqs = self.extract_sequence_from_all_seqs_in_db(all_seqs, code, gene_code)\n\n if this_voucher_seqs == '?':\n seq = '?' * self.gene_codes_metadata[gene_code]['length']\n else:\n seq = self.create_seq_record(this_voucher_seqs)\n\n seq_record = SeqRecordExpanded(seq)\n\n if code in our_taxon_names:\n seq_record.voucher_code = code\n seq_record.taxonomy = our_taxon_names[code]\n seq_record.gene_code = gene_code\n seq_record.reading_frame = self.gene_codes_metadata[gene_code]['reading_frame']\n seq_record.table = self.gene_codes_metadata[gene_code]['genetic_code']\n return seq_record\n else:\n return None", "def test_sequence_annotate(self):\n self.t(\"1,2 annotate note\")\n code, out, err = self.t(\"_get 1.annotations.1.description 2.annotations.1.description\")\n self.assertEqual(\"note note\\n\", out)", "def separate_annotations():\n data_root = '/home/ubuntu/datasets/YT-VIS/'\n ann_file = data_root + 'annotations/instances_train_sub.json'\n import json\n with open(ann_file, 'r') as f:\n ann = json.load(f)\n # ann['videos'] = ann['videos'][15]\n # video_id = [0]\n from tqdm import tqdm\n for id in tqdm(range(len(ann['videos']))):\n videos = []\n anns = []\n video = ann['videos'][id]\n video['id'] = 1\n videos.append(video)\n\n i = 1\n for a in ann['annotations']:\n if a['video_id'] == id + 1:\n anno = a\n anno['id'] = i\n anno['video_id'] = 1\n anns.append(anno)\n i += 1\n # anno = ann['annotations'][id]\n # anno['id'] = 1\n # anno['video_id'] = 1\n # anns.append(anno)\n\n file_name = videos[0]['file_names'][0].split('/')[0]\n\n ann_new = dict()\n ann_new['info'] = ann['info']\n ann_new['licenses'] = ann['licenses']\n ann_new['categories'] = ann['categories']\n ann_new['videos'] = videos\n ann_new['annotations'] = anns\n\n with open(data_root + 'train/Annotations/{}/{}_annotations.json'.format(file_name, file_name), 'w') as f:\n json.dump(ann_new, f, 
ensure_ascii=False)" ]
[ "0.61961854", "0.614041", "0.59840614", "0.59612197", "0.59370255", "0.5875188", "0.58745486", "0.5842022", "0.57989097", "0.57771647", "0.5719793", "0.57143587", "0.56944454", "0.56943953", "0.5606682", "0.5590644", "0.55596644", "0.55595595", "0.55420077", "0.5540718", "0.54887766", "0.548256", "0.54654795", "0.5452925", "0.54498845", "0.5412219", "0.54109114", "0.54004824", "0.53813154", "0.536705" ]
0.6855981
0
Helper function that uses Genome data to populate the comment annotation attribute
def get_seqrecord_annotations_comments(phage_genome): if phage_genome.subcluster == "": cluster_comment = "Cluster: {}; Subcluster: None".format\ (phage_genome.cluster) else: cluster_comment = "Cluster: {}; Subcluster: {}".format\ (phage_genome.cluster, phage_genome.subcluster) auto_generated_comment =\ "Auto-generated genome record from the MySQL database" annotation_status_comment =\ "Annotation Status: {}; Annotation Author: {}".format\ (phage_genome.annotation_status,\ phage_genome.annotation_author) retrieval_value = \ "RetrieveRecord: {}".format(phage_genome.retrieve_record) return (cluster_comment, auto_generated_comment,\ annotation_status_comment, retrieval_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init():\n\n # Complete our stop words set.\n add_extra_words()\n\n model = read_model(MODEL_FILE)\n model_keys = list(model.keys())\n\n # Basic random.\n new_comment = generate_comment(model=model, order=2,\n number_of_sentences=2,\n initial_prefix=random.choice(model_keys))\n\n # Selective random.\n new_comment = generate_comment(model=model, order=2,\n number_of_sentences=2,\n initial_prefix=get_prefix(model_keys))\n\n # Context-aware.\n new_comment = generate_comment(model=model, order=2,\n number_of_sentences=2,\n initial_prefix=get_prefix_with_context(model, \"Agent_Phantom\"))\n\n print(new_comment)", "def annotate(self, annotation):\n self._data = self._data.annotate(**annotation)", "def comment():", "def _generate_pr_comment_markdown(self, data):\n pass", "def __init__(self, sample_metadata, Comments):\r\n self._metadata = sample_metadata\r\n self.Comments = Comments\r\n self.no_data_value = 'no_data'", "def _get_annotation_data_attr(self, index, el):\r\n\r\n data_attrs = {}\r\n attrs_map = {\r\n 'body': 'data-comment-body',\r\n 'title': 'data-comment-title',\r\n 'problem': 'data-problem-id'\r\n }\r\n\r\n for xml_key in attrs_map.keys():\r\n if xml_key in el.attrib:\r\n value = el.get(xml_key, '')\r\n html_key = attrs_map[xml_key]\r\n data_attrs[html_key] = {'value': value, '_delete': xml_key}\r\n\r\n return data_attrs", "def add_annotation_comment(self, doc, comment):\n if len(doc.annotations) != 0:\n if not self.annotation_comment_set:\n self.annotation_comment_set = True\n if validations.validate_annotation_comment(comment):\n doc.annotations[-1].comment = str_from_text(comment)\n return True\n else:\n raise SPDXValueError('AnnotationComment::Comment')\n else:\n raise CardinalityError('AnnotationComment::Comment')\n else:\n raise OrderError('AnnotationComment::Comment')", "def parseAnnotation(self, i, j) :\n\n if self.config.get('annotations', 'model') == 'oa':\n # Create triples according to Open Annotation model\n\n body = BNode()\n\n self.annotationGraph.add((self.annotationNamespaces['scope'][self.source_cell_qname], \n RDF.type, \n self.annotationNamespaces['oa']['Annotation']\n ))\n self.annotationGraph.add((self.annotationNamespaces['scope'][self.source_cell_qname], \n self.annotationNamespaces['oa']['hasBody'], \n body\n ))\n self.annotationGraph.add((body,\n RDF.value, \n Literal(self.annotations[(i,j)].text.replace(\"\\n\", \" \").replace(\"\\r\", \" \").replace(\"\\r\\n\", \" \").encode('utf-8'))\n ))\n self.annotationGraph.add((self.annotationNamespaces['scope'][self.source_cell_qname], \n self.annotationNamespaces['oa']['hasTarget'], \n self.namespaces['scope'][self.source_cell_qname]\n ))\n self.annotationGraph.add((self.annotationNamespaces['scope'][self.source_cell_qname], \n self.annotationNamespaces['oa']['annotator'], \n Literal(self.annotations[(i,j)].author.encode('utf-8'))\n ))\n self.annotationGraph.add((self.annotationNamespaces['scope'][self.source_cell_qname], \n self.annotationNamespaces['oa']['annotated'], \n Literal(datetime.datetime.fromtimestamp(os.path.getmtime(self.filename)).strftime(\"%Y-%m-%d\"),datatype=self.annotationNamespaces['xsd']['date'])\n ))\n self.annotationGraph.add((self.annotationNamespaces['scope'][self.source_cell_qname], \n self.annotationNamespaces['oa']['generator'], \n URIRef(\"https://github.com/Data2Semantics/TabLinker\")\n ))\n self.annotationGraph.add((self.annotationNamespaces['scope'][self.source_cell_qname], \n self.annotationNamespaces['oa']['generated'], \n 
Literal(datetime.datetime.now().strftime(\"%Y-%m-%d\"), datatype=self.annotationNamespaces['xsd']['date'])\n ))\n self.annotationGraph.add((self.annotationNamespaces['scope'][self.source_cell_qname], \n self.annotationNamespaces['oa']['modelVersion'], \n URIRef(\"http://www.openannotation.org/spec/core/20120509.html\")\n ))\n else:\n # Create triples according to Nanopublications model\n print \"Nanopublications not implemented yet!\"", "def _writeComments(self):\n self.header.write(wrapLine(\"NSCOML\", self.annotation, self.delimiter, \"%d\\n\" % self.NSCOML))\n self.header.write(wrapLines(\"SCOM\", self.annotation, self.delimiter, \"%s\\n\" * self.NSCOML % tuple(self.SCOM)))\n self.header.write(wrapLine(\"NNCOML\", self.annotation, self.delimiter, \"%d\\n\" % self.NNCOML))\n self.header.write(wrapLines(\"NCOM\", self.annotation, self.delimiter, \"%s\\n\" * self.NNCOML % tuple(self.NCOM)))", "def __init__(self, comment_id, name, data):\n\n self.comment_id = comment_id\n self.name = name\n self.data = data", "def _dataset_fields(geno):\n return {'title': geno['title'], 'notes': geno.get('notes', '')}", "def model_comment(comment_type, text, other=None):\n if other and isinstance(other, dict):\n comment = other\n else:\n comment = dict()\n\n comment['type'] = comment_type.upper()\n if text:\n comment['comment'] = text\n\n string = json.dumps(comment)\n return \"/* LDV {} */\".format(string)", "def get_comment(self):\n if self.simulation_data is None:\n return self.comment\n else:\n return self.simulation_data.mfdata[self.comment_path]", "def comment_in_json(self):\n\t\tpass", "def _convert_annotations(self, ast):\n self.annotations = IDLAnnotations(ast)", "def make_comment(self, offset, comment):\n #self.ret = idc.MakeComm(offset, comment)\n return self.ret", "def comment(self, content):\n pass", "def decorate(self, content: StringList) -> None:\n super().decorate(content)\n for line in content[:20]:\n if line.startswith(TITLE_MARKER):\n title = line[len(TITLE_MARKER) :].strip()\n fence = \"=\" * len(title)\n content.insert(0, \"\", \"<generated>\", 0)\n content.insert(0, fence, \"<generated>\", 0)\n content.insert(0, title, \"<generated>\", 0)\n content.insert(0, fence, \"<generated>\", 0)", "def create_comment(data):\n\tchecksum = sha256(data).hexdigest()\n\tcomment = checksum + ' ' + data\n\treturn comment", "def __init__(self):\r\n\t\tself.label = \"Linked Data Batch No Functional Property Merge\"\r\n\t\tself.description = \"\"\"The related seperated tables from Linked Data Location Entities Property Enrichment Tool have multivalue for each wikidata location because the coresponding property is not functional property. 
\r\n\t\tThis Tool helps user to merge these multivalue to a single record and add it to original feature class sttribute table by using merge rules which are specified by users.\"\"\"\r\n\t\tself.canRunInBackground = False", "def add_annotation_comment(self, doc, comment):\n if len(doc.annotations) != 0:\n if not self.annotation_comment_set:\n self.annotation_comment_set = True\n doc.annotations[-1].comment = comment\n return True\n else:\n raise CardinalityError('AnnotationComment')\n else:\n raise OrderError('AnnotationComment')", "def generate_annotations(self, caching=CachingType.NONE):\n # Make the nltk Text list of words\n text = self.nltk_text(self.text)\n\n # Get the uncommon_words\n uncommon_words = self.eliminate_common(text)\n # Get the places / VIPs / hystorical events / etc.\n extras = self.get_extras(text)\n # Generate the annotations\n annotations = []\n for word in uncommon_words:\n ann = annot.TextAnnotation(word, AnnotationType.UNCOMMON_WORD,\n caching)\n ann.save_to_db()\n if ann.data is None or not ann.data:\n continue\n annotations.append(ann)\n for word in extras:\n ann = annot.TextAnnotation(word, AnnotationType.EXTRA, caching)\n ann.save_to_db(case_sensitive=True)\n if ann.data is None or not ann.data:\n continue\n annotations.append(ann)\n # Return the list of annotations\n return annotations", "def _readComments(self): \n self.NSCOML = nappy.utils.text_parser.readItemFromLine(self.file.readline(), int)\n self._readSpecialComments()\n self.NNCOML = nappy.utils.text_parser.readItemFromLine(self.file.readline(), int)\n self._readNormalComments()", "def copy_annotations(from_data, to_data, annot_type):\n\n for annot in from_data.annotations.select_type(annot_type):\n entity = anafora.AnaforaEntity()\n entity.id = annot.id\n entity.spans = annot.spans\n entity.type = annot.type\n to_data.annotations.append(entity)", "def create_comment(data):\n checksum = sha256(data).hexdigest()\n comment = checksum + ' ' + data\n return comment", "def __init__(self):\r\n\t\tself.label = \"Linked Data Single No Functional Property Merge\"\r\n\t\tself.description = \"\"\"The related seperated tables from Linked Data Location Entities Property Enrichment Tool have multivalue for each wikidata location because the coresponding property is not functional property. 
\r\n\t\tThis Tool helps user to merge these multivalue to a single record and add it to original feature class sttribute table by using merge rules which are specified by users.\"\"\"\r\n\t\tself.canRunInBackground = False", "def comment_for_run (ins, exp, runnum) :\n return dict_of_recs_for_run(ins, exp, runnum)['comment']", "def __init__(self, program: ghidra.program.model.listing.Program, it: ghidra.program.model.address.AddressIterator, commentType: int):\n ...", "def _defineNAComments(self, normal_comments=None, special_comments=None):\n normal_comments = normal_comments or []\n special_comments = special_comments or []\n\n if hasattr(self, \"NCOM\"): normal_comments = getattr(self, \"NCOM\") + normal_comments\n\n NCOM = []\n for ncom in normal_comments:\n NCOM.append(ncom)\n\n if len(NCOM) > 0:\n NCOM.append(\"\")\n\n # Use third item in self.extra_comments and adds to NCOM\n if len(self.extra_comments[2]) > 0:\n for excom in self.extra_comments[2]:\n NCOM.append(excom)\n\n if len(self.extra_comments[1]) > 0: \n NCOM.append(hp[\"addl_globals\"])\n for excom in self.extra_comments[1]:\n NCOM.append(excom)\n\n if hasattr(self, \"history\"):\n for h in self.history:\n NCOM.append(h)\n \n # When NCOM has been defined then surround it in some extras \n if len(NCOM) > 0:\n NCOM.insert(0, hp[\"nc_start\"]) \n NCOM.append(\"\")\n NCOM.append(hp[\"nc_end\"])\n NCOM.append(hp[\"data_next\"])\n\n spec_comm_flag = None\n # Start with special_comments added in\n SCOM = []\n\n # Uses first item in self.extra_comments to start SCOM\n special_comments = special_comments + self.extra_comments[0]\n\n if len(special_comments) > 0: \n SCOM = [hp[\"sc_start\"]]\n spec_comm_flag = 1\n\n for scom in special_comments:\n SCOM.append(scom)\n\n used_var_atts = (\"id\", \"missing_value\", \"fill_value\", \n \"nasa_ames_var_number\", \"nasa_ames_aux_var_number\")\n var_comm_flag = None\n\n # Create a string for the Special comments to hold rank-zero vars\n rank_zero_vars_string = []\n\n for var in self.rank_zero_vars:\n rank_zero_vars_string.append(\" Variable %s: %s\" % (var.name, xarray_utils.getBestName(var)))\n\n for att in var.attrs.keys():\n value = var.attrs[att]\n\n if isinstance(value, str) or np.isscalar(value):\n rank_zero_vars_string.append(\" %s = %s\" % (att, var.attrs[att]))\n\n if len(rank_zero_vars_string) > 0:\n rank_zero_vars_string.insert(0, hp[\"sing_start\"])\n rank_zero_vars_string.append(hp[\"sing_end\"])\n\n # Loop through variables and add \n for var in self.ordered_vars:\n varflag = \"unused\"\n var_name_written = False\n\n name = xarray_utils.getBestName(var)\n\n for scom, value in var.attrs.items():\n if hasattr(value, \"__len__\") and len(value) == 1:\n value = value[0]\n\n if isinstance(value, str) or np.isscalar(value) and scom not in used_var_atts:\n if varflag == \"unused\":\n if var_comm_flag == None:\n var_comm_flag = 1\n\n if spec_comm_flag == None:\n SCOM = [hp[\"sc_start\"]] + rank_zero_vars_string\n SCOM.append(hp[\"addl_vatts\"])\n SCOM.append(hp[\"ncatts_start\"])\n varflag = \"using\" \n spec_comm_flag = 1\n\n if not var_name_written:\n SCOM.append(\" Variable %s: %s\" % (var.name, name))\n var_name_written = True\n\n SCOM.append(\" %s = %s\" % (scom, value))\n\n if var_comm_flag == 1: \n SCOM.append(hp[\"ncatts_end\"])\n if spec_comm_flag == 1:\n SCOM.append(hp[\"sc_end\"])\n\n # Strip out empty lines (or returns)\n NCOM_cleaned = []\n SCOM_cleaned = []\n\n for c in NCOM:\n if c.strip() not in (\"\", \" \", \" \"):\n # Replace new lines within one attribute with a 
newline and tab so easier to read\n lines = c.split(\"\\n\")\n for line in lines:\n if line != lines[0]: \n line = \" \" + line\n\n NCOM_cleaned.append(line)\n\n for c in SCOM:\n if c.strip() not in (\"\", \" \", \" \"):\n # Replace new lines within one attribute with a newline and tab so easier to read\n lines = c.split(\"\\n\")\n\n for line in lines:\n if line != lines[0]: \n line = \" \" + line\n\n SCOM_cleaned.append(line)\n\n self.na_dict[\"NCOM\"] = NCOM_cleaned\n self.na_dict[\"NNCOML\"] = len(self.na_dict[\"NCOM\"])\n self.na_dict[\"SCOM\"] = SCOM_cleaned\n self.na_dict[\"NSCOML\"] = len(self.na_dict[\"SCOM\"])\n\n return", "def set_attribute(self, name, value, comment):\n setattr(self, '%s__' % name, value_or_none(value))\n setattr(self, '%s__comment' % name, value_or_none(comment))" ]
[ "0.5940948", "0.5896796", "0.57457376", "0.56866765", "0.56605685", "0.5631936", "0.5573606", "0.55731833", "0.5529364", "0.54920137", "0.5472266", "0.54506373", "0.54092973", "0.5386058", "0.53741366", "0.5355898", "0.5355448", "0.5333661", "0.53121537", "0.5307911", "0.5288451", "0.52851754", "0.5281835", "0.5274791", "0.52430886", "0.52427495", "0.5234922", "0.5227721", "0.52265257", "0.52040225" ]
0.59162307
1
Create a fasta-formatted Biopython SeqRecord object.
def create_fasta_seqrecord(header, sequence_string): seq = Seq(sequence_string, alphabet=IUPAC.unambiguous_dna) seqrecord = SeqRecord(seq, description=header) return seqrecord
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_seq_record(self):\n\t\t#create the anotations in a pythonic manner\n\t\texempt = ['name', 'description', 'features', 'sequence'] #things which aren't annotations\n\t\tannotations = { }\n\t\tfor key, value in self.__dict__.iteritems():\n\t\t\tif key.lower() not in exempt:\n\t\t\t\tannotations[key] = value\n\t\t\n\t\t#create the features\n\t\tfeatures = []\n\t\tfor feat in self.features:\n\t\t\tfeatures.append( SeqFeature( \n\t\t\t\tlocation = FeatureLocation(feat['startpos'] - 1, feat['endpos']), #NB partsregistry uses 1-offset, and inclusive.\n\t\t\t\ttype = feat['type'],\n\t\t\t\tstrand = feat['strand'],\n\t\t\t\tqualifiers = {'title': feat['name'],}))\n\t\t\n\t\treturn SeqRecord(\tself.sequence, \n\t\t\t\t\t\t\tid=self.name,\n\t\t\t\t\t\t\tname=self.name,\n\t\t\t\t\t\t\tdescription=self.description,\n\t\t\t\t\t\t\tfeatures=features,\n\t\t\t\t\t\t\tannotations=annotations)", "def sequence_to_biopython_record(\n sequence, id=\"<unknown id>\", name=\"<unknown name>\", features=()\n):\n if has_dna_alphabet:\n seq = Seq(sequence, alphabet=DNAAlphabet())\n else:\n seq = Seq(sequence)\n\n return SeqRecord(\n seq=seq,\n id=id,\n name=name,\n features=list(features),\n annotations={\"molecule_type\": \"DNA\"},\n )", "def make_protein_record(nuc_record):\n return SeqRecord(seq = nuc_record.seq.translate(to_stop=True), \\\n id = \"trans_\" + nuc_record.id, \\\n description = \"translation of CDS, using default table\")", "def __init__(self, seq_record=None):\n\t\tself._record = seq_record", "def change_biopython_record_sequence(record, new_seq):\n new_record = deepcopy(record)\n\n if has_dna_alphabet:\n seq = Seq(new_seq, alphabet=DNAAlphabet())\n else:\n seq = Seq(new_seq)\n\n new_record.seq = seq\n return new_record", "def test_make_fasta_rec(self):\r\n header = '>E2_1 FYI2DSB01B17QJ orig_bc=ATCACTAGTCAC new_bc=ATCACTAGTCAC bc_diffs=0'\r\n seq = 'CTGGTC'\r\n qual = map(int, '32 32 32 19 19 19'.split())\r\n self.assertEqual(make_fastq_rec(header, seq, qual),\r\n \"\"\"@E2_1 FYI2DSB01B17QJ orig_bc=ATCACTAGTCAC new_bc=ATCACTAGTCAC bc_diffs=0\r\nCTGGTC\r\n+E2_1 FYI2DSB01B17QJ orig_bc=ATCACTAGTCAC new_bc=ATCACTAGTCAC bc_diffs=0\r\nAAA444\"\"\")", "def genome_to_seqrecord(phage_genome):\n\n assert phage_genome != None,\\\n \"Genome object passed is None and not initialized\"\n try:\n record = SeqRecord(phage_genome.seq)\n record.seq.alphabet = IUPAC.IUPACAmbiguousDNA()\n except AttributeError:\n print(\"Genome object failed to be converted to SeqRecord.\",\n \"Genome valid attribute 'seq' is required to\",\n \"convert to SeqRecord object.\")\n raise\n record.name = phage_genome.name\n if phage_genome.accession != \"\":\n record.id = phage_genome.accession\n record.features = get_seqrecord_features(phage_genome)\n record.description = get_seqrecord_description(phage_genome)\n record.annotations=\\\n get_seqrecord_annotations(phage_genome)\n\n return record", "def create_seq_record(self, s):\n gene_code = s['gene_code']\n length = self.gene_codes_metadata[gene_code]['length']\n sequence = s['sequences']\n length_difference = length - len(sequence)\n\n sequence += '?' 
* length_difference\n return sequence", "def parseFastaHeader(self, seqRecord):\n [modbaseId, uniprotId] = seqRecord.id.split('|')\n seq = pcssPeptide.PcssProtein(modbaseId, self.pcssRunner)\n seq.setUniprotId(uniprotId)\n return seq", "def build_seq_obj(self, code, gene_code, our_taxon_names, all_seqs):\n this_voucher_seqs = self.extract_sequence_from_all_seqs_in_db(all_seqs, code, gene_code)\n\n if this_voucher_seqs == '?':\n seq = '?' * self.gene_codes_metadata[gene_code]['length']\n else:\n seq = self.create_seq_record(this_voucher_seqs)\n\n seq_record = SeqRecordExpanded(seq)\n\n if code in our_taxon_names:\n seq_record.voucher_code = code\n seq_record.taxonomy = our_taxon_names[code]\n seq_record.gene_code = gene_code\n seq_record.reading_frame = self.gene_codes_metadata[gene_code]['reading_frame']\n seq_record.table = self.gene_codes_metadata[gene_code]['genetic_code']\n return seq_record\n else:\n return None", "def parse_sequence(fasta_seq, length):\n # extract name and sequence\n name, seq = fasta_seq.id, fasta_seq.seq\n\n # Cannot create a larger sequence than the original\n length = min(length, len(seq))\n\n # find the maximum starting index that generates a full\n # length subsequence\n max_start = len(seq) - length\n start = random.randint(0, max_start)\n \n # generate a new sequence\n gen_seq = seq[start:(start + length)]\n\n # return the generated sequence\n return SeqRecord(gen_seq, name, '', '')", "def to_record(self, val):\n while len(val) < self.length:\n val.append(self.record_class())\n return ''.join([v.to_record() for v in val])", "def _sequences_to_new_records(sequences):\n if isinstance(sequences, dict):\n sequences = list(sequences.items())\n records = []\n for seq in sequences:\n if hasattr(seq, \"id\"):\n records.append(deepcopy(seq))\n else:\n name, seq = seq\n records.append(\n sequence_to_biopython_record(seq, id=name, name=name)\n )\n return records", "def _new_record():\n nonlocal key\n nonlocal value_list\n nonlocal record\n nonlocal origin\n nonlocal field_offset_map\n key = None\n value_list = None\n if source is not None:\n origin = Origin(source, None, None)\n field_offset_map = {}\n record = RFC822Record(data_cls(), origin, data_cls(), field_offset_map)", "def to_record(\n self,\n filepath=None,\n features_type=\"misc_feature\",\n with_original_features=True,\n with_original_spec_features=False,\n with_constraints=True,\n with_objectives=True,\n with_sequence_edits=False,\n colors_dict=None,\n use_short_labels=True,\n record_id = None\n ):\n record = sequence_to_biopython_record(self.sequence)\n if record_id is not None:\n record.id = record_id\n\n record.features = []\n if with_constraints:\n record.features += [\n cst.to_biopython_feature(\n role=\"constraint\",\n feature_type=features_type,\n colors_dict=colors_dict,\n use_short_label=use_short_labels,\n )\n for cst in self.constraints\n if cst.__dict__.get(\"location\", False)\n ]\n if with_objectives:\n record.features += [\n obj.to_biopython_feature(\n role=\"objective\",\n feature_type=features_type,\n colors_dict=colors_dict,\n use_short_label=use_short_labels,\n )\n for obj in self.objectives\n ]\n if with_original_features and (self.record is not None):\n record.features += [\n f\n for f in self.record.features\n if with_original_spec_features\n or not find_specification_label_in_feature(f)\n ]\n if with_sequence_edits:\n record.features += self.sequence_edits_as_features()\n\n if filepath is not None:\n write_record(record=record, target=filepath, file_format=\"genbank\")\n else:\n return 
record", "def parse(record):\n\n #Extract individual parts of the FASTA record\n\n identifier = record.id #The sequence's Id\n sequence = record.seq #The sequence itself\n sequence = sequence.upper() #Turns all the nucleotides to upper case\n\n return identifier, sequence", "def createHeaderRecord(self):\n\n # ascii-character limit for every header record information (in bytes)\n lenVersion = 8\n lenLocalPatientID = 80\n lenLocalRecordingID = 80\n lenStartDate = 8\n lenStartTime = 8\n lennBytesHeader = 8\n lenEDFPlus = 44\n lennDataRecord = 8\n lenDurationDataRecord = 8\n lennSignals = 4\n \n HeaderInfolist = [self.Version, self.LocalPatientID, self.LocalRecordingID, self.StartDate, self.StartTime, self.nBytesHeader, self.EDFPlus,\\\n self.nDataRecord, self.DurationDataRecord, self.nSignals]\n lenHeaderInfo = [lenVersion, lenLocalPatientID, lenLocalRecordingID, lenStartDate, lenStartTime, lennBytesHeader, lenEDFPlus, lennDataRecord,\\\n lenDurationDataRecord, lennSignals]\n\n for i in range(len(HeaderInfolist)):\n maxlen = lenHeaderInfo[i]\n if len(HeaderInfolist[i]) > maxlen:\n # truncates the string if length is greater than limit\n HeaderInfolist[i] = HeaderInfolist[i][:maxlen] \n \n else:\n HeaderInfolist[i] = HeaderInfolist[i].ljust(maxlen)\n \n # converts the list to a string with no separator in between elements\n self.HeaderRecord = ''.join(HeaderInfolist) \n\n # concatenates each BioSignal TechInfo to the Header Record string\n for i in range(len(self.BioSignals[0].TechInfo)):\n for x in range(len(self.BioSignals)):\n self.HeaderRecord = self.HeaderRecord + self.BioSignals[x].TechInfo[i]", "def fromString(cls, s):\n try:\n lines = s.splitlines()\n assert len(lines) > 1\n assert lines[0][0] == cls.DELIMITER\n name = lines[0][1:]\n sequence = \"\".join(lines[1:])\n return FastaRecord(name, sequence)\n except AssertionError:\n raise ValueError(\"String not recognized as a valid FASTA record\")", "def snapgene_file_to_seqrecord(*a, **k):\n raise ImportError(\n \"Please install snapgene_reader to import Snapgene .dna files\"\n )", "def make_fastq_rec(header, seq, qual, offset=33):\r\n result = []\r\n if header.startswith('>'):\r\n header = header[1:]\r\n result.append('@' + header)\r\n result.append(seq)\r\n result.append('+' + header)\r\n result.append(''.join(map(chr, [33 + i for i in qual])))\r\n return '\\n'.join(result)", "def _to_acknowledgement_record(parsed):\n return AcknowledgementRecord(record_type=parsed.record_type,\n transaction_sequence_n=parsed.transaction_sequence_n,\n record_sequence_n=parsed.record_sequence_n,\n original_group_id=parsed.group_id,\n original_transaction_sequence_n=parsed.original_transaction_sequence_n,\n original_transaction_type=parsed.original_transaction_type,\n transaction_status=parsed.transaction_status,\n creation_date_time=parsed.creation_date_time,\n processing_date=parsed.processing_date,\n creation_title=parsed.creation_title,\n submitter_creation_n=parsed.submitter_creation_n,\n recipient_creation_n=parsed.recipient_creation_n)", "def format_fastq_record(label,\r\n seq,\r\n qual):\r\n\r\n return \"@%s\\n%s\\n+\\n%s\\n\" % (label, seq, qual)", "def geneSpecificRecord (self, orfList, headList, num):\n sequenceInfo = []\n for gene in orfList: # Finds target gene in each genome\n sequenceInfo.append(gene[num]) # ***any gene can be utilized***\n longestLength = max(len(s) for s in sequenceInfo) # gets longest seq to match length with gap characters\n paddedSequences = [s.ljust(longestLength, '-') for s in sequenceInfo] # Adds gap characters\n 
\n records = (SeqRecord(Seq(s), id = str(paddedSequences.index(s))) for s in paddedSequences) #creating a SeqRecord\n return(records)", "def createDataRecord(self):\n\n counter = 0\n DataRecordlist = []\n end = []\n offset = []\n start = []\n temp = []\n\n def DecToBin(num):\n # converts a decimal number to its 16-bit binary representation\n \n BinStr = ''\n if num == 0: return '0'*16\n while num > 0:\n BinStr = str(num % 2) + BinStr\n num = num >> 1 # right-shift the num by 1 bit\n BinStr = BinStr.zfill(16) # make BinStr a 16-bit string\n return BinStr\n\n for i in range(len(self.BioSignals)):\n offset.append(int(self.BioSignals[i].NRsamples))\n start.append(0)\n end.append(0)\n\n while (counter < int(self.nDataRecord)):\n \n for x in range(len(self.BioSignals)):\n\n end[x] = start[x] + offset[x]\n temp = self.BioSignals[x].RawBioSignal[start[x]:end[x]]\n\n for i in range(len(temp)):\n intRawValue = temp[i]\n\n if intRawValue >= 0:\n # if positive-valued, convert to binary\n binRawValue = DecToBin(intRawValue)\n else:\n # if negative, get the positive representation by adding (2**16-1)\n # these are the numbers from 32768-65535\n negRawValue = intRawValue + (2**16-1)\n # then convert to binary\n binRawValue = DecToBin(negRawValue)\n\n # divide the 16-bit binary number to two 8-bit binary numbers (MSByte and LSByte)\n MSByte = binRawValue[:8]\n LSByte = binRawValue[8:]\n\n # convert each byte to decimal then get its ASCII representation \n chrMSByte = chr(int(MSByte,2))\n chrLSByte = chr(int(LSByte,2))\n\n # each value in the data record is a 2-byte 2's complement integer represented by its ASCII character\n # it is arranged as: Value = LSB,MSB\n DataRecordlist.extend([chrLSByte, chrMSByte])\n\n # update the pointer for the next set of data records\n start[x] = end[x]\n\n counter += 1\n\n # when all BioSignal objects are accessed and encoded, converts the list to a string\n self.DataRecord = ''.join(DataRecordlist)", "def create(cls: Type[Sequence], sequence: bytes, alphabet: Alphabet) -> Sequence:\n return cls(lib.imm_seq_create(sequence, alphabet.imm_abc), alphabet)", "def fasta_format(self, line_width=None):\n return fasta_formatted_string(self.name, self._sequence,\n description=self.description,\n line_width=line_width)", "def get_cds(geneid, seqdict):\n nuc_seq = seqdict[geneid]\n # Translate it\n aa_seq = nuc_seq.seq.translate()\n # Decorate it like you would a full SeqRecord object\n aa_seq_rec = SeqRecord.SeqRecord(\n aa_seq,\n id=geneid,\n description='')\n return aa_seq_rec", "def load_record(filepath, linear=True, name=\"unnamed\", file_format=\"auto\"):\n if file_format != \"auto\":\n record = SeqIO.read(filepath, file_format)\n elif filepath.lower().endswith((\"gb\", \"gbk\")):\n record = SeqIO.read(filepath, \"genbank\")\n elif filepath.lower().endswith((\"fa\", \"fasta\")):\n record = SeqIO.read(filepath, \"fasta\")\n elif filepath.lower().endswith(\".dna\"):\n record = snapgene_file_to_seqrecord(filepath)\n else:\n raise ValueError(\"Unknown format for file: %s\" % filepath)\n record.linear = linear\n if name != \"unnamed\":\n record.id = name\n record.name = name.replace(\" \", \"_\")[:20]\n return record", "def read_sequence(filename):\n record = next(SeqIO.parse(filename, \"fasta\"))\n return record.description, str(record.seq)", "def test_to_fasta(self):\n even = \"TCAGAT\"\n odd = even + \"AAA\"\n even_dna = self.SequenceClass(even, name=\"even\")\n odd_dna = self.SequenceClass(odd, name=\"odd\")\n self.assertEqual(even_dna.to_fasta(), \">even\\nTCAGAT\\n\")\n # set 
line wrap to small number so we can test that it works\n self.assertEqual(even_dna.to_fasta(block_size=2), \">even\\nTC\\nAG\\nAT\\n\")\n self.assertEqual(odd_dna.to_fasta(block_size=2), \">odd\\nTC\\nAG\\nAT\\nAA\\nA\\n\")\n # check that changing the linewrap again works\n self.assertEqual(even_dna.to_fasta(block_size=4), \">even\\nTCAG\\nAT\\n\")" ]
[ "0.8072106", "0.7331823", "0.6897975", "0.68514806", "0.6835495", "0.67624587", "0.67114806", "0.66011626", "0.6347892", "0.62518346", "0.60974985", "0.58882695", "0.5858205", "0.5857612", "0.5838891", "0.5797587", "0.57610375", "0.57281584", "0.5715485", "0.57087386", "0.57081676", "0.568561", "0.56787413", "0.5676607", "0.5675398", "0.5667335", "0.5650315", "0.5645052", "0.5613532", "0.5585077" ]
0.7524539
1
Relabel one file which is labeled
def relabel_one_file(file_path: Path, result_file: Path = None, label_items=LABEL_ITEMS): global all_files_labels logger.info(f'Processing file: {file_path}') file_name = file_path.stem # Load data df = pd.read_csv(file_path, index_col=False, comment='#') ts = df['EventTimestamp(ns)'].values if 'CurrentTimeMillis' in df.columns: utc_ref_ts = df['CurrentTimeMillis'].values[0] else: try: time_str = file_name.split('-')[1] utc_ref = datetime.strptime(time_str, '%Y_%m_%d_%H_%M_%S_%f') utc_ref_ts = utc_ref.timestamp() except ValueError as e: print(e) exit(1) old_labels = df['Activity'].values # Old labels to idx labels_idx = [] previous_type = old_labels[0] start_idx = 0 for i, t in enumerate(old_labels[1:], 1): if t != previous_type: if previous_type != 0: labels_idx.append([previous_type, start_idx, i]) previous_type = t start_idx = i print(f'Current labels: {labels_idx}') acc = df[['AccelX', 'AccelY', 'AccelZ']].values guess_type_name = file_name.split('-')[0] if guess_type_name in AILAB_LABEL_NAMES_CONVERT_MAP.keys(): guess_type_name = AILAB_LABEL_NAMES_CONVERT_MAP.get(guess_type_name) guess_type = LABEL_ITEMS_INDEX_DICT.get(guess_type_name, 0) logger.debug(f'Current guess type name: {guess_type_name}:{guess_type}') labeler = DataLabeler(label_items) labels = labeler.process(utc_ref_ts, ts, acc, file_name, selected=guess_type, labels=labels_idx) labels = merge_labels(labels) if len(labels) > 0: all_files_labels[file_name] = labels # Write to record fold label result file # Write to global result file if result_file is not None: with result_file.open('a+') as f_result: f_result.write(file_name) for v, s, e in labels: f_result.write(f',{v}_{ts[s]}_{ts[e]}') f_result.write('\n') # Write back to data file: new_activity = np.zeros(len(df), dtype=np.int) for label in labels: new_type = label[0] start = label[1] end = label[2] new_activity[start:end] = new_type df['Activity'] = new_activity.tolist() df.to_csv(file_path, index=False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_original_label(filename):\n original_label = filename.replace('.tab', '.lbl')\n original_label = original_label.replace('.txt', '.lbl')\n original_label = original_label.replace('.TAB', '.lbl')\n original_label = original_label.replace('.TXT', '.lbl')\n original_label = original_label.replace('rad', 'psv')\n original_label = original_label.replace('RAD', 'PSV')\n return original_label", "def apply(labels: Dict[str, str], dry=False) -> None:\n for old, new in labels.items():\n log.rename_label(old, new, subroutine, dry=dry)", "def relabel_module(exp_file_list, col_names, file_dir, wav_format, old_dir, lang, dict):\r\n\tword_list = []\r\n\texp_files = get_exp_files(exp_file_list) # step 2\r\n\tci, ti, fi = get_index(exp_files, col_names, wav_format) # steps 3 - 5\r\n\t\r\n\t# check to see that there are no .lab files\r\n\tf_list = glob.glob(file_dir + \"*\")\r\n\tlab_found = False\r\n\tfor f in f_list:\r\n\t\tif \".lab\" in f:\r\n\t\t\tlab_found = True\r\n\tif lab_found == True:\r\n\t\tmake_old_directory(file_dir, old_dir) # step 6\r\n\tdel f_list\r\n\t\r\n\tfile_list = read_files(file_dir, \"wav\") # step 7\r\n\tfor file in file_list:\r\n\t\tname = os.path.basename(file).replace('.wav', '')\r\n\r\n\t\t# get a file name for the lab file\r\n\t\tfile_name = file.replace(file_dir, '')\r\n\t\tfile_name = file_name.replace('.wav', '')\r\n\t\tlab_name = file_name + \".lab\"\r\n\t\t\r\n\t\tbits = parse_file_name(file_name, fi) # step 8a\r\n\r\n\t\ttry:\r\n\t\t\tloc = [x[0].replace(' ', '_') for x in exp_files[0]].index(name)\r\n\t\t\ttext = exp_files[0][loc][1] # old code: find_line_in_file(exp_files, bits, ci, ti) # step 8b\r\n\r\n\t\t\tif text != False:\r\n\t\t\t\ttext = clean_text(lang, text) # step 8ci\r\n\t\t\t\tif dict == True:\r\n\t\t\t\t\tword_list = store_to_dictionary(text, word_list) # step 8cii\r\n\t\t\t\tif lab_found == True:\r\n\t\t\t\t\tmove_old_lab(file_dir, old_dir, lab_name) # step 8ciii\r\n\t\t\t\twrite_to_lab(file_dir, lab_name, text) # step 8civ\r\n\t\t\telse:\r\n\t\t\t\tprint(\"No text found for the file \" + file_name)\r\n\t\texcept ValueError:\r\n\t\t\tcontinue\r\n\r\n\tif word_list != [] and dict == True:\r\n\t\tmake_dictionary_file(file_dir, word_list) # step 9\r", "def new_mrcnn(semantic_label_file, output_label_file):\n img = skimage.io.imread(semantic_label_file)\n img = img[64:192, 64:192]\n img_labeled = skimage.measure.label(img, connectivity=1)\n idx = [np.where(img_labeled == label) for label in np.unique(img_labeled) if label]\n\n list_of_all_mask_indices = []\n list_of_all_class_ids = []\n for i in range(len(idx)):\n tmp = np.zeros(img.shape)\n tmp[idx[i]] = img[idx[i]]\n cur_class_id = np.unique(tmp)[1].astype(int)\n list_of_all_mask_indices.append(idx[i])\n list_of_all_class_ids.append(cur_class_id)\n np.save(output_label_file, [list_of_all_mask_indices, list_of_all_class_ids, len(list_of_all_class_ids)])", "def label_file(input_file):\n file_name, file_ext = os.path.splitext(input_file)\n output_file = file_name + \".label\" + file_ext\n\n # read input file and save them in dict\n features = load_protobuf(input_file)\n\n # for each obstacle ID, sort dict by their timestamp\n fea_trajs = build_trajectory(features)\n\n # for each obstacle ID, label them, remove record cannot be labeled\n for fea_key, fea_traj in fea_trajs.items():\n fea_traj = fea_trajs[fea_key]\n fea_traj = TrajectoryToSample.clean(fea_traj)\n fea_traj = TrajectoryToSample.label(fea_traj)\n for i, fea in enumerate(fea_traj):\n if not fea.HasField('label_update_time_delta'):\n del 
fea_traj[i]\n continue\n if fea.label_update_time_delta < parameters['feature']['threshold_label_time_delta']:\n del fea_traj[i]\n fea_trajs[fea_key] = fea_traj\n # save them in the output file with the same format as the input file\n save_protobuf(output_file, fea_trajs.values())", "def load_labels(labels_dir, trial_name):\n labels_path = labels_dir + trial_name + \".txt\"\n raw_labels_data = np.genfromtxt(labels_path, dtype=np.int,\n converters=LABELS_CONVERTERS,\n usecols=LABELS_USECOLS)\n #print(\"rawlabelsdata: \", raw_labels_data)\n #print(get_first_frame(labels_path))\n frames = np.arange(get_first_frame(labels_path), get_last_frame(labels_path)+1, dtype=np.int)\n #print(\"frames: \", frames)\n #print(frames.shape)\n #labels = np.zeros(frames.shape, dtype=np.int)\n labels1 = []\n #print(labels)\n for start, end, label in raw_labels_data:\n #mask = (frames >= start) & (frames <= end)\n #print(start)\n #print(end)\n i = start\n while(i<end):\n if(i%6 == 0):\n labels1.append(label)\n i = i+1\n\n #labels[mask] = label\n #print(\"labels[mask]: \",labels[mask])\n labels1 = np.array(labels1)\n #print(labels1)\n labels_data = labels1.reshape(-1,1)\n #print(labels1.shape)\n #print(\"labels: \", labels_data)\n \n return labels_data", "def okoo_merge_label(file_name):\n labels_dic = {}\n label = 0\n with open(\"label_doc_3\", encoding='utf-8') as f:\n for line in f:\n if len(line) < 2:\n continue\n for key in re.findall('(\\d+)', line):\n labels_dic[''.join(key)] = label\n label += 1\n cur_true_label = label + 1\n with open(file_name, encoding='utf-8') as f1:\n texts = []\n data = json.load(f1)['all']\n for text_ in data:\n label = text_['label']\n if label in labels_dic:\n text_['merged_label'] = labels_dic[label]\n else:\n print(text_)\n text_['merged_label'] = cur_true_label\n # text_['text'] = ' '.join([c[0] for c in thu0.fast_cut(text_['text'])])\n texts.append(text_)\n\n with open('okoo-merged-3-label.json', 'w', encoding='utf-8') as f:\n json.dump(texts, f, ensure_ascii=False, indent=4, separators=(',', ': '))", "def add_label_info(filepath, label_filepath):\n label_lines = []\n with open(label_filepath, 'r') as f:\n inside_label = False\n for line in f:\n stripped_line = line.strip()\n if not inside_label and stripped_line == '__label__':\n inside_label = True\n elif inside_label:\n # stop if blank line or next section starts\n if not stripped_line or line.startswith('__'):\n break\n # save label content (in case it's the last line, force newline)\n label_lines.append(f'{stripped_line}\\n')\n\n with open(filepath, 'r') as f:\n # create a temporary file with the modified content before it replaces the original file\n temp_dir = tempfile.mkdtemp()\n try:\n temp_filepath = os.path.join(temp_dir, 'test.p8')\n with open(temp_filepath, 'w') as temp_f:\n inside_label = False\n for line in f:\n stripped_line = line.strip()\n if inside_label:\n # reset inside_label if blank line or next section starts\n if not stripped_line or line.startswith('__'):\n inside_label = False\n else:\n temp_f.write(line)\n if stripped_line == '__label__':\n inside_label = True\n # immediately print all label lines\n for label_line in label_lines:\n temp_f.write(label_line)\n\n shutil.copy(temp_filepath, filepath)\n finally:\n shutil.rmtree(temp_dir)", "def load_label(self, idx):\n im = open('{}/GTTXT/{}.txt'.format(root_dir, idx))\n\t#print(type(im.readlines()[0].rstrip(\"\\n\")))\n rgb_label = [i.rstrip(\"\\n\").split(\" \") for i in im.readlines()]\n\tlabel=[]\t\n\tfor i in rgb_label:\n\t\tlabel+=[int(j) for j in 
i]\n\tlabel=np.array(label).reshape(720,960)\n\tlabel[label==-1]=12\n\t#print(np.unique(label))\n #label = label[np.newaxis, ...]\n return label", "def relabel(self, old: int, new: int) -> None:\n if not (isinstance(old, int) and isinstance(new, int)):\n try:\n old = int(old)\n new = int(new)\n except ValueError:\n msg = f'Expecting integer arguments, got {type(old)} and {type(new)}!'\n raise ValueError(msg)\n\n if new in set(self.infos.keys()):\n msg = f'New label < {new} > is in existing labels {set(self.infos.keys())}!'\n raise ValueError(msg)\n\n # modify corresponding SegmentInfo object\n seginfo = self.infos[old]\n seginfo.label_value = new\n # modify array data\n self.data = relabel(self.data, old, new)\n # propagate state changes\n self._update_state_from_infos()", "def predict_label(self, src): # real signature unknown; restored from __doc__\n pass", "def load_labels():\n filename = os.path.join(config['inference']['model_dir'], 'output_labels.txt')\n global labels\n labels = [line.rstrip() for line in tf.gfile.FastGFile(filename)]", "def rename(self, label):\n self._seg_header = self._seg_header.replace(self._label, label)\n self._label = label\n for seg in self._segments:\n seg.rename(label)", "def load_idx_to_label(dataset_name):\n if dataset_name == 'imagenet':\n path = 'https://gist.githubusercontent.com/yrevar/'\n path += '6135f1bd8dcf2e0cc683/raw/'\n path += 'd133d61a09d7e5a3b36b8c111a8dd5c4b5d560ee'\n path += '/imagenet1000_clsid_to_human.pkl'\n idx_to_label = pickle.load(urllib.request.urlopen(path))\n \n elif dataset_name == 'indoor_scenes':\n label_to_idx = {'airport_inside': 0,\n 'bar': 1,\n 'bedroom': 2,\n 'casino': 3,\n 'inside_subway': 4,\n 'kitchen': 5,\n 'livingroom': 6,\n 'restaurant': 7,\n 'subway': 8,\n 'warehouse': 9}\n idx_to_label = {idx: label for label, idx in label_to_idx.items()}\n \n elif dataset_name == 'pubfig10':\n celebs = ['Aaron-Eckhart', 'Adriana-Lima',\n 'Angela-Merkel', 'Beyonce-Knowles', \n 'Brad-Pitt', 'Clive-Owen', \n 'Drew-Barrymore', 'Milla-Jovovich', \n 'Quincy-Jones', 'Shahrukh-Khan']\n idx_to_label = { i: celebs[i] for i in range(len(celebs)) }\n\n elif dataset_name == 'pubfig83':\n celebs = ['adam-sandler', 'alex-baldwin', 'angelina-jolie', 'anna-kournikova', 'ashton-kutcher', 'avril-lavigne',\n 'barack-obama', 'ben-affleck', 'beyonce-knowles', 'brad-pitt', 'cameron-diaz', 'cate-blanchett', 'charlize-theron',\n 'christina-ricci', 'claudia-schiffer', 'clive-owen', 'colin-farell', 'colin-powell', 'cristiano-ronaldo', 'daniel-craig',\n 'daniel-radcliffe', 'david-beckham', 'david-duchovny', 'denise-richards', 'drew-barrymore', 'dustin-hoffman', 'ehud-olmert',\n 'eva-mendes', 'faith-hill', 'george-clooney', 'gordon-brown', 'gwyneth-paltrow', 'halle-berry', 'harrison-ford',\n 'hugh-jackman', 'hugh-laurie', 'jack-nicholson', 'jennifer-aniston', 'jennifer-lopez', 'jennifer-lovehewitt',\n 'jessica-alba', 'jessica-simpson', 'joaquin-phoenix', 'john-travolta', 'julia-roberts', 'jula-stiles', 'kate-moss',\n 'kate-winslet', 'katherine-heigl', 'keira-knightley', 'kiefer-sutherland', 'leonardo-dicaprio', 'lindsay-lohan', 'mariah-carey',\n 'martha-stewart', 'matt-damon', 'meg-ryan', 'meryl-streep', 'michael-bloomberg', 'mickey-rourke', 'miley-cyrus',\n 'morgan-freeman', 'nicole-kidman', 'nicole-richie', 'orlando-bloom', 'reese-witherspoon', 'renee-zellweger', 'ricky-martin',\n 'robert-gates', 'sania-mirza', 'scarlett-johansson', 'shahrukh-khan', 'shakira', 'sharon-stone', 'silvio-berlusconi',\n 'stephen-colbert', 'steve-carell', 'tom-cruise', 
'uma-thurman', 'victoria-beckham', 'viggo-mortensen', 'will-smith', 'zac-efron']\n idx_to_label = { i: celebs[i] for i in range(len(celebs)) }\n\n elif dataset_name == 'vggface2':\n path = \"../utils/vggface2_80_to_complete.pkl\"\n with open(path, 'rb') as file:\n idx_to_label = pickle.load(file)\n\n else:\n raise NotImplementedError\n \n return idx_to_label", "def label_one_file(file_path: Path,\n force=False,\n result_file: Path = None,\n label_items=LABEL_ITEMS):\n global all_files_labels\n logger.info(f'Processing file: {file_path}')\n file_name = file_path.stem\n\n # Result label file check\n file_name_prefix = '-'.join(file_name.split('-')[:-2])\n label_file = file_path.parent / f'{file_name_prefix}-label-result.csv'\n logger.info(f'Label result file path: {label_file}')\n labels_ts = []\n if label_file.exists():\n logger.warning('Label result file exists')\n if not force:\n logger.warning('##Skipped')\n return\n labels_ts = load_label_result(label_file)\n\n # Load data\n df = pd.read_csv(file_path, index_col=False, comment='#')\n ts = df['EventTimestamp(ns)'].values\n utc_ts = df['CurrentTimeMillis'].values\n labels_idx = label_convert_ts2index(labels_ts, ts)\n acc = df.drop(columns=['CurrentTimeMillis', 'EventTimestamp(ns)']).values\n guess_type_name = get_meta_from_file_name(file_name)\n guess_type = LABEL_ITEMS_INDEX_DICT.get(guess_type_name, 0)\n logger.debug(f'Current guess type name: {guess_type_name}:{guess_type}')\n\n labeler = DataLabeler(label_items)\n labels = labeler.process(utc_ts[0],\n ts,\n acc,\n file_name,\n selected=guess_type,\n labels=labels_idx)\n labels = merge_labels(labels)\n if len(labels) > 0:\n all_files_labels[file_name] = labels\n # Write to record fold label result file\n with label_file.open('w+') as f_label:\n for v, s, e in labels:\n f_label.write(f'{v}_{ts[s]}_{ts[e]}\\n')\n # Write to global result file\n if result_file is not None:\n with result_file.open('a+') as f_result:\n f_result.write(file_name)\n for v, s, e in labels:\n f_result.write(f',{v}_{ts[s]}_{ts[e]}')\n f_result.write('\\n')", "def rdf_update_labels(rdf, node):\n final_list = []\n for i in node.get_labels():\n # print(i)\n final_list += rdf_get_branch(rdf, i)\n for i in final_list:\n node.add_label(i)", "def _reflow_labels(self, filename=\"Dockerfile\"):\n\n dfp = DockerfileParser(path=filename)\n labels = dict(dfp.labels) # Make a copy of the labels we need to add back\n\n # Delete any labels from the modeled content\n for key in dfp.labels:\n del dfp.labels[key]\n\n # Capture content without labels\n df_content = dfp.content.strip()\n\n # Write the file back out and append the labels to the end\n with open(filename, 'w') as df:\n df.write(\"%s\\n\\n\" % df_content)\n if labels:\n df.write(\"LABEL\")\n for k, v in labels.iteritems():\n df.write(\" \\\\\\n\") # All but the last line should have line extension backslash \"\\\"\n escaped_v = v.replace('\"', '\\\\\"') # Escape any \" with \\\"\n df.write(\" %s=\\\"%s\\\"\" % (k, escaped_v))\n df.write(\"\\n\\n\")", "def assign_labels(basename, data_folder=Path(\"/data\"), verbose=False):\n urls_path = data_folder / \"graphs\" / basename / (basename + \".urls\")\n assert urls_path.exists(), \"Urls file not found!\"\n # check if labels dict already existing\n labels_path = data_folder / \"models\" / basename / (\"labels.json\")\n if labels_path.exists():\n print(\"Labels json already existing.\")\n else:\n print(\"Building labels json..\")\n # count number of lines in file\n num_lines = sum(1 for line in urls_path.open())\n labels_array 
= [0] * num_lines\n with urls_path.open() as f:\n clusters_count = Counter()\n labels = dict()\n class_index = 0\n for pos, line in enumerate(tqdm(f, total=num_lines)):\n # extract the TLD\n complete_domain = tldextract.extract(line).suffix\n # we only need the country domain now\n domain = complete_domain.split(\".\")[-1]\n # if domain unseen add it to class indices\n if domain not in labels:\n class_index += 1\n labels[domain] = class_index\n # assign label and add it to array\n y = labels[domain]\n labels_array[pos] = y\n clusters_count[domain] += 1\n labels_data = dict()\n # labels_data['labels'] = labels # do we really need this?\n labels_data['labels'] = {int(v): k for k, v in labels.items()}\n labels_data['count'] = clusters_count\n labels_data['array'] = labels_array\n if verbose:\n print(\"Found following labels:\")\n print(labels)\n with open(labels_path, 'w', encoding='utf-8') as outfile:\n json.dump(labels_data, outfile, ensure_ascii=False, indent=4)\n return labels_path", "def label_generator(predictions, processor, filename):\n # Hash predictions for always unique filename\n hashed = hashlib.sha1(predictions).hexdigest()\n\n # Get label from keras predictor\n label = processor(predictions, top=1)[0][0][1]\n\n # Capture original image suffix\n suffix = \"\".join(Path(filename).suffixes)\n\n new_label = f\"{label}_{hashed}{suffix}\"\n\n return new_label", "def relabelMatrix(matrix, args):\n if args.groupLabels:\n if len(args.groupLabels) != len(matrix.matrix.group_labels):\n sys.exit(\"You specified {} group labels, but {} are required.\\n\".format(len(args.groupLabels), len(matrix.matrix.group_labels)))\n matrix.matrix.group_labels = args.groupLabels\n if args.sampleLabels:\n if len(args.sampleLabels) != len(matrix.matrix.sample_labels):\n sys.exit(\"You specified {} sample labels, but {} are required.\\n\".format(len(args.sampleLabels), len(matrix.matrix.sample_labels)))\n matrix.matrix.sample_labels = args.sampleLabels", "def propagate_labels(image,labels,conflict=0):\n rlabels,_ = label(image)\n cors = correspondences(rlabels,labels,False)\n outputs = zeros(amax(rlabels)+1,'i')\n oops = -(1<<30)\n for o,i in cors.T:\n if outputs[o]!=0: outputs[o] = oops\n else: outputs[o] = i\n outputs[outputs==oops] = conflict\n outputs[0] = 0\n return outputs[rlabels]", "def get_labels(fasta_file):\n\t\tbase_name = basename(fasta_file)\n\t\tname = splitext(base_name)[0]\n\t\tlabel = name.split(\"_\")[-1]\n\t\tassert label == \"pos\" or label == \"hard\", \"AssertionError: label {} not found, possible labels pos, hard.\"\n\t\tif label == \"pos\":\n\t\t\treturn \"Toxin\"\n\t\telif label == \"hard\":\n\t\t\treturn \"No_toxin\"", "def _relabel(labels, minval=0, bgval=None):\n\n labels = np.unique(labels, return_inverse=True)[-1] + minval\n if bgval is not None:\n labels[labels == minval] = bgval\n return labels", "def extract_labels(nlabels,filename, one_hot=False):\n print('Extracting', filename,'bbbccicicicicib')\n\n labels=numpy.loadtxt(filename,dtype='int64')\n \n if one_hot:\n print(\"LABELS ONE HOT\")\n print(labels.shape)\n XXX=dense_to_one_hot(labels,nlabels)\n print(XXX.shape)\n return dense_to_one_hot(labels,nlabels)\n print(\"LABELS\")\n print(labels.shape)\n return labels", "def set_labels(repo: Repository, labels: list[Label]):\n\n log.info(f\"Fetching existing labels from {repo.full_name}\")\n existing_labels = {label.name.casefold(): label for label in repo.get_labels()}\n log.info(f\"Found {len(existing_labels)} existing labels\")\n\n for label in labels:\n qualified_name = 
label.qualified_name\n folded_name = qualified_name.casefold()\n if folded_name not in existing_labels:\n log.info(f\"Creating label {qualified_name}\")\n repo.create_label(**label.api_arguments)\n elif label != existing_labels[folded_name]:\n log.info(f\"Updating label {qualified_name}\")\n existing_label = existing_labels[folded_name]\n existing_label.edit(**label.api_arguments)\n else:\n log.info(f\"Label {qualified_name} already exists\")", "def relabel_labelmask(labelmask, preserve_order=True):\n mask = np.copy(labelmask)\n # Get all object labels and their counts.\n labels, counts = np.unique(mask, return_counts=True)\n # Get the indexes of sorted counts, descending.\n ordered_indexes = np.argsort(counts)[::-1]\n # Set largest object as background (ID=0).\n background_label = labels[ordered_indexes[0]]\n mask[mask == background_label] = 0\n # Renumber the rest of the objects 1..n.\n obj_num=1\n if (preserve_order):\n oldlabels = labels\n else:\n oldlabels = labels[ordered_indexes]\n for old_label in oldlabels:\n if (old_label != background_label):\n mask[labelmask == old_label] = obj_num\n obj_num = obj_num + 1\n return mask", "def UpdateLabel(self) -> _n_6_t_0:", "def read_labelmap_vidor(labelmap_file):\n\n labelmap = []\n class_ids = set()\n name = \"\"\n class_id = \"\"\n\n with open('idx_to_pred.pkl', 'rb') as f:\n idx_to_pred = pickle.load(f)\n\n # with PathManager.open(labelmap_file, \"r\") as f:\n # import pdb; pdb.set_trace()\n # for line in f:\n # if line.startswith(\" name:\"):\n # name = line.split('\"')[1]\n # elif line.startswith(\" id:\") or line.startswith(\" label_id:\"):\n # class_id = int(line.strip().split(\" \")[-1])\n # labelmap.append({\"id\": class_id, \"name\": name})\n # class_ids.add(class_id)\n # return labelmap, class_ids\n\n \"\"\"\n (Pdb) categories\n [{'id': 1, 'name': 'bend/bow (at the waist)'}, {'id': 3, 'name': 'crouch/kneel'}, {'id': 4, 'name': 'dance'}, {'id': 5, 'name': 'fall down'}, {'id': 6, 'name': 'get up'}, {'id': 7, 'name': 'jump/leap'}, {'id': 8, 'name': 'lie/sleep'}, {'id': 9, 'name': 'martial art'}, {'id': 10, 'name': 'run/jog'}, {'id': 11, 'name': 'sit'}, {'id': 12, 'name': 'stand'}, {'id': 13, 'name': 'swim'}, {'id': 14, 'name': 'walk'}, {'id': 15, 'name': 'answer phone'}, {'id': 17, 'name': 'carry/hold (an object)'}, {'id': 20, 'name': 'climb (e.g., a mountain)'}, {'id': 22, 'name': 'close (e.g., a door, a box)'}, {'id': 24, 'name': 'cut'}, {'id': 26, 'name': 'dress/put on clothing'}, {'id': 27, 'name': 'drink'}, {'id': 28, 'name': 'drive (e.g., a car, a truck)'}, {'id': 29, 'name': 'eat'}, {'id': 30, 'name': 'enter'}, {'id': 34, 'name': 'hit (an object)'}, {'id': 36, 'name': 'lift/pick up'}, {'id': 37, 'name': 'listen (e.g., to music)'}, {'id': 38, 'name': 'open (e.g., a window, a car door)'}, {'id': 41, 'name': 'play musical instrument'}, {'id': 43, 'name': 'point to (an object)'}, {'id': 45, 'name': 'pull (an object)'}, {'id': 46, 'name': 'push (an object)'}, {'id': 47, 'name': 'put down'}, {'id': 48, 'name': 'read'}, {'id': 49, 'name': 'ride (e.g., a bike, a car, a horse)'}, {'id': 51, 'name': 'sail boat'}, {'id': 52, 'name': 'shoot'}, {'id': 54, 'name': 'smoke'}, {'id': 56, 'name': 'take a photo'}, {'id': 57, 'name': 'text on/look at a cellphone'}, {'id': 58, 'name': 'throw'}, {'id': 59, 'name': 'touch (an object)'}, {'id': 60, 'name': 'turn (e.g., a screwdriver)'}, {'id': 61, 'name': 'watch (e.g., TV)'}, {'id': 62, 'name': 'work on a computer'}, {'id': 63, 'name': 'write'}, {'id': 64, 'name': 'fight/hit (a person)'}, {'id': 65, 
'name': 'give/serve (an object) to (a person)'}, {'id': 66, 'name': 'grab (a person)'}, {'id': 67, 'name': 'hand clap'}, {'id': 68, 'name': 'hand shake'}, {'id': 69, 'name': 'hand wave'}, {'id': 70, 'name': 'hug (a person)'}, {'id': 72, 'name': 'kiss (a person)'}, {'id': 73, 'name': 'lift (a person)'}, {'id': 74, 'name': 'listen to (a person)'}, {'id': 76, 'name': 'push (another person)'}, {'id': 77, 'name': 'sing to (e.g., self, a person, a group)'}, {'id': 78, 'name': 'take (an object) from (a person)'}, {'id': 79, 'name': 'talk to (e.g., self, a person, a group)'}, {'id': 80, 'name': 'watch (a person)'}]\n (Pdb) class_whitelist\n {1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17, 20, 22, 24, 26, 27, 28, 29, 30, 34, 36, 37, 38, 41, 43, 45, 46, 47, 48, 49, 51, 52, 54, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 72, 73, 74, 76, 77, 78, 79, 80}\n \"\"\"", "def assign_labels(self, data):\n data[self.label] = self.labeler(data.index.values)", "def extract_labels(filename,tag,one_hot):\n print('Extracting labels',filename)\n return extractdb_labels(filename,tag,one_hot=one_hot)" ]
[ "0.63218987", "0.6226931", "0.62268233", "0.62173223", "0.6149746", "0.6009117", "0.60019803", "0.59985584", "0.59711593", "0.5957811", "0.5940782", "0.59025943", "0.5864083", "0.5859577", "0.5859372", "0.5857351", "0.5856371", "0.5854134", "0.5827571", "0.5821693", "0.5807018", "0.5787047", "0.578541", "0.57316697", "0.5714139", "0.5709212", "0.57075113", "0.56993055", "0.5698496", "0.56596065" ]
0.7301757
0
returns True if source was changed (cache revalidation is needed)
def source_changed(source, cache): return os.path.getmtime(source)>os.path.getmtime(cache)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hasChanged(self):\n return ((self.mtime != getmtime(self.path)) or\n (self.size != os.path.getsize(self.path)) )", "def has_changed(self) -> bool:\n # TODO: Add in change logic here\n state = None\n if state != self._file_state:\n self._changed_flag = True\n self._file_state = state\n return self._changed_flag", "def changed(self):\n return True", "def has_changed(self):\n timestamp = os.stat(self.filename).st_mtime\n if timestamp > self.last_timestamp:\n self.last_timestamp = timestamp\n return True\n return False", "def check_modified(self) -> bool:\n return bool(self._modified)", "def _assets_are_stale(self, sourcedirectory, cachedirectory):\n comparison = filecmp.dircmp(sourcedirectory, cachedirectory, [], [])\n if comparison.left_only or comparison.right_only:\n # We have files in one directory and not the other\n return True\n if comparison.diff_files:\n # Some of the files have changed\n return True\n\n return False", "def hasChanged(self):\r\n if self.is_updated:\r\n self.is_updated = False\r\n return True\r\n else:\r\n return False\r\n\r\n # if not self.hasBeenUpdatedOnce:\r\n # self.hasBeenUpdatedOnce = True\r\n # return True\r\n # else:\r\n # if BLENDER_MODE == 'BPY':\r\n # # for e in dir(self.obj): print(e)\r\n # # print(self.obj, self.obj.name, self.obj.is_updated, self.obj.is_updated_data)\r\n # # return self.obj.is_updated # DOESN't UPDATE A THING!\r\n # # return True\r\n # return self.is_updated\r\n\r\n # return False # no update in BGE mode\r", "def changed(self):\n if self.exists():\n return self.current_content != self.content\n else:\n return True", "def canReloadWithChange(self, externalFilePath):\n return False", "def needs_update(self, cache_key):\r\n return self._read_sha(cache_key) != cache_key.hash", "def has_changed(self):\n return bool(self.changed_data)", "def has_been_modified(self):\n return self._has_been_modified", "def is_changed(self, include_md: bool = True) -> bool:\n current = self.calculate_hash(include_md=include_md)\n stored = self.hash if include_md else self.stub_hash\n log.trace(f\"changed = {self.hash != current} | Stored: {stored} | Current: {current}\")\n return stored != current", "def changed(self) -> bool:\n return self._changed", "def changed(self) -> bool:\n for chunk_location, chunk in self._chunk_cache.items():\n if chunk is None:\n # if the chunk is None and the saved record is not None, the chunk has changed.\n if chunk_location not in self._chunk_index:\n return True\n _, save_chunk_index = self._chunk_index[chunk_location]\n chunk_storage = self._chunk_history[chunk_location]\n if chunk_storage[save_chunk_index] is not None:\n return True\n elif chunk.changed:\n return True\n for chunk_index, save_chunk_index in self._chunk_index.values():\n if chunk_index != save_chunk_index:\n return True\n return False", "def needs_rebuild(source, target):\n return not os.path.isfile(target) or (\n os.path.getmtime(source) > os.path.getmtime(target))", "def is_change(self) -> bool:\n return self._change", "def isUpdated(self):\n seq = self.readSeq()\n\n if (seq != self.seq):\n self.seq = seq\n return True\n else:\n return False", "def has_resource_changed(self, resource):\n logger.debug(\"Checking for changes in %s\" % resource)\n self.load_template_if_needed()\n self.load_site_if_needed()\n\n target = File(self.site.config.deploy_root_path.child(\n resource.relative_deploy_path))\n if not target.exists or target.older_than(resource.source_file):\n logger.debug(\"Found changes in %s\" % resource)\n return True\n if 
resource.source_file.is_binary:\n logger.debug(\"No Changes found in %s\" % resource)\n return False\n if self.site.config.needs_refresh() or \\\n not target.has_changed_since(self.site.config.last_modified):\n logger.debug(\"Site configuration changed\")\n return True\n\n deps = self.get_dependencies(resource)\n if not deps or None in deps:\n logger.debug(\"No changes found in %s\" % resource)\n return False\n content = self.site.content.source_folder\n layout = Folder(self.site.sitepath).child_folder('layout')\n logger.debug(\"Checking for changes in dependents:%s\" % deps)\n for dep in deps:\n if not dep:\n return True\n source = File(content.child(dep))\n if not source.exists:\n source = File(layout.child(dep))\n if not source.exists:\n return True\n if target.older_than(source):\n return True\n logger.debug(\"No changes found in %s\" % resource)\n return False", "def has_source_file( self ):\n return self._source_file is not None", "def dirty(self):\n return self._orig_line is not None", "def has_changed(self):\n return self.get_old_value() != self.get_current_value()", "def updated(self):\n return self.expires != self.orig_expires", "def is_outdated(compiler_suite: str, grammar_source: str) -> bool:\n try:\n _, grammar, _, _ = load_compiler_suite(compiler_suite)\n return grammar_changed(grammar(), grammar_source)\n except ValueError:\n return True", "def test_verify_changed_source_file(self):\n # This test was made to pass in fixing Bug #1354880\n self.backup(u\"full\", u\"testfiles/various_file_types\", options=[])\n\n # Edit source file\n with open('testfiles/various_file_types/executable', 'r+') as f:\n f.write('This changes a source file.')\n\n # Test verify for the file\n self.verify(u'testfiles/various_file_types/executable', file_to_verify=u'executable', options=[])", "def needs_rebuild(self) -> bool:\n old_hash = self._cache.get(\"config\", None)\n new_hash = utilities.hash_object_sha256(self._get_config_raw())\n self._cache[\"config\"] = new_hash\n\n if not old_hash:\n return False\n return old_hash != new_hash", "def is_updated(self):\n return self.timestamp > 0", "def is_modified(self):\n return len(self.modified_fields) > 0", "def has_state_changed(self) -> bool:\r\n ...", "def isChanged(self, p_int): # real signature unknown; restored from __doc__\n return False" ]
[ "0.68384165", "0.67520934", "0.67060685", "0.6670692", "0.6652378", "0.6626898", "0.6595107", "0.6590439", "0.65189856", "0.6509706", "0.64642346", "0.6343046", "0.6337292", "0.62796354", "0.62630564", "0.6256935", "0.6254536", "0.6247282", "0.62462795", "0.6181623", "0.6174619", "0.6142391", "0.61350495", "0.61145985", "0.6096518", "0.60763747", "0.60728675", "0.6068639", "0.60417604", "0.6035389" ]
0.8501976
0
returns CachedImage retrieved from cache or None
def get_image(self, processor, source, cache=None): cache = cache or self._get_cache_filename(processor, source) cache_path = os.path.join(self.cache_dir, cache) if os.path.exists(cache_path): return CachedImage(cache_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_cached_item(self, path):\n item_path = '%s/%s' % (\n self.cache_folder,\n path.strip('/')\n )\n\n try:\n cf_object = self.container.get_object(item_path)\n except NoSuchObject:\n return False\n\n f = tempfile.NamedTemporaryFile()\n f.write(cf_object.fetch())\n f.seek(0)\n image = Image.open(f.name)\n f.close()\n\n return image", "def get_image(self):\n if not hasattr(self, '_BasePublication__image_cache'):\n images = self.get_images()\n self.__image_cache = images[0].picture if images else None\n return self.__image_cache", "def get_image_from_cache(cache, file_path):\n if file_path in cache:\n return cache[file_path]\n image = read_image(file_path, GRAY_NUMBER)\n cache[file_path] = image\n return image", "def get_image(self, address):\r\n # Do a bit of caching\r\n if self.last_image and self.last_image.contains(address):\r\n return self.last_image\r\n \r\n # if it was not cached, traverse all of the loaded images\r\n for image in self.loaded_images:\r\n if image.contains(address):\r\n self.last_image = image\r\n return image\r\n \r\n return None", "def cache_get(self, key: str) -> Optional[bytes]:\n if self.cache is not None:\n return self.cache.get(key)\n return None", "def _cache_image(self, instance):\n\n image_name = '%s.tar.gz' % instance['image_id']\n full_image_path = '%s/%s' % (FLAGS.ovz_image_template_dir, image_name)\n\n if not os.path.exists(full_image_path):\n # These objects are required to retrieve images from the object store.\n # This is known only to work with glance so far but as I understand it\n # glance's interface matches that of the other object stores.\n user = manager.AuthManager().get_user(instance['user_id'])\n project = manager.AuthManager().get_project(instance['project_id'])\n\n # Grab image and place it in the image cache\n images.fetch(instance['image_id'], full_image_path, user, project)\n return True\n else:\n return False", "def cache_get(item: str) -> object:\n\titem = str(item)\n\tcache = cache_find(item)\n\n\t# cache_find() will return none if the cache does not exist\n\t# the returned location is guaranteed to exist, so no point checking again.\n\n\tif cache is not None:\n\t\ttry:\n\t\t\tcached = pickle.load(open(cache, \"rb\"))\n\t\texcept EOFError as ex:\n\t\t\t# Cache file is corrupted, so print an error and act like it does\n\t\t\t# not exist. 
We do not delete the cache file incase the user wants\n\t\t\t# to recover the file.\n\t\t\tuux.show_error(\"Error when loading file from cache: \" + str(ex))\n\t\t\treturn None\n\t\texcept Exception as ex:\n\t\t\traise ex\n\t\tuux.show_debug(\"Cache hit for \" + item)\n\t\treturn cached\n\n\treturn None", "def __cached(self):\n # already cached stuff\n if self._cached is None:\n self._cached = Cached(self.resource)\n return self._cached", "def get(self):\n try:\n imageFilename = random.choice(os.listdir(self.cacheDir))\n imagePath = os.path.join(self.cacheDir, imageFilename)\n with open(imagePath) as imageFile:\n self.image = imageFile.read()\n except IndexError:\n raise GimpCaptchaError(\"CAPTCHA cache dir appears empty: %r\"\n % self.cacheDir)\n except (OSError, IOError):\n raise GimpCaptchaError(\"Could not read Gimp captcha image file: %r\"\n % imageFilename)\n\n self.answer = imageFilename.rsplit(os.path.extsep, 1)[0]\n self.challenge = self.createChallenge(self.answer)\n\n return (self.image, self.challenge)", "def get_cache(self, key):\n return self.r.get(key)", "def get(self, key):\n if key:\n return self.cache_data.get(key)\n else:\n return None", "def get_image(self, filename):\n\n # Try the cache first.\n bmp = self._images.get(filename)\n if bmp is None:\n # Load the image from the file and add it to the list.\n #\n # N.B 'wx.BITMAP_TYPE_ANY' tells wxPython to attempt to autodetect\n # --- the image format.\n image = wx.Image(filename, wx.BITMAP_TYPE_ANY)\n\n # We force all images in the cache to be the same size.\n self._scale(image)\n\n # We also force them to be bitmaps!\n bmp = image.ConvertToBitmap()\n\n # Add the bitmap to the cache!\n self._images[filename] = bmp\n\n return bmp", "def get_image():\n return models.Image.objects.all()[0]", "def get_image_file(self, processor, source, cache=None):\n cache = cache or self._get_cache_filename(processor, source)\n cache_path = os.path.join(self.cache_dir, cache)\n if not os.path.exists(cache_path) or source_changed(source, cache):\n processor.process(source).save(cache_path)\n return cache_path", "def get(self, key):\n if key and key in self.cache_data.keys():\n return self.cache_data[key]\n else:\n return None", "def get(self, key):\n if key and key in self.cache_data.keys():\n return self.cache_data[key]\n else:\n return None", "def store_image(self, http_client, link_hash, src, config):\r\n # check for a cache hit already on disk\r\n image = self.read_localfile(link_hash, src, config)\r\n if image:\r\n return image\r\n\r\n # no cache found download the image\r\n data = self.fetch(http_client, src)\r\n if data:\r\n image = self.write_localfile(data, link_hash, src, config)\r\n if image:\r\n return image\r\n\r\n return None", "def get(key):\n return Cache.cache_connector.get(key)", "def get(self, identifier):\n cache_file_path = self._get_cache_file_path(identifier)\n\n if os.path.isfile(cache_file_path):\n with open(cache_file_path, 'rb') as fp:\n result = pickle.load(fp)\n return result\n\n return None", "def GetFromCache(self, filename):\n return memcache.get('%s%s' % (self.CACHE_PREFIX, filename))", "def GetFromCache(self, filename):\n return memcache.get('%s%s' % (self.CACHE_PREFIX, filename))", "def _load_cached_2to3(self, path, cache):\n try:\n cache_stats = os.stat(cache)\n source_stats = os.stat(path)\n except OSError as e:\n if e.errno == errno.ENOENT: # FileNotFoundError\n self.logger.debug('Cache miss: %s' % cache)\n return None\n else:\n raise\n\n if cache_stats.st_mtime <= source_stats.st_mtime:\n 
self.logger.debug('Cache miss (stale): %s' % cache)\n return None\n\n self.logger.debug(\"Cache hit: %s\" % cache)\n return super().get_data(cache)", "def get(self, key):\n if key is None:\n return None\n return self.cache_data.get(key, None)", "def cache_box(self):\n caches = [box for box in self.child_boxes.all()\n if box.storage_type == StorageBox.CACHE]\n if len(caches) == 1:\n return caches[0]\n elif len(caches) > 1:\n return caches[random.choice(range(len(caches)))]\n return None", "def get(self, key):\n if key and key in self.cache_data:\n return self.cache_data[key]\n return None", "def image(self):\n if self.hasImage():\n return self._image.pixmap().toImage()\n return None", "def get_local_image(self, src):\r\n local_image = ImageUtils.store_image(None,\r\n self.link_hash, src, self.config)\r\n return local_image", "def cache_image(self):\n img_temp = NamedTemporaryFile()\n # Header required for HTTPS connections\n request = Request(self.url, headers={'User-Agent': ''})\n response = urlopen(request)\n type_file = dict(response.info()._headers)['Content-Type']\n if 'image' not in type_file:\n raise ValidationError(\"The URL does not contains any image. (Content-Type: {0}) (URL: {1})\".format(type, self.url))\n # Store the filename with extension\n url_image = urlparse(self.url)\n filename, file_ext = splitext(basename(url_image.path))\n # If the file doesn't have a extension, find it out from the header\n if file_ext == '':\n file_ext = type_file.replace('image/', '')\n self.filename = \"{0}.{1}\".format(filename, file_ext)\n source_data = response.read()\n # Compress the image\n source_data = optimize(source_data)\n img_temp.write(source_data)\n img_temp.flush()\n # Save the image in the server\n self.image .save(self.url, File(img_temp))", "def getOrDownloadImageObject(self, url):\n \n if \"//\" in url:\n return self.downloadImage(url)\n else:\n return self.getPILFromPath(url)", "def get_image(self, image):\n return self._get(_image.Image, image)" ]
[ "0.7916652", "0.77383137", "0.7470543", "0.72071636", "0.70587105", "0.684402", "0.6749766", "0.6610611", "0.65577084", "0.6536232", "0.65349305", "0.6476579", "0.6444317", "0.6421493", "0.64117175", "0.64117175", "0.63977987", "0.6368785", "0.63677204", "0.63548386", "0.63548386", "0.6349277", "0.63212574", "0.62994677", "0.62908083", "0.6283642", "0.6280723", "0.62715983", "0.62466764", "0.6246467" ]
0.789398
1
returns processed PIL Image file path retrieved from cache or generated using processor
def get_image_file(self, processor, source, cache=None): cache = cache or self._get_cache_filename(processor, source) cache_path = os.path.join(self.cache_dir, cache) if not os.path.exists(cache_path) or source_changed(source, cache): processor.process(source).save(cache_path) return cache_path
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_processed_path(self):\n\n return self.processed_img_path", "def get_image(self, processor, source, cache=None):\n cache = cache or self._get_cache_filename(processor, source)\n cache_path = os.path.join(self.cache_dir, cache)\n if os.path.exists(cache_path):\n return CachedImage(cache_path)", "def process(self):\n return self.output_image", "def imagefile(self):\n return self.__inputfilename", "def _get_cache_filename(self, processor, source):\n return '%s%s' % (str(hashlib.md5(str(processor.quality) + source\\\n + str(os.path.getsize(source)) \\\n + make_filters_hash(processor.filters) \\\n + source.encode('utf-8')).hexdigest()),\n os.path.splitext(source)[1])", "def image_process(image_info):\n path = os.path.join(cfg.IMAGESET, image_info.get(\"index\") + \".jpg\")\n if not os.path.exists(path):\n raise IOError(\"please check your file is not exists: \" + path)\n def load_image(path):\n image = Image.open(path)\n return image\n return load_image(path)", "def get_image(self):\n return self.process_raw_image(self.get_raw_image())", "def get_image_from_cache(cache, file_path):\n if file_path in cache:\n return cache[file_path]\n image = read_image(file_path, GRAY_NUMBER)\n cache[file_path] = image\n return image", "def get_image_url():", "def process_image(self):\n pass", "def get_raw_path(self):\n\n return self.raw_img_path", "def get_image_path(self):\n\t\treturn call_sdk_function('PrlVmDev_GetImagePath', self.handle)", "def process(image):\n pass", "def get_image(self):\n if not hasattr(self, '_BasePublication__image_cache'):\n images = self.get_images()\n self.__image_cache = images[0].picture if images else None\n return self.__image_cache", "def make_image(self, path):\n\t\treturn None", "def get_pathname(self):\n return self.image_data.path", "def _image_location(image_info):\n return os.path.join(tempfile.gettempdir(), image_info['id'])", "def _getImage(self, img):\n\n # lazily fill in some attributes\n if not 'local_file_path' in img:\n img['local_file_path'] = os.path.join(self.image_root, img['filename'])\n if not 'feat' in img: # also fill in the features\n # NOTE: imgid is an integer, and it indexes into features\n fn = os.path.basename(img['filename'])\n return img", "def imagePath(self):\n return self.path", "def image_reference(self, image_id):\n info = self.image_info[image_id]\n if info[\"source\"] == \"pcb\":\n return info[\"path\"]\n else:\n super(self.__class__, self).image_reference(image_id)", "def get_image_path(self) -> Optional[str]:\n if not self.image or not self.image.file_path:\n return None\n return self.image.file_path", "def imagefile(self):\n return os.path.join(self.__folder, self.__name + '.jpg')", "def _real_image_path(self, path):\r\n return osp.join(self.train_image_path, path)", "def get_cache_path(self):", "def get_cache_path(self):", "def imagePath(self):\n if self.use_dic:\n if self.imlist:\n paths = []\n for img in self.allimgs:\n paths.append(join(self.home, 'data'+str(self.data), self.activity, self.imsize, str(img)+'.jpg'))\n return paths\n else:\n path = join(self.home, 'data'+str(self.data), self.activity, self.imsize, str(self.img)+'.jpg')\n else:\n path = self.img\n return path", "def get_img_file(image, db):\n img_dir = db.source\n if img_dir == None:\n raise ValueError('Cannot locate file without a base path. This method looks for it at \\\n db.source, which is not set. 
This should be set by the loader during DB construction!')\n img_dir = path.join(img_dir, 'img')\n # get location title.\n loc_id = db.get_img_loc(int(image))\n if loc_id == None:\n raise ValueError('The image %s could not be found' % image)\n loc = db.get_location(loc_id)\n title = loc['title']\n # add to file name\n img_dir = path.join(img_dir, title, str(image) + '.jpg')\n return img_dir", "def get_local_image(self, src):\r\n local_image = ImageUtils.store_image(None,\r\n self.link_hash, src, self.config)\r\n return local_image", "def source_path(self, workspace):\n if self.file_name_method.value == FN_FROM_IMAGE:\n path_feature = \"%s_%s\" % (\n C_PATH_NAME,\n self.file_image_name.value,\n )\n assert workspace.measurements.has_feature(\"Image\", path_feature), (\n \"Image %s does not have a path!\" % self.file_image_name.value\n )\n return workspace.measurements.get_current_image_measurement(path_feature)\n\n # ... otherwise, chase the cpimage hierarchy looking for an image with a path\n cur_image = workspace.image_set.get_image(self.image_name.value)\n while cur_image.path_name is None:\n cur_image = cur_image.parent_image\n assert (\n cur_image is not None\n ), \"Could not determine source path for image %s' % (self.image_name.value)\"\n return cur_image.path_name", "def getimage(self):" ]
[ "0.7165686", "0.7003112", "0.6352672", "0.6270706", "0.6210086", "0.6124692", "0.60887355", "0.60833514", "0.60302365", "0.60283566", "0.6013624", "0.6001999", "0.5993517", "0.59487355", "0.5870053", "0.5862867", "0.585564", "0.58496", "0.58461076", "0.5843072", "0.58290166", "0.58243287", "0.5809639", "0.58085215", "0.58085215", "0.5806925", "0.5802367", "0.57961434", "0.5791516", "0.57913524" ]
0.7657464
0
make md5 hash filename from source file properties and processor filters
def _get_cache_filename(self, processor, source): return '%s%s' % (str(hashlib.md5(str(processor.quality) + source\ + str(os.path.getsize(source)) \ + make_filters_hash(processor.filters) \ + source.encode('utf-8')).hexdigest()), os.path.splitext(source)[1])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def file_field_md5(source_file, blocksize=65536):\n hasher = hashlib.md5()\n if source_file.closed:\n source_file.open('rb')\n buf = source_file.read(blocksize)\n while len(buf) > 0:\n hasher.update(buf)\n buf = source_file.read(blocksize)\n source_file.seek(0)\n return hasher.hexdigest()", "def compute(fileName, excludeLine=\"\", includeLine=\"\"):\n m = hashlib.md5()\n fd = open(fileName,\"rb\")\n content = fd.readlines()\n fd.close()\n for eachLine in content:\n if excludeLine and eachLine.startswith(excludeLine):\n continue\n m.update(eachLine)\n m.update(includeLine)\n return m.hexdigest()", "def __build_file_name(self, func, args):\n # Build a unique string to hash\n if self.__log:\n self.__logger.info(f\"Building file name for {func.__name__} with {args}\")\n\n # Hash with the specified algorithm and hexdigest\n # to produce a string\n fname = self.algorithm(\n b\"\".join([func.__name__.encode(\"utf8\"), pickle.dumps(args)])\n ).hexdigest()\n\n pathToFile = os.path.join(self.cacheDir, fname)\n if self.__log:\n self.__logger.info(f\"Built path {pathToFile}\")\n return pathToFile", "def calc_file_hash(filepath):\n with open(filepath, 'rb') as f:\n return md5(f.read()).hexdigest()", "def static_file_hash(filepath):\n hasher = hashlib.md5() # nosec: B303\n\n with contextlib.closing(open(filepath, 'rb')) as file:\n hasher.update(file.read())\n return hasher.hexdigest()", "def compute_gzip_md5(fqfn):\n md5 = hashlib.md5()\n file_obj = gzip.open(fqfn, 'rb')\n for chunk in iter(lambda: file_obj.read(8192), ''):\n md5.update(chunk)\n\n file_obj.close()\n return md5.hexdigest()", "def _fingerprint(self):\n hasher = hashlib.md5()\n source = inspect.getsource(self._func)\n hasher.update(source.encode('utf-8'))\n\n return hasher.hexdigest()", "def getFileMD5(f: java.io.File, monitor: ghidra.util.task.TaskMonitor) -> unicode:\n ...", "def _generate_filename(instance, filename, prefix):\n md5 = hashlib.md5()\n md5.update(struct.pack('f', time.time()))\n for chunk in instance.file.chunks():\n md5.update(chunk)\n extension = os.path.splitext(filename)[1]\n return os.path.join(prefix, md5.hexdigest() + extension)", "def ondisk_digest(self):\n with open(self.rename_phase_src) as f:\n return hasher(f.read()).hexdigest()", "def hash_file(path: str) -> str:\n return _hash_file(path, hashlib.md5()).hexdigest()", "def outfile_name(cmd):\n return md5.md5(cmd).hexdigest()[:8]", "def generate_hash(self):\r\n\r\n hash_list = []\r\n for root, dirs, files in os.walk(self.options['source']):\r\n for f in sorted([f for f in files if not f.startswith('.')]):\r\n hash_list.append(os.path.join(root, f))\r\n hash_list.append(str(os.path.getmtime(os.path.join(root, f))))\r\n hash_list = ''.join(hash_list)\r\n\r\n if sys.version < '3':\r\n return hashlib.sha1(hash_list).hexdigest()\r\n return hashlib.sha1(hash_list.encode('utf-8')).hexdigest()", "def get_file_hash (fullpath) : \n\n # This bit was sourced from Stack Overflow via Google, specifically:\n # http://stackoverflow.com/questions/1131220/get-md5-hash-of-a-files-without-open-it-in-python\n\n md5 = hashlib.md5()\n with open(fullpath,'rb') as f: \n for chunk in iter(lambda: f.read(512*md5.block_size), ''): \n md5.update(chunk)\n # Hexdigest is the safe varchar(32) style output\n return md5.hexdigest()", "def get_file_hash(file_path):\n with open(file_path, 'rb') as f:\n file_name = os.path.basename(file_path)\n to_hash = f.read() + file_name.encode('utf-8')\n new_hash = hashlib.md5(to_hash).hexdigest()\n return new_hash", "def _hash_file(fpath, 
algorithm='sha256', chunk_size=65535):\n if (algorithm == 'sha256') or (algorithm == 'auto' and len(hash) == 64):\n hasher = hashlib.sha256()\n else:\n hasher = hashlib.md5()\n\n with open(fpath, 'rb') as fpath_file:\n for chunk in iter(lambda: fpath_file.read(chunk_size), b''):\n hasher.update(chunk)\n\n return hasher.hexdigest()", "def vectorization_md5_hash(self):\n keys = sorted(pr.__dict__)\n keys.remove('threshold_config')\n keys.remove('threshold_center')\n return hashlib.md5(\n str([pr.__dict__[i] for i in keys]).encode()\n ).hexdigest()", "def _HashFilename(filename):\n if isinstance(filename, unicode):\n filename = filename.encode(UTF8)\n else:\n filename = unicode(filename, UTF8).encode(UTF8)\n m = hashlib.sha1(filename)\n return 'TRACKER_' + m.hexdigest() + '.' + filename[-16:]", "def _hash_file(fpath, algorithm='sha256', chunk_size=65535):\n if (algorithm is 'sha256') or (algorithm is 'auto' and len(hash) is 64):\n hasher = hashlib.sha256()\n else:\n hasher = hashlib.md5()\n\n with open(fpath, 'rb') as fpath_file:\n for chunk in iter(lambda: fpath_file.read(chunk_size), b''):\n hasher.update(chunk)\n\n return hasher.hexdigest()", "def hash_files(verbose, debug):\n\n found = []\n h = hashlib.new('md5')\n for pattern in FILES_PATTERNS:\n for f in glob.iglob(pattern, flags=FLAGS):\n name = f.replace('\\\\', '/')\n found.append(name)\n if verbose:\n print('FILES:')\n for f in sorted(found):\n if verbose:\n print(f)\n h.update(f.encode('ascii'))\n with open(f, 'rb') as f:\n h.update(f.read().replace(b'\\r\\n', b'\\n'))\n result = h.hexdigest()\n print('HASH: ', result)\n return result", "def svn_fs_file_md5_checksum(*args):\r\n return _fs.svn_fs_file_md5_checksum(*args)", "def CalcMD5(filepath):\n with open(filepath,'rb') as f:\n md5obj = hashlib.md5()\n md5obj.update(f.read())\n return md5obj.hexdigest()", "def source(dirname, filename, gen_content):\n if dirname in lut['sources']:\n s.add('MD5SUM=\"$(find \"{0}\" -printf %T@\\\\\\\\n | md5sum)\"', dirname)\n if secret is None:\n s.add('tar xf \"{0}\" -C \"{1}\"',\n filename,\n dirname,\n sources={filename: gen_content()})\n else:\n s.add('wget \"{0}/{1}/{2}/{3}\"', server, secret, b.name, filename)\n s.add('tar xf \"{0}\" -C \"{1}\"', filename, dirname)\n for manager, service in lut['sources'][dirname]:\n s.add('[ \"$MD5SUM\" != \"$(find \"{0}\" -printf %T@\\\\\\\\n ' # No ,\n '| md5sum)\" ] && {1}=1',\n dirname,\n manager.env_var(service))", "def generate_sum(file_path):\n #file = open(file_path, 'rb')\n #header = file.read()\n header = open(file_path, 'rb').read()\n suma_md5 = md5(header).hexdigest()\n return suma_md5", "def _compute_hal9000_md5(observable: Observable) -> str:\n md5_hasher = md5()\n md5_hasher.update(observable.type.encode('utf-8', errors='ignore'))\n md5_hasher.update(observable.value.encode('utf-8', errors='ignore'))\n return md5_hasher.hexdigest()", "def md5(self):\n if self.source_file is None:\n return None\n if self._md5 is None:\n self._md5 = file_field_md5(self.source_file)\n # try: # Locally stored file\n # self._md5 = local_md5(self.source_file.path)\n # except NotImplementedError: # AWS S3 storage\n # self._md5 = s3_md5(self.source_file.file.key)\n return self._md5", "def fingerprint():\n files = (glob.glob(base_dir + '**/*.html') +\n glob.glob(base_dir + '*.html') +\n glob.glob(base_dir + 'core.js'))\n\n md5s = OrderedDict()\n\n for fil in sorted(files):\n name = fil[len(base_dir):]\n with open(fil) as fp:\n md5 = hashlib.md5(fp.read().encode('utf-8')).hexdigest()\n md5s[name] = md5\n\n template = 
\"\"\"\\\"\\\"\\\"DO NOT MODIFY. Auto-generated by script/fingerprint_frontend.\\\"\\\"\\\"\n\nFINGERPRINTS = {}\n\"\"\"\n\n result = template.format(json.dumps(md5s, indent=4))\n\n with open(fingerprint_file, 'w') as fp:\n fp.write(result)", "def hash_file(fname):\n hash_md5 = hashlib.md5()\n with open(fname, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n return hash_md5.hexdigest()", "def checksumFile(filename):\n return md5File(filename)", "def md5(filename: str) -> str:\n # using md5 for speed\n _hash = hashlib.md5()\n # open file for reading in binary mode\n with open(filename,'rb') as file:\n for block in iter(lambda: file.read(1024), b\"\"):\n _hash.update(block)\n return _hash.hexdigest()" ]
[ "0.6519305", "0.63283056", "0.62906706", "0.62502766", "0.62337464", "0.62247896", "0.62084156", "0.6197898", "0.6172346", "0.60608166", "0.59985685", "0.5990981", "0.5989901", "0.5962435", "0.59557986", "0.59446186", "0.5944345", "0.59428257", "0.5932913", "0.5931676", "0.59251386", "0.5920897", "0.59154016", "0.5907685", "0.58969575", "0.5896647", "0.58913285", "0.58891904", "0.58867186", "0.587956" ]
0.7254631
0
Fit IV array to 2 Ic RSJ model and return arrays of fit params, error.
def fit2rsj_arr(iarr, varr, **kwargs): if 'guess' in kwargs: kwargs['guess'] = np.array(kwargs['guess']) # array type update = kwargs.get('updateguess', 0.95) n = len(iarr) npopt = 4 popt_arr, pcov_arr = np.zeros((n, npopt)), np.zeros((n, npopt, npopt)) for k in range(n): try: done = False; l = 0 while not done: # fit popt, pcov = jjiv.fit2rsj(iarr[k], varr[k], **kwargs) # update guess if k == 0: kwargs['guess'] = popt else: kwargs['guess'] = (1-update)*kwargs['guess'] + update*popt # check if fit is good l += 1 if np.shape(pcov)==(4,4): perr = np.sqrt(np.diag(pcov)) else: perr = (np.inf, np.inf, np.inf, np.inf) if (np.amax(perr) < .05) or (l > 5): done = True popt_arr[k], pcov_arr[k] = popt, pcov else: print('Fit not good. Index: {}, Trial: {}'.format(k,l)) except RuntimeError: print('Can\'t fit. Index: {}!'.format(k)) return popt_arr, pcov_arr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def svmfit(itr,C):\r\n train_x, train_y, valid_x, valid_y=get_next_train_valid(itr)\r\n train_y=train_y.reshape(len(train_y),1)\r\n n = len(train_y)\r\n P = matrix(np.dot(train_x,train_x.T) * np.outer(train_y,train_y))\r\n q = matrix(-np.ones([n, 1], np.float64))\r\n G = matrix(np.vstack((-np.eye((n)), np.eye(n))))\r\n h = matrix(np.vstack((np.zeros((n,1)), np.ones((n,1)) * C)))\r\n A = matrix(train_y.reshape(n,1).T)\r\n b = matrix(np.zeros(1))\r\n solvers.options['show_progress'] = False\r\n sol = solvers.qp(P,q,G,h,A,b)\r\n lbd = np.array(sol['x'])\r\n threshold = 1e-5\r\n S = (lbd > threshold).reshape(-1, )\r\n w = np.dot(train_x.T, lbd * train_y)\r\n bb = train_y[S] - np.dot(train_x[S], w)\r\n bb = np.mean(b)\r\n \r\n return w, bb", "def fit(self):\n\n fitdata = np.polyfit(self.v**(-2./3.), self.e, 3, full=True)\n ssr = fitdata[1]\n sst = np.sum((self.e - np.average(self.e))**2.)\n residuals0 = ssr/sst\n deriv0 = np.poly1d(fitdata[0])\n deriv1 = np.polyder(deriv0, 1)\n deriv2 = np.polyder(deriv1, 1)\n deriv3 = np.polyder(deriv2, 1)\n\n self.v0 = None\n for x in np.roots(deriv1):\n if x > 0 and deriv2(x) > 0:\n self.v0 = x**(-3./2.)\n break\n\n if self.v0 is None:\n raise ValueError('No minimum!')\n\n derivV2 = 4./9. * x**5. * deriv2(x)\n derivV3 = (-20./9. * x**(13./2.) * deriv2(x) -\n 8./27. * x**(15./2.) * deriv3(x))\n bulk_modulus0 = derivV2 / x**(3./2.)\n bulk_deriv0 = -1 - x**(-3./2.) * derivV3 / derivV2\n\n self.e0 = deriv0(x)\n self.B0 = bulk_modulus0\n self.B1 = bulk_deriv0\n\n return self.v0, self.e0, self.B0, self.B1, residuals0", "def test_joint_fitter(self):\n p1 = [14.9, 0.3]\n p2 = [13, 0.4]\n A = 9.8\n p = np.r_[A, p1, p2]\n\n def model(A, p, x):\n return A * np.exp(-0.5 / p[1] ** 2 * (x - p[0]) ** 2)\n\n def errfunc(p, x1, y1, x2, y2):\n return np.ravel(\n np.r_[model(p[0], p[1:3], x1) - y1, model(p[0], p[3:], x2) - y2]\n )\n\n coeff, _ = optimize.leastsq(\n errfunc, p, args=(self.x, self.ny1, self.x, self.ny2)\n )\n assert_allclose(coeff, self.jf.fitparams, rtol=10 ** (-2))", "def rls_fit0(xdata: np.ndarray,\n ydata: np.ndarray | ma.MaskedArray) -> tuple:\n if xdata.size < 2:\n raise RuntimeError('too few points for a fit')\n if xdata.size != ydata.shape[-1]:\n raise RuntimeError('number of samples not equal for xdata, ydata')\n\n # perform all computations on 2 dimensional arrays\n img_shape = ydata.shape[:-1]\n yy1 = ydata.reshape(-1, xdata.size)\n\n # calculate weights\n if ma.isMaskedArray(ydata):\n wghts = calc_ma_weights(xdata, ma.getmaskarray(yy1))\n else:\n buff = np.concatenate(([2 * (xdata[1] - xdata[0])],\n xdata[2:] - xdata[0:-2],\n [2 * (xdata[-1] - xdata[-2])]))\n wghts = np.repeat([buff], yy1.shape[0], axis=0)\n wx1 = wghts / xdata\n wx2 = wghts / xdata ** 2\n\n # calculate the Q elements\n q00 = wghts.sum(axis=1)\n q11 = (wx1 * yy1).sum(axis=1)\n q22 = (wx2 * yy1 ** 2).sum(axis=1)\n\n # calculate fit parameter and its variance\n num = yy1.count(axis=1) if ma.isMaskedArray(ydata) else len(xdata)\n cc1 = q11 / q00\n if ma.isMaskedArray(ydata):\n cc1[num < 1] = ma.masked\n chi2 = ma.abs(q22 - q00 * cc1 ** 2) / np.clip(num - 1, 1, None)\n chi2[num <= 1] = ma.masked\n sc1 = ma.sqrt(chi2 / q00)\n return (cc1.reshape(img_shape).filled(np.nan),\n sc1.reshape(img_shape).filled(np.nan))\n\n # using only non-MaskedArray functions\n cc1[num < 1] = np.nan\n chi2 = np.abs(q22 - q00 * cc1 ** 2) / np.clip(num - 1, 1, None)\n chi2[num <= 1] = np.nan\n sc1 = np.sqrt(chi2 / q00)\n return cc1.reshape(img_shape), sc1.reshape(img_shape)", "def fit_and_report(model, X, y, 
Xv, yv, mode = 'regression'):\n model.fit(X, y)\n if mode.lower().startswith('regress'):\n errors = [mean_squared_error(y, model.predict(X)), mean_squared_error(yv, model.predict(Xv))]\n if mode.lower().startswith('classif'):\n errors = [1 - model.score(X,y), 1 - model.score(Xv,yv)] \n \n # tests\n assert len(errors) ==2, 'the len of errors is 2'\n \n return errors", "def test_fit(self):\n X,Y,Z = self.generate_data()\n\n p={'k':-1,'r':0}\n key = (p['r'],p['k'])\n\n task = mmSCHPOLY()\n fit_result = task.fit(X,Y,Z)\n\n self.assertEqual(fit_result.best_poly_df[key].shape[0]==4,True)\n\n task2 = mmSCH2W()\n fit_result = task2.fit(X,Y,Z)\n\n self.assertEqual(fit_result.best_inter_df[key].shape[0]==6,True)", "def V_fit(x, a, b, c, d, e, f):\n x1 = x[0] # I\n x2 = x[1] # dT\n m = (a * x1 ** 2 + b * x1 + c)\n b = (d * x1 ** 2 + e * x1 + f)\n return m * x2 + b", "def overall_fitting(filepath, system_information, model_function, model_function_for_ib, fitting_method):\n # load data\n data = load_data(filepath)\n\n x = data[\"x\"]\n\n parameters = []\n overall_rmse = []\n # cpu fit\n cpu_parameters = fitting(\n x, data[\"CPU Time\"], model_function, fitting_method, system_information + \"_CPU\")\n cpu_rmse = calculate_rmse(\n x, data[\"CPU Time\"], model_function, cpu_parameters)\n parameters.append(cpu_parameters)\n overall_rmse.append(cpu_rmse)\n\n # mb fit\n mb_parameters = fitting(\n x, data[\"Memory Time\"], model_function, fitting_method, system_information + \"_memory\")\n mb_rmse = calculate_rmse(\n x, data[\"Memory Time\"], model_function, mb_parameters)\n parameters.append(mb_parameters)\n overall_rmse.append(mb_rmse)\n\n # ib fit\n ib_parameters = fitting(\n x, data[\"MPI Time\"], model_function_for_ib, fitting_method, system_information + \"_IB\")\n ib_rmse = calculate_rmse(\n x, data[\"MPI Time\"], model_function_for_ib, ib_parameters)\n parameters.append(ib_parameters)\n overall_rmse.append(ib_rmse)\n\n return parameters, overall_rmse", "def _fit(self, X):\n n = len(X)\n\n # Initialise start vectors\n w = [0] * n\n for i in range(n):\n if self.randomState is None:\n w[i] = np.ones((X[i].shape[1], 1))\n else:\n w[i] = self.randomState.rand(X[i].shape[1], 1)\n w[i] = w[i] / np.linalg.norm(w[i])\n\n # Find model (Gauss-Siedel iteration)\n func_val = [self.f(X, w)]\n for it in range(self.max_iter):\n for i in range(n):\n wi = 0.0\n for j in range(n):\n if self.pred_comp[i][j] > 0:\n wi += np.dot(X[i].T, np.dot(X[j], w[j]))\n norm_wi = np.linalg.norm(wi)\n if norm_wi > consts.TOLERANCE:\n wi /= norm_wi\n w[i] = wi\n\n func_val.append(self.f(X, w))\n\n if it >= 1:\n err = func_val[-1] - func_val[-2]\n else:\n err = func_val[-1]\n\n self.num_iter = it + 1\n\n if abs(err) < consts.TOLERANCE:\n break\n\n # Find all model vectors\n t = [0] * n\n p = [0] * n\n for i in range(n):\n t[i] = np.dot(X[i], w[i])\n p[i] = np.dot(X[i].T, t[i])\n titi = np.linalg.norm(t[i])**2.0\n if titi > consts.TOLERANCE:\n p[i] = p[i] / titi\n else:\n self.warn(\"Too small joint component for matrix %d! \"\n \"Trying to continue!\" % (i,))\n\n # Normalise P? 
It matters whether we update with W or with P!\n # TODO: Option?\n# normp = norm(P{i});\n# P{i} = P{i} / normp;\n# T{i} = T{i} * normp;\n\n return w, t, p, func_val", "def residual(pars, data= None):\n\n\t\t\tresid = np.array([])\n\n\n\t\t\t# make residual per data set\n\n\t\t\tfor N in range(n_annulus):\n\n\t\t\t\tmdl_ev = 0\n\t\t\t\tr_space_k = rings_pos[N+1] - rings_pos[N] \n\t\t\t\tmask = np.where( (r_n >= rings_pos[N] ) & (r_n < rings_pos[N+1]) )\n\t\t\t\tx,y = XY_mesh[0][mask], XY_mesh[1][mask] \n\t\t\t\tXY = (x,y)\n\n\n\n\n\t\t\t\tfor kk in range(2):\n\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, N+kk, XY, r_0 = rings_pos[N], r_space = r_space_k )\n\n\t\t\t\t\tmdl_ev = mdl_ev + Vxy[kk]\n\n\n\t\t\t\t\tif N == 0 and kk == 0:\n\t\t\t\t\t\t\n\t\t\t\t\t\tmask1 = np.where( (r_n < rings_pos[0] ) )\n\t\t\t\t\t\tx1,y1 = XY_mesh[0][mask1], XY_mesh[1][mask1] \n\t\t\t\t\t\tXY1 = (x1,y1)\n\n\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\t# inner interpolation\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\n\t\t\t\t\t\t#(a) velocity rise linearly from zero\n\n\t\t\t\t\t\tr_space_0 = rings_pos[0]\n\t\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, 0, XY1, r_0 = 0, r_space = r_space_0)\n\t\t\t\t\t\n\t\t\t\t\t\tinterp_model[mask1] = Vxy[1] + Vsys\n\n\t\t\t\tinterp_model[mask] = mdl_ev + Vsys\n\n\n\n\n\t\t\t\n\t\t\tsigma = np.sqrt(e_vel_map**2 + e_ISM**2)\n\t\t\t#interp_model[interp_model == 0] = np.nan\n\n\n\t\t\tconvolved = 0\n\n\t\t\tif convolved == True:\n\n\t\t\t\tmy_beam = Beam(2.5*u.arcsec, 2.5*u.arcsec, 0*u.deg)\n\t\t\t\tpix_scale = pixel_scale * u.arcsec\n\t\t\t\tgauss_kern = my_beam.as_kernel(pix_scale, x_size = nx, y_size = ny)\n\n\n\t\t\t\textend = np.zeros((3*ny,3*nx))\n\t\t\t\textend[ny:2*ny,nx:2*nx] = interp_model\n\t\t\t\tconvolve_extend = convolve_fft(extend, gauss_kern, mask = extend == 0 )\n\t\t\t\tinterp_model_conv = convolve_extend[ny:2*ny,nx:2*nx]\n\t\t\t\tinterp_model_conv[interp_model == 0] = 0\n\n\n\n\n\t\t\telse:\n\t\t\t\tinterp_model_conv = interp_model\n\n\t\t\tinterp_model_conv[interp_model_conv == 0] = np.nan\n\t\t\tres = vel_map - interp_model_conv\n\t\t\tResid = res/sigma\n\n\n\t\t\treturn Resid.flatten()", "def fit(self, X, Y):\n\n # copy since this will contains the residuals (deflated) matrices\n check_consistent_length(X, Y)\n X = check_array(X, dtype=np.float64, copy=True)\n Y = check_array(Y, dtype=np.float64, copy=True, ensure_2d=False)\n if Y.ndim == 1:\n Y = Y.reshape(-1, 1)\n\n n = X.shape[0]\n p = X.shape[1]\n q = Y.shape[1]\n\n if self.n_components < 1 or self.n_components > p:\n raise ValueError('Invalid number of components: %d' %\n self.n_components)\n # Scale (in place)\n X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ = (\n _center_scale_xy(X, Y, self.scale))\n # Residuals (deflated) matrices\n Xk = X.copy()\n Yk = Y.copy()\n# STEP 1\n self.x_params = params_initialize(kind=self.x_kind)\n self.y_params = params_initialize(kind=self.y_kind)\n # Results matrices\n# STEP 2\n self.x_scores_ = np.zeros((n, self.n_components))\n self.y_scores_ = np.zeros((n, self.n_components))\n self.x_weights_ = np.zeros((p, self.n_components))\n self.y_weights_ = np.zeros((q, self.n_components))\n self.x_loadings_ = np.zeros((p, self.n_components))\n self.y_loadings_ = np.zeros((q, self.n_components))\n self.n_iter_ = []\n\n # NIPALS algo: outer loop, over components\n# STEP 3\n for k in range(self.n_components):\n if np.all(np.dot(Yk.T, Yk) < np.finfo(np.double).eps):\n # Yk constant\n warnings.warn('Y residual constant at iteration %s' % k)\n break\n # 1) weights estimation 
(inner loop)\n # -----------------------------------\n# STEP 17\n x_weights, y_weights, n_iter_ = \\\n _nipals_twoblocks_inner_loop(\n X=Xk, Y=Yk, max_iter=self.max_iter,\n tol=self.tol, x_kind=self.x_kind, y_kind=self.y_kind,\n x_params=self.x_params, y_params=self.y_params, flag_first_iter=(k == 0),\n learning_rate=self.learning_rate)\n self.n_iter_.append(n_iter_)\n # Forces sign stability of x_weights and y_weights\n # Sign undeterminacy issue from svd if algorithm == \"svd\"\n # and from platform dependent computation if algorithm == 'nipals'\n x_weights, y_weights = svd_flip(x_weights, y_weights.T)\n y_weights = y_weights.T\n # compute scores\n \n Xk_hat = f(Xk, kind=self.x_kind, params=self.x_params)\n Yk_hat = f(Yk, kind=self.y_kind, params=self.y_params)\n \n x_scores = np.dot(Xk_hat, x_weights)\n y_ss = np.dot(y_weights.T, y_weights)\n y_scores = np.dot(Yk_hat, y_weights) / y_ss\n # test for null variance\n if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:\n warnings.warn('X scores are null at iteration %s' % k)\n break\n # 2) Deflation (in place)\n # ----------------------\n # Possible memory footprint reduction may done here: in order to\n # avoid the allocation of a data chunk for the rank-one\n # approximations matrix which is then subtracted to Xk, we suggest\n # to perform a column-wise deflation.\n #\n# STEP 19\n x_loadings = np.dot(Xk_hat.T, x_scores) / np.dot(x_scores.T, x_scores)\n y_loadings = (np.dot(Yk_hat.T, x_scores)\n / np.dot(x_scores.T, x_scores))\n # - regress Xk's on x_score\n # - subtract rank-one approximations to obtain remainder matrix\n# STEP 22\n Xk_hat -= np.dot(x_scores, x_loadings.T)\n # - regress Yk's on x_score, then subtract rank-one approx.\n# STEP 23\n Yk_hat -= np.dot(x_scores, y_loadings.T)\n# STEP 24\n Xk = finv(Xk_hat, kind=self.x_kind, params=self.x_params)\n Yk = finv(Yk_hat, kind=self.y_kind, params=self.y_params)\n # 3) Store weights, scores and loadings # Notation:\n self.x_scores_[:, k] = x_scores.ravel() # T\n self.y_scores_[:, k] = y_scores.ravel() # U\n self.x_weights_[:, k] = x_weights.ravel() # W\n self.y_weights_[:, k] = y_weights.ravel() # C\n self.x_loadings_[:, k] = x_loadings.ravel() # P\n self.y_loadings_[:, k] = y_loadings.ravel() # Q\n # Such that: X = TP' + Err and Y = UQ' + Err\n\n # 4) rotations from input space to transformed space (scores)\n # T = X W(P'W)^-1 = XW* (W* : p x k matrix)\n # U = Y C(Q'C)^-1 = YC* (C* : q x k matrix)\n self.x_rotations_ = np.dot(\n self.x_weights_,\n pinv2(np.dot(self.x_loadings_.T, self.x_weights_),\n check_finite=False))\n if Y.shape[1] > 1:\n self.y_rotations_ = np.dot(\n self.y_weights_,\n pinv2(np.dot(self.y_loadings_.T, self.y_weights_),\n check_finite=False))\n else:\n self.y_rotations_ = np.ones(1)\n\n # Estimate regression coefficient\n # Regress Y on T\n # Y = TQ' + Err,\n # Then express in function of X\n # Y = X W(P'W)^-1Q' + Err = XB + Err\n # => B = W*Q' (p x q)\n self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)\n # self.coef_ = self.coef_ * self.y_std_\n return self", "def residual(pars, data= None):\n\n\t\t\tresid = np.array([])\n\n\n\t\t\t# make residual per data set\n\n\t\t\tfor N in range(n_annulus):\n\n\t\t\t\tmdl_ev = 0\n\t\t\t\tr_space_k = rings_pos[N+1] - rings_pos[N] \n\t\t\t\tmask = np.where( (r_n >= rings_pos[N] ) & (r_n < rings_pos[N+1]) )\n\t\t\t\tx,y = XY_mesh[0][mask], XY_mesh[1][mask] \n\t\t\t\tXY = (x,y)\n\n\n\n\n\t\t\t\tfor kk in range(2):\n\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, N+kk, XY, r_0 = rings_pos[N], r_space = 
r_space_k)\n\n\t\t\t\t\tmdl_ev = mdl_ev + Vxy[kk]\n\n\n\t\t\t\t\tif N == 0 and kk == 0:\n\t\t\t\t\t\t\n\t\t\t\t\t\tmask1 = np.where( (r_n < rings_pos[0] ) )\n\t\t\t\t\t\tx1,y1 = XY_mesh[0][mask1], XY_mesh[1][mask1] \n\t\t\t\t\t\tXY1 = (x1,y1)\n\n\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\t# inner interpolation\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\n\t\t\t\t\t\t#(a) velocity rise linearly from zero\n\n\t\t\t\t\t\tr_space_0 = rings_pos[0]\n\t\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, 0, XY1, r_0 = 0, r_space = r_space_0)\n\t\t\t\t\t\n\t\t\t\t\t\tinterp_model[mask1] = Vxy[1] + Vsys\n\n\t\t\t\tinterp_model[mask] = mdl_ev + Vsys\n\n\n\n\n\t\t\t\n\t\t\tsigma = np.sqrt(e_vel_map**2 + e_ISM**2)\n\n\n\n\t\t\tconvolved = 0\n\n\t\t\tif convolved == True:\n\n\t\t\t\tmy_beam = Beam(2.5*u.arcsec, 2.5*u.arcsec, 0*u.deg)\n\t\t\t\tpix_scale = pixel_scale * u.arcsec\n\t\t\t\tgauss_kern = my_beam.as_kernel(pix_scale, x_size = nx, y_size = ny)\n\n\n\t\t\t\textend = np.zeros((3*ny,3*nx))\n\t\t\t\textend[ny:2*ny,nx:2*nx] = interp_model\n\t\t\t\tconvolve_extend = convolve_fft(extend, gauss_kern, mask = extend == 0 )\n\t\t\t\tinterp_model_conv = convolve_extend[ny:2*ny,nx:2*nx]\n\t\t\t\tinterp_model_conv[interp_model == 0] = 0\n\n\n\t\t\telse:\n\t\t\t\tinterp_model_conv = interp_model\n\n\t\t\tinterp_model[interp_model == 0] = np.nan\n\t\t\tres = vel_map - interp_model_conv\n\t\t\tResid = res/sigma\n\n\n\n\t\t\treturn Resid.flatten()", "def residual(pars, data= None):\n\n\t\t\tresid = np.array([])\n\n\n\t\t\t# make residual per data set\n\n\t\t\tfor N in range(n_annulus):\n\n\t\t\t\tmdl_ev = 0\n\t\t\t\tr_space_k = rings_pos[N+1] - rings_pos[N] \n\t\t\t\tmask = np.where( (r_n >= rings_pos[N] ) & (r_n < rings_pos[N+1]) )\n\t\t\t\tx,y = XY_mesh[0][mask], XY_mesh[1][mask] \n\t\t\t\tXY = (x,y)\n\n\n\n\n\t\t\t\tfor kk in range(2):\n\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, N+kk, XY, r_0 = rings_pos[N], r_space = r_space_k)\n\n\t\t\t\t\tmdl_ev = mdl_ev + Vxy[kk]\n\n\n\t\t\t\t\tif N == 0 and kk == 0:\n\t\t\t\t\t\t\n\t\t\t\t\t\tmask1 = np.where( (r_n < rings_pos[0] ) )\n\t\t\t\t\t\tx1,y1 = XY_mesh[0][mask1], XY_mesh[1][mask1] \n\t\t\t\t\t\tXY1 = (x1,y1)\n\n\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\t# inner interpolation\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\n\t\t\t\t\t\t#(a) velocity rise linearly from zero\n\n\t\t\t\t\t\tr_space_0 = rings_pos[0]\n\t\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, 0, XY1, r_0 = 0, r_space = r_space_0)\n\t\t\t\t\t\n\t\t\t\t\t\tinterp_model[mask1] = Vxy[1] + Vsys\n\n\t\t\t\tinterp_model[mask] = mdl_ev + Vsys\n\n\n\n\n\t\t\t\n\t\t\tsigma = np.sqrt(e_vel_map**2 + e_ISM**2)\n\n\n\n\t\t\tconvolved = 0\n\n\t\t\tif convolved == True:\n\n\t\t\t\tmy_beam = Beam(2.5*u.arcsec, 2.5*u.arcsec, 0*u.deg)\n\t\t\t\tpix_scale = pixel_scale * u.arcsec\n\t\t\t\tgauss_kern = my_beam.as_kernel(pix_scale, x_size = nx, y_size = ny)\n\n\n\t\t\t\textend = np.zeros((3*ny,3*nx))\n\t\t\t\textend[ny:2*ny,nx:2*nx] = interp_model\n\t\t\t\tconvolve_extend = convolve_fft(extend, gauss_kern, mask = extend == 0 )\n\t\t\t\tinterp_model_conv = convolve_extend[ny:2*ny,nx:2*nx]\n\t\t\t\tinterp_model_conv[interp_model == 0] = 0\n\n\n\t\t\telse:\n\t\t\t\tinterp_model_conv = interp_model\n\n\n\t\t\tinterp_model_conv[interp_model_conv == 0] = np.nan\n\t\t\tres = vel_map - interp_model_conv\n\t\t\tResid = res/sigma\n\n\t\t\treturn Resid.flatten()", "def residual(pars, data= None):\n\n\t\t\tresid = np.array([])\n\n\n\t\t\t# make residual per data set\n\n\t\t\tfor N in range(n_annulus):\n\n\t\t\t\tmdl_ev = 0\n\t\t\t\tr_space_k = 
rings_pos[N+1] - rings_pos[N] \n\t\t\t\tmask = np.where( (r_n >= rings_pos[N] ) & (r_n < rings_pos[N+1]) )\n\t\t\t\tx,y = XY_mesh[0][mask], XY_mesh[1][mask] \n\t\t\t\tXY = (x,y)\n\n\n\n\n\t\t\t\tfor kk in range(2):\n\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, N+kk, XY, r_0 = rings_pos[N], r_space = r_space_k)\n\n\t\t\t\t\tmdl_ev = mdl_ev + Vxy[kk]\n\n\n\t\t\t\t\tif N == 0 and kk == 0:\n\t\t\t\t\t\t\n\t\t\t\t\t\tmask1 = np.where( (r_n < rings_pos[0] ) )\n\t\t\t\t\t\tx1,y1 = XY_mesh[0][mask1], XY_mesh[1][mask1] \n\t\t\t\t\t\tXY1 = (x1,y1)\n\n\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\t# inner interpolation\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\n\t\t\t\t\t\t#(a) velocity rise linearly from zero\n\n\t\t\t\t\t\tr_space_0 = rings_pos[0]\n\t\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, 0, XY1, r_0 = 0, r_space = r_space_0)\n\t\t\t\t\t\n\t\t\t\t\t\tinterp_model[mask1] = Vxy[1] + Vsys\n\n\t\t\t\tinterp_model[mask] = mdl_ev + Vsys\n\n\n\n\n\t\t\t\n\t\t\tsigma = np.sqrt(e_vel_map**2 + e_ISM**2)\n\n\n\n\t\t\tconvolved = 0\n\n\t\t\tif convolved == True:\n\n\t\t\t\tmy_beam = Beam(2.5*u.arcsec, 2.5*u.arcsec, 0*u.deg)\n\t\t\t\tpix_scale = pixel_scale * u.arcsec\n\t\t\t\tgauss_kern = my_beam.as_kernel(pix_scale, x_size = nx, y_size = ny)\n\n\n\t\t\t\textend = np.zeros((3*ny,3*nx))\n\t\t\t\textend[ny:2*ny,nx:2*nx] = interp_model\n\t\t\t\tconvolve_extend = convolve_fft(extend, gauss_kern, mask = extend == 0 )\n\t\t\t\tinterp_model_conv = convolve_extend[ny:2*ny,nx:2*nx]\n\t\t\t\tinterp_model_conv[interp_model == 0] = 0\n\n\n\t\t\telse:\n\t\t\t\tinterp_model_conv = interp_model\n\n\n\t\t\tinterp_model_conv[interp_model_conv == 0] = np.nan\n\t\t\tres = vel_map - interp_model_conv\n\t\t\tResid = res/sigma\n\n\n\t\t\treturn Resid.flatten()", "def combine_models(model1,coef1,model2,coef2):\n\n # find global parameters (try to be self-consistent):\n\n # this first part is simply a linear combination:\n glb = np.empty((nglb,),dtype=gtype)\n glb[0:nlin] = coef1*model1.glb[0:nlin] + coef2*model2.glb[0:nlin]\n\n # this next part depends on previous results:\n glb[iradius] = (glb[imass]/(coef1*model1.glb[imass]/model1.glb[iradius]**3\n + coef2*model2.glb[imass]/model2.glb[iradius]**3))**(1.0/3.0)\n cnst1 = model1.glb[iluminosity]/(model1.glb[iradius]**2*model1.glb[itemperature]**4)\n cnst2 = model2.glb[iluminosity]/(model2.glb[iradius]**2*model2.glb[itemperature]**4)\n glb[iluminosity] = (coef1*cnst1 + coef2*cnst2)*glb[iradius]**2*glb[itemperature]**4\n # glb[ifreq_ref] will be correctly defined when the Model() constructor is invoked\n\n # interpolate spectra:\n size3 = min(model1.modes.shape[0],model2.modes.shape[0])\n nvalues = np.empty((size3,),dtype=ntype)\n lvalues = np.empty((size3,),dtype=ltype)\n fvalues = np.empty((size3,),dtype=ftype)\n ivalues = np.empty((size3,),dtype=ftype)\n\n # sanity check:\n if (size3 == 0):\n return Model(glb, _modes=list(zip(nvalues,lvalues,fvalues,ivalues)))\n else:\n nvalues,lvalues,fvalues,ivalues,n3 = aims_fortran.combine_modes( \\\n coef1,model1.modes['n'],model1.modes['l'],model1.modes['freq'],model1.modes['inertia'], \\\n coef2,model2.modes['n'],model2.modes['l'],model2.modes['freq'],model2.modes['inertia'], \\\n nvalues,lvalues,fvalues,ivalues)\n return Model(glb, _modes=list(zip(nvalues[0:n3],lvalues[0:n3],fvalues[0:n3],ivalues[0:n3])))", "def fit(train_data, train_target):\r\n for name in models.keys():\r\n est = models[name]\r\n est_params = params2[name]\r\n gscv = GridSearchCV(estimator=est, param_grid=est_params, cv=5)\r\n gscv.fit(train_data, train_target)\r\n 
print(\"best parameters are: {}\".format(gscv.best_estimator_))\r\n print(\"Where we selected the parameters: {}\" .format(gscv.cv_results_['params'][gscv.best_index_]))\r\n print(\"with mean cross-validated score: {}\" .format(gscv.best_score_))", "def set_fit_params(self):\n\n self.p0 = np.array([self.A_arr, self.T_a])\n # initial guess at A_arr and T_a\n\n self.popt, self.pcov = curve_fit(\n self.get_eta_fit, self.T_exp, self.eta_exp, p0=self.p0\n )\n\n self.A_arr = self.popt[0]\n self.T_a = self.popt[1]\n\n self.T_array = self.T_model", "def SVMRegression(trainingObs,trainingParam,Obs):\n svr = SVR(C=1.0, cache_size=2000, coef0=0.0, degree=3, epsilon=0.1, gamma=0., kernel='rbf', probability=False, shrinking=True, tol=0.00001)\n nparam=trainingParam.shape[1]\n vparam = []\n for i in range(0,nparam):\n print i\n svr.fit(trainingObs,trainingParam[:,i])\n vparam.append(svr.predict(Obs))\n vparam = np.array(vparam)\n return vparam.T", "def a_test2_bbvi():\n model = ARIMAX(formula=\"y ~ x1 + x2\", data=data, ar=1, ma=1, family=Exponential())\n x = model.fit('BBVI',iterations=100)\n assert(len(model.latent_variables.z_list) == 5)\n lvs = np.array([i.value for i in model.latent_variables.z_list])\n assert(len(lvs[np.isnan(lvs)]) == 0)", "def a_test2_bbvi():\n model = ARIMAX(formula=\"y ~ x1 + x2\", data=data, ar=1, ma=1, family=Exponential())\n x = model.fit('BBVI',iterations=100)\n assert(len(model.latent_variables.z_list) == 5)\n lvs = np.array([i.value for i in model.latent_variables.z_list])\n assert(len(lvs[np.isnan(lvs)]) == 0)", "def multifit(self, p0s, verbose=True):\n t1 = time.perf_counter()\n # fit first (hopefully larger) spot\n opts1 = self.singlefit(p0s)\n t2 = time.perf_counter()\n if verbose:\n print('FIRST FIT: {0:.2f} s'.format(t2 - t1))\n opts = np.array([])\n for i in range(1, self.n_spots):\n t2 = time.perf_counter()\n p = []\n for p1 in opts1:\n y_r = self.y\n # let current light curve be the residual from previously fitted spots\n self.y = y_r - self.solve(p1) + 1\n opts2 = self.singlefit(p0s, star_params=p1[:3])\n # retrieve original light curve\n self.y = y_r\n for p2 in opts2:\n p.append(np.append(p1, p2[3:]))\n t3 = time.perf_counter()\n if verbose:\n print('MULTIFIT #{1}: {0:.2f} s'.format(t3 - t2, i))\n # for each new spot, do a simultaneous fit of all parameters so far\n opts, sses = self.llsq(p)\n t4 = time.perf_counter()\n if verbose:\n print('SIMULFIT #{1}: {0:.2f} s'.format(t4 - t3, i))\n # sort fits with respect to chi\n mask = np.isfinite(sses)\n sses = np.asarray(sses)[mask]\n opts = np.asarray(opts)[mask]\n sorted_ids = np.argsort(sses)\n opts = opts[sorted_ids]\n # opts stores all spots fitted so far\n opts1 = opts\n t4 = time.perf_counter()\n if verbose:\n print('TOTAL: {0:.2f} s'.format(t4 - t1))\n return opts", "def test_joint_parameter(self):\n assert_allclose(self.jf.fitparams[0], self.g1.parameters[0])\n assert_allclose(self.jf.fitparams[0], self.g2.parameters[0])", "def get_data():\r\n if not path_validation(MODEL_PATH, read_access=True):\r\n exit(0) \r\n if not path_validation(TEST_DATA_PATH, read_access=True):\r\n exit(0) \r\n if not path_validation(TEST_LABEL_PATH, read_access=True):\r\n exit(0) \r\n\r\n params = joblib.load(MODEL_PATH)\r\n test_images = np.load(TEST_DATA_PATH)\r\n test_labels = np.load(TEST_LABEL_PATH)\r\n\r\n # Addition of bias in test set\r\n test_images = np.insert(test_images, 0, 1, axis=1)\r\n\r\n return params, test_images, test_labels", "def rls_fit(xdata: np.ndarray,\n ydata: np.ndarray | ma.MaskedArray) -> tuple:\n # 
pylint: disable=too-many-locals\n if xdata.size < 2:\n raise RuntimeError('too few sample points for a fit')\n if xdata.size != ydata.shape[-1]:\n raise RuntimeError('number of samples not equal for xdata, ydata')\n\n # perform all computations on 2 dimensional arrays\n img_shape = ydata.shape[:-1]\n yy1 = ydata.reshape(-1, xdata.size)\n\n # calculate weights\n if ma.isMaskedArray(ydata):\n wghts = calc_ma_weights(xdata, ma.getmaskarray(yy1))\n else:\n buff = np.concatenate(([2 * (xdata[1] - xdata[0])],\n xdata[2:] - xdata[0:-2],\n [2 * (xdata[-1] - xdata[-2])]))\n wghts = np.repeat([buff], yy1.shape[0], axis=0)\n wx1 = wghts / xdata\n wx2 = wghts / xdata ** 2 # is wx1 / xdata faster?\n\n # calculate the Q elements\n q00 = wghts.sum(axis=1)\n q01 = wx1.sum(axis=1)\n q02 = wx2.sum(axis=1)\n\n q11 = (wx1 * yy1).sum(axis=1)\n q12 = (wx2 * yy1).sum(axis=1)\n q22 = (wx2 * yy1 ** 2).sum(axis=1)\n\n # calculate the Z elements\n zz1 = q00 * q02 - q01 ** 2\n zz2 = q00 * q12 - q01 * q11\n zz3 = q02 * q11 - q01 * q12\n\n # calculate fit parameters and their uncertainties\n num = yy1.count(axis=1) if ma.isMaskedArray(ydata) else len(xdata)\n cc0 = zz2 / zz1\n cc1 = zz3 / zz1\n if ma.isMaskedArray(ydata):\n chi2 = ma.abs(q22 - q12 * cc0 - q11 * cc1) / np.clip(num - 2, 1, None)\n chi2[num <= 2] = 0\n sc0 = ma.sqrt(q00 * chi2 / zz1)\n sc1 = ma.sqrt(q02 * chi2 / zz1)\n\n return (cc0.reshape(img_shape).filled(np.nan),\n cc1.reshape(img_shape).filled(np.nan),\n sc0.reshape(img_shape).filled(np.nan),\n sc1.reshape(img_shape).filled(np.nan))\n\n # using only non-MaskedArray functions\n chi2 = np.abs(q22 - q12 * cc0 - q11 * cc1) / np.clip(num - 2, 1, None)\n chi2[num <= 2] = 0\n sc0 = np.sqrt(q00 * chi2 / zz1)\n sc1 = np.sqrt(q02 * chi2 / zz1)\n\n return (cc0.reshape(img_shape), cc1.reshape(img_shape),\n sc0.reshape(img_shape), sc1.reshape(img_shape))", "def fit_ij(self, i, j, **kwargs):\n assert i != j, \"i and j must be different\"\n\n # Get the layer of interest in the list of circuits\n l = self._find_layer(i, j)\n\n # Take the circuits of interest\n circuits = self._circuits[0:3]\n circuits += self._circuits[(3 + 6*l) : (3 + 6*(l+1))]\n\n # This will create an empty _data dict for the fit function\n # We are using a member field so that we can use the super() fit \n # function\n self._data = {}\n\n # Process measurement counts into probabilities\n for circ in circuits:\n # Take only the relevant qubit labels from the circuit label\n tup = literal_eval(circ.name)\n tup = (tup[i], tup[j])\n\n # Marginalize the counts for the two relevant qubits\n counts = marginal_counts(self._result.get_counts(circ), [i, j])\n\n # Populate the data\n self._data[tup] = counts\n\n # Test that all the required measurements are there\n expected_corr = product(['X', 'Y', 'Z'], ['X', 'Y', 'Z'])\n if set(self._data.keys()) != set(expected_corr):\n raise Exception(\"Could not find all the measurements required for tomography\")\n\n # Do the actual fit\n result = super().fit(**kwargs)\n\n # clear the _data field\n self._data = None\n return result", "def fit_me(X, Y, n_components = 2, period = 24, model_type = 'lin', lin_comp = False, alpha = 0, name = '', save_to = '', plot=True, plot_residuals=False, plot_measurements=True, plot_margins=True, return_model = False, color = False, plot_phase = True, hold=False, x_label = \"\", y_label = \"\"):\n X_test = np.linspace(0, 100, 1000)\n\n if n_components == 0:\n X_fit = X\n X_fit_test = X_test\n lin_comp = True\n else:\n for i in range(n_components):\n n = i+1\n\n A = 
np.sin((X/(period/n))*np.pi*2)\n B = np.cos((X/(period/n))*np.pi*2) \n A_test = np.sin((X_test/(period/n))*np.pi*2)\n B_test = np.cos((X_test/(period/n))*np.pi*2)\n\n if not i:\n X_fit = np.column_stack((A, B))\n X_fit_test = np.column_stack((A_test, B_test)) \n else:\n X_fit = np.column_stack((X_fit, np.column_stack((A, B))))\n X_fit_test = np.column_stack((X_fit_test, np.column_stack((A_test, B_test))))\n\n \n X_fit_eval_params = X_fit_test\n \n if lin_comp and n_components:\n X_fit = np.column_stack((X, X_fit))\n X_fit_eval_params = np.column_stack((np.zeros(len(X_test)), X_fit_test))\n X_fit_test = np.column_stack((X_test, X_fit_test)) \n\n\n #if model_type == 'lin':\n X_fit = sm.add_constant(X_fit, has_constant='add')\n X_fit_test = sm.add_constant(X_fit_test, has_constant='add')\n X_fit_eval_params = sm.add_constant(X_fit_eval_params, has_constant='add')\n \"\"\"\n ###\n # fit\n ###\n \"\"\" \n if model_type == 'lin':\n model = sm.OLS(Y, X_fit)\n results = model.fit()\n elif model_type == 'poisson':\n model = sm.GLM(Y, X_fit, family=sm.families.Poisson())\n results = model.fit()\n elif model_type =='gen_poisson':\n model = statsmodels.discrete.discrete_model.GeneralizedPoisson(Y, X_fit)\n results = model.fit()\n elif model_type == 'nb':\n #exposure = np.zeros(len(Y))\n #exposure[:] = np.mean(Y)\n #model = sm.GLM(Y, X_fit, family=sm.families.NegativeBinomial(), exposure = exposure)\n \n \n # https://towardsdatascience.com/negative-binomial-regression-f99031bb25b4\n # https://dius.com.au/2017/08/03/using-statsmodels-glms-to-model-beverage-consumption/#cameron\n if not alpha:\n train_model = sm.GLM(Y, X_fit, family=sm.families.Poisson())\n train_results = train_model.fit()\n\n df_train = pd.DataFrame()\n df_train['Y'] = Y\n df_train['mu'] = train_results.mu\n df_train['AUX_OLS_DEP'] = df_train.apply(lambda x: ((x['Y'] - x['mu'])**2 - x['Y']) / x['mu'], axis=1)\n ols_expr = \"\"\"AUX_OLS_DEP ~ mu - 1\"\"\"\n aux_olsr_results = smf.ols(ols_expr, df_train).fit()\n\n alpha=aux_olsr_results.params[0]\n #print(alpha)\n\n model = sm.GLM(Y, X_fit, family=sm.families.NegativeBinomial(alpha=alpha))\n \n results = model.fit()\n else:\n print(\"Invalid option\")\n return\n\n \n if model_type =='lin':\n Y_fit = results.fittedvalues\n else:\n Y_fit = results.predict(X_fit)\n \n \n if model_type in ['lin', 'poisson', 'nb']:\n statistics = calculate_statistics(X, Y, Y_fit, n_components, period, lin_comp)\n if model_type in ['poisson', 'nb']:\n statistics['count'] = np.sum(Y) \n else:\n RSS = sum((Y - Y_fit)**2)\n p = results.llr_pvalue\n statistics = {'p':p, 'RSS':RSS, 'count': np.sum(Y)}\n \n Y_test = results.predict(X_fit_test)\n Y_eval_params = results.predict(X_fit_eval_params)\n \n rhythm_params = evaluate_rhythm_params(X_test, Y_eval_params)\n \n \"\"\"\n ###\n # plot\n ###\n \"\"\"\n if plot:\n if plot_margins:\n if model_type == 'lin':\n sdev, lower, upper = wls_prediction_std(results, exog=X_fit_test, alpha=0.05)\n if color:\n plt.fill_between(X_test, lower, upper, color=color, alpha=0.1)\n else:\n plt.fill_between(X_test, lower, upper, color='#888888', alpha=0.1)\n else:\n res2 = copy.deepcopy(results)\n params = res2.params\n CIs = results.conf_int()\n \n #N = 512\n N = 1024\n \n if n_components == 1:\n #N2 = 8\n N2 = 10\n elif n_components == 2:\n #N2 = 6\n N2 = 8\n else: \n #N2 = 8 - n_components \n N2 = 10 - n_components \n \n \n P = np.zeros((len(params), N2))\n \n for i, CI in enumerate(CIs):\n P[i,:] = np.linspace(CI[0], CI[1], N2)\n \n amplitude_CI = [rhythm_params['amplitude']]\n 
mesor_CI = [rhythm_params['mesor']]\n acrophase_CI = [rhythm_params['acrophase']]\n \n param_samples = list(itertools.product(*P))\n N = min(N, len(param_samples))\n \n for i,p in enumerate(sample(param_samples, N)):\n res2.initialize(results.model, p) \n Y_test_CI = res2.predict(X_fit_test)\n \n rhythm_params_CI = evaluate_rhythm_params(X_test, Y_test_CI)\n amplitude_CI.append(rhythm_params_CI['amplitude'])\n mesor_CI.append(rhythm_params_CI['mesor'])\n acrophase_CI.append(rhythm_params_CI['acrophase'])\n \n \n \"\"\"\n if i == 0:\n Y_min = Y\n Y_max = Y\n else:\n Y_min = np.min(np.vstack([Y,Y_min]), axis=0)\n Y_max = np.max(np.vstack([Y,Y_max]), axis=0)\n \"\"\"\n if color and color != '#000000':\n plt.plot(X_test, Y_test_CI, color=color, alpha=0.05)\n else:\n plt.plot(X_test, Y_test_CI, color='#888888', alpha=0.05)\n \n \n #plt.fill_between(X_test, Y_min, Y_max, color='#888888', alpha=0.1)\n \n #amplitude_CI = (min(amplitude_CI), max(amplitude_CI))\n #mesor_CI = (min(mesor_CI), max(mesor_CI))\n #acrophase_CI = (min(acrophase_CI), max(acrophase_CI))\n \n rhythm_params['amplitude_CI'] = amplitude_CI\n rhythm_params['mesor_CI'] = mesor_CI\n rhythm_params['acrophase_CI'] = acrophase_CI\n \n \n ###\n if not color:\n color = 'black'\n\n if plot_measurements: \n if not hold: \n plt.plot(X,Y, 'ko', markersize=1, label = 'data', color=color)\n else:\n plt.plot(X,Y, 'ko', markersize=1, color=color)\n #plt.plot(X, results.fittedvalues, label = 'fit')\n \n if not hold:\n plt.plot(X_test, Y_test, 'k', label = 'fit', color=color)\n else:\n plt.plot(X_test, Y_test, 'k', label = name, color=color)\n #if color and not plot_margins: \n # plt.plot(X_test, Y_test, 'k', label = 'fit', color=color)\n #else:\n # plt.plot(X_test, Y_test, 'k', label = 'fit')\n \n if plot_measurements:\n X = X % period\n\n if model_type == 'lin': \n #plt.axis([min(min(X),0), 1.1*max(max(X),period), 0.9*min(min(Y), min(Y_test)), 1.1*max(max(Y), max(Y_test))])\n plt.axis([min(min(X),0), max(X), 0.9*min(min(Y), min(Y_test)), 1.1*max(max(Y), max(Y_test))])\n else:\n plt.axis([min(min(X),0), max(X), 0.9*min(min(Y), min(Y_test)), 1.1*max(max(Y), max(Y_test))])\n else:\n plt.axis([min(X_test), period, min(Y_test)*0.9, max(Y_test)*1.1])\n #plt.title(name + ', components=' + str(n_components) +' , period=' + str(period) + '\\np-value=' + str(statistics['p']) + ', p-value(gof)=' + str(statistics['p_reject']))\n #plt.title(name + ', components=' + str(n_components) +' , period=' + str(period) + '\\np-value=' + str(statistics['p']))\n if model_type == 'lin':\n if name: \n plt.title(name + ', p-value=' + \"{0:.5f}\".format(statistics['p']))\n else:\n plt.title('p-value=' + \"{0:.5f}\".format(statistics['p']))\n else:\n if name:\n plt.title(name + ', p-value=' + '{0:.3f}'.format(statistics['p']) + ' (n='+str(statistics['count'])+ ')') \n else:\n plt.title('p-value=' + '{0:.3f}'.format(statistics['p']) + ' (n='+str(statistics['count'])+ ')')\n if x_label:\n plt.xlabel(x_label)\n else:\n plt.xlabel('Time [h]')\n \n if y_label:\n plt.ylabel(y_label)\n elif model_type == 'lin':\n plt.ylabel('Measurements')\n else:\n plt.ylabel('Count')\n #fig = plt.gcf()\n #fig.set_size_inches(11,8) \n \n\n \n if not hold:\n if save_to:\n plt.savefig(save_to+'.png')\n plt.savefig(save_to+'.pdf')\n plt.close()\n else:\n plt.show()\n if plot_residuals:\n resid = results.resid\n fig = sm.qqplot(resid)\n plt.title(name)\n if save_to:\n plt.savefig(save_to+'_resid.pdf', bbox_inches='tight')\n plt.savefig(save_to+'_resid.png') \n plt.close()\n else:\n plt.show()\n \n 
if plot_phase:\n per = rhythm_params['period']\n amp = rhythm_params['amplitude']\n phase = rhythm_params['acrophase']\n if save_to:\n plot_phases([phase], [amp], [name], period=per, folder=\"\\\\\".join(save_to.split(\"\\\\\")[:-1]))\n else:\n plot_phases([phase], [amp], [name], period=per)\n\n if return_model: \n return results, statistics, rhythm_params, X_test, Y_test, model\n else: \n return results, statistics, rhythm_params, X_test, Y_test", "def fit(self, X, y):\n self.support_vectors_ = check_array(X)\n self.y = check_array(y, ensure_2d=False)\n random_state = check_random_state(self.random_state)\n self.kernel_args = {}\n if self.kernel == \"rbf\" and self.gamma is not None:\n self.kernel_args[\"gamma\"] = self.gamma\n elif self.kernel == \"poly\":\n self.kernel_args[\"degree\"] = self.degree\n self.kernel_args[\"coef0\"] = self.coef0\n elif self.kernel == \"sigmoid\":\n self.kernel_args[\"coef0\"] = self.coef0\n K = pairwise_kernels(X, metric=self.kernel, **self.kernel_args)\n self.dual_coef_ = np.zeros(X.shape[0])\n self.intercept_ = _svm.smo(\n K, y, self.dual_coef_, self.C, random_state, self.tol,\n self.numpasses, self.maxiter, self.verbose)\n # If the user was using a linear kernel, lets also compute and store\n # the weights. This will speed up evaluations during testing time.\n if self.kernel == \"linear\":\n self.coef_ = np.dot(self.dual_coef_ * self.y, self.support_vectors_)\n # only samples with nonzero coefficients are relevant for predictions\n support_vectors = np.nonzero(self.dual_coef_)\n self.dual_coef_ = self.dual_coef_[support_vectors]\n self.support_vectors_ = X[support_vectors]\n self.y = y[support_vectors]\n return self", "def residual2P2Z(paras):\n initcond = setupinitcond(paras['pfun_num'].value, paras['zoo_num'].value)\n\n\n\n model = g2P2Z(initcond, timedays_model, paras)\n\n # to implement fitting algorithm make sure to calculate residual only for the last year!\n\n # will have to 1. : simplify the data (i.e. median per month)\n # will have to put data into structure to calculate efficiently (i.e. 
pandas dataframe like df[1] = N, df[2] = Si, etc.)\n model_ly = model[1460:1825]\n\n # aggregate model output in the same way as validation data (monthly mean)\n # create month vector to add to model output dataframe for analysis\n oneyearmodel = pandas.DataFrame()\n oneyearmodel = oneyearmodel.assign(day=pandas.Series(np.linspace(1, 365, 365)))\n\n # combine two columns\n phyto_model = pandas.DataFrame(\n {'data': model_ly[:, 4], 'month': pandas.to_datetime(oneyearmodel['day'], format='%j').dt.month})\n phyto_monthly_median = phyto_model.groupby('month').median()\n phyto_resid = (phyto_monthly_median['data'].values - ChlA_monthly_median['ChlA'].values * 0.1)\n\n nitrate_model = pandas.DataFrame(\n {'data': model_ly[:, 0], 'month': pandas.to_datetime(oneyearmodel['day'], format='%j').dt.month})\n nitrate_monthly_median = nitrate_model.groupby('month').median()\n nitrate_resid = (nitrate_monthly_median['data'].values - NO3NO2_monthly_median['NO3NO2'].values * 0.1)\n\n silicate_model = pandas.DataFrame(\n {'data': model_ly[:, 1], 'month': pandas.to_datetime(oneyearmodel['day'], format='%j').dt.month})\n silicate_monthly_median = silicate_model.groupby('month').median()\n silicate_resid = (silicate_monthly_median['data'].values - SiOH_USF_monthly_median['SiOH'].values * 0.1)\n\n zoo_model = pandas.DataFrame(\n {'data': model_ly[:, 3], 'month': pandas.to_datetime(oneyearmodel['day'], format='%j').dt.month})\n zoo_monthly_median = zoo_model.groupby('month').median()\n zoo_resid = (zoo_monthly_median['data'].values - ZooBM_monthly_median['ZooBM'].values * 0.1)\n\n ss = np.concatenate((phyto_resid, nitrate_resid, silicate_resid, zoo_resid))\n return ss", "def f_refine_all(xdata, *params):\n if len(params) < 10 or len(params[10:]) % 6 != 0:\n raise ValueError('Check parameter vector encoding')\n if xdata.ndim != 1:\n raise ValueError('Check data vector encoding')\n\n intrinsics = params[:10]\n\n M = len(params[10:]) // 6\n N = xdata.shape[0] // (2 * M)\n\n # Unpack data vector\n X = xdata[:N]\n Y = xdata[N:2*N]\n model = np.zeros((N, 2))\n model[:, 0] = X\n model[:, 1] = Y\n \n # model_hom = util.to_homogeneous_3d(model)\n\n # Unpack parameters\n K, k, extrinsic_matrices = unpack_refinement_params(params)\n\n # Form observation vectors\n obs_x = np.zeros(N*M)\n obs_y = np.zeros(N*M)\n for e, E in enumerate(extrinsic_matrices):\n # Project the model into the sensor frame for each image, and append the \n # predicted points to the observation vectors\n sensor_proj = util_improved2.project(K, k, E, model)\n\n x, y = sensor_proj[:, 0], sensor_proj[:, 1]\n\n obs_x[e*N:(e+1)*N] = x\n obs_y[e*N:(e+1)*N] = y\n\n # Stack observation vectors. 
Note that we observe the same convention as before\n # in stacking all x observations prior to all y observations\n result = np.zeros(2*N*M)\n result[:N*M] = obs_x\n result[N*M:] = obs_y\n\n return result", "def fit_model(x1, x2, order=None, max_order=10,\r\n criterion=utils.bayesian_information_criterion):\r\n c_old = np.inf\r\n n_process = 2\r\n Ntotal = n_process * x1.shape[-1]\r\n\r\n # If model order was provided as an input:\r\n if order is not None:\r\n lag = order + 1\r\n Rxx = utils.autocov_vector(np.vstack([x1, x2]), nlags=lag)\r\n coef, ecov = alg.lwr_recursion(np.array(Rxx).transpose(2, 0, 1))\r\n\r\n # If the model order is not known and provided as input:\r\n else:\r\n for lag in range(1, max_order):\r\n Rxx_new = utils.autocov_vector(np.vstack([x1, x2]), nlags=lag)\r\n coef_new, ecov_new = alg.lwr_recursion(\r\n np.array(Rxx_new).transpose(2, 0, 1))\r\n order_new = coef_new.shape[0]\r\n c_new = criterion(ecov_new, n_process, order_new, Ntotal)\r\n if c_new > c_old:\r\n # Keep the values you got in the last round and break out:\r\n break\r\n\r\n else:\r\n # Replace the output values with the new calculated values and\r\n # move on to the next order:\r\n c_old = c_new\r\n order = order_new\r\n Rxx = Rxx_new\r\n coef = coef_new\r\n ecov = ecov_new\r\n else:\r\n e_s = (\"Model estimation order did not converge at max_order = %s\"\r\n % max_order)\r\n raise ValueError(e_s)\r\n\r\n return order, Rxx, coef, ecov" ]
[ "0.56063974", "0.54226416", "0.5420667", "0.5295927", "0.52612674", "0.52542776", "0.5231817", "0.5189687", "0.51571906", "0.5116141", "0.5085025", "0.5074433", "0.5074433", "0.5074433", "0.5072969", "0.5034838", "0.50345165", "0.50255895", "0.49887204", "0.49887204", "0.49748436", "0.49694175", "0.49669573", "0.49607217", "0.49605408", "0.49489817", "0.4948818", "0.49397597", "0.49199516", "0.49154142" ]
0.59477234
0
Compare two ContentItem objects with each other in a weak way we only take the filename and path into consideration and ignore constraints (as the proper __eq__ method would do).
def _content_item_comparison_weak(item_a, item_b): if item_a is None or item_b is None: log.debug("Item is None") return False return item_a.get_xml() == item_b.get_xml()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __eq__(self, other):\n return self.doc_type == other.doc_type and \\\n self.src == other.src and \\\n self.name == other.name", "def __eq__(self, other):\n contentsmatchfail = False\n equal = False\n for i in self.contents:\n if i in other.contents:\n pass\n else:\n contentsmatchfail = True\n for i in other.contents:\n if i in self.contents:\n pass\n else:\n contentsmatchfail = True\n if self.name == other.name and self.name == other.name and contentsmatchfail == False:\n equal = True\n return equal", "def compare_contents(lhs, rhs):\n for filename in (lhs, rhs):\n if not os.path.exists(filename):\n return False\n\n with open(lhs, \"r\") as lhs_file, open(rhs, \"r\") as rhs_file:\n return lhs_file.read() == rhs_file.read()", "def __eq__(self, manager_obj):\n if len(self.file_paths) == len(self.manager_obj.file_paths):\n sorted_file_paths = sorted(self.file_paths)\n sorted_file_paths_other = sorted(manager_obj.file_paths)\n for p1, p2 in zip(sorted_file_paths, sorted_file_paths_other):\n if p1 != p2:\n break\n return False", "def __eq__(self, other):\n return type(self) == type(other) and self._full_path == other.full_path", "def test_equal(self):\n\n qs = FBO(path=TEST_FILES_ROOT, glob='*.md').order_by('name')\n qs2 = FBO(path=TEST_FILES_ROOT, glob='*.md').order_by('name')\n self.assertEqual(\n qs[0],\n qs2[0],\n )", "def __eq__(self, other):\n if not isinstance(other, ImportItem):\n return False\n\n return self.__dict__ == other.__dict__", "def compareObjects(self, item1, item2):\n\t\tkeys1 = item1.keys()\n\t\tkeys2 = item2.keys()\n\t\tkeys1.sort()\n\t\tkeys2.sort()\n\t\tresult=True\n\t\tif keys1 != keys2:\n\t\t\treturn False\n\t\tfor key in keys1:\n\t\t\tif key == 'meta': continue\n\t\t\tkey1 = item1[key]\n\t\t\tkey2 = item2[key]\n\t\t\t# For our purpose, 30 is equal to 30.000\n\t\t\tif key == 'check_interval':\n\t\t\t\tkey1 = int(float(key1))\n\t\t\t\tkey2 = int(float(key2))\n\t\t\tif key1 != key2:\n\t\t\t\tresult = False\n\t\tif result == False: return False\n\t\treturn True", "def __eq__(self, other):\r\n if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other):\r\n return False\r\n for i in range(0, len(self.contents)):\r\n if self.contents[i] != other.contents[i]:\r\n return False\r\n return True", "def __eq__(self, other):\n return other and self.item == other.item", "def __eq__(self, other):\n if isinstance(other, Page):\n return self.fields == other.fields and self.filename == other.filename\n return False", "def __eq__(self, other):\n return (isinstance(other, self.__class__) and\n (self.path == other.path) and\n (self.type == other.type))", "def __eq__(self, other):\n return self.item == other", "def __eq__(self, other):\n return ((self.item_A == other.item_A) and (self.item_B == other.item_B)) or (\n (self.item_A == other.item_B) and (self.item_B == other.item_A)\n )", "def cmp(f1, f2):\n with open(f1) as f1, open(f2) as f2:\n return f1.read() == f2.read()", "def __eq__(self, other):\n if not isinstance(other, Attachment):\n return False\n\n return self.__dict__ == other.__dict__", "def assertObjEquals(self, rhs, lhs, is_saved=False):\n\n if isinstance(lhs, list) and isinstance(rhs, list):\n self.assertEqual(len(lhs), len(rhs))\n for l_folder in lhs:\n r_folder = next(n for n in rhs if n[\"name\"] == l_folder[\"name\"])\n self.assertObjEquals(l_folder, r_folder)\n return\n\n self.assertEqual(rhs[\"type\"], lhs[\"type\"])\n 
self.assertEqual(rhs[\"name\"], lhs[\"name\"])\n if lhs[\"type\"] == \"folder\":\n if is_saved:\n self.assertDictEqual(rhs['mapping'], lhs['mapping'])\n self.assertEqual(len(lhs[\"children\"]), len(rhs[\"children\"]))\n for child in lhs[\"children\"]:\n r_child = next(c for c in rhs[\"children\"] if c[\"name\"] == child[\"name\"])\n self.assertObjEquals(child, r_child, is_saved)\n else:\n if \"filename\" in rhs or \"filename\" in lhs:\n try:\n self.assertEqual(rhs[\"download_link\"], lhs[\"download_link\"])\n self.assertEqual(rhs[\"filename\"], lhs[\"filename\"])\n except Exception:\n import pdb; pdb.set_trace() # DEBUG\n self.assertEqual(rhs[\"predownload_link\"], lhs[\"predownload_link\"])", "def __eq__(self, other):\n if not isinstance(other, ImageSearchItem):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n\n return self.item_id == other.item_id", "def test_duplicate_equality(self):\r\n def duplicate_and_verify(source_usage_key, parent_usage_key):\r\n usage_key = self._duplicate_item(parent_usage_key, source_usage_key)\r\n self.assertTrue(check_equality(source_usage_key, usage_key), \"Duplicated item differs from original\")\r\n\r\n def check_equality(source_usage_key, duplicate_usage_key):\r\n original_item = self.get_item_from_modulestore(source_usage_key, draft=True)\r\n duplicated_item = self.get_item_from_modulestore(duplicate_usage_key, draft=True)\r\n\r\n self.assertNotEqual(\r\n original_item.location,\r\n duplicated_item.location,\r\n \"Location of duplicate should be different from original\"\r\n )\r\n # Set the location and display name to be the same so we can make sure the rest of the duplicate is equal.\r\n duplicated_item.location = original_item.location\r\n duplicated_item.display_name = original_item.display_name\r\n\r\n # Children will also be duplicated, so for the purposes of testing equality, we will set\r\n # the children to the original after recursively checking the children.\r\n if original_item.has_children:\r\n self.assertEqual(\r\n len(original_item.children),\r\n len(duplicated_item.children),\r\n \"Duplicated item differs in number of children\"\r\n )\r\n for i in xrange(len(original_item.children)):\r\n if not check_equality(original_item.children[i], duplicated_item.children[i]):\r\n return False\r\n duplicated_item.children = original_item.children\r\n\r\n return original_item == duplicated_item\r\n\r\n duplicate_and_verify(self.problem_usage_key, self.seq_usage_key)\r\n duplicate_and_verify(self.html_usage_key, self.seq_usage_key)\r\n duplicate_and_verify(self.seq_usage_key, self.chapter_usage_key)\r\n duplicate_and_verify(self.chapter_usage_key, self.usage_key)", "def __eq__(self, other):\n if not isinstance(other, AdaptivePath):\n return False\n\n return super().__eq__(other)", "def _verify_archive_equality(self, file1, file2):\r\n temp_dir_1 = mkdtemp()\r\n temp_dir_2 = mkdtemp()\r\n try:\r\n extract_source(file1, temp_dir_1)\r\n extract_source(file2, temp_dir_2)\r\n return directories_equal(temp_dir_1, temp_dir_2)\r\n\r\n finally:\r\n shutil.rmtree(temp_dir_1)\r\n shutil.rmtree(temp_dir_2)", "def __eq__(self, other):\n if not isinstance(other, Attachment):\n return False\n\n return self.to_dict() == other.to_dict()", "def __eq__(self, other):\n if not isinstance(other, SharedFileMetadata):\n return False\n\n return self.to_dict() == other.to_dict()", "def __eq__(self, other):\n return isinstance(other, self.__class__) and \\\n self.content == other.content and self.justification == other.justification", "def 
OnCompareItems(self, item1, item2):\r\n\r\n return cmp(self.GetItemText(item1), self.GetItemText(item2))", "def __compare_files(self, filename1, filename2):\n self.assertTrue(os.path.isfile(filename1))\n self.assertTrue(os.path.isfile(filename2))\n self.assertEqual(os.path.getsize(filename1), os.path.getsize(filename2))\n with open(filename1, \"rb\") as f1:\n with open(filename2, \"rb\") as f2:\n n_blocks = int(self.args.size) // self.max_block_size\n for i in range(n_blocks):\n self.assertEqual(f1.read(self.max_block_size), \\\n f2.read(self.max_block_size))\n remaining = int(self.args.size) % self.max_block_size\n if remaining > 0:\n self.assertEqual(f1.read(remaining), \\\n f2.read(remaining))", "def __eq__(self, other):\n if not isinstance(other, Path):\n return False\n\n return list.__eq__(self, other)", "def __ne__(self, other):\n if not isinstance(other, SharedFileMetadata):\n return True\n\n return self.to_dict() != other.to_dict()", "def test_eq(self):\n\n self.assertEqual(\n description.BaseDescription('/path/to/local'),\n description.BaseDescription('/path/to/local'),\n 'equality between two descriptions'\n )\n\n self.assertNotEqual(\n description.BaseDescription('/path/to/local/a'),\n description.BaseDescription('/path/to/local/b'),\n 'inequality between two descriptions'\n )" ]
[ "0.66765034", "0.6653508", "0.6383707", "0.6308725", "0.6296383", "0.6288879", "0.6228396", "0.62265986", "0.6225754", "0.62123716", "0.6193374", "0.6163143", "0.61564714", "0.6117004", "0.60719264", "0.60421354", "0.60350317", "0.6013134", "0.6004329", "0.5997933", "0.5996311", "0.5979323", "0.5972764", "0.5969368", "0.59681654", "0.59414196", "0.59326106", "0.5909032", "0.5909001", "0.5903749" ]
0.71401227
0
Determines if the specified item should be kept. If this method returns False then the item will be filtered out of the CDS. In this implementation, we compare each item from the CDS against the touch input. We only keep items that do match the input.
def keep_item(self, content_item): return self._content_item_comparison_weak( content_item, self.touch_content_item )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def condition(keep_it, item):\n if keep_it:\n return item\n else:\n return not item", "def _apply_item(self, item: Item) -> bool:\n if self.locked:\n self.__locked = item.item_type != self.__key\n return not self.locked", "def retain_if(self, condition):\n for item in(self.data_):\n if(item !=None):#Ignore space that we haven't filled yet\n if not condition(item):\n self.data_.remove(item)\n self.size_-=1", "def __contains__(self, item: object) -> bool:\n return item in self._used", "def _apply_item(self, item: Item) -> bool:\n return False", "def trim_items(self, items):\r\n\t\tlogger.debug(\"Enter\")\r\n\t\t\r\n\t\tif self.transactions:\r\n\t\t\tall_items = set.union(*[self.transactions[u][-1] for u in self.transactions.keys()])\r\n\t\telse:\r\n\t\t\treturn items\r\n\t\t\t\r\n\t\ttmp = items.copy()\r\n\t\t\r\n\t\tfor i in items:\r\n\t\t\tif i in all_items:\r\n\t\t\t\tlogger.debug(\"Removing %r\" % i)\r\n\t\t\t\ttmp.remove(i)\r\n\t\t\t\t\r\n\t\tlogger.debug(\"Exit\")\r\n\t\treturn tmp", "def keep_item(self, content_item):\n raise NotImplementedError()", "def __contains__(self, item):\n return item in self._data", "def remove(self, item):\n item_found = False\n\n try:\n # Traverse through the array to look for the 'item'\n for i in range(len(self)):\n if self.the_array[i] == item:\n # Move every item after the 'item' found to left in order\n # to remove the 'item'\n for j in range(i, self.count - 1):\n self.the_array[j] = self.the_array[j + 1]\n self.count -= 1\n item_found = True\n\n if (self.capacity // 2 >= self.BASE_SIZE) and (self.count < self.capacity / 8):\n self._resize(self.capacity // 2)\n break\n\n if not item_found:\n raise ValueError\n\n except ValueError:\n print(\"Item not found in list.\")\n\n return item_found", "def filter(self, items, relative=True):\n if relative: items = self.items[items]\n self.items = np.intersect1d(self.items, items)", "def is_final_item(item_id):\n return \"into\" not in items[\"data\"][str(item_id)]", "def __contains__(self, item):\n return self.contains(item)", "def item_filter(item):\n\tcch_geoserver_services = get_only_cch_geoserver_services(item['services'])\n\thas_cch_geoserver_services = 0 != len(cch_geoserver_services)\n\tis_data = 'data' == item['itemType']\n\treturn is_data and has_cch_geoserver_services;", "def __contains__(self, item: object) -> bool:\n val = conv_kv(item) # type: ignore\n for fixup in self._mapping._fixup.values():\n if fixup.value == val:\n return True\n return False", "def delete(self, item):\n is_found, active, node = self._find(item)\n if is_found and active:\n idx = node.items.index(item)\n node.active[idx] = False\n return True\n else:\n return False", "def __contains__(self, item):\n pass", "def Filter(self, info):\n\n add_item = True\n \n if add_item and (self.use_only_ids != None) and (uid not in self.use_only_ids):\n add_item = False\n \n if add_item and (info['latlong'] == None):\n add_item = False\n \n if add_item and (self.size != None) and (info['size'] < self.size):\n add_item = False\n \n if add_item and (self.expert_rank != None) and (info['expert_rank'] < self.expert_rank):\n add_item = False\n \n if add_item and (self.kosher != None) and (self.kosher == True and info['kosher'] == False):\n add_item = False\n \n if add_item and (self.visiting_center != None) and (self.visiting_center == True and info['visiting_center'] == False):\n add_item = False\n \n if add_item and (self.visiting_center_free_admission != None) and (self.visiting_center_free_admission == True and 
info['visiting_center_free_admission'] == False):\n add_item = False\n \n if add_item and self.visit_time != None:\n day_of_visit = time.strftime(\"%A\", time.localtime(self.visit_time)).lower()\n if info['hours'][day_of_visit] != None:\n closing_at_that_day = time.mktime(time.strptime(time.strftime(\"%A, %d %b %Y\", time.localtime(self.visit_time)) + \" %d:00:00\" % (info['hours'][day_of_visit]), \"%A, %d %b %Y %H:%M:%S\"))\n if self.visit_time > (closing_at_that_day - self._delta_time_before_close):\n add_item = False\n if day_of_visit == 'saturday' and self.kosher == True:\n add_item = False\n \n if add_item and self.use_weather: \n if not self.weather_client.GoodForWinery(self.weather_client.GetCondition(info['latlong'])):\n add_item = False\n \n return add_item", "def _handle_player_collide_item(self, player: Player, item: DroppedItem, data,\n arbiter: pymunk.Arbiter):\n\n if self._inventory.add_item(item.get_item()):\n print(f\"Picked up a {item!r}\")\n self._world.remove_item(item)\n\n return False", "def __contains__(self, item):\n\n if self.is_view:\n return item in self._view\n return item in self._storage", "def __contains__ (self, item):\n return False", "def __contains__(self, item):\n return item in self._fetch()", "def empty_filter(item, *args, **kwargs):\n return True", "def undelete(self, item):\n is_found, active, node = self._find(item)\n if is_found and not active:\n idx = node.items.index(item)\n node.active[idx] = True\n return True\n else:\n return False", "def match(self, item):\n return item == self._expected_item", "def uncheck(self,item):\r\n raise AbstractError\r\n return False", "def is_allergic_to(self, item):\n if item in self.list:\n return True\n else:\n return False", "def drop(self, item: Item) -> bool:\n if item in self.bag:\n self.__bag.remove(item)\n self.room._add_item(item)\n return True\n return False", "def must_skip(self, item):\n user = c.user if c.user_is_loggedin else None\n\n if hasattr(item, \"promoted\") and item.promoted is not None:\n return False\n\n # can_view_slow only exists for Messages, but checking was_comment\n # is also necessary because items may also be comments that are being\n # viewed from the inbox page where their render class is overridden.\n # This check needs to be done before looking at whether they can view\n # the subverbify, or modmail to/from private subverbifys that the user\n # doesn't have access to will be skipped.\n if hasattr(item, 'can_view_slow') and not item.was_comment:\n return not item.can_view_slow()\n\n if hasattr(item, 'subverbify') and not item.subverbify.can_view(user):\n return True", "def Filter(self, name, items):\n self.changed = True\n if name in self.ticker_lists:\n self.ticker_lists[name] = [\n t for t in self.ticker_lists[name] if t not in items]", "def _handle_mob_collide_item(self, mob: Mob, dropped_item: DroppedItem, data,\n arbiter: pymunk.Arbiter) -> bool:\n return False" ]
[ "0.6118802", "0.5894216", "0.5867209", "0.57629687", "0.5717916", "0.5690692", "0.5572477", "0.5494828", "0.54161733", "0.5409092", "0.53812903", "0.5371803", "0.53713757", "0.5323428", "0.53209174", "0.527529", "0.5270618", "0.5258588", "0.5252858", "0.5237473", "0.5220362", "0.5204001", "0.5175396", "0.5172697", "0.51559806", "0.51522803", "0.51481897", "0.514362", "0.5135757", "0.5133372" ]
0.6703609
0
Get marcels for each game
def get_marcels(goalies, date, df): goalies_marcels = [] for goalie in goalies: goalie_marcels = marcels_players(goalie, date, df) goalies_marcels.append({"goalie": goalie, "adj_fsv": goalie_marcels['fsv'], "gp": goalie_marcels['gp']}) return goalies_marcels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_meals():", "def get_all_games():\n games = brain.get_all_games()\n return games", "def get_teams():", "def get_mos_from_aps(self):\n rewards = dict() # saves the reward of each client from each ap\n for ap in self.aps:\n _, data = self.command_ap(ap.name, ap.port, ap.iface, \"/get_mos_client\")\n self.log.debug(\"data for MOS @ {} => {}\".format(ap.name, data))\n rs = self.get_rs(data)\n rewards[ap.id] = rs # save the reward for each client in ap.id\n return rewards", "def get_game_cards(gameId):\n pass", "def get_games():\n feed = feedparser.parse(FEED_URL)\n\n games = []\n for entry in feed['entries']:\n game = Game(entry.title, entry.link)\n games.append(game)\n\n return games", "def get_free_games(self) -> List[Game]:", "def get_games():\r\n feed = feedparser.parse(FEED_URL)\r\n games = []\r\n for entry in feed.entries:\r\n games.append(Game(title = entry['title']\r\n , link = entry['link']\r\n ))\r\n return games", "def get_mos_from_localhost(self):\n rewards = dict() # saves the reward of each client from each ap\n _, data = self.command_ap('localhost', 8080, '', \"/get_mos_client\") # the interface (3rd param) does not matter\n self.log.debug(\"data for MOS @ {} => {}\".format('all', data))\n stations = {'gnu-nb3': ['cloud'],\n 'fenrir': ['storm'],\n }\n for ap in self.aps:\n d = []\n for sta in stations[ap.name]:\n entries = [x[:4] for x in data if x[4] == sta]\n d.extend(entries)\n rs = self.get_rs(d)\n rewards[ap.id] = rs\n return rewards", "async def fill_game_list(self):\n games = await self.fetch_games()\n Game.MAPPING = games", "async def fetch_games(self):\n return await self.http.get_game_list()", "def create_meeples(self, playerid):\n\t\tmeeples = []\n\t\tfor meepleid in range(4):\n\t\t\tmeeples.append(S_Meeple(playerid,\n\t\t\t\t\t\t\t\t\tmeepleid,\n\t\t\t\t\t\t\t\t\tMEEPLESIZE))\n\t\tprint(\"meeples von spieler {} gebaut!\".format(playerid))\n\t\treturn meeples", "def get_games():\n\n return jsonify({\"games\": list(map(make_public_game, games))})", "def returnJointMovers(self):\n\n name = self.groupBox.title()\n\n # select global movers\n cmds.select(name + \"*_mover\")\n globalMovers = cmds.ls(sl=True)\n\n # select offset movers\n cmds.select(name + \"*_mover_offset\")\n offsetMovers = cmds.ls(sl=True)\n\n # mesh movers\n cmds.select(name + \"*_mover_geo\")\n geoMovers = cmds.ls(sl=True)\n\n return [globalMovers, offsetMovers, geoMovers]", "def list_melons():\n melons = model.get_melons()\n return render_template(\"all_melons.html\",\n melon_list = melons)", "def getCubes():", "def get_all_boards():\n return [board for board in GRAPH_DB.find(\"board\")]", "def find_all():\r\n data = store.read().items()\r\n return [Game(id=id, **value) for id,value in data]", "def all_games(self):\r\n return sorted(self.games + list(g for sp in self.sub_pools for g in sp.games) + self.finals,\r\n key=lambda g: (g.datetime, g.pitch.rank))", "def get_all_limbs(body):\n\t#logging.warning('Deprecated: life.get_all_limbs() will be removed in next version.')\n\t\n\treturn body", "def find_meldings(hand):\n\n assert 'game' in hand\n assert 'action_table' in hand\n\n actions = hand['action_table']\n\n chows = [[] for i in range(4)]\n pungs = [[] for i in range(4)]\n kongs = [[] for i in range(4)]\n\n for i, action in enumerate(actions):\n assert len(action) > 1\n index, action_type = action[0], action[1]\n assert index in '1234'\n assert action_type in 'ACDGKNRd'\n index = int(index) - 1\n\n prev_action = actions[i - 1] if i > 0 else None\n\n if action_type == 'C':\n assert 
prev_action\n chows[index].append(prev_action[2:] + action[2:])\n continue\n elif action_type == 'N':\n assert prev_action\n pungs[index].append(prev_action[2:])\n continue\n elif action_type == 'K':\n # Test if this is extending a melded pung to a kong, or 加槓.\n tile = action[2:]\n if tile in pungs[index]:\n continue\n # Test if this is a concealed kong, or 暗槓.\n if prev_action and prev_action[1] == 'G':\n continue\n # Otherwise, this is a melded kong, or 大明槓.\n assert (not prev_action) or (prev_action[1] in 'dD')\n kongs[index].append(tile)\n continue\n\n hand.update(\n chows=chows,\n pungs=pungs,\n kongs=kongs)", "def generate_mobs(self):\n mobs = []\n z = 0\n while z < 10: # 10 mobs per level for now\n c = 0\n while c == 0:\n x = random.randint(0, self.map.width - 1)\n y = random.randint(0, self.map.height - 1)\n if self.map.map[x][y].blocked == False:\n mobs.append(Monster.Monster(x=x, y=y))\n z += 1\n c += 1\n\n return mobs", "def scrape_mars():\n (news_title, news_p) = scrape_news()\n\n\n mars_data = {\n \"news_title\": news_title,\n \"news_p\": news_p,\n \"jpl_url\": scrape_jpl_images(),\n \"facts_tbl\": scrape_mars_facts(),\n \"weather\": scrape_weather(),\n \"hemi_pct\": scrape_hemispheres(),\n }\n\n\n return mars_data", "def MONSTERS():\n list_of_monster = [\n (\"Elfy the Elf\", \"A small elvin creature with a bow and arrows around its back.\", 2, 1, 1),\n (\"Bazilisk\", \"A long snake with piercing eyes\", 3, 2, 1), \n (\"Shrak the Ogre\", \"A large green ogre with a penchant for eating people\", 4, 2, 2),\n (\"Wanda the Witch\", \"A tall scary looking witch riding a broom\", 6, 3, 2),\n (\"Dragon\", \"A large red dragon with the ability to breathe fire\", 8, 4, 2)]\n return list_of_monster", "def get_ammos(self):\n return self.__ammos", "def molarMasses(self, species = None):\n mm = _cantera.phase_getarray(self._phase_id,22)\n return self.selectSpecies(mm, species)", "def scrape_all_world_cup_games():\n\n def scrape_scores_year(year):\n urls = scrape_world_cup_scoreboard(year)\n scores = [scrape_fifa_game(url, 'FIFA World Cup') for url in urls]\n return scores\n\n l = []\n for year in sorted(world_cup_mapping.keys()):\n l.extend(scrape_scores_year(year))\n return l", "def get_all_games(season):\n url = BASE_URL.format(season)\n json_data = requests.get(url, headers=HEADERS).json()\n all_games = json_data[\"resultSets\"][0][\"rowSet\"]\n return all_games", "def get_meals(v2_response, building_id):\n result_data = v2_response[\"result_data\"]\n meals = []\n day_parts = result_data[\"days\"][0][\"cafes\"][building_id][\"dayparts\"][0]\n for meal in day_parts:\n stations = []\n for station in meal[\"stations\"]:\n items = []\n for item_id in station[\"items\"]:\n item = result_data[\"items\"][item_id]\n new_item = {}\n new_item[\"txtTitle\"] = item[\"label\"]\n new_item[\"txtPrice\"] = \"\"\n new_item[\"txtNutritionInfo\"] = \"\"\n new_item[\"txtDescription\"] = item[\"description\"]\n new_item[\"tblSide\"] = \"\"\n new_item[\"tblFarmToFork\"] = \"\"\n attrs = [{\"description\": item[\"cor_icon\"][attr]} for attr in item[\"cor_icon\"]]\n if len(attrs) == 1:\n new_item[\"tblAttributes\"] = {\"txtAttribute\": attrs[0]}\n elif len(attrs) > 1:\n new_item[\"tblAttributes\"] = {\"txtAttribute\": attrs}\n else:\n new_item[\"tblAttributes\"] = \"\"\n if isinstance(item[\"options\"], list):\n item[\"options\"] = {}\n if \"values\" in item[\"options\"]:\n for side in item[\"options\"][\"values\"]:\n new_item[\"tblSide\"] = {\"txtSideName\": side[\"label\"]}\n items.append(new_item)\n 
stations.append({\"tblItem\": items, \"txtStationDescription\": station[\"label\"]})\n meals.append({\"tblStation\": stations, \"txtDayPartDescription\": meal[\"label\"]})\n return meals", "def getMyArmies(self):\n r = []\n for army in self.__armies:\n if (army.getOwner() == 1):\n r.append(army)\n return r" ]
[ "0.6226665", "0.5985738", "0.56903183", "0.5650183", "0.5593422", "0.5577678", "0.5503695", "0.5473676", "0.54020166", "0.5368344", "0.53635", "0.53524166", "0.53257835", "0.5325075", "0.5287838", "0.5281853", "0.5265225", "0.5262602", "0.5246494", "0.52396417", "0.52058876", "0.519979", "0.51884395", "0.5185623", "0.5176426", "0.5170208", "0.5169806", "0.5148742", "0.51427513", "0.5119226" ]
0.6752537
0
Loads a store file into a dictionary with keys as tuples (x,y) coords and values as the integer encoding what is there.
def load_store(filename): result = {} # Open file with open(filename, 'r') as file: # Read first character char = file.read(1) while char: # ; defines a new point if char == ";": # The next characters are of the form (x,y,e) char = file.read(1) # left bracket char = file.read(1) # x x = char char = file.read(1) # comma or second digit # This means x is a two digit number if char != ',': # Add the second digit and then cast x += char x = int(x) char = file.read(1) # Now read the comma else: # One digit number so just cast print(char) x = int(x) # Follow a similar process for y and e char = file.read(1) # y y = char char = file.read(1) # comma or second digit if char != ',': y += char y = int(y) char = file.read(1) else: y = int(y) char = file.read(1) # encoded product e = char char = file.read(1) if char != ')': e += char e = int(e) char = file.read(1) else: e = int(e) # Add to the dictionary coords = (x,y) result[(x,y)] = e char = file.read(1) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_from_file(self, filename):\n # clear datastore mape\n self._datastoreMap = {}\n # citanje filea\n with open(filename, 'rb') as f:\n binstr = f.read()\n inMap = pickle.loads(binstr)\n # za svaki kanal moramo dodati element u _datastoreMap\n for kanal in inMap:\n # stvaramo instancu Datastore\n self._datastoreMap[kanal] = DataStore()\n # instanca Datastore zna se otpakirati iz mape (dictionary)\n self._datastoreMap[kanal].dict2store(inMap[kanal])", "def save_pickle(self, filename):\n x, y, _ = self.get_coords_enu()\n cx, cy = self.get_centres_enu()\n coords = dict(x=x, y=y, cx=cx, cy=cy)\n pickle.dump(coords, open(filename, 'wb'))", "def load_store_location_data():\n try:\n store_location_data = []\n with open('store-locations.csv') as csvfile:\n reader = csv.DictReader(csvfile, dialect=csv.excel)\n for row in reader:\n store_location_data.append(dict(OrderedDict(row)))\n\n return store_location_data\n except IOError as error:\n print(\"I/O error({0}): {1}\".format(error.errno, error.strerror))\n return None", "def loadCoordinatesFromDumpFile(self):\n coordinates_dump_file = open(self.COORDINATES_DUMP_FNAME , 'r')\n coordinates = pickle.load(coordinates_dump_file)\n coordinates_dump_file.close()\n return coordinates", "def load_n3d_coords(file_path): \n \n import core.nuc_io as io\n\n seq_pos_dict = {}\n coords_dict = {} \n \n with io.open_file(file_path) as file_obj:\n chromo = None\n \n for line in file_obj:\n \n data = line.split()\n n_items = len(data)\n \n if not n_items:\n continue\n \n elif data[0] == '#':\n continue\n \n elif n_items == 3:\n chromo, n_coords, n_models = data\n \n #if chromo.lower()[:3] == 'chr':\n # chromo = chromo[3:]\n \n if chromo in coords_dict:\n raise Exception('Duplicate chromosome \"%s\" records in file %s' % (chromo, file_path))\n \n n_coords = int(n_coords)\n n_models = int(n_models)\n \n chromo_seq_pos = np.empty(n_coords, int)\n chromo_coords = np.empty((n_models, n_coords, 3), float)\n \n coords_dict[chromo] = chromo_coords\n seq_pos_dict[chromo] = chromo_seq_pos\n \n check = (n_models * 3) + 1\n i = 0\n \n elif not chromo:\n raise Exception('Missing chromosome record in file %s' % file_path)\n \n elif n_items != check:\n msg = 'Data size in file %s does not match Position + Models * Positions * 3'\n raise Exception(msg % file_path)\n \n else:\n chromo_seq_pos[i] = int(data[0])\n \n coord = [float(x) for x in data[1:]]\n coord = np.array(coord).reshape(n_models, 3)\n chromo_coords[:,i] = coord\n i += 1\n \n return seq_pos_dict, coords_dict", "def loadIdMap(self, filename:str) -> None :\n if(not isinstance(filename,str)):\n raise TypeError(\"filename must be a string but %s was passed\"%str(type(filename)))\n if(not os.path.exists(filename) or not os.path.isfile(filename)):\n raise ValueError(\"invalid filename\")\n\n self.idMap = self.ioutil.loadKeysVals(filename, \";\")", "def load(self):\n file = os.path.join(\"./data\", self.name + \".map\")\n with open(file) as fp:\n lines = fp.readlines()\n self.row, self.col = map(int, lines[0].split())\n self.default = int(lines[1]) # デフォルト値\n for line in lines[2:]: # マップデータを読み込む\n line = line.rstrip() # 改行除去\n self.map.append([int(x) for x in list(line)])", "def load_cows(filename):\r\n\r\n cow_dict = dict()\r\n\r\n f = open(filename, 'r')\r\n \r\n for line in f:\r\n line_data = line.split(',')\r\n cow_dict[line_data[0]] = int(line_data[1])\r\n return cow_dict", "def load_id_dict(self) -> None:\n sys.stdout.write(\"Loading identifier dictionaries...\\n\")\n assert os.path.exists(self.mapping_file)\n 
with open(self.mapping_file, 'r') as f:\n self.forward_map, self.backward_map = json.load(f)\n self.forward_map = {int(k): v for k, v in self.forward_map.items()}", "def filecoords(self):\n coords = sorted(self.map.keys())\n for coord in coords:\n yield coord, self.map[coord]", "def load_cows(filename):\r\n\r\n cow_dict = dict()\r\n\r\n f = open(filename, 'r')\r\n\r\n for line in f:\r\n line_data = line.split(',')\r\n cow_dict[line_data[0]] = int(line_data[1])\r\n return cow_dict", "def load_track_from_dict(self, filename):\n\n # load track data\n track_dict = np.load(\"./static/\" + filename, allow_pickle = True).item()\n\n assert(isinstance(track_dict, dict))\n\n self.points, self.pivots, self.size, self.track_data = track_dict[\"points\"], track_dict[\"pivots\"], \\\n track_dict[\"size\"], track_dict[\"data\"]", "def read_map(self, map_path):\n with open(map_path, mode='rb') as f:\n index_id_map = pickle.load(f)\n return index_id_map", "def load_cows(filename):\n\n cow_dict = dict()\n\n f = open(filename, 'r')\n \n for line in f:\n line_data = line.split(',')\n cow_dict[line_data[0]] = int(line_data[1])\n return cow_dict", "def load_cows(filename):\n\n cow_dict = dict()\n\n f = open(filename, 'r')\n \n for line in f:\n line_data = line.split(',')\n cow_dict[line_data[0]] = int(line_data[1])\n return cow_dict", "def load_file(fpath):\n sick_data = {'X_A': [], 'X_B': [], 'y': []}\n with open(fpath, 'r', encoding='utf-8') as f:\n for line in f:\n text = line.strip().split('\\t')\n sick_data['X_A'].append(text[5].split())\n sick_data['X_B'].append(text[6].split())\n sick_data['y'].append(float(text[4]))\n return sick_data", "def load_map(self, filename):\n with open(filename, 'rb') as file:\n self.current_obstacles = pickle.load(file)\n self.current_goal = pickle.load(file)\n try:\n setstate(pickle.load(file))\n except EOFError:\n print(\"No random state stored\")", "def load_indices(mode='char', words=None, counts=None):\n if os.path.exists(mode+'indices.p'):\n indices = pickle.load(open(mode+'indices.p', 'rb'), encoding='latin1')\n else:\n indices = {}\n i = 0\n for word in counts.keys():\n indices[word] = int(i)\n indices[i] = str(word)\n i += 1\n print(\"i is: \" + str(i))\n print(\"len is: \" + str(len(indices.keys())))\n pickle.dump(indices, open(mode+'indices.p', 'wb'))\n return indices", "def readInstance(self):\n file = open(self.fName, 'r')\n self.genSize = int(file.readline())\n self.data = {}\n for line in file:\n (id, x, y) = line.split()\n self.data[int(id)] = (int(x), int(y))\n file.close()", "def readPointFile(filename):\n pointInfo = {}\n f = open(filename, 'r')\n for data in f.readlines():\n point, info = data.split(\"==\")\n lng, lat = [float(p) for p in point.split(\",\")]\n pointInfo[(lng, lat)] = parseInfoToDict(info)\n f.close()\n\n return pointInfo", "def file_to_dictionary():\n\n return;", "def load_cows(filename:str) -> dict:\n dict_of_cow = {}\n\n with open(filename, 'r') as open_file:\n content = open_file.read()\n \n ls_line = content.split('\\n')\n\n for line in ls_line:\n ls_context = line.split(',')\n dict_of_cow[ls_context[0]] = int(ls_context[1])\n\n return dict_of_cow", "def load_data_map(self):\n with open(\"map/maps.txt\") as maps:\n for x_axis, line in enumerate(maps):\n self.x_axis = x_axis\n self.full_map.insert(x_axis, [])\n for y_axis, case in enumerate(line.strip()):\n self.y_axis = y_axis\n if case == \"D\":\n self.full_map[x_axis].insert(y_axis, \"M\")\n self.user.position = (x_axis, y_axis)\n elif case == \"A\":\n 
self.full_map[x_axis].insert(y_axis, \"A\")\n elif case == \"_\":\n self.full_map[x_axis].insert(y_axis, \"_\")\n elif case == \"#\":\n self.full_map[x_axis].insert(y_axis, \"#\")", "def load(self, *args, **kwargs):\r\n for store_attr in self.__store_attrs__:\r\n setattr(self, store_attr, {})", "def save_to_file(self, filename):\n outMap = {}\n for kanal in self._datastoreMap:\n # svaki pojedini Datastore se zna zapakirati u mapu (dictionary)\n outMap[kanal] = self._datastoreMap[kanal].store2dict()\n # serialize u binary string\n binstr = pickle.dumps(outMap)\n # zapis u file\n with open(filename, 'wb') as f:\n f.write(binstr)", "def read_scoring_matrix(filename):\n scoring_dict = {}\n scoring_file = urllib2.urlopen(filename)\n ykeys = scoring_file.readline()\n ykeychars = ykeys.split()\n for line in scoring_file.readlines():\n vals = line.split()\n xkey = vals.pop(0)\n scoring_dict[xkey] = {}\n for ykey, val in zip(ykeychars, vals):\n scoring_dict[xkey][ykey] = int(val)\n return scoring_dict", "def build_dict(infile):\n\n coords = {}\n sizes = {}\n\n for line in infile:\n fields = line.split()\n ref_st, ref_end, qry_st, qry_end = map(int, fields[0:4])\n qry_chr, qry_size = fields[14], int(fields[8])\n if qry_chr not in coords:\n coords[qry_chr] = {0:[], 1:[]} # 0=ref; 1=qry\n sizes[qry_chr] = qry_size\n coords[qry_chr][0].append([ref_st, ref_end])\n coords[qry_chr][1].append(sorted([qry_st, qry_end]))\n \n return coords, sizes", "def read_scoring_matrix(filename):\n scoring_dict = {}\n scoring_file = urlopen(filename)\n ykeys = scoring_file.readline()\n ykeys = ykeys.decode('ascii')\n ykeychars = ykeys.split()\n for line in scoring_file.readlines():\n line = line.decode('ascii')\n vals = line.split()\n xkey = vals.pop(0)\n scoring_dict[xkey] = {}\n for ykey, val in zip(ykeychars, vals):\n scoring_dict[xkey][ykey] = int(val)\n return scoring_dict", "def load_geometry(mapfile):\n with open(mapfile, 'r') as f:\n def blank_or_comment(l):\n return l.startswith('#') or len(l) == 0\n lines = [l.strip() for l in f.readlines()]\n lines = [l for l in lines if not blank_or_comment(l)]\n\n def to_ints(seq):\n return [int(x) for x in seq]\n\n def p(raw):\n \"returns a tuple containing ([a,a,a], [b,b,b]) given a raw string\"\n raw = raw.strip()\n if ' ' not in raw:\n return (to_ints(raw.split(',')), None)\n else:\n # print \">>%s<<\" % raw\n a,b = raw.split()\n return (to_ints(a.split(',')), to_ints(b.split(',')))\n\n dat = {} # defaultdict(list)\n for line in lines:\n # print line\n (num, rest) = line.split(' ', 1)\n dat[int(num)] = p(rest.strip())\n\n return dat", "def _encode_and_store_(self, latitude, longitude, ID):\n hash = geohash.encode(latitude=latitude, longitude=longitude)\n self.storage[hash] = ID\n self.points_by_id[ID] = (latitude, longitude)" ]
[ "0.6371407", "0.5922768", "0.57294613", "0.56840926", "0.5636541", "0.5611314", "0.558087", "0.55750114", "0.55637383", "0.55598134", "0.5546833", "0.5530001", "0.5480446", "0.5475486", "0.5475486", "0.5475411", "0.5465203", "0.54610336", "0.54399496", "0.5429055", "0.5410855", "0.5394544", "0.5371463", "0.5347942", "0.53306204", "0.53243285", "0.53120273", "0.5309861", "0.5304241", "0.5301155" ]
0.79950714
0
Visualises the store, colour coded by section
def plot_results(store): plt.figure() c = 0 for i in store.keys(): plt.scatter(i[0], -1*i[1], color=get_colour(store[i])) c += 1 plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show(self):\n import Helpers\n for p in self.parts:\n color = (p[1][0]*255, p[1][1]*255, p[1][2]*255, 0)\n Helpers.show(p[0], color)", "def visualise(self):\n\n scores, education = self.get_data()\n self.write_data(scores, education)\n\n return True", "def color_key(self, samples=5, display=True):\n if self.colors is None:\n self.select_color_range(samples=samples)\n if self.track is None:\n print(\"tracking color range\")\n self.track = []\n progress = 0\n for l in self.layers:\n img = l.load_image()\n hsv = colors.rgb_to_hsv(img)\n low_hue = self.colors[:, 0].min()\n hi_hue = self.colors[:, 0].max()\n if low_hue < 0:\n hues = np.logical_or(\n hsv[:, :, 0] > 1 + low_hue,\n hsv[:, :, 0] < hi_hue)\n else:\n hues = np.logical_and(\n hsv[:, :, 0] > low_hue,\n hsv[:, :, 0] < hi_hue)\n sats = np.logical_and(\n hsv[:, :, 1] > self.colors[:, 1].min(),\n hsv[:, :, 1] < self.colors[:, 1].max())\n vals = np.logical_and(\n hsv[:, :, 2] > self.colors[:, 2].min(),\n hsv[:, :, 2] < self.colors[:, 2].max())\n mask = np.logical_and(hues, sats, vals)\n track = center_of_mass(mask)\n self.track += [(track[1], track[0])]\n # l.image = None\n progress += 1\n print_progress(progress, len(self.layers))\n if display:\n # plt.ion()\n first = True\n for l, (x, y) in zip(self.layers, self.track):\n # l.load_image()\n if first:\n self.image_fig = plt.imshow(l.image)\n dot = plt.plot(x, y, 'o')\n plt.show()\n else:\n self.image_fig.set_data(l.image)\n dot[0].set_data(x, y)\n plt.draw()\n plt.pause(.001)\n l.image = None\n if first:\n first = False", "def visualize(houses:pd.DataFrame) -> None:\n #price_distribution(houses)\n #prop_types(houses)\n #zip_code(houses)\n #year_built(houses)\n #bed_bath(houses)\n return", "def show(self):\n #print(\" ===== I am in show function ----\")\n if self.brightness > 0.99:\n global myItemTab\n for i in range(len(cfg.myItemTabHandler)):\n pen = QPen(QColor(self.stripTab[i]))\n brush = QBrush(pen.color())\n #brush = QBrush(pen.color().darker(100))\n cfg.myItemTabHandler[i].setPen(pen)\n cfg.myItemTabHandler[i].setBrush(brush)\n\n\n else:\n pass", "def display(self, style):\n self.stl = Style(style, mc_version='1.15.2')\n vae = self.stl.models.vae\n\n vae_data = Tile.vectorize_all(self.stl.info['mc_version'])\n encodings = vae.encoder.predict(vae_data)[0]\n\n tiles = [\n Tile('minecraft:quartz_stairs[half=bottom]', version='1.15.2'),\n Tile('minecraft:birch_stairs[half=bottom]', version='1.15.2'),\n Tile('minecraft:brick_stairs[half=bottom]', version='1.15.2'),\n Tile('minecraft:bricks', version='1.15.2'),\n Tile('minecraft:nether_bricks', version='1.15.2'),\n Tile('minecraft:white_carpet', version='1.15.2'),\n Tile('minecraft:snow[layers=1]', version='1.15.2')\n ]\n\n vae_data = vectorize(tiles, pad_to=vae.input_dim)\n encodings_subset = vae.encoder.predict(vae_data)[0]\n\n import matplotlib.pyplot as plt\n\n fig=plt.figure()\n ax = fig.add_subplot(111)\n ax.scatter(encodings[:,0], encodings[:,1], c=[[.9,.9,.9]], marker='x')\n ax.scatter(encodings_subset[:,0], encodings_subset[:,1], color='r', marker='x')\n for idx, t in enumerate(tiles):\n ax.annotate(t.name, (encodings_subset[idx,0], encodings_subset[idx,1]))\n ax.set_title('Minecraft tile-ok 2D látenstere')\n plt.show()", "def draw(self):\n self.draw_occupied_cells()\n self.draw_open_cells()\n self.draw_edges()\n plt.xlabel(\"Red\")\n plt.ylabel(\"Black\")\n plt.title('Hex')\n self.camera.snap()", "def display(self, color = (190,205,205), add = False):\r\n\t\tpass", "def show_data(self):\n\n self.area_canvas.axes.cla()\n 
self.draw_scatterplot(self.scatter_canvas, 'x [µm]', 'y [µm]', self.p_inputs['flip y-axis'].isChecked())\n self.draw_hist(self.area_canvas, 'area', 'cluster area [µm²]', 'number of clusters')\n self.draw_hist(self.number_canvas, 'nclusters', 'number of cluster', 'number of regions')\n self.draw_hist(self.density_canvas, 'density', 'cluster density [µm⁻²]', 'number of clusters')\n self.draw_hist(self.percentage_canvas, 'pclustered', 'percentage clustered',\n 'number of regions')\n self.draw_hist(self.ratio_canvas, 'reldensity', 'relative density clusters/background',\n 'number of regions')", "def display(self, color = (190,205,205), add = False): \r\n s += pgl.Shape(pgl.FaceSet( [[0,0,0],[1,0,0],[1,1,0],[0,1,0]], [[0,1,2,3]]) , pgl.Material((0,100,0)))", "def render(self, mode='human', type_of='color'):\n if type_of == 'color':\n plot_color(self.observation, self.servers_mem ,self.services_mem)\n elif type_of=='black':\n plot_black(self.observation, self.servers_mem ,self.services_mem)\n print('\\n----services mem----\\n')\n print('services resource usage: {}\\n'.format(self.services_mem))\n print('servers capacity: {}\\n'.format(self.servers_mem))\n print('\\n----observation----\\n')\n print('service placements: {}\\n'.format(self.observation))", "def visualize(self):\r\n self.aggregator.plot_loss()\r\n self.save_figure()", "def show(self):\n for x in range(0,3):\n for y in range(0,3):\n item = self[x,y]\n print(f\"({x},{y}): {item.id}, {item.cw}\")", "def __display(self,state: dict):\n width = 1+max(len(state[s]) for s in self.__boxes)\n line = '+'.join(['-'*(width*3)]*3)\n for r in self.__rows:\n print(''.join( state[r+c].center(width)+ ('|' if c in '36' else '')\n for c in self.__cols))\n if r in 'CF': print(line)", "def open(self):\n print('palette c_edge heat50 .6')\n print('palette c_vertex heat50 .9')\n print('palette c_sus_range heat10 .6')\n print('palette c_sus heat10 .9')\n print('palette c_inf_range heat85 .6')\n print('palette c_inf heat85 .9')\n print('palette c_wait_sus_range heat30 .6')\n print('palette c_wait_sus heat30 .9')", "def DrawFormatted(varexp, selection, histpars=None ) :\n\n global samples\n\n print 'DrawFormatted histpars ', histpars\n samples.MakeStack(varexp, selection, histpars)\n\n statuslabel = ROOT.TLatex(0.4, 0.8, 'Atlas Internal')\n statuslabel.SetNDC()\n luminosity = ROOT.TLatex(0.4, 0.7, ' 10.0 fb^{-1}')\n luminosity.SetNDC()\n samples.add_decoration(statuslabel)\n samples.add_decoration(luminosity)\n\n samples.DrawCanvas()\n\n statuslabel.Draw()\n luminosity.Draw()", "def render(self, mode='human', type_of='color'):\n if type_of == 'color':\n plot_color(self.observation[0:self.num_of_services],\n self.servers_mem ,self.services_mem)\n elif type_of=='black':\n plot_black(self.observation[0:self.num_of_services],\n self.servers_mem ,self.services_mem)\n print('\\n----services mem----\\n')\n print('services resource usage: {}'.format(self.services_mem))\n print('servers capacity: {}'.format(self.servers_mem))\n print('users services: {}'.format(self.users_services))\n print('\\n----observation----\\n')\n print('services servers: {}'.format(self.observation[0:self.num_of_services]))\n print('users stations: {}'.format(self.observation[self.num_of_services:]))", "def show_tree(self):\n G, vertex_dict = self.tree().graph()\n root = self.tree().root()\n vertical_list = []\n horizontal_list = []\n no_component_list = []\n for i, xi in vertex_dict.items():\n if xi.is_equal(root):\n root_index = i\n if self.is_component(xi):\n if xi.type() == \"II\":\n 
vertical_list.append(i)\n else:\n horizontal_list.append(i)\n print(i, \": \", xi)\n else:\n no_component_list.append(i)\n vertex_colors = {'red': vertical_list, 'blue': horizontal_list,\n 'grey': no_component_list}\n G.show(vertex_colors=vertex_colors, tree_root=root_index, layout='tree')", "def showSS(self, show):\n\t\tfrom chimera.Sequence import defHelixColor, defStrandColor\n\t\thelixReg = self.getRegion(\"structure helices\", create=1,\n\t\t\t\tfill=(1.0, 1.0, 0.8), outline=defHelixColor)\n\t\tstrandReg = self.getRegion(\"structure strands\", create=1,\n\t\t\t\tfill=(0.8, 1.0, 0.8), outline=defStrandColor)\n\t\thelixReg.shown = show\n\t\tstrandReg.shown = show\n\t\tif not show:\n\t\t\treturn\n\t\thelixReg.clear(makeCB=False) # callback will happen in\n\t\tstrandReg.clear(makeCB=False) # addBlocks below\n\n\t\tassocSeqs = {}\n\t\thelices = []\n\t\tstrands = []\n\t\tfor aseq in self.seqCanvas.mav.associations.values():\n\t\t\tassocSeqs[aseq] = 1\n\t\tfor aseq in assocSeqs.keys():\n\t\t\tinHelix = inStrand = 0\n\t\t\tfor pos in range(len(aseq.ungapped())):\n\t\t\t\tisHelix = isStrand = 0\n\t\t\t\tfor matchMap in aseq.matchMaps.values():\n\t\t\t\t\ttry:\n\t\t\t\t\t\tres = matchMap[pos]\n\t\t\t\t\texcept KeyError:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif res.isHelix:\n\t\t\t\t\t\tisHelix = 1\n\t\t\t\t\telif res.isStrand:\n\t\t\t\t\t\tisStrand = 1\n\t\t\t\tgapped = aseq.ungapped2gapped(pos)\n\t\t\t\tif isHelix:\n\t\t\t\t\tif inHelix:\n\t\t\t\t\t\thelices[-1][-1] = gapped\n\t\t\t\t\telse:\n\t\t\t\t\t\thelices.append([aseq, aseq,\n\t\t\t\t\t\t\t\tgapped, gapped])\n\t\t\t\t\t\tinHelix = 1\n\t\t\t\telse:\n\t\t\t\t\tif inHelix:\n\t\t\t\t\t\tinHelix = 0\n\t\t\t\tif isStrand:\n\t\t\t\t\tif inStrand:\n\t\t\t\t\t\tstrands[-1][-1] = gapped\n\t\t\t\t\telse:\n\t\t\t\t\t\tstrands.append([aseq, aseq,\n\t\t\t\t\t\t\t\tgapped, gapped])\n\t\t\t\t\t\tinStrand = 1\n\t\t\t\telse:\n\t\t\t\t\tif inStrand:\n\t\t\t\t\t\tinStrand = 0\n\t\thelixReg.addBlocks(helices)\n\t\tstrandReg.addBlocks(strands)", "def show(self,canvas): \n for piece in self.bluh:\n piece.render(canvas)\n\n #create vertical and horizontal bold outline\n for i in range(len(self.board)+1):\n x0=300+self.piecesize*i\n y0=100\n x1=300+self.piecesize*i\n y1=900\n canvas.create_line(x0,y0,x1,y1,width=5,fill=self.mode.color1)\n for a in range(len(self.board)+1):\n for i in range(len(self.board)+1):\n x2=300\n y2=100+self.piecesize*i\n x3=1100\n y3=100+self.piecesize*i\n canvas.create_line(x2,y2,x3,y3,width=5,fill=self.mode.color1)\n for piece in self.bluh:\n if piece.isselected==True:\n piece.dropShadow(canvas)\n piece.render(canvas)\n #print(piece.__repr__())", "def display(self, colorArray):\n pass", "def show_vertex_colors():\n if bpy.app.version > (2, 80, 0):\n for area in bpy.context.screen.areas:\n if area.type == 'VIEW_3D':\n for space in area.spaces:\n if space.type == 'VIEW_3D':\n space.shading.type = 'SOLID'\n space.shading.color_type = 'VERTEX'", "def render(self):\r\n super().render()\r\n layers, titles, latVect, lonVect = self.make_layers()\r\n LON, LAT = np.meshgrid(lonVect, latVect)\r\n lon = LON.flatten()\r\n lat = LAT.flatten()\r\n for i in range(len(layers)):\r\n vals = layers[i].flatten()\r\n hovertext = []\r\n for k in range(len(vals)):\r\n hovertext.append('lon: {:.2f}<br>lat: {:.2f}<br>{}: {:.1e}'.format(lon[k], lat[k], self.variable + self.unit,vals[k]))\r\n if self.levels == 0:\r\n data = [\r\n go.Heatmap(\r\n x=lon,\r\n y=lat,\r\n z=vals,\r\n colorscale=self.cmap,\r\n zmin=self.vmin,\r\n zmax=self.vmax,\r\n hoverinfo='text',\r\n 
text=hovertext \r\n )\r\n ]\r\n elif self.levels > 0:\r\n data = [\r\n go.Contour(\r\n x=lon,\r\n y=lat,\r\n z=vals,\r\n colorscale=self.cmap,\r\n hoverinfo='text',\r\n text=hovertext, \r\n connectgaps=False,\r\n contours=dict(\r\n coloring='heatmap',\r\n showlabels=True,\r\n start=self.vmin,\r\n end=self.vmax,\r\n size=(self.vmax-self.vmin) / float(self.levels)\r\n )\r\n # line=dict(smoothing=0.85) \r\n )\r\n ] \r\n\r\n\r\n layout = go.Layout(\r\n autosize=False,\r\n title=titles[i],\r\n width=self.width,\r\n height=self.height,\r\n xaxis={'title': self.xlabel},\r\n yaxis={'title': self.ylabel}\r\n ) \r\n\r\n\r\n\r\n if self.surface3D:\r\n data = [\r\n go.Surface(\r\n x=lonVect,\r\n y=latVect,\r\n z=layers[i],\r\n colorscale=self.cmap,\r\n # hoverinfo='text',\r\n # text=hovertext \r\n )\r\n ]\r\n\r\n layout = go.Layout(\r\n autosize=False,\r\n title=titles[i],\r\n width=self.width,\r\n height=self.height,\r\n scene = dict(\r\n xaxis={'title': self.xlabel},\r\n yaxis={'title': self.ylabel},\r\n zaxis={'title': self.variable + self.unit}\r\n )\r\n ) \r\n\r\n\r\n self._save_plotly_(go, data, layout)", "def setDisplayShaded():\n for node in nuke.allNodes():\n print node.name()\n goodGeo = [\"Group\", \"ReadGeo\",\"ReadGeo2\",\"Sphere\",\"Cube\",\"Cylinder\",\"Card\", \"Card2\"]\n if node.Class() in goodGeo:\n if node.Class() == \"Group\":\n node.begin()\n for child in nuke.allNodes():\n if child.Class() in goodGeo:\n child['display'].setValue(2)\n node.end()\n else:\n node['display'].setValue(2)", "def plot_colour(self, label):\n label = label.lower()\n pretty_colours = {}\n # SPIce HD\n pretty_colours['544'] = 'maroon'\n pretty_colours['545'] = 'goldenrod'\n pretty_colours['548'] = 'blueviolet'\n pretty_colours['549'] = 'forestgreen'\n # H2\n ## DOM Efficiency Sets\n pretty_colours['551'] = 'cornflowerblue'\n pretty_colours['552'] = 'cornflowerblue'\n pretty_colours['553'] = 'cornflowerblue'\n pretty_colours['554'] = 'mediumseagreen'\n pretty_colours['555'] = 'mediumseagreen'\n pretty_colours['556'] = 'mediumseagreen'\n ## Hole Ice Sets\n pretty_colours['560'] = 'olive'\n pretty_colours['561'] = 'olive'\n pretty_colours['564'] = 'darkorange'\n pretty_colours['565'] = 'darkorange'\n pretty_colours['572'] = 'teal'\n pretty_colours['573'] = 'teal'\n ## Dima Hole Ice Set without RDE\n pretty_colours['570'] = 'mediumvioletred'\n ## Baseline\n pretty_colours['585'] = 'slategrey'\n # Systematics\n pretty_colours['aeff_scale'] = 'maroon'\n pretty_colours['atm_muon_scale'] = 'goldenrod'\n pretty_colours['deltam31'] = 'blueviolet'\n pretty_colours['theta23'] = 'forestgreen'\n pretty_colours['hole_ice_fwd'] = 'mediumvioletred'\n pretty_colours['dom_eff'] = 'cornflowerblue'\n pretty_colours['genie_ma_qe'] = 'mediumseagreen'\n pretty_colours['genie_ma_res'] = 'olive'\n pretty_colours['hole_ice'] = 'darkorange'\n pretty_colours['nue_numu_ratio'] = 'teal'\n pretty_colours['theta13'] = 'fuchsia'\n pretty_colours['barr_nu_nubar'] = 'thistle'\n pretty_colours['barr_uphor'] = 'orchid'\n pretty_colours['delta_index'] = 'navy'\n # Mass ordering\n pretty_colours['no'] = 'r'\n pretty_colours['io'] = 'b'\n # Asimov fits\n pretty_colours['th_to_wh'] = 'darkviolet'\n pretty_colours['wh_to_th'] = 'deepskyblue'\n colourlabel = None\n for colourkey in pretty_colours.keys():\n if (colourkey in label) or (colourkey == label):\n colourlabel = pretty_colours[colourkey]\n if colourlabel is None:\n logging.debug(\"I do not have a colour scheme for your label %s. 
\"\n \"Returning black.\"%label)\n colourlabel = 'k'\n return colourlabel", "def viz_samples(data, trace, num_sweeps, K, viz_interval=3, figure_size=3, title_fontsize=20, marker_size=1.0, opacity=0.3, bound=20, colors=['#AA3377','#0077BB', '#EE7733', '#009988', '#BBBBBB', '#EE3377', '#DDCC77'], save_name=None):\n E_tau, E_mu, E_z = trace['E_tau'].cpu(), trace['E_mu'].cpu(), trace['E_z'].cpu()\n num_rows = len(data)\n num_cols = 2 + int((num_sweeps-1) / viz_interval)\n gs = gridspec.GridSpec(num_rows, num_cols)\n gs.update(left=0.0 , bottom=0.0, right=1.0, top=1.0, wspace=0, hspace=0)\n fig = plt.figure(figsize=(figure_size * num_cols, figure_size * num_rows))\n for row_ind in range(num_rows):\n ax = fig.add_subplot(gs[row_ind, 0])\n viz_gmm(ax, data[row_ind], K, marker_size, opacity, bound, colors, latents=None) ## visualize raw dataset in the 1st column\n if row_ind == 0:\n ax.set_title('Data', fontsize=title_fontsize)\n# col_ind = 1\n for col_ind in range(num_cols-1):\n sweep = col_ind * viz_interval\n ax = fig.add_subplot(gs[row_ind, col_ind+1])\n viz_gmm(ax, data[row_ind], K, marker_size, opacity, bound, colors, latents=(E_tau[sweep, row_ind], E_mu[sweep, row_ind], E_z[sweep, row_ind]))\n if row_ind == 0:\n if sweep == 0:\n ax.set_title('RWS', fontsize=title_fontsize)\n else:\n ax.set_title('sweep %d' % sweep, fontsize=title_fontsize)\n if save_name is not None:\n plt.savefig(save_name + '.svg', dpi=300)", "def show_splits(chunk, splits):\n assert len(splits) <= 3, f\"only 3 colors supported for now\"\n colors = (255, 0, 0), (0, 255, 0), (0, 0, 255)\n cubes = {c: (255, 255, 255) for c in chunk}\n for color, split in zip(colors, splits):\n for c in split:\n cubes[c] = color\n Doodler(cubes, size=(200,200)).show()", "def __init__(self, colour):\n # TODO: Set up state representation.\n\n self.colour = colour\n # Initial layout\n self.layout = {\n \"whites\": [[1, 0, 1], [1, 1, 1], [1, 3, 1], [1, 4, 1], [1, 6, 1], [1, 7, 1],\n [1, 0, 0], [1, 1, 0], [1, 3, 0], [1, 4, 0], [1, 6, 0], [1, 7, 0]],\n \"blacks\": [[1, 0, 7], [1, 1, 7], [1, 3, 7], [1, 4, 7], [1, 6, 7], [1, 7, 7],\n [1, 0, 6], [1, 1, 6], [1, 3, 6], [1, 4, 6], [1, 6, 6], [1, 7, 6]]\n }", "def main():\n try:\n\n OUTPUTOVERVIEW.write(r'\\documentclass[12pt,a4paper,twocolumn]{article}'+'\\n\\n'\\\n r'\\usepackage[utf8x]{inputenc}'+'\\n'\\\n r'\\usepackage{graphicx}'+'\\n'\\\n r'\\usepackage{tikz}'+'\\n'\\\n r'\\usepackage[left=2.5cm, right=1cm, top=1.5cm, bottom=2cm]{geometry}'+'\\n'\\\n r'\\usepackage{xcolor}'+'\\n'\\\n r'\\usepackage{siunitx}'+'\\n'\\\n r'\\usepackage{titlesec}'+'\\n'\\\n r'\\titleformat{\\section}{\\Large\\scshape}{\\thesection}{1em}{}'+'\\n'\\\n r'\\titlespacing{\\section}{0pt}{12pt plus 4pt minus 2pt}{0pt plus 2pt minus 2pt}'+'\\n'\\\n r'\\setlength{\\parindent}{0pt}'+'\\n'\\\n r'\\usepackage{LatexColors.incl}'+'\\n'\\\n r'\\begin{document}'+'\\n' + '\\n')\n\n startletter = ''\n for strline in COLORLINES[1:]:\n\n if strline.strip():\n # get color name and hex\n colname = colorname(strline)\n\n if startletter != strline[:1]:\n startletter = strline[:1]\n OUTPUTOVERVIEW.write(r'\\section*{' + startletter +'}\\n')\n\n # get RBG\n rcol, gcol, bcol = tuple(int(colname[1][i:i+2], 16) for i in (0, 2, 4))\n\n # \\definecolor{airforceblue}{HTML}{5d8aa8}\n clname = strip_accents(re.sub(BAD_CHARS_NAME, '',\\\n colname[2], 0, re.MULTILINE | re.IGNORECASE)).title()\n\n rcol = rcol/255.\n gcol = gcol/255.\n bcol = bcol/255.\n\n cmyk = convert_rgb_cmyk(rcol, gcol, bcol)\n hsv = convert_rgb_hsv(rcol, gcol, bcol)\n hsl = 
convert_rgb_hsl(rcol, gcol, bcol)\n\n OUTPUTOVERVIEW.write(r'\\begin{minipage}{\\linewidth}\\tikz[baseline=1mm]\\draw [fill='\\\n + colname[0] + r', rounded corners=5pt] (0,0) rectangle (2cm,1cm); {\\textbf{'\\\n + clname + r'} \\\\ \\scriptsize{'+'RGB: {0:.0f}, {1:.0f}, {2:.0f}'\\\n .format(*tuple(int(colname[1][i:i+2], 16) for i in (0, 2, 4))) + r'; ' + \\\n r'HEX:~\\#' + colname[1] + r'\\\\' + \\\n r'CMYK: \\SI{{{0:.1f}}}{{\\percent}}, \\SI{{{1:.1f}}}{{\\percent}}, '\n r'\\SI{{{2:.1f}}}{{\\percent}}, \\SI{{{3:.1f}}}{{\\percent}}'\\\n .format(cmyk[0]*100, cmyk[1]*100, cmyk[2]*100, cmyk[3]*100) + r' \\\\' + \\\n r'HSV: \\SI{{{0:.0f}}}{{\\degree}}, \\SI{{{1:.1f}}}{{\\percent}}, '\n r'\\SI{{{2:.1f}}}{{\\percent}}'\\\n .format(hsv[0], hsv[1]*100, hsv[2]*100) + r' \\\\' + \\\n r'HSL: \\SI{{{0:.0f}}}{{\\degree}}, \\SI{{{1:.1f}}}{{\\percent}}, '\n r'\\SI{{{2:.1f}}}{{\\percent}}'\\\n .format(hsl[0], hsl[1]*100, hsl[2]*100)\\\n + '}}\\n'\\\n r'\\vspace{.5em}\\end{minipage}' + '\\n')\n\n OUTPUTOVERVIEW.write(r'\\end{document}')\n\n except OSError as err:\n print(\"OS error: {0}\".format(err))\n # except Exception as ex: #comment for pylint 10.0!\n # print(str(ex))\n else:\n print('Overview file written.')\n OUTPUTOVERVIEW.close()", "def visualize(self):\n\n self.check_model()\n show(prepare(self.model, self.vectorized_data, self.vectorizer, mds='tsne'))" ]
[ "0.621113", "0.58131015", "0.57735425", "0.56498563", "0.56281793", "0.5530164", "0.55135596", "0.54813457", "0.5477863", "0.5418798", "0.54141897", "0.53675026", "0.53595763", "0.53586966", "0.52963907", "0.5247792", "0.5236707", "0.52300817", "0.5226265", "0.5215034", "0.52133745", "0.5204823", "0.51960886", "0.5196059", "0.5193508", "0.5162181", "0.51610404", "0.51541054", "0.5145217", "0.5128442" ]
0.6128887
1
map on the list of tensors unpacked from `elems` on dimension 0. The simplest version of `map_fn` repeatedly applies the callable `fn` to a sequence of elements from first to last. The elements are made of the tensors unpacked from `elems`. `dtype` is the data type of the return value of `fn`. Users must provide `dtype` if it is different from the data type of `elems`. Suppose that `elems` is unpacked into `values`, a list of tensors. The shape of the result tensor is `[values.shape[0]] + fn(values[0]).shape`. This method also allows multiarity `elems` and output of `fn`. If `elems` is a (possibly nested) list or tuple of tensors, then each of these tensors must have a matching first (unpack) dimension. The signature of `fn` may match the structure of `elems`. That is, if `elems` is
def map_fn(fn, elems, dtype=None, parallel_iterations=None, back_prop=True, swap_memory=False, infer_shape=True, name=None): if not callable(fn): raise TypeError("fn must be callable.") if isinstance(elems, sparse_tensor.SparseTensor): raise TypeError( "To perform a map on the values of a sparse tensor use either " " SparseTensor(input.indices, fn(input.values), input.dense_shape) or " " SparseTensor(input.indices, map_fn(fn, input.values), " "input.dense_shape)") in_graph_mode = not context.executing_eagerly() # Set the default number of parallel_iterations depending on graph/eager mode. if in_graph_mode and not parallel_iterations: parallel_iterations = 10 elif not in_graph_mode and not parallel_iterations: parallel_iterations = 1 if not in_graph_mode and parallel_iterations > 1: logging.log_first_n(logging.WARN, "Setting parallel_iterations > 1 has no " "effect when executing eagerly. Consider calling map_fn" " with tf.contrib.eager.defun to execute fn in " "parallel.", 1) parallel_iterations = 1 input_is_sequence = nest.is_sequence(elems) input_flatten = lambda x: nest.flatten(x) if input_is_sequence else [x] def input_pack(x): return nest.pack_sequence_as(elems, x) if input_is_sequence else x[0] if dtype is None: output_is_sequence = input_is_sequence output_flatten = input_flatten output_pack = input_pack else: output_is_sequence = nest.is_sequence(dtype) output_flatten = lambda x: nest.flatten(x) if output_is_sequence else [x] def output_pack(x): return (nest.pack_sequence_as(dtype, x) if output_is_sequence else x[0]) elems_flat = input_flatten(elems) with ops.name_scope(name, "map", elems_flat): # TODO(akshayka): Remove the in_graph_mode check once caching devices are # supported in Eager if in_graph_mode: # Any get_variable calls in fn will cache the first call locally # and not issue repeated network I/O requests for each iteration. varscope = vs.get_variable_scope() varscope_caching_device_was_none = False if varscope.caching_device is None: # TODO(ebrevdo): Change to using colocate_with here and in other # methods. varscope.set_caching_device(lambda op: op.device) varscope_caching_device_was_none = True elems_flat = [ ops.convert_to_tensor(elem, name="elem") for elem in elems_flat] dtype = dtype or input_pack([elem.dtype for elem in elems_flat]) dtype_flat = output_flatten(dtype) # Convert elems to tensor array. n may be known statically. static_shape = elems_flat[0].shape if static_shape.ndims is not None and static_shape.ndims < 1: if len(elems_flat) == 1: raise ValueError("elems must be a 1+ dimensional Tensor, not a scalar") else: raise ValueError( "elements in elems must be 1+ dimensional Tensors, not scalars" ) n = (tensor_shape.dimension_value(static_shape[0]) or array_ops.shape(elems_flat[0])[0]) # TensorArrays are always flat elems_ta = [ tensor_array_ops.TensorArray(dtype=elem.dtype, size=n, dynamic_size=False, infer_shape=True) for elem in elems_flat] # Unpack elements elems_ta = [ elem_ta.unstack(elem) for elem_ta, elem in zip(elems_ta, elems_flat)] i = constant_op.constant(0) accs_ta = [ tensor_array_ops.TensorArray(dtype=dt, size=n, dynamic_size=False, infer_shape=infer_shape) for dt in dtype_flat] def compute(i, tas): """The loop body of map_fn. 
Args: i: the loop counter tas: the flat TensorArray accumulator list Returns: (i + 1, tas): the updated counter + updated TensorArrays Raises: TypeError: if dtype and packed_fn_values structure do not match ValueType: if dtype and packed_fn_values lengths do not match """ packed_values = input_pack([elem_ta.read(i) for elem_ta in elems_ta]) packed_fn_values = fn(packed_values) nest.assert_same_structure(dtype or elems, packed_fn_values) flat_fn_values = output_flatten(packed_fn_values) tas = [ta.write(i, value) for (ta, value) in zip(tas, flat_fn_values)] return (i + 1, tas) _, r_a = control_flow_ops.while_loop( lambda i, _: i < n, compute, (i, accs_ta), parallel_iterations=parallel_iterations, back_prop=back_prop, swap_memory=swap_memory, maximum_iterations=n) results_flat = [r.stack() for r in r_a] n_static = tensor_shape.Dimension(tensor_shape.dimension_value( elems_flat[0].get_shape().with_rank_at_least(1)[0])) for elem in elems_flat[1:]: n_static.merge_with(tensor_shape.Dimension(tensor_shape.dimension_value( elem.get_shape().with_rank_at_least(1)[0]))) for r in results_flat: r.set_shape(tensor_shape.TensorShape(n_static).concatenate( r.get_shape()[1:])) # TODO(akshayka): Remove the in_graph_mode check once caching devices are # supported in Eager if in_graph_mode and varscope_caching_device_was_none: varscope.set_caching_device(None) return output_pack(results_flat)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def map(self, fn, *iterables, **kwargs):\n fn = self._prepare_fn(fn)\n return self._self.map(fn, *iterables, **kwargs)", "def array_map(fn, arrs, n):\n # we shouldn't need a special case for n == 0, but NumPy complains about indexing into a zero-dimensional\n # array a using a[(Ellipsis,)].\n if n == 0:\n return fn(*arrs)\n \n full_shape = tuple(np.array([a.shape[:n] for a in arrs]).max(0))\n result = None\n for full_idx in itertools.product(*map(range, full_shape)):\n inputs = [a[broadcast(full_idx, a.shape[:n]) + (Ellipsis,)] for a in arrs]\n curr = fn(*inputs)\n \n if result is None:\n if type(curr) == tuple:\n result = tuple(np.zeros(full_shape + np.asarray(c).shape) for c in curr)\n else:\n result = np.zeros(full_shape + np.asarray(curr).shape)\n\n if type(curr) == tuple:\n for i, c in enumerate(curr):\n result[i][full_idx + (Ellipsis,)] = c\n else:\n result[full_idx + (Ellipsis,)] = curr\n return result", "def deepmap(func, *seqs):\n if isinstance(seqs[0], (list, Iterator)):\n return [deepmap(func, *items) for items in zip(*seqs)]\n else:\n return func(*seqs)", "def maplist(f, xs):\n return list(map(f, xs))", "def Map(dataset, map_func, input_columns=None):\n return dataset.map(map_func)", "def map_ids(x, indices, map_fn):\n indices = tf.reshape(indices, [-1])\n\n t_i = tf.constant(0)\n # batch_coordinates start at 0\n t_batch_size = tf.reduce_max(indices) + 1\n\n # ta_stack_out will store the intermediate results for each individual id\n # As alternative to tf.TensorArray, scatter_update could potentially be used\n # but that would require an additional mutable tensor.\n ta_stack_out = tf.TensorArray(\n x.dtype,\n size=t_batch_size,\n )\n\n # Then we iterate over each sequence individually and compute the\n # transformation for each id\n while_condition = lambda t_i, *args: tf.less(t_i, t_batch_size)\n\n def body(t_i, ta_stack_out):\n \"\"\"Loop body.\"\"\"\n # Gather the ids\n current_ids = tf.to_int32(tf.where(tf.equal(indices, t_i)))\n t_row = tf.gather_nd(x, indices=current_ids)\n\n # TODO(epot): Should not call map_fn if t_row size is 0\n\n # Apply transformation to each id\n # Restore batch_dim=1 as most function expect [batch_dim, length, ...] 
as\n # input\n t_row = tf.expand_dims(t_row, axis=0)\n t_row = map_fn(t_row)\n t_row = tf.squeeze(t_row, axis=0) # Squeeze for concatenation\n ta_stack_out = ta_stack_out.write(t_i, t_row)\n\n return [tf.add(t_i, 1), ta_stack_out] # ++i\n\n # Run the loop, equivalent to:\n # stack_out = []\n # while i < batch_size:\n # stack_out.expand(map_fn(x[indices==i]))\n _, ta_stack_out = tf.while_loop(while_condition, body, [t_i, ta_stack_out])\n\n # Merge all results\n return ta_stack_out.concat()", "def map(self, function):\n return FunctionalWrapper(map(function, self.data))", "def map_and_batch_with_legacy_function(map_func,\n batch_size,\n num_parallel_batches=None,\n drop_remainder=False,\n num_parallel_calls=None):\n\n if num_parallel_batches is None and num_parallel_calls is None:\n num_parallel_calls = batch_size\n elif num_parallel_batches is not None and num_parallel_calls is None:\n num_parallel_calls = batch_size * num_parallel_batches\n elif num_parallel_batches is not None and num_parallel_calls is not None:\n raise ValueError(\n \"`map_and_batch_with_legacy_function` allows only one of \"\n \"`num_parallel_batches` and \"\n \"`num_parallel_calls` to be set, but \"\n f\"`num_parallel_batches` was set to {num_parallel_batches} \"\n f\"and `num_parallel_calls` as set to {num_parallel_calls}.\")\n\n def _apply_fn(dataset):\n return _MapAndBatchDataset(dataset, map_func, batch_size,\n num_parallel_calls, drop_remainder,\n use_legacy_function=True)\n\n return _apply_fn", "def map(self, f_list: List[Callable[[np.ndarray], int]], axis: int = 0, chunksize: int = 1000, selection: np.ndarray = None) -> List[np.ndarray]:\n\t\tif hasattr(f_list, '__call__'):\n\t\t\traise ValueError(\"f_list must be a list of functions, not a function itself\")\n\n\t\tresult = []\n\t\tif axis == 0:\n\t\t\trows_per_chunk = chunksize\n\t\t\tfor i in range(len(f_list)):\n\t\t\t\tresult.append(np.zeros(self.shape[0]))\n\t\t\tix = 0\n\t\t\twhile ix < self.shape[0]:\n\t\t\t\trows_per_chunk = min(self.shape[0] - ix, rows_per_chunk)\n\t\t\t\tif selection is not None:\n\t\t\t\t\tchunk = self[ix:ix + rows_per_chunk, :][:, selection]\n\t\t\t\telse:\n\t\t\t\t\tchunk = self[ix:ix + rows_per_chunk, :]\n\t\t\t\tfor i in range(len(f_list)):\n\t\t\t\t\tresult[i][ix:ix + rows_per_chunk] = np.apply_along_axis(f_list[i], 1, chunk)\n\t\t\t\tix = ix + rows_per_chunk\n\t\telif axis == 1:\n\t\t\tcols_per_chunk = chunksize\n\t\t\tfor i in range(len(f_list)):\n\t\t\t\tresult.append(np.zeros(self.shape[1]))\n\t\t\tix = 0\n\t\t\twhile ix < self.shape[1]:\n\t\t\t\tcols_per_chunk = min(self.shape[1] - ix, cols_per_chunk)\n\t\t\t\tif selection is not None:\n\t\t\t\t\tchunk = self[:, ix:ix + cols_per_chunk][selection, :]\n\t\t\t\telse:\n\t\t\t\t\tchunk = self[:, ix:ix + cols_per_chunk]\n\t\t\t\tfor i in range(len(f_list)):\n\t\t\t\t\tresult[i][ix:ix + cols_per_chunk] = np.apply_along_axis(f_list[i], 0, chunk)\n\t\t\t\tix = ix + cols_per_chunk\n\t\treturn result", "def apply_to_tensors(\n fn: Callable, container: Union[torch.Tensor, Dict, List, Tuple, Set]\n) -> Union[torch.Tensor, Dict, List, Tuple, Set]:\n\n def _apply(\n x: Union[torch.Tensor, Dict, List, Tuple, Set]\n ) -> Union[torch.Tensor, Dict, List, Tuple, Set]:\n if torch.is_tensor(x):\n return fn(x)\n elif isinstance(x, OrderedDict):\n od = x.__class__()\n for key, value in x.items():\n od[key] = _apply(value)\n return od\n elif isinstance(x, PackedSequence):\n _apply(x)\n return x\n elif isinstance(x, dict):\n return {key: _apply(value) for key, value in x.items()}\n elif 
isinstance(x, list):\n return [_apply(x) for x in x]\n elif isinstance(x, tuple):\n return tuple(_apply(x) for x in x)\n elif isinstance(x, set):\n return {_apply(x) for x in x}\n else:\n return x\n\n return _apply(container)", "def map_and_batch(map_func,\n batch_size,\n num_parallel_batches=None,\n drop_remainder=False,\n num_parallel_calls=None):\n\n if num_parallel_batches is None and num_parallel_calls is None:\n num_parallel_calls = batch_size\n elif num_parallel_batches is not None and num_parallel_calls is None:\n num_parallel_calls = batch_size * num_parallel_batches\n elif num_parallel_batches is not None and num_parallel_calls is not None:\n raise ValueError(\n \"`map_and_batch` allows only one of `num_parallel_batches` and \"\n \"`num_parallel_calls` to be set, but \"\n f\"`num_parallel_batches` was set to {num_parallel_batches} \"\n f\"and `num_parallel_calls` as set to {num_parallel_calls}.\")\n\n def _apply_fn(dataset):\n return _MapAndBatchDataset(dataset, map_func, batch_size,\n num_parallel_calls, drop_remainder)\n\n return _apply_fn", "def lmap(f: Callable, *xs) -> list:\n return list(map(f, *xs))", "def make_pbroadcast_function(fn, in_axes, out_axes, out_dtype):\n\n if not isinstance(in_axes, tuple):\n in_axes = (in_axes,)\n\n def pbroadcast_fn(*args):\n nest.assert_shallow_structure(args, in_axes)\n nest.assert_shallow_structure(out_dtype, out_axes)\n map_in_axes = nest.map_structure_up_to(args, canonicalize_axis_name,\n in_axes)\n map_out_axes = nest.map_structure_up_to(out_dtype, canonicalize_axis_name,\n out_axes)\n\n def _pbroadcast_input(out_axes, x, in_axes):\n psum_axes = [\n axis_name for axis_name in out_axes if axis_name not in in_axes\n ]\n return pbroadcast(x, psum_axes)\n\n def _flat_fn_index(i, *args):\n out = fn(*args)\n return tf.nest.flatten(out)[i]\n\n def _flat_fn(*args):\n outputs = []\n for i, out_axis in enumerate(nest.flatten_up_to(out_dtype, map_out_axes)):\n local_args = nest.map_structure_up_to(\n args, functools.partial(_pbroadcast_input, out_axis), args,\n map_in_axes)\n outputs.append(_flat_fn_index(i, *local_args))\n return tf.nest.pack_sequence_as(out_dtype, outputs)\n\n return _flat_fn(*args)\n\n return pbroadcast_fn", "def list_map(data, function):\n return list(map(function, data))", "def map_(func, some_list):\n \n result = []\n \n for arg in some_list:\n result.append(func(arg))\n \n return result", "def mapf(f: Callable[[D_], R_], C: Iterable[D_]) -> Iterator[R_]:\n return (f(x) for x in C)", "def apply_over_rows(func, data, **kwargs):\n\n axis = 1\n cpu_cnt = multiprocessing.cpu_count()\n \n chunks = [(func, axis, split_data, kwargs) for split_data in np.array_split(data, cpu_cnt) if split_data.size > 0]\n\n pool = multiprocessing.Pool()\n map_results = pool.starmap(np_apply_along_axis, chunks)\n \n pool.close()\n pool.join()\n\n return np.concatenate(map_results)", "def map(self, fn, inv_fn):\r\n\t\treturn MapProjectedList(self, [fn], [inv_fn])", "def map(self, func: Callable[[T], V]) -> 'List[V]':\n return [func(v) for v in self.array]", "def map(iterable, function):\n for x in iterable:\n yield function(x)", "def map(function, iterable):\n\n return [function(x) for x in iterable]", "def MapDataList(ea, length, func, wordsize=1):\n PutDataList(ea, map(func, GetDataList(ea, length, wordsize)), wordsize)", "def imap(self, func: Callable[[T], V]) -> '_[V]':\n return _(map(func, self.array))", "def applymap(self, func, *args, **kwargs):\n return DataFrameDefault.register(pandas.DataFrame.applymap)(\n self, func, *args, **kwargs\n )", 
"def gmap(\n func: Callable,\n *iterables: Iterable,\n mapper: Callable[[Callable, tuple[Iterable]], Iterable] = map,\n evaluator: Callable[[Iterable], Any] = tuple\n):\n return evaluator(mapper(func, *iterables))", "def map(iteratee, *seqs):\n return _map(fnc.iteratee(iteratee), *seqs)", "def flat_map(fn, collection):\n return chain.from_iterable(map(fn, collection))", "def Map(\r\n data,\r\n map_fct: Callable,\r\n info: List[Dict] = None,\r\n lazy: bool = True,\r\n workers: int = 1,\r\n buffer_len: int = 3,\r\n *arg: list,\r\n **kwargs: Dict\r\n) -> Union[MapAbstract, DataAbstract, np.ndarray, list]:\r\n\r\n if lazy:\r\n return MapAbstract(data, map_fct, *arg, info=info, **kwargs)\r\n else:\r\n return DataAbstract(\r\n MapAbstract(data, map_fct, *arg, info=info, **kwargs),\r\n workers=workers,\r\n buffer_len=buffer_len,\r\n )[:]", "def map(self, func, *sequences):\n return self.mapper().map(func, *sequences)", "def map_collection(func, collection):\n datatype = type(collection)\n if isinstance(collection, Mapping):\n return datatype((key, func(val)) for key, val in collection.items())\n if is_string(collection):\n return collection\n elif isinstance(collection, Iterable):\n return datatype(map(func, collection))\n else:\n return collection" ]
[ "0.58948135", "0.57702446", "0.56236166", "0.54255617", "0.542432", "0.54126114", "0.541121", "0.53815204", "0.5381166", "0.53656024", "0.5327522", "0.5287739", "0.5266946", "0.5264301", "0.52466375", "0.5220995", "0.5209663", "0.5201138", "0.5159362", "0.5151307", "0.5148298", "0.51466554", "0.5115447", "0.5104119", "0.50701714", "0.5063366", "0.5052522", "0.50421363", "0.501499", "0.5014603" ]
0.7334713
0
define how many pixels thick the grid hint is
def _grid_hint_size(self) -> int:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_grid_width(self):\n # replace with your code\n return 0", "def get_grid_height(self):\n # replace with your code\n return 0", "def sizeHint(self):\n return self.minimumSize() * 3", "def get_grid_width(self):\r\n # replace with your code\r\n return self.grid_width", "def get_grid_width(self):\r\n # replace with your code\r\n return self._grid_width", "def get_grid_width(self):\r\n # replace with your code\r\n return self._grid_width", "def get_grid_width(self):\r\n # replace with your code\r\n return self._width", "def plot_insertsize():", "def setwinsize(self, rows, cols):", "def get_grid_width(self):\n # replace with your code\n return self.grid_width", "def get_grid_width(self):\n # replace with your code\n return self.grid_width", "def get_grid_width(self):\n # replace with your code\n return self._grid_width", "def get_grid_width(self):\n # replace with your code\n return self._grid_width", "def get_grid_width(self):\n # replace with your code\n return self._width", "def get_grid_width(self):\n # replace with your code\n return self._width", "def description(self) -> str:\n return f\"Maximize number of {colour_name(self.colour)} unit cells \" \\\n f\"that form a blob by touching sides. \" \\\n f\"Touching corners doesn't count\"", "def get_grid_height(self):\r\n # replace with your code\r\n return self.grid_height", "def configure_grid(self):\r\n\r\n for r in range(3):\r\n self.rowconfigure(r, weight=1)\r\n for c in range(3):\r\n self.columnconfigure(c, weight=1)", "def get_grid_height(self):\r\n # replace with your code\r\n return self._grid_height", "def get_grid_height(self):\r\n # replace with your code\r\n return self._grid_height", "def options(self):\n opt = self.main_window.toplevel()\n cur_l = tkinter.Scale(opt, length=200, label=\"Number of lines:\",\n orient=tkinter.HORIZONTAL, from_=1, to=12,\n command=self.update_nb_rows)\n cur_l.set(self.game.n_row) # initial position of the cursor\n cur_l.pack()\n cur_h = tkinter.Scale(opt, length=200, label=\"Number of columns:\",\n orient=tkinter.HORIZONTAL, from_=1, to=12,\n command=self.update_nb_cols)\n cur_h.set(self.game.n_col)\n cur_h.pack()", "def get_grid_width(self):\r\n return self.width", "def _extra_width(self) -> int:\n width = 0\n if self.box and self.show_edge:\n width += 2\n if self.box:\n width += len(self.columns) - 1\n return width", "def SimpleMeasuredGrid(min_x,min_y,max_x,max_y,x_spacing,y_spacing,\n color=(0.5,1.0,0.5,1.0),xoff=-0.14,yoff=1.04,\n label_type=None,shapes_name=\"Grid\"):\n\n shps=gview.GvShapes(name=shapes_name)\n gview.undo_register( shps )\n shps.add_field('position','string',20)\n\n if os.name == 'nt':\n font=\"-adobe-helvetica-medium-r-*-*-12-*-*-*-*-*-*-*\"\n else:\n #font=\"-adobe-helvetica-medium-r-*-*-12-*-*-*-*-*-*-*\"\n #font=\"-urw-helvetica-medium-r-normal-*-9-*-*-*-p-*-iso8859-2\"\n font=\"-adobe-helvetica-medium-r-normal-*-8-*-*-*-p-*-iso10646-1\"\n #font=\"-misc-fixed-medium-r-*-*-9-*-*-*-*-*-*-*\"\n\n\n # Round to nearest integer space\n max_x=min_x+numpy.floor((max_x-min_x)/x_spacing)*x_spacing\n max_y=min_y+numpy.floor((max_y-min_y)/y_spacing)*y_spacing\n\n lxoff=(max_x-min_x)*xoff # horizontal label placement\n lyoff=(max_y-min_y)*yoff # vertical label placement\n\n for hval in numpy.arange(min_x,\n max_x+x_spacing/100.0,\n x_spacing):\n nshp=gview.GvShape(type=gview.GVSHAPE_LINE)\n nshp.set_node(hval,max_y,0,0)\n nshp.set_node(hval,min_y,0,1)\n shps.append(nshp)\n pshp=gview.GvShape(type=gview.GVSHAPE_POINT)\n pshp.set_node(hval,min_y+lyoff)\n 
pshp.set_property('position',\"%d\" % int(hval+0.5))\n shps.append(pshp)\n\n for vval in numpy.arange(min_y,\n max_y+y_spacing/100.0,\n y_spacing):\n nshp=gview.GvShape(type=gview.GVSHAPE_LINE)\n nshp.set_node(min_x,vval,0,0)\n nshp.set_node(max_x,vval,0,1)\n shps.append(nshp)\n pshp=gview.GvShape(type=gview.GVSHAPE_POINT)\n pshp.set_node(min_x+lxoff,vval)\n pshp.set_property('position',\"%d\" % int(vval+0.5))\n shps.append(pshp)\n\n cstr=gvogrfs.gv_to_ogr_color(color)\n if len(cstr) < 9:\n cstr=cstr+\"FF\"\n clstr=str(color[0])+' '+str(color[1])+' '+str(color[2])+' '+str(color[3])\n\n layer=gview.GvShapesLayer(shps)\n layer.set_property('_line_color',clstr)\n layer.set_property('_point_color',clstr)\n # Set antialias property so that lines look nice\n # when rotated.\n layer.set_property('_gl_antialias','1')\n layer.set_property('_gv_ogrfs_point',\n 'LABEL(t:{position},f:\"'+font+'\",c:'+cstr+')')\n layer.set_read_only(True) \n\n return layer", "def width(self) -> int:", "def width(self) -> int:", "def get_grid_height(self):\n # replace with your code\n return self.grid_height", "def get_grid_height(self):\n # replace with your code\n return self.grid_height", "def grid_size(self):\n return self._grid_size", "def get_grid_height(self):\n # replace with your code\n return self._grid_height" ]
[ "0.6378773", "0.61242104", "0.6004149", "0.5950054", "0.59119844", "0.59119844", "0.5891236", "0.5891007", "0.5885277", "0.58248097", "0.58248097", "0.57842076", "0.57842076", "0.5780021", "0.5780021", "0.5767355", "0.5745657", "0.56863886", "0.5684655", "0.5684655", "0.56666505", "0.56631917", "0.56378824", "0.5613457", "0.5608461", "0.5608461", "0.56051636", "0.56051636", "0.55994266", "0.55495787" ]
0.8068411
0
Define a mapping from types that are implementations of BaseSpriteLoader to a dictionary that contain a chunk_size and a chunk_map
def _origin_map(self) -> Dict[Type[BaseSpriteLoader], chunk_map_type]:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _createMap(self):\n width = self.map_size[0] * self.chunk_size\n height = self.map_size[1] * self.chunk_size\n map_array = np.zeros((height, width), dtype=float)\n chunks = {}\n clist = []\n for i in range(0, self.map_size[0]*self.map_size[1]):\n chunks[i+1] = Chunk(self)\n chunk_array = np.asarray(list(chunks.keys()))\n chunk_array.resize(self.map_size[0], self.map_size[1])\n return map_array, chunk_array, chunks", "def createMap(self):\n map = {}\n for rows in xrange(0,(size[1]/50)):\n for columns in xrange(0,(size[0]/50)):\n if rows == (size[1]/50)-1 or rows == 0 or columns== (size[0]/50)-1 or columns==0:\n map.update({(rows,columns):\"block\"})\n elif(rows%3 == 0):\n map.update({(rows,columns):random.choice(map_options)})\n else:\n map.update({(rows,columns):random.choice(map_options[:1])})\n\n self.map = map", "def _load(mapping, **keys):\n return keys[\"loader\"](mapping, **keys)", "def custom_dictionary_from(nMarkers, markerSize, baseDictionary):\n pass", "def initMaps(self):\r\n assert isinstance(self.CLASSES, (list, tuple))\r\n assert self.CLASSES[0] == \"__background__\"\r\n cls = self.CLASSES\r\n self.name_to_id = dict(zip(cls, range(len(cls))))\r\n self.id_to_name = dict(zip(range(len(cls)), cls))", "def config_mapping(self) -> typing.Dict[str, type]:\n return self._subclasses", "def base_type_dict():\n return {'filter' : filters.Filter,\n 'global_options' : global_options.GlobalOptions,\n 'input_device' : input_devices.InputDevice,\n 'input_stream' : input_streams.InputStream,\n 'output_device' : output_devices.OutputDevice,\n 'output_stream' : output_streams.OutputStream}", "def _generate_batch(\n loader_source: Iterator[jnp.ndarray],\n loader_target: Iterator[jnp.ndarray],\n ) -> Dict[str, jnp.ndarray]:\n return {\n \"source\": next(loader_source),\n \"target\": next(loader_target),\n }", "def _map_segments(self, type_: Any) -> Dict:\n mapping: Dict = {}\n for seg in self.segments:\n if seg.name and isinstance(seg, type_):\n if mapping.get(seg.name) and mapping.get(seg.name) != seg:\n raise ValueError(f\"Duplicate segment: {seg.name}\")\n mapping[seg.name] = seg\n return mapping", "def _init_meg_map_dict(bands, length=0):\n\n # Initialize dictionary\n meg_map = dict()\n\n # Add oscillation bands\n for band in bands:\n meg_map[band] = np.zeros(length)\n\n return meg_map", "def Dictionary_create_from(nMarkers, markerSize, baseDictionary):\n pass", "def _load_sprites():\n sprite_list = []\n\n # load all static sprites\n for sprite_filepath in spull.STATIC_CHARTS:\n with open(os.path.normcase(sprite_filepath), \"r\") as sprite_file:\n sprite_list.extend(spull.pull_sprite_list_from_file(\n sprite_file,\n True\n ))\n\n # generate dict of static sprites\n sprite_db = {}\n for sprite_code in sprite_list:\n sprite_obj = StaticSprite(sprite_code)\n\n # immediately quit if invalid\n if sprite_obj.invalid:\n return None\n\n # otherwise add\n sprite_db[sprite_code] = sprite_obj\n\n # make as atl if possible\n atl_sprite = sprite_obj.make_atl()\n if atl_sprite is not None:\n sprite_db[atl_sprite.spcode] = atl_sprite\n\n return sprite_db", "def _make_buffers_maps(self):\n self.buffer_names = ['actions', 'states', 'states_next', 'rewards',\n 'terminals']\n\n self.buffer_types = ['1-dim', 'state_space', 'state_space',\n '1-dim', '1-dim']\n # TODO: make this more general buffer type will cause the image crash\n self.buffer_dtype = [np.int32, np.float32, np.float32,\n np.float32, np.bool]\n\n buffers = {}\n for i, name in enumerate(self.buffer_names):\n if self.buffer_types[i] != 
'1-dim':\n try:\n buffers[name] = ((eval('self.{}.n'.format(\n self.buffer_types[i])),),\n self.buffer_dtype[i])\n except AttributeError:\n buffers[name] = (eval('self.{}.shape'.format(\n self.buffer_types[i])), self.buffer_dtype[i])\n else:\n buffers[name] = ((1,), self.buffer_dtype[i])\n\n return buffers", "def add_decoder_image_sizes(instance_shape, common_module=common):\n return {\n common_module.TFSE_KEY_IMAGE_HEIGHT:\n tf.io.FixedLenFeature(instance_shape, dtype=tf.int64),\n common_module.TFSE_KEY_IMAGE_WIDTH:\n tf.io.FixedLenFeature(instance_shape, dtype=tf.int64),\n }", "def setup_loaders(labels, meta_dict):\n\n loaders = {}\n loader_kwargs = {}\n\n for k in labels.keys():\n k, l = loader_from_key(k)\n if l is not None:\n loaders[k] = l\n\n meta_loaders = retrieve(meta_dict, \"loaders\", default={})\n meta_loader_kwargs = retrieve(meta_dict, \"loader_kwargs\", default={})\n\n loaders.update(meta_loaders)\n\n for k, l in loaders.items():\n if l in DEFAULT_LOADERS:\n loaders[k] = DEFAULT_LOADERS[l]\n else:\n loaders[k] = get_obj_from_str(l)\n\n if k in meta_loader_kwargs:\n loader_kwargs[k] = meta_loader_kwargs[k]\n else:\n loader_kwargs[k] = {}\n\n return loaders, loader_kwargs", "def load_resource_map():\n # to avoid a circular dependency\n from coinbase_commerce.api_resources.base import APIResource\n global RESOURCE_MAP\n RESOURCE_MAP = {k.RESOURCE_NAME: k for k in APIResource.get_subclasses()\n if getattr(k, \"RESOURCE_NAME\", None)}", "def get_tile_mapping(image_names):\n tile_map = {}\n tile_num = 0\n\n # iterate over all files\n for file_name in image_names:\n with BioReader(file_name) as br:\n \n # iterate over tiles\n for x in range(0,br.X,tile_size):\n x_max = min([br.X,x+tile_size])\n for y in range(0,br.Y, tile_size):\n y_max = min([br.Y,y+tile_size])\n\n # add tile to tile_map\n tile_map[tile_num] = (file_name, (x,x_max), (y,y_max))\n tile_num+=1\n return tile_map", "def base_mappings():\n return {\n 'from_1': {\n 'to_1': {\n 'mol_1': ({}, {}, []),\n 'mol_2': ({}, {}, []),\n },\n },\n }", "def images_mapped(self):\n try:\n return dict([x for x in enumerate(self.images())])\n except:\n return None", "def create_map(width, height, pixels):\n\n\n\n\n def index_to_xy(i, width, height):\n \"\"\" Takes 0 based index going line wise from top\n left to bottom right, returns x, y coordinates so\n that 0,0 is on bottom left corner\n \"\"\"\n x = i % width\n y = i // width\n y*= -1\n y+= height - 1\n return (x,y)\n\n def place_terrain(type, i):\n \"\"\"This won't return anything, just do side effects\n\n The object \"gameLogic\" is used to place the object\n initially. It doesn't matter where this object is,\n as long as it exists. 
There must be an easier way,\n but this works.\n \"\"\"\n x,y = index_to_xy(i, width, height)\n\n object_name = terrain_types.get(type, \"water\")\n\n if ob[\"fast_create\"] > 0 and not (x%ob[\"fast_create\"] == 0 and y%ob[\"fast_create\"] == 0):\n return\n\n if object_name != \"water\":\n object = scene.addObject(object_name, \"gameLogic\")\n object.worldPosition = (x,y,0)\n\n\n list(map( (lambda tup : place_terrain(tup[1], tup[0])), list(enumerate(pixels)) ))", "def _load_children(self,\n children: Sequence, loader: Callable, *,\n address: metadata.Address, path: Tuple[int, ...],\n resources: Mapping[str, wrappers.MessageType]) -> Mapping:\n # Iterate over the list of children provided and call the\n # applicable loader function on each.\n answer = {}\n for child, i in zip(children, range(0, sys.maxsize)):\n wrapped = loader(child, address=address, path=path + (i,),\n resources=resources)\n answer[wrapped.name] = wrapped\n return answer", "def cache_maps(\n self,\n cache_path: Path,\n map_cache_class: Type[SceneCache],\n map_params: Dict[str, Any],\n ) -> None:\n pass", "def _init_loaders(self):\n @self.loaders_wrapper(\"nx2nx\")\n def get_nx2nx_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return loaders.nx2nx_loader(extractor, stream, transformers,\n self.loader_json[self.loader_name],\n graph)\n\n @self.loaders_wrapper(\"neo4j2nx\")\n def get_neo4j2nx_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return loaders.neo4j2nx_loader(extractor, stream, transformers,\n self.loader_json[self.loader_name],\n graph)\n\n\n @self.loaders_wrapper(\"neo4j2edgelist\")\n def get_neo4j2edgelist_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return loaders.neo4j2edgelist_loader(\n extractor,\n stream,\n transformers,\n self.loader_json[self.loader_name],\n graph\n )\n\n\n @self.loaders_wrapper(\"edgelist2neo4j\")\n def get_edgelist2neo4j_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return loaders.edgelist2neo4j_loader(\n extractor,\n stream,\n transformers,\n self.loader_json[self.loader_name],\n graph\n )", "def __init__(self):\n# This is the top container for all data. The gid is the global id (for a image).\n# Before calling convert most of the values are strings. Some additional\n# values are also calculated, see convert() for details. After calling\n# convert, most values are integers or floats where appropriat.\n # set through parser\n self.orientation = None\n self.tileheight = 0\n self.tilewidth = 0\n self.width = 0\n self.height = 0\n self.version = 0\n self.tile_sets = [] # TileSet\n self.layers = [] # WorldTileLayer <- what order? 
back to front (guessed)\n self.indexed_tiles = {} # {gid: (offsetx, offsety, image}\n self.object_groups = []\n self.properties = {} # {name: value}\n # additional info\n self.pixel_width = 0\n self.pixel_height = 0\n self.named_layers = {} # {name: layer}\n self.named_tile_sets = {} # {name: tile_set}\n self.map_file_name = \"\"\n self._image_loader = None", "def __init__(self):\n# This is the top container for all data. The gid is the global id (for a image).\n# Before calling convert most of the values are strings. Some additional\n# values are also calculated, see convert() for details. After calling\n# convert, most values are integers or floats where appropriat.\n # set through parser\n self.orientation = None\n self.tileheight = 0\n self.tilewidth = 0\n self.width = 0\n self.height = 0\n self.version = 0\n self.tile_sets = [] # TileSet\n self.layers = [] # WorldTileLayer <- what order? back to front (guessed)\n self.indexed_tiles = {} # {gid: (offsetx, offsety, image}\n self.object_groups = []\n self.properties = {} # {name: value}\n # additional info\n self.pixel_width = 0\n self.pixel_height = 0\n self.named_layers = {} # {name: layer}\n self.named_tile_sets = {} # {name: tile_set}\n self.map_file_name = \"\"\n self._image_loader = None", "def _get_type_mapping():\n return {\n Box.SPACE_NAME: Box,\n Dict.SPACE_NAME: Dict,\n Discrete.SPACE_NAME: Discrete\n }", "def load_dict(ranks_to_load = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'valet', 'dame', 'roi'],\n suits_to_load = ['hearts', 'diamonds', 'clubs', 'spades']):\n\n ranks = {}\n suits = {}\n for rank in ranks_to_load:\n img = binarize(skio.imread('../images/groundtruth/ranks/{}.jpg'.format(rank)))\n ranks[rank] = img\n for suit in suits_to_load:\n img = binarize(skio.imread('../images/groundtruth/suits/{}.jpg'.format(suit)))\n suits[suit] = img\n return ranks, suits", "def get_chunks_cache(chunks):\n bin_chunks = [binascii.unhexlify(chunk.encode('utf-8'))\n for chunk in chunks]\n links = LinkageEntity.query.filter(\n LinkageEntity.linkage_hash.in_(bin_chunks)).all()\n links_cache = {link.friendly_hash(): link for link in links}\n\n result = {}\n for chunk in chunks:\n result[chunk] = links_cache.get(chunk, None)\n\n return result", "def init_states(self, batch_size: int) -> NestedMap:\n raise NotImplementedError('Abstract method')", "def factory_type_dict():\n return {'filter' : filters.generate_filter,\n 'global_options' : global_options.generate_global_options,\n 'input_device' : input_devices.generate_input_device,\n 'input_stream' : input_streams.generate_input_stream,\n 'output_device' : output_devices.generate_output_device,\n 'output_stream' : output_streams.generate_output_stream}" ]
[ "0.6178685", "0.57068276", "0.5565248", "0.54777354", "0.5474463", "0.54582816", "0.53941673", "0.5388777", "0.533501", "0.5314602", "0.52988505", "0.5262939", "0.52612567", "0.5257605", "0.52540195", "0.5243595", "0.519796", "0.51598465", "0.51360023", "0.50686884", "0.5067276", "0.50628513", "0.5061579", "0.5060388", "0.5060388", "0.50495315", "0.50459", "0.5040018", "0.5034698", "0.5033047" ]
0.78368694
0
Send a request and return a response. If argument ``cache_key`` is not ``None``, session will check its cache before sending the request. For now, we don't support setting ``cache_key`` in ``request``. ``sticky_key`` is similar to ``cache_key`` except that it refers to an unbounded cache (thus the name "sticky"). If argument ``cache_revalidate`` is evaluated to true, session will revalidate the cache entry. If argument ``circuit_breaker_key`` is not ``None``, it will override the default key (request URL domain name).
async def __call__(self, request, **kwargs): cache_key = kwargs.pop('cache_key', None) sticky_key = kwargs.pop('sticky_key', None) cache_revalidate = kwargs.pop('cache_revalidate', None) if cache_key is not None and sticky_key is not None: raise AssertionError( 'expect at most one: cache_key=%r, sticky_key=%r' % (cache_key, sticky_key) ) if cache_key is not None: return await self._try_cache( self._cache, cache_key, cache_revalidate, request, kwargs, ) if sticky_key is not None: return await self._try_cache( self._unbounded_cache, sticky_key, cache_revalidate, request, kwargs, ) circuit_breaker_key = kwargs.pop('circuit_breaker_key', None) if circuit_breaker_key is None: circuit_breaker_key = urllib.parse.urlparse(request.url).netloc breaker = self._circuit_breakers.get(circuit_breaker_key) for retry_count in itertools.count(): # Check rate limit out of the breaker async-with context to # avoid adding extra delay in the context so that, when the # breaker is in YELLOW state, another request may "go" into # the context as soon as the previous one completes. await self._rate_limit() async with breaker: response, backoff = await self._loop_body( request, kwargs, breaker, retry_count ) if response is not None: return response # Call `sleep` out of the breaker async-with context for the # same reason above. await timers.sleep(ASSERT.not_none(backoff)) ASSERT.unreachable('retry loop should not break')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send(self, request, **kw):\r\n if request.method == 'GET':\r\n cached_response = self.controller.cached_request(request)\r\n if cached_response:\r\n return self.build_response(request, cached_response, from_cache=True)\r\n\r\n # check for etags and add headers if appropriate\r\n request.headers.update(self.controller.conditional_headers(request))\r\n\r\n resp = super(CacheControlAdapter, self).send(request, **kw)\r\n\r\n return resp", "def make_request(self, key):\n params = {'requestedKey': key}\n res = requests.get(url=self.proxy_url, params=params)\n return res", "def dispatch(self, *args, **kwargs):\n cache_allowed = self.is_cache_allowed()\n logging.debug('%s: caching is %s', self.request.path, 'allowed' if cache_allowed else 'NOT allowed', )\n\n response = None\n cache_hit = False\n if cache_allowed: # get from cache\n response = yield self.get_cached()\n cache_hit = True if response is not None else False\n logging.debug('%s: cache %s', self.request.uri, 'HIT' if cache_hit else 'MISS')\n\n if response is None: # get actual\n response = yield self.proxy_async_request()\n\n if cache_allowed:\n if 200 <= response.code <= 299: # store into cache\n yield self.set_cache(response)\n logging.debug('%s: status %d - stored in cache', self.request.uri, response.code)\n else:\n logging.debug('%s: error status %d', self.request.uri, response.code)\n\n # output proxied response\n self.process_response(response)\n self.finish()\n\n if cache_allowed:\n if cache_hit: # renew cache if cache hit\n yield self.renew_cache(self.proxy_async_request)\n logging.debug('%s: slow endpoint, cache %s', self.request.path, 'updated' if cache_hit else 'NOT updated')", "def _request(self, *arg, **kwarg):\n return self.request_session.request(*arg, **kwarg)", "def send_blocking(self, request, **kwargs):\n LOG.debug('send: %r, kwargs=%r', request, kwargs)\n\n # ``requests.Session.get`` and friends do a little more than\n # ``requests.Session.request``; so let's use the former.\n method = getattr(self._session, request.method.lower())\n\n # ``kwargs`` may overwrite ``request._kwargs``.\n final_kwargs = request._kwargs.copy()\n final_kwargs.update(kwargs)\n\n source = method(request.url, **final_kwargs)\n stream = final_kwargs.get('stream')\n if stream:\n response = source\n else:\n try:\n response = Response(\n source,\n source.content, # Force consuming the content.\n )\n finally:\n source.close()\n\n try:\n response.raise_for_status()\n except Exception:\n # Force consuming the content. 
In case caller sets\n # stream=True, this ensures that exc.response.content is not\n # empty.\n response.content # pylint: disable=pointless-statement\n # On error, close the original response for the caller since\n # the caller usually forgets to do this.\n response.close()\n raise\n\n return response", "def _request(self, *args):\n self._silent_request(*args)\n return self._get_response()", "def _request(self, method, *args, **kwargs):\n if not \"headers\" in kwargs:\n kwargs[\"headers\"] = self._headers\n return self._session.request(method, self._url(*args), **kwargs)", "def do_cache(*args, **kws):\n resp = self.response\n out = resp.out\n namespace = ''\n if self.cache_nsfuncs.get(func, None):\n namespace = self.cache_nsfuncs[func](self.request)\n p = urlsplit(self.request.url)[2]\n c = memcache.get(p, namespace)\n if c:\n # in case cache is found, use it \n # instead of rendering by calling function.\n out.write(c['body'])\n for k, i in c['hdr'].items():\n resp.headers[k] = i\n return\n\n r = func(*args, **kws)\n expire = self.cache_expires.get(func, 0)\n if expire == 0:\n return\n out.seek(0)\n try:\n p = urlsplit(self.request.url)[2]\n memcache.set(p, {'hdr':resp.headers,'body':out.read()},\n expire, namespace=namespace)\n logging.debug('%s is cahed' % p)\n except:\n memcache.flush_all()\n logging.debug('memcache is flashed.')", "def _request(cls, method, url, request=None, keep_trying=False, *args,\n **kwargs):\n\n stream = kwargs.pop('stream', False)\n prepared_request = requests.Request(\n method, url, *args, **kwargs).prepare()\n\n # prepared_request.headers[\"Content-Type\"] = \"application/json\"\n if request and request.alice_id:\n prepared_request.headers[\"Cookie\"] = \"sessionid={}\".format(\n request.alice_id\n )\n\n url = urlsplit(url)\n path = bytes(url.path, \"utf-8\")\n if url.query:\n path += bytes(\"?{}\".format(url.query), \"utf-8\")\n salt = bytes(settings.UI_SECRET, \"utf-8\")\n\n body = prepared_request.body or b\"\"\n if isinstance(body, str):\n body = bytes(body, \"utf-8\")\n\n signature = sha256(path + body + salt).hexdigest()\n prepared_request.headers[\"X-Signature\"] = signature\n\n response = cls.send_request(prepared_request, keep_trying, stream=stream)\n\n if response.status_code > 299:\n logger.error(\"Rabbit error: {} - {}\".format(\n response.status_code,\n response.content\n ))\n\n if response.status_code == 403:\n raise RabbitException(\n \"\"\" Data server access is failing for {} requests to {}\n with error {}. request: {}. 
alice_id {}\n \"\"\".format(\n method,\n str(path, \"utf-8\"),\n response.content,\n request,\n request.alice_id if request else None,\n )\n )\n\n return response", "def _send_api_request(self, request, captcha_response=None):\n url = self.API_URL + request.method_name\n\n # Prepare request arguments\n method_kwargs = {'v': self.api_version}\n\n # Shape up the request data\n for values in (request.method_args,):\n method_kwargs.update(stringify_values(values))\n\n if self.is_token_required() or self._service_token:\n # Auth api call if access_token hadn't been gotten earlier\n method_kwargs['access_token'] = self.access_token\n\n if captcha_response:\n method_kwargs['captcha_sid'] = captcha_response['sid']\n method_kwargs['captcha_key'] = captcha_response['key']\n\n http_params = dict(url=url,\n data=method_kwargs,\n **request.http_params)\n logger.debug('send_api_request:http_params: %s', http_params)\n response = self.http_session.post(**http_params)\n return response", "async def get_response(self, key: str) -> Optional[CachedResponse]:\n # Attempt to fetch response from the cache\n logger.debug(f'Attempting to get cached response for key: {key}')\n try:\n if not await self.responses.contains(key):\n key = str(await self.redirects.read(key))\n response = await self.responses.read(key)\n except (KeyError, TypeError):\n logger.debug('No cached response found')\n return None\n if not isinstance(response, CachedResponse):\n logger.debug('Cached response is invalid')\n return None\n # If the item is expired or filtered out, delete it from the cache\n if not self.is_cacheable(response):\n logger.info('Cached response expired; deleting')\n await self.delete(key)\n return None\n\n # Optionally update last_used time\n if self.lru:\n response.last_used = datetime.utcnow()\n await self.responses.write(key, response)\n\n logger.info(f'Cached response found for key: {key}')\n return response", "def update_cached_response(self, request, response):\r\n cache_url = self.cache_url(request.url)\r\n\r\n cached_response = self.serializer.loads(request, self.cache.get(cache_url))\r\n\r\n if not cached_response:\r\n # we didn't have a cached response\r\n return response\r\n\r\n # Lets update our headers with the headers from the new request:\r\n # http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1\r\n #\r\n # The server isn't supposed to send headers that would make\r\n # the cached body invalid. But... 
just in case, we'll be sure\r\n # to strip out ones we know that might be problmatic due to\r\n # typical assumptions.\r\n excluded_headers = [\r\n \"content-length\",\r\n ]\r\n\r\n cached_response.headers.update(\r\n dict((k, v) for k, v in response.headers.items()\r\n if k.lower() not in excluded_headers)\r\n )\r\n\r\n # we want a 200 b/c we have content via the cache\r\n cached_response.status = 200\r\n\r\n # update our cache\r\n self.cache.set(\r\n cache_url,\r\n self.serializer.dumps(request, cached_response),\r\n )\r\n\r\n return cached_response", "def get_request_session(self, key):\n with self.GLOB_LOCK:\n inst = self._request_sessions.get(key, None)\n if inst is not None:\n inst.touch()\n\n return inst", "def send_request(self, view_name, params=None, expected_response_code=200):\n\n url = reverse(viewname=view_name, kwargs=params)\n\n client = Client()\n response = client.get(path=url, data=params)\n\n self.assertEqual(expected_response_code, response.status_code)\n\n return response", "def send_request(self, request, strip=None, retry=True):\n\n data = request.to_xml()\n\n if self.debug:\n self.log('sending:\\n{0}'.format(data))\n try:\n resp = self._handle.open(self.request_url, data)\n resp_str = resp.read()\n if self.debug:\n self.log('received:\\n{0}'.format(resp_str))\n\n return XGResponse.fromstring(request, resp_str, strip)\n except AuthenticationError as e:\n self._closed = True\n if self.keepalive and retry:\n self.log('Attempting keepalive reconnect')\n if self.login():\n return self.send_request(request, strip, False)\n raise e\n except (urllib2.HTTPError, urllib2.URLError) as e:\n msg = '{0}: {1}'.format(e.__class__.__name__, e)\n self.log(msg)\n raise NetworkError(msg)", "def __req(self, route_or_uri, params, query, op, raw_response, **kwargs):\n from pykern.pkdebug import pkdlog, pkdexc, pkdc, pkdp\n from pykern import pkjson\n from sirepo import uri, util, reply\n\n redirects = kwargs.setdefault(\"__redirects\", 0) + 1\n assert redirects <= 5\n kwargs[\"__redirects\"] = redirects\n\n u = None\n r = None\n try:\n u = uri.server_route(route_or_uri, params, query)\n pkdc(\"uri={}\", u)\n r = op(u)\n pkdc(\n \"status={} data={}\",\n r.status_code,\n \"<snip-file>\" if \"download-data-file\" in u else r.data,\n )\n # Emulate code in sirepo.js to deal with redirects\n if r.status_code == 200 and r.mimetype == \"text/html\":\n m = _JAVASCRIPT_REDIRECT_RE.search(pkcompat.from_bytes(r.data))\n if m:\n if m.group(1).endswith(\"#/error\"):\n raise util.Error(\n PKDict(error=\"server error uri={}\".format(m.group(1))),\n )\n if kwargs.get(\"redirect\", True):\n # Execute the redirect\n return self.__req(\n m.group(1),\n params=None,\n query=None,\n op=self.get,\n raw_response=raw_response,\n __redirects=redirects,\n )\n return r.change_to_redirect(m.group(1))\n if r.status_code in (301, 302, 303, 305, 307, 308):\n if kwargs.get(\"redirect\", True):\n # Execute the redirect\n return self.__req(\n r.headers[\"Location\"],\n params=None,\n query=None,\n op=self.get,\n raw_response=raw_response,\n __redirects=redirects,\n )\n if raw_response:\n return r\n # Treat SRException as a real exception (so we don't ignore them)\n d = pkjson.load_any(r.data)\n if isinstance(d, dict) and d.get(\"state\") == reply.SR_EXCEPTION_STATE:\n raise util.SRException(\n d.srException.routeName,\n d.srException.params,\n )\n return d\n except Exception as e:\n if not isinstance(e, (util.ReplyExc)):\n pkdlog(\n \"Exception: {}: msg={} uri={} status={} data={} stack={}\",\n type(e),\n e,\n u,\n r and 
r.status_code,\n r and r.data,\n pkdexc(),\n )\n raise", "def build_response(self, request, response, from_cache=False):\r\n if not from_cache and request.method == 'GET':\r\n if response.status == 304:\r\n # We must have sent an ETag request. This could mean\r\n # that we've been expired already or that we simply\r\n # have an etag. In either case, we want to try and\r\n # update the cache if that is the case.\r\n cached_response = self.controller.update_cached_response(\r\n request, response\r\n )\r\n\r\n if cached_response is not response:\r\n from_cache = True\r\n\r\n response = cached_response\r\n else:\r\n # Wrap the response file with a wrapper that will cache the\r\n # response when the stream has been consumed.\r\n response._fp = CallbackFileWrapper(\r\n response._fp,\r\n functools.partial(\r\n self.controller.cache_response,\r\n request,\r\n response,\r\n )\r\n )\r\n\r\n resp = super(CacheControlAdapter, self).build_response(\r\n request, response\r\n )\r\n\r\n # See if we should invalidate the cache.\r\n if request.method in self.invalidating_methods and resp.ok:\r\n cache_url = self.controller.cache_url(request.url)\r\n self.cache.delete(cache_url)\r\n\r\n # Give the request a from_cache attr to let people use it\r\n resp.from_cache = from_cache\r\n\r\n return resp", "def _request(self, request_method, url, *args, **kwargs):\n\n full_url = self.get_full_url(url)\n\n self.logger.info('Calling %s url: %s', request_method, full_url)\n\n request_args = self.get_request_args(kwargs)\n\n request = NapRequest(request_method, full_url, *args, **request_args)\n\n for mw in self.model._meta['middleware']:\n request = mw.handle_request(request)\n\n resource_response = request.send()\n response = NapResponse(\n url=request.url,\n status_code=resource_response.status_code,\n headers=resource_response.headers,\n content=resource_response.content,\n request_method=request_method,\n )\n\n for mw in reversed(self.model._meta['middleware']):\n response = mw.handle_response(request, response)\n\n return response", "def make_request(self, action, data=None):\n response = self.call_opener(action, data)\n if self.login_path in response.geturl():\n # client has be rerouted to login\n login_response = self.login(response)\n response_parse = urllib.parse.urlparse(response.geturl())\n login_parse = urllib.parse.urlparse(login_response.geturl())\n if response_parse[2] == login_parse[2]: \n # still on login page\n return response \n else:\n # retry request once more\n return self.call_opener(action, data)\n else:\n return response", "def request(self, method, url, headers=None, params=None,\n data=None, raw_response=False):\n kwargs = dict(self.requests, **{\n 'headers': headers or {},\n 'params': params or {},\n 'data': data or {},\n })\n\n if 'Content-Type' not in kwargs['headers'] and method in ('post',\n 'put'):\n kwargs['data'] = json.dumps(data)\n kwargs['headers']['Content-Type'] = 'application/json'\n\n if self.impersonate is not None:\n kwargs['headers']['X-Redmine-Switch-User'] = self.impersonate\n\n # We would like to be authenticated by API key by default\n if self.key is not None:\n kwargs['params']['key'] = self.key\n if self.username and self.password:\n kwargs['auth'] = (self.username, self.password)\n if self.auth_cookie:\n kwargs['cookies'] = dict(auth_pubtkt=self.auth_cookie)\n\n response = getattr(requests, method)(url, **kwargs)\n\n if response.status_code in (200, 201):\n if raw_response:\n return response\n elif not response.content.strip():\n return True\n else:\n return 
response.json()\n elif response.status_code == 401:\n raise AuthError\n elif response.status_code == 404:\n raise ResourceNotFoundError\n elif response.status_code == 409:\n raise ConflictError\n elif response.status_code == 412 and self.impersonate is not None:\n raise ImpersonateError\n elif response.status_code == 413:\n raise RequestEntityTooLargeError\n elif response.status_code == 422:\n raise ValidationError(to_string(', '.join(\n response.json()['errors'])))\n elif response.status_code == 500:\n raise ServerError\n\n raise UnknownError(response.status_code)", "def cache():\n is_conditional = request.headers.get(\"If-Modified-Since\") or request.headers.get(\n \"If-None-Match\"\n )\n\n if is_conditional is None:\n response = view_get()\n response.headers[\"Last-Modified\"] = http_date()\n response.headers[\"ETag\"] = uuid.uuid4().hex\n return response\n else:\n return status_code(304)", "def process_response(self, request, response):\n #if not self._should_update_cache(request, response):\n # # We don't need to update the cache, just return.\n # return response\n\n if response.streaming or response.status_code != 200:\n return response\n \n # Don't cache responses that set a user-specific (and maybe security\n # sensitive) cookie in response to a cookie-less request.\n if not request.COOKIES and response.cookies and has_vary_header(response, 'Cookie'):\n return response\n\n # Try to get the timeout from the \"max-age\" section of the \"Cache-\n # Control\" header before reverting to using the default cache_timeout\n # length.\n timeout = get_max_age(response)\n if timeout == None:\n timeout = self.cache_timeout\n elif timeout == 0:\n # max-age was set to 0, don't bother caching.\n return response\n patch_response_headers(response, timeout)\n if timeout:\n cache_key = \"%s-%s\" % (self.key_prefix, request.get_full_path())\n #raise ValueError(cache_key)\n if hasattr(response, 'render') and isinstance(response.render, collections.Callable):\n response.add_post_render_callback(\n lambda r: cache._cache.set(cache_key.encode(\"utf-8\"), zlib.compress(r.content, 9), timeout)\n )\n else:\n # we use the highest compression level, because since it is cached we hope for it to pay off\n cache._cache.set(cache_key.encode(\"utf-8\"), zlib.compress(response.content, 9), timeout)\n return response", "def __call__(self, client=None, REQUEST={}, RESPONSE=None, **kw):\n if not self._cache_namespace_keys:\n data = self.ZCacheable_get(default=_marker)\n if data is not _marker:\n # Return cached results.\n return data\n\n __traceback_supplement__ = (PathTracebackSupplement, self)\n kw['document_id'] = self.getId()\n kw['document_title'] = self.title\n if hasattr(self, 'aq_explicit'):\n bself = self.aq_explicit\n else:\n bself = self\n\n security = getSecurityManager()\n security.addContext(self)\n\n try:\n if client is None:\n # Called as subtemplate, so don't need error propagation!\n r = HTML.__call__(self, bself, REQUEST, **kw)\n if RESPONSE is None:\n result = r\n else:\n result = decapitate(r, RESPONSE)\n if not self._cache_namespace_keys:\n self.ZCacheable_set(result)\n return result\n\n r = HTML.__call__(self, (client, bself), REQUEST, **kw)\n\n if RESPONSE is None or not isinstance(r, str):\n if not self._cache_namespace_keys:\n self.ZCacheable_set(r)\n return r\n\n finally:\n security.removeContext(self)\n\n have_key = RESPONSE.headers.__contains__\n if not (have_key('content-type') or have_key('Content-Type')):\n if 'content_type' in self.__dict__:\n c = self.content_type\n else:\n encoding = 
getattr(self, 'encoding', default_encoding)\n c, e = guess_content_type(self.getId(), r.encode(encoding))\n RESPONSE.setHeader('Content-Type', c)\n result = decapitate(r, RESPONSE)\n if not self._cache_namespace_keys:\n self.ZCacheable_set(result)\n return result", "def get_request(self: 'WebScraper', \n url: str, \n params: Union[dict, None] = None\n ) -> req.Response:\n res = None\n while True:\n try: \n res = self.session.get(url, params=params)\n break\n except req.RequestException: \n # If a requests exception is raised, then some kind of HTTP \n # error occured, so wait for one second before re-attempting \n # the request\n time.sleep(1)\n continue\n return res", "def require_idempotency_key(view_func):\n\n @wraps(view_func)\n def wrapped_view(*args, **kwargs):\n # args can contain either (HttpRequest,) or (ViewSet, HttpRequest).\n view_set, request = args if len(args) > 1 else (None, *args)\n\n # If a method in SAFE_METHODS just return a response.\n if request.method in idempotency_settings.SAFE_METHODS:\n return view_func(*args, *kwargs)\n\n # Try to get idempotency key from headers.\n idempotency_key_from_header = request.META.get(idempotency_settings.HEADER)\n if not idempotency_key_from_header:\n return idempotency_settings.BAD_RESPONSE_FUNCTION(\n \"Idempotency key is missing. \"\n \"Generate a unique key and specify it in the header\"\n )\n\n # Generate a hashed cache key.\n key = idempotency_settings.GET_CACHE_KEY_FUNCTION(\n request, idempotency_key_from_header\n )\n # Get hashed value of the request's body.\n request_body_hash = hashlib.sha256(request.body).hexdigest()\n\n # Acquire distributed lock while processing the request.\n mutex = idempotency_settings.LOCK_CLASS()\n with mutex.lock(name=f\"Idempotency_{key}\"):\n # Try to get the cached value.\n storage = idempotency_settings.STORAGE_CLASS()\n value_from_cache = storage.get(key)\n\n if value_from_cache is None:\n response = view_func(*args, **kwargs)\n\n # We need to finalize response for the ViewSet action.\n if view_set is not None:\n response = view_set.finalize_response(request, response)\n\n # Store hash value of request body with the rendered response\n # in the cache only if the response is success.\n if is_success(response.status_code):\n storage.set(\n key,\n (\n request_body_hash,\n response.render()\n if hasattr(response, \"render\")\n else response,\n ),\n )\n return response\n\n # Otherwise, process cached value.\n cached_request_body_hash, cached_response = value_from_cache\n # The current request body hash and cached value are the same.\n if request_body_hash == cached_request_body_hash:\n return cached_response\n # The same idempotency key was used with a different request body.\n return idempotency_settings.BAD_RESPONSE_FUNCTION(\n \"You've already used this idempotency key. 
\"\n \"Please, repeat the request with another idempotency key.\",\n )\n\n return wrapped_view", "def request_safe(self, url_request_safe, method='GET', headers_request_safe=None, data_request_safe=None, params_request_safe=None, mood_looptry=False, stream_request_safe=False):\n while True:\n if method == 'GET':\n tic = time.time()\n try:\n res_request_safe = self.session.get(\n url_request_safe,\n headers=headers_request_safe,\n stream=stream_request_safe,\n params=params_request_safe,\n timeout=4\n )\n except requests.exceptions.Timeout:\n self.time_request.append(time.time()-tic)\n print('time out request')\n continue\n self.time_request.append(time.time()-tic)\n # status code filter\n if res_request_safe.status_code in [500,560]:\n print(f'{res_request_safe.status_code} status code')\n time.sleep(2)\n continue\n return True, res_request_safe\n elif method == 'POST':\n tic = time.time()\n try:\n res_request_safe = self.session.post(\n url_request_safe,\n headers=headers_request_safe,\n data=data_request_safe,\n stream=stream_request_safe,\n params=params_request_safe\n )\n except requests.exceptions.Timeout:\n self.time_request.append(time.time()-tic)\n print('time out request')\n continue\n self.time_request.append(time.time()-tic)\n # status code filter\n if res_request_safe.status_code in [500]:\n print(f'{res_request_safe.status_code} status code')\n time.sleep(2)\n continue\n return True, res_request_safe", "def request(self, method, url, params=None, json=None, data=None, affinity=None, streaming=False, raw_response=False):\n params = params or {}\n json = json or {}\n data = data or {}\n if self.api_entry_point.endswith('/') and url.startswith('/'):\n url = '{}{}'.format(self.api_entry_point, url[1:])\n else:\n url = '{}{}'.format(self.api_entry_point, url)\n method = method.upper()\n if method not in ['GET', 'POST']:\n raise ValueError(\"method should be in ['GET', 'POST']\")\n\n headers = self.session.headers.copy()\n if affinity is not None:\n headers['HC-WorkerAffinity'] = affinity\n\n if method == 'POST' and streaming:\n # Create new data with encoder\n encoder = MultipartEncoder(fields=data)\n\n multi_data = MultipartEncoderMonitor(encoder, None)\n\n headers['Content-Type'] = multi_data.content_type\n resp = self.session.request(method, url, params=params, json=json, data=multi_data, headers=headers)\n else:\n resp = self.session.request(method, url, params=params, json=json, data=data, headers=headers)\n\n if raw_response:\n return resp\n\n if not resp.ok:\n raise requests.exceptions.HTTPError(\n 'Error while trying to do a {} at {}. 
Reason is {}\\nResponse content: {}'.format(method, url,\n resp.reason,\n resp.text),\n response=resp.status_code,\n request=url)\n try:\n return resp.json()\n except Exception:\n return resp.content", "def request( key, server, node, netrc=os.getenv('NETRC', os.path.join(os.path.expanduser('~'), '.netrc')), verbose=False ):\n ### format and send the packet\n packet = Packet(server, node, ptype='request', key=key)\n if verbose:\n print( \"%s->%s : %s\"%(server, node, packet.dumps()) )\n send( packet, server, node, netrc, verbose=verbose )", "def _request(self, method, url,\n params=None, data=None, headers=None,\n files=None, save=False, savedir='', timeout=None, cache=None,\n stream=False, auth=None, continuation=True, verify=True,\n allow_redirects=True,\n json=None, return_response_on_save=False):\n\n if cache is None: # Global caching not overridden\n cache = cache_conf.cache_active\n\n if save:\n local_filename = url.split('/')[-1]\n if os.name == 'nt':\n # Windows doesn't allow special characters in filenames like\n # \":\" so replace them with an underscore\n local_filename = local_filename.replace(':', '_')\n\n local_filepath = os.path.join(savedir or self.cache_location or '.', local_filename)\n\n response = self._download_file(url, local_filepath, cache=cache, timeout=timeout,\n continuation=continuation, method=method,\n allow_redirects=allow_redirects,\n auth=auth, params=params, data=data, headers=headers,\n files=files, json=json)\n if return_response_on_save:\n return local_filepath, response\n else:\n return local_filepath\n else:\n query = AstroQuery(method, url, params=params, data=data, headers=headers,\n files=files, timeout=timeout, json=json)\n if not cache:\n with cache_conf.set_temp(\"cache_active\", False):\n response = query.request(self._session, stream=stream,\n auth=auth, verify=verify,\n allow_redirects=allow_redirects,\n json=json)\n else:\n response = query.from_cache(self.cache_location, cache_conf.cache_timeout)\n if not response:\n response = query.request(self._session,\n self.cache_location,\n stream=stream,\n auth=auth,\n allow_redirects=allow_redirects,\n verify=verify,\n json=json)\n to_cache(response, query.request_file(self.cache_location))\n\n self._last_query = query\n return response", "def send_request(request):\n info = evmapy.util.get_app_info()\n client_socket_name = '%s-client.%d.socket' % (info['name'], os.getpid())\n client_socket_path = os.path.join(info['config_dir'], client_socket_name)\n try:\n client_socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)\n client_socket.bind(client_socket_path)\n os.chmod(client_socket_path, stat.S_IRUSR | stat.S_IWUSR)\n request_data = json.dumps(request).encode()\n client_socket.sendto(request_data, _get_control_socket_path())\n if request['wait']:\n (read_fds, _, _) = select.select([client_socket], [], [], 1.0)\n if read_fds:\n data = client_socket.recv(1024)\n return json.loads(data.decode())\n else:\n raise TimeoutError\n finally:\n os.remove(client_socket_path)" ]
[ "0.56710464", "0.5380218", "0.5277406", "0.51162297", "0.49647447", "0.49608576", "0.49380323", "0.4910418", "0.48294824", "0.4757973", "0.46899825", "0.46096906", "0.45851278", "0.45390433", "0.4521895", "0.45152617", "0.450051", "0.44955355", "0.44897163", "0.44563928", "0.44560644", "0.44555423", "0.44095165", "0.44005615", "0.4399897", "0.43967322", "0.43938938", "0.4392424", "0.43869793", "0.4385192" ]
0.60925895
0
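The record above documents a session `__call__` that dispatches on `cache_key` / `sticky_key` / `cache_revalidate` before falling back to a rate-limited, circuit-broken retry loop. As a rough, hypothetical illustration of just the keyed-cache dispatch described in that docstring (the class and function names below are invented for this sketch and are not the library's actual implementation):

```python
# Hypothetical, self-contained sketch of the keyed-cache dispatch idea:
# at most one of cache_key / sticky_key may be given, and a hit is returned
# without issuing the request unless revalidation is requested.

class SimpleCachingClient:
    def __init__(self, fetch):
        self._fetch = fetch          # callable(url) -> response
        self._cache = {}             # bounded cache in the real thing; plain dict here
        self._sticky = {}            # unbounded ("sticky") cache

    def call(self, url, *, cache_key=None, sticky_key=None, cache_revalidate=False):
        if cache_key is not None and sticky_key is not None:
            raise AssertionError('expect at most one of cache_key/sticky_key')
        cache, key = ((self._cache, cache_key) if cache_key is not None
                      else (self._sticky, sticky_key))
        if key is None:
            return self._fetch(url)              # no caching requested
        if not cache_revalidate and key in cache:
            return cache[key]                    # cache hit
        response = self._fetch(url)              # miss or forced revalidation
        cache[key] = response
        return response


if __name__ == '__main__':
    calls = []
    client = SimpleCachingClient(lambda url: calls.append(url) or f'body of {url}')
    client.call('https://example.com/a', cache_key='a')
    client.call('https://example.com/a', cache_key='a')                      # served from cache
    client.call('https://example.com/a', cache_key='a', cache_revalidate=True)
    print(calls)   # two underlying fetches: the initial miss plus the revalidation
```

The rate limiting, circuit breaker, and retry/backoff parts of the real method are deliberately left out of this sketch.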
Update cookies with a dictlike object.
def update_cookies(self, cookie_dict): requests.cookies.cookiejar_from_dict( cookie_dict, self._session.cookies )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self, cookie=None):\n if isinstance(cookie, str):\n self.__udpate_str(cookie)\n if isinstance(cookie, dict):\n self.__update_dict(cookie)\n if isinstance(cookie, requests.cookies.RequestsCookieJar):\n d = cookie.get_dict()\n for key, val in d.items():\n self.__data_dict[key.strip()] = val.strip()", "def __update_dict(self, cookie):\n for key, val in cookie.items():\n val = str(val)\n data = re.findall(\".+?=.+?;\", val.replace('Set-Cookie:', ''))\n store_cookie_str = data[0]\n self.__update_to_data(store_cookie_str)", "def update(self, cookies):\n self._update(cookies, domain=None, path=None)", "def _update_cookies():\n global SESSION\n SESSION.cookies = browsercookie.chrome()", "def _update_cookie(self, encoded_data, response):\n if encoded_data:\n response.set_cookie(\n self.cookie_name,\n encoded_data,\n domain=settings.SESSION_COOKIE_DOMAIN,\n secure=settings.SESSION_COOKIE_SECURE or None,\n httponly=settings.SESSION_COOKIE_HTTPONLY or None,\n samesite=settings.SESSION_COOKIE_SAMESITE,\n )\n else:\n response.delete_cookie(\n self.cookie_name,\n domain=settings.SESSION_COOKIE_DOMAIN,\n samesite=settings.SESSION_COOKIE_SAMESITE,\n )", "def set_cookie(self, cookie):\n c = self._cookies\n if cookie.domain not in c:\n c[cookie.domain] = {}\n c2 = c[cookie.domain]\n if cookie.path not in c2:\n c2[cookie.path] = {}\n c3 = c2[cookie.path]\n c3[cookie.name] = cookie", "def set_cookies(self, cookies_list):\n self.cookies = cookies_list", "def set_cookie( cookies, name, morsel, **kwargs ) :", "async def _set_cookies(self, page: Page, cookies: Union[List[Dict[str, str]], Dict[str, str]]) -> None:\r\n if isinstance(cookies, dict):\r\n await page.setCookie(cookies)\r\n elif isinstance(cookies, (list, tuple, set)):\r\n await asyncio.gather(\r\n *[page.setCookie(cookie) for cookie in cookies])", "def set_cookie( name, value, **kwargs ) :", "def refresh(self):\n\n self.cookies = dict()\n\n cursor = self.conn.cursor()\n query = 'SELECT baseDomain, name from moz_cookies ORDER BY id'\n cursor.execute(query)\n\n for (base_domain, cookie_name) in cursor.fetchall():\n ascii_cookie = cookie_name.encode('ascii', 'ignore')\n ascii_domain = base_domain.encode('ascii', 'ignore')\n\n if not ascii_domain in self.cookies:\n self.cookies[ascii_domain] = list()\n self.cookies[ascii_domain].append(ascii_cookie)", "def reload_cookies(self):\n\n if os.path.exists(self.location_of_cookies):\n with open(self.location_of_cookies, 'rb') as f:\n cookies = pickle.load(f)\n self.load_cookies(cookies, self.cookie_domain)\n \n f.close()", "def _set_request_cookies(self, request):\n response_cookies = request.response.cookies\n for key, cookie in response_cookies.items():\n request.cookies[key] = cookie['value']", "def set_cookie_data(storage, messages, invalid=False, encode_empty=False):\n encoded_data = storage._encode(messages, encode_empty=encode_empty)\n if invalid:\n # Truncate the first character so that the hash is invalid.\n encoded_data = encoded_data[1:]\n storage.request.COOKIES = {CookieStorage.cookie_name: encoded_data}\n if hasattr(storage, '_loaded_data'):\n del storage._loaded_data", "def create_or_update(cls, session, username, password, cookies):\n cookie = session.query(Cookie). 
\\\n filter(Cookie.username == username).one_or_none()\n\n password = encode_passwd(password)\n\n if cookie:\n cookie.password = password\n cookie.cookie = cookies\n cookie.updated_at = text('NOW()')\n else:\n cookie = Cookie(username=username, password=password, cookie=cookies)\n session.add(cookie)", "def update_session(user):\n\n # Setup/update cookie\n user.cookie = token_urlsafe(64)\n user.cookie_expiration = datetime.now() + timedelta(hours=2)\n\n # Commit\n db.session.add(user)\n db.session.commit()\n\n cookie = user.cookie\n return cookie", "def cookies(self):\r\n return Dict(**self._get_cookies())", "def add_cookies(self, cookies):\n self.base_driver.add_cookie(cookie_dict=cookies)", "def saveCookie(self, resp):\n #save Cookie\n if resp.has_key('set-cookie'):\n self.updateHeaders('Cookie', resp['set-cookie'])\n print '--', 'Save cookie : ', resp['set-cookie']", "def setCookie(self, key, value):\n self.PDFreactorConfiguration.in1[\"cookies\"].append([key, value])", "def setCookieFile(self, cookie):\n if os.path.isfile(cookie):\n jc = jsoncookie.jsoncookie()\n jc.open(cookie)\n self.cookiejar = jc.cookiejar(self.server)\n jc.close()", "def set_cookie(headers, cookie_string):\n headers.append((b'Set-Cookie', isomorphic_encode(cookie_string)))", "def cookies(self):\n return Dict(**self._get_cookies())", "def _update_object(self, data_dict):\r\n pass", "def set_cookies():\n\n cookies = dict(request.args.items())\n r = app.make_response(redirect(url_for(\"view_cookies\")))\n for key, value in cookies.items():\n r.set_cookie(key=key, value=value, secure=secure_cookie())\n\n return r", "def _update_loose (self, dict):\n self.__dict__.update(dict)", "def set_object():\n posted_json = request.get_json(force=True)\n contents = posted_json.get('contents', None)\n if contents is None:\n r = jsonify(message=\"Not all required keys are present\", success=False, status_code=400)\n r.status_code = 400\n return r\n\n key = cache_utils.set(contents)\n return jsonify(key=key, success=True)", "def cookie_from_dict(dct):\n if 'name' not in dct or 'value' not in dct:\n raise TypeError('Cookie dictionary must contain name and value')\n\n cookie_kwargs = _cookie_attrs.copy()\n cookie_kwargs['rest'] = {}\n\n extra_args = set(dct) - set(cookie_kwargs)\n if extra_args:\n err = 'Unexpected keys in Cookie dictionary: {}'\n raise TypeError(err.format(sorted(extra_args)))\n\n cookie_kwargs.update(dct)\n for key, func in _bool_attrs:\n cookie_kwargs[key] = func(cookie_kwargs)\n\n return _Cookie(**cookie_kwargs)", "def update_auth_data(self, auth_data: AuthData) -> None:\n self.auth_data.update(auth_data)\n if \"refresh_id\" in self.auth_data:\n self.set_cookie(COOKIE_NAME, self.auth_data[\"refresh_id\"])\n if self.on_auth_data_changed:\n self.on_auth_data_changed(self.auth_data)", "def syncrepl_set_cookie(self, cookie):\n pass" ]
[ "0.7328599", "0.7113552", "0.7019901", "0.6602599", "0.6172265", "0.6155734", "0.6105894", "0.6006707", "0.5999476", "0.59140986", "0.5844728", "0.57702464", "0.5757035", "0.56695026", "0.56473345", "0.56406015", "0.5631936", "0.5561294", "0.55334646", "0.551439", "0.5495705", "0.54826754", "0.54634166", "0.54068816", "0.5383761", "0.53551775", "0.53230613", "0.53139377", "0.5286238", "0.52837855" ]
0.7592125
0
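The positive document in the record above is a thin wrapper over the public `requests` helper `requests.cookies.cookiejar_from_dict`, which updates an existing cookie jar in place when one is passed as the second argument. A short usage sketch calling that helper directly (assuming a plain `requests.Session` rather than the wrapper object):

```python
# Minimal sketch of the same idea using the public requests API directly.
import requests

session = requests.Session()
requests.cookies.cookiejar_from_dict(
    {'sessionid': 'abc123', 'theme': 'dark'},
    session.cookies,                 # update the session's jar in place
)
print(session.cookies.get_dict())    # {'sessionid': 'abc123', 'theme': 'dark'}
```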
Send an HTTP request and return a response. If argument ``priority`` is not ``None``, the request is sent with priority (this requires ``PriorityExecutor``). For now, we do not support setting ``priority`` in ``request``.
async def send(self, request, **kwargs): priority = kwargs.pop('priority', None) if priority is None: future = self._executor.submit( self.send_blocking, request, **kwargs ) else: LOG.debug( 'send: priority=%r, %r, kwargs=%r', priority, request, kwargs ) future = self._executor.submit_with_priority( priority, self.send_blocking, request, **kwargs ) future.set_finalizer(lambda response: response.close()) return await adapters.FutureAdapter(future).get_result()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _send_request(self, http_request, **kwargs):\n # type: (HttpRequest, Any) -> HttpResponse\n http_request.url = self._client.format_url(http_request.url)\n stream = kwargs.pop(\"stream\", True)\n pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs)\n return pipeline_response.http_response", "def send_blocking(self, request, **kwargs):\n LOG.debug('send: %r, kwargs=%r', request, kwargs)\n\n # ``requests.Session.get`` and friends do a little more than\n # ``requests.Session.request``; so let's use the former.\n method = getattr(self._session, request.method.lower())\n\n # ``kwargs`` may overwrite ``request._kwargs``.\n final_kwargs = request._kwargs.copy()\n final_kwargs.update(kwargs)\n\n source = method(request.url, **final_kwargs)\n stream = final_kwargs.get('stream')\n if stream:\n response = source\n else:\n try:\n response = Response(\n source,\n source.content, # Force consuming the content.\n )\n finally:\n source.close()\n\n try:\n response.raise_for_status()\n except Exception:\n # Force consuming the content. In case caller sets\n # stream=True, this ensures that exc.response.content is not\n # empty.\n response.content # pylint: disable=pointless-statement\n # On error, close the original response for the caller since\n # the caller usually forgets to do this.\n response.close()\n raise\n\n return response", "def request(self, host, handler, request_body, verbose):\n headers = {'User-Agent': self.user_agent,\n 'Content-Type': 'text/xml',\n }\n url = self._build_url(host, handler)\n kwargs = {}\n if StrictVersion(requests.__version__) >= StrictVersion('0.8.8'):\n kwargs['verify'] = True\n else:\n if self.use_https:\n warnings.warn(\n 'using https transport but no certificate '\n 'verification. (Hint: upgrade requests package.)')\n try:\n resp = requests.post(url, data=request_body, headers=headers,\n **kwargs)\n except ValueError:\n raise\n except Exception:\n raise # something went wrong\n else:\n try:\n resp.raise_for_status()\n except requests.RequestException as e:\n raise xmlrpc.ProtocolError(\n url, resp.status_code, str(e), resp.headers)\n else:\n return self.parse_response(resp)", "def _request(self, *args):\n self._silent_request(*args)\n return self._get_response()", "def request(self, *args, **kwargs):\n try:\n return self._http.request(*args, timeout=TIMEOUT, **kwargs)\n except Exception as exc:\n raise RequestException(exc, args, kwargs)", "def __call__(self, request, *args):\n http_method = request.method.lower()\n if http_method == 'head':\n http_method = 'get'\n try:\n handler_method = getattr(self, http_method)\n except:\n raise HTTPMethodNotAllowed()\n response = handler_method(request, *args)\n if isinstance(response, str):\n response = HTTPResponse(response)\n if request.method.lower() == 'head':\n response = HTTPResponse('', response.status, response.content_type, response.charset, response.headerlist)\n return response", "def _send_http_request(self, resource, method, data=None, params=None, headers=None):\n\n url = '/'.join((self.https_url, resource))\n\n response = self._session.request(\n url=url,\n method=method,\n data=data,\n params=params,\n headers=headers,\n proxies=self._proxies)\n response.raise_for_status()\n\n return response", "def _send_request(self):\n url = self.config['url']\n agent = Agent(reactor)\n response = (yield agent.request(\n 'GET',\n url.encode(\"ASCII\"),\n ))\n\n d = defer.Deferred()\n response.deliverBody(ReceiveBody(d))\n defer.returnValue((yield d))", "def _perform_http_request(self, 
url, data=None, headers=(), timeout=7.0):\n if self._is_first_request:\n self._is_first_request = False\n self._handle_first_request()\n\n if data is not None:\n if isinstance(data, dict) or isinstance(data, list):\n data = urlencoder.urlencode(data)\n else:\n raise RouterFetchError(\n 'POST data should be a dict, a list or None!'\n )\n\n try:\n req = requestor.Request(url, data)\n for header, value in headers:\n req.add_header(header, value)\n with contextlib.closing(requestor.urlopen(req, timeout=timeout)) as handle:\n self._is_logged_in = True\n return (\n handle.geturl(),\n handle.info(),\n handle.read().decode('utf-8', 'ignore')\n )\n except Exception as e:\n raise RouterFetchError('Failed making request: %s' % repr(e))", "def __call__(self, **parameters):\n request = self._build_request(**parameters)\n\n return self.requestor.request(**request)", "def http_request(self) -> 'outputs.HttpRequestResponse':\n return pulumi.get(self, \"http_request\")", "def send(self, request: Request, **requests_kwargs) -> Response:", "def do_request(self, request_wrapper):\n assert isinstance(request_wrapper, RequestWrapper)\n logger.debug('Request to %s (thread %s)', request_wrapper.url,\n threading.current_thread().name)\n start_time = time.time()\n try:\n response = request_wrapper.session.request(\n *request_wrapper.args, **request_wrapper.kwargs)\n except Exception as exc:\n logger.error('Request exception: %s\\n%s',\n exc, '\\n'.join(traceback.format_tb(exc.__traceback__)))\n with self.lock:\n self.failure_count += 1\n response = exc\n else:\n request_time = time.time() - start_time\n logger.debug('Response from %s in %.3f s. (thread %s)', request_wrapper.url,\n request_time, threading.current_thread().name)\n\n with self.lock:\n self.request_count += 1\n self.total_request_time += request_time\n\n response_wrapper = ResponseWrapper(response=response, request_wrapper=request_wrapper)\n self.response_queue.put(response_wrapper)", "def request(self, *args, **kwargs):\n response = super().request(*args, **kwargs)\n\n # Log request headers\n for header, value in response.request.headers.items():\n LOGGER.debug(\"REQUEST %s: %s\", header, value)\n\n # Raise HTTP errors as exceptions\n try:\n response.raise_for_status()\n except requests.HTTPError as http_error:\n LOGGER.error(http_error)\n LOGGER.error(http_error.response.json())\n raise\n\n # Log response headers\n for header, value in response.headers.items():\n LOGGER.debug(\"RESPONSE %s: %s\", header, value)\n\n return response", "def _request(self, request_method, url, *args, **kwargs):\n\n full_url = self.get_full_url(url)\n\n self.logger.info('Calling %s url: %s', request_method, full_url)\n\n request_args = self.get_request_args(kwargs)\n\n request = NapRequest(request_method, full_url, *args, **request_args)\n\n for mw in self.model._meta['middleware']:\n request = mw.handle_request(request)\n\n resource_response = request.send()\n response = NapResponse(\n url=request.url,\n status_code=resource_response.status_code,\n headers=resource_response.headers,\n content=resource_response.content,\n request_method=request_method,\n )\n\n for mw in reversed(self.model._meta['middleware']):\n response = mw.handle_response(request, response)\n\n return response", "def make_HTTP_request(self, method, url, body, headers, callback=None):\r\n self.push_HTTP_request(method, url, body, headers, callback)\r\n self.pop_response()", "def _api_request(*args, **kwargs):\n response = requests.request(*args, **kwargs)\n return APIResponse(response)", "def 
_send_request(self, method='post', headers=None, json=None):\n response = getattr(requests, method)(self.url, headers=headers, json=json)\n return response", "def __call__(self, request):\n response = self.get_request(request)\n return response", "def send_post(url, data, headers, return_output=False):\n req = requests.post(url=url, data=json.dumps(data), headers=headers)\n if return_output:\n return req\n if str(req.status_code).startswith('2'):\n print 'SUCCESS! {0} {1} {2}'.format(req.status_code, req.reason, req.content)\n else:\n print 'FAIL! {0} {1} {2}'.format(req.status_code, req.reason, req.content)\n exit(77)", "def send_request(self, path, body=None, params=None):\n alist = None\n if params:\n pairs = ' '.join('(%s . %s)' % (lisp_string(k), lisp_string(v))\n for k, v in six.iteritems(params))\n alist = \"'(%s)\" % pairs\n\n uri = self.uri(path)\n\n return self.conn.evalInServer('''\n (prog1 (net.aserve.client:do-http-request \n {uri} \n :method {method}\n :query {query}\n :headers '((:authorization . \"test {key}\"))\n :content-type \"text/plain\"\n :content {body}))\n '''.format(uri=lisp_string(uri),\n query=alist or 'nil',\n key=self.key,\n method=':post' if body else ':get',\n body=lisp_string(body) if body else 'nil'))", "def post(self, *args, **kwargs):\n # Add some task debug information.\n headers = []\n for key, value in self.request.headers.items():\n k = key.lower()\n if k.startswith(\"x-appengine-\") and k not in self._SKIP_HEADERS:\n headers.append(\"%s:%s\" % (key, value))\n logging.debug(\", \".join(headers))\n\n # Make sure all modules are loaded\n if WARMUP_MODULE:\n importlib.import_module(WARMUP_MODULE)\n\n # Make sure we are called from the Task Queue (security)\n if isFromTaskQueue(self.request):\n try:\n _run(self.request.body)\n except deferred.SingularTaskFailure as e:\n msg = \"Failure executing task, task retry forced\"\n if e.message:\n msg += \": %s\" % e.message\n logging.debug(msg)\n self.response.set_status(408)\n except deferred.PermanentTaskFailure:\n logging.exception(\"Permanent failure attempting to execute task\")\n\n else:\n logging.critical('Detected an attempted XSRF attack: we are not executing from a task queue.')\n self.response.set_status(403)", "def _external_request(self, method, url, *args, **kwargs):\n self.last_url = url\n if url in self.responses.keys() and method == 'get':\n return self.responses[url] # return from cache if its there\n\n headers = kwargs.pop('headers', None)\n custom = {'User-Agent': useragent}\n if headers:\n headers.update(custom)\n kwargs['headers'] = headers\n else:\n kwargs['headers'] = custom\n\n response = getattr(requests, method)(url, *args, **kwargs)\n\n if self.verbose:\n print(\"Got Response: %s\" % url)\n\n if response.status_code == 503:\n raise SkipThisService(\"Service returned 503 - Temporarily out of service.\")\n\n if method == 'get':\n self.responses[url] = response # cache for later\n\n self.last_raw_response = response\n return response", "def _send_response(self, request):\n request_line, headers = split_http_request(request)\n if DEBUG_LEVEL > 1:\n print \"Request: {}\\nHeaders: {}\".format(request_line, headers)\n\n request = HTTPRequest.HTTPRequest(request_line, headers, DEBUG_LEVEL)\n\n uri = request.get_uri_with_no_params()\n uri = uri[1:] if uri[0] == \"/\" else uri\n\n if uri in server_functions.AVAILABLE_FUNCTIONS.keys():\n response, flag = server_functions.\\\n AVAILABLE_FUNCTIONS[uri](request.get_params())\n self._client.send(response.build_response())\n return flag\n\n result = 
self._check_status_errors(request)\n if result == -1:\n return False\n elif result == 1:\n return True\n\n full_file_path = self._get_full_path(request)\n\n requested_file = open(full_file_path, \"r\")\n data = requested_file.read()\n requested_file.close()\n\n headers = HTTPHeaders.HTTPHeaders()\n public_response_functions.add_default_headers(headers)\n headers[\"Content-Length\"] = str(len(data))\n\n response = HTTPResponse.HTTPResponse(version=1.0, status_code=200,\n phrase=\"OK\", headers=headers)\n self._client.send(response.build_response() + data)\n return True", "def http_request(self, method, path, data=None, params=None):\n\n s = Session()\n url = urljoin(self.BASE_URL, path)\n full_url = url\n try:\n full_url = full_url + \"?\" + urlencode(params)\n except:\n pass\n\n headers = self.request_headers(method, full_url)\n\n req = Request(\n method,\n url,\n headers=headers,\n data=data,\n params=params\n )\n prepped = req.prepare()\n resp = s.send(prepped, timeout=self.timeout)\n if resp.status_code == 429:\n raise errors.APIRateLimitError(\"Threat Stack API rate limit exceeded\")\n else:\n return self.handle_response(resp)", "def _request(self, *args, **kwargs):\n request = self._make_request(*args, **kwargs)\n\n return self._collect_request(request)", "def respond(self, request):\n self.prepare(request)\n try:\n self.process(request)\n return self.get_response(request)\n finally:\n self.finalize()", "def send_request(self, method, scheme, host, *path, **kwargs):\n port = kwargs.pop('port', None)\n netloc = host if port is None else '{}:{}'.format(host, port)\n target = '{}://{}/{}'.format(scheme, netloc,\n '/'.join(parse.quote(str(s), safe='')\n for s in path))\n if 'headers' in kwargs:\n headers = self.headers.copy()\n headers.update(kwargs.pop('headers'))\n kwargs['headers'] = headers\n else:\n kwargs['headers'] = self.headers\n\n request = httpclient.HTTPRequest(target, method=method, **kwargs)\n self.logger.debug('sending %s %s', request.method, request.url)\n\n future = concurrent.TracebackFuture()\n\n def handle_response(f):\n try:\n future.set_result(f.result())\n except httpclient.HTTPError as error:\n future.set_exception(HTTPError.from_tornado_error(request,\n error))\n except Exception as exception:\n future.set_exception(exception)\n\n coro = self.client.fetch(request)\n self.client.io_loop.add_future(coro, handle_response)\n\n return future", "def call(self):\n\n self.url = self._prepare_url()\n status_code, response = self._do_request(self.url)\n return self._process_response(status_code, response)", "def _request(self, method, url, params=None, data=None, request_type=PRIVATE, headers={}):\n self._is_valid_request_option(request_type=request_type)\n\n request_headers = copy.deepcopy(self.BASE_HEADERS)\n request_headers.update(headers)\n\n response = getattr(requests, method.lower())(\n url,\n headers=request_headers,\n params=params,\n data=data\n )\n\n return self._handle_response(response)" ]
[ "0.55242413", "0.54918885", "0.545943", "0.54478884", "0.52762944", "0.51643455", "0.5151465", "0.505821", "0.5055705", "0.50430596", "0.50257236", "0.5018931", "0.5013865", "0.49931225", "0.49770147", "0.4931497", "0.49172196", "0.48725277", "0.4831139", "0.48177412", "0.47971368", "0.4785099", "0.47850564", "0.47786838", "0.47779578", "0.47705963", "0.47606584", "0.4737845", "0.47354558", "0.47258195" ]
0.7391536
0
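The record above describes an async `send` that offloads a blocking HTTP call to an executor, optionally with a priority (which requires a priority-aware executor). The sketch below shows only the general shape of that pattern using the standard library; it is not the library's `FutureAdapter`-based implementation, and a plain `ThreadPoolExecutor` has no priority support, so the priority branch is omitted.

```python
# Stdlib-only sketch: offload a blocking send to an executor and await the result.
import asyncio
from concurrent.futures import ThreadPoolExecutor


def send_blocking(url):
    # Stand-in for the real blocking send.
    return f'response for {url}'


async def send(executor, url):
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(executor, send_blocking, url)


async def main():
    with ThreadPoolExecutor(max_workers=4) as executor:
        print(await send(executor, 'https://example.com'))


asyncio.run(main())
```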
Send a request in a blocking manner. If ``stream`` is set to true, we will return the original response object, and will NOT copythenclose it to our response class. In this case, the caller is responsible for closing the response object. This does not implement rate limit nor retry.
def send_blocking(self, request, **kwargs): LOG.debug('send: %r, kwargs=%r', request, kwargs) # ``requests.Session.get`` and friends do a little more than # ``requests.Session.request``; so let's use the former. method = getattr(self._session, request.method.lower()) # ``kwargs`` may overwrite ``request._kwargs``. final_kwargs = request._kwargs.copy() final_kwargs.update(kwargs) source = method(request.url, **final_kwargs) stream = final_kwargs.get('stream') if stream: response = source else: try: response = Response( source, source.content, # Force consuming the content. ) finally: source.close() try: response.raise_for_status() except Exception: # Force consuming the content. In case caller sets # stream=True, this ensures that exc.response.content is not # empty. response.content # pylint: disable=pointless-statement # On error, close the original response for the caller since # the caller usually forgets to do this. response.close() raise return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):\n if self.rate_limit_handler:\n self.rate_limit_handler.pre_send(request)\n\n try:\n response = super().send(request, stream, timeout, verify, cert, proxies)\n except exceptions.RetryError:\n # store current retries configuration\n max_retries = self.max_retries\n\n # temporarily disable retries and make one last request\n self.max_retries = Retry(0, read=False)\n\n # make request with max_retries turned off\n response = super().send(request, stream, timeout, verify, cert, proxies)\n\n # reset retries configuration\n self.max_retries = max_retries\n\n if self.rate_limit_handler:\n self.rate_limit_handler.post_send(response)\n\n return response", "def send(\n self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None\n ):\n retries = self.max_retries\n\n try:\n while not retries.is_exhausted():\n try:\n response = self._curl_send(\n request,\n stream=stream,\n timeout=timeout,\n verify=verify,\n cert=cert,\n proxies=proxies,\n )\n\n return response\n\n except RequestException as error:\n retries = retries.increment(\n method=request.method, url=request.url, error=error\n )\n retries.sleep()\n\n except MaxRetryError as retry_error:\n raise retry_error.reason", "def _send_request(self, http_request, **kwargs):\n # type: (HttpRequest, Any) -> HttpResponse\n http_request.url = self._client.format_url(http_request.url)\n stream = kwargs.pop(\"stream\", True)\n pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs)\n return pipeline_response.http_response", "def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):\n\n conn = self.get_connection(request.url, proxies)\n\n self.cert_verify(conn, request.url, verify, cert)\n url = self.request_url(request, proxies)\n\n try:\n if hasattr(conn, 'proxy_pool'):\n conn = conn.proxy_pool\n\n low_conn = conn._get_conn(timeout=timeout)\n low_conn.putrequest(request.method, url, skip_accept_encoding=True)\n\n for header, value in request.headers.items():\n low_conn.putheader(header, value)\n\n low_conn.endheaders()\n\n for i in request.body:\n low_conn.send(i)\n\n r = low_conn.getresponse()\n resp = HTTPResponse.from_httplib(r,\n pool=conn,\n connection=low_conn,\n preload_content=False,\n decode_content=False\n )\n\n except socket.error as sockerr:\n raise ConnectionError(sockerr)\n\n except MaxRetryError as e:\n raise ConnectionError(e)\n\n except (_SSLError, _HTTPError) as e:\n if isinstance(e, _SSLError):\n raise SSLError(e)\n elif isinstance(e, TimeoutError):\n raise Timeout(e)\n else:\n raise Timeout('Request timed out.')\n\n r = self.build_response(request, resp)\n\n if not stream:\n r.content\n\n return r", "def _curl_send(\n self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None\n ):\n try:\n curl_connection = self._get_curl_connection(request.url, proxies)\n curl_request = CURLRequest(\n request, timeout=timeout, cert=cert, verify=verify\n )\n\n response = curl_connection.send(curl_request)\n\n return response.to_requests_response()\n\n except pycurl.error as curl_error:\n requests_exception = translate_curl_exception(curl_error)\n raise requests_exception(\"CURL error {0}\".format(curl_error.args))", "def StreamExecute(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def send(self, request, stream=False, cert=None, verify=True, proxies=None,\n timeout=None, **kwargs):\n proxy = 
select_proxy(request.url, proxies)\n if proxy:\n proxy = prepend_scheme_if_needed(proxy, 'http')\n\n parsed = urlparse(request.url)\n conn = self.get_connection(\n parsed.hostname,\n parsed.port,\n parsed.scheme,\n cert=cert,\n verify=verify,\n proxy=proxy,\n timeout=timeout)\n\n # Build the selector.\n selector = parsed.path\n selector += '?' + parsed.query if parsed.query else ''\n selector += '#' + parsed.fragment if parsed.fragment else ''\n\n conn.request(\n request.method,\n selector,\n request.body,\n request.headers\n )\n resp = conn.get_response()\n\n r = self.build_response(request, resp)\n\n if not stream:\n r.content\n\n return r", "def _stream_request(self,\n endpoint: str = \"/api/\",\n params: object = {}) -> bytes:\n ret: dict = {}\n if not self.api_key:\n ret[\"error\"] = \"API key is empty\"\n raise APIError(ret['error'])\n try:\n with requests.post(\n f\"{self.apibase}{endpoint}\",\n json=params,\n headers=self.headers,\n verify=self.verify_ssl,\n stream=True,\n ) as r:\n check_status_code(request=r, debug=self.debug, ret=ret)\n for chunk in r.iter_lines():\n # skip keep-alive chunks\n if chunk:\n yield chunk\n except:\n ret[\"error\"] = f\"Unexpected Stream error\"\n raise APIError(ret['error'])", "def _request(cls, method, url, request=None, keep_trying=False, *args,\n **kwargs):\n\n stream = kwargs.pop('stream', False)\n prepared_request = requests.Request(\n method, url, *args, **kwargs).prepare()\n\n # prepared_request.headers[\"Content-Type\"] = \"application/json\"\n if request and request.alice_id:\n prepared_request.headers[\"Cookie\"] = \"sessionid={}\".format(\n request.alice_id\n )\n\n url = urlsplit(url)\n path = bytes(url.path, \"utf-8\")\n if url.query:\n path += bytes(\"?{}\".format(url.query), \"utf-8\")\n salt = bytes(settings.UI_SECRET, \"utf-8\")\n\n body = prepared_request.body or b\"\"\n if isinstance(body, str):\n body = bytes(body, \"utf-8\")\n\n signature = sha256(path + body + salt).hexdigest()\n prepared_request.headers[\"X-Signature\"] = signature\n\n response = cls.send_request(prepared_request, keep_trying, stream=stream)\n\n if response.status_code > 299:\n logger.error(\"Rabbit error: {} - {}\".format(\n response.status_code,\n response.content\n ))\n\n if response.status_code == 403:\n raise RabbitException(\n \"\"\" Data server access is failing for {} requests to {}\n with error {}. request: {}. 
alice_id {}\n \"\"\".format(\n method,\n str(path, \"utf-8\"),\n response.content,\n request,\n request.alice_id if request else None,\n )\n )\n\n return response", "def simulate_get_stream(self, path='/', **kwargs):\n\n kwargs['_stream_result'] = True\n\n return _AsyncContextManager(self.simulate_request('GET', path, **kwargs))", "def _process_request(self):\n if not self._requests:\n if self._stream:\n self._stream.close()\n self._stream = None\n if self._processing:\n self._processing = False\n Engine.instance().stop()\n return\n\n request = self._requests[0]\n\n request.append(\n Engine.instance().defer(request[5], self._request_timeout, request))\n\n port = request[2].port\n if not port:\n if request[2].scheme.lower() == 'https':\n port = 443\n else:\n port = 80\n\n host = \"%s:%d\" % (request[2].hostname, port)\n\n if self._stream:\n if not self._server == host.lower() or not \\\n self._is_secure == (request[2].scheme.lower() == 'https'):\n self._stream.end()\n return\n\n if not self._stream:\n # Store the current server.\n self._server = host.lower()\n\n # Create a Stream, hook into it, and connect.\n self._stream = Stream()\n\n self._stream.on_close = self._on_close\n self._stream.on_connect = self._on_connect\n\n self._is_secure = request[2].scheme.lower() == 'https'\n if self._is_secure:\n raise Exception(\"SSL has not yet been implemented in this version of Pants.\")\n self._stream.startTLS()\n\n self._stream.connect((request[2].hostname, port))\n return\n\n # If we got here, we're connected, and to the right server. Do stuff.\n self._stream.write('%s %s HTTP/1.1%s' % (request[0], request[8], CRLF))\n for k, v in request[3].iteritems():\n self._stream.write('%s: %s%s' % (k, v, CRLF))\n\n if request[4]:\n self._stream.write('%s%s' % (CRLF, request[4]))\n else:\n self._stream.write(CRLF)\n\n # Now, wait for a response.\n self._stream.on_read = self._read_headers\n self._stream.read_delimiter = DOUBLE_CRLF", "def UpdateStream(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def stream(self):\n if self.body_used:\n raise RuntimeError('Cannot use both stream and body')\n self.stream_used = True\n return self._stream", "def stream(self):\n return ResponseStream(self)", "async def _handle_request(\n self, request: web.Request, **kwargs: Any\n ) -> web.Response | web.StreamResponse:\n url = self._create_url(**kwargs)\n if not url:\n return web.Response(status=HTTP_NOT_FOUND)\n\n data = await request.read()\n source_header = _init_header(request)\n\n async with self._websession.request(\n request.method,\n url,\n headers=source_header,\n params=request.query,\n allow_redirects=False,\n data=data,\n ) as result:\n headers = _response_header(result)\n\n # Stream response\n response = web.StreamResponse(status=result.status, headers=headers)\n response.content_type = result.content_type\n\n try:\n await response.prepare(request)\n async for data in result.content.iter_chunked(4096):\n await response.write(data)\n\n except (aiohttp.ClientError, aiohttp.ClientPayloadError) as err:\n _LOGGER.debug(\"Stream error for %s: %s\", request.rel_url, err)\n\n return response", "async def _send_request_body(self, request: Request, stream_id: int) -> None:\n if not has_body_headers(request):\n return\n\n assert isinstance(request.stream, typing.AsyncIterable)\n async for data in request.stream:\n await self._send_stream_data(request, stream_id, data)\n await self._send_end_stream(request, stream_id)", "async def send(self, request, 
**kwargs):\n priority = kwargs.pop('priority', None)\n if priority is None:\n future = self._executor.submit(\n self.send_blocking, request, **kwargs\n )\n else:\n LOG.debug(\n 'send: priority=%r, %r, kwargs=%r', priority, request, kwargs\n )\n future = self._executor.submit_with_priority(\n priority, self.send_blocking, request, **kwargs\n )\n future.set_finalizer(lambda response: response.close())\n return await adapters.FutureAdapter(future).get_result()", "def send(\n self, request: Request\n ) -> Union[Response, Coroutine[None, None, Response]]:\n return self.sender.send(request)", "def stream():\n while True:\n try:\n r = requests.post(\"http://streamer_0:5000/stream\", json={})\n break\n except requests.exceptions.ConnectionError:\n logging.error(\"Could not connect to server streamer_0, retrying\")\n time.sleep(2)\n continue\n logging.info(\"'http://streamer_0:5000/stream', response = {}\".format(r.status_code))\n if r.status_code != 200:\n time.sleep(2)\n stream()", "def _send_request(self):\n url = self.config['url']\n agent = Agent(reactor)\n response = (yield agent.request(\n 'GET',\n url.encode(\"ASCII\"),\n ))\n\n d = defer.Deferred()\n response.deliverBody(ReceiveBody(d))\n defer.returnValue((yield d))", "def _send_request(self, method, url, params=None, data=None, headers=None, json=None,\n http_status=2, parse_json=False, error_processors=[],\n allow_redirects=None, cookies=None, stream=False):\n call_time = now()\n if self.last_call_time:\n if self.request_wait_seconds:\n delta = (call_time - self.last_call_time).total_seconds()\n if delta < self.request_wait_seconds:\n self.sleep(self.request_wait_seconds - delta,\n log_reason='request wait')\n self.last_call_time = now()\n else:\n self.last_call_time = call_time\n else:\n self.last_call_time = call_time\n else:\n self.first_call_time = self.last_call_time = call_time\n\n if not urlparse(url).scheme and self.base_url:\n url = urljoin(self.base_url, url)\n\n if allow_redirects is None:\n allow_redirects = self.allow_redirects\n\n if self.debug_level >= 5:\n self.logger.debug(\n _color_em('REQUEST %s' % method) + ' ' + url + (' params=%s' % params)\n + '\\n' + _color_em('REQUEST HEADERS:', back=colorama.Back.BLUE) + '\\n'\n + pprint(headers, print_=False)\n + (('\\n' + _color_em('REQUEST BODY:', back=colorama.Back.BLUE) + '\\n'\n + pprint(data or json, print_=False)) if (data or json) else '')\n )\n\n try:\n kwargs = dict(params=params, data=data, json=json, headers=headers,\n allow_redirects=allow_redirects, proxies=self.proxy,\n verify=self.ssl_verify, cookies=cookies, stream=stream)\n if self.timeout is not None:\n # Allow session (ConfigurableSession for example) to handle timeout\n kwargs['timeout'] = self.timeout\n response = self.session.request(method, url, **kwargs)\n except Exception as exc:\n self.error_processor(exc, error_processors)\n raise\n finally:\n if self.request_wait_since_response:\n self.last_call_time = now()\n\n if self.debug_level >= 5:\n self.logger.debug(\n _color_em('RESPONSE %s' % response.request.method, back=colorama.Back.GREEN)\n + colorama.Style.RESET_ALL + colorama.Style.BRIGHT + (' %s ' % response.status_code)\n + colorama.Style.RESET_ALL + response.url\n + '\\n' + _color_em('RESPONSE HEADERS:', back=colorama.Back.GREEN) + '\\n'\n + pprint(response.headers, print_=False)\n + '\\n' + _color_em('RESPONSE BODY:', back=colorama.Back.GREEN) + '\\n'\n + (stream and '<stream>' or pprint(response.text, print_=False))\n )\n\n elapsed_seconds = response.elapsed.total_seconds()\n if elapsed_seconds 
> self.request_warn_elapsed_seconds:\n self.logger.warning('Request %s %s took %s seconds after calls(%s/%s) since(%s)',\n response.request.method, response.request.url,\n elapsed_seconds, self.calls_count, self.calls_elapsed_seconds,\n self.first_call_time)\n self.calls_elapsed_seconds += elapsed_seconds\n self.calls_count += 1\n self.last_response = response # NOTE: only for debug purposes!\n\n if (http_status and not check_http_status(response.status_code, http_status)):\n self.set_response_json_data(response, parse_json, raise_=False)\n exc = self.HTTPError(response, expected_status=http_status)\n self.error_processor(exc, error_processors)\n raise exc\n\n try:\n self.set_response_json_data(response, parse_json, raise_=True)\n except _JSONDecodeError as exc:\n exc = self.JSONDecodeError(response, exc)\n self.error_processor(exc, error_processors)\n raise exc\n\n return response", "def send_request(self, request_headers):\n stream_id = next(self.counter)\n\n request_stream = SendStream(stream_id)\n response_stream = ReceiveStream(stream_id)\n self.receive_streams[stream_id] = response_stream\n self.send_streams[stream_id] = request_stream\n\n request_stream.headers.set(*request_headers)\n\n self.pending_requests.append(stream_id)\n\n return request_stream, response_stream", "def __httpsRequest(self, method, _uri, body='', files=None, headers={}, stream=False):\n\n #-------------------- \n # Make the request arguments\n #--------------------\n url = Xnat.path.makeXnatUrl(self.host, _uri)\n print(f\"{method} XNAT URL: {url}\")\n if (body):\n print(body)\n\n #-------------------- \n # Conduct REST call\n #-------------------- \n # self.__requests_worker(method, url, body, files, headers, stream)\n t = threading.Thread(\n target=self.__requests_worker, \n args=(method, url, body, files, headers, stream,))\n t.start()\n t.join()\n \n return self.response", "def _send_http_request(self, resource, method, data=None, params=None, headers=None):\n\n url = '/'.join((self.https_url, resource))\n\n response = self._session.request(\n url=url,\n method=method,\n data=data,\n params=params,\n headers=headers,\n proxies=self._proxies)\n response.raise_for_status()\n\n return response", "def __send_request(self, url, params=None, headers=None):\n\n if self.rate_limit is not None and self.rate_limit <= self.min_rate_to_sleep:\n seconds_to_reset = self.rate_limit_reset_ts - int(time.time()) + 1\n cause = \"GitHub rate limit exhausted.\"\n if self.sleep_for_rate:\n logger.info(\"%s Waiting %i secs for rate limit reset.\", cause, seconds_to_reset)\n time.sleep(seconds_to_reset)\n else:\n raise RateLimitError(cause=cause, seconds_to_reset=seconds_to_reset)\n\n r = requests.get(url, params=params, headers=headers)\n r.raise_for_status()\n self.rate_limit = int(r.headers['X-RateLimit-Remaining'])\n self.rate_limit_reset_ts = int(r.headers['X-RateLimit-Reset'])\n logger.debug(\"Rate limit: %s\" % (self.rate_limit))\n return r", "def MessageStream(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "async def __call__(self, send):\n await send(\n {\n \"type\": \"http.response.start\",\n \"status\": self.status_code,\n \"headers\": self.headers.items(),\n }\n )\n\n await send({\"type\": \"http.response.body\", \"body\": self.content})", "def open(self):\n streaming_specs = self.get_streaming_specs()\n self._stream = chunked_requests.Stream(**streaming_specs)", "def response_as_stream(self) -> Any:\n raise NotImplementedError # pragma: no cover", "def 
return_stream(self, entity, request, environ, start_response,\n response_headers, method):\n coll = entity.entity_set.open()\n try:\n if method == \"GET\":\n sinfo, sgen = coll.read_stream_close(entity.key())\n else:\n sinfo = coll.read_stream(entity.key())\n sgen = []\n coll.close()\n except Exception:\n coll.close()\n raise\n types = [sinfo.type] + self.StreamTypes\n response_type = self.content_negotiation(request, environ, types)\n if response_type is None:\n return self.odata_error(\n request, environ, start_response, \"Not Acceptable\",\n 'media stream type refused, try application/octet-stream', 406)\n response_headers.append((\"Content-Type\", str(response_type)))\n if sinfo.size is not None:\n response_headers.append((\"Content-Length\", str(sinfo.size)))\n if sinfo.modified is not None:\n response_headers.append((\"Last-Modified\",\n str(params.FullDate(src=sinfo.modified))))\n if sinfo.md5 is not None:\n response_headers.append(\n (\"Content-MD5\", force_ascii(base64.b64encode(sinfo.md5))))\n self.set_etag(entity, response_headers)\n start_response(\"%i %s\" % (200, \"Success\"), response_headers)\n return sgen" ]
[ "0.7113001", "0.67975926", "0.673969", "0.6423465", "0.6422512", "0.6057528", "0.604382", "0.5993796", "0.58285165", "0.58225983", "0.57846904", "0.5776013", "0.57309645", "0.5710379", "0.566662", "0.5618056", "0.55856884", "0.5550998", "0.5527313", "0.55215275", "0.5517145", "0.5500332", "0.5497538", "0.5486627", "0.5451605", "0.5442655", "0.54278946", "0.54109126", "0.53740394", "0.5362964" ]
0.790529
0
Make a "copy" from a ``requests`` Response object. Note that this consumes the content of the ``source`` object, which forces ``source`` to read the whole response body from the server (and so we do not need to do this in the Sender class).
def __init__(self, source, content, *, _copy_history=True): self._content = content self.status_code = source.status_code self.headers = source.headers self.url = source.url if _copy_history: self.history = [ Response( r, # TODO: Should we load r.content? None, # TODO: In some rare cases, history seems to have # loops. We probably should try to detect loops, # but for now, let us only go into one level of the # history. _copy_history=False, ) for r in source.history ] else: # Make it non-iterable so that if user (accidentally) # iterates this, it will err out. self.history = None self.encoding = source.encoding self.reason = source.reason self.cookies = source.cookies self.elapsed = source.elapsed # We do not copy source.request for now.
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def copy_response(context):\n context.response_copy = context.response.json()\n logging.debug('Successfully copied the response')", "def copy(self, dest):\n if not isinstance(dest, Request):\n raise ValueError(\"'%s' should be a sub-class of 'Request'\" % dest)\n return dest.update_url(self.url).update_verb(self.verb)\\\n .update_params(self.params).update_data(self.data)", "def from_sync_httpx_response(cls, httpx_response, target, **kwargs):\n return httpcore.Response(\n status=httpx_response.status_code,\n headers=httpx_response.headers.raw,\n content=httpx_response.stream,\n extensions=httpx_response.extensions,\n )", "def from_response(cls, response):\n\n d = {\n \"headers\": dict(response.getheaders()),\n \"content\": response.read(),\n \"status\": response.status,\n \"reason\": response.reason,\n \"raw_headers\": response.msg.headers,\n \"length\": response.length,\n \"version\": response.version,\n }\n return cls.from_dict(d)", "async def _extract_response_content(\n self, response: Response\n ) -> Tuple[Response, List[Dict[str, Any]]]:\n body = b\"\"\n new_response = response\n if isinstance(response, StreamingResponse):\n async for chunk in response.body_iterator:\n body += chunk\n new_response = StreamingResponse(\n content=(chunk for chunk in [body]),\n status_code=response.status_code,\n headers={k: v for k, v in response.headers.items()},\n media_type=response.media_type,\n background=response.background,\n )\n else:\n body = response.body\n return new_response, json.loads(body)", "def __call__(self, get_response, request):\n response = get_response(request)\n\n if response.streaming:\n response.streaming_content = self.wrap_streaming_content(response.streaming_content)\n\n return response", "def compare_original_response_with_copy(context):\n original = context.response.json()\n copy = context.response_copy\n\n def compare_top_level_values():\n # get the list of fields that are JSON values not arrays\n keys = [val for val in original.iterkeys() if not isinstance(original[val], (dict, list, set))]\n assert keys, ('Expected at least 1 field key to compare but got none!')\n logging.debug('List of top tier field keys to compare: %s', keys)\n for key in keys:\n assert original[key] == copy[key]\n logging.debug(\n 'All top level fields in the response copy have the same values as'\n ' in the original response. 
Here is a list of compared fields:\\n%s',\n ', '.join(keys))\n\n def compare_items():\n original_items = original['items']\n copy_items = copy['items']\n skip = ['title', 'last_activity_date']\n for original_item in original_items:\n # get all item field keys\n keys = [val for val in original_item.iterkeys()]\n # remove the keys that need to be skipped\n keys = [x for x in keys if x not in skip]\n for copy_item in copy_items:\n # find matching items\n if original_item['question_id'] == copy_item['question_id']:\n # compare original an copied items\n for key in keys:\n assert original_item[key] == copy_item[key]\n logging.debug(\n 'All fields in the copied item ID: %s'\n ' have the same values as in in the original items',\n copy_item['question_id'])\n\n compare_top_level_values()\n compare_items()", "def clone_from(self, source):\n\n if type(source) is type(self):\n # special case using class internals\n self._clone_from_common_class(source)\n else:\n self._clone_from_general(source)", "def __get_raw_content(self, response):\n\n files = response.get('files')\n\n for f in files:\n file_data = files.get(f)\n if file_data.get('truncated'):\n r = requests.get(file_data.get('raw_url'))\n file_data.update({\n 'content': str(r.content, 'utf-8')\n })\n\n return response", "def upgrade_response(response):\n wrapper_class = get_seek_wrapper_class(response)\n if hasattr(response, \"closeable_response\"):\n if needs_seek_wrapper(response):\n response = wrapper_class(response)\n assert hasattr(response, \"get_data\")\n return copy.copy(response)\n\n # a urllib2 handler constructed the response, i.e. the response is an\n # urllib.addinfourl or a urllib2.HTTPError, instead of a\n # _Util.closeable_response as returned by e.g. mechanize.HTTPHandler\n try:\n code = response.code\n except AttributeError:\n code = None\n try:\n msg = response.msg\n except AttributeError:\n msg = None\n\n # may have already-.read() data from .seek() cache\n data = None\n get_data = getattr(response, \"get_data\", None)\n if get_data:\n data = get_data()\n\n response = closeable_response(response.fp,\n response.info(), response.geturl(), code,\n msg)\n response = wrapper_class(response)\n if data:\n response.set_data(data)\n return response", "def build_response(self, request, resp):\n response = Response()\n\n response.status_code = resp.status\n response.headers = CaseInsensitiveDict((\n map(to_native_string, h)\n for h in resp.headers.iter_raw()\n ))\n response.raw = resp\n response.reason = resp.reason\n response.encoding = get_encoding_from_headers(response.headers)\n\n extract_cookies_to_jar(response.cookies, request, response)\n response.url = request.url\n\n response.request = request\n response.connection = self\n\n # First horrible patch: Requests expects its raw responses to have a\n # release_conn method, which I don't. We should monkeypatch a no-op on.\n resp.release_conn = lambda: None\n\n # Next, add the things HTTPie needs. It needs the following things:\n #\n # - The `raw` object has a property called `_original_response` that is\n # a `httplib` response object.\n # - `raw._original_response` has three simple properties: `version`,\n # `status`, `reason`.\n # - `raw._original_response.version` has one of three values: `9`,\n # `10`, `11`.\n # - `raw._original_response.msg` exists.\n # - `raw._original_response.msg._headers` exists and is an iterable of\n # two-tuples.\n #\n # We fake this out. 
Most of this exists on our response object already,\n # and the rest can be faked.\n #\n # All of this exists for httpie, which I don't have any tests for,\n # so I'm not going to bother adding test coverage for it.\n class FakeOriginalResponse(object): # pragma: no cover\n def __init__(self, headers):\n self._headers = headers\n\n def get_all(self, name, default=None):\n values = []\n\n for n, v in self._headers:\n if n == name.lower():\n values.append(v)\n\n if not values:\n return default\n\n return values\n\n def getheaders(self, name):\n return self.get_all(name, [])\n\n response.raw._original_response = orig = FakeOriginalResponse(None)\n orig.version = 20\n orig.status = resp.status\n orig.reason = resp.reason\n orig.msg = FakeOriginalResponse(resp.headers.iter_raw())\n\n return response", "def adapt_response(self, response):\n return response", "def adapt_response(self, response):\n return response", "def make_response_message(response):\n encoding = response.encoding or 'ISO-8859-1'\n original = response.raw._original_response\n response_headers = response.headers\n return HTTPMessage(\n line='HTTP/{version} {status} {reason}'.format(\n version='.'.join(str(original.version)),\n status=original.status, reason=original.reason,),\n headers=str(original.msg),\n body=response.content.decode(encoding) if response.content else '',\n content_type=response_headers.get('Content-Type'))", "def copy(self: _R) -> _R:\n return self.__class__(self.dumps())", "def stream(self):\n return ResponseStream(self)", "def unpack_raw_response(\n self,\n bound_obj: Any,\n message: Message,\n raw_response: Response | SysResponse,\n ) -> Response | None:\n response = self._unpack_raw_response(bound_obj, raw_response)\n assert (\n response is None\n or type(response) in type(message).get_response_types()\n )\n return response", "def copy_sms(self) -> GetResponseType:\n return self._connection.get('sms/copy-sms')", "def copy(self):\r\n copy = StreamDecompressor()\r\n copy._buf, copy._header_found = self._buf, self._header_found\r\n return copy", "def _prepare_response(self, response):\n\n if not isinstance(response, Response):\n return Response(0, response)\n return response", "def copy_report(cls, req):\n try:\n if req.report:\n report_url = cls.dvs_api_v1 + '/download/' + req.report\n upload_path = os.path.join(app.config['DRS_UPLOADS'], '{0}'.format(req.tracking_id))\n response = requests.post(url=report_url)\n file = open(os.path.join(upload_path, req.report), \"w+\")\n file.write(response.text)\n file.close()\n except Exception as e:\n app.logger.exception(e)\n raise e", "def copy(self):\n return self.from_builder(self)", "def make_response(self, request, response, **response_kwargs):\n while iscoroutine(response):\n response = yield from response\n\n if isinstance(response, StreamResponse):\n return response\n\n response_kwargs.setdefault('content_type', 'application/json')\n\n return Response(text=dumps(response), **response_kwargs)", "def src(self):\n if self._src:\n return self._src\n\n # Parse and create a new client\n conn = parse_url(self.source_url)\n client = get_client(conn)\n self._src = client\n return self._src", "def _remote_copy(self, source, destn):\n s = remote_copy(host_ip=self.ip, username=self.username,\n password=self.password, source=source, destn=destn)\n\n if s.get('status') == \"Failed\":\n raise AssertionError(s.get('error', \"Error encountered\"))\n\n return s", "def from_response(cls, response: ClientResponse) -> MultipartResponseWrapper:\n ...", "def 
_fetch_package_requests(source, headers, auth):\n import requests\n dest = build_temp_package_filepath()\n response = requests.get(source, stream=True, headers=headers, auth=auth)\n response.raise_for_status()\n with open(dest, 'wb') as handle:\n for block in response.iter_content(1024):\n handle.write(block)\n package = tarfile.open(dest)\n return package", "def _process_request(self, connection, request):\n conn = urllib2.Request(url=connection, data=request.encode(\"utf-8\"))\n f = urllib2.urlopen(conn)\n all_results = f.read()\n self.raw = all_results\n return(fromstring(all_results))", "def mangle_response(self, response):\n body = response.get_body()\n\n for regex, string in self._manglers['s']['b']:\n body = regex.sub(string, body)\n\n response.set_body(body)\n\n header_string = str(response.get_headers())\n\n for regex, string in self._manglers['s']['h']:\n header_string = regex.sub(string, header_string)\n\n try:\n mangled_header = Headers.from_string(header_string)\n except ValueError:\n error = 'Your header modifications created an invalid header'\\\n ' string that could NOT be parsed back to a Header object.'\n om.out.error(error)\n else:\n response.set_headers(mangled_header)\n\n if self._user_option_fix_content_len:\n response = self._fix_content_len(response)\n\n return response", "def scrub_response(self, data):\n return self.__response_scrubber(data)" ]
[ "0.677958", "0.59345746", "0.57835406", "0.55620325", "0.5471291", "0.5337002", "0.5294734", "0.52340436", "0.520091", "0.5153462", "0.5144874", "0.5106061", "0.5106061", "0.50697017", "0.5021026", "0.5019364", "0.5016278", "0.5000586", "0.49666125", "0.49373594", "0.49259564", "0.49193048", "0.49150768", "0.49080303", "0.49006706", "0.48745126", "0.4822717", "0.48210418", "0.48178512", "0.48171464" ]
0.67062664
1
Parse response as an HTML document. Caller may pass ``encoding`` and ``errors`` to instruct us how to decode response content. This is useful because lxml's default is to silently skip the rest of the document when there is any encoding error in the middle. lxml's strict-but-silent policy is counterproductive because the web is full of malformed documents, and it should either be lenient about the error, or raise it to the caller, not a mix of both as it is right now.
def html(self, encoding=None, errors=None): if encoding and errors: string = self.content.decode(encoding=encoding, errors=errors) parser = _get_html_parser(None) else: ASSERT.none(errors) string = self.content parser = _get_html_parser( encoding or ASSERT.not_none(self.encoding) ) document = lxml.etree.fromstring(string, parser) # Check whether fromstring returns None because apparently # HTMLParser is more lenient than XMLParser and may cause # fromstring to return None on some malformed HTML input. if document is None: raise AssertionError( 'lxml.etree.fromstring error: %s content=%r' % (self.url, self.content) ) return document
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def htmlParseDoc(cur, encoding):\n ret = libxml2mod.htmlParseDoc(cur, encoding)\n if ret is None:raise parserError('htmlParseDoc() failed')\n return xmlDoc(_obj=ret)", "def htmlParseDocument(self):\n ret = libxml2mod.htmlParseDocument(self._o)\n return ret", "def html_parser():\n return HTMLParser(strict=True)", "def parse_response(self, resp):\n p, u = self.getparser()\n\n if hasattr(resp, 'text'):\n # modern requests will do this for us\n text = resp.text # this is unicode(py2)/str(py3)\n else:\n\n encoding = requests.utils.get_encoding_from_headers(resp.headers)\n if encoding is None:\n encoding = 'utf-8' # FIXME: what to do here?\n\n if sys.version_info[0] == 2:\n text = unicode( # noqa: F821\n resp.content, encoding, errors='replace')\n else:\n assert sys.version_info[0] == 3\n text = str(resp.content, encoding, errors='replace')\n p.feed(text)\n p.close()\n return u.close()", "def htmlParseFile(filename, encoding):\n ret = libxml2mod.htmlParseFile(filename, encoding)\n if ret is None:raise parserError('htmlParseFile() failed')\n return xmlDoc(_obj=ret)", "def parse_html(html):\n parser = lxml.html.HTMLParser(encoding='utf8')\n return lxml.html.fromstring(html.encode('utf8'), parser=parser)", "def parse(html, encoding='utf-8'):\n if isinstance(html, unicode):\n return bs4.BeautifulSoup(html, 'html.parser')\n\n return bs4.BeautifulSoup(html, 'html.parser', from_encoding=encoding)", "def parse_html(self):\n\n try:\n parser = HtmlParser(self.url)\n\n parser.set_pattern(self.pattern)\n parser.set_urls(self.spider_config)\n parser.set_next_depth(self.depth)\n parser.feed(self.page)\n parser.close()\n except UnicodeDecodeError as e:\n logging.error('Thread:{} parse {} failed, msg:{}'.format(self.thread_id, self.url, e))\n return False\n\n return True", "def htmlReadDoc(cur, URL, encoding, options):\n ret = libxml2mod.htmlReadDoc(cur, URL, encoding, options)\n if ret is None:raise treeError('htmlReadDoc() failed')\n return xmlDoc(_obj=ret)", "def __parse_response(self, response_text):\n root = etree.fromstring(response_text)\n namespace = re.search('{(.*)}', root.tag).group(1)\n status_path = '{%s}Status' % namespace\n status = int(root.findtext(status_path))\n response = None\n if status != 0:\n response = Error(status, root, namespace)\n else:\n response = self._parse_response_body(root, namespace)\n return response", "def test_parseHtml(self):\n dom = lunchr.parseHtml(self.html)\n self.assertTrue(isinstance(dom, xml.dom.minidom.Document))", "def _process_response(self, response, marker_elems=None):\r\n body = response.read()\r\n #print body\r\n if '<Errors>' not in body:\r\n rs = ResultSet(marker_elems)\r\n h = handler.XmlHandler(rs, self)\r\n xml.sax.parseString(body, h)\r\n return rs\r\n else:\r\n raise MTurkRequestError(response.status, response.reason, body)", "def _get_resp_body_errors(self):\n\n if self._resp_body_errors and len(self._resp_body_errors) > 0:\n return self._resp_body_errors\n\n errors = []\n warnings = []\n resp_codes = []\n\n if self.verb is None:\n return errors\n\n dom = self.response.dom()\n if dom is None:\n return errors\n\n for e in dom.findall('Errors'):\n eSeverity = None\n eClass = None\n eShortMsg = None\n eLongMsg = None\n eCode = None\n\n try:\n eSeverity = e.findall('SeverityCode')[0].text\n except IndexError:\n pass\n\n try:\n eClass = e.findall('ErrorClassification')[0].text\n except IndexError:\n pass\n\n try:\n eCode = e.findall('ErrorCode')[0].text\n except IndexError:\n pass\n\n try:\n eShortMsg = 
smart_encode(e.findall('ShortMessage')[0].text)\n except IndexError:\n pass\n\n try:\n eLongMsg = smart_encode(e.findall('LongMessage')[0].text)\n except IndexError:\n pass\n\n try:\n eCode = e.findall('ErrorCode')[0].text\n if int(eCode) not in resp_codes:\n resp_codes.append(int(eCode))\n except IndexError:\n pass\n\n msg = str(\"Class: {eClass}, Severity: {severity}, Code: {code}, {shortMsg} {longMsg}\") \\\n .format(eClass=eClass, severity=eSeverity, code=eCode, shortMsg=eShortMsg,\n longMsg=eLongMsg)\n\n # from IPython import embed; embed()\n\n if eSeverity == 'Warning':\n warnings.append(msg)\n else:\n errors.append(msg)\n\n self._resp_body_warnings = warnings\n self._resp_body_errors = errors\n self._resp_codes = resp_codes\n\n if self.config.get('warnings') and len(warnings) > 0:\n log.warn(\"{verb}: {message}\\n\\n\".format(\n verb=self.verb, message=\"\\n\".join(warnings)))\n\n if self.response.reply.Ack == 'Failure':\n if self.config.get('errors'):\n log.error(\"{verb}: {message}\\n\\n\".format(\n verb=self.verb, message=\"\\n\".join(errors)))\n\n return errors\n\n return []", "async def text(self, encoding=\"utf-8\", errors=\"strict\"):\n return self.response.decode(encoding, errors=errors)", "def parse(self, html=None):\n if html:\n self.html = html.encode('utf-8').decode('utf-8')\n\n # lets do the actual parsing\n self._parse()\n\n # Apply subclass specific behaviour after parsing has happened\n # This is needed because different parsers need to clean/modify\n # the parsed data uniquely.\n self.after_parsing()", "def lxml(self):\n\n if self._lxml is None:\n self._lxml = html.fromstring(self.html)\n return self._lxml", "def process(self, url, configuration={}, data=None):\n try:\n response = self.fetch(url, data, configuration.get(\"headers\", {}))\n response.mime_type = response.headers.get('Content-Type', ';').split(';')[0]\n response.body = response.read()\n if configuration.get(\"parse_result\", True) and response.mime_type.startswith('text'):\n if response.body.startswith(b\"<!DOC\") or response.body.startswith(b\"<!doc\"):\n response.soup = BeautifulSoup(\n response.body[response.body.find(b\">\")+1:], \"html.parser\")\n else:\n response.soup = BeautifulSoup(response.body, \"html.parser\")\n else:\n response.soup = BeautifulSoup('', \"html.parser\")\n return response\n except urllib.error.HTTPError as e:\n status = int(str(e).split()[2][0:3])\n if status in configuration.get('status', [200, 301, 302, 303]):\n # This is OK -- the status matches what we're expecting\n class response(object):\n status_code = status\n soup = BeautifulSoup('', \"html.parser\")\n body = ''\n def __init__(self, u):\n self.url = u\n return response(url)\n raise", "def handle_errors(resp: requests.Response):\n error_text = resp.text\n if isinstance(resp.text, bytes):\n try:\n error_text = error_text.decode(UTF_ENCODING)\n except UnicodeDecodeError:\n error_text = error_text.decode(\"iso-8859-1\")\n if error_text != \"\":\n _raise_error(error_text)\n resp.raise_for_status()", "def get_document(url):\n r = requests.get(url, timeout=5)\n r.raise_for_status()\n return lxml.html.fromstring(r.content)", "def _parse_xml(self, response):\n if response.startswith('\\n'):\n response = response[1:]\n tree = etree.fromstring(response)\n return tree", "def validate_nrml(request):\n xml_text = request.POST.get('xml_text')\n if not xml_text:\n return HttpResponseBadRequest(\n 'Please provide the \"xml_text\" parameter')\n try:\n xml_text = xml_text.replace('\\r\\n', '\\n').replace('\\r', '\\n')\n 
_do_validate_nrml(xml_text)\n except (HTTPError, ValueError) as e:\n exc = e.args[0]\n return _make_response(error_msg=exc['message'],\n error_line=exc['lineno'],\n valid=False)\n except Exception as exc:\n # get the exception message\n exc_msg = exc.args[0]\n if isinstance(exc_msg, bytes):\n exc_msg = exc_msg.decode('utf-8') # make it a unicode object\n elif isinstance(exc_msg, unicode):\n pass\n else:\n # if it is another kind of object, it is not obvious a priori how\n # to extract the error line from it\n # but we can attempt anyway to extract it\n error_line = _get_error_line(unicode(exc_msg))\n return _make_response(\n error_msg=unicode(exc_msg), error_line=error_line,\n valid=False)\n error_msg = exc_msg\n error_line = _get_error_line(exc_msg)\n return _make_response(\n error_msg=error_msg, error_line=error_line, valid=False)\n else:\n return _make_response(error_msg=None, error_line=None, valid=True)", "def parse(self, response: BeautifulSoup):\n raise NotImplementedError", "def htmlReadFile(filename, encoding, options):\n ret = libxml2mod.htmlReadFile(filename, encoding, options)\n if ret is None:raise treeError('htmlReadFile() failed')\n return xmlDoc(_obj=ret)", "def parseHTML(html: str, *, piHandler: Optional[PIHandler] = None) -> XML:\n parser = _XHTMLParser(piHandler)\n parser.feed(html)\n (_, content), = parser.stack\n return xhtml[content]", "def htmlReadMemory(buffer, size, URL, encoding, options):\n ret = libxml2mod.htmlReadMemory(buffer, size, URL, encoding, options)\n if ret is None:raise treeError('htmlReadMemory() failed')\n return xmlDoc(_obj=ret)", "def _scrape_response(self, headers, response):\n # identify the responding server\n server_type = None\n server_string = headers.get(\"server\", \"\")\n\n if server_string and \"jetty\" in server_string.lower():\n server_type = \"jetty\"\n\n if server_string and \"coyote\" in server_string.lower():\n server_type = \"tomcat\"\n\n reason = None\n full_html = \"\"\n dom_tree = None\n\n # In Python3, response can be made of bytes\n if IS_PY3 and hasattr(response, \"decode\"):\n response = response.decode()\n if response.startswith(\"<?xml\"):\n # Try a strict XML parse\n try:\n soup = ElementTree.fromstring(response)\n\n reason_node = soup.find('lst[@name=\"error\"]/str[@name=\"msg\"]')\n tb_node = soup.find('lst[@name=\"error\"]/str[@name=\"trace\"]')\n if reason_node is not None:\n full_html = reason = reason_node.text.strip()\n if tb_node is not None:\n full_html = tb_node.text.strip()\n if reason is None:\n reason = full_html\n\n # Since we had a precise match, we'll return the results now:\n if reason and full_html:\n return reason, full_html\n except ElementTree.ParseError:\n # XML parsing error, so we'll let the more liberal code handle it.\n pass\n\n if server_type == \"tomcat\":\n # Tomcat doesn't produce a valid XML response or consistent HTML:\n m = re.search(r\"<(h1)[^>]*>\\s*(.+?)\\s*</\\1>\", response, re.IGNORECASE)\n if m:\n reason = m.group(2)\n else:\n full_html = \"%s\" % response\n else:\n # Let's assume others do produce a valid XML response\n try:\n dom_tree = ElementTree.fromstring(response)\n reason_node = None\n\n # html page might be different for every server\n if server_type == \"jetty\":\n reason_node = dom_tree.find(\"body/pre\")\n else:\n reason_node = dom_tree.find(\"head/title\")\n\n if reason_node is not None:\n reason = reason_node.text\n\n if reason is None:\n full_html = ElementTree.tostring(dom_tree)\n except SyntaxError as err:\n LOG.warning( # NOQA: G200\n \"Unable to extract error 
message from invalid XML: %s\",\n err,\n extra={\"data\": {\"response\": response}},\n )\n full_html = \"%s\" % response\n\n full_html = force_unicode(full_html)\n full_html = full_html.replace(\"\\n\", \"\")\n full_html = full_html.replace(\"\\r\", \"\")\n full_html = full_html.replace(\"<br/>\", \"\")\n full_html = full_html.replace(\"<br />\", \"\")\n full_html = full_html.strip()\n return reason, full_html", "def tidy(temp_dir, tidy_path, strict_xml=False, errors_to_ignore=None,\r\n indent=False, wrap=False, warnings=True):\r\n response = cherrypy.response\r\n \r\n # the tidy tool, by its very nature it's not generator friendly, \r\n # so we just collapse the body and work with it.\r\n orig_body = response.collapse_body()\r\n \r\n fct = response.headers.get('Content-Type', '')\r\n ct = fct.split(';')[0]\r\n encoding = ''\r\n i = fct.find('charset=')\r\n if i != -1:\r\n encoding = fct[i + 8:]\r\n \r\n if ct == 'text/html':\r\n page_file = os.path.join(temp_dir, 'page.html')\r\n open(page_file, 'wb').write(orig_body)\r\n \r\n out_file = os.path.join(temp_dir, 'tidy.out')\r\n err_file = os.path.join(temp_dir, 'tidy.err')\r\n tidy_enc = encoding.replace('-', '')\r\n if tidy_enc:\r\n tidy_enc = '-' + tidy_enc\r\n \r\n strict_xml = (\"\", \" -xml\")[bool(strict_xml)]\r\n \r\n if indent:\r\n indent = ' -indent'\r\n else:\r\n indent = ''\r\n \r\n if wrap is False:\r\n wrap = ''\r\n else:\r\n try:\r\n wrap = ' -wrap %d' % int(tidyWrap)\r\n except:\r\n wrap = ''\r\n \r\n result = os.system('\"%s\" %s%s%s%s -f %s -o %s %s' %\r\n (tidy_path, tidy_enc, strict_xml, indent, wrap,\r\n err_file, out_file, page_file))\r\n use_output = bool(indent or wrap) and not result\r\n if use_output:\r\n output = open(out_file, 'rb').read()\r\n \r\n new_errs = []\r\n for err in open(err_file, 'rb').read().splitlines():\r\n if (err.find('Error') != -1 or\r\n (warnings and err.find('Warning') != -1)):\r\n ignore = 0\r\n for err_ign in errors_to_ignore or []:\r\n if err.find(err_ign) != -1:\r\n ignore = 1\r\n break\r\n if not ignore:\r\n new_errs.append(err)\r\n \r\n if new_errs:\r\n response.body = wrong_content('<br />'.join(new_errs), orig_body)\r\n if response.headers.has_key(\"Content-Length\"):\r\n # Delete Content-Length header so finalize() recalcs it.\r\n del response.headers[\"Content-Length\"]\r\n return\r\n elif strict_xml:\r\n # The HTML is OK, but is it valid XML?\r\n # Use elementtree to parse XML\r\n from elementtree.ElementTree import parse\r\n tag_list = ['nbsp', 'quot']\r\n for tag in tag_list:\r\n orig_body = orig_body.replace('&' + tag + ';', tag.upper())\r\n \r\n if encoding:\r\n enctag = '<?xml version=\"1.0\" encoding=\"%s\"?>' % encoding\r\n orig_body = enctag + orig_body\r\n \r\n f = StringIO.StringIO(orig_body)\r\n try:\r\n tree = parse(f)\r\n except:\r\n # Wrong XML\r\n body_file = StringIO.StringIO()\r\n traceback.print_exc(file = body_file)\r\n body_file = '<br />'.join(body_file.getvalue())\r\n response.body = wrong_content(body_file, orig_body, \"XML\")\r\n if response.headers.has_key(\"Content-Length\"):\r\n # Delete Content-Length header so finalize() recalcs it.\r\n del response.headers[\"Content-Length\"]\r\n return\r\n \r\n if use_output:\r\n response.body = [output]\r\n if response.headers.has_key(\"Content-Length\"):\r\n # Delete Content-Length header so finalize() recalcs it.\r\n del response.headers[\"Content-Length\"]", "def _extract_html(self, url):\n self.response = requests.get(url, timeout=5)\n self.html = BeautifulSoup(self.response.content, \"lxml\") if self.response.ok 
else None\n # return self.html", "def set_response(self):\r\n import cherrypy\r\n \r\n response = cherrypy.response\r\n \r\n clean_headers(self.status)\r\n \r\n # In all cases, finalize will be called after this method,\r\n # so don't bother cleaning up response values here.\r\n response.status = self.status\r\n tb = None\r\n if cherrypy.request.show_tracebacks:\r\n tb = format_exc()\r\n response.headers['Content-Type'] = \"text/html\"\r\n \r\n content = self.get_error_page(self.status, traceback=tb,\r\n message=self._message)\r\n response.body = content\r\n response.headers['Content-Length'] = len(content)\r\n \r\n _be_ie_unfriendly(self.status)", "def parse(afile, builder=None, encoding=None):\n bob = builder\n\n def emit(this_soup):\n \"\"\"\n emit cleaned up html\n :type this_soup:\n \"\"\"\n if isinstance(this_soup, BS.element.NavigableString):\n for ignorable in ignorable_soup:\n if isinstance(this_soup, ignorable):\n return\n bob.data(unescape(this_soup))\n else:\n attrib = dict([(k, unescape(v)) for k, v in this_soup.attrs])\n bob.start(this_soup.name, attrib)\n for s in this_soup:\n emit(s)\n bob.end(this_soup.name)\n\n # determine encoding (the document charset is not reliable)\n if not hasattr(afile, \"read\"):\n infile = open(afile)\n text = infile.read()\n assert isinstance(encoding, object)\n if not encoding:\n try:\n encoding = \"utf-8\"\n unicode(text, encoding)\n except UnicodeError:\n encoding = \"iso-8859-1\"\n soup = BS.BeautifulSoup(\n text, convertEntities=\"html\", fromEncoding=encoding\n )\n # build the tree\n if not bob:\n bob = ET.TreeBuilder()\n emit(soup)\n root = bob.close()\n assert isinstance(root, object)\n # wrap the document in a html root element, if necessary\n if 1 == len(root) and \"html\" == root[0].tag:\n return root[0]\n root.tag = \"html\"\n return root" ]
[ "0.5807232", "0.5546697", "0.55118394", "0.5314366", "0.5302231", "0.51846737", "0.5178749", "0.5171138", "0.516803", "0.5043235", "0.49736738", "0.4957396", "0.49460128", "0.49409348", "0.49071482", "0.48973745", "0.4881638", "0.48738164", "0.48732352", "0.48569918", "0.4833337", "0.48323655", "0.4792391", "0.4778607", "0.47734836", "0.47568145", "0.4719667", "0.46933493", "0.46380192", "0.46347716" ]
0.71148086
0
Insert a single record object into the collection
async def insert(self, record, collection: str): db_record = await self.database[collection].insert_one(record.dict(exclude={'id'})) return record
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(self, record):\n self.collection.insert(record)\n self.record = record\n\n return self", "def insert(self):\n self.getDbRecord().insert()\n\n return", "def insert_record(self):\r\n try:\r\n db.session.add(self)\r\n db.session.commit()\r\n return {\"error\": False, \"id\": self.id}\r\n except exc.SQLAlchemyError as e: # pragma: no cover\r\n # print(e)\r\n # print(sys.exc_info())\r\n db.session.rollback()\r\n return {\"error\": True}\r\n finally:\r\n db.session.close()", "async def insert_one(self, model):\n\n pass", "def insert(self):\n db.session.add(self)\n db.session.commit()", "def insert(self):\n db.session.add(self)\n db.session.commit()", "def insert(self):\n db.session.add(self)\n db.session.commit()", "def insert(self, data):\n return self.collection.insert(data)", "def insert(self):\n item = self.create()\n return item.id", "def add(self, rec):\n rec['ts'] = datetime.now()\n self.collection.insert_one(rec)", "def insert_one(self, data):\n _client = self.client\n _db = _client[self.database]\n _col = _db[self.collection]\n\n x = _col.insert_one(data)\n\n return x", "def insert_record(self, record, session):\n try:\n session.add(record)\n session.commit()\n session.close()\n return True\n except:\n\n logging.exception(\"http record cannot be added to db \" \":Time: \" + str(datetime.datetime.now()))\n return False", "def save_single_record(self, record, collection_name):\n try:\n self.logger.info('in save_single_record()')\n collection = self.get_db()[collection_name]\n record_id = collection.insert_one(record)\n self.logger.info('out save_single_record()')\n return record_id\n except Exception as e:\n self.logger.error(f'Error occurred while saving single record {e}')", "def insert_one(collection, data):\n try:\n return collection.insert_one(data).inserted_id == data['_id']\n except Exception as error:\n print(error)\n raise", "def findtinsert(dictonary, collection):\n try:\n return collection.find_one(dictonary)['_id']\n except TypeError:\n return collection.insert(dictonary, safe=True)", "def add_record(self, record):\n pass", "def add_a_record(self, record):\n '''\n doc = { \"P/N\": record,#record.get_PN(),\n \"supplier\": \"\",\n \"inventory\": \"\",\n \"specification\": \"\",\n \"description\": \"\",\n \"OEM\": \"\",\n \"tags\": [\"mongodb\", \"python\", \"pymongo\"],\n \"date\": datetime.datetime.utcnow()}'''\n self.collection.insert(record)", "def insert_document(self, collection, doc):\n # Create/Access your collection\n mycol = self.db[collection]\n # Insert your document into the collection\n x = mycol.insert_one(doc)\n # Return the inserted id to verify success\n return x.inserted_id", "def create(cls, collection, data, schema=None):\n validated = cls.validate(data, schema=schema)\n result = collection.insert_one(validated)\n return collection.find_one({\"_id\": result.inserted_id})", "async def insert_one(self, model: Model) -> Model:\n\n if not isinstance(model, Model):\n raise ValueError('insert_one method expects Model instance.')\n\n model_as_dict = model.as_dict\n\n if not model_as_dict.get('_id'):\n model_as_dict.pop('_id')\n\n result = await self.collection.insert_one(model_as_dict)\n return await self.get_one(where={'_id': result.inserted_id})", "def store_one(self, item):\n with mongo_connection(self.cfg_mongo) as mongo:\n client, coll = mongo\n coll.insert_one(item)\n\n return None", "def insert(self):\n ret = True\n\n schema = self.schema\n fields = self.depopulate(False)\n\n q = self.query\n q.set_fields(fields)\n pk = q.insert()\n if pk:\n 
fields = q.fields\n fields[schema.pk.name] = pk\n self._populate(fields)\n\n else:\n ret = False\n\n return ret", "def insert(self, data):\n\n if not data:\n raise ValueError('invalid data')\n\n # TODO: validate and insert data into model", "def insert_player(document):\n players_col.insert_one(document)", "def add(self, data):\n self.collection.insert(data)", "def _insert(self, object_arr):\n _object = None\n\n try:\n if not self._is_session_valid():\n self._reset_session()\n for obj in object_arr:\n obj.setdefault(\"mode\", \"add\")\n\n _object = obj[\"instance\"]\n if obj[\"mode\"] == \"merge\":\n self._merge(_object)\n elif obj[\"mode\"] == \"add\":\n self._add(_object)\n elif obj[\"mode\"] == \"merge_by_query\":\n self._merge_by_query(obj)\n else:\n raise NotImplementedError(\"Invalid mode: {mode}\".format(mode=obj[\"mode\"]))\n self._commit()\n except DatabaseError.ConnectionError:\n raise\n except Exception:\n self._rollback()\n self._reset_session()\n raise", "def createRecord(self):\n self.dto.getRecord().append(self.controller.createNewObj())\n print(\"Record added.\")", "def add_object(_object):\n print('add_object: ' + str(_object))\n try_insert_or_update(\n models.objects.insert(), # pylint: disable=no-value-for-parameter\n [dict(\n project_id=_object['project_id'], filename=_object['filename'])])", "def insert_event_to_db(self):\n try:\n events_coll.insert_one(self.event_info_to_dic())\n except Exception as e:\n print(e)", "def insert_one(self, document: dict) -> None:\n if isinstance(document, dict):\n self._store_document(document)\n else:\n raise TypeError(\"The document must be a dictionary.\")\n self._dump()" ]
[ "0.73335576", "0.728069", "0.72504306", "0.71881104", "0.7135278", "0.7135278", "0.7135278", "0.71175796", "0.70221514", "0.7010273", "0.6919808", "0.68871707", "0.68585205", "0.6805966", "0.6790478", "0.6775092", "0.674791", "0.6710327", "0.66818607", "0.66420037", "0.6641766", "0.65985656", "0.6582948", "0.65763175", "0.6558159", "0.65518737", "0.6482596", "0.64700985", "0.6459253", "0.64523864" ]
0.8090869
0
Method to get records from a collection based on key and value
async def get_by_value(self, collection: str, key: str, value): query = {key: value} db_records = self.database[collection].find(query) output_records = [] async for record in db_records: output_records.append(QuestionInDB(**record, id=record["_id"])) return output_records
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __getitem__(self, key):\n return self.query(key)", "def find(self, key=None, lo=None, hi=None, reverse=None, include=False,\n txn=None, rec=None, default=None):\n it = self.values(key, lo, hi, reverse, None, include, txn, rec)\n v = next(it, default)\n if v is default and rec and default is not None:\n v = Record(self.coll, default)\n return v", "async def get(self, collection, key):\n raise NotImplementedError", "def _filter_search_values(key: str, values: list, collection: list):\n return_data = []\n for item in collection:\n if any(val in values for val in item[key]):\n return_data.append(item)\n return return_data", "def get_by(cls, name, value, keys_only=None):\n return cls.query(getattr(cls, name) == value).get(keys_only=keys_only)", "def find(cls, key):\r\n return cls.query().get(key)", "def find(self, key, condition) -> list:\n pass", "def task_3_find_item_via_value(data: DT, value) -> DT:\n return [dic for dic in data if value in dic.values()]", "def get_for_key(self, key) -> list:\n return [res[key] for res in self.list]", "def __getitem__(self, key):\n with SessionContext(self.SessionClass) as session:\n q = session.query(PAW2_DBObject)\n return q.filter(PAW2_DBObject.key == key).one()", "async def get_entry(self, key, *, convert=True, as_dict=False):\r\n\r\n query = \"SELECT * FROM {table_name} WHERE {primary_key} = ?\"\r\n cur = await self.data.db.execute(query.format(table_name=self.name, primary_key=self.primary_key.name), [key])\r\n data = await cur.fetchone()\r\n print(data)\r\n if not data:\r\n return []\r\n if convert and as_dict:\r\n raise ArgumentError(\"Incorrect arguments passed. only one can be True between arguments (convert, as_dict)\")\r\n converted = self.compile_as_list([data])\r\n if as_dict:\r\n return data\r\n obj = Record(**converted[0])\r\n return obj", "def get_value(self, key, getter=lambda x: x):\n record_trie = self._record_trie\n assert key in record_trie\n value = record_trie[key]\n # Record trie allows keys to have multiple values and returns a list of values for each key.\n # As we make the value for each key a list already (to control order/not have to sort again),\n # we need to assert there is only a single value\n assert len(value) == 1\n value = value[0]\n return_value = inverse_qid_cand_with_score(value=value, itos=self._itos)\n res = list(map(getter, return_value))\n assert len(res) <= self._max_value\n return res", "def find(self, args=None, lo=None, hi=None, reverse=None, include=False,\n txn=None, rec=None, default=None):\n it = self.values(args, lo, hi, reverse, None, include, txn, rec)\n v = next(it, default)\n if v is default and rec and default is not None:\n v = Record(self.coll, default)\n return v", "def get(self, key: Any, **kwargs) -> Iterable:\n return self.store.get(key, **kwargs)", "def getcolldetails(collectionobj, getattr, getval):\n return collectionobj.find({getattr: getval})", "def where(self, key, value):\n comparison = key + \" = \" + sanitize_value(value)\n results = self.__run(\n pagination_template.substitute(\n tablename=self.tablename,\n comparison=comparison,\n sortKey=key,\n asc=\"ASC\",\n limit=1\n ),\n )\n return results", "def get_document_by_key(db, col, key, raw_results=False, scrub_results=False):\n\n aql = 'FOR d in ' + col.name + ' FILTER d._key == @key RETURN d'\n params = {\n 'key': key\n }\n\n result = db.AQLQuery(aql, bindVars=params, rawResults=raw_results)\n\n if len(result) > 0:\n if scrub_results:\n return scrub_db_specific_data(result[0])\n else:\n return result[0]\n return None", "def 
get_items(self, value, key=None):\n if key is None:\n return self.dicts(value)\n else:\n items = self.dicts(value)\n return [item[key] for item in items]", "def lookup(self, key):", "def find_records(self, check, keys=None):\n matches = self._match(check)\n if keys:\n return [self._extract_subdict(rec, keys) for rec in matches]\n else:\n return matches", "def find_distinct(self, collection, key):\n obj = getattr(self.db, collection)\n result = obj.distinct(key)\n return result", "def get_many(collection, query: dict, selection=None) -> List[dict]:\n data = []\n if selection is None:\n for item in collection.find(query):\n data.append(item)\n else:\n for item in collection.find(query, selection):\n data.append(item)\n return data", "def __getitem__(self, key):\n for db in self.db:\n if db.name == key:\n return db\n raise IndexError", "def getData(self, value=None, key=\"description\"):\n if value:\n result = None\n for item in self.data:\n target = None\n current = item[key] # it could be either string or a list of string\n if type(value) == list:\n if type(current) == list:\n found = False\n for valueItem in value:\n if valueItem in current:\n found = True\n else:\n found = False\n if found:\n target = item\n else:\n if current in value:\n target = item\n else:\n if type(current) == list:\n if value in current:\n target = item\n else:\n if value == current:\n target = item\n if target:\n if not result:\n result = []\n result.append(target)\n return result\n \n else:\n return self.data", "def keys(self, key=None, lo=None, hi=None, reverse=None, max=None,\n include=False, txn=None, rec=None):\n return itertools.imap(ITEMGETTER_0,\n self.items(key, lo, hi, reverse, max, include, txn, rec))", "def getSpecific(self, keyword, key):", "def get(list_of_dict, key, value):\n return filter(lambda dictionary: dictionary[key] == value, list_of_dict)", "def get(self, **args ):\n # Make sure its a valid argument\n for key in args.keys():\n if not key in self.schema:\n raise BadArgument(\"Key %s not a valid argument\" % key )\n\n query = STD.select('*')\n query = query.where( args )\n item = query.list()\n\n # If a list return make sure there is only one item\n if isinstance(item, collections.Iterable):\n if len(item) > 1:\n raise NotUnique(\"More than one items found\")\n if len(item) == 0:\n print \"No items found\"\n return None\n else:\n item = item[0]\n return item", "def get_doc_by_keyword(self,collection,field_name,search_key,like=True):\n if like:\n # This finds the records in which the field just \"contains\" the search_key\n res = self.client['rephie'][collection].find(({field_name : {'$regex' : \".*\"+search_key+\".*\"}}))\n else:\n # This finds the records in which the field is equal to the search_key\n res = self.client['rephie'][collection].find({field_name : search_key})\n\n return self._make_result_list(res)", "async def get_entries(self, *args,convert = True, listed=False, as_dict=False):\r\n consts = args\r\n condition = condition = \" AND \".join(consts)\r\n if not consts:\r\n query = \"SELECT * FROM {table_name}\"\r\n else:\r\n query = \"SELECT * FROM {table_name} WHERE {condition}\"\r\n query = query.format(condition = condition, table_name=self.name)\r\n cur = await self.data.db.execute(query)\r\n data = await cur.fetchall()\r\n await cur.close()\r\n if not data:\r\n return []\r\n if (convert and listed) or (convert and as_dict):\r\n raise ArgumentError(\"Incorrect arguments passed. 
only one can be True between arguments (convert, listed, as_dict)\")\r\n #Data contains all the info retrieved. Compile into dicts and also get the primary key data\r\n if listed:\r\n data = self.compile_as_list(data)\r\n return data\r\n if as_dict:\r\n data = self.compile_as_dict(data)\r\n return data\r\n data = self.compile_as_obj(data)\r\n return Records(data)" ]
[ "0.6563864", "0.63076335", "0.6280453", "0.6171545", "0.613677", "0.6104942", "0.6096535", "0.6076541", "0.60433006", "0.59330887", "0.5929107", "0.5899685", "0.5890298", "0.5871584", "0.58656615", "0.5824654", "0.58050776", "0.5792954", "0.57815087", "0.57759804", "0.57705414", "0.5765565", "0.5765471", "0.5750592", "0.5744008", "0.57323956", "0.57146513", "0.5709383", "0.569339", "0.5681772" ]
0.75334543
0
r""" Helper function for performing convolution after upsampling.
def _upsample_conv(self, x, conv): return conv( F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upsample_conv_2d(x, w, k=None, factor=2, gain=1, data_format='NCHW', impl='cuda', gpu=True):\r\n\r\n assert isinstance(factor, int) and factor >= 1\r\n\r\n # Check weight shape.\r\n w = tf.convert_to_tensor(w)\r\n assert w.shape.rank == 4\r\n convH = w.shape[0]\r\n convW = w.shape[1]\r\n inC = Oncuda._shape(w, 2)\r\n outC = Oncuda._shape(w, 3)\r\n assert convW == convH\r\n\r\n # Setup filter kernel.\r\n if k is None:\r\n k = [1] * factor\r\n k = Oncuda._setup_kernel(k) * (gain * (factor ** 2))\r\n p = (k.shape[0] - factor) - (convW - 1)\r\n\r\n # Determine data dimensions.\r\n if data_format == 'NCHW':\r\n stride = [1, 1, factor, factor]\r\n output_shape = [Oncuda._shape(x, 0), outC, (Oncuda._shape(x, 2) - 1) * factor + convH, (Oncuda._shape(x, 3) - 1) * factor + convW]\r\n num_groups = Oncuda._shape(x, 1) // inC\r\n else:\r\n stride = [1, factor, factor, 1]\r\n output_shape = [Oncuda._shape(x, 0), (Oncuda._shape(x, 1) - 1) * factor + convH, (Oncuda._shape(x, 2) - 1) * factor + convW, outC]\r\n num_groups = Oncuda._shape(x, 3) // inC\r\n\r\n # Transpose weights.\r\n w = tf.reshape(w, [convH, convW, inC, num_groups, -1])\r\n w = tf.transpose(w[::-1, ::-1], [0, 1, 4, 3, 2])\r\n w = tf.reshape(w, [convH, convW, -1, num_groups * inC])\r\n\r\n # Execute.\r\n x = tf.nn.conv2d_transpose(x, w, output_shape=output_shape, strides=stride, padding='VALID', data_format=data_format)\r\n return Oncuda._simple_upfirdn_2d(x, k, pad0=(p+1)//2+factor-1, pad1=p//2+1, data_format=data_format, impl=impl, gpu=gpu)", "def upsample_conv_2d(x, w, k=None, factor=2, gain=1, data_format='NHWC'):\n\n assert isinstance(factor, int) and factor >= 1\n\n # Check weight shape.\n assert len(w.shape) == 4\n convH = w.shape[0]\n convW = w.shape[1]\n inC = w.shape[2]\n outC = w.shape[3]\n assert convW == convH\n\n # Setup filter kernel.\n if k is None:\n k = [1] * factor\n k = _setup_kernel(k) * (gain * (factor**2))\n p = (k.shape[0] - factor) - (convW - 1)\n\n stride = [factor, factor]\n # Determine data dimensions.\n if data_format == 'NCHW':\n num_groups = _shape(x, 1) // inC\n else:\n num_groups = _shape(x, 3) // inC\n\n # Transpose weights.\n w = jnp.reshape(w, [convH, convW, inC, num_groups, -1])\n w = jnp.transpose(w[::-1, ::-1], [0, 1, 4, 3, 2])\n w = jnp.reshape(w, [convH, convW, -1, num_groups * inC])\n\n ## Original TF code.\n # x = tf.nn.conv2d_transpose(\n # x,\n # w,\n # output_shape=output_shape,\n # strides=stride,\n # padding='VALID',\n # data_format=data_format)\n ## JAX equivalent\n x = jax.lax.conv_transpose(\n x,\n w,\n strides=stride,\n padding='VALID',\n transpose_kernel=True,\n dimension_numbers=(data_format, 'HWIO', data_format))\n\n return _simple_upfirdn_2d(\n x,\n k,\n pad0=(p + 1) // 2 + factor - 1,\n pad1=p // 2 + 1,\n data_format=data_format)", "def upfirdn(s, h, p, q):\n return downsample(fftconvolve(h, upsample(s, p)), q)", "def up_sampling_block(A, B, up, channel, kernel_width, stride, initializer, hidden_activation):\n A = Concatenate()([A, B])\n A = UpSampling2D(up)(A)\n B = UpSampling2D(up)(B)\n A = Conv2D(channel, kernel_width, stride,\n padding=\"same\", kernel_initializer=initializer)(A)\n A = LayerNormalization()(A)\n A = hidden_activation(A)\n return A, B", "def upsample(img):\n\n filtered = sp.signal.convolve2d(img, guassianFilter, 'same')\n i, j = img.shape\n upsampled = np.zeros((i*2, j*2))\n for r in range(i):\n upsampled[2 * r, ::2] = img[r, ::]\n for c in range(j):\n upsampled[::2, 2 * c] = img[::, c]\n\n # Need to raise values of upsampled image by 4 (1px in original -> 
4px in upsampled)\n return 4 * sp.signal.convolve2d(upsampled, guassianFilter, 'same')", "def _crop_concat(self, upsampled, bypass):\n c = (bypass.size()[2] - upsampled.size()[2]) // 2\n bypass = F.pad(bypass, (-c, -c, -c, -c))\n\n return torch.cat((upsampled, bypass), 1)", "def clConvolution(self, size, mask):", "def crop_and_concat(self, upsampled, bypass, crop=False):\n logging.debug(\"Before - Upsampled: {}\".format(upsampled.size()))\n logging.debug(\"Before - bypass: {}\".format(bypass.size()))\n if crop:\n c1 = (bypass.size()[2] - upsampled.size()[2]) // 2\n c2 = (bypass.size()[3] - upsampled.size()[3]) // 2\n bypass = F.pad(bypass, (-c2, -c2, -c1, -c1))\n logging.debug(\"Upsampled: {}\".format(upsampled.size()))\n logging.debug(\"bypass: {}\".format(bypass.size()))\n return torch.cat((upsampled, bypass), 1)", "def convolveAndDownsample(img):\n # Select every other pixel from G\n G = sp.signal.convolve2d(img, guassianFilter, 'same')\n return G[::2, ::2]", "def upsampleImage( arr, kernelSize ):\n return scipy.ndimage.zoom( arr, kernelSize )", "def upsample(x, filters):\n x = tf.keras.layers.Conv2DTranspose(\n filters, kernel_size=3, strides=2, padding='same', use_bias=True)(\n x)\n x = tf.keras.layers.LeakyReLU(alpha=0.2)(x)\n return x", "def downsampling(inp_img):\n\n\n img = np.array(inp_img)\n f = max(1, np.rint(np.amin(img)/256))\n\n if f > 1:\n lpf = np.ones((f, f))\n f = (1/(f*f))*lpf\n img = cv2.filter2D(img, -1, kernel=f)\n out = np.hstack((img[:, :, 0], img[:, :, 1], img[:, :, 2]))\n\n return out", "def askapsoft_decimate_n_extract(af, over_sampling, kernel_support):\n\n # why is this normalization required..?\n rescale = over_sampling*over_sampling\n #rescale = 1\n\n cSize = 2 * kernel_support + 1\n itsConvFunc=np.zeros((over_sampling, over_sampling, cSize, cSize), dtype=complex)\n\n for fracu in range(0,over_sampling):\n for fracv in range(0,over_sampling):\n\n # Now cut out the inner part of the convolution function and\n # insert it into the convolution function\n for iy in range(-kernel_support,kernel_support+1):\n for ix in range(-kernel_support,kernel_support+1):\n\n nx = af.shape[0]\n ny = af.shape[1]\n\n # assumes support is the same for all w-planes:\n xval = (ix) * over_sampling + fracu + nx / 2\n yval = (iy) * over_sampling + fracv + ny / 2\n\n itsConvFunc[fracu, fracv, ix+cSize/2, iy+cSize/2] \\\n = rescale * af[xval, yval]\n\n return itsConvFunc[::-1,::-1]", "def conv_downsample_2d(x, w, k=None, factor=2, gain=1, data_format='NCHW', impl='cuda', gpu=True):\r\n\r\n assert isinstance(factor, int) and factor >= 1\r\n w = tf.convert_to_tensor(w)\r\n convH, convW, _inC, _outC = w.shape.as_list()\r\n assert convW == convH\r\n if k is None:\r\n k = [1] * factor\r\n k = Oncuda._setup_kernel(k) * gain\r\n p = (k.shape[0] - factor) + (convW - 1)\r\n if data_format == 'NCHW':\r\n s = [1, 1, factor, factor]\r\n else:\r\n s = [1, factor, factor, 1]\r\n x = Oncuda._simple_upfirdn_2d(x, k, pad0=(p+1)//2, pad1=p//2, data_format=data_format, impl=impl, gpu=gpu)\r\n return tf.nn.conv2d(x, w, strides=s, padding='VALID', data_format=data_format)", "def conv2d_forward(x, w, b, pad, stride):\n #raise NotImplementedError\n \n\n \n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################\n ba,h,wd,c=x.shape\n f,fh,fw,c=w.shape\n n_h=((h-fh+2*pad)//stride)+1\n n_w=((wd-fw+2*pad)//stride)+1\n x_paded=np.pad(x,pad,'constant')\n 
temp_dim=x_paded.shape[3]\n #print(temp_dim)\n out=np.zeros((ba,n_h,n_w,f))\n for m in range(0,ba):\n for i in range(0,n_h):\n for j in range(0,n_w):\n for n in range(0,f):\n h_t=i*stride\n h_t2=i*stride+fh\n w_t=j*stride\n w_t2=j*stride+fw\n temp=x_paded[pad+m,h_t:h_t2,w_t:w_t2,pad:temp_dim-pad] \n out[m,i,j,n]=np.sum(temp*w[n,:,:,:])+b[n]\n \n return out", "def get_upsampling_weight(in_channels, out_channels, kernel_size):\n factor = (kernel_size + 1) // 2\n if kernel_size % 2 == 1:\n center = factor - 1\n else:\n center = factor - 0.5\n og = np.ogrid[:kernel_size, :kernel_size]\n filt = (1 - abs(og[0] - center) / factor) * \\\n (1 - abs(og[1] - center) / factor)\n weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size),\n dtype=np.float64)\n weight[range(in_channels), range(out_channels), :, :] = filt\n return torch.from_numpy(weight).float()", "def get_upsampling_weight(in_channels, out_channels, kernel_size):\n factor = (kernel_size + 1) // 2\n if kernel_size % 2 == 1:\n center = factor - 1\n else:\n center = factor - 0.5\n og = np.ogrid[:kernel_size, :kernel_size]\n filt = (1 - abs(og[0] - center) / factor) * \\\n (1 - abs(og[1] - center) / factor)\n weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size),\n dtype=np.float64)\n weight[range(in_channels), range(out_channels), :, :] = filt\n return torch.from_numpy(weight).float()", "def get_upsampling_weight(in_channels, out_channels, kernel_size):\n factor = (kernel_size + 1) // 2\n if kernel_size % 2 == 1:\n center = factor - 1\n else:\n center = factor - 0.5\n og = np.ogrid[:kernel_size, :kernel_size]\n filt = (1 - abs(og[0] - center) / factor) * \\\n (1 - abs(og[1] - center) / factor)\n weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size),\n dtype=np.float64)\n weight[range(in_channels), range(out_channels), :, :] = filt\n return torch.from_numpy(weight).float()", "def upsampling_nhwc(data, scale):\n\n batch, height, width, channel = data.shape\n out_height = util.simplify(height * scale)\n out_width = util.simplify(width * scale)\n\n return tvm.compute((batch, out_height, out_width, channel), \\\n lambda n, h, w, c: data[n, h/scale, w/scale, c])", "def upsample_model():\n\n inputs = tf.keras.Input(shape=(16, 16, 3,))\n x = tf.keras.layers.Conv2D(8, (2, 2))(inputs)\n x = tf.keras.layers.BatchNormalization(momentum=.3, epsilon=.65)(x)\n x = tf.nn.relu(x)\n x = tf.keras.layers.MaxPool2D()(x)\n residual = x\n\n x = tf.keras.layers.Conv2D(8, (1, 1))(x)\n x = tf.keras.layers.Conv2D(8, (1, 1))(x)\n x = tf.keras.layers.BatchNormalization(momentum=.4, epsilon=.25)(x)\n x = tf.add(x, residual)\n x = tf.nn.relu(x)\n\n x = tf.keras.layers.Conv2D(4, (1, 1))(x)\n x = tf.keras.layers.AvgPool2D()(x)\n x = tf.keras.layers.Flatten()(x)\n outputs = tf.keras.layers.Dense(2, activation=tf.nn.softmax, name=\"upsample_model\")(x)\n return outputs", "def TransitionUp(skip_connection, block_to_upsample, n_filters_keep):\n\n # Upsample\n l = ConcatLayer(block_to_upsample)\n l = Deconv2DLayer(l, n_filters_keep, filter_size=3, stride=2,\n crop='valid', W=HeUniform(gain='relu'), nonlinearity=linear)\n # Concatenate with skip connection\n l = ConcatLayer([l, skip_connection], cropping=[None, None, 'center', 'center'])\n\n return l\n # Note : we also tried Subpixel Deconvolution without seeing any improvements.\n # We can reduce the number of parameters reducing n_filters_keep in the Deconvolution", "def _shortcut(self, x):\n if self.learnable_sc:\n x = self._upsample_conv(\n x, self.c_sc) if self.upsample 
else self.c_sc(x)\n return x\n else:\n return x", "def get_upsampling_weight(in_channels, out_channels, kernel_size):\n assert in_channels == out_channels, ValueError('in_channels must equal out_channels for bilinear initialization')\n factor = (kernel_size + 1) // 2\n if kernel_size % 2 == 1:\n center = factor - 1\n else:\n center = factor - 0.5\n og = np.ogrid[:kernel_size, :kernel_size]\n filt = (1 - abs(og[0] - center) / factor) * \\\n (1 - abs(og[1] - center) / factor)\n if in_channels == out_channels:\n weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size),\n dtype=np.float64)\n weight[range(in_channels), range(out_channels), :, :] = filt\n else:\n weight = \\\n (filt[np.newaxis, np.newaxis, :, :]).repeat(in_channels, axis=0).repeat(out_channels, axis=1).astype(\n np.float64)\n return torch.from_numpy(weight).float()", "def compute_conv(in_size, kernel, stride, padding):\n return (in_size + 2 * padding - kernel) // stride + 1", "def upsample_filt(size):\r\n factor = (size + 1) // 2\r\n if size % 2 == 1:\r\n center = factor - 1\r\n else:\r\n center = factor - 0.5\r\n og = np.ogrid[:size, :size]\r\n return (1 - abs(og[0] - center) / factor) * \\\r\n (1 - abs(og[1] - center) / factor)", "def upsampling_nchw(data, scale):\n batch, channel, height, width = data.shape\n out_height = util.simplify(height * scale)\n out_width = util.simplify(width * scale)\n\n return tvm.compute((batch, channel, out_height, out_width), \\\n lambda n, c, h, w: data[n, c, h/scale, w/scale])", "def l_up(self, filters, l_in, l_c):\n l = tf.keras.layers.UpSampling2D(self.pooling_size)(l_in)\n l = tf.keras.layers.Conv2D(filters, 2, activation='relu', padding='same', kernel_initializer='he_normal')(l)\n l = tf.keras.layers.concatenate([l_c, l], axis=3)\n l = tf.keras.layers.Conv2D(filters, 3, activation='relu', padding='same', kernel_initializer='he_normal')(l)\n l = tf.keras.layers.Conv2D(filters, 3, activation='relu', padding='same', kernel_initializer='he_normal')(l)\n\n return l", "def up_conv_2d(input_tensor, nb_filters, name):\n resize = UpSampling2D(size=(2, 2), interpolation='nearest')(input_tensor)\n paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]])\n resize = tf.pad(resize, paddings, \"SYMMETRIC\")\n output_layer = Conv2D(\n filters=nb_filters,\n kernel_size=(3, 3),\n activation='relu',\n name=name)(\n resize)\n\n return output_layer", "def _downsample(x):\n return nn.AvgPool2d(kernel_size=2)(x)", "def conv_downsample_2d(x, w, k=None, factor=2, gain=1, data_format='NHWC'):\n\n assert isinstance(factor, int) and factor >= 1\n convH, convW, _inC, _outC = w.shape\n assert convW == convH\n if k is None:\n k = [1] * factor\n k = _setup_kernel(k) * gain\n p = (k.shape[0] - factor) + (convW - 1)\n s = [factor, factor]\n x = _simple_upfirdn_2d(x, k, pad0=(p + 1) // 2,\n pad1=p // 2, data_format=data_format)\n\n return jax.lax.conv_general_dilated(\n x,\n w,\n window_strides=s,\n padding='VALID',\n dimension_numbers=(data_format, 'HWIO', data_format))" ]
[ "0.67663765", "0.65779626", "0.6553816", "0.6398234", "0.6392767", "0.6387929", "0.63407", "0.6329719", "0.6299886", "0.6282646", "0.62362", "0.6212653", "0.6173056", "0.6152558", "0.61221737", "0.6120341", "0.6120341", "0.6120341", "0.60860676", "0.60844463", "0.6062132", "0.6055026", "0.6021964", "0.6021748", "0.60160285", "0.59849113", "0.59834754", "0.59700346", "0.59587425", "0.59340775" ]
0.69547844
0
This function is to add the multiple link attributes to graph G
def add_multi_link_attributes(self, attr1,attr2, attr3):
    i = 0
    for (u, v) in self.G.edges():
        if self.checkKey(attr1, (u,v)) and self.checkKey(attr2, (u,v)) and self.checkKey(attr3, (u,v)):
            self.G.add_edge(u,v,w=attr1[(u,v)],c1=attr2[(u,v)], c2= attr3[(u,v)])
        elif self.checkKey(attr1, (v,u)) and self.checkKey(attr2, (v,u)) and self.checkKey(attr3, (v,u)):
            self.G.add_edge(u,v,w=attr1[(v,u)],c1=attr2[(v,u)], c2= attr3[(v,u)])
        else:
            if not self.checkKey(attr1, (u,v)) and not self.checkKey(attr1, (v,u)):
                raise Exception("Weight edge list has missing value for ", u, v)
            if not self.checkKey(attr2, (u,v)) and not self.checkKey(attr2, (v,u)):
                raise Exception("Concave edge list has missing value for ", u, v)
            if not self.checkKey(attr3, (u,v)) and not self.checkKey(attr3, (v,u)):
                raise Exception("CPU edge has list missing value for ", u, v)
        i = i+1
    return self.G
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_link_attr(d: Dict):\n d.update({\"link\": urljoin(\"https://vdb-kasf1i23nr1kl2j4.rapid7.com/v1/content/\", d.get(\"identifier\"))})", "def _add_links_from_mergers(self):\n for i, node_name in enumerate(self.node_list):\n self.builder.addDirectedLink(node_name, self, islot=i)", "def add_edge_attributes(self, attribute_key, attribute_value):\n G_prime = self.__deepcopy__() # create a deepcopy of the undirected graph\n for edge in G_prime.get_edges(): # for every edge in the graph\n # add the attribute key-value pair to the attributes registry of the edge\n edge.add_attribute(attribute_key, attribute_value)\n return G_prime # return the modified graph", "def add_link(self, **kwgs):\n self.links.append(kwgs)", "def feed_link_decorator(context, feed):\n for item in feed.items:\n current_link = item['link']\n # print(current_link)\n new_link = current_link + FUD_DEFAULT['parameters']\n item['link'] = new_link\n # print(item)\n return feed", "def add_graph_attributes(G, filename):\n Ef = dict() # feature -> edges\n Nf = dict() # node -> features\n with open(filename) as f:\n for line in f: # for each node, list of features it belongs to\n d = line.split()\n u = int(d[0])\n features = d[1:]\n for f in features:\n Ef.setdefault(f, []).extend(G.in_edges(u)) # add feature-dependent edges\n #G.node[u]['Fu'] = features\n G.nodes[u]['Fu'] = features\n Nf[u] = features\n print('Read graph attributes')\n return Ef, Nf", "def add_node_attributes(self, attribute_key, attribute_value):\n G_prime = self.__deepcopy__() # create a deepcopy of the undirected graph\n for node in G_prime.get_nodeset(): # for every node in the graph\n # add the attribute key-value pair to the attributes registry of the node\n node.add_attribute(attribute_key, attribute_value)\n return G_prime # return the modified graph", "def _setattrs(self, handle=\"\",\n edge=\"\", node=\"\", subg=\"\", proto=\"\",\n **attrs):\n head, tail = '', ''\n if edge:\n head, tail = edge\n\n node, head, tail, subg = map(encode_page, [node, head, tail, subg])\n\n self.changed = 1\n\n if proto in [\"node\", \"edge\"]:\n # Gets handle when called from Subraphs.set()\n if subg:\n handle = gv.findsubg(self.handle, subg)\n # Called by self.set() and GraphvizSubgraph.set(), handle known\n item = getattr(gv, \"proto%s\" % proto)(handle)\n # print \"item = gv.proto\" + proto + \"(g)\"\n elif head and tail:\n item = gv.findedge(gv.findnode(handle, head),\n gv.findnode(handle, tail))\n # print \"item = gv.findedge(gv.findnode(g, '\" + head + \"'),\" + \\\n # \"gv.findnode(g, '\" + tail + \"'))\"\n elif node:\n item = gv.findnode(handle, node)\n # print \"item = gv.findnode(g, '\" + node + \"')\"\n elif subg:\n item = gv.findsubg(handle, subg)\n # print \"item = gv.findsubg(g, '\" + subg + \"')\"\n elif handle:\n item = handle\n else:\n raise ValueError(\"No graph element or element type specified\")\n\n for key, elem in attrs.iteritems():\n if isinstance(elem, set):\n for e in elem:\n key, e = map(encode_page, [key, e])\n gv.setv(item, key, e)\n else:\n key, elem = map(encode_page, [key, elem])\n gv.setv(item, key, elem)\n # print \"gv.setv(item, '\" + key + \"', '\" + elem + \"')\"", "def add_edges_from(self, edges_to_add, **attr):\n for e in edges_to_add:\n if len(e) == 3:\n u, v, d = e\n else:\n u, v = e\n d = {}\n u, v = sorted([e[0], e[1]])\n d = {**attr, **d}\n self.add_edge(u, v, **d)", "def merge(self, ASGgraph ):\r\n \r\n self.mergedASG.append(ASGgraph)\t\t\t\t\t# add the graph to the list of merged graphs\r\n for nodeType in 
ASGgraph.listNodes.keys():\r\n if not nodeType in self.listNodes.keys():\t\t\t# node type was not known\r\n self.listNodes[nodeType] = ASGgraph.listNodes[nodeType]\r\n self.nodeTypes.append(nodeType)\r\n else: \t# node type existed...\r\n for node in ASGgraph.listNodes[nodeType]:\t\t\t# add each node of merged graph to actual graph\r\n self.listNodes[nodeType].append(node)\r\n \r\n # copy also the model's attribute\r\n errors = []\r\n for attr in ASGgraph.generatedAttributes.keys():\r\n if attr in self.generatedAttributes.keys(): # Attribute is present!\r\n #print \"Attribute collision for \", attr, \"<-- New attribute value ignored\" \r\n errors.append(attr)\r\n if( not self.__collidedAttributeTracker.has_key( attr ) ):\r\n self.__collidedAttributeTracker[ attr ] = 1\r\n else:\r\n self.__collidedAttributeTracker[ attr ] += 1\r\n continue\r\n self.generatedAttributes[attr] = ASGgraph.generatedAttributes[attr]\r\n # now create the attribute!\r\n self.setAttrValue(attr, ASGgraph.getAttrValue(attr).clone())\r\n if( errors ):\r\n print 'Attribute name collisions occured during load (could affect '\\\r\n + 'old formalisms)\\nThe following attributes collided: '\\\r\n + str(errors) \r\n ## print 'In fact, these messages are slated for removal, as this ' \\\r\n ## 'attribute system is being bypassed to fix this problem'\r", "def add_links(G, df, col1, col2, relation):\n df_tmp = df[(~df[col1].isnull()) & (~df[col2].isnull()) & (~df[relation].isnull())]\n links = list(zip(df_tmp[col1], df_tmp[col2], df_tmp[relation]))\n G.add_edges_from([(src, trg, dict(type=rel)) for src, trg, rel in links])\n print(\"Edges (%s->%s,%s) were added\" % (col1, col2, relation))", "def set_edge_attributes(graph: BaseGraph, attributes: Dict) -> None:\n return set_edge_attributes(graph.graph, attributes)", "def add_links(self, *args):\n for link in args:\n self.add_link(link)", "def build_graph(link_data, links):\n graph = {}\n\n # add all data for links\n for l in links:\n #print(\"Adding \"+l)\n #print(link_data.get(l))\n graph[l] = list(link_data.get(l))\n\n # add all links that point to links\n for slink in link_data:\n for l in links:\n # the links is already in graph, skip\n if graph.has_key(slink):\n continue\n\n try:\n dest_links = list(link_data.get(slink))\n # if slink points to l\n _ = dest_links.index(l)\n # add the slink to graph\n graph[slink] = dest_links\n #print(\"Adding \"+slink)\n except Exception as e:\n pass\n\n #print(len(graph))\n #print(graph)\n\n return graph", "def add_edge (self, src, dst, link):\n raise NotImplementedError", "def _add_link_to_targets(self, link):\n for target in self._selected_data():\n target.add_component_link(link)", "def create( basic_graph, mcs_ids, rule, add_attr = True ) :\n g = copy.deepcopy( basic_graph )\n for id in mcs_ids :\n id0, id1 = mcs.get_parent_ids( id )\n simi = rule.similarity( id0, id1, mcs_id = id )\n if (simi > 0) :\n if (add_attr) :\n try :\n partial_ring = int( KBASE.ask( id, \"partial_ring\" ) )\n except LookupError :\n partial_ring = 0\n try :\n slack_simi = KBASE.ask( id, \"slack_similarity\" )\n except LookupError :\n slack_simi = 0.0\n g.add_edge( id0, id1, similarity = simi, slack_similarity = slack_simi,\n partial_ring = partial_ring, mcs_id = id )\n else :\n g.add_edge( id0, id1, similarity = simi )\n return g", "def addLink(self, name, alias, **attrs):\n self.globalConfig.pageList.append(name)\n self.globalConfig.pageAttributes[name] = dict(attrs)\n self.globalConfig.pageAttributes[name]['alias'] = alias", "def simple_linkage(x):\n \n nodes 
= []\n edges = []\n for i in range(len(x)):\n node_attr ={\"lvl\":x[i]}\n nodes.append((i, node_attr))\n edges.append((i,i+1,{'weight':1}))\n edges.pop()\n \n g =nx.Graph()\n g.add_nodes_from(nodes) \n g.add_edges_from(edges) \n return g", "def connect_and_write_gml(self, f):\n G = self.graph.copy()\n node_base_set = set([i[:-2] for i in list(G.nodes)])\n for node in node_base_set:\n G.add_edge(node + \"_b\", node + \"_e\")\n\n # networkx doesn't like writing non-string attributes to GML\n for u, v in G.edges:\n for key in list(G[u][v].keys()):\n G[u][v][key] = str(G[u][v][key])\n nx.readwrite.gml.write_gml(G, f)", "def draw_relation_graph(database_name, table_name, primary_key, group_name) -> Graph:\n\n nodes = []\n links = []\n disease_list = get_icd_diseasegroup_diseaseinfo(database_name, table_name, primary_key, group_name)[1]\n disease_list = disease_list.split(',')\n # print(disease_list)\n\n for disease in disease_list:\n disease_node = {\n \"name\": disease,\n \"symbolSize\": 50\n }\n\n if disease_node not in nodes:\n nodes.append(disease_node)\n\n gene_list = get_mesh_disease_info(database_name, 'mesh_gene', disease, 'DISEASE_ID')[1]\n gene_list = gene_list.split(',')\n for gene in gene_list:\n gene_node = {\n 'name': gene,\n 'symbolSize': 10\n }\n\n if gene_node not in nodes:\n nodes.append(gene_node)\n\n for gene in gene_list:\n links.append({\"source\": disease, \"target\": gene})\n\n print(nodes)\n print(links)\n\n c = (\n Graph(init_opts=opts.InitOpts(width=\"1440px\", height=\"900px\")).add(\"\", nodes, links, repulsion=3000)\n .set_global_opts(title_opts=opts.TitleOpts(title=\"gene-disease association network\"))\n )\n\n return c", "def add_nodes_from(self, nodes_for_adding, **attr):\n for n in nodes_for_adding:\n if isinstance(n, tuple):\n nn, ndict = n\n self.add_node(nn, **{**attr, **ndict})\n else:\n self.add_node(n, **attr)", "def make_link(Graph, node1, node2):\n if node1 not in Graph:\n Graph[node1] = {}\n (Graph[node1])[node2] = 1\n if node2 not in Graph:\n Graph[node2] = {}\n (Graph[node2])[node1] = 1\n return Graph", "def add_attributes(self, pore_dict, throat_dict):\n\n self.add_node_attributes(self.graph, pore_dict)\n self.add_edge_attributes(self.graph, throat_dict)\n\n self.compute_geometry()", "def add_link(self, start, end, link_type):\n\n key = str(start.id()) + \"_\" + link_type + \"_\" + str(end.id())\n\n # Add link only if it does not exist yet\n if (key in self.__links):\n return\n\n js = \"links.push({source: \" + self.__nodes[start.id()] + \", target: \" + self.__nodes[end.id()] + \"});\"\n\n d3_link_id = self.frame.evaluateJavaScript(js) - 1\n\n self.__links[key] = d3_link_id", "def add_edge(self, from_website, to_link):\n self.graph[to_link - 1, from_website - 1] = 1", "def setAddLinks(self,value):\n self.PDFreactorConfiguration.in1[\"addLinks\"] = value", "def xml_add_links(cls, data):\n xml = \"\"\n chunk = '<link rel=\"%s\" href=\"%s\" title=\"%s\" />'\n links = data.pop(config.LINKS, {})\n ordered_links = OrderedDict(sorted(links.items()))\n for rel, link in ordered_links.items():\n if rel == \"related\":\n # add data relation links back for\n # future processing of hateoas attributes\n data.update({config.LINKS: {rel: link}})\n\n elif isinstance(link, list):\n xml += \"\".join(\n chunk % (rel, escape(d[\"href\"]), escape(d[\"title\"]))\n for d in link\n )\n else:\n xml += \"\".join(chunk % (rel, escape(link[\"href\"]), link[\"title\"]))\n return xml", "def add_link(self, link):\n raise NotImplementedError", "def _add_node_attributes(self):\n 
ensemble_mapping = SankeyLayout._ensemble_map(\n df=self.supergraph.gf.df, nxg=self.nxg, columns=SankeyLayout._COLUMNS\n )\n for idx, key in enumerate(ensemble_mapping):\n nx.set_node_attributes(self.nxg, name=key, values=ensemble_mapping[key])\n\n dataset_mapping = {}\n for run in self.runs:\n dataset_mapping[run] = SankeyLayout._dataset_map(\n df=self.supergraph.gf.df,\n nxg=self.nxg,\n tag=run,\n columns=SankeyLayout._COLUMNS,\n )\n nx.set_node_attributes(\n self.nxg, name=self.supergraph.tag, values=dataset_mapping[run]\n )" ]
[ "0.65256554", "0.6264825", "0.62624276", "0.60532814", "0.60358834", "0.6033251", "0.6006186", "0.59122646", "0.5909699", "0.5881892", "0.58349824", "0.5819813", "0.57816803", "0.5781342", "0.57540774", "0.57408094", "0.5718112", "0.5714831", "0.57105774", "0.5698091", "0.565687", "0.563396", "0.5615806", "0.5601639", "0.5594657", "0.55901843", "0.5577682", "0.5565974", "0.55584383", "0.5552966" ]
0.7415763
0
This function is to remove edges in the rm_edge_list from G
def remove_Edge(self, rm_edge_list):
    self.G.remove_edges_from(rm_edge_list)
    self.G.edges()
    return self.G
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_edges(g, edgelist):\n for edge in edgelist:\n (u, v) = tuple(edge)\n g[u].remove(v)\n g[v].remove(u)", "def remove_edges(self, node: NodeKey) -> Edge:", "def remove_edge(self, edge: Edge) -> Edge:", "def delete_edges_from(self, edges: Iterable):\n for i, j in edges:\n self.delete_edge(i, j)", "def clean_edges(self):\n for from_node in self.all_nodes():\n for to_node in self.all_nodes():\n if from_node == to_node:\n continue\n dup = list(filter(lambda x: x.from_node == from_node and x.to_node == to_node, self.edges))\n if len(dup) > 1:\n for d in dup[1:]:\n self.edges.remove(d)", "def clean_edges(self):", "def clear_edges(infr):\n for graph in infr.review_graphs.values():\n graph.remove_edges_from(list(graph.edges()))\n infr.graph.remove_edges_from(list(infr.graph.edges()))\n infr.pos_redun_nids.clear()\n infr.neg_redun_metagraph.clear()\n infr.nid_to_errors.clear()", "def remove_edge(self):\r\n (face, cycle) = self.find_girth()\r\n \r\n k = len(face)\r\n assert k >= 2, \"length of face less than 2\"\r\n\r\n e_id = face[0]\r\n self.graph.remove_edge(e_id)\r\n a = cycle[0]\r\n b = cycle[1]\r\n e1 = self.graph.smooth_vertex(a)\r\n #(x1, y1) = self.graph.get_edge(e1).get_endpoints()\r\n #removed_1st = (a, x1, y1)\r\n e2 = self.graph.smooth_vertex(b)\r\n #(x2, y2) = self.graph.get_edge(e2).get_endpoints()\r\n #removed_2nd = (b, x2, y2)\r\n #\r\n # e1 = x --- a --- x\r\n # |e_id\r\n # e2 = x --- b --- x\r\n #\r\n # ( (v_id1,(x1,y1)), (v_id2,(x1,y1)) )\r\n #self.edge_removed_info.append((removed_1st, removed_2nd))\r\n self.state = \"initial\"\r\n self.removed.append((e1, e2, cycle))\r\n #print \"removed: \", (e1, e2, cycle)\r", "def _delete_edges(self, to_be_deleted_set, adj_dict):\n for pair in to_be_deleted_set:\n first_node = pair[0]\n second_node = pair[1]\n adj_dict.pop((first_node, second_node), None)", "def cutoff_graph( g, simi_cutoff ) :\n g = copy.deepcopy( g )\n edges_to_be_deleted = []\n for e in g.edges() :\n if (g[e[0]][e[1]][\"similarity\"] < simi_cutoff) :\n edges_to_be_deleted.append( e )\n g.remove_edges_from( edges_to_be_deleted )\n return g", "def trim_edges(self, keep=0.5):\n\n for tid1, tid2 in self.graph.edges():\n if random.random() > keep:\n self.graph.remove_edge(tid1, tid2)", "def remove_inconsistent_edges(graph: BELGraph) -> None:\n for u, v in get_inconsistent_edges(graph):\n edges = [(u, v, k) for k in graph[u][v]]\n graph.remove_edges_from(edges)", "def remove_edge(self, u, v):\n \n try:\n del self.prefix[v][u]\n del self.suffix[u][v]\n except:\n print(\"ERROR: The edges not in graph\")", "def remove_edge(self, e):\n assert len(self.e2k) == self.VEK[1]\n assert len(self.k2e) == self.VEK[1]\n neighbors = self.neighbors\n components = self.components\n k = self.e2k.pop(e)\n self.k2e.pop(k)\n v1, v2 = self.grid[1:, k]\n neighbors[v1].remove(v2)\n neighbors[v2].remove(v1)\n stack = [v1]\n while stack:\n v1 = stack.pop()\n components[v1] = True\n for v2 in neighbors[v1]:\n if not components[v2]:\n stack.append(v2)\n assert len(self.e2k) == self.VEK[1] - 1\n assert len(self.k2e) == self.VEK[1] - 1\n return k", "def remove_edge(G, u, v):\n h = G.copy()\n h.remove_edge(u, v)\n return h", "def repress_edge_removal(graph, active_nodes, repression_rate):\n for node in active_nodes:\n neighbors = list(graph[node].keys())\n remove_which = np.random.binomial(1, repression_rate, size=(len(neighbors)))\n for idx in range(len(neighbors)):\n if remove_which[idx]:\n graph.remove_edge(node, neighbors[idx])", "def cleanGraph(self,graph):\n i=0\n while i+1<len(graph):\n if 
self.getDistance(graph[i],graph[i+1])==0:\n del graph[i+1]\n else:\n i+=1\n return graph", "def eliminiateEmptyEdges(self, distance = 100):\n print \"Edge elimination started\"\n \n selected_edge_ids = []\n # let us \n \n for point in self.gps_points:\n results = self.idx.nearest(((point.getPoint().x-distance/2), \n (point.getPoint().y-distance/2),\n (point.getPoint().x+distance/2),\n (point.getPoint().y+distance/2)), objects=True)\n for result in results:\n from_node = self.node_counter__node.get(result.object.from_node.getAttributes().get(\"nodecounter\"))\n to_node = self.node_counter__node.get(result.object.to_node.getAttributes().get(\"nodecounter\"))\n edge_counter = self.G.edge[from_node][to_node].get(\"edgecounter\")\n if edge_counter not in selected_edge_ids:\n selected_edge_ids.append(edge_counter)\n print str(len(selected_edge_ids)) + \" edges found to keep.\"\n \n elimination_counter = 0\n for edge in self.G.edges():\n edgecounter = self.G.edge[edge[0]][edge[1]].get(\"edgecounter\")\n if edgecounter not in selected_edge_ids:\n edge_tuple = (self.G.edge[edge[0]][edge[1]].get(\"edge\").from_node, self.G.edge[edge[0]][edge[1]].get(\"edge\").to_node)\n self.G.remove_edge(*edge_tuple)\n elimination_counter = elimination_counter + 1\n \n print str(elimination_counter) + \" edges eliminated.\"", "def _remove_edge(self, actor, target):\n nodes = (actor, target)\n for i in (0, 1):\n try:\n self._vertices[nodes[i]]\n except KeyError:\n continue\n\n self._vertices[nodes[i]].remove_neighbor(nodes[(i + 1) % 2])\n\n if self._vertices[nodes[i]].degree == 0:\n del self._vertices[nodes[i]]", "def run_removing_edges(self):\n indices = np.where(self.X==1)\n idx=[]\n for i in range(len(indices[0])):\n idx.append((indices[0][i],indices[1][i]))\n idx = np.array(idx)\n return self.node_equivalent(idx)", "def remove_edge(self, edge):\n if self.get_edge(edge):\n del self[edge[0]][edge[1]]\n del self[edge[1]][edge[0]]", "def remove_edges(self, to_remove, return_mods=False):\n # validate the incoming removal request\n assert 'start_id' in to_remove.columns\n assert 'end_id' in to_remove.columns\n assert 'type' in to_remove.columns\n\n # Ensure edges have abbrevations\n if 'abbrev' not in to_remove.columns:\n if all(to_remove['type'].str.contains('_')):\n to_remove['abbrev'] = to_remove['type'].apply(lambda t: '_'.join(t.split('_')))\n else:\n raise ValueError('Edge Abbreviations not provieded for edge removal')\n\n # Determine the edges to alter\n altered_edges = to_remove['abbrev'].unique()\n args = []\n\n # Ensure that the network is unmodified\n # Must run .reset_edges() before running subsequent .remove_edges()\n assert self._modified_edges is None\n assert self._weighted_modified_edges is None\n\n for edge in altered_edges:\n current_edges = self.edge_df.query('abbrev == @edge')\n remove_edges = set(to_remove.query('abbrev == @edge')[['start_id', 'end_id']].apply(tuple, axis=1))\n\n filtered_edges = []\n for row in current_edges.itertuples():\n # Add to filtered edges if not in the remove edges\n if not {tuple([row.start_id, row.end_id])} & remove_edges:\n filtered_edges.append(row)\n\n # Rebuild the DataFrame\n filtered_edges = pd.DataFrame(filtered_edges)\n\n # Get arguments for building the new adjacency matrices\n args.append(self._prepare_parallel_adj_matrix_args(filtered_edges))\n\n # Get the new adjacency matrices\n res = parallel_process(array=args, function=mt.get_adj_matrix, use_kwargs=True, n_jobs=self.n_jobs,\n front_num=0)\n\n # Get the new results and reset the args for weighted 
calculations\n modded_edges = dict()\n args = []\n for metaedge, matrix in zip(altered_edges, res):\n modded_edges[metaedge] = matrix\n args.append(matrix)\n res = parallel_process(array=args, function=mt.calculate_degrees, n_jobs=self.n_jobs, front_num=0)\n\n # Prepare the Args for degree weighted calulations\n args = []\n for (metaedge, matrix), (out_degree, in_degree) in zip(modded_edges.items(), res):\n # Store the original and new degrees\n self._orig_in_degree[metaedge] = self.in_degree[metaedge]\n self._orig_out_degree[metaedge] = self.out_degree[metaedge]\n self.in_degree[metaedge] = in_degree\n self.out_degree[metaedge] = out_degree\n\n # Add the arguments to the function\n args.append({'matrix': matrix, 'w': self.w, 'degree_fwd': out_degree,\n 'degree_rev': in_degree})\n res = parallel_process(array=args, function=mt.weight_by_degree, use_kwargs=True, n_jobs=self.n_jobs,\n front_num=0)\n\n # Unpack the new degree-weighted results\n modded_dw_edges = dict()\n for metaedge, matrix in zip(altered_edges, res):\n modded_dw_edges[metaedge] = matrix\n\n # Cache the original values for easy recovery\n self._modified_edges = {k: v for k, v in self.adj_matrices.items() if k in modded_edges.keys()}\n self._weighted_modified_edges = {k: v for k, v in self.degree_weighted_matrices.items()\n if k in modded_edges.keys()}\n\n # and Update the edges\n self.adj_matrices = {**self.adj_matrices, **modded_edges}\n self.degree_weighted_matrices = {**self.degree_weighted_matrices, **modded_dw_edges}\n if return_mods:\n return [k for k in modded_edges.keys()]", "def _removeEdgesNodes(self, curNode):\n\n # Figure out all edges to be removed first then take them out of the temp graph\n # then remove all the nodes from the temp graph.\n # At the start the temp graph is equal to the initial graph.\n\n self.deletedEdges = set()\n self.deletedNodes = set()\n kNodes = self.kPath.nodeList\n index = 0\n tempNode = kNodes[index]\n index += 1\n while tempNode != curNode:\n edges = self.tempG.edges(tempNode)\n if len(edges) != 0:\n for edge in edges:\n self.deletedEdges.add(edge)\n self.tempG.remove_edge(edge[0], edge[1])\n\n #\n\n self.deletedNodes.add(tempNode)\n self.tempG.remove_node(tempNode)\n tempNode = kNodes[index]\n index += 1\n\n # Also need to remove those old deleted edges that start on curNode\n\n oldDelEdges = self.kPath.deletedEdges\n if self.g.is_directed():\n outEdges = self.g.out_edges(curNode)\n else:\n outEdges = self.g.edges(curNode)\n\n # outEdges = self.g.edges(curNode)\n\n for e in outEdges:\n if e in oldDelEdges:\n self.deletedEdges.add(e)\n self.tempG.remove_edge(e[0], e[1])\n\n # Now delete the edge from the curNode to the next in the path\n\n tempNode = kNodes[index]\n e = (curNode, tempNode)\n self.deletedEdges.add(e)\n self.tempG.remove_edge(curNode, tempNode)", "def remove_stems(graph = None):\n\tfor x,y in basepairs(graph = graph):\n\t\tgraph.remove_node(x)\n\t\tgraph.remove_node(y)", "def test_remove_self_loops_1(self):\n G = [[0, [1, 2, 3]], [1, [0, 2]], [2, [0, 1, 3]], [3, [0, 2]]]\n edges = kargermincut.get_edges(G)\n edges = kargermincut.remove_self_loops(edges)\n self.assertEqual(edges, [[0, 1], [0, 2], [0, 3], [1, 2], [2, 3]])", "def _pair_based_graph_cut(self, graph):\n for node in self._find_paired_nodes(graph):\n graph.remove_node(node)\n return", "def removeEdge(self, edge: Edge):\n if edge in self.edges:\n self.edges.remove(edge)\n else:\n print('!W', 'Scene:removeEdge', 'wanna remove edge', edge, 'from self.edges but it is not in the list!')", "def remove_edges(self, 
ids, properties, **kwargs):\r\n\t\traise NotImplementedError", "def test_graph_deletes_nodes(graph_with_edges):\n graph_with_edges.del_nodes('B')\n listy = ['A', 'C', 'D', 'E', 'F']\n for node in listy:\n assert node in graph_with_edges.nodes()\n assert 'B' not in graph_with_edges.nodes()", "def disconnect_nodes(self):\n for src_id, trg_id in itertools.product(self.selected_nodes, repeat=2):\n if src_id != trg_id:\n # `discard` ignores non-existing elements (unlike `remove`)\n app.edges[src_id].discard(trg_id)\n self.mark_as_unsaved()\n self.update()" ]
[ "0.7794919", "0.73144794", "0.7183438", "0.71494997", "0.70152396", "0.69617105", "0.68124664", "0.6739959", "0.67176265", "0.6659268", "0.6655754", "0.6595909", "0.6566617", "0.65123194", "0.6509404", "0.6497717", "0.64858997", "0.6465598", "0.6465135", "0.64296776", "0.64265037", "0.6411749", "0.6405715", "0.6351005", "0.62640435", "0.6251796", "0.6249887", "0.6244018", "0.6236109", "0.62081826" ]
0.7745967
1
This function is to find the path cost based on the additive costs
def additive_path_cost(self, path, attr):
    return sum([self.G[path[i]][path[i+1]][attr] for i in range(len(path)-1)])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def path_cost(self, c, state1, action, state2):\n # This should probably just be 1 every state....\n return c + 1", "def path_cost(self, c, state1, action, state2):\n # This should probably just be 1 every state....\n return c + 1", "def path_cost(self, c, state1, move, state2):\n # THIS WAS TAKEN DIRECTLY FROM THE AIMA code provided by the textbook\n current_cost = c\n\n return current_cost + 1", "def path_cost(self, c, state1, action, state2):\n\t\treturn c + 1", "def path_cost(self, c, state1, action, state2):\n\n\t\treturn c + self.action_cost(action)", "def path_cost(self, c, state1, action, state2):\n return c + 1", "def path_cost(self, c, state1, action, state2):\n return c + 1", "def path_cost(self, c, state1, action, state2):\n return c + 1", "def calculate_path_cost_with_weighted_sum(self, path, attr1, attr2): \n costs = [] \n for i in range(len(path) - 1):\n a = (1- self.G[path[i]][path[i+1]][attr2]) / (2 - self.G[path[i]][path[i+1]][attr1] - self.G[path[i]][path[i+1]][attr2]) \n b = (1- self.G[path[i]][path[i+1]][attr1]) / (2 - self.G[path[i]][path[i+1]][attr1] - self.G[path[i]][path[i+1]][attr2]) \n costs.append(a * self.G[path[i]][path[i+1]][attr1] + b * self.G[path[i]][path[i+1]][attr2]) \n return max(costs)", "def path_cost(path):\n # path = [state, (action, total_cost), state, ... ]\n if len(path) < 2:\n return 0\n else:\n return path[-2][-1]", "def path_cost(path):\r\n # path = (state, (action, total_cost), state, ... )\r\n if len(path) < 3:\r\n return 0\r\n else:\r\n action, total_cost = path[-2]\r\n return total_cost", "def cost(self):\n node, path_back = self, []\n cost = 0\n while node:\n path_back.append(node)\n if node.action is not None:\n cost = cost + node.action.cost\n node = node.parent\n # remove one due to root empty node \n #cost = cost-1\n return [cost, list(reversed(path_back))]", "def path_cost(self, c, state1, action, state2):\n # print(c)\n accao, direcao = action.split()\n if accao == WALK:\n return c + 1\n elif accao == PUSH:\n return c + 1", "def calculate_path_cost_with_concave_function(self, path, attr1, attr2): \n c1 = max([self.G[path[i]][path[i+1]][attr1] for i in range(len(path)-1)])\n c2 = max([self.G[path[i]][path[i+1]][attr2] for i in range(len(path)-1)]) \n return max([c1,c2])", "def GetPathCost(self, bucketOfActions):", "def calculate_costs(self):\n cost_matrix = self.make_cost_matrix()\n \n if self.greedy:\n # Riesen et al., \"Greedy Graph Edit Distance\"\n costs = []\n psi = []\n \n for row in range(self.N):\n phi = self.M\n row_min = sys.maxint\n for column in range(self.N+self.M):\n if column not in psi:\n if cost_matrix[row, column] < row_min:\n row_min = cost_matrix[row, column]\n phi = column\n \n costs.append(row_min)\n if phi < self.M:\n psi.append(phi)\n \n for row in range(self.N, self.N+self.M):\n if (row - self.N) not in psi:\n costs.append(cost_matrix[row, row - self.N])\n else:\n # Riesen & Bunke, \"Approximate graph edit distance computation by means of bipartite graph matching\"\n row_ind, col_ind = optimize.linear_sum_assignment(cost_matrix)\n \n if self.verbose:\n for row, column in (row_ind, col_ind):\n value = cost_matrix[row, column]\n print '%d, %d, %.4f' % (row, column, value)\n \n return row_ind, col_ind, cost_matrix[row_ind, col_ind]", "def route_cost(self, route):\n total_weight = 0\n c = 0\n start = route[0]\n for end in route[1:]:\n y = float(self.stars[start][end]['weight']) - c\n t = total_weight + y\n c = (t - total_weight) - y\n\n total_weight = t\n\n start = end\n return total_weight", "def 
_calculate_costs(self):\n cost = 0\n cost += self._cost_route_fine()\n cost += self._cost_petrol()\n cost += self._cost_wage()\n cost += self._cost_refueling()\n cost += self._cost_caught_by_police()\n cost += self._cost_vehicle_malfunction()\n return cost", "def CostOfPath( self, P, D ):\n cost = 0\n for tup in P:\n cost += D[tup[0]][tup[1]]\n return cost", "def total_cost(path: Path) -> float:\n\t\n\tdistance = calc_total_dist(path)\n\tavg_speed = calc_average_speed(path)\n\t\n\t# Speed is less important, but gets a huge multiplier, because speed and\n\t# \tdistance are in different units. Speed requires a high ratio to have\n\t# \tsimilar amounts of variation.\n\tSPEED_DISTANCE_COST_RATIO = 7865.099\n\t\n\treturn (\n\t\t(distance * 1) +\n\t\t(-avg_speed * SPEED_DISTANCE_COST_RATIO)\n\t)", "def totalDist(currPath, distances):\n currCost = 0\n for i in range(1, len(currPath)):\n currCost += distances.get(currPath[i], currPath[i - 1])\n currCost += distances.get(currPath[0], currPath[-1])\n return currCost", "def Cost_of_Hamiltonian_Path(path):\n cost = 0\n for i in range(len(path)-1):\n cost += edge_dic[(path[i], path[i+1])]\n return cost", "def fastest_path_estimation(sol):\n\n class Path:\n def __init__(self, places, graph):\n self.g = 0 # current cost\n self.graph = graph\n self.visited = [places[0]] # list of already visited attractions\n self.not_visited = copy.deepcopy(places[1:]) # list of attractions not yet visited\n\n def __lt__(self, other):\n return self.g < other.g\n\n def add(self, idx):\n # add the cost\n self.g += self.graph[self.visited[-1], idx]\n # add the to the visited place and remove from the unvisited places\n self.visited.append(idx)\n self.not_visited.remove(idx)\n\n def add_to_heap_queue(path):\n # custom function to add to heap queue sorted by the solution's cost\n heappush(h_queue, path)\n\n if len(sol.not_visited) == 0:\n return 0\n elif len(sol.not_visited) == 1:\n return sol.graph[sol.visited[-1], sol.not_visited[0]]\n\n c = sol.visited[-1]\n pm = sol.not_visited[-1]\n # the heap queue of solution sorted by their cost - change all to tuples with g for dijkstra\n h_queue = []\n\n # the places to use for the graph\n sub_search_places = [c]\n sub_search_places.extend(sol.not_visited)\n\n # push the first \"node\" in the queue\n add_to_heap_queue(Path(sub_search_places, sol.graph))\n while True:\n # take the next solution with the shortest cost\n path = heappop(h_queue)\n # if it contains destination, stop and return that solution\n if pm in path.visited:\n return path.g\n # create a new solution for each neighbor of the current vertex and add it to heap queue\n for place in path.not_visited:\n new_path = copy.deepcopy(path)\n new_path.add(place)\n add_to_heap_queue(new_path)", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n path_to_point = {}\n cost_to_point = {}\n\n # Get the start node\n start_node = problem.getStartState()\n fringe_node = [start_node]\n path_to_point[start_node] = []\n cost_to_point[start_node] = problem.getCostOfActions(path_to_point[start_node])\n\n goal_found = False\n\n while(not goal_found):\n #for i in range(100): \n nodes_to_expand = set()\n # get max value node in the fringe node\n min_val = float(\"inf\")\n for one_node in fringe_node:\n # Compute the cost to reach a node\n if cost_to_point[one_node] < min_val:\n min_val = cost_to_point[one_node]\n \n for one_node in fringe_node:\n # Compute the cost to reach a node\n if cost_to_point[one_node] == min_val:\n nodes_to_expand.add(one_node)\n 
fringe_node.remove(one_node)\n\n # Expand the fringe node \n for one_node in nodes_to_expand:\n path_to_parent = path_to_point[one_node]\n for nxt_node in problem.getSuccessors(one_node):\n pos = nxt_node[0]\n mv = nxt_node[1]\n # check if point already present in path to point\n prev_cost = float(\"inf\")\n if pos in cost_to_point:\n prev_cost = cost_to_point[pos]\n new_path = path_to_parent + [mv]\n if prev_cost > problem.getCostOfActions(new_path):\n path_to_point[pos] = new_path\n cost_to_point[pos] = problem.getCostOfActions(new_path)\n fringe_node.append(pos)\n\n # Check if destination is reached in the fringe node\n for one_node in fringe_node:\n if problem.isGoalState(one_node):\n final_node = one_node\n goal_found = True\n break\n \n #print(len(fringe_node))\n print(final_node)\n print(path_to_point[final_node])\n return path_to_point[final_node] \n\n util.raiseNotDefined()", "def compute_path_cost(G, path, weight=None):\n # Initialize cost\n cost = 0\n # If weight is None\n if weight is None:\n cost = len(path) - 1\n else:\n # Traverse nodes during the path\n for v, u in pairwise(path):\n # Sum all weight of edges during the path\n cost += G[v][u][weight]\n\n return cost", "def Option3_routing(self, S, D, L):\n if self.has_path(S, D): \n Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w') \n Opt_path = Shortest_path\n path_cost_with_concave_function = self.calculate_path_cost_with_concave_function(Shortest_path, 'c1', 'c2')\n return path_cost_with_concave_function, Opt_path\n while len(Shortest_path) != 0:\n path_cost = self.additive_path_cost(Shortest_path, 'w') \n #self.logger.info('Path cost - %d', path_cost)\n if path_cost <= L:\n \"\"\"go to path cost with weighted sum\"\"\"\n path_cost_with_concave_function = self.calculate_path_cost_with_concave_function(Shortest_path, 'c1', 'c2')\n self.G = self.rm_edge_constraint(path_cost_with_concave_function) # remove all links where the concave link is greater than PathConcave_cost \n Opt_path = Shortest_path\n if self.has_path(S, D):\n Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w')\n else:\n Shortest_path = [] \n else:\n break \n else:\n self.logger.info('No path from %s to %s', S, D)\n Opt_path = []\n path_cost_with_concave_function = 0\n return path_cost_with_concave_function, Opt_path", "def path_cost(path):\n return len(path)", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n\n templist=[]\n explored = set()\n fringe = util.PriorityQueue()\n # state, list of directions till now and the cost is pushed in the stack\n # so that algorithm can explore the node with lowest cost first\n fringe.push((problem.getStartState(),templist),1)\n\n while (not fringe.isEmpty()):\n (currentNode,currDir) = fringe.pop()\n\n if problem.isGoalState(currentNode):\n pathToGoal = currDir\n break\n if not (currentNode in explored):\n explored.add(currentNode)\n for childNode in problem.getSuccessors(currentNode):\n # total cost is cost till now plus cost to the child node\n totalCost = childNode[2]+problem.getCostOfActions(currDir)\n fringe.push((childNode[0],currDir+[childNode[1]]),totalCost)\n\n\n\n\n return pathToGoal;", "def get_expected_cost(self):", "def __search_path(self, start_node, goal_node):\n\n path = []\n queue = PriorityQueue()\n queue.put((0, start_node))\n visited = set(start_node)\n\n branch = {}\n found = False\n \n while not queue.empty():\n item = queue.get()\n current_cost = item[0]\n current_node = item[1]\n\n if current_node == goal_node: \n found = True\n break\n else:\n for next_node in 
self._route_graph[current_node]:\n cost = self._route_graph.edges[current_node, next_node]['weight']\n new_cost = current_cost + cost + self.__heuristic(next_node, goal_node)\n\n if next_node not in visited: \n visited.add(next_node) \n queue.put((new_cost, next_node))\n\n branch[next_node] = (new_cost, current_node)\n\n path = []\n path_cost = 0\n if found:\n # retrace steps\n path = []\n n = goal_node\n path_cost = branch[n][0]\n while branch[n][1] != start_node:\n path.append(branch[n][1])\n n = branch[n][1]\n path.append(branch[n][1])\n else:\n print(\"Path Not Found\")\n\n return path[::-1], path_cost" ]
[ "0.7617994", "0.7617994", "0.7597403", "0.7554137", "0.7527699", "0.74822974", "0.74822974", "0.74822974", "0.7429028", "0.72676957", "0.72414786", "0.711392", "0.6980379", "0.6906745", "0.6895414", "0.6795053", "0.67780983", "0.6749605", "0.67375493", "0.6723749", "0.6688599", "0.6682823", "0.6651029", "0.6646826", "0.6628104", "0.6588083", "0.6562833", "0.65585345", "0.65559363", "0.65404016" ]
0.79299235
0
This function is to find the path cost based on the concave function
def calculate_path_cost_with_concave_function(self, path, attr1, attr2):
    c1 = max([self.G[path[i]][path[i+1]][attr1] for i in range(len(path)-1)])
    c2 = max([self.G[path[i]][path[i+1]][attr2] for i in range(len(path)-1)])
    return max([c1,c2])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def path_cost(self, c, state1, action, state2):\n # This should probably just be 1 every state....\n return c + 1", "def path_cost(self, c, state1, action, state2):\n # This should probably just be 1 every state....\n return c + 1", "def path_cost(self, c, state1, action, state2):\n\t\treturn c + 1", "def path_cost(self, c, state1, action, state2):\n return c + 1", "def path_cost(self, c, state1, action, state2):\n return c + 1", "def path_cost(self, c, state1, action, state2):\n return c + 1", "def path_cost(self, c, state1, move, state2):\n # THIS WAS TAKEN DIRECTLY FROM THE AIMA code provided by the textbook\n current_cost = c\n\n return current_cost + 1", "def path_cost(self, c, state1, action, state2):\n\n\t\treturn c + self.action_cost(action)", "def path_cost(self, c, state1, action, state2):\n # print(c)\n accao, direcao = action.split()\n if accao == WALK:\n return c + 1\n elif accao == PUSH:\n return c + 1", "def path_cost(path):\n # path = [state, (action, total_cost), state, ... ]\n if len(path) < 2:\n return 0\n else:\n return path[-2][-1]", "def additive_path_cost(self, path, attr): \n return sum([self.G[path[i]][path[i+1]][attr] for i in range(len(path)-1)])", "def path_cost(path):\r\n # path = (state, (action, total_cost), state, ... )\r\n if len(path) < 3:\r\n return 0\r\n else:\r\n action, total_cost = path[-2]\r\n return total_cost", "def calculate_path_cost_with_weighted_sum(self, path, attr1, attr2): \n costs = [] \n for i in range(len(path) - 1):\n a = (1- self.G[path[i]][path[i+1]][attr2]) / (2 - self.G[path[i]][path[i+1]][attr1] - self.G[path[i]][path[i+1]][attr2]) \n b = (1- self.G[path[i]][path[i+1]][attr1]) / (2 - self.G[path[i]][path[i+1]][attr1] - self.G[path[i]][path[i+1]][attr2]) \n costs.append(a * self.G[path[i]][path[i+1]][attr1] + b * self.G[path[i]][path[i+1]][attr2]) \n return max(costs)", "def path_cost(path):\n return len(path)", "def GetPathCost(self, bucketOfActions):", "def total_cost(path: Path) -> float:\n\t\n\tdistance = calc_total_dist(path)\n\tavg_speed = calc_average_speed(path)\n\t\n\t# Speed is less important, but gets a huge multiplier, because speed and\n\t# \tdistance are in different units. 
Speed requires a high ratio to have\n\t# \tsimilar amounts of variation.\n\tSPEED_DISTANCE_COST_RATIO = 7865.099\n\t\n\treturn (\n\t\t(distance * 1) +\n\t\t(-avg_speed * SPEED_DISTANCE_COST_RATIO)\n\t)", "def max_path_cost(self, path, attr): \n return max([self.G[path[i]][path[i+1]][attr] for i in range(len(path)-1)])", "def cost(self):\n node, path_back = self, []\n cost = 0\n while node:\n path_back.append(node)\n if node.action is not None:\n cost = cost + node.action.cost\n node = node.parent\n # remove one due to root empty node \n #cost = cost-1\n return [cost, list(reversed(path_back))]", "def compute_path_cost(G, path, weight=None):\n # Initialize cost\n cost = 0\n # If weight is None\n if weight is None:\n cost = len(path) - 1\n else:\n # Traverse nodes during the path\n for v, u in pairwise(path):\n # Sum all weight of edges during the path\n cost += G[v][u][weight]\n\n return cost", "def CalculateChebyPaths(self):\n Kmin, Kmax = self.Kmin, self.Kmax\n self.apath = array([0 for y in range(self.T)], dtype=float)\n self.cpath = array([0 for y in range(self.T)], dtype=float)\n self.npath = array([0 for y in range(self.T)], dtype=float)\n # generate each generation's asset, consumption and labor supply forward\n for y in range(self.T-1): # y = 0, 1,..., 58\n self.cpath[y] = self.chebeval(array([self.apath[y]]),self.ac[y],Kmin,Kmax)\n # if self.cpath[y] < 0:\n # self.cpath[y] = 0\n if y >= self.W:\n income = self.b\n else:\n self.npath[y] = self.chebeval(array([self.apath[y]]),self.an[y],Kmin,Kmax)\n income = (1-self.tau)*self.w*self.npath[y]\n self.apath[y+1] = (1+self.r)*self.apath[y] + income - self.cpath[y]\n self.upath[y] = self.util(self.cpath[y], self.npath[y])\n # the oldest generation's consumption and labor supply\n self.cpath[self.T-1] = (1+self.r)*self.apath[self.T-1] + self.b\n # self.cpath[self.T-1] = self.chebeval(array([self.apath[self.T-1]]),self.ac[self.T-1],Kmin,Kmax)\n self.upath[self.T-1] = self.util(self.cpath[self.T-1], self.npath[self.T-1])\n # print self.cpath, self.apath, self.npath", "def CostOfPath( self, P, D ):\n cost = 0\n for tup in P:\n cost += D[tup[0]][tup[1]]\n return cost", "def get_cost(self):\n if self.distance == 0:\n for i in range(1, len(self.cities) + 1):\n point1 = self.cities[i - 1]\n point2 = self.cities[i % len(self.cities)]\n self.distance += self.distance_to(point1, point2)\n return self.distance", "def calculate_path(self):\n #Se repite el ciclo para el número especificado de veces\n for i in range(self.iterations):\n for ant in self.ants:\n ant.setup_ant()\n while not ant.final_node_reached:\n #Seleccion aleatoria del nodo a visitar\n node_to_vist = self.select_next_node(self.map.nodes_array[int(ant.actual_node[0])][int(ant.actual_node[1])])\n #Mover la hormiga al siguiente nodo seleccionado al azar\n ant.move_ant(node_to_visit)\n #Compruebe si se ha alcanzado la solución\n ant.is_final_node_reached()\n #Agregar la ruta resultante a la lista de rutas\n self.add_to_path_results(self.delete_loops(ant.get_visited_nodes()))\n # Habilitar a la hormiga para otra busqueda\n ant.enable_start_new_path()\n \n # Actualizar el nivel global de feromonas\n self.pheromone_update()\n self.best_result = self.paths[0]\n\n #Vaciar la lista de rutas\n self.empty_paths()\n print('Iteration: ', i, 'lenght of the path: ', len(self.best_result))\n return self.best_result", "def total_cost(self):\n return np.einsum('i->', self.c[self.s])", "def Cost_of_Hamiltonian_Path(path):\n cost = 0\n for i in range(len(path)-1):\n cost += edge_dic[(path[i], path[i+1])]\n 
return cost", "def _local_cost(self, p, q):\n diagnol = q[0] == p[0] or q[1] == p[1]\n \n # c0, c1 and c2 are costs from Canny operator, gradient magnitude and gradient direction respectively\n if diagnol:\n c0 = self.cost_edges[p[0]][p[1]]-SQRT_0_5*(self.cost_edges[p[0]][p[1]]-self.cost_edges[q[0]][q[1]])\n c1 = self.cost_grad_mag[p[0]][p[1]]-SQRT_0_5*(self.cost_grad_mag[p[0]][p[1]]-self.cost_grad_mag[q[0]][q[1]])\n c2 = SQRT_0_5 * self._get_grad_direction_cost(p, q)\n else:\n c0 = self.cost_edges[q[0]][q[1]]\n c1 = self.cost_grad_mag[q[0]][q[1]]\n c2 = self._get_grad_direction_cost(p, q)\n \n if np.isnan(c2):\n c2 = 0.0\n \n w0, w1, w2 = self.weight\n cost_pq = w0*c0 + w1*c1 + w2*c2\n \n return cost_pq * cost_pq", "def pathcost_cooler(k=5, lam=0.0003, limit=3000):\n return lambda t: (k * exp(-lam * t) if t < limit else 0)", "def compute_costs(self, node, observation=None, area=None):\n if (node.RRT.hierarchy_number == 0) and (\n node in node.RRT.starts.values()): # in case an observation is made immediately (root node has no parent)\n return node.path_costs, node.terminal_costs, node.node_costs\n\n C = self.get_C(observation, area)\n node.vs = self.get_vs(node, C)\n\n # Compute node, and terminal costs\n h = []\n hN = []\n for i in range(self.N_goal_states):\n h.append(self.cost_h(node, self.goal_states[i]))\n hN.append(self.cost_hN(node, self.goal_states[i]))\n\n path_costs = node.parent.path_costs.copy() + node.parent.node_costs.copy()\n\n N_vs = len(node.vs)\n node_costs = []\n terminal_costs = []\n for i in range(N_vs):\n node_costs.append(np.dot(h, node.vs[i]))\n terminal_costs.append(np.dot(hN, node.vs[i]))\n node_costs = np.array(node_costs).reshape((1, N_vs))\n terminal_costs = np.array(terminal_costs).reshape((1, N_vs))\n\n if path_costs.shape[1] == node_costs.shape[1] / self.N_goal_states:\n path_costs_temp = np.zeros(node_costs.shape)\n for i in range(int(node_costs.shape[1] / self.N_goal_states)):\n for j in range(self.N_goal_states):\n path_costs_temp[0, self.N_goal_states * i + j] = path_costs[0, i].copy() + node_costs[\n 0, self.N_goal_states * i + j].copy()\n path_costs = path_costs_temp.copy()\n\n return path_costs, terminal_costs, node_costs", "def _c2c_cost(sclst, eclst):\n def _c2c(point):\n _c_sum = 0\n for pt in eclst.points:\n _c_sum += point.frequency(pt)\n return _c_sum\n return int(sum(map(_c2c, sclst.points)))", "def computeShortestPathCoherence(node1, node2, w):\n\n\tif node1.strip()==node2.strip():\n\t\treturn w\n\n\tfromCache=rds.get(\"%s:%s\" % (node1, node2))\n\tif fromCache:\n\t\treturn float(fromCache)*w\n\telse:\n\t\tg = Graph()\n\t\tq=\"MATCH path=shortestPath((m:Page {name:\\\"%s\\\"})-[LINKS_TO*1..10]-(n:Page {name:\\\"%s\\\"})) RETURN LENGTH(path) AS length, path, m, n\" % (node1, node2)\n\n\t\tcursor=g.run(q)\n\t\tpath=None\n\t\tfor c in cursor:\n\t\t\tpath=c\n\n\t#\n\t\tif path:\n\t\t\trds.set(\"%s:%s\" % (node1, node2), 1/path[\"length\"])\n\t\t\trds.set(\"%s:%s\" % (node2, node1), 1/path[\"length\"])\n\t\t\treturn w/path[\"length\"]\n\t\telse:\n\t\t\trds.set(\"%s:%s\" % (node1, node2), 0.0)\n\t\t\trds.set(\"%s:%s\" % (node2, node1), 0.0)\n\t\t\treturn 0.0" ]
[ "0.7507695", "0.7507695", "0.74749666", "0.73756534", "0.73756534", "0.73756534", "0.72768503", "0.7256803", "0.7144491", "0.7008772", "0.6996443", "0.692127", "0.6799869", "0.6563299", "0.64602214", "0.6419999", "0.6410938", "0.63985276", "0.6318047", "0.6301476", "0.62793493", "0.6222818", "0.620983", "0.6168235", "0.6139337", "0.61366683", "0.6120806", "0.6098129", "0.60919905", "0.60889167" ]
0.79420674
0
Return True if G has a path from source to target, False otherwise.
def has_path(self, source, target):
    try:
        sp = nx.shortest_path(self.G, source, target)
    except nx.NetworkXNoPath:
        return False
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_path_source(self) -> bool:\n\n return any(self.is_path_type(x) for x in self.parameters)", "def exists_path(self, start, end):\n return end in self.paths(start)", "def is_path(self, plays):\n edges = [self.ind_dict[p] for p in plays]\n path = [self.source]\n for i in range(len(edges)):\n path.append(self.successor(edges[i], path[i]))\n return (nx.algorithms.simple_paths.is_simple_path(self.graph, path) and\n path[-1] == self.target)", "def path_exists(grid, start, goal):\n\n stack = [(start, [start])]\n\n visited = set()\n while stack:\n (vertex, path) = stack.pop()\n visited.add(vertex)\n\n legal_cells = set(legal_directions(grid, *vertex)) - visited\n for next in legal_cells:\n if next == goal:\n return True\n stack.append((next, path + [next]))\n\n return False", "def path_exists(G, node1, node2):\n visited_nodes = set()\n\n # Initialize the queue of cells to visit with the first node: queue\n queue = [node1]\n\n # Iterate over the nodes in the queue\n for node in queue:\n\n # Get neighbors of the node\n neighbors = G.neighbors(node)\n\n # Check to see if the destination node is in the set of neighbors\n if node2 in neighbors:\n print('Path exists between nodes {0} and {1}'.format(node1, node2))\n return True\n break\n\n else:\n visited_nodes.add(node)\n queue.extend([n for n in neighbors if n not in visited_nodes])\n\n # Check to see if the final element of the queue has been reached\n if node == queue[-1]:\n print('Path does not exist between nodes {0} and {1}'.format(\n node1, node2))\n\n # Place the appropriate return statement\n return False", "def paths(self, source, target):\n assert source in self.node_map\n assert target in self.node_map\n if has_path(self.G2, source, target):\n return nx.all_simple_paths(self.G2, source=source, target=target)\n return None", "def valid_path(self, source, sink, path):\n\n if source == sink:\n return path\n for edge in self.adjacents[source]:\n if edge not in path:\n if edge.capacity - self.edges[edge] > 0:\n return self.valid_path(edge.sink, sink, path + [edge])\n\n # In case there is no more possible path:\n return None", "def path_exists(graph, start, end):\n explored = set()\n q = deque([start])\n while q:\n current = q.pop()\n if current == end:\n return True\n if current in explored:\n continue\n explored.add(current)\n for adjacent in graph.get(current, set()):\n q.appendleft(adjacent)\n return False", "def check_path(match_tuple: MatchTuple) -> bool:\n relative_path = match_tuple.link.split('#')[0]\n full_path = os.path.join(\n os.path.dirname(str(match_tuple.source)), relative_path)\n return os.path.exists(full_path)", "def __checkDestination(self):\n return os.path.exists(self.__targetPath)", "def _check_path(G, max_res, min_res, direction, algorithm):\n try:\n if not has_path(G, \"Source\", \"Sink\"):\n raise NetworkXException(\"Disconnected Graph\")\n except NetworkXException as e:\n raise Exception(\"An error occurred: {}\".format(e))", "def connected( self, u, v ):\n try:\n self.shortestPath(u, v)\n return True\n except nx.NetworkXNoPath:\n return False", "def has_access(self, source, destination, port):\n logger.info('Looking for path from %s to %s on port %s', source, destination, 80)\n self._validate_args(source, destination)\n paths = self.list()\n logger.info('Found paths %s', paths)\n return self._has_access(paths, source, destination, port)", "def inPath(self, oth: 'StateNode') -> bool:\n if self == oth:\n return True\n if self.isSameState(oth):\n return True\n if self.previous is not None:\n return 
self.previous.inPath(oth)", "def independent(self) -> bool:\n parent = self._parent()\n if parent is None:\n return True\n connections = parent._graph.connections\n path = self._path\n lp = len(path)\n for con in connections:\n if con[\"type\"] == \"connection\":\n if con[\"target\"][:lp] == path:\n return False\n return True", "def hasPath(startVert, endVert, sequencer):\n visited = set()\n sequencer.push(startVert)\n while (not sequencer.empty()):\n current = sequencer.pop()\n visited.add(current)\n print(\"Visting \" + str(current.id))\n for n in current.getConnections():\n if n == endVert:\n return True\n if n not in visited:\n sequencer.push(n)\n return False", "def __bool__(self):\n for root, products in self.rel_paths():\n if products:\n return True\n return False", "def isSource(self):\n return (len(self.parents()) == 0)", "def is_reachable(graph, root, destination):\n \n if root in graph.nodes and destination in graph.nodes:\n connected_path = dfs(graph, root)\n return destination in connected_path\n else:\n logger.error('Root or destination nodes not in graph')", "def is_relative_to(p1: Path, p2: Path) -> bool:\n # stolen from CPython's source\n try:\n p1.relative_to(p2)\n return True\n except ValueError:\n return False", "def path_exists(board: QBoard, start: Coordinates,\n goals: List[Coordinates]) -> bool:\n\n visited: Set[Coordinates] = set()\n queue = [start]\n\n while queue:\n v = queue.pop(0)\n if v not in visited:\n visited.add(v)\n print(f\"Visiting {v}\")\n queue.extend(set(board[v]) - visited)\n if v in goals:\n return True\n\n # print(visited)\n return False", "def has_relationship(self, source_node: Node, target_node: Node) -> bool: # pylint: disable=no-self-use\n return source_node.node_id in target_node.in_nodes_ids", "def process(self, source_path: pathlib.Path) -> bool:", "def existShortestPath(self):\r\n # s, t = self.findSourceDest(source, dest)\r\n return self.run()", "def exists(self):\n return self.islink() or exists(self._path)", "def contains_edge(self, source: n, destination: n) -> bool:\n if not contains_vertex(source):\n return False\n if not contains_vertex(destination):\n return False\n return destination in self._graph[source].get_connections()", "def checkSourceLocations(packageKey):\n directoryPath = dotfilePath + \\\n configDict['options'][packageKey]['directoryName'] + \"/\"\n\n for link in configDict['options'][packageKey]['links']:\n for key, value in link.items():\n sourcePath = directoryPath + key\n\n if symMod.symlinkLocationExists(sourcePath):\n return False\n\n return True", "def reach(self, from_symbol, to_symbol):\n # type: (Type[Nonterminal], Type[Nonterminal]) -> bool\n return len(self.path_rules(from_symbol, to_symbol)) > 0", "def goal_check(current_node, target_node):\n if current_node.id == target_node.id:\n return True\n else:\n return False", "def _find_path(self, start, end, path, visited):\n path.append(start)\n visited.add(start)\n if start == end:\n return path\n for vertex in self.neighbors(start):\n if vertex not in visited:\n if not self._find_path(vertex, end, path, visited):\n path.remove(vertex)\n else:\n return True" ]
[ "0.6959824", "0.6836107", "0.68324447", "0.6604859", "0.6593845", "0.6593503", "0.64851725", "0.64775616", "0.6451273", "0.6392526", "0.6296195", "0.62843114", "0.62619346", "0.6250411", "0.6164813", "0.6146912", "0.6066695", "0.60657865", "0.60454684", "0.6038807", "0.6026593", "0.60253775", "0.6024699", "0.6016977", "0.6009446", "0.59697974", "0.59108955", "0.58852684", "0.588459", "0.58667964" ]
0.85063815
0
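An equivalent reachability check ships with networkx itself as nx.has_path, which performs the same try/except internally. The sketch below is purely illustrative — the standalone function, the toy graph and its node names are assumptions and not part of this dataset record:

import networkx as nx

def has_path(G, source, target):
    # Same reachability test as the document above, delegating to networkx's
    # built-in helper instead of computing an explicit shortest path.
    return nx.has_path(G, source, target)

# Illustrative usage (toy graph, names invented for the sketch):
G = nx.Graph()
G.add_edges_from([("a", "b"), ("b", "c")])
G.add_node("z")                    # isolated node, unreachable from "a"
print(has_path(G, "a", "c"))       # True
print(has_path(G, "a", "z"))       # False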
This function finds the optimal path from S to D subject to the constraint L
def Optimum_prun_based_routing(self, S, D, L):
    if self.has_path(S, D):
        Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w')
        Opt_path = Shortest_path
        PathConcave_cost = self.max_path_cost(Shortest_path, 'c1')
        while len(Shortest_path) != 0:
            path_cost = self.additive_path_cost(Shortest_path, 'w')
            if path_cost <= L:
                """go to concave cost"""
                PathConcave_cost = self.max_path_cost(Shortest_path, 'c1')
                self.G = self.rm_edge_constraint(PathConcave_cost)  # remove all links where the concave link is greater than PathConcave_cost
                Opt_path = Shortest_path
                if self.has_path(S, D):
                    Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w')
                else:
                    Shortest_path = []
            else:
                break
    else:
        self.logger.info('No path from %s to %s', S, D)
        PathConcave_cost = 0
        Opt_path = []
    return PathConcave_cost, Opt_path
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Option2_routing(self, S, D, L):\n if self.has_path(S, D): \n Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w') \n Opt_path = Shortest_path\n path_cost_with_weighted_sum = self.calculate_path_cost_with_weighted_sum(Shortest_path, 'c1', 'c2')\n return path_cost_with_weighted_sum, Opt_path\n\n while len(Shortest_path) != 0:\n path_cost = self.additive_path_cost(Shortest_path, 'w') \n #self.logger.info('Path cost - %d', path_cost)\n if path_cost <= L:\n \"\"\"go to path cost with weighted sum\"\"\"\n path_cost_with_weighted_sum = self.calculate_path_cost_with_weighted_sum(Shortest_path, 'c1', 'c2')\n self.G = self.rm_edge_constraint(path_cost_with_weighted_sum) # remove all links where the concave link is greater than PathConcave_cost \n Opt_path = Shortest_path\n if self.has_path(S, D):\n Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w')\n else:\n Shortest_path = [] \n else:\n break \n else:\n self.logger.info('No path from %s to %s', S, D)\n Opt_path = []\n path_cost_with_weighted_sum = 0\n return path_cost_with_weighted_sum, Opt_path", "def Option3_routing(self, S, D, L):\n if self.has_path(S, D): \n Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w') \n Opt_path = Shortest_path\n path_cost_with_concave_function = self.calculate_path_cost_with_concave_function(Shortest_path, 'c1', 'c2')\n return path_cost_with_concave_function, Opt_path\n while len(Shortest_path) != 0:\n path_cost = self.additive_path_cost(Shortest_path, 'w') \n #self.logger.info('Path cost - %d', path_cost)\n if path_cost <= L:\n \"\"\"go to path cost with weighted sum\"\"\"\n path_cost_with_concave_function = self.calculate_path_cost_with_concave_function(Shortest_path, 'c1', 'c2')\n self.G = self.rm_edge_constraint(path_cost_with_concave_function) # remove all links where the concave link is greater than PathConcave_cost \n Opt_path = Shortest_path\n if self.has_path(S, D):\n Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w')\n else:\n Shortest_path = [] \n else:\n break \n else:\n self.logger.info('No path from %s to %s', S, D)\n Opt_path = []\n path_cost_with_concave_function = 0\n return path_cost_with_concave_function, Opt_path", "def path(l_s, l_f, leg_list):\n # candidates =\n return min(heuristic_path([l_s], l_f, leg_list, []), key = len)", "def optimal_path(self, mission, start, sp):\n mission.add(start)\n while mission.targets[0] and mission.is_active():\n ds = [(sp[start][t], t) for t in mission.targets[0] if t in sp[start]]\n if not ds:\n mission.add(u'-1') # target not connected --> fill with dummies\n continue\n target = min(ds)\n for i in range(target[0] - 1):\n mission.add(u'0')\n mission.add(target[1])\n start = target[1]", "def smooth_input(xs, ys, L):\n n = len(xs)\n\n # obj = [1 for i in range(n)]\n # for i in range(2 * n):\n # obj.append(0)\n\n # Create the model\n model = LpProblem(name=\"small-problem\", sense=LpMinimize)\n ws = [LpVariable(name=\"w_{}\".format(i), lowBound=0, upBound=1) for i in range(n)]\n ls = [LpVariable(name=\"L_{}\".format(i), lowBound=0) for i in range(n)]\n zs = [LpVariable(name=\"z_{}\".format(i)) for i in range(n)]\n\n # objective\n model += lpSum(ws)\n\n # constraint 1:\n # sum of Li <= L\n model += (lpSum(ls) <= L * n, \"sum of Li <= L\")\n\n # Constraint 2:\n # w_i >= |z_i - y_i|\n for i in range(n):\n model += (ws[i] + zs[i] >= ys[i], \"C2.a_{}\".format(i))\n model += (ws[i] - zs[i] >= -ys[i], \"C2.b_{}\".format(i))\n\n # Constraint 3\n # |z_i - z_j| <= L_i * dist(x_i, x_j)\n for i in range(n):\n for j in range(n):\n if i != j:\n 
model += (zs[i] - zs[j] - abs(xs[i] - xs[j]) * ls[i] <= 0, \"C3.a_{}_{}\".format(i, j))\n model += (zs[j] - zs[i] - abs(xs[i] - xs[j]) * ls[i] <= 0, \"C3.b_{}_{}\".format(i, j))\n\n if model.solve() == 1:\n print(\n \"------------------------------------\\nFound solution for the linear program\\n------------------------------------\\n\")\n return [[xs[i], zs[i].value()] for i in range(n)]\n # return [zi.value() for zi in zs], [li.value() for li in ls]\n\n print(\"Linear program: no solution found\")\n exit(1)\n return -1", "def sol_cost ( S, D, R ) :\n\tmask = array([0]* len(R))\n\tmask[R>0] = 1\n\tmaxerr = max(mask * abs(S - D))\n\tmeanerr = float(sum(mask * abs(S - D)))\n\tmeanerr /= sum(mask)\n\t\n\treturn (sum(R * (S - D)**2),maxerr,meanerr)", "def safeJourney(Alist,s,d):\n #Initialize dictionaries\n dinit = 10**6\n Edict = {} #Explored nodes\n Udict = {} #Unexplored nodes\n path = [[] for l in Alist]\n\n Alen = len(Alist) #length of Alist\n dinits = [dinit]*Alen #list of airport indexes\n Udict = dict(zip(list(range(Alen)),dinits)) #zip into dictionary\n Udict[s] = 0\n path[s] = [s]\n \n #Main search\n while len(Udict)>0:\n #Find node with min d in Udict and move to Edict\n dmin = dinit\n for n,w in Udict.items():\n if w<dmin:\n dmin=w\n nmin=n\n Edict[nmin] = Udict.pop(nmin)\n print(\"moved node\", nmin)\n\n #Update provisional distances for unexplored neighbors of nmin\n \n #for n,w in G.adj[nmin].items():\n for item in Alist[nmin]: #nminth element is a list of two element tuples (node, weight)\n n = item[0] #first elt of tuple is node/neighbour\n w = item[1] #2nd elt is density/weigh\n #for n,w in etc_______________________-\n \n if n in Edict:\n pass\n elif n in Udict:\n #dcomp = dmin + w\n dcomp = max(w,dmin) #take largest value to record most dangerous segment\n if dcomp<Udict[n]:\n print(Udict)\n Udict[n]=dcomp\n path[n] = path[nmin] + [n]\n #path[n].extend(path[nmin])\n #path[n] = path[nmin]\n \n #path[n].append(n) #n not nmin\n print(path)\n # else:\n #dcomp = dmin + w\n # dcomp = max(w,dmin)\n # Udict[n] = dcomp\n #path[n].extend(path[nmin])\n #path[n].append(nmin) \n \n if nmin == d: #if current node is destination\n return path[d],Edict[d]\n return [] #no path", "def fastest_path_estimation(sol):\n\n class Path:\n def __init__(self, places, graph):\n self.g = 0 # current cost\n self.graph = graph\n self.visited = [places[0]] # list of already visited attractions\n self.not_visited = copy.deepcopy(places[1:]) # list of attractions not yet visited\n\n def __lt__(self, other):\n return self.g < other.g\n\n def add(self, idx):\n # add the cost\n self.g += self.graph[self.visited[-1], idx]\n # add the to the visited place and remove from the unvisited places\n self.visited.append(idx)\n self.not_visited.remove(idx)\n\n def add_to_heap_queue(path):\n # custom function to add to heap queue sorted by the solution's cost\n heappush(h_queue, path)\n\n if len(sol.not_visited) == 0:\n return 0\n elif len(sol.not_visited) == 1:\n return sol.graph[sol.visited[-1], sol.not_visited[0]]\n\n c = sol.visited[-1]\n pm = sol.not_visited[-1]\n # the heap queue of solution sorted by their cost - change all to tuples with g for dijkstra\n h_queue = []\n\n # the places to use for the graph\n sub_search_places = [c]\n sub_search_places.extend(sol.not_visited)\n\n # push the first \"node\" in the queue\n add_to_heap_queue(Path(sub_search_places, sol.graph))\n while True:\n # take the next solution with the shortest cost\n path = heappop(h_queue)\n # if it contains destination, stop and return that 
solution\n if pm in path.visited:\n return path.g\n # create a new solution for each neighbor of the current vertex and add it to heap queue\n for place in path.not_visited:\n new_path = copy.deepcopy(path)\n new_path.add(place)\n add_to_heap_queue(new_path)", "def solve_tsp(dist):\n\n # number of nodes\n N = dist.shape[0]\n\n # tsp path for quick calculation of cost\n ii = np.arange(N)\n jj = np.hstack((np.arange(1, N), 0))\n\n # for each node, a sorted list of closest nodes\n dsort = [np.argsort(d) for d in dist]\n dsort = [d[d != i] for i, d in enumerate(dsort)]\n\n # randomly initialize path through graph\n path = np.random.permutation(N)\n idx = np.argsort(path)\n cost = np.sum(dist[path[ii], path[jj]])\n \n # keep track of objective function over time\n cost_hist = [cost]\n\n # optimization loop\n node = 0\n while node < N:\n\n # we'll try breaking the connection i -> j\n i = path[node]\n j = path[(node+1) % N]\n \n # since we are breaking i -> j we can remove the cost of that connection\n c = cost - dist[i, j]\n\n # search over nodes k that are closer to j than i\n for k in dsort[j]:\n # can safely continue if dist[i,j] < dist[k,j] for the remaining k\n if k == i:\n node += 1\n break\n\n # break connection k -> p\n # add connection j -> p\n # add connection i -> k\n p = path[(idx[k]+1) % N]\n new_cost = c - dist[k,p] + dist[j,p] + dist[i,k]\n\n # if this swap improves the cost, implement it and move to next i\n if new_cost < cost:\n path = reverse_segment(path, idx[j], idx[k])\n idx = np.argsort(path)\n # make sure that we didn't screw up\n assert np.abs(np.sum(dist[path[ii], path[jj]]) - new_cost) < 1e-6\n cost = new_cost\n # restart from the begining of the graph\n cost_hist.append(cost)\n node = 0\n break\n\n return path, cost_hist", "def TSP_ILP(G):\n V1 = range(len(G))\n n, V = len(G), set(V1)\n model = Model() # binary variables indicating if arc (i,j) is used\n # on the route or not\n x = [[model.add_var(var_type=BINARY) for j in V] for i in V]\n # continuous variable to prevent subtours: each city will have a\n # different sequential id in the planned route except the 1st one\n y = [model.add_var() for i in V]\n # objective function: minimize the distance\n model.objective = minimize(xsum(G[i][j]*x[i][j] for i in V for j in V))\n\n # constraint : leave each city only once\n for i in V:\n model += xsum(x[i][j] for j in V - {i}) == 1\n # constraint : enter each city only once\n for i in V:\n model += xsum(x[j][i] for j in V - {i}) == 1 # subtour elimination\n for (i, j) in product(V - {0}, V - {0}):\n if i != j:\n model += y[i] - (n+1)*x[i][j] >= y[j]-n # optimizing\n\n model.verbose = 0\n model.optimize() # checking if a solution was found\n\n if model.num_solutions:\n nc = 0 # cycle starts from vertex 0\n cycle = [nc]\n while True:\n nc = [i for i in V if x[nc][i].x >= 0.99][0]\n cycle.append(nc)\n if nc == 0:\n break\n\n return (model.objective_value, cycle)", "def OptimalWarpingPath( self, colStart=None ):\n rows = len(self.D)\n cols = len(self.D[0])\n n = rows-1\n m = cols-1\n if colStart:\n m=colStart\n path = [(n,m)]\n while n > 0 or m > 0:\n if n == 0 :\n path.insert(0,(0,m-1))\n m -= 1\n elif m == 0 :\n path.insert(0,(n-1,0))\n n -= 1\n else:\n minStep = min( self.D[n-1][m-1], self.D[n-1][m], self.D[n][m-1] )\n if self.D[n-1][m-1] == minStep:\n path.insert(0,(n-1,m-1))\n n -= 1\n m -= 1\n elif self.D[n-1][m] == minStep:\n path.insert(0,(n-1,m))\n n -= 1\n else: # self.D[n][m-1] == min:\n path.insert(0,(n,m-1))\n m -= 1\n return path, self.CostOfPath( path, self.D )", 
"def g_solving_subproblem_of_LR(self,vehicle_id):\r\n global_LB=-10000\r\n global_UB=10000\r\n iteration_for_RSP=20\r\n optimal_solution_for_RSP=None\r\n optimal_value_y=0\r\n self.multiplier_v=0.5\r\n\r\n #solve the expected shortest path problem\r\n self.g_dynamic_programming_algorithm(vehicle_id, 4)\r\n #obtain the variance\r\n y_=self.g_ending_state_vector[vehicle_id].VSStateVector[0].Primal_Label_cost_variance\r\n\r\n for k in range(iteration_for_RSP):\r\n # print(k)\r\n LB=0\r\n # step 2: solve decomposed dual problems\r\n # Part I: subproblem of x\r\n self.g_dynamic_programming_algorithm(vehicle_id, 2)\r\n LB+=self.g_ending_state_vector[vehicle_id].VSStateVector[0].Label_cost_for_lagrangian\r\n\r\n # Part II: subproblem of y\r\n obj_of_y_ = self.reliability * (y_) ** 0.5 - self.multiplier_v * y_\r\n if obj_of_y_ > 0:\r\n y = 0\r\n LB += 0\r\n else:\r\n y = y_\r\n LB += obj_of_y_\r\n # generate an upper bound\r\n variance = self.g_ending_state_vector[vehicle_id].VSStateVector[0].Primal_Label_cost_variance\r\n Label_cost_for_lagrangian_mean=self.g_ending_state_vector[vehicle_id].VSStateVector[0].Label_cost_for_lagrangian_mean\r\n UB=Label_cost_for_lagrangian_mean+self.reliability*(variance)**0.5\r\n\r\n # print(\"UB:{}\".format(UB))\r\n # print(\"LB:{}\".format(LB))\r\n\r\n # UB and LB update\r\n if LB > global_LB:\r\n global_LB = LB\r\n optimal_solution_for_RSP = self.g_ending_state_vector[vehicle_id].VSStateVector\r\n optimal_value_y = y\r\n\r\n if UB < global_UB:\r\n global_UB = UB\r\n\r\n\r\n # step 3: update multipliers\r\n if variance-y!= 0:\r\n self.multiplier_v+= (global_UB - LB) / (variance-y)\r\n # if self.multiplier_v<0:\r\n # self.multiplier_v=1\r\n # print(self.multiplier_v)\r\n\r\n # step 4: termination condition test\r\n if global_UB != 0:\r\n gap = abs((global_UB-global_LB) / global_UB)\r\n # print(gap)\r\n if gap < 0.02:\r\n print(\"iteration{}\".format(k + 1))\r\n print(self.multiplier_v)\r\n print(global_LB, global_UB)\r\n return optimal_solution_for_RSP, optimal_value_y,global_LB,global_UB\r\n else:\r\n if global_UB - global_LB == 0:\r\n print(\"iteration{}\".format(k + 1))\r\n print(self.multiplier_v)\r\n print(global_LB, global_UB)\r\n return optimal_solution_for_RSP,optimal_value_y,global_LB,global_UB\r\n\r\n if k == iteration_for_RSP - 1:\r\n print(\"iteration{}\".format(k + 1))\r\n print(self.multiplier_v)\r\n print(global_LB, global_UB)\r\n return optimal_solution_for_RSP,optimal_value_y,global_LB,global_UB", "def optimize(self):\n\n self.logger.info(\"Solving with Dynamic Slope Scaling Procedure in Julia :\")\n optimization_start = time.time()\n\n # 1. 
Preprocess for old network graph\n if self.old_network_graph is not None:\n\n # DSSP on old network\n old_network_obj = sum(list(nx.get_node_attributes(self.old_network_graph, config.BUILDING_CONSUMPTION_KEY).values()))-1e-5\n try:\n self.check_infeasibility(self.old_network_graph, old_network_obj)\n except DHCOptimizerException as e:\n e.data = \"Invalid existing network: \" + e.data\n raise e\n\n flows, obj_val = self.optimize_with_dssp_julia(self.old_network_graph, old_network_obj, set())\n self.logger.info(\"Optimization phase time: %.2fs\" % (time.time() - optimization_start))\n solution_old_graph = self.build_solution_graph(self.old_network_graph, flows)\n\n if self.modify_old_network:\n\n # Add max capacity on old edges\n self.old_capacity = deepcopy(flows)\n old_buildings = list(nx.get_node_attributes(self.old_network_graph, config.BUILDING_CONSUMPTION_KEY).values())\n for key in flows:\n if (key[1],key[0],0) not in self.old_capacity and key[1] not in old_buildings:\n self.old_capacity[(key[1],key[0],0)] = self.old_capacity[key]\n\n # Add Imaginary edges\n for edge in self.old_capacity:\n if self.optimization_graph.has_edge(*edge):\n\n # add nodes\n if not self.optimization_graph.has_node(config.IM_PREFIX+edge[0]):\n self.optimization_graph.add_node(config.IM_PREFIX+edge[0])\n self.optimization_graph.nodes[config.IM_PREFIX+edge[0]][config.GPD_GEO_KEY] = \\\n self.optimization_graph.nodes[edge[0]][config.GPD_GEO_KEY]\n if not self.optimization_graph.has_node(config.IM_PREFIX+edge[1]):\n self.optimization_graph.add_node(config.IM_PREFIX+edge[1])\n self.optimization_graph.nodes[config.IM_PREFIX+edge[1]][config.GPD_GEO_KEY] = \\\n self.optimization_graph.nodes[edge[1]][config.GPD_GEO_KEY]\n # add edges\n if not self.optimization_graph.has_edge(edge[0],config.IM_PREFIX+edge[0]):\n self.optimization_graph.add_edge(edge[0],config.IM_PREFIX+edge[0])\n if not self.optimization_graph.has_edge(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1]):\n self.optimization_graph.add_edge(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1])\n if not self.optimization_graph.has_edge(config.IM_PREFIX+edge[1],edge[1]):\n self.optimization_graph.add_edge(config.IM_PREFIX+edge[1],edge[1])\n\n # put cost\n self.optimization_graph.edges[(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1],0)][config.EDGE_COST_KEY] = \\\n self.optimization_graph.edges[(edge[0],edge[1],0)][config.EDGE_COST_KEY]\n self.optimization_graph.edges[(edge[0],edge[1],0)][config.EDGE_COST_KEY] = 1e-5\n self.optimization_graph.edges[(edge[0],config.IM_PREFIX+edge[0],0)][config.EDGE_COST_KEY] = 1e-5\n self.optimization_graph.edges[(config.IM_PREFIX+edge[1],edge[1],0)][config.EDGE_COST_KEY] = 1e-5\n\n else:\n # if we don't modify the old network, we have to change the capacity of the supplies\n already_consummed = {}\n for edge in solution_old_graph.edges():\n if solution_old_graph.nodes[edge[0]].get(config.NODE_TYPE_KEY) == config.SUPPLY_NODE_TYPE:\n already_consummed[edge[0]] = already_consummed.get(edge[0], 0) + \\\n solution_old_graph.edges[edge][config.SOLUTION_POWER_FLOW_KEY]\n for source in already_consummed:\n if already_consummed[source] <= self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY]:\n self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] -= already_consummed[source]\n self.network_objective -= already_consummed[source]\n else:\n self.network_objective -= self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY]\n 
self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] = 0\n\n # Remove edges from old network\n edges_to_remove = set()\n for e in self.optimization_graph.edges():\n if self.old_network_graph.has_edge(*e) or self.old_network_graph.has_edge(e[1],e[0]):\n edges_to_remove.add(e)\n self.optimization_graph.remove_edges_from(edges_to_remove)\n\n # Remove isolated buildings of optimization graph\n isolated_to_remove = set()\n for e in self.old_network_graph.edges():\n if e[0] in self.old_network_graph.nodes() and \\\n self.optimization_graph.nodes[e[1]].get(config.NODE_TYPE_KEY) == config.BUILDING_NODE_TYPE:\n isolated_to_remove.add(e)\n self.optimization_graph.remove_edges_from(isolated_to_remove)\n\n # Remove buildings from old network\n for n, data in self.old_network_graph.nodes(data=True):\n if data.get(config.NODE_TYPE_KEY) == config.BUILDING_NODE_TYPE:\n self.optimization_graph.remove_node(n)\n\n # Re-link sources\n sources = set()\n for n, data in self.optimization_graph.nodes(data=True):\n if data.get(config.NODE_TYPE_KEY) == config.SUPPLY_NODE_TYPE:\n sources.add(n)\n source_graph = self.optimization_graph.subgraph(sources).copy()\n self.optimization_graph.remove_nodes_from(sources)\n gnx.remove_isolates(self.optimization_graph)\n node_filter = lambda n: self.optimization_graph.nodes.get(n,{}).get(config.NODE_TYPE_KEY) != config.BUILDING_NODE_TYPE\n gnx.spatial_points_merge(self.optimization_graph, source_graph.nodes_to_gdf(), node_filter=node_filter, inplace=True)\n\n # fill missing information\n gnx.fill_edges_missing_geometry_attributes(self.optimization_graph)\n gnx.fill_length_attribute(self.optimization_graph, config.EDGE_LENGTH_KEY, only_missing=True)\n gnx.fill_length_attribute(self.optimization_graph, config.EDGE_COST_KEY, only_missing=True)\n for e in self.optimization_graph.edges(keys=True):\n self.optimization_graph.edges[e][config.LEASTCOST_COEF_KEY] = \\\n self.optimization_graph.edges[e].get(config.LEASTCOST_COEF_KEY,0)\n\n\n\n # 2. Process the DSSP on optimization graph\n self.check_is_ready()\n self.check_infeasibility(self.optimization_graph, self.network_objective)\n\n if self.old_network_graph is not None and self.modify_old_network:\n old_buildings = set(nx.get_node_attributes(self.old_network_graph, config.BUILDING_CONSUMPTION_KEY).keys())\n else:\n old_buildings = set()\n flows, obj_val = self.optimize_with_dssp_julia(self.optimization_graph, self.network_objective, old_buildings,postprocess= (not self.modify_old_network))\n self.logger.info(\"Optimization phase time: %.2fs\" % (time.time() - optimization_start))\n self.solution_graph = self.build_solution_graph(self.optimization_graph, flows, self.connected)\n\n # 3. 
Postprocess for old network graph\n if self.old_network_graph is not None:\n \n if self.modify_old_network:\n # Put the right supply capacity and cost\n for edge in self.old_capacity:\n if self.solution_graph.has_edge(edge[0],edge[1]):\n self.solution_graph.edges[(edge[0],edge[1])][config.EDGE_COST_KEY] = \\\n self.optimization_graph.edges[(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1],0)][config.EDGE_COST_KEY]\n \n # Remove imaginary edges\n imaginary_nodes_to_remove = set()\n nodes_to_relabel = {}\n for edge in self.solution_graph.edges():\n if str(edge[0]).startswith(config.IM_PREFIX) and str(edge[1]).startswith(config.IM_PREFIX):\n real_edge = edge[0][len(config.IM_PREFIX):],edge[1][len(config.IM_PREFIX):]\n self.old_capacity[(real_edge[0], real_edge[1], 0)] = pd.np.inf\n self.old_capacity[(real_edge[1], real_edge[0], 0)] = pd.np.inf\n if not self.solution_graph.has_edge(*real_edge):\n for i in range(2):\n nodes_to_relabel[edge[i]] = real_edge[i]\n else:\n self.solution_graph.edges[real_edge[0],real_edge[1]][config.SOLUTION_POWER_FLOW_KEY] += \\\n self.solution_graph.edges[edge].get(config.SOLUTION_POWER_FLOW_KEY,0)\n imaginary_nodes_to_remove.add(edge[0])\n imaginary_nodes_to_remove.add(edge[1])\n elif str(edge[0]).startswith(config.IM_PREFIX):\n imaginary_nodes_to_remove.add(edge[0])\n elif str(edge[1]).startswith(config.IM_PREFIX):\n imaginary_nodes_to_remove.add(edge[1])\n\n nx.relabel_nodes(self.solution_graph, nodes_to_relabel, copy=False)\n self.solution_graph.remove_nodes_from(list(imaginary_nodes_to_remove))\n for node in nodes_to_relabel.values():\n if self.solution_graph.has_edge(node, node):\n self.solution_graph.remove_edge(node, node)\n\n else:\n for source in nx.get_node_attributes(self.solution_graph, config.SUPPLY_POWER_CAPACITY_KEY):\n self.solution_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] += already_consummed.get(source,0)\n self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] += already_consummed.get(source,0)\n\n return flows, obj_val", "def find_min_hamiltonian_path(G,weights,probs_instead_of_weights=False):\n\n # Create a new model\n m = Model(\"hamiltonian_cycle\")\n \n # Create variables\n x_vars = {}\n u_vars = {}\n for var1 in permute(G.vertices()):\n for var2 in permute(G.vertices()):\n if var1 != var2:\n x_vars[(var1,var2)] = m.addVar(vtype='B', name=\"x_\"+str(var1)+'_'+str(var2))\n u_vars[var1] = m.addVar(vtype=GRB.INTEGER, name=\"u_\"+str(var1))\n m.update()\n \n for var in G.vertices():\n if var != START_NODE:\n cur_incoming = LinExpr([(1.0,v) for k,v in x_vars.items() if (k[1] == var)])\n #print(cur_incoming)\n m.addConstr(cur_incoming,GRB.EQUAL,1.0)\n \n if var != END_NODE:\n cur_outgoing = LinExpr([(1.0,v) for k,v in x_vars.items() if (k[0] == var)])\n #print(cur_outgoing)\n m.addConstr(cur_outgoing,GRB.EQUAL,1.0)\n \n for var1 in G.vertices():\n for var2 in G.vertices():\n if var1 != var2:\n c = LinExpr([(1.0,u_vars[var1]),(-1.0,u_vars[var2]),(G.num_vertices(),x_vars[(var1,var2)])])\n #print(c)\n m.addConstr(c,GRB.LESS_EQUAL,G.num_vertices()-1)\n \n # Set objective\n #try:\n edge_weights = permute(G.get_edge_weights(weights))\n if probs_instead_of_weights:\n all_probs = []\n for v in G.vertices():\n if v != END_NODE:\n batch_scores = [(e,w) for e,w in edge_weights if e[0] == v]\n S = logsumexp([x[1] for x in batch_scores])\n batch_scores = [(e,np.exp(w-S)) for e,w in batch_scores]\n all_probs.extend(batch_scores)\n edge_weights = all_probs\n objective = LinExpr([(weight,x_vars[edge]) for edge,weight in 
edge_weights])\n #except TypeError:\n # return None\n \n m.setObjective(objective,GRB.MINIMIZE)\n m.update()\n code = m.optimize()\n \n try:\n return [k for k,v in x_vars.items() if v.x > 0.98]\n except GurobiError:\n return None", "def find_path(required_pos, required_speed, current_pos, current_speed, path):\n if (required_pos == current_pos) and (required_speed == current_speed):\n# print(\"== Solution : \" + str(path), file=sys.stderr)\n return path # ok, we have a solution\n if required_speed <= 0:\n# print(\"negative speed\", file=sys.stderr)\n return -1\n if required_pos < current_pos:\n# print(\"negative position : {0} < {1}\".format(required_pos, current_pos),\n# file=sys.stderr)\n return -1 # No path from current pos to get to required pos\n\n path.append( [required_pos, required_speed] )\n# print(\"path : \" + str(path), file=sys.stderr)\n \n # find path, if we slow, keep speed or accelerate\n path_speed = find_path(required_pos - required_speed, required_speed+1,\n current_pos, current_speed, path.copy())\n path_wait = find_path(required_pos - required_speed, required_speed,\n current_pos, current_speed, path.copy())\n path_slow = find_path(required_pos - required_speed, required_speed-1,\n current_pos, current_speed, path.copy())\n \n # find best path\n best_path = -1\n for p in [path_slow, path_wait, path_speed]:\n if p == -1:\n continue\n if best_path == -1:\n best_path = p\n elif len(p) < len(best_path):\n best_path = p\n return best_path", "def find_min_path(s, t, dist):\n\n rows = len(dist) - 1\n cols = len(dist[0]) - 1\n col = cols\n row = rows\n pos_str = \"Position: (row={} col={}) -> (row={} col={})\"\n cst_str = \"Cost: {}\"\n prev_row = row\n prev_col = col\n\n # init sparse path matrix\n sparse_path = [[\" \" for x in range(cols + 1)] for x in range(rows + 1)]\n sparse_path[0][0] = \"0\"\n\n # start with operation at (rows, cols) and work backwards\n sparse_path[rows][cols] = dist[rows][cols]\n\n if verbose == 2:\n print()\n print(\"Initial Minimum Path Matrix:\")\n print_matrix(s, t, sparse_path)\n\n while True:\n\n # bail out if we are in the corner\n if row == 0 and col == 0:\n break\n\n # if we are not at a matrix boundary\n if row != 0 and col != 0: # if at left edge or top row, cannot move diagonally\n\n # diagonal\n if (dist[row - 1][col - 1] == min(dist[row - 1][col],\n dist[row][col - 1],\n dist[row - 1][col - 1])) and (dist[row - 1][col - 1] == dist[row][col] or dist[row - 1][col - 1] == dist[row][col] - 1):\n sparse_path[row - 1][col - 1] = dist[row - 1][col - 1]\n temp_cost = dist[row - 1][col - 1]\n\n # move current cell\n prev_row = row\n prev_col = col\n if col > 0:\n col -= 1\n if row > 0:\n row -= 1\n\n if verbose == 2:\n print(pos_str.format(str(prev_row), str(prev_col), str(row), str(col)))\n print(cst_str.format(temp_cost))\n print()\n\n # left\n elif dist[row][col - 1] <= dist[row][col]:\n sparse_path[row][col - 1] = dist[row][col - 1]\n temp_cost = dist[row][col - 1]\n\n # move current cell\n prev_row = row\n prev_col = col\n if col > 0:\n col -= 1\n\n if verbose == 2:\n print(pos_str.format(str(prev_row), str(prev_col), str(row), str(col)))\n print(cst_str.format(temp_cost))\n print()\n\n # above\n else:\n sparse_path[row - 1][col] = dist[row - 1][col]\n temp_cost = dist[row - 1][col]\n\n # move current cell\n prev_row = row\n prev_col = col\n if row > 0:\n row -= 1\n\n if verbose == 2:\n print(pos_str.format(str(prev_row), str(prev_col), str(row), str(col)))\n print(cst_str.format(temp_cost))\n print()\n\n # if at matrix edge, can only move 
up\n elif col == 0:\n # above\n sparse_path[row - 1][col] = dist[row - 1][col]\n temp_cost = dist[row - 1][col]\n\n # move current cell\n prev_row = row\n prev_col = col\n if row > 0:\n row -= 1\n\n if verbose == 2:\n print(pos_str.format(str(prev_row), str(prev_col), str(row), str(col)))\n print(cst_str.format(temp_cost))\n print()\n\n # must be at row boundary, can only move left\n else:\n # left\n if dist[row][col - 1] <= dist[row][col]:\n sparse_path[row][col - 1] = dist[row][col - 1]\n temp_cost = dist[row][col - 1]\n\n # move current cell\n prev_row = row\n prev_col = col\n if col > 0:\n col -= 1\n\n if verbose == 2:\n print(pos_str.format(str(prev_row), str(prev_col), str(row), str(col)))\n print(cst_str.format(temp_cost))\n print()\n\n # print matrix\n if verbose == 2:\n print_matrix(s, t, sparse_path)\n\n return sparse_path", "def shortJourney(Alist,s,d):\n \"\"\"Find shortest distances to s in weighted graph, G\"\"\"\n \n #Initialize dictionaries\n dinit = 10**6\n Edict = {} #Explored nodes\n Udict = {} #Unexplored nodes\n path = [[] for l in Alist]\n\n Alen = len(Alist) #length of Alist\n dinits = [dinit]*Alen #list of airport indexes\n Udict = dict(zip(list(range(Alen)),dinits)) #zip into dictionary\n Udict[s] = 0\n path[s] = [s]\n \n #Main search\n while len(Udict)>0:\n #Find node with min d in Udict and move to Edict\n dmin = dinit\n for n,w in Udict.items():\n if w<dmin:\n dmin=w\n nmin=n\n Edict[nmin] = Udict.pop(nmin)\n print(\"moved node\", nmin)\n\n #Update provisional distances for unexplored neighbors of nmin \n for item in Alist[nmin]: #nminth element is a list of two element tuples (node, weight)\n n = item[0] #first elt of tuple is node/neighbour\n w = item[1] #2nd elt is density/weigh\n #for n,w in etc_______________________-\n \n if n in Edict:\n pass\n elif n in Udict:\n #key difference below\n dcomp = (w+dmin) #take sum as you go along\n if dcomp<Udict[n]:\n print(Udict)\n Udict[n]=dcomp\n path[n] = path[nmin] + [n]\n print(path) \n if nmin == d: #if current node is destination\n return [path[d],Edict[d]]\n return [] #no path", "def optimal_path(T, C, r, c):\n seam_path = [0] * (r + c)\n k = r + c - 1\n while k >= 0:\n seam_path[k] = C[r,c]\n T[r,c] = None\n k -= 1\n if C[r,c] == 0:\n r = r-1\n else:\n c = c-1\n assert r == 0 and c == 0\n return seam_path", "def solver(graph,homes,source,home_clusters,all_pairs_distances,all_pairs_shortest_paths):\n\n car_path = [get_car_path(graph,home_clusters,source,all_pairs_distances,all_pairs_shortest_paths, \n source_in_clusters = B1, christofides = B2) for B1 in [False,True] for B2 in [False,True]]\n\n dropoffs = [cluster_solver_utils.nearest_dropoff_efficient(graph,path,homes,all_pairs_distances) for path in car_path]\n cost = [cluster_solver_utils.eval_cost_efficient(graph,car_path[i],dropoffs[i],all_pairs_distances) for i in range(len(car_path))]\n\n minimum_cost = min(cost)\n idx = cost.index(minimum_cost)\n\n return minimum_cost, dropoffs[idx], car_path[idx]", "def get_shortest_route_floyd(network, start,destination, excludings=[]):\n\n # On récupère la liste des villes\n list_city = network[1].keys()\n \n # Si la ville de départ ou de fin n'existe pas\n if start not in list_city or destination not in list_city:\n return None\n\n # On retire les villes à exclure\n list_city = [x for x in list_city if x not in excludings]\n\n\n # Initialisation de se qu'on a besoin\n matrix = []\n distance = []\n n = len(list_city)\n\n \n # On construit la matrice adjacente où indique la distance si il existe une autoroute entre 2 
villes\n for x in range(n): \n matrix.append( [] )\n distance.append( [] )\n for y in range(n):\n road_id = get_road_to(network,list_city[x],list_city[y])\n if road_id != None:\n matrix[x].append( get_length(network,road_id) )\n else:\n matrix[x].append( None )\n distance[x].append( [road_id] ) # Autoroute -> format: ['LA']\n\n\t \n # Algorithme de Floyd\n for k in range(n):\n for i in range(n):\n for j in range(n):\n if ( matrix[i][k] != None and matrix[k][j] != None ) and ( ( matrix[i][j] == None ) or ( matrix[i][j] > matrix[i][k] + matrix[k][j] ) ):\n matrix[i][j] = matrix[i][k] + matrix[k][j]\n\t\t \n\t\t # Hors Floyd / Ajout personnel\n if i != k and j != k: # Si i == k ou j == k, cela veut dire qu'on additionne un résultat supplémentaire à la case ij\n distance[i][j] = [] # Sinon ca signifie qu'on a trouvé un chemin plus court, du coup on supprime l'ancien chemin\n distance[i][j].extend( distance[i][k] ) # Chemin d'autoroute parcouru en plus -> format: ['LA','AH']\n distance[i][j].extend( distance[k][j] ) # Chemin d'autoroute parcouru en plus -> format: ['LA','AH']\n\n\t\t \n # On récupère simplement la liste des autoroutes parcourus\n idx_start = list_city.index( start )\n idx_destination = list_city.index( destination )\n distance_minimum = distance[ idx_start ][ idx_destination ]\n\n \n # Si on ne trouve aucune solution, on renvoie None\n if distance_minimum == [None]:\n distance_minimum = None\n \n return distance_minimum", "def optimize_path(path):\n rospy.loginfo(\"Optimizing path\")\n\n opt_path = []\n current_direction = (0, 0)\n last_direction = (0, 0)\n\n for i in range(len(path) -1):\n current_direction = (path[i+1][0] - path[i][0], path[i+1][1] - path[i][1])\n if current_direction != last_direction:\n opt_path.append(path[i])\n last_direction = current_direction\n \n opt_path.append(path[-1]) #add the last coordinate back\n\n return opt_path", "def original_solution():\n matrix = get_data()\n # Construct Graph\n G = nx.DiGraph()\n rows, cols = len(matrix), len(matrix[0])\n for r in xrange(rows):\n for c in xrange(cols):\n if 0 < c:\n G.add_edge(r*cols + c, r*cols + c - 1, weight=matrix[r][c-1])\n if c < cols-1:\n G.add_edge(r*cols + c, r*cols + c + 1, weight=matrix[r][c+1])\n if 0 < r:\n G.add_edge(r*cols + c, (r-1)*cols + c, weight=matrix[r-1][c])\n if r < rows-1:\n G.add_edge(r*cols + c, (r+1)*cols + c, weight=matrix[r+1][c])\n # Calculate shortest path\n path = nx.shortest_path(G, 0, rows*cols-1, weighted=True)\n \n # Get cost for path\n s = 0\n for p in path:\n c = p % cols\n r = (p - c) / rows\n s += matrix[r][c]\n return s", "def g_solving_subproblem_of_ALR(self,vehicle_id):\r\n global_LB = -10000\r\n global_UB = 10000\r\n iteration_for_RSP = 20\r\n optimal_solution_for_RSP = None\r\n self.multiplier_v = 0.5\r\n\r\n # solve the expected shortest path problem\r\n self.g_dynamic_programming_algorithm(vehicle_id, 3)\r\n\r\n # obtain the variance\r\n y_ =self.g_ending_state_vector[vehicle_id].VSStateVector[0].Primal_Label_cost_variance\r\n\r\n for k in range(iteration_for_RSP):\r\n # print(k)\r\n LB = 0\r\n # step 2: solve decomposed dual problems\r\n # Part I: subproblem of x\r\n self.g_dynamic_programming_algorithm(vehicle_id, 1)\r\n LB += self.g_ending_state_vector[vehicle_id].VSStateVector[0].Label_cost_for_searching\r\n\r\n # Part II: subproblem of y\r\n obj_of_y_ = self.reliability * (y_) ** 0.5 - self.multiplier_v * y_\r\n if obj_of_y_ > 0:\r\n y = 0\r\n LB += 0\r\n else:\r\n y = y_\r\n LB += obj_of_y_\r\n\r\n # generate an upper bound\r\n variance = 
self.g_ending_state_vector[vehicle_id].VSStateVector[0].Primal_Label_cost_variance\r\n Label_cost_for_lagrangian_mean = self.g_ending_state_vector[vehicle_id].VSStateVector[0].Label_cost_for_searching_mean\r\n UB = Label_cost_for_lagrangian_mean + self.reliability * (variance) ** 0.5\r\n\r\n # print(\"UB:{}\".format(UB))\r\n # print(\"LB:{}\".format(LB))\r\n\r\n # UB and LB update\r\n if LB > global_LB:\r\n global_LB = LB\r\n\r\n if UB < global_UB:\r\n global_UB = UB\r\n optimal_solution_for_RSP = self.g_ending_state_vector[vehicle_id].VSStateVector[0]\r\n\r\n # step 3: update multipliers\r\n if variance- y != 0:\r\n self.multiplier_v+= (global_UB - LB) / (variance-y)\r\n # if self.multiplier_v<0:\r\n # self.multiplier_v=1\r\n # print(self.multiplier_v)\r\n\r\n # step 4: termination condition test\r\n if global_UB != 0:\r\n gap = abs((global_UB - global_LB) / global_UB)\r\n # print(gap)\r\n if gap < 0.02:\r\n print(\"iteration{}\".format(k + 1))\r\n print(self.multiplier_v)\r\n print(global_LB, global_UB)\r\n return optimal_solution_for_RSP, global_LB\r\n else:\r\n if global_UB - global_LB == 0:\r\n print(\"iteration{}\".format(k + 1))\r\n print(self.multiplier_v)\r\n print(global_LB, global_UB)\r\n return optimal_solution_for_RSP, global_LB\r\n\r\n if k == iteration_for_RSP - 1:\r\n print(\"iteration{}\".format(k + 1))\r\n print(self.multiplier_v)\r\n print(global_LB, global_UB)\r\n return optimal_solution_for_RSP, global_LB", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n queue = util.PriorityQueue() # PrioritQueue for searshing the graph/ it expand the node with the lowest cost\n visited = [] # Keep track of visited nodes\n path = [] # Keep track of the path\n start =problem.getStartState() # The start node\n\n queue.push((start, path,0), 0) \n \n while not queue.isEmpty():\n (vrtx, path, costparent) = queue.pop() \n if vrtx not in visited: \n if problem.isGoalState(vrtx):\n return [p[1] for p in path]\n\n visited.append(vrtx) \n for successor in problem.getSuccessors(vrtx):\n cost = successor[2]+ costparent\n queue.push((successor[0], path+[successor],cost),cost)\n \n\n util.raiseNotDefined()", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n path_to_point = {}\n cost_to_point = {}\n\n # Get the start node\n start_node = problem.getStartState()\n fringe_node = [start_node]\n path_to_point[start_node] = []\n cost_to_point[start_node] = problem.getCostOfActions(path_to_point[start_node])\n\n goal_found = False\n\n while(not goal_found):\n #for i in range(100): \n nodes_to_expand = set()\n # get max value node in the fringe node\n min_val = float(\"inf\")\n for one_node in fringe_node:\n # Compute the cost to reach a node\n if cost_to_point[one_node] < min_val:\n min_val = cost_to_point[one_node]\n \n for one_node in fringe_node:\n # Compute the cost to reach a node\n if cost_to_point[one_node] == min_val:\n nodes_to_expand.add(one_node)\n fringe_node.remove(one_node)\n\n # Expand the fringe node \n for one_node in nodes_to_expand:\n path_to_parent = path_to_point[one_node]\n for nxt_node in problem.getSuccessors(one_node):\n pos = nxt_node[0]\n mv = nxt_node[1]\n # check if point already present in path to point\n prev_cost = float(\"inf\")\n if pos in cost_to_point:\n prev_cost = cost_to_point[pos]\n new_path = path_to_parent + [mv]\n if prev_cost > problem.getCostOfActions(new_path):\n path_to_point[pos] = new_path\n cost_to_point[pos] = problem.getCostOfActions(new_path)\n fringe_node.append(pos)\n\n # Check if destination is reached in the fringe 
node\n for one_node in fringe_node:\n if problem.isGoalState(one_node):\n final_node = one_node\n goal_found = True\n break\n \n #print(len(fringe_node))\n print(final_node)\n print(path_to_point[final_node])\n return path_to_point[final_node] \n\n util.raiseNotDefined()", "def mincost(L,m,n):\n # find the length of the strings\n # declaring the array for storing the dp values\n tc = [[0]*(n + 1) for i in range(m + 1)]\n tc[0][0]=L[0][0]\n for i in range(1,m + 1):\n tc[i][0]=tc[i-1][0]+L[i][0]\n for j in range(1,n + 1):\n tc[0][j]=tc[0][j-1]+L[0][j]\n for i in range(1,m+1):\n for j in range(1,n+1):\n tc[i][j]=min(tc[i-1][j-1],tc[i-1][j],tc[i][j-1])+L[i][j]\n\n\n # L[m][n] contains the length of LCS of X[0..n-1] & Y[0..m-1]\n return tc[m][n]", "def sol_to_integer ( S, D, R, A, rows = None, cols = None, verbose = False ) :\n\n\tn_vars = len(S)\n\t(c0,max0,mean0) = sol_cost(S,D,R)\n\t\n\tintS = array([0]* (n_vars))\n\tfor j in range(n_vars) :\n\t\tif S[j] < D[j] :\n\t\t\tintS[j] = int(ceil(S[j]))\n\t\telse :\n\t\t\tintS[j] = int(floor(S[j]))\n\t\t\t\t\t\t\n\t(c1,max1,mean1) = sol_cost(intS,D,R)\n\t\n\n\t########## FIRST HEURISTIC #######################################\n\t\n\tintS2 = intS.copy() # solution with first heuristics\n\n\t# no displacements are initially fixed\n\tfixed = array([False]* n_vars)\n\tcomplete = array([False]* n_constraints)\n\t\n\tif verbose :\n\t\tdraw_algo_state(rows,cols,fixed,complete)\t\n\t\t\t\n\t# indices of displacements in decreasing order of reliability\n\tsorted_inds = argsort(-R)\n\t\n\t# complete remaining square in order of less reliable displacements not already set\n\tfor j in range(n_vars) :\n\t\tif not fixed[sorted_inds[j]] :\n\t\t\tconstr_inds = (where(abs(A[:,sorted_inds[j]]) == 1))[0] # indices of constraints containing the displacement considered \n\t\t\tind = constr_inds[0] # adjust the first constraint (all other constraints have at least another displacement not fixed)\n\t\t\terr = dot(A[ind],intS2)\n\t\t\tvars_inds = (where(abs(A[ind]) == 1))[0] # indices of displacements in constraint ind\n\t\t\ttemp_inds = vars_inds[fixed[vars_inds] == False]\n\t\t\tmodi = temp_inds[R[temp_inds] == min(R[temp_inds])][0] # index of displacement not set yet\n\t\t\tif A[ind][modi] > 0 :\n\t\t\t\tintS2[modi] -= err\n\t\t\telse :\n\t\t\t\tintS2[modi] += err\n\t\t\tfixed[vars_inds] = True\n\t\t\tcomplete[ind] = True\n\n\t\t\tif verbose :\n\t\t\t\tdraw_algo_state(rows,cols,fixed,complete)\t\n\t\t\t\n\t\t\t# complete all squares that miss just one displacement\n\t\t\tstop = False\n\t\t\twhile not stop :\n\t\t\t\tstop = True\n\t\t\t\tfor i in range(n_constraints) :\n\t\t\t\t\tif not complete[i] :\n\t\t\t\t\t\tvars_inds = (where(abs(A[i]) == 1))[0] # indices of displacements in constraint i\n\t\t\t\t\t\tif len(where(fixed[vars_inds] == True)[0]) == 3 : # just one displacement is missing\n\t\t\t\t\t\t\terr = dot(A[i],intS2)\n\t\t\t\t\t\t\tmodi = vars_inds[(where(fixed[vars_inds] == False))[0][0]] # index of displacement not set yet\n\t\t\t\t\t\t\tif A[i][modi] > 0 :\n\t\t\t\t\t\t\t\tintS2[modi] -= err\n\t\t\t\t\t\t\telse :\n\t\t\t\t\t\t\t\tintS2[modi] += err\n\t\t\t\t\t\t\tfixed[modi] = True\n\t\t\t\t\t\t\tcomplete[i] = True\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tif verbose :\n\t\t\t\t\t\t\t\tdraw_algo_state(rows,cols,fixed,complete)\t\n\t\t\t\n\t\t\t\t\t\t\tstop = False # one more displacement has been set: check for other squares to complete\n\t\t\t\n\tif verbose :\n\t\tdraw_algo_state(rows,cols,fixed,complete)\t\n\t\n\tcomplete = array([True]* n_constraints)\n\tif 
check_constraints(A,intS2,complete) :\n\t\t#raise ValueError('')\n\t\t(c2,max2,mean2) = sol_cost(intS2,D,R)\n\telse :\n\t\t(c2,max2,mean2) = (float('inf'),float('inf'),float('inf'))\n\n\n\t########## SECOND HEURISTIC #######################################\n\n\tintS3 = intS.copy() # solution with second heuristics\n\n\t# no displacements are initially fixed\n\tfixed = array([False]* n_vars)\n\n\tcomplete_bool = array([False]* n_constraints)\n\n\tincomplete = set(range(n_constraints)) # constraints indices to be completed\n\tcomplete = set([]) # constraints indices completed\n\tR_squares = array([0.0]* n_constraints) # maximum displacement reliability of constraints \n\t\n\tfor i in range(n_constraints) :\n\t\tvars_inds = (where(abs(A[i]) == 1))[0] # indices of displacements in constraint i\n\t\tR_squares[i] = sort(R[vars_inds])[len(R[vars_inds])-1] # set maximum displacement reliability of constraint i\n\t\t\n\t# indices of constraints in decreasing order of reliability\n\tsorted_inds = argsort(-R_squares)\n\t\n\t# complete the square with highest reliability\n\terr = dot(A[sorted_inds[0]],intS3)\n\tvars_inds = (where(abs(A[sorted_inds[0]]) == 1))[0] # indices of displacements in constraint with highest reliability\n\ttemp_inds = vars_inds[fixed[vars_inds] == False]\n\tmodi = temp_inds[R[temp_inds] == min(R[temp_inds])][0] # index of displacement not set yet\n\tif A[sorted_inds[0]][modi] > 0 :\n\t\tintS3[modi] -= err\n\telse :\n\t\tintS3[modi] += err\n\tfixed[vars_inds] = True\n\tcomplete |= set([sorted_inds[0]])\n\tincomplete -= set([sorted_inds[0]])\n\n\tif verbose :\n\t\tcomplete_bool[list(complete)] = True\n\t\tdraw_algo_state(rows,cols,fixed,complete_bool)\t\n\t\t\t\n\twhile incomplete <> set([]) :\n\t\tfor k in range(1,n_constraints) :\n\t\t\tsq = sorted_inds[k] # candidate constraint to be completed\n\t\t\tif ( sq in incomplete) :\n\t\t\t\tvars_inds = (where(abs(A[sq]) == 1))[0] # indices of displacements in constraint sq\n\t\t\t\tadjacent = False\t\t\t\t\n\t\t\t\tfor j in vars_inds :\n\t\t\t\t\tconstr_inds = (where(abs(A[:,j]) == 1))[0] # indices of constraints containing the displacement considered (and adjacent to constraint sq) \n\t\t\t\t\tif (set(constr_inds) & complete) <> set([]) : # al least one of the adjacent contraint is complete\n\t\t\t\t\t\tadjacent = True\n\t\t\t\t\t\tbreak\n\t\t\t\tif adjacent :\n\t\t\t\t\tbreak\n\t\t# sq is the next squares adjacent to already completed squares with the highest reliability\n\t\t\n\t\t# complete sq\n\t\terr = dot(A[sq],intS3)\n\t\ttemp_inds = vars_inds[fixed[vars_inds] == False]\n\t\tmodi = temp_inds[R[temp_inds] == min(R[temp_inds])][0] # index of displacement not set yet\n\t\tif A[sq][modi] > 0 :\n\t\t\tintS3[modi] -= err\n\t\telse :\n\t\t\tintS3[modi] += err\n\t\tfixed[vars_inds] = True\n\t\tcomplete |= set([sq])\n\t\tincomplete -= set([sq])\n\t\t\n\t\tif verbose :\n\t\t\tcomplete_bool[list(complete)] = True\n\t\t\tdraw_algo_state(rows,cols,fixed,complete_bool)\t\n\t\t\t\n\t\t# complete all squares that miss just one displacement\n\t\tstop = False\n\t\twhile not stop :\n\t\t\tstop = True\n\t\t\tfor i in range(n_constraints) :\n\t\t\t\tif i in incomplete :\n\t\t\t\t\tvars_inds = (where(abs(A[i]) == 1))[0] # indices of displacements in constraint i\n\t\t\t\t\tif len(where(fixed[vars_inds] == True)[0]) == 3 : # just one displacement is missing\n\t\t\t\t\t\terr = dot(A[i],intS3)\n\t\t\t\t\t\tmodi = vars_inds[(where(fixed[vars_inds] == False))[0][0]] # index of displacement not set yet\n\t\t\t\t\t\tif A[i][modi] > 0 
:\n\t\t\t\t\t\t\tintS3[modi] -= err\n\t\t\t\t\t\telse :\n\t\t\t\t\t\t\tintS3[modi] += err\n\t\t\t\t\t\tfixed[modi] = True\n\t\t\t\t\t\tcomplete |= set([i])\n\t\t\t\t\t\tincomplete -= set([i])\n\t\t\t\t\t\t\n\t\t\t\t\t\tif verbose :\n\t\t\t\t\t\t\tcomplete_bool[list(complete)] = True\n\t\t\t\t\t\t\tdraw_algo_state(rows,cols,fixed,complete_bool)\t\n\t\t\n\t\t\t\t\t\tstop = False # one more displacement has been set: check for other squares to complete\n\t\n\tcomplete_bool[list(complete)] = True\n\tif check_constraints(A,intS3,complete_bool) :\n\t\t#raise ValueError('')\n\t\t(c3,max3,mean3) = sol_cost(intS3,D,R)\n\telse :\n\t\t(c3,max3,mean3) = (float('inf'),float('inf'),float('inf'))\n\t\t\n\n\t########## THIRD HEURISTICS #######################################\n\t\n\tintS4 = intS.copy() # solution with second heuristics\n\t\n\t\n\t# find the order in which the constraints have to be processed\n\n\tincomplete = set(range(n_constraints)) # constraints indices to be completed\n\tcomplete = set([]) # constraints indices completed\n\tR_squares = array([0.0]* n_constraints) # maximum displacement reliability of constraints \n\t\n\tfor i in range(n_constraints) :\n\t\tvars_inds = (where(abs(A[i]) == 1))[0] # indices of displacements in constraint i\n\t\tR_squares[i] = sort(R[vars_inds])[len(R[vars_inds])-1] # set maximum displacement reliability of constraint i\n\t\t\n\t# indices of constraints in decreasing order of reliability\n\tsorted_inds = argsort(-R_squares)\n\t\n\ti0 = sorted_inds[0]/(cols-1)\n\ti1 = sorted_inds[0]/(cols-1)\n\tj0 = sorted_inds[0]%(cols-1)\n\tj1 = sorted_inds[0]%(cols-1)\n\tcomplete |= set([sorted_inds[0]])\n\tincomplete -= set([sorted_inds[0]])\n\tconstr_order = array([sorted_inds[0]])\n\twhile incomplete <> set([]) :\n\t\tconstr_inds = array([], dtype=int)\n\t\tup = False\n\t\tdown = False\n\t\tleft = False\n\t\tRight = False\t\n\t\tif i0 > 0 :\n\t\t\tf = (i0-1)*(cols-1) + j0\n\t\t\tl = f + (j1-j0+1)\n\t\t\tconstr_inds = append(constr_inds,array(range(f,l)))\n\t\t\tup = True\n\t\tif i1 < (rows-2) :\n\t\t\tf = (i1+1)*(cols-1) + j0\n\t\t\tl = f + (j1-j0+1)\n\t\t\tconstr_inds = append(constr_inds,array(range(f,l)))\n\t\t\tdown = True\n\t\tif j0 > 0 :\n\t\t\tf = i0*(cols-1) + j0 - 1\n\t\t\tl = (i1+1)*(cols-1) + j0 - 1\n\t\t\tconstr_inds = append(constr_inds,array(range(f,l,(cols-1))))\n\t\t\tleft = True\n\t\tif j1 < (cols-2) :\n\t\t\tf = i0*(cols-1) + j1 + 1\n\t\t\tl = (i1+1)*(cols-1) + j1 + 1\n\t\t\tconstr_inds = append(constr_inds,array(range(f,l,(cols-1))))\n\t\t\tright = True\n\t\tif up and left :\n\t\t\tconstr_inds = append(constr_inds,array([(i0-1)*(cols-1) + j0 - 1]))\n\t\tif up and right :\n\t\t\tconstr_inds = append(constr_inds,array([(i0-1)*(cols-1) + j1 + 1]))\n\t\tif down and left :\n\t\t\tconstr_inds = append(constr_inds,array([(i1+1)*(cols-1) + j0 - 1]))\n\t\tif down and right :\n\t\t\tconstr_inds = append(constr_inds,array([(i1+1)*(cols-1) + j1 + 1]))\n\t\tif up :\n\t\t\ti0 -= 1\n\t\tif down :\n\t\t\ti1 += 1\n\t\tif left :\n\t\t\tj0 -= 1\n\t\tif right :\n\t\t\tj1 += 1\n\t\tcomplete |= set(constr_inds)\n\t\tincomplete -= set(constr_inds)\n\t\tconstr_order = append(constr_order,constr_inds[argsort(-R_squares[constr_inds])])\t\n\n\n\t# process the contraints\n\n\t# no displacements are initially fixed\n\tfixed = array([False]* n_vars)\n\n\tcomplete_bool = array([False]* n_constraints)\n\n\tincomplete = set(range(n_constraints)) # constraints indices to be completed\n\tcomplete = set([]) # constraints indices completed\n\n\t# complete the square with highest 
reliability\n\terr = dot(A[constr_order[0]],intS4)\n\tvars_inds = (where(abs(A[sorted_inds[0]]) == 1))[0] # indices of displacements in constraint with highest reliability\n\ttemp_inds = vars_inds[fixed[vars_inds] == False]\n\tmodi = temp_inds[R[temp_inds] == min(R[temp_inds])][0] # index of displacement not set yet\n\tif A[constr_order[0]][modi] > 0 :\n\t\tintS4[modi] -= err\n\telse :\n\t\tintS4[modi] += err\n\tfixed[vars_inds] = True\n\tcomplete |= set([constr_order[0]])\n\tincomplete -= set([constr_order[0]])\n\n\tif verbose :\n\t\tcomplete_bool[list(complete)] = True\n\t\tdraw_algo_state(rows,cols,fixed,complete_bool)\t\n\t\t\t\n\twhile incomplete <> set([]) :\n\t\tfor k in range(1,n_constraints) :\n\t\t\tsq = constr_order[k] # candidate constraint to be completed\n\t\t\tif ( sq in incomplete) :\n\t\t\t\tvars_inds = (where(abs(A[sq]) == 1))[0] # indices of displacements in constraint sq\n\t\t\t\tbreak\n\t\t# sq is the next squares adjacent to already completed squares with the highest reliability\n\t\t\n\t\t# complete sq\n\t\terr = dot(A[sq],intS4)\n\t\ttemp_inds = vars_inds[fixed[vars_inds] == False]\n\t\tmodi = temp_inds[R[temp_inds] == min(R[temp_inds])][0] # index of displacement not set yet\n\t\tif A[sq][modi] > 0 :\n\t\t\tintS4[modi] -= err\n\t\telse :\n\t\t\tintS4[modi] += err\n\t\tfixed[vars_inds] = True\n\t\tcomplete |= set([sq])\n\t\tincomplete -= set([sq])\n\t\t\n\t\tif verbose :\n\t\t\tcomplete_bool[list(complete)] = True\n\t\t\tdraw_algo_state(rows,cols,fixed,complete_bool)\t\n\t\t\t\n\t\t# complete all squares that miss just one displacement\n\t\tstop = False\n\t\twhile not stop :\n\t\t\tstop = True\n\t\t\tfor i in range(n_constraints) :\n\t\t\t\tif i in incomplete :\n\t\t\t\t\tvars_inds = (where(abs(A[i]) == 1))[0] # indices of displacements in constraint i\n\t\t\t\t\tif len(where(fixed[vars_inds] == True)[0]) == 3 : # just one displacement is missing\n\t\t\t\t\t\terr = dot(A[i],intS4)\n\t\t\t\t\t\tmodi = vars_inds[(where(fixed[vars_inds] == False))[0][0]] # index of displacement not set yet\n\t\t\t\t\t\tif A[i][modi] > 0 :\n\t\t\t\t\t\t\tintS4[modi] -= err\n\t\t\t\t\t\telse :\n\t\t\t\t\t\t\tintS4[modi] += err\n\t\t\t\t\t\tfixed[modi] = True\n\t\t\t\t\t\tcomplete |= set([i])\n\t\t\t\t\t\tincomplete -= set([i])\n\t\t\t\t\t\t\n\t\t\t\t\t\tif verbose :\n\t\t\t\t\t\t\tcomplete_bool[list(complete)] = True\n\t\t\t\t\t\t\tdraw_algo_state(rows,cols,fixed,complete_bool)\t\n\t\t\n\t\t\t\t\t\tstop = False # one more displacement has been set: check for other squares to complete\n\t\n\tcomplete_bool[list(complete)] = True\n\tif check_constraints(A,intS4,complete_bool) :\n\t\t#raise ValueError('')\n\t\t(c4,max4,mean4) = sol_cost(intS4,D,R)\n\telse :\n\t\t(c4,max4,mean4) = (float('inf'),float('inf'),float('inf'))\n\n\t########## FINAL RESULT #######################################\n\t\n\tif c3 < c2 :\n\t\tif c3 < c4 :\n\t\t\t#print 'The second heuristics gives the best result'\n\t\t\tintS2 = intS3\n\t\t\t(c2,max2,mean2) = (c3,max3,mean3)\n\t\telse :\n\t\t\t#print 'The third heuristics gives the best result'\n\t\t\tintS2 = intS4\n\t\t\t(c2,max2,mean2) = (c4,max4,mean4)\n\telif c4 < c2 :\n\t\t#print 'The third heuristics gives the best result'\n\t\tintS2 = intS4\n\t\t(c2,max2,mean2) = (c4,max4,mean4)\n\telse :\n\t\t#print 'The first heuristics gives the best result'\n\t\tpass\n\t\t\t\t\n\treturn (c0,max0,mean0,c1,max1,mean1,intS2,c2,max2,mean2)", "def cost_state_old(s,state_considered,L,Q,gamma):\n if s==s.goal:\n return 0\n model=Model(\"trajectory of polytopes\")\n p={}\n for row in 
range(s.n):\n p[row]=model.addVar(lb=-1,ub=1)\n model.update()\n GLG=np.dot(state_considered.G.T,np.dot(L,state_considered.G))\n theta=state_considered.successor[2]\n u=state_considered.successor[1]\n i=state_considered.mode\n theta_Q_theta=np.dot(theta.T,np.dot(Q,theta))\n J=QuadExpr()\n for row in range(s.n):\n for k in range(s.n):\n J.add(p[row]*p[k]*GLG[row,k]+p[row]*p[k]*theta_Q_theta[row,k])\n model.setParam('OutputFlag',False)\n model.setObjective(J)\n model.optimize()\n return model.ObjVal+np.asscalar(np.dot(state_considered.x.T,np.dot(L,state_considered.x))+np.dot(u.T,np.dot(Q,u))+gamma)", "def solution(s, v, dist, pred):\n path = [v]\n total = dist[v]\n while v != s:\n v = pred[v]\n path.insert(0, v)\n return \"length=\" + str(total) + \" \" + str(path)", "def shortest_path(self, id1: int, id2: int) -> (float, list):\n\n if id1 == id2:\n return 0, [id1]\n if id1 not in self.dw_graph.nodes or id2 not in self.dw_graph.nodes:\n return math.inf, []\n\n for n in self.dw_graph.get_all_v().values(): # Set all distance to be max value.\n if n.node_id != id1:\n n.distance = sys.maxsize\n n.visited = 0\n path = []\n min_heap=[]\n self.dw_graph.nodes[id1].distance = 0\n heapq.heappush(min_heap,(self.dw_graph.nodes[id1].distance,self.dw_graph.nodes[id1]))\n\n while len(min_heap):\n node = heapq.heappop(min_heap) # pop the smallest item\n current = node[1] # Get node from tuples\n current.visited = 1 # Set the node to visited\n\n for neighbour in self.dw_graph.all_out_edges_of_node(current.node_id).values(): # Get neighbours\n if self.dw_graph.nodes[neighbour.dest].visited == 0: # if we didn't visit this neighbour\n new_dist = current.distance + neighbour.weight # Set new distance\n\n if self.dw_graph.nodes[neighbour.dest].distance > new_dist: # If new distance is smaller , update it.\n self.dw_graph.nodes[neighbour.dest].distance = new_dist\n\n heapq.heappush(min_heap,(self.dw_graph.nodes[neighbour.dest].distance,\n self.dw_graph.nodes[neighbour.dest])) # add to priority queue\n self.dw_graph.nodes[neighbour.dest].parent = current.node_id # Update parent\n\n if self.dw_graph.nodes[id2].distance == sys.maxsize: # if the distance is still max value , can't reach\n return math.inf, []\n\n\n path.append(id2)\n current = self.dw_graph.nodes[id2].parent\n self.dw_graph.nodes[id1].parent=-1\n while current != -1: # Traverse backwards until parent is -1\n path.append(current)\n current = self.dw_graph.nodes[current].parent\n path.reverse()\n return self.dw_graph.nodes[id2].distance, path" ]
[ "0.73995465", "0.73801374", "0.6814434", "0.6594097", "0.64776325", "0.619143", "0.609329", "0.60840875", "0.60799897", "0.6048444", "0.6044331", "0.60185933", "0.60175085", "0.5949434", "0.5947762", "0.59321314", "0.59200174", "0.5913112", "0.5887984", "0.5882185", "0.58720636", "0.5852649", "0.58412206", "0.5835311", "0.58299834", "0.5814858", "0.57961667", "0.5795014", "0.57873243", "0.57871675" ]
0.7894295
0
This function is to find the optimal path from S to D with constraint L by combining two concave metrics with a concave function
def Option3_routing(self, S, D, L): if self.has_path(S, D): Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w') Opt_path = Shortest_path path_cost_with_concave_function = self.calculate_path_cost_with_concave_function(Shortest_path, 'c1', 'c2') return path_cost_with_concave_function, Opt_path while len(Shortest_path) != 0: path_cost = self.additive_path_cost(Shortest_path, 'w') #self.logger.info('Path cost - %d', path_cost) if path_cost <= L: """go to path cost with weighted sum""" path_cost_with_concave_function = self.calculate_path_cost_with_concave_function(Shortest_path, 'c1', 'c2') self.G = self.rm_edge_constraint(path_cost_with_concave_function) # remove all links where the concave link is greater than PathConcave_cost Opt_path = Shortest_path if self.has_path(S, D): Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w') else: Shortest_path = [] else: break else: self.logger.info('No path from %s to %s', S, D) Opt_path = [] path_cost_with_concave_function = 0 return path_cost_with_concave_function, Opt_path
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Optimum_prun_based_routing(self, S, D, L):\n if self.has_path(S, D):\n \n Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w') \n Opt_path = Shortest_path\n PathConcave_cost = self.max_path_cost(Shortest_path, 'c1') \n while len(Shortest_path) != 0:\n path_cost = self.additive_path_cost(Shortest_path, 'w') \n if path_cost <= L:\n \"\"\"go to concave cost\"\"\"\n PathConcave_cost = self.max_path_cost(Shortest_path, 'c1') \n self.G = self.rm_edge_constraint(PathConcave_cost) # remove all links where the concave link is greater than PathConcave_cost\n \n Opt_path = Shortest_path\n if self.has_path(S, D):\n Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w')\n else:\n Shortest_path = [] \n else:\n break \n else:\n self.logger.info('No path from %s to %s', S, D)\n PathConcave_cost = 0\n Opt_path = []\n return PathConcave_cost, Opt_path", "def Option2_routing(self, S, D, L):\n if self.has_path(S, D): \n Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w') \n Opt_path = Shortest_path\n path_cost_with_weighted_sum = self.calculate_path_cost_with_weighted_sum(Shortest_path, 'c1', 'c2')\n return path_cost_with_weighted_sum, Opt_path\n\n while len(Shortest_path) != 0:\n path_cost = self.additive_path_cost(Shortest_path, 'w') \n #self.logger.info('Path cost - %d', path_cost)\n if path_cost <= L:\n \"\"\"go to path cost with weighted sum\"\"\"\n path_cost_with_weighted_sum = self.calculate_path_cost_with_weighted_sum(Shortest_path, 'c1', 'c2')\n self.G = self.rm_edge_constraint(path_cost_with_weighted_sum) # remove all links where the concave link is greater than PathConcave_cost \n Opt_path = Shortest_path\n if self.has_path(S, D):\n Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w')\n else:\n Shortest_path = [] \n else:\n break \n else:\n self.logger.info('No path from %s to %s', S, D)\n Opt_path = []\n path_cost_with_weighted_sum = 0\n return path_cost_with_weighted_sum, Opt_path", "def calculate_path_cost_with_concave_function(self, path, attr1, attr2): \n c1 = max([self.G[path[i]][path[i+1]][attr1] for i in range(len(path)-1)])\n c2 = max([self.G[path[i]][path[i+1]][attr2] for i in range(len(path)-1)]) \n return max([c1,c2])", "def method2(self):\n cres=np.zeros(self.NL,dtype=float) # List of invariants\n # The U matrices from Fukui's method; storage...\n Ux_loc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n Uy_loc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n \n for il in range(self.NL):\n # ... 
and calculation of U matrices for each layer\n for ix in range(self.kS.Nx+1):\n for iy in range(self.kS.Ny+1):\n mat1=self.LDM[il,ix ,iy ,:,:]\n mat2=self.LDM[il,(ix%self.kS.Nx)+1 ,iy ,:,:]\n mat3=self.LDM[il,ix ,(iy%self.kS.Ny)+1 ,:,:]\n \n Ux_loc[ix,iy]=np.dot(np.conj(mat1.T),mat2)[1,1]\n Uy_loc[ix,iy]=np.dot(np.conj(mat1.T),mat3)[1,1]\n \n for ix in range(self.kS.Nx):\n for iy in range(self.kS.Ny):\n ftemp=np.log(Ux_loc[ix,iy]*Uy_loc[ix+1,iy]/Ux_loc[ix,iy+1]/Uy_loc[ix,iy])\n cres[il]+=(ftemp/2./pi/1j).real # Layer specific topological invariant\n \n return cres", "def find_lcs(l1: str, l2: str, length1: int, length2: int):\n \"\"\" Theorem:{\n Initialize matrix with 0 for first row and colm\n If s1[i] = s2[j], update matrix[i][j] with value\n of matrix[i-1][j-1]+1\n Else update matrix[i][j] with max of value among\n matrix[i][j-1],matrix[i-1][j]\n Matrix[n][m] will be lcs\n }\n \"\"\"\n matrix = [[None]*(length1+1) for i in range(0, length2+1)]\n for i in range(0, length2+1):\n for j in range(0, length1+1):\n if i == 0 or j == 0:\n matrix[i][j] = 0\n elif l1[j-1] == l2[i-1]:\n matrix[i][j] = matrix[i-1][j-1] + 1\n else:\n matrix[i][j] = max(matrix[i][j-1], matrix[i-1][j])\n lcs = [None for i in range(0, matrix[length2][length1])]\n index = matrix[length2][length1]\n m = length2 \n n = length1\n while(m > -1 and n > -1):\n if l2[m-1] == l1[n-1]:\n lcs[index-1] = l2[m-1]\n index -= 1\n m -= 1\n n -= 1\n elif matrix[m-1][n] > matrix[m][n-1]:\n m -= 1\n else:\n n -= 1\n return lcs", "def concave_piece(x,k_ind,m_ind):\n line_start=cfg.nfea*sum(cfg.jk[i] for i in range(k_ind))\n f_cc=np.dot(x[line_start:line_start+(cfg.nfea-1)],cfg.a[m_ind,:cfg.nfea-1])+x[line_start+cfg.nfea-1]\n cfg.min_line[k_ind,m_ind] = 0 # a global variable to save the smallest value.\n \n # next lines\n line_start += cfg.nfea\n for j in range(1,cfg.jk[k_ind]): # jk is ok, range does not take the limit itself but jk-1.\n f_tmp = np.dot(x[line_start:line_start+(cfg.nfea-1)],cfg.a[m_ind,:cfg.nfea-1])+x[line_start+cfg.nfea-1]\n \n # Minimum of lines\n if f_tmp < f_cc:\n f_cc = f_tmp\n cfg.min_line[k_ind,m_ind] = j\n line_start += cfg.nfea\n \n return f_cc", "def mincost(L,m,n):\n # find the length of the strings\n # declaring the array for storing the dp values\n tc = [[0]*(n + 1) for i in range(m + 1)]\n tc[0][0]=L[0][0]\n for i in range(1,m + 1):\n tc[i][0]=tc[i-1][0]+L[i][0]\n for j in range(1,n + 1):\n tc[0][j]=tc[0][j-1]+L[0][j]\n for i in range(1,m+1):\n for j in range(1,n+1):\n tc[i][j]=min(tc[i-1][j-1],tc[i-1][j],tc[i][j-1])+L[i][j]\n\n\n # L[m][n] contains the length of LCS of X[0..n-1] & Y[0..m-1]\n return tc[m][n]", "def smooth_input(xs, ys, L):\n n = len(xs)\n\n # obj = [1 for i in range(n)]\n # for i in range(2 * n):\n # obj.append(0)\n\n # Create the model\n model = LpProblem(name=\"small-problem\", sense=LpMinimize)\n ws = [LpVariable(name=\"w_{}\".format(i), lowBound=0, upBound=1) for i in range(n)]\n ls = [LpVariable(name=\"L_{}\".format(i), lowBound=0) for i in range(n)]\n zs = [LpVariable(name=\"z_{}\".format(i)) for i in range(n)]\n\n # objective\n model += lpSum(ws)\n\n # constraint 1:\n # sum of Li <= L\n model += (lpSum(ls) <= L * n, \"sum of Li <= L\")\n\n # Constraint 2:\n # w_i >= |z_i - y_i|\n for i in range(n):\n model += (ws[i] + zs[i] >= ys[i], \"C2.a_{}\".format(i))\n model += (ws[i] - zs[i] >= -ys[i], \"C2.b_{}\".format(i))\n\n # Constraint 3\n # |z_i - z_j| <= L_i * dist(x_i, x_j)\n for i in range(n):\n for j in range(n):\n if i != j:\n model += (zs[i] - zs[j] - abs(xs[i] - xs[j]) * ls[i] <= 
0, \"C3.a_{}_{}\".format(i, j))\n model += (zs[j] - zs[i] - abs(xs[i] - xs[j]) * ls[i] <= 0, \"C3.b_{}_{}\".format(i, j))\n\n if model.solve() == 1:\n print(\n \"------------------------------------\\nFound solution for the linear program\\n------------------------------------\\n\")\n return [[xs[i], zs[i].value()] for i in range(n)]\n # return [zi.value() for zi in zs], [li.value() for li in ls]\n\n print(\"Linear program: no solution found\")\n exit(1)\n return -1", "def optimal_path(T, C, r, c):\n seam_path = [0] * (r + c)\n k = r + c - 1\n while k >= 0:\n seam_path[k] = C[r,c]\n T[r,c] = None\n k -= 1\n if C[r,c] == 0:\n r = r-1\n else:\n c = c-1\n assert r == 0 and c == 0\n return seam_path", "def generate_connectivity_constraint(problem, b_list, add_S):\n\n # Constructing A_iq and b_iq for inequality (38) for all S in add_S as sp.coo matrix\n A_iq_row = []\n A_iq_col = []\n A_iq_data = []\n\n constraint_idx = 0\n # For each base\n for b, S_v_t in product(b_list, add_S):\n pre_S_transition = problem.graph.pre_tran_vt(S_v_t)\n pre_S_connectivity = problem.graph.pre_conn_vt(S_v_t)\n for v, t in S_v_t:\n # add y\n A_iq_row.append(constraint_idx)\n A_iq_col.append(problem.get_yb_idx(b, v, t))\n A_iq_data.append(1)\n for v0, v1, t0 in pre_S_transition:\n A_iq_row.append(constraint_idx)\n A_iq_col.append(problem.get_x_idx(b, v0, v1, t0))\n A_iq_data.append(-1)\n for v0, v1, t1 in pre_S_connectivity:\n A_iq_row.append(constraint_idx)\n A_iq_col.append(problem.get_xbar_idx(b, v0, v1, t1))\n A_iq_data.append(-1)\n constraint_idx += 1\n A_iq_38 = sp.coo_matrix(\n (A_iq_data, (A_iq_row, A_iq_col)), shape=(constraint_idx, problem.num_vars)\n )\n\n return Constraint(A_iq=A_iq_38, b_iq=np.zeros(constraint_idx))", "def combined_costs(matrix_MSLL_IO):\r\n return", "def solver(graph,homes,source,home_clusters,all_pairs_distances,all_pairs_shortest_paths):\n\n car_path = [get_car_path(graph,home_clusters,source,all_pairs_distances,all_pairs_shortest_paths, \n source_in_clusters = B1, christofides = B2) for B1 in [False,True] for B2 in [False,True]]\n\n dropoffs = [cluster_solver_utils.nearest_dropoff_efficient(graph,path,homes,all_pairs_distances) for path in car_path]\n cost = [cluster_solver_utils.eval_cost_efficient(graph,car_path[i],dropoffs[i],all_pairs_distances) for i in range(len(car_path))]\n\n minimum_cost = min(cost)\n idx = cost.index(minimum_cost)\n\n return minimum_cost, dropoffs[idx], car_path[idx]", "def find_ld(s, t, c=(1, 1, 1)):\n\n # output string templates\n pos_str = \"Position: row={} col={}\"\n let_str = \"Letters: {} -> {}\"\n op_str = \"Operation: {}, Cost: {}\"\n cst_str = \" {} ({},{}) cost: {} value: {}\"\n\n # keep the shape of the matrix consistent. 
The algorithm is symmetric\n if len(s) < len(t):\n return find_ld(t, s)\n\n # null inputs\n if len(t) == 0:\n return len(s), []\n\n rows = len(s) + 1\n cols = len(t) + 1\n row = rows\n col = cols\n\n # get costs from input tuple\n d_cost, i_cost, s_cost = c\n\n # setup blank matrices to hold distances and operations\n dist = [[0 for x in range(cols)] for x in range(rows)]\n\n # setup delete costs\n for row in range(1, rows):\n dist[row][0] = row * d_cost\n\n # setup insert costs\n for col in range(1, cols):\n dist[0][col] = col * i_cost\n\n # print initial matrix\n if verbose == 2:\n print()\n print(\"Initial Matrix:\")\n print_matrix(s, t, dist)\n\n for col in range(1, cols):\n for row in range(1, rows):\n\n if verbose == 2:\n print(pos_str.format(str(row), str(col)))\n print(let_str.format(str(s[row - 1]), str(t[col - 1])))\n\n # determine costs\n del_cost = dist[row - 1][col] + d_cost\n if verbose == 2:\n print(cst_str.format(\"delete\", str(row - 1), str(col), str(d_cost), str(del_cost)))\n\n ins_cost = dist[row][col - 1] + i_cost\n if verbose == 2:\n print(cst_str.format(\"insert\", str(row), str(col - 1), str(i_cost), str(ins_cost)))\n\n # sub cost could be 0 if letters are the same\n if s[row - 1] == t[col - 1]:\n sub_cost = dist[row - 1][col - 1] + 0\n if verbose == 2:\n print(cst_str.format(\"substitute (no change)\", str(row - 1), str(col - 1), \"0\", str(sub_cost)))\n else:\n sub_cost = dist[row - 1][col - 1] + s_cost\n if verbose == 2:\n print(cst_str.format(\"substitute\", str(row - 1), str(col - 1), str(s_cost), str(sub_cost)))\n\n # determine least costly operation\n if del_cost == min(del_cost, ins_cost, sub_cost):\n dist[row][col] = del_cost\n if verbose == 2:\n print(op_str.format('delete', del_cost))\n\n elif ins_cost == min(del_cost, ins_cost, sub_cost):\n dist[row][col] = ins_cost\n if verbose == 2:\n print(op_str.format('insert', ins_cost))\n\n else:\n # sub_cost == min(del_cost, ins_cost, sub_cost):\n dist[row][col] = sub_cost\n if verbose == 2:\n print(op_str.format('substitute', sub_cost))\n\n # print matrix every iteration is verbose output is on\n if verbose == 2:\n print()\n print_matrix(s, t, dist)\n\n return dist[row][col], dist", "def cl_alm2d(alm1=None, alm2=None, lmax=100):\n if alm2 is None:\n alm2 = alm1\n cl = np.zeros(lmax+1)\n ls = np.arange(lmax+1)\n for l in ls:\n ms = np.arange(-l,l+1)\n \n cl[l] += ((alm1[l][ms]*np.conjugate(alm2[l][ms])).real).sum()/(2.*l+1.)\n return cl", "def geo_dist_penalty(p_a, p_b): # created on Nov.3 2019\n\n # Offset is 0 for the 1st destination.\n distance_matrix = Network.dist_mat\n if p_a[0] != p_b[0] or p_a[-1] != p_b[-1]:\n raise ValueError('Paths have different o or d.')\n\n # define the penalty in utility form for every two destinations. 
u_ik stands for the generalized cost of travel\n o, d = p_a[0], p_a[-1]\n\n path_a, path_b = p_a[1:-1], p_b[1:-1] # excluding origin and destination\n\n path_node_check = []\n for _path in [path_a, path_b]:\n _new_path = []\n for node in _path:\n if node <= min(distance_matrix.shape) - 1:\n _new_path.append(node)\n path_node_check.append(_new_path)\n path_a, path_b = path_node_check[0], path_node_check[1]\n\n # utility (negative) penalty evaluation\n cost, a, b = 0, o, o # let a, b be origin\n\n # if exist empty path\n if not path_a: # if observed path is empty\n return cost\n\n while path_a and path_b:\n a, b = path_a.pop(0), path_b.pop(0) # a, b correspond to the i_th node in path_a, path_b\n cost += distance_matrix[a][b]\n\n if path_a: # length of path_a > path b\n while path_a:\n a = path_a.pop(0)\n cost += distance_matrix[a][b]\n else: # case when length of path_b > path a\n while path_b:\n b = path_b.pop(0)\n cost += distance_matrix[a][b]\n return cost", "def create_cont_constraint_mat_separable(H,v1s,v2s,nSides,nConstraints,nC,\n dim_domain,dim_range,tess):\n if dim_domain != 2:\n raise ValueError\n if dim_range not in [1,2]:\n raise ValueError\n nHomoCoo=dim_domain+1 \n length_Avee = dim_range*nHomoCoo\n L1 = np.zeros((nConstraints/2,nC*nHomoCoo))\n\n \n\n nPtsInSide = 2 # Since, in 2D, the side is always a line joining 2 pts.\n# if nSides != nConstraints/(nPtsInSide*dim_domain):\n# raise ValueError(nSides,nConstraints)\n \n if nSides != nConstraints/(nPtsInSide*dim_range):\n print \" print nSides , nConstraints/(nPtsInSide*dim_range):\"\n print nSides , nConstraints/(nPtsInSide*dim_range)\n ipshell('stop')\n raise ValueError( nSides , (nConstraints,nPtsInSide,dim_range))\n\n \n if nSides != H.shape[0]:\n raise ValueError(nSides,H.shape)\n\n\n# M = nPtsInSide*dim_range\n M = nPtsInSide\n if dim_range == 1:\n raise NotImplementedError\n for i in range(nSides): \n v1 = v1s[i]\n v2 = v2s[i]\n \n h = H[i]\n a,b = h.nonzero()[0] # idx for the relevant As \n # s stands for start\n # e stands for end \n s1 = a*length_Avee \n e1 = s1+nHomoCoo \n s2 = b*length_Avee\n e2 = s2+nHomoCoo \n \n # Constraint 1: \n L[i*M,s1:e1]= v1 \n L[i*M,s2:e2]= -v1 \n # Constraint 2: \n L[i*M+1,s1:e1]= v2 \n L[i*M+1,s2:e2]= -v2 \n \n \n elif dim_range==2:\n for i in range(nSides): \n v1 = v1s[i]\n v2 = v2s[i]\n\n if np.allclose(v1,v2):\n raise ValueError(v1,v2)\n\n\n \n \n \n h = H[i]\n a,b = h.nonzero()[0] # idx for the relevant As \n \n\n # L1 is acting on columns of the following form:\n # [ a_1 b_1 c_1 d_1 a_2 b_2 c_2 d_2 ... 
a_Nc b_Nc c_Nc d_Nc] \n # s stands for start\n # e stands for end \n s1 = a*nHomoCoo\n e1 = s1+nHomoCoo \n s2 = b*nHomoCoo\n e2 = s2+nHomoCoo \n \n \n try: \n # Constraint 1: \n row = np.zeros(L1.shape[1])\n row[s1:e1]=v1\n row[s2:e2]=-v1 \n # x component \n L1[i*M]=row \n except:\n ipshell('fail')\n raise \n\n # Constraint 2: \n row = np.zeros(L1.shape[1])\n row[s1:e1]=v2\n row[s2:e2]=-v2 \n # x component \n L1[i*M+1]=row\n \n\n \n \n \n \n \n else:\n raise ValueError(dim_range)\n\n \n return L1", "def path(l_s, l_f, leg_list):\n # candidates =\n return min(heuristic_path([l_s], l_f, leg_list, []), key = len)", "def longest_common_subsequence(x, y):\n\n # find the length of the strings\n m = len(x)\n n = len(y)\n\n # declaring the array for storing the dp values\n lcs = np.zeros((m + 1, n + 1))\n\n # iterate through each sub problem\n for i in range(m + 1):\n for j in range(n + 1):\n if i == 0 or j == 0:\n lcs[i, j] = 0\n elif x[i - 1] == y[j - 1]:\n lcs[i, j] = lcs[i - 1, j - 1] + 1\n else:\n # use the optimal substructure property\n # of using already computed results previous subproblems\n lcs[i, j] = max(lcs[i - 1, j], lcs[i, j - 1])\n\n # L[m,n] contains the length of LCS of X[0..n-1] & Y[0..m-1]\n return lcs[m, n]", "def calc_cohesion( g, sg0, sg1, max_csize ) :\n score = 0.0\n n0 = len( sg0 )\n n1 = len( sg1 )\n if (n0 + n1 <= max_csize) :\n boundary_edges = networkx.edge_boundary( g, sg0, sg1 )\n for e in boundary_edges :\n score += g[e[0]][e[1]][\"similarity\"]\n return score / max( n0, n1 )", "def find_topo_order(s,graph):\n\n ## initialization\n matrix = graph.get_adjacency()\n n, c = matrix.shape\n sym_matrix = np.empty((n,c), dtype=object)\n # cost_matrix = np.zeros((n,c))\n cache = {}\n\n def symbolize(i,j):\n \"given two indices, create a symbolic variable\"\n s = z.Int('edge_{0}{1}'.format(i,j))\n return s\n\n\n def value_of(i,j):\n \"given two indices, return the (i,j)th value in the adjacency matrix\"\n return sym_matrix[i][j]\n\n\n def constraint_1(n,i,j,k):\n y_ij = value_of(i,j)\n y_jk = value_of(j,k)\n y_ik = value_of(i,k)\n\n name = \"c1\" + str((n,i,j,k))\n constraint = (y_ij + y_jk - y_ik) <= 1\n\n # if name not in cache:\n # cache[name] = constraint\n s.assert_and_track(constraint, name)\n\n\n def constraint_2(n,i,j,k):\n y_ij = value_of(i,j)\n y_jk = value_of(j,k)\n y_ik = value_of(i,k)\n\n name = \"c2\" + str((n,i,j,k))\n constraint = (-y_ij - y_jk + y_ik) <= 0\n\n # if name not in cache:\n # cache[name] = constraint\n s.assert_and_track(constraint, name)\n\n\n def constraint_3(symbolic):\n s.add(z.Or([symbolic == 0, symbolic == 1]))\n\n\n def int_formulation(j):\n left = z.Sum([matrix[k][j] * sym_matrix[k][j] for k in range(j)])\n right = z.Sum([matrix[l][j] * (1 - sym_matrix[j][l]) for l in range(j+1, n)])\n\n return [left, right]\n\n\n ## constraint 3, every edge must be a 0 or a 1, we get the 0 or 1 directly\n ## from the adjacency matrix\n ## we do this first so that the sym_matrix is populated\n for n_iter in range(n):\n for j in range(n_iter+1):\n for i in range(j):\n s_edge = symbolize(i,j)\n sym_matrix[i][j] = s_edge\n constraint_3(s_edge)\n\n ## Iteration for triangle inequalities\n for n_iter in range(n):\n for k in range(n_iter+1):\n for j in range(k):\n for i in range(j):\n constraint_1(n_iter,i,j,k)\n constraint_2(n_iter,i,j,k)\n\n\n ## minimization\n o = z.Optimize()\n y = z.Int('y')\n\n y = z.Sum(u.flatten([int_formulation(j) for j in range(n)]))\n o.minimize(y)\n\n result = []\n\n if s.check() == z.sat:\n result = s.model()\n\n return result", 
"def shortest_combined_wire_path(grid):\n current_minimum = sys.maxsize\n\n for crossing in grid.crossings:\n crossing_wires = grid.get(crossing[0], crossing[1])\n \n crossing_total = 0\n for wire in crossing_wires:\n crossing_total += wire['distance']\n\n if crossing_total < current_minimum:\n current_minimum = crossing_total\n\n print(f\"Total length of wire at {crossing} is {crossing_total}\")\n return current_minimum", "def _ls_solver(A, B, warm_start=None):\n # TODO - do conjugate gradient if n is too large\n return np.linalg.lstsq(A.T, B.T)[0].T", "def original_solution():\n matrix = get_data()\n # Construct Graph\n G = nx.DiGraph()\n rows, cols = len(matrix), len(matrix[0])\n for r in xrange(rows):\n for c in xrange(cols):\n if 0 < c:\n G.add_edge(r*cols + c, r*cols + c - 1, weight=matrix[r][c-1])\n if c < cols-1:\n G.add_edge(r*cols + c, r*cols + c + 1, weight=matrix[r][c+1])\n if 0 < r:\n G.add_edge(r*cols + c, (r-1)*cols + c, weight=matrix[r-1][c])\n if r < rows-1:\n G.add_edge(r*cols + c, (r+1)*cols + c, weight=matrix[r+1][c])\n # Calculate shortest path\n path = nx.shortest_path(G, 0, rows*cols-1, weighted=True)\n \n # Get cost for path\n s = 0\n for p in path:\n c = p % cols\n r = (p - c) / rows\n s += matrix[r][c]\n return s", "def lcs_le(x: List, y: List) -> Tuple[Matrix, atrix]:\n m = len(x)\n n = len(y)\n lcs_matrix = [[None]*(n+1) for i in range(m+1)\n # each index is a optimal solution for each subproblem\n direction_matrix = [[None]*n for i in range(m)]\n # if either indecd is 0 then each element is 0\n for i n ranage(1, m+1):\n lcs_matrix[i][0] = 0\n for j in range(n+1):\n lcs_matrix[0][j] = 0\n for i in range(m):\n for j in range(n):\n if x[i] == y[j]:\n lcs_matrix[i+1][j+1] = lcs_matrix[i][j]+1\n direction_matrix[i][j] = Direction.UPPER_LEFT\n elif lcs_matrix[i][j+1] >= lcs_matrix[i+1][j]:\n lcs_matrix[i+1][j+1] = lcs_matrix[i][j+1]\n direction_matrix[i][j] = Direction.UP\n else:\n lcs_matrix[i+1][j+1] = lcs_matrix[i+1][j]\n direction_matrix[i][j] = Direction.LEFT\n return lcs_matrix, index_matrix", "def optimal_path(self, mission, start, sp):\n mission.add(start)\n while mission.targets[0] and mission.is_active():\n ds = [(sp[start][t], t) for t in mission.targets[0] if t in sp[start]]\n if not ds:\n mission.add(u'-1') # target not connected --> fill with dummies\n continue\n target = min(ds)\n for i in range(target[0] - 1):\n mission.add(u'0')\n mission.add(target[1])\n start = target[1]", "def C(relatorlist,quit_at=float('inf')):\n F,rels=fg.parseinputwords(relatorlist)\n if not all(r==F.cyclic_reduce(r) for r in rels):\n raise ValueError(\"Relators are not cyclically reduced.\")\n thepieces=pieces(rels)\n minnumberpieces=quit_at\n def min_string_piece_expression(whatsleft,thepieces,quit_at):\n # recursively determine the minimal expression of the string whatsleft as a concatenation of elements of thepieces, or stop once it is determined that any such expression requires at least quit_at many pieces\n # find a piece that agrees with a prefix of whatsleft and the recurse on the suffix\n if not whatsleft:\n return 0\n minexp=quit_at\n for p in thepieces:\n if p!=whatsleft[:len(p)]:\n continue\n else:\n minexp=min(minexp,1+min_string_piece_expression(whatsleft[len(p):],thepieces,minexp-1))\n return minexp\n def min_relator_piece_expression(relator,thepieces,quit_at):\n # This is first step in recursive search. Here we want to find a piece p such that for relator r we can write p=xy and r=yzx, with y nontrivial. 
That is, in this step only we think of r as cyclic word and allow first piece that wraps.\n r=relator()\n minexp=quit_at\n for p in thepieces:\n if len(p)>len(r):\n continue\n possiblestartingindices=[] # for given p there may be different possible choices of y\n for startingindex in range(len(r)-len(p)+1,len(r)+1):\n if p==(r+r)[startingindex:startingindex+len(p)]:\n possiblestartingindices.append(startingindex)\n if not possiblestartingindices:\n continue\n for startingindex in possiblestartingindices:\n # found a way to fit p into r spanning the beginning of r. This accounts for x and y part of r. Now recursively find shortest expression of z=whatsleft as a concatenation of pieces.\n whatsleft=(r+r)[startingindex+len(p):startingindex+len(r)]\n if not whatsleft:\n return 1\n else:\n minexp=min(minexp,1+min_string_piece_expression(whatsleft,thepieces,minexp-1))\n return minexp\n for thisrelator in rels:\n minnumberpieces=min(minnumberpieces,min_relator_piece_expression(thisrelator,thepieces,minnumberpieces))\n return minnumberpieces", "def lcs(s1, s2):\n\n shape = (len(s1) + 1, len(s2) + 1)\n M = np.zeros(shape)\n\n max_length_so_far = 0\n candidates = set()\n\n for i in range(len(s1)):\n for j in range(len(s2)):\n\n if s1[i] == s2[j]:\n M[i+1,j+1] = M[i,j] + 1\n\n if M[i+1,j+1] > max_length_so_far:\n max_length_so_far = int(M[i+1,j+1])\n candidates = {s1[i - max_length_so_far + 1: i + 1]}\n\n elif M[i+1,j+1] == max_length_so_far:\n candidates.add(s1[i - max_length_so_far + 1: i + 1])\n\n else:\n M[i,j] = 0\n\n candidates.add('')\n\n return candidates", "def auxmin_cc_piece(x,k_ind,m_ind):\n \n # Adding new linear function as a last function:\n # The first line. If jk = 1 and k_ind = nomax, this is a new line, otherwise an old one.\n line_start=cfg.nfea*sum(cfg.jk[i] for i in range(k_ind))\n #print line_start,cfg.jk,k_ind,cfg.nomax-1,cfg.jk[k_ind], cfg.xprev,x\n if cfg.jk[k_ind]==1 and k_ind==cfg.nomax-1:\n #print \"hihu0\"\n f_cc=np.dot(x[0:cfg.nfea-1],cfg.a[m_ind,:cfg.nfea-1])+x[cfg.nfea-1]\n cfg.min_line[k_ind,m_ind] = 0 # a global variable to save the smallest value.\n return f_cc\n else:\n #print \"hihu1\",line_start,k_ind\n f_cc=np.dot(cfg.xprev[line_start:line_start+(cfg.nfea-1)],cfg.a[m_ind,:cfg.nfea-1])+cfg.xprev[line_start+cfg.nfea-1]\n cfg.min_line[k_ind,m_ind] = 0 # a global variable to save the smallest value.\n if cfg.jk[k_ind]==1:\n return f_cc\n \n # Next lines\n line_start += cfg.nfea\n for j in range(1,cfg.jk[k_ind]-1): # Everything but the first and last.\n f_tmp = np.dot(cfg.xprev[line_start:line_start+(cfg.nfea-1)],cfg.a[m_ind,:cfg.nfea-1])+cfg.xprev[line_start+cfg.nfea-1]\n \n # Minimum of lines\n if f_tmp <= f_cc:\n f_cc = f_tmp\n cfg.min_line[k_ind,m_ind] = j\n line_start += cfg.nfea\n \n # The last line.\n if k_ind==cfg.nomax-1:\n #print \"hihu3\"\n f_tmp = np.dot(x[0:cfg.nfea-1],cfg.a[m_ind,:cfg.nfea-1])+x[cfg.nfea-1]\n else: \n \n f_tmp = np.dot(cfg.xprev[line_start:line_start+(cfg.nfea-1)],cfg.a[m_ind,:cfg.nfea-1])+cfg.xprev[line_start+cfg.nfea-1]\n \n # Minimum of lines\n if f_tmp <= f_cc:\n f_cc = f_tmp\n cfg.min_line[k_ind,m_ind] = cfg.jk[k_ind]-1 \n\n return f_cc", "def jacobian_cdas( func, scl, lint=0.8, tol=1e-12, eps = 1e-30, withScl = False ):\n scl = abs(asarray(scl).flatten())\n N = len(scl)\n lint = abs(lint)\n def centDiffJacAutoScl( arg ):\n \"\"\"\n Algorithm: use the value of the function at the center point\n to test linearity of the function. 
Linearity is tested by\n taking dy+ and dy- for each dx, and ensuring that they\n satisfy lint<|dy+|/|dy-|<1/lint\n \"\"\"\n x0 = asarray(arg).flatten()\n y0 = func(x0)\n s = scl.copy()\n #print \"Jac at \",x0\n idx = slice(None)\n dyp = empty((len(s),len(y0)),x0.dtype)\n dyn = empty_like(dyp)\n while True:\n #print \"Jac iter \",s\n d0 = diag(s)\n dyp[idx,:] = [ func(x0+dx)-y0 for dx in d0[idx,:] ]\n dypc = dyp.conj()\n dyn[idx,:] = [ func(x0-dx)-y0 for dx in d0[idx,:] ]\n dync = dyn.conj()\n dp = sum(dyp * dypc,axis=1)\n dn = sum(dyn * dync,axis=1)\n nul = (dp == 0) | (dn == 0)\n if any(nul):\n s[nul] *= 1.5\n continue\n rat = dp/(dn+eps)\n nl = ((rat<lint) | (rat>(1.0/lint)))\n # If no linearity violations found --> done\n if ~any(nl):\n break\n # otherwise -- decrease steps\n idx, = nl.flatten().nonzero()\n s[idx] *= 0.75\n # Don't allow steps smaller than tol\n s[idx[s[idx]<tol]] = tol\n if all(s[idx]<tol):\n break\n res = ((dyp-dyn)/(2*s[:,newaxis])).T\n if withScl:\n return res, s\n return res\n return centDiffJacAutoScl", "def get_shortest_route_floyd(network, start,destination, excludings=[]):\n\n # On récupère la liste des villes\n list_city = network[1].keys()\n \n # Si la ville de départ ou de fin n'existe pas\n if start not in list_city or destination not in list_city:\n return None\n\n # On retire les villes à exclure\n list_city = [x for x in list_city if x not in excludings]\n\n\n # Initialisation de se qu'on a besoin\n matrix = []\n distance = []\n n = len(list_city)\n\n \n # On construit la matrice adjacente où indique la distance si il existe une autoroute entre 2 villes\n for x in range(n): \n matrix.append( [] )\n distance.append( [] )\n for y in range(n):\n road_id = get_road_to(network,list_city[x],list_city[y])\n if road_id != None:\n matrix[x].append( get_length(network,road_id) )\n else:\n matrix[x].append( None )\n distance[x].append( [road_id] ) # Autoroute -> format: ['LA']\n\n\t \n # Algorithme de Floyd\n for k in range(n):\n for i in range(n):\n for j in range(n):\n if ( matrix[i][k] != None and matrix[k][j] != None ) and ( ( matrix[i][j] == None ) or ( matrix[i][j] > matrix[i][k] + matrix[k][j] ) ):\n matrix[i][j] = matrix[i][k] + matrix[k][j]\n\t\t \n\t\t # Hors Floyd / Ajout personnel\n if i != k and j != k: # Si i == k ou j == k, cela veut dire qu'on additionne un résultat supplémentaire à la case ij\n distance[i][j] = [] # Sinon ca signifie qu'on a trouvé un chemin plus court, du coup on supprime l'ancien chemin\n distance[i][j].extend( distance[i][k] ) # Chemin d'autoroute parcouru en plus -> format: ['LA','AH']\n distance[i][j].extend( distance[k][j] ) # Chemin d'autoroute parcouru en plus -> format: ['LA','AH']\n\n\t\t \n # On récupère simplement la liste des autoroutes parcourus\n idx_start = list_city.index( start )\n idx_destination = list_city.index( destination )\n distance_minimum = distance[ idx_start ][ idx_destination ]\n\n \n # Si on ne trouve aucune solution, on renvoie None\n if distance_minimum == [None]:\n distance_minimum = None\n \n return distance_minimum" ]
[ "0.6558933", "0.62821716", "0.61626667", "0.6098481", "0.581593", "0.57655877", "0.5763163", "0.57363194", "0.5675206", "0.5596622", "0.5536808", "0.55302745", "0.55295086", "0.54934233", "0.54817027", "0.5479422", "0.54510534", "0.5447503", "0.5436156", "0.5408442", "0.540822", "0.53927666", "0.53606784", "0.53538704", "0.5338696", "0.5332927", "0.5306626", "0.5282404", "0.5273651", "0.52710074" ]
0.63657045
1
Verify SSH Access with Root works.
def test_verify_ssh_access_with_root_works(driver):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def the_root_user_should_be_able_to_login_with_ssh(driver):\n assert ssh_result['result'], ssh_result['output']\n assert '..' in ssh_result['output'], ssh_result['output']", "def test_ssh(self):\n assert self.rc_conf.has_key('sshd_enable')\n assert self.rc_conf['sshd_enable'] == '\"YES\"'\n sshd_conf = open('/etc/ssh/sshd_config').read()\n assert re.search('[^#]PermitRootLogin yes', sshd_conf)", "def checkRoot():\n \n if not os.geteuid() == 0:\n sys.exit(\"You must be root to run this command, please use sudo and try again.\")", "def check_root() -> None:\n if os.geteuid() != 0:\n print(\"Please run as root\")\n exit(1)", "def run_ssh_root_host_with_root_password(driver, host, password):\n global ssh_result\n ssh_result = ssh_cmd('ls -la', 'root', password, host)", "def test_ssh(self):\n self._test_ssh(self.git_ssh_path)", "def check_root():\n if os.getuid():\n logging.critical(\"Please run as root.\")\n sys.exit(ExitCode.ROOT_REQUIRED)", "def is_accessible(self):\n if self._is_accessible:\n return self._is_accessible\n\n check_host_cmd = '/usr/rift/bin/ssh_root {ip} -q -n -o BatchMode=yes -o StrictHostKeyChecking=no ls > /dev/null'\n rc = subprocess.call(check_host_cmd.format(ip=self._ip), shell=True)\n logger.info(\"Checking if {} is accessible\".format(self._ip))\n\n\n\n if rc != 0:\n return False\n\n self._is_accessible = True\n return self._is_accessible", "def has_root():\n return bool(shell32.IsUserAnAdmin())", "def test_correct_sudo_config(self):\n\n pattern = re.compile('^\\s*PermitRootLogin\\s+no')\n # remove escaped characters in case the string supports color\n rem_escape = re.compile(r'\\x1B\\[[0-?]*[ -/]*[@-~]')\n \n for ip in self.IPs:\n try:\n s=pxssh.pxssh(options={\"PasswordAuthentication\" : \"no\"})\n s.login(ip, \"as\", ssh_key=\"~/.ssh/id_as_ed25519\")\n s.sendline('grep -E \"^\\s*PermitRootLogin\\s+no\" /etc/ssh/sshd_config')\n self.assertTrue(s.prompt())\n line_to_match=rem_escape.sub('', s.before.splitlines()[-1])\n self.assertTrue(pattern.match(line_to_match) != None, \"Error in machine {}\".format(ip))\n s.logout()\n except pxssh.ExceptionPxssh as e:\n self.assertTrue(False)\n print s.before\n print 'Sudo verification or login to {} failed!, error: {}'.format(ip, e)\n\n self.assertTrue(True)", "def test_sudo(self):\n self.assertEqual(self.host.user().name, \"matlab\")\n self.assertTrue(self.host.run(\"sudo echo 'Hello World'\").succeeded)", "def root_user_check():\n\n if not os.getuid() == 0:\n print(\"This program requires ROOT privileges. 
Exiting.\")\n sys.exit()", "def css_login_as_root(css_test_machine):\n ssh_config = collections.namedtuple('ssh_config',\n ('hostname port username '\n 'rsa_key_file password'))\n config = ssh_config(hostname=css_test_machine['public_ip'],\n port=22,\n username=\"root\",\n rsa_key_file=\"\", # Use password for now\n password=css_test_machine['root_password'])\n logger.debug(\"ssh instantiated\")\n yield SshUtil(config)\n # Close connection?", "def __check_ssh(self):\n sfcs = self.sshTunnelDict[\"target_ip\"]\n\n cmd = \"ps aux | grep ssh | awk '{print $20}'\"\n result = subprocess.Popen(cmd,\n shell= True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = result.communicate()\n if sfcs not in stdout.decode():\n return False\n else: return True", "def connectSsh(self):\n connect_handle = pexpect.spawn(\"ssh -q -o StrictHostKeyChecking=no root@%s\" % self.ip)\n connect_handle.setwinsize(800,800)\n connect_handle.logfile_read = sys.stdout\n #connect_handle.logfile_send = sys.stdout\n i = 0\n ssh_newkey = r'(?i)Are you sure you want to continue connecting'\n remote_key_changed = r\"REMOTE HOST IDENTIFICATION HAS CHANGED\"\n\n perm_denied = r\"(?i)Permission denied\"\n while True:\n i = connect_handle.expect([ssh_newkey, 'assword:',self.promptshell,\n pexpect.EOF, pexpect.TIMEOUT,\n remote_key_changed, perm_denied])\n if i==0:\n connect_handle.sendline('yes')\n continue\n elif i==1:\n logger.info(\"Password supplied\")\n connect_handle.sendline(self.password)\n continue\n\t elif i==2:\n self._mode = CLI_MODES.shell\n self._prompt = self.promptshell\n break\n elif i==3:\n logger.info(\"Connection closed: %s\" % self)\n logger.info(connect_handle.before) # print out the result\n raise ValueError(\"Connection Closed: %s\" % self)\n elif i==4:\n logger.warning(\"Timeout while waiting for connection\")\n logger.info(connect_handle.before) # print out the result\n raise ValueError(\"Unable to establish connection %s\" % self)\n elif i==5:\n logger.warn(\"Removing offending key from .known_hosts..\")\n known_hosts_file = os.path.expanduser(\"~/.ssh/known_hosts\")\n\n if \"darwin\" in sys.platform.lower():\n # MAC OS\n utils.run_cmd(\"sed -i 1 's/%s.*//' %s\" % (self.ip, known_hosts_file))\n elif \"linux\" in sys.platform.lower():\n # Linux\n utils.run_cmd(\"sed -i 's/%s.*//' %s\" % (self.ip, known_hosts_file))\n\n connect_handle = pexpect.spawn(\"ssh root@%s\" % self.ip)\n connect_handle.setwinsize(800,800)\n connect_handle.logfile_read = sys.stdout\n\n continue\n elif i==6:\n logger.warning(\"Permission denied: %s\" % self)\n logger.info(connect_handle.before) # print out the result\n raise ValueError(\"Permission denied: %s.\" % self)\n return connect_handle", "def check_ssh_key(self):\n return True", "def _test_sudo(self) -> bool:\n self.debug('Check if sudo is necessary.', level=2)\n command = Command('whoami')\n user_output = self.guest.execute(command, silent=True)\n if user_output.stdout is None:\n raise tmt.utils.RunError(\n 'unexpected command output',\n command,\n 0,\n user_output.stdout,\n user_output.stderr)\n\n return user_output.stdout.strip() != 'root'", "def test_cd_above_root(self):\n server = connect_server(\"127.0.0.1\", 8081)\n server.send(str.encode(\"login testing_user password\"))\n server.recv(10000)\n server.send(str.encode(\"change_folder ..\"))\n response = str(server.recv(10000), \"utf-8\")\n server.close()\n self.assertEqual(\"Permission Denied.\", response)", "def user_is_root():\n return os.geteuid() == 0", "def _hostOK(self, host):\n if 
os.system(\"ping -c 1 $node &> /dev/null\"):\n # No access to host\n return False\n elif os.system(\"ssh -n -a -x $node 'ls' &> /dev/null\"):\n # No route to host\n return False\n else:\n return True", "def _detect_sudo(self, _execnet=None):\n exc = _execnet or execnet\n gw = exc.makegateway(\n self._make_connection_string(self.hostname, use_sudo=False)\n )\n\n channel = gw.remote_exec(\n 'import getpass; channel.send(getpass.getuser())'\n )\n\n result = channel.receive()\n gw.exit()\n\n if result == 'root':\n return False\n self.logger.debug('connection detected need for sudo')\n return True", "def sshKeyOK():\n\n\tkeyOK = False\t\n\t\n\tif os.path.exists('/root/.ssh/id_rsa.pub'):\n\t\tkeyOK = True\n\t\tcfg.LOGGER.info('%s root has an ssh key ready to push out', time.asctime())\n\telse:\n\t\t\n\t\t# Run ssh-keygen, in shell mode to generate the key i.e. use the 'True' parameter\n\t\t(rc, genOut) = issueCMD(\"ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa\",True)\n\n\t\tfor line in genOut:\n\t\t\tif 'Your public key has been saved' in line:\n\t\t\t\tcfg.LOGGER.info('%s SSH key has been generated successfully', time.asctime())\n\t\t\t\tkeyOK = True\n\t\t\t\tbreak\n\t\n\treturn keyOK", "def test_ssh_cmd_no_user(self):\n self.assertEqual(general.ssh_command(None,'example.com',('ls','-l')).command_line,\n ['ssh','example.com','ls','-l'])", "def sshtest():\n vbox = Vbox(env.vm_name)\n print vbox.ssh_up", "def test_superuser():\n assert os.geteuid() == 0, \"Need ROOT access in order to run tests.\"", "def test_auth_success(self):\n self.assertEqual(Freenas(hostname)._user, 'root')", "def isroot():\n\treturn (os.geteuid() == 0)", "def check_permission():\n if IS_ADMIN:\n out_info(\"Running as Root/Admin\")\n else:\n out_warning(\"Running without root/admin privileges\")", "def test_6_1_2_etc_passwd_user(host):\n assert host.file(ETC_PASSWD).user == 'root'", "def test_ssh_cmd(self):\n self.assertEqual(general.ssh_command('user','example.com',('ls','-l')).command_line,\n ['ssh','[email protected]','ls','-l'])" ]
[ "0.8164614", "0.76414716", "0.7162678", "0.6947462", "0.6803128", "0.6792433", "0.6751752", "0.67172444", "0.66793", "0.66309094", "0.663083", "0.6603524", "0.6576518", "0.65564007", "0.649722", "0.6473558", "0.64599997", "0.6431845", "0.63726485", "0.63556963", "0.6315167", "0.62666976", "0.6261327", "0.62562627", "0.62561", "0.62104625", "0.62006664", "0.6168935", "0.6136338", "0.6109423" ]
0.8793579
0
the browser is open navigate to "{nas_url}".
def the_browser_is_open_navigate_to_nas_url(driver, nas_url): if nas_url not in driver.current_url: driver.get(f"{nas_url}/ui/sessions/signin")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def the_browser_is_open_navigate_to_nas_url(driver, nas_url, request):\n depends(request, ['First_User', 'Setup_SSH'], scope='session')\n global host\n host = nas_url\n if nas_url not in driver.current_url:\n driver.get(f\"http://{nas_url}/ui/sessions/signin\")", "def the_browser_is_open_navigate_to_nas_url(driver, nas_url):\n if nas_url not in driver.current_url:\n driver.get(f\"{nas_url}/ui/sessions/signin\")\n time.sleep(5)", "def the_browser_is_open_navigate_to_nas_url(driver, nas_url):\n global host\n host = nas_url\n if nas_url not in driver.current_url:\n driver.get(f\"http://{nas_url}/ui/sessions/signin\")\n assert wait_on_element(driver, 10, '//input[@data-placeholder=\"Username\"]')", "def go_to_url(self, url):\n if self.browser is not None:\n self.browser.get(url)\n else:\n print('Browser is not running')", "def open_url(self, url):\n\n self.driver.get(url)", "def followlink(self, event):\n webbrowser.open(self.url)", "def open(url):\r\n webbrowser.open(url)", "def open_link(self):\n try:\n # webbrowser.open(self.url) # if you are on Windows OS\n webbrowser.get('safari').open_new_tab(self.url) # if you are on Mac OS\n except(AttributeError):\n self.ids.label.text = self.error_msg", "def open_in_browser(self):\n webbrowser.open(self.url)", "def open_url(self, url: str):\n self.driver.get(url)", "def open_url(self, url):\n try:\n if url != \"\":\n self.driver.maximize_window()\n self.driver.get(url)\n print(url + \" : url is opened\")\n else:\n print(\"Please enter valid url\")\n except Exception as e:\n print(str(e))", "def openSite(url):\n\timport webbrowser\n\twebbrowser.open('http://www.' + url + '.com', 2)", "def openurl(url):\n\n # Open the URL\n webbrowser.open(url)", "def onAboutLeoUrl(self,event=None):\n \n try:\n import webbrowser\n webbrowser.open(self.url)\n except:\n g.es(\"not found: \" + self.url)", "def go(self, url):\n self.driver.get(url)", "def goto_url(self, url):\n try:\n self._browser.get(url)\n except Exception as e:\n self.logger.error(\"Error going to url '\" + url + \"' : \" + str(e))\n raise", "def open(self, event=None, url=None):\n url = url or self.server.url\n try:\n import webbrowser\n webbrowser.open(url)\n except ImportError: # pre-webbrowser.py compatibility\n if sys.platform == 'win32':\n os.system('start \"%s\"' % url)\n elif sys.platform == 'mac':\n try:\n import ic\n ic.launchurl(url)\n except ImportError: pass\n else:\n rc = os.system('netscape -remote \"openURL(%s)\" &' % url)\n if rc: os.system('netscape \"%s\" &' % url)", "def open_browser(url):\n import webbrowser\n webbrowser.open_new(url)", "def open_link(self) -> None:\n\n webbrowser.open_new(self.link)", "def run(self, url=''):\n if url:\n webbrowser.open(url)", "def open_url(name):\n url = localReadConfig.get_webServer(name)\n browser = open_browser()\n browser.get(url)\n return browser", "def open_link(self):\n try:\n webbrowser.open(self.url)\n except:\n self.ids.link.text=self.link_message", "def open_top():\n _open_url_path('')", "def open_doi(doi):\n webbrowser.open_new_tab(DOI_URL % doi)", "def browser_open(url):\n FNULL = open(os.devnull, 'w')\n subprocess.Popen([udata.browser, url], stdout=FNULL, stderr=subprocess.STDOUT )", "def gotoWeb(self,page:str)->None:\n if page=='repo':\n webbrowser.open('http://github.com/ivan866/readTobiiGlasses')\n elif page=='wiki':\n webbrowser.open('http://github.com/ivan866/readTobiiGlasses/wiki')\n elif page=='glasses2API':\n webbrowser.open('http://tobiipro.com/product-listing/tobii-pro-glasses-2-sdk/')\n elif page=='coordSys':\n 
webbrowser.open('http://developer.tobiipro.com/commonconcepts.html')", "def newwindow(url):\n\n # Open the URL\n webbrowser.open_new(url)", "def navigate(self, url):\n self.log_info(f\"Browser.navigate: Navigating to {url}\")\n self.CORE.get(url)\n return", "def on_OpenExplorer_clicked(self):\n # TODO: not implemented yet\n #raise NotImplementedError\n\n url=\"http://kfc.matrix.io\"\n\n self.browser.openurl(url)\n self.OnlyDisplay(f\"start {url}\")\n #MATRIXWebutil.open_new(url)\n #MATRIXWebutil.open_new_tab(url)", "def open_news_url(self, url):\n\n try:\n if not webbrowser.open_new_tab(url):\n raise webbrowser.Error\n except webbrowser.Error:\n print('Unable to open a web browser, try accessing this URL manually instead:\\n{0}'.format(url))" ]
[ "0.80348057", "0.7913638", "0.7416739", "0.718283", "0.7099535", "0.7070255", "0.7025585", "0.6998565", "0.6980769", "0.6960713", "0.6910556", "0.6764071", "0.67598534", "0.67507684", "0.6718104", "0.67153645", "0.66919", "0.6680896", "0.6666685", "0.66450447", "0.6626909", "0.6622646", "0.6560949", "0.65057147", "0.6451588", "0.6451373", "0.6442011", "0.64350754", "0.6403617", "0.6398487" ]
0.79244345
1
go to System Settings, click Services.
def go_to_system_settings_click_services(driver): rsc.Go_To_Service(driver)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def service_manual(self):\n try:\n webbrowser.open(\"https://openeo.org/documentation/1.0/qgis/#service-management\")\n except:\n pass", "def on_actionSettings_triggered(self):\n self.start_app(SettingsApp)", "def services(status):\n\n run(\"sudo systemctl %s xprof.service\" % status)", "def view_service(options, service_name, client):\n if options.show_events:\n return display_events(client.service_events(service_name))\n\n service_content = client.service(service_name)\n return display.DisplayServices().format_details(service_content)", "def select_goto_application_settings_item(self):\n self.driver.click(\"go_to_application_settings_btn\")", "def open_settings(self):\n self.android_device_driver.adb.exec_adb_cmd(\n \"shell am start -a android.settings.SETTINGS\").wait()", "def on_the_directory_services_page_click_setting_on_the_active_directory_card(driver):\n assert wait_on_element(driver, 7, '//h1[text()=\"Directory Services\"]')\n assert wait_on_element(driver, 5, '//h3[text()=\"Active Directory and LDAP are disabled.\"]')\n assert wait_on_element(driver, 5, '//button[contains(.,\"Configure Active Directory\")]', 'clickable')\n driver.find_element_by_xpath('//button[contains(.,\"Configure Active Directory\")]').click()", "def getServices(self):\n pass", "def list_services(ctx):\n pass", "def saved(self, service):\n if not self.__show:\n msg = QtWidgets.QMessageBox()\n msg.setText(\"You are about to modify a facility wide service configuration. \"\n \"Are you in PSR-Resources?\")\n msg.setStandardButtons(QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)\n msg.setDefaultButton(QtWidgets.QMessageBox.No)\n if msg.exec_() == QtWidgets.QMessageBox.No:\n return\n\n if self.__new_service:\n if self.__show:\n self.__show.createServiceOverride(service.data)\n else:\n opencue.api.createService(service.data)\n else:\n service.update()\n\n self.refresh()\n self.__new_service = False\n\n for i in range(0, self.__service_list.count()):\n item = self.__service_list.item(i)\n if item:\n if str(item.text()) == service.name():\n self.__service_list.setCurrentRow(i, QtCore.QItemSelectionModel.Select)\n break", "def help():\n managed_services = help_managed_service()\n click.echo(format_text(\n \"List of Fandogh managed services:\", TextStyle.OKBLUE\n ))\n for managed_service in managed_services:\n click.echo(\"\\t* Service name: {}\".format(managed_service['name']))\n for parameter_name, description in managed_service['options'].items():\n click.echo(\"\\t\\t. 
{}:\\t{}\".format(parameter_name.ljust(20), description))", "def run(self):\n\n if not self.services_loaded:\n if not self.api_key_instance.get_api_keys():\n self.dlg.uLabelWarning.setText(\n \"Access the “Settings” tab to configure a service domain and API key.\"\n )\n self.dlg.uLabelWarning.show()\n else:\n self.load_ui()\n if not self.cache_updated and self.update_cache:\n self.update_service_data_cache()\n self.dlg.show()", "def open_quick_settings(self):\n self.android_device_driver.adb.exec_adb_cmd(\n \"shell cmd statusbar expand-settings\").wait()", "def press_on_configure_ssh(driver):\n assert wait_on_element(driver, 5, xpaths.services.ssh_Service_Button, 'clickable')\n driver.find_element_by_xpath(xpaths.services.ssh_Service_Button).click()", "def view_system():\n\n pass", "def run_services():\n for service in (\"minvd\", \"httpd\", \"ntpd\"):\n sudo(\"service %s start\" % service)\n sudo(\"chkconfig %s on\" % service)", "def service():\n conf = template('remote/addok.service', **config)\n put(conf, '/etc/systemd/system/addok.service')\n systemctl('enable addok.service')", "def get_service(self):", "def cmd_SERVICES(self):\r\n return self._ros.get_services()", "def changeBluetoothService(enable=True):\n \n #blueServiceStatus = os.popen('systemctl status bluetooth.service').read()\n ServStatStdout = execCommand('systemctl status bluetooth.service')\n \n if enable:\n if not 'active (running)' in ServStatStdout:\n checkRoot()\n #blueServiceStatus = os.popen('sudo systemctl start bluetooth.service').read()\n blueServStartStdout = execCommand('sudo systemctl start bluetooth.service')\n return\n \n if not enable:\n if not 'inactive (dead)' in ServStatStdout:\n checkRoot()\n #blueServiceStatus = os.popen('sudo systemctl stop bluetooth.service').read()\n blueServStopStdout = execCommand('sudo systemctl stop bluetooth.service')\n return", "def all_services(self):\n services = oc.all_service_names()\n for s in services:\n print(s)\n print(\"#total\", len(services))", "def addServices(self):\r\n self.addHendrix()\r\n\r\n if not self.options.get('global_cache') and not self.options.get('nocache'):\r\n self.addLocalCacheService()\r\n\r\n if self.is_secure:\r\n self.addSSLService()\r\n\r\n self.catalogServers(self.hendrix)", "def service(self):\n pass", "def openSettings(self):\r\n pass", "def start_services(self):\n logger.info(\"Starting services: %s\", self.services)\n for service in self.services:\n with hide(*fab_quiet):\n sudo('service %s start' % service)", "def click_start_automatically_ssh_checkbox_and_enable_the_ssh_service(driver):\n assert wait_on_element(driver, 5, xpaths.services.title)\n time.sleep(1)\n assert wait_on_element(driver, 5, '//tr[contains(.,\"SSH\")]//mat-checkbox')\n value_exist = attribute_value_exist(driver, '//tr[contains(.,\"SSH\")]//mat-checkbox', 'class', 'mat-checkbox-checked')\n if not value_exist:\n driver.find_element_by_xpath('//tr[contains(.,\"SSH\")]//mat-checkbox').click()\n assert wait_on_element(driver, 5, '//tr[contains(.,\"SSH\")]//mat-slide-toggle/label', 'clickable')\n value_exist = attribute_value_exist(driver, xpaths.services.ssh_Service_Toggle, 'class', 'mat-checked')\n if not value_exist:\n driver.find_element_by_xpath('//tr[contains(.,\"SSH\")]//mat-slide-toggle/label').click()\n time.sleep(1)", "def getDefaultServices():\n return Service.getDefaultServices()", "def enable_service(service_name, start_type='auto'):\n run_program(['sc', 'config', service_name, 'start=', start_type])", "def get_services(**options):\r\n return {}", "def 
save(self):\n self.client._perform_empty(\n \"PUT\", \"/projects/%s/apiservices/%s/settings\" % (self.project_key, self.service_id),\n body = self.settings)" ]
[ "0.67575246", "0.6122552", "0.5932413", "0.5895286", "0.57974863", "0.5790793", "0.5771788", "0.5738168", "0.573721", "0.57223356", "0.57181805", "0.57047504", "0.56862456", "0.55873585", "0.5547596", "0.5486807", "0.5463146", "0.5442314", "0.5418023", "0.54111516", "0.5387242", "0.538186", "0.5380005", "0.5353171", "0.53530544", "0.5346311", "0.53274703", "0.530437", "0.53034747", "0.52860683" ]
0.8028309
0
the service page should open.
def the_service_page_should_open(driver): assert wait_on_element(driver, 5, xpaths.services.title) time.sleep(1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def service_manual(self):\n try:\n webbrowser.open(\"https://openeo.org/documentation/1.0/qgis/#service-management\")\n except:\n pass", "def isBrowseable(self, service):\n\t\treturn False", "def test_open(self):\n page, resources = self.ghost.open(base_url)\n self.assertEqual(page.url, base_url)\n \n self.ghost.click(\"#run\")", "def _go_to_page(self):\n self.salesforce.go_to_setup_home()\n self.eda.wait_for_new_window(\"Home | Salesforce\")\n self.selenium.switch_window(\"Home | Salesforce\")\n self.salesforce.wait_until_loading_is_complete()", "def test_viewTermsOfServicePage(self):\r\n print('========================================================================')\r\n print('Test for check redirect on TermsOfService page after link TermsOfService click')\r\n #Load Registrtion page\r\n self.reg_page.open_registration_page()\r\n driver = self.reg_page.driver\r\n\r\n #cheks if right title\r\n assert self.reg_page.is_title_matches(), \"Registration title page doesn't match\"\r\n\r\n self.reg_page.click_terms_lnk()\r\n terms_page = page_TermsAndService.Page_TermsAndService(driver)\r\n\r\n\r\n driver.get(terms_page.TERMS_URL)\r\n wait = WebDriverWait(driver, 20)\r\n element = wait.until(EC.title_is(terms_page.get_terms_title()))\r\n assert terms_page.get_terms_title() == 'Snovio terms and conditions', \"Terms title page doesn't match\"\r\n\r\n print('--------- SUCCESS test_viewTermsOfServicePage-----------')\r\n driver.quit()", "def open_restaurant(self):\n\t\tprint(\"restaurant is open\")", "def is_browser_on_page(self):", "def onAboutLeoUrl(self,event=None):\n \n try:\n import webbrowser\n webbrowser.open(self.url)\n except:\n g.es(\"not found: \" + self.url)", "def test_show_on_homepage(self) -> None:\n self.assert_show_on_homepage(apps.wakeup.main.Controller)", "def _is_current_page(self):\n self.selenium.wait_until_location_contains(\"/list\",timeout=60, message=\"Records list view did not load in 1 min\")\n self.selenium.location_should_contain(\"General_Accounting_Unit__c\",message=\"Current page is not a DataImport List view\")", "def open_restaurant(self):\n print(\"We're Open!\")", "def get_page(self):\n self.browser.get(self.url)", "def setup_page(self):\r\n raise NotImplementedError", "def test_get_services_html(self):\n pass", "def onOpen(self):", "def service(request):\n\treturn render(request,'service.html',None)", "def is_open(self) -> bool:\n pass", "def visit_homepage(self) -> None:\n if self.home_page is not None:\n webbrowser.open(self.home_page)", "def is_open(self):\n\t\treturn self._session is not None", "def _verify_page(self):", "def open(self):\n return True", "def open(self):\n return True", "def service(self):\n pass", "def _goto_staff_page(self):\r\n self.courseware_page.visit()\r\n staff_page = StaffPage(self.browser)\r\n self.assertEqual(staff_page.staff_status, 'Staff view')\r\n return staff_page", "def is_open(self):\n return self.name == \"open\"", "def _is_current_page(self):\n location = \"/lightning/n/{}{}\".format(self.eda.get_eda_namespace_prefix(), self._object_name)\n self.selenium.location_should_contain(location)\n\n locator_tab = eda_lex_locators[\"eda_settings\"][\"tab\"].format(\"Relationships\")\n self.selenium.wait_until_page_contains_element(\n locator_tab,\n error=f\"Relationships tab with locator '{locator_tab}' is not available on the page\"\n )", "def setup_page(self):\n raise NotImplementedError", "def Open(self):\n return True", "def Open(self):\n return True", "def initial(self, request, *args, **kwargs):\n super(OdooApi, 
self).initial(request, *args, **kwargs)\n self.check_service_permission(request, kwargs.get('service_path'))" ]
[ "0.6390198", "0.6162065", "0.6072125", "0.60534656", "0.6048276", "0.5947872", "0.58881843", "0.5836421", "0.57742107", "0.5757675", "0.5726324", "0.57130045", "0.57113594", "0.5677961", "0.5675672", "0.56657815", "0.56542724", "0.5652201", "0.5648147", "0.5630627", "0.5620554", "0.5620554", "0.5617307", "0.5613159", "0.56063646", "0.55968434", "0.55939823", "0.55809385", "0.55809385", "0.55532235" ]
0.75671417
0
press on configure(pencil) SSH.
def press_on_configure_ssh(driver): assert wait_on_element(driver, 5, xpaths.services.ssh_Service_Button, 'clickable') driver.find_element_by_xpath(xpaths.services.ssh_Service_Button).click()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_config(self, args):\n self.config_command.cmdloop(\"Enter to config mode\")", "def open_ssh():\n print('Opening SSH...')", "def configure(username, password, domain):\n art = r'''\nWelcome! __ ___. .__\n_____ ____ ____ ____ __ __ _____/ |______ \\_ |__ | | ____\n\\__ \\ _/ ___\\/ ___\\/ _ \\| | \\/ \\ __\\__ \\ | __ \\| | _/ __ \\\n / __ \\\\ \\__\\ \\__( <_> ) | / | \\ | / __ \\| \\_\\ \\ |_\\ ___/\n(____ /\\___ >___ >____/|____/|___| /__| (____ /___ /____/\\___ >\n \\/ \\/ \\/ \\/ \\/ \\/ \\/\n '''\n click.secho(art, fg='blue')\n Config(username=username, password=password, domain=domain)", "def display_config_prompt(self, message=None, reason=None,\n append_config_message=True):\n if message and append_config_message:\n message += '. Press return to configure %s.' % self.workflow_name\n elif message is None:\n message = 'Configure %s' % self.workflow_name\n self.display_message(message, reason, arg='!config')", "def ssh(args, config):\n print('{}'.format(ssh.__doc__))", "def ssh_config(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n print(utils.config_ssh_string(self.config_ssh))", "async def config(self, ctx):\n await ctx.send_help(ctx.command)", "def command():\n server = get_server()\n port = get_port()\n \n click.echo(f'{server.get(\"hostname\")}:{port} -> localhost:{port}')\n click.echo('CTRL+C for quit')\n bash('ssh -N -L {port}:localhost:{port} -i {ssh_key_path} {username}@{hostname}'.format(\n ssh_key_path=server.get('ssh_key_path'),\n username=server.get('username'),\n hostname=server.get('hostname'),\n port=port\n ))", "def __gitConfigure(self):\n e5App().getObject(\"UserInterface\").showPreferences(\"zzz_gitPage\")", "def bootstrap_config(self):\n self.logger.info(\"applying bootstrap configuration\")\n self.wait_write(\"\\r\", None)\n # Wait for the prompt\n time.sleep(1)\n self.wait_write(\"system-view\", \"<HPE>\")\n self.wait_write(\"ssh server enable\", \"[HPE]\")\n self.wait_write(\"user-interface class vty\", \"[HPE]\")\n self.wait_write(\"authentication-mode scheme\", \"[HPE-line-class-vty]\")\n self.wait_write(\"protocol inbound ssh\", \"[HPE-line-class-vty]\")\n self.wait_write(\"quit\", \"[HPE-line-class-vty]\")\n self.wait_write(\"local-user %s\" % (self.username), \"[HPE]\")\n self.wait_write(\"password simple %s\" % (self.password), \"[HPE-luser-manage-%s]\" % (self.username))\n self.wait_write(\"service-type ssh\", \"[HPE-luser-manage-%s]\" % (self.username))\n self.wait_write(\"authorization-attribute user-role network-admin\", \"[HPE-luser-manage-%s]\" % (self.username))\n self.wait_write(\"quit\", \"[HPE-luser-manage-%s]\" % (self.username))\n self.wait_write(\"interface GigabitEthernet%s/0\" % (self.num_nics + 1), \"[HPE]\")\n self.wait_write(\"ip address 10.0.0.15 255.255.255.0\", \"[HPE-GigabitEthernet%s/0]\" % (self.num_nics + 1))\n self.wait_write(\"quit\", \"[HPE-GigabitEthernet%s/0]\" % (self.num_nics + 1))\n self.wait_write(\"quit\", \"[HPE]\")\n self.wait_write(\"quit\", \"<HPE>\")\n self.logger.info(\"completed bootstrap configuration\")", "def state_choose_enter(cfg, app, win):", "def ssh_cmd(ctx):\n pass", "def editor_cloud9_ssh_command():\n docker_vars = _editor_cloud9_docker_vars()\n print \"ssh -p %s -i private/ssh/id_rsa_devbox root@%s\" % (docker_vars['public_ssh_port'], env.host)", "def config_mode(self, config_command='configure private', pattern=''):\n if not pattern:\n pattern = re.escape(self.base_prompt[:16])\n return super(ExaROSSSH, self).config_mode(\n 
config_command=config_command, pattern=pattern)", "def esp32_app_menuconfig(ctx):\n _run_idf_script(ctx, \"menuconfig\", pty=True)", "def GoToConfig(self):\n self.write_ack(MID.GoToConfig)", "def elActivateGraphicalLogin(self):\n # see http://docs.redhat.com/docs/en-US/Red_Hat_Enterprise_Linux/6/html/Installation_Guide/s1-kickstart2-options.html\n commandSection = self.sectionByName(\"command\")\n commandSection.string = commandSection.string + \"\"\"\n#\n# XWindows configuration information.\nxconfig --startxonboot --defaultdesktop=GNOME\n\"\"\"\n return self", "def connectSsh(self):\n connect_handle = pexpect.spawn(\"ssh -q -o StrictHostKeyChecking=no root@%s\" % self.ip)\n connect_handle.setwinsize(800,800)\n connect_handle.logfile_read = sys.stdout\n #connect_handle.logfile_send = sys.stdout\n i = 0\n ssh_newkey = r'(?i)Are you sure you want to continue connecting'\n remote_key_changed = r\"REMOTE HOST IDENTIFICATION HAS CHANGED\"\n\n perm_denied = r\"(?i)Permission denied\"\n while True:\n i = connect_handle.expect([ssh_newkey, 'assword:',self.promptshell,\n pexpect.EOF, pexpect.TIMEOUT,\n remote_key_changed, perm_denied])\n if i==0:\n connect_handle.sendline('yes')\n continue\n elif i==1:\n logger.info(\"Password supplied\")\n connect_handle.sendline(self.password)\n continue\n\t elif i==2:\n self._mode = CLI_MODES.shell\n self._prompt = self.promptshell\n break\n elif i==3:\n logger.info(\"Connection closed: %s\" % self)\n logger.info(connect_handle.before) # print out the result\n raise ValueError(\"Connection Closed: %s\" % self)\n elif i==4:\n logger.warning(\"Timeout while waiting for connection\")\n logger.info(connect_handle.before) # print out the result\n raise ValueError(\"Unable to establish connection %s\" % self)\n elif i==5:\n logger.warn(\"Removing offending key from .known_hosts..\")\n known_hosts_file = os.path.expanduser(\"~/.ssh/known_hosts\")\n\n if \"darwin\" in sys.platform.lower():\n # MAC OS\n utils.run_cmd(\"sed -i 1 's/%s.*//' %s\" % (self.ip, known_hosts_file))\n elif \"linux\" in sys.platform.lower():\n # Linux\n utils.run_cmd(\"sed -i 's/%s.*//' %s\" % (self.ip, known_hosts_file))\n\n connect_handle = pexpect.spawn(\"ssh root@%s\" % self.ip)\n connect_handle.setwinsize(800,800)\n connect_handle.logfile_read = sys.stdout\n\n continue\n elif i==6:\n logger.warning(\"Permission denied: %s\" % self)\n logger.info(connect_handle.before) # print out the result\n raise ValueError(\"Permission denied: %s.\" % self)\n return connect_handle", "def sshtest():\n vbox = Vbox(env.vm_name)\n print vbox.ssh_up", "def config_towercrane(self):\n cloudtype = \"\"\n while cloudtype not in [\"aws\",\"gcloud\"]:\n cloudtype = input(\"what is your choice for cloud storage? aws or gcloud: \") or \"aws\"\n \n self.set_mother_config(\"cloudtype\",cloudtype)\n auth_done = input(f\"Have you authenticated your {cloudtype}? 
(y/n): \") or \"n\"\n if auth_done in [\"n\",\"N\",\"no\",\"NO\"] :\n print(\"AWS Authentication: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-quickstart.html#cli-configure-quickstart-config\")\n print(\"Google Cloud Authentication: https://cloud.google.com/docs/authentication/getting-started\")\n elif auth_done in [\"y\",\"Y\",\"yes\",\"YES\"] :\n print(f\"Start with 'towercrane scan' \")", "def update_ssh_shortcut(output_keyfile, quickname=None):\n if quickname:\n with settings(warn_only=True):\n local(\"touch $HOME/.ssh/config\")\n local(r\"echo '' >> $HOME/.ssh/config\")\n local(r\"echo 'Host %s' >> $HOME/.ssh/config\" % quickname)\n local(r\"echo '' >> $HOME/.ssh/config\")\n local(r\"echo 'Hostname %s' >> $HOME/.ssh/config\" % host_name)\n local(r\"echo 'User %s' >> $HOME/.ssh/config\" % user)\n local(r\"echo 'IdentityFile ~/.ssh/%s' >> $HOME/.ssh/config\" % output_keyfile)\n local(r\"echo 'ServerAliveCountMax 3' >> $HOME/.ssh/config\")\n local(r\"echo 'ServerAliveInterval 10' >> $HOME/.ssh/config\")", "def on_pickConfigButton(self, cfg_t, button):\n\n ctxt = \"\"\n cfg_iter = self.cbox[cfg_t].get_active_iter()\n\n # cfg_iter points to user choice? ...\n if cfg_iter != None:\n model = self.cbox[cfg_t].get_model()\n cfgname = model[cfg_iter][0]\n\n # ... or to an Entry?\n else:\n cfgname = self.cbox[cfg_t].get_child().get_text()\n\n if cfgname == \"\":\n ctxt += \"C'mon now! Ya gotta type in a name.\\n\"\n self.consoleBuffer.insert_at_cursor(ctxt)\n return\n\n # are we making a new config?\n if cfgname not in self.cfgmgr.get_cfg_list(cfg_t):\n ctxt += \"Making new {0} '{1}' ...\\n\".format(cfg_t, cfgname)\n\n if not self.cfgmgr.make_config( cfg_t, cfgname ):\n ctxt += self.cfgmgr.errstr()\n self.consoleBuffer.insert_at_cursor(ctxt)\n return\n\n self.sm[cfg_t].set_configf_by_name(cfgname)\n self._load_combobox(cfg_t=cfg_t)\n\n ctxt += \"Activating {0} '{1}' ... 
\".format(cfg_t, cfgname)\n\n # update ConfigManager and cfg state\n if self.cfgmgr.set_current_cfg_by_name(cfg_t, cfgname):\n ctxt += \"{0} '{1}' activated.\\n\".format(cfg_t, cfgname)\n ctxt += \"==> INFO: {}\\n\".format( self.sm[cfg_t].get('info') )\n else:\n ctxt += \"\\n==> ERROR: could not activate {0} '{1}'\\n\".format(cfg_t, cfgname)\n\n self.consoleBuffer.insert_at_cursor(ctxt)\n\n self._refresh_ui( block='profile', act_l=[cfg_t,] )", "def send_config_set(self, config_commands=None, exit_config_mode=True, **kwargs):\n if self.username == \"root\":\n exit_config_mode = False\n return super(CiscoSSHConnection, self).send_config_set(config_commands=config_commands,\n exit_config_mode=exit_config_mode,\n **kwargs)", "def scp_enable(task):\n cmd = \"ip scp server enable\"\n task.run(task=netmiko_send_config, config_commands=cmd)\n c_print(f\"*** {task.host}: SCP has been enabled ***\")", "def test_ssh(self):\n assert self.rc_conf.has_key('sshd_enable')\n assert self.rc_conf['sshd_enable'] == '\"YES\"'\n sshd_conf = open('/etc/ssh/sshd_config').read()\n assert re.search('[^#]PermitRootLogin yes', sshd_conf)", "def configure(args):\n print('Configures HPC fleet with given name \"{}\"'.format(args))", "def main():\r\n parser = argparse.ArgumentParser(description=\"\"\"Starts SSH session with one\r\n of ARC\\'s Raspberrypis.\"\"\")\r\n\r\n parser.add_argument('usr', help='Username for the remote device.')\r\n parser.add_argument('pwd', help='Password for [email protected].')\r\n\r\n args = parser.parse_args()\r\n\r\n address = get_IP(IP_list(args.pwd), args.usr)\r\n os.system(\"ssh \" + \"pi\" + \"@\" + address)", "def handle(self, args, unknown):\n print(colored('\\n \\033[1m Mantra Cloud Configuration', 'blue') + colored('\\033[1m ☁\\n', 'green'))\n\n use_cloud = input(colored(\"\\n \\033[1m We will configure your Mantra project with cloud support. Continue? (y/n)\\n\\n\", 'green'))\n\n if use_cloud != \"y\":\n exit()\n\n cloud_provider = None\n\n while cloud_provider not in CLOUD_PROVIDERS:\n cloud_provider = input(colored(\"\\n \\033[1m Which cloud provider would you like to use? E.g. aws \\n\\n\", 'green'))\n\n if cloud_provider not in CLOUD_PROVIDERS:\n print(colored('\\n \\033[1m Invalid cloud provider. Please choose one of the available options.\\n', 'red'))\n\n if cloud_provider == 'aws':\n aws_access_key_id = input(colored(\"\\n \\033[1m What is your AWS Access Key Id? e.g. AKIAIL22SZKS2NO4GAOQ \\n\\n\", 'green'))\n aws_secret_access_key = input(colored(\"\\n \\033[1m What is your AWS Secret Access Key? e.g. AFEF4FH34aIfdoRaFAnw21ATp5hSNwsSFOs2cg/a \\n\\n\", 'green')) \n aws_key_path = input(colored(\"\\n \\033[1m What is your AWS Key Path? e.g. ~/.ssh/mykey.pem \\n\\n\", 'green')) \n is_security_group = input(colored(\"\\n \\033[1m Would you like to use a custom security group? (If not will use default) y/n? \\n\\n\", 'green')) \n\n if is_security_group == \"y\":\n aws_security_group = input(colored(\"\\n \\033[1m What is the name of the security group? e.g. mantrasg \\n\\n\", 'green')) \n aws_security_group = \"'%s'\" % aws_security_group\n else:\n aws_security_group = None\n\n print(colored('\\n \\033[1m Ensure that your security group has the right inbound/outbound permissions, and ability to create instances. \\n', 'white'))\n\n aws_default_region = input(colored(\"\\n \\033[1m Please choose your default AWS region. E.g. us-east-1 \\n\\n\", 'green')) \n aws_default_s3_region = input(colored(\"\\n \\033[1m Please choose your default S3 region. E.g. 
us-east-1 \\n\\n\", 'green')) \n\n settings_path = \"%s/settings.py\" % os.getcwd()\n settings_content = open(settings_path, 'r')\n new_lines = []\n\n for line in settings_content:\n\n if not any([config_var in line for config_var in CONFIG_VARIABLES + ['CLOUD CONFIGURATION SETTINGS']]):\n new_lines.append(line)\n\n new_lines.append('\\n# CLOUD CONFIGURATION SETTINGS\\n')\n new_lines.append(\"CLOUD_PROVIDER = 'AWS'\\n\")\n new_lines.append(\"AWS_KEY_PATH = '%s'\\n\" % aws_key_path)\n new_lines.append(\"AWS_SECURITY_GROUP = %s\\n\" % aws_security_group)\n new_lines.append(\"S3_AVAILABILITY_ZONE = '%s'\\n\" % aws_default_s3_region)\n new_lines.append(\"AWS_ACCESS_KEY_ID = '%s'\\n\" % aws_access_key_id)\n new_lines.append(\"AWS_SECRET_ACCESS_KEY = '%s'\\n\" % aws_secret_access_key.replace(\" \", \"\"))\n new_lines.append(\"AWS_DEFAULT_REGION = '%s'\" % aws_default_region)\n\n with open(settings_path, \"w\") as settings_file:\n settings_file.write(''.join(new_lines))\n\n credentials_file = '[default]\\naws_access_key_id = %s\\naws_secret_access_key = %s' % (aws_access_key_id, aws_secret_access_key)\n config_file = '[default]\\nregion=%s' % (aws_default_region)\n credentials_path = os.path.expanduser(\"~/.aws/credentials\")\n config_path = os.path.expanduser(\"~/.aws/config\")\n\n with open(credentials_path, \"w\") as credentials:\n credentials.write(credentials_file)\n\n with open(config_path, \"w\") as config:\n config.write(config_file)\n\n print(colored(\"\\n \\033[1m Great, that's all set up. Make sure you have the AWS CLI installed as that is a necessary dependency.\\n\\n\", 'blue'))", "def state_chosen_enter(cfg, app, win):", "def do_config(self, args):\n if args.set == \"store_password\":\n put_config_value(\"store_password\", True if args.value.lower() == \"yes\" else False)\n elif args.set == \"password\":\n put_config_value(\"password\", args.value)\n elif args.set == \"username\":\n put_config_value(\"username\", args.value)\n else:\n print(\"Invalid option\")" ]
[ "0.65699154", "0.6418787", "0.62135017", "0.6209512", "0.6088588", "0.60334", "0.6016521", "0.6001123", "0.5966563", "0.5885682", "0.5823382", "0.5806015", "0.5788063", "0.57637227", "0.5755207", "0.5689586", "0.56737524", "0.5669383", "0.5639078", "0.5637163", "0.5633341", "0.5630232", "0.56047875", "0.55732864", "0.55582565", "0.55469084", "0.55465937", "0.55202585", "0.5513944", "0.55118656" ]
0.7515753
0
the SSH General Options page should open.
def the_ssh_general_options_page_should_open(driver): assert wait_on_element(driver, 5, '//h1[text()="SSH"]') assert wait_on_element(driver, 5, '//legend[contains(.,"General Options")]')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def help_opt(self):\n print(OPTIONS)", "def generalHelp(self):\n rf = os.path.join('docs','helpButtons','prefsGeneral.html')\n self.showHelpFile( rf )", "def OnButtonOptionsHelpButton(self, event):\r\n\t\twebbrowser.open(consts.URL_HELP_OPTIONS)", "def otherOptionsFullScreen(self):\n\n # Set Storage List\n storageList = []\n # Create Intel explain menu\n menuDisplay = \"\"\"\n \\n\n [*] Information Verbose:\n Ontop of Asking for the Username and \n Password Should we Gather Even\n More Information about the User such as \n GEOIP / ISP / User Agent etc. etc. \n This Requires Curl to be installed or \n file_get_contents in PHP on selected Server \n \"\"\"\n # display About this\n self.outputText(menuDisplay, \"yellow\")\n # Set Verbose of Intel Gather\n self.results = input(\n \"\\nWould you like to Build a More In-depth Intel Report on Victim ( y Or n ): \")\n if self.results.lower()[0] == \"y\" or self.results.lower() == \"yes\":\n storageList.append(\"INTEL_VERBOSE_LOUD\")\n elif self.results.lower()[0] == \"n\" or self.results.lower() == \"no\":\n storageList.append(\"INTEL_VERBOSE_HUSH\")\n else:\n # Anything Else lets just Hush it then\n storageList.append(\"INTEL_VERBOSE_HUSH\")\n # Redirect Ask\n menuDisplay = \"\"\"\n \\n\n [*] Hitting Enter Keeps the Default \n = Redirect URL Which is the Same \n = URL of the Full-Screen Attack \n = you picked. For Instance If \n = it was AOL Full-Screen Attack\n = the default URL redirect would \n = be https://my.screenname.aol.com\n \"\"\"\n # display About this\n self.outputText(menuDisplay, \"yellow\")\n self.results = input(\n \"After the Victim Inputs Info Where Should the Script Redirect?: \")\n # Check if nothing was entered\n if self.results == \"\" or self.results == \" \":\n # Append Default Redirect Naaaow\n storageList.append(\"REDIRECT_DEFAULT\")\n else:\n # No Checking on URL Let Them Use Whatever lol there bad i guess\n # Append Default Redirect Naaaow\n storageList.append(self.results)\n\n # Spoof link\n menuDisplay = \"\"\"\n \\n\n [*] Hitting Enter Keeps the Default \n = What do you want the URL Link to be spoofed\n = to? This will be displayed when the user\n = rolls over the link. Basically tricking\n = them making them think they are going\n = to that URL..\n \"\"\"\n # display About this\n self.outputText(menuDisplay, \"yellow\")\n self.results = input(\n \"What should the URL be spoofed to? (ex: https://my.screenname.aol.com): \")\n # Check if nothing was entered\n if self.results == \"\" or self.results == \" \":\n # Append Default Redirect Naaaow\n storageList.append(\"DEFAULT_SPOOF\")\n else:\n # Append specified spoof url now\n storageList.append(self.results)\n\n # link name\n menuDisplay = \"\"\"\n \\n\n [*] Hitting Enter Keeps the Default \n = What do you want the Actual URL name\n = to be?\n \"\"\"\n # display About this\n self.outputText(menuDisplay, \"yellow\")\n self.results = input(\n \"What should the URL name be? 
(ex: Aol Login): \")\n # Check if nothing was entered\n if self.results == \"\" or self.results == \" \":\n # Append Default Redirect Naaaow\n storageList.append(\"DEFAULT_URL_NAME\")\n else:\n # Append url name\n storageList.append(self.results)\n\n menuDisplay = \"\"\"\n \\n\n [*] Hitting Enter Keeps the Default \n = name of Index.php If you feel \n = the need to change the name please \n = do not add the actual extension .php \n = along with it only add whatever crazy \n = name you come up with\n \"\"\"\n # display About this\n self.outputText(menuDisplay, \"yellow\")\n self.results = input(\n \"What Should the Main Index PHP File Be Called? ( ex: login ) : \")\n if self.results == \"\" or self.results == \" \":\n # Append Default Redirect Naaaow\n storageList.append(\"INDEX_DEFAULT\")\n else:\n check = self.results.find(\".\")\n # if it doesn't return a -1 it found a decimal\n if check != -1:\n # Throw Error we found a dot\n self.errorOutput(\n \"[*] Error - Didn't We Say Not to Add an Extension, WOW...\", \"yellow\")\n else:\n # Append name of the File\n storageList.append(self.results)\n\n menuDisplay = \"\"\"\n \\n\n [*] Hitting Enter Keeps the Default \n = Title of the Webpage.\n \"\"\"\n # display About this\n self.outputText(menuDisplay, \"blue\")\n self.results = input(\n \"What Should the Title of the Page be? (ex: AOL Login ) : \")\n if self.results == \"\" or self.results == \" \":\n # Append Default Redirect Naaaow\n storageList.append(\"TITLE_DEFAULT\")\n else:\n # Append name of the File\n storageList.append(self.results)\n\n # Return Storage List for Processing\n return storageList", "def _help_dialogue(self):\n webbrowser.open('https://github.com/ldrumm/yubikey-totp-gui/wiki')", "def optionHelp(self):\n return {}", "def show_general_help(self):\n QMessageBox.question(self, 'General help', get_general_help(), QMessageBox.Ok | QMessageBox.NoButton)", "def __gitConfigure(self):\n e5App().getObject(\"UserInterface\").showPreferences(\"zzz_gitPage\")", "def open_ssh():\n print('Opening SSH...')", "def help():\n print(UI.HELP)", "def help(self):\n\t\treturn", "def quick_test():\n do_command('Help: Command=Help')\n do_command('Help: Command=\"GetInfo\"')\n #do_command('SetPreference: Name=GUI/Theme Value=classic Reload=1')", "def display_other_options():\n print(\"> - Next Song page.\")\n print(\"< - Previous song page.\")\n print(\"q - to quit\")", "def cmd_HELP(self, line):\r\n configs = [UserOptions(self.terminal), ContainerOptions(self.terminal),\r\n NodeOptions(self.terminal), ParameterOptions(self.terminal),\r\n InterfaceOptions(self.terminal),\r\n ConnectionOptions(self.terminal),\r\n RobotOptions(self.terminal), MachineOptions(self.terminal)]\r\n\r\n for config in configs:\r\n self.terminal.nextLine()\r\n config.opt_help()", "def showHelp(self):\n for circle in self.helpCircles:\n self.can.itemconfig(circle, **SCOPT)\n self.helpShown = True", "def _about_dialogue(self):\n webbrowser.open('https://github.com/ldrumm/yubikey-totp-gui')", "def show_help():\n messagebox.showinfo(title='How to Use', message=\"It's really easy.\")", "def habHelp(self):\n rf = os.path.join('docs','helpButtons','prefsHabitat.html')\n self.showHelpFile( rf )", "def _help(self):\n self.onecmd('help')", "def showSettings(self):\n self.c.show()", "def show_help():\n pass", "def substratesHelp(self):\n rf = os.path.join('docs','helpButtons','prefsSubstrate.html')\n self.showHelpFile( rf )", "def settings( self, selection ):\r\n if( self.__optionsDatabase.showOptionsDatabase() ):\r\n self.main( 
selection )", "def _show_help(self):\n QMessageBox.information(\n self, 'Help',\n \"Help:\\n\"\n \"'+'/'-': zoom\\nleft/right arrow: left/right\\n\"\n \"up/down arrow: superior/inferior\\n\"\n \"left angle bracket/right angle bracket: anterior/posterior\")", "def showSettings():\n cq = dz()\n cq.abag()", "def help_general(game):\n game.window.clear()\n\n game.window.addstr(1, 1, \"Use vim keys or arrows for movement\")\n\n game.window.addstr(12, 1, \"move towards enemies to attack them\")\n\n game.window.addstr(14, 1, \"press e to enter inventory view\")\n game.window.addstr(15, 1, \"press ? for help\")\n game.window.addstr(15, 1, \"press q to quit\")\n\n game.window.getch()", "def test_010_view_settings(self):\n\n testflow.step(\"Showing setting via CLI\")\n assert self.settings_cli.run('show')[0], \"Failed to view settings\"", "def on_about(self):\n MessageBox.showinfo(\"SuperSID\", self.controller.about_app())", "def _options(self):\n return", "def display_help(self):\n pass" ]
[ "0.68342537", "0.65731114", "0.6296498", "0.6134775", "0.6071951", "0.60229146", "0.6021095", "0.5994295", "0.59757835", "0.5967947", "0.5923113", "0.588971", "0.5887491", "0.58640087", "0.5841276", "0.5833848", "0.58013505", "0.5783216", "0.57594115", "0.5745592", "0.57276213", "0.5726179", "0.5725387", "0.5696184", "0.5689935", "0.5677312", "0.5672639", "0.5659093", "0.5654302", "0.5649071" ]
0.77246684
0
click the checkbox "Log in as root with password".
def click_the_checkbox_log_in_as_root_with_password(driver): assert wait_on_element(driver, 5, '//mat-checkbox[contains(.,"Log in as Root with Password")]', 'clickable') time.sleep(0.5) value_exist = attribute_value_exist(driver, '//mat-checkbox[contains(.,"Log in as Root with Password")]', 'class', 'mat-checkbox-checked') if not value_exist: driver.find_element_by_xpath('//mat-checkbox[contains(.,"Log in as Root with Password")]').click() wait_for_value = wait_for_attribute_value(driver, 7, '//mat-checkbox[contains(.,"Log in as Root with Password")]', 'class', 'mat-checkbox-checked') assert wait_for_value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loginAsManager(self):\n self.browser.open('http://nohost/plone/')\n self.browser.getLink('Log in').click()\n self.browser.getControl('Login Name').value = 'root'\n self.browser.getControl('Password').value = 'secret'\n self.browser.getControl('Log in').click()", "def login(self):\n self.driver.find_element(*BaseLocators.PRIMARY_BUTTON).click()", "def click_login(self):\n self.login.click()\n return self.login", "def login_appear_enter_root_and_password(driver, user, password):\n assert wait_on_element(driver, 10, xpaths.login.user_Input)\n driver.find_element_by_xpath(xpaths.login.user_Input).clear()\n driver.find_element_by_xpath(xpaths.login.user_Input).send_keys(user)\n driver.find_element_by_xpath(xpaths.login.password_Input).clear()\n driver.find_element_by_xpath(xpaths.login.password_Input).send_keys(password)\n assert wait_on_element(driver, 4, xpaths.login.signin_Button, 'clickable')\n driver.find_element_by_xpath(xpaths.login.signin_Button).click()", "def login(self):\n self.open(base_url + '/login')\n self.type(\"#email\", test_user.email)\n self.type(\"#password\", test_user.password)\n self.click('input[type=\"submit\"]')", "def test_login(self):\n self.driver.find_element_by_link_text(\"Sign in\").click()\n self.driver.find_element_by_id(\"email\").send_keys(\"[email protected]\")\n self.driver.find_element_by_id(\"passwd\").send_keys(\"control123\")\n self.driver.find_element_by_id(\"SubmitLogin\").click()\n time.sleep(5)", "def click_login_button(self):", "def click_login_button(self):\n submit_button = self.locate_element_by_css_selector(LOGIN_BUTTON_SELECTPR)\n submit_button.click()", "def login(self):\n self.driver.get(self.login)\n PAUSE = 2\n time.sleep(PAUSE)\n user_input = self.driver.find_element_by_name('username')\n pass_input = self.driver.find_element_by_name('password')\n login_button = self.driver.find_elements_by_xpath(\"//div[contains(text(),'Log In')]\")[0]\n user_input.send_keys(self.username)\n pass_input.send_keys(self.password)\n login_button.click()\n time.sleep(PAUSE)", "def test_login(self):\n # Open the admin index page\n self.open(reverse('admin:index'))\n\n # Selenium knows it has to wait for page loads (except for AJAX requests)\n # so we don't need to do anything about that, and can just\n # call find_css. 
Since we can chain methods, we can\n # call the built-in send_keys method right away to change the\n # value of the field\n self.wd.find_css('#id_username').send_keys(\"admin\")\n # for the password, we can now just call find_css since we know the page\n # has been rendered\n self.wd.find_css(\"#id_password\").send_keys('pw')\n # You're not limited to CSS selectors only, check\n # http://seleniumhq.org/docs/03_webdriver.html for\n # a more compreehensive documentation.\n self.wd.find_element_by_xpath('//input[@value=\"Log in\"]').click()\n # Again, after submiting the form, we'll use the find_css helper\n # method and pass as a CSS selector, an id that will only exist\n # on the index page and not the login page\n self.wd.find_css(\"#content-main\")", "def test_login(self):\n url_extend = 'user_auth/login/'\n self.browser.get(self.url + url_extend)\n\n # enter the username and password.\n username_field = self.browser.find_element_by_name('user_name')\n username_field.send_keys('user4')\n password_field = self.browser.find_element_by_name('password')\n password_field.send_keys('user')\n\n # click login button.\n # get the first input button under the first form in login page.\n login_button = self.browser.find_element_by_xpath(\"//form[1]/fieldset[1]/input[@type='submit']\")\n try:\n login_button.click()\n except:\n raise Exception(\"Login Error!\")", "def login(self):\n\t\treturn", "def login():", "def login():", "def open_login_page(self):\n com_util.tap_on(self.driver, element['clickOnAtSign'])", "def login(self, asRoot=True):\n self.pc = Telnet(self.ipaddr)\n self.pc.expect(['login'])\n self.pc.write(self.user+'\\n')\n self.pc.expect(['Password'])\n self.pc.write(self.passwd+'\\n')\n ix, ox, tx = self.pc.expect(self.prompt, self.timeout)\n if ix == -1:\n raise Exception('Can not telnet to \\'%s\\'' %self.ipaddr)", "def login_click_change_pwd():\r\n msg, status = \"\", True\r\n try:\r\n sleep(3)\r\n 'Click on change password'\r\n flag = ui_controls.button(get_obj_identifier('login_changePassword_btn'))\r\n\r\n status = False if not flag else True\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg", "def login_action(login_page, request, driver):\n login_page.login(request.config.getoption(\"--username\"), request.config.getoption(\"--password\"))", "def log_in_button_click(self):\n waiter.find_element(self.driver, LOG_IN_BUTTON_XPATH, by=XPATH).click()", "def login(self):\n self.open(self.urls['login'])\n self.select_form(nr=0)\n\n self.form['custno'] = self.username\n self.form['password'] = self.password\n res = self.submit()\n \n return res", "def _log_into_shib( self, driver ):\n driver.find_element_by_id(\"username\").clear()\n driver.find_element_by_id(\"username\").send_keys( self.USERNAME )\n driver.find_element_by_id(\"password\").clear()\n driver.find_element_by_id(\"password\").send_keys( self.PASSWORD )\n driver.find_element_by_css_selector(\"button[type=\\\"submit\\\"]\").click()\n return driver", "def login(self):\n driver = self.selenium_test.driver\n driver.get(self.selenium_test.get_server_url())\n self.selenium_test.wait_fn(self.preenche_username)\n driver.find_element_by_id('btnlogin').click()\n self.selenium_test.wait_to_be_logged_in()", "def if_the_login_page_appears_enter_root_and_testing(driver, user, password):\n if not is_element_present(driver, '//mat-list-item[@ix-auto=\"option__Dashboard\"]'):\n assert wait_on_element(driver, 10, '//input[@data-placeholder=\"Username\"]')\n 
driver.find_element_by_xpath('//input[@data-placeholder=\"Username\"]').clear()\n driver.find_element_by_xpath('//input[@data-placeholder=\"Username\"]').send_keys(user)\n driver.find_element_by_xpath('//input[@data-placeholder=\"Password\"]').clear()\n driver.find_element_by_xpath('//input[@data-placeholder=\"Password\"]').send_keys(password)\n assert wait_on_element(driver, 5, '//button[@name=\"signin_button\"]', 'clickable')\n driver.find_element_by_xpath('//button[@name=\"signin_button\"]').click()\n else:\n driver.find_element_by_xpath('//mat-list-item[@ix-auto=\"option__Dashboard\"]').click()", "def login(self):", "def login(self):\n self.client.login(username=self.user.username, password='test')", "def OnButtonLoginHelpButton(self, event):\r\n\t\twebbrowser.open(consts.URL_HELP_LOGIN)", "def login():\n pass", "def login(self):\r\n\r\n # Open browser with the login URL\r\n self.browser.open(self.config[\"base_url\"] + \"login\")\r\n\r\n # Select the login form\r\n self.browser.select_form('form[action=\"/login/\"]')\r\n\r\n # Fill the login form.\r\n self.browser[\"email\"] = self.config[\"email\"]\r\n self.browser[\"password\"] = self.config[\"password\"]\r\n\r\n # Submit form\r\n self.browser.submit_selected()", "def login(self):\n\t\twhile True:\n\t\t\tos.system('clear')\n\t\t\tprint(\"1. Sign in\")\n\t\t\tprint(\"2. Sign up\")\n\t\t\tchoice = input()\n\t\t\tif choice == \"1\":\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tself._sign_up()\n\n\t\twhile self._input():\n\t\t\tos.system(\"clear\")\n\t\t\tprint(\"Wrong username or password! Please re-enter.\")", "def login(**kwargs):\n root_commands.cmd_login(**kwargs)" ]
[ "0.74703526", "0.74241596", "0.74170965", "0.7220949", "0.7136605", "0.7132571", "0.69986665", "0.6882587", "0.68124306", "0.6740211", "0.67398465", "0.6739545", "0.6698767", "0.6698767", "0.6577201", "0.6571196", "0.6553244", "0.65500057", "0.6543612", "0.65392774", "0.6525947", "0.65228975", "0.6521706", "0.64517087", "0.6424028", "0.6420586", "0.6408352", "0.64048445", "0.63951296", "0.6389357" ]
0.7759269
0
click Start Automatically SSH checkbox and enable the SSH service.
def click_start_automatically_ssh_checkbox_and_enable_the_ssh_service(driver): assert wait_on_element(driver, 5, xpaths.services.title) time.sleep(1) assert wait_on_element(driver, 5, '//tr[contains(.,"SSH")]//mat-checkbox') value_exist = attribute_value_exist(driver, '//tr[contains(.,"SSH")]//mat-checkbox', 'class', 'mat-checkbox-checked') if not value_exist: driver.find_element_by_xpath('//tr[contains(.,"SSH")]//mat-checkbox').click() assert wait_on_element(driver, 5, '//tr[contains(.,"SSH")]//mat-slide-toggle/label', 'clickable') value_exist = attribute_value_exist(driver, xpaths.services.ssh_Service_Toggle, 'class', 'mat-checked') if not value_exist: driver.find_element_by_xpath('//tr[contains(.,"SSH")]//mat-slide-toggle/label').click() time.sleep(1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def press_on_configure_ssh(driver):\n assert wait_on_element(driver, 5, xpaths.services.ssh_Service_Button, 'clickable')\n driver.find_element_by_xpath(xpaths.services.ssh_Service_Button).click()", "def open_ssh():\n print('Opening SSH...')", "def scp_enable(task):\n cmd = \"ip scp server enable\"\n task.run(task=netmiko_send_config, config_commands=cmd)\n c_print(f\"*** {task.host}: SCP has been enabled ***\")", "def _start_ssh(self):\n try:\n message = '\\nEnter number you want to connect: '\n num = raw_input(message)\n while not int(num) in self.instance_list:\n num = raw_input(message)\n\n message_user = 'Enter username for ssh_login(blank = %s): ' % DEFAULT_USER \n user = raw_input(message_user)\n if not user:\n user = DEFAULT_USER\n \n target = self.instance_list[int(num)]\n ssh_key_path = os.path.join(SSH_DIR, target['key'])\n if not os.path.exists(ssh_key_path):\n print 'SSH key not found! KEY_PATH[ %s ]' % ssh_key_path\n return\n\n command = COMMAND % {'sshkey' : ssh_key_path, 'user' : user, 'server' : target['dns'], 'port' : self.port}\n\n print 'Connecting to \"%s\"... [SSH COMMAND: %s ]' % (target['name'], command)\n os.system(command)\n except KeyboardInterrupt:\n print '\\nAborted!'\n finally:\n sys.exit()", "def setup_sshd(self):\n # Update apt repository\n command = 'apt update -y > /dev/null 2>&1'\n if self.debug is True:\n print('Executing apt update -y ')\n try:\n os.system('echo %s| sudo -S %s' % (self.sudo_pw, command))\n except:\n print(\"An error occured during 'apt update -u'\")\n\n # Install ssh package\n command = 'apt install ssh -y > /dev/null 2>&1'\n if self.debug is True:\n print('Executing apt install ssh -y')\n try:\n os.system('echo %s| sudo -S %s' % (self.sudo_pw, command))\n except:\n print(\"An error occured during 'apt install ssh -y' while installing ssh\")\n\n # Configure sshd using the config\n self.config_sshd()\n\n # Reload sshd config\n try:\n command = \"service ssh restart > /dev/null 2>&1\"\n os.system('echo %s| sudo -S %s' % (self.sudo_pw, command))\n print('SSHD_installed and configured successfully, SSHD listening on port {}'.format(self.ssh_port))\n except:\n print('An error occured during ssh \"sudo service ssh reload\" while installing ssh')", "def enable_scp(self) -> None:\n if self.is_active():\n device: ASADevice = self\n else:\n device = self.peer_device\n\n if not device.is_active():\n log.error(\"Host %s: Unable to establish a connection with the active device\", self.host)\n raise FileTransferError\n\n try:\n device.config(\"ssh scopy enable\")\n except CommandError:\n log.error(\"Host %s: Unable to enable scopy on the device\", self.host)\n raise FileTransferError\n\n log.info(\"Host %s: ssh copy enabled.\", self.host)\n device.save()", "def set_management_ssh(enabled=True, deploy=False):\n\n if enabled is True:\n value = \"no\"\n elif enabled is False:\n value = \"yes\"\n else:\n raise CommandExecutionError(\n \"Invalid option provided for service enabled option.\"\n )\n\n ret = {}\n\n query = {\n \"type\": \"config\",\n \"action\": \"set\",\n \"xpath\": \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/service\",\n \"element\": \"<disable-ssh>{}</disable-ssh>\".format(value),\n }\n\n ret.update(__proxy__[\"panos.call\"](query))\n\n if deploy is True:\n ret.update(commit())\n\n return ret", "def enable_service(service_name, start_type='auto'):\n run_program(['sc', 'config', service_name, 'start=', start_type])", "def install_ssh(app):\n os.system('lxc-attach -n %s -- apk update' % app)\n 
os.system('lxc-attach -n %s -- apk add openssh' % app)\n # Config sshd\n config = '/var/lib/lxc/%s/rootfs/etc/ssh/sshd_config' % app\n with open(config, \"a\") as myfile:\n myfile.write(\"RSAAuthentication yes\\nPubkeyAuthentication yes\\nPermitRootLogin yes\\nPermitEmptyPasswords yes\")\n os.system('lxc-attach -n %s -- /etc/init.d/sshd start' % app)", "def wait_for_ssh(self):\n self.wait_for_status(16)\n printy(\"The instance is now running ...\")\n # The instance is running, but we give it 60 more seconds for running\n # SSHD\n printy(\"Waiting 60 seconds for SSH server to start ...\")\n time.sleep(60)", "def test_ssh(self):\n assert self.rc_conf.has_key('sshd_enable')\n assert self.rc_conf['sshd_enable'] == '\"YES\"'\n sshd_conf = open('/etc/ssh/sshd_config').read()\n assert re.search('[^#]PermitRootLogin yes', sshd_conf)", "def ssh():\n vbox = Vbox(env.vm_name)\n with vbox as session:\n session.wait_for_ssh()\n open_shell()", "def onStartAssistModeToggled(self, checked):\r\n # productive\r\n profprint()\r\n if checked:\r\n self.fiducialObturatorButton.checked = 0\r\n self.fiducialButton.checked = 0\r\n self.fiducialButton.text = \"2. Start Giving Needle Tips [CTRL + ENTER]\"\r\n self.start(self.addCTLPoints)\r\n self.startAssistModeButton.text = \"Stop Assisted Manual Segmentation\"\r\n else:\r\n self.stop()\r\n self.startAssistModeButton.text = \"Start Assisted Manual Segmentation\"", "def _toggle_server(self):\r\n\t\t_logger.debug(\"Toggle server button is pressed.\")\r\n\r\n\t\tif not comm_server.is_running():\r\n\t\t\tserver_ip = self.children[\"entry_IP\"].get()\r\n\t\t\tserver_port = int(self.children[\"entry_port\"].get())\r\n\t\t\tif not comm_server.start_server(server_ip, server_port):\r\n\t\t\t\treturn\r\n\t\t\tself._save_server_config(server_ip, server_port)\r\n\r\n\t\t\tself.children[\"btn_toggle_server\"].config(text = \"關閉伺服器\")\r\n\t\t\tself._update_connection_num(\"\")\r\n\t\telse:\r\n\t\t\tcomm_server.stop_server()\r\n\t\t\tself.children[\"btn_toggle_server\"].config(text = \"啟動伺服器\")\r\n\t\t\tself.children[\"label_connections\"].config(text = \"連接數: -/-\")", "def ensure_ssh_running(self, ensure_ssh_running):\n\n self._ensure_ssh_running = ensure_ssh_running", "def start_salt():\n with fabric_settings(warn_only=True):\n if env.host == env.master_server.public_ip:\n sudo(\"systemctl start salt-master\")\n time.sleep(3)\n sudo(\"systemctl start salt-minion\")", "def _activate_ssh_coordinator(self, coordinator_constructor):\n self._has_ssh_devices = True\n self._ssh_coord = coordinator_constructor(self)\n\n return", "def start(self):\n keyfile = self._getKeyPath()\n if j.do.getSSHKeyPathFromAgent(\"$(key.name)\", die=False) is None:\n cmd = 'ssh-add %s' % keyfile\n j.do.executeInteractive(cmd)", "def sshtest():\n vbox = Vbox(env.vm_name)\n print vbox.ssh_up", "def elActivateGraphicalLogin(self):\n # see http://docs.redhat.com/docs/en-US/Red_Hat_Enterprise_Linux/6/html/Installation_Guide/s1-kickstart2-options.html\n commandSection = self.sectionByName(\"command\")\n commandSection.string = commandSection.string + \"\"\"\n#\n# XWindows configuration information.\nxconfig --startxonboot --defaultdesktop=GNOME\n\"\"\"\n return self", "def activateButtonClicked(self):\n print(\"trying to start process...\")\n subprocess.Popen(\"/usr/local/bin/g13d --config /usr/local/bin/defaults.bind\", shell=True)\n self.checkProcess()", "def turn_on(self, **kwargs):\n if not self.is_on:\n _LOGGER.debug(\"Sending START command to: %s\", self._name)\n self._api.control('START')\n 
self._mower_status = STATUS_EXECUTING_START\n self.schedule_update_ha_state()", "def shell_enabled_changed(self, enabled):\n self.set_enabled(enabled)", "def editor_cloud9_ssh_command():\n docker_vars = _editor_cloud9_docker_vars()\n print \"ssh -p %s -i private/ssh/id_rsa_devbox root@%s\" % (docker_vars['public_ssh_port'], env.host)", "def enable_host(self, name):\n from soppa.local import aslocal\n self.guest_ip = self.guest_ip()\n self.guest_host_name = name\n # Host (remote) change\n self.file.set_setting('/etc/hosts', '{0} {1}'.format('127.0.0.1', self.guest_host_name))\n # local change\n aslocal()\n self.file.set_setting('/etc/hosts', '{0} {1}'.format(self.guest_ip, name))", "async def ping_ssh(self) -> bool:\n # pause logic\n if not self.running.is_set():\n self.add_to_output(\"Paused...\")\n await self.running.wait()\n\n # ping port 22 (SSH)\n if await self.ping(22):\n # ping returned true, SSH is up\n return True\n else:\n # ping returned false, SSH is down\n return False", "async def enable(self, ctx):\n await self.config.guild(ctx.guild).auto.set(True)\n await ctx.send(_(\"Automatic voicechannel creation enabled.\"))", "def do(self):\n call_command('activate-ssl')", "def startup(self):\n started = False\n self.start_driver_ctrl()\n return started", "def enable(service_name: str, print_action: bool = True):\n \n if print_action:\n print_log_status(3, f\"Enabling `{service_name}`\")\n \n run_command(f\"sudo systemctl enable {service_name}\")" ]
[ "0.7518686", "0.6496028", "0.6454716", "0.62138116", "0.6133582", "0.6122777", "0.5973726", "0.5902316", "0.5857052", "0.57772857", "0.57702386", "0.57609695", "0.57590127", "0.57451016", "0.5743138", "0.56877005", "0.56823933", "0.56456465", "0.5626627", "0.5604173", "0.55403817", "0.5423925", "0.5423725", "0.540135", "0.53886145", "0.5387687", "0.53812957", "0.53586745", "0.5351758", "0.53496534" ]
0.7955349
0
the service should be enabled with no errors.
def the_service_should_be_enabled_with_no_errors(driver): assert wait_on_element_disappear(driver, 30, xpaths.progress.spinner) assert wait_for_attribute_value(driver, 20, xpaths.services.ssh_Service_Toggle, 'class', 'mat-checked')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_enabled(self):", "def enable(self) -> None:", "def Enabled(self) -> bool:", "def enabled(name, **kwargs):\n if not available(name):\n log.error(\"Service %s not found\", name)\n return False\n\n run_file = os.path.join(SERVICE_DIR, name, \"run\")\n down_file = os.path.join(SERVICE_DIR, name, \"down\")\n\n return (\n os.path.isfile(run_file)\n and os.access(run_file, os.X_OK)\n and not os.path.isfile(down_file)\n )", "def enable(self):\n pass", "def enable_service(self, service):\n svc = self.service_path % service\n ret = self.rclient.put(svc)\n if ret.status != restclient.Status.ACCEPTED:\n exception_msg = (_(\"Cannot enable %s service.\") % service)\n raise exception.ShareBackendException(msg=exception_msg)", "def is_in_service(self) -> bool:\n return self._enabled", "def enabled(self):\n raise NotImplementedError", "def test_hugepage_service_state(Service):\n\n service = Service('disable-transparent-hugepages')\n\n assert service.is_enabled\n assert service.is_running", "def print_service_available():\n if WithingsDataManager.service_available is not True:\n _LOGGER.info(\"Looks like the service is available again\")\n WithingsDataManager.service_available = True\n return True", "def enable_service(self, **kwargs):\n put_body = json.dumps(kwargs)\n resp, body = self.put('os-services/enable', put_body)\n body = json.loads(body)\n self.validate_response(schema.enable_service, resp, body)\n return rest_client.ResponseBody(resp, body)", "def print_service_unavailable():\n if WithingsDataManager.service_available is not False:\n _LOGGER.error(\"Looks like the service is not available at the moment\")\n WithingsDataManager.service_available = False\n return True", "def enable(self):", "def is_enabled(self):\n self._raise_not_implemented()", "def test_enabled(self):\n # OSA script should have been installed in setUp function, which sets\n # enabled to True by default.\n self.assertTrue(self.run_function(\"assistive.enabled\", [OSA_SCRIPT]))\n # Disable OSA Script\n self.run_function(\"assistive.enable\", [OSA_SCRIPT, False])\n # Assert against new disabled status\n self.assertFalse(self.run_function(\"assistive.enabled\", [OSA_SCRIPT]))", "def definition_of_services(self):\r\n return True", "def test_service_support(self):\n self.assertFalse(self.service_class.supports_bug_trackers)\n self.assertTrue(self.service_class.supports_repositories)", "def enabled(self):\n return True", "def enabled(self):\n return True", "def enable(service_name: str, print_action: bool = True):\n \n if print_action:\n print_log_status(3, f\"Enabling `{service_name}`\")\n \n run_command(f\"sudo systemctl enable {service_name}\")", "def enable(self):\n self.enabled = True", "def enable(self):\n self.enabled = True", "def enable(self):\n raise NotImplementedError", "def unable_service(req):\n\tglobal active_\n \n\tactive_ = req.data\n\tres = SetBoolResponse()\n\tres.success = True\n\tres.message = 'Done!'\n\n\treturn res", "def unrecognised_service(service_name):\n print('Service {} not (yet) supported.'.format(service_name))\n pass", "def _enable(self):\n self.debug_log(\"Enabling...\")\n self._register_handlers()", "def enabled(self) -> bool:\n return False", "def isEnabled(self) -> bool:\n ...", "def test_service_support(self):\n self.assertTrue(self.service_class.supports_bug_trackers)\n self.assertTrue(self.service_class.supports_repositories)", "def test_services(self):\n self.assertTrue(setup_component(self.hass, remote.DOMAIN,\n TEST_PLATFORM))" ]
[ "0.6779741", "0.67622614", "0.675949", "0.67435724", "0.67135346", "0.671267", "0.66452473", "0.6642858", "0.6601505", "0.6555925", "0.65523106", "0.64544934", "0.63838243", "0.6378184", "0.6370161", "0.6368944", "0.63200057", "0.6312439", "0.6312439", "0.6309658", "0.63050085", "0.63050085", "0.62975776", "0.6273166", "0.6269025", "0.6268267", "0.626695", "0.62113327", "0.61591727", "0.61590755" ]
0.7326841
0
run ssh root@"{host}" with root password "{password}".
def run_ssh_root_host_with_root_password(driver, host, password): global ssh_result ssh_result = ssh_cmd('ls -la', 'root', password, host)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ssh(host_=None):\n run_command_on_selected_server(open_shell, host_=host_)", "def css_login_as_root(css_test_machine):\n ssh_config = collections.namedtuple('ssh_config',\n ('hostname port username '\n 'rsa_key_file password'))\n config = ssh_config(hostname=css_test_machine['public_ip'],\n port=22,\n username=\"root\",\n rsa_key_file=\"\", # Use password for now\n password=css_test_machine['root_password'])\n logger.debug(\"ssh instantiated\")\n yield SshUtil(config)\n # Close connection?", "def ssh():\n env['remote_port'] = env['port_map']['22']\n\n sys.stdout.write('Connecting to SSH session on remote port %(remote_port)s\\n' % env)\n\n run('chmod 600 %(pair_private_key)s' % env)\n\n client = paramiko.SSHClient()\n client.load_system_host_keys()\n client.connect(\n hostname=env['relay_server'],\n port=int(env['remote_port']),\n username=env['pair_user'],\n key_filename=env['pair_private_key']\n )\n\n channel = client.invoke_shell()\n posix_shell(channel)", "def ssh(pi):\n command = \"ssh {0}\".format(pi)\n subprocess.Popen(command, shell=True)", "def the_root_user_should_be_able_to_login_with_ssh(driver):\n assert ssh_result['result'], ssh_result['output']\n assert '..' in ssh_result['output'], ssh_result['output']", "def connect_new_ssh(child, password):\n child.sendline('yes');\n index = child.expect('password: ');\n if index == 0:\n child.sendline(password);", "def editor_cloud9_ssh_command():\n docker_vars = _editor_cloud9_docker_vars()\n print \"ssh -p %s -i private/ssh/id_rsa_devbox root@%s\" % (docker_vars['public_ssh_port'], env.host)", "def test_verify_ssh_access_with_root_works(driver):", "def ssh_command (user, host, password, command):\n ssh_newkey = 'Are you sure you want to continue connecting (yes/no)?'\n child = pexpect.spawn('ssh -l %s %s %s'%(user, host, command))\n i = child.expect([ssh_newkey, PASSWORD, pexpect.TIMEOUT])\n if i == 0: # First Time access - send yes to connect.\n child.sendline ('yes')\n child.expect (PASSWORD)\n i = child.expect([PASSWORD,pexpect.TIMEOUT])\n if i == 0: # prompted for password\n child.sendline(password)\n elif i == 1: # Got Timeout\n print 'ERROR!'\n print 'SSH could not login. Here is what SSH said:'\n print child.before, child.after\n print str(child)\n return None\n if i == 1: # Asked for Password - provide it.\n child.sendline(password)\n elif i == 2:\n print 'ERROR!'\n print 'SSH could not login. 
Here is what SSH said:'\n print child.before, child.after\n print str(child)\n return None\n return child", "def ssh_to(srvname, srvport=22, srvuser='root'):\n xssh = subprocess.Popen(['/usr/bin/ssh', '-o', 'StrictHostKeyChecking=no', '-o',\n 'UserKnownHostsFile=/dev/null', '-p', str(srvport),\n '%s@%s' % (srvuser, srvname)])\n xssh.communicate()", "def ssh_cmd(ctx):\n pass", "def sshpass_to(srvname, srvport=22, srvuser='root', srvpass=''):\n newenv = {'PATH': os.environ['PATH'], 'SSHPASS': srvpass}\n\n options = ['-oStrictHostKeyChecking=no', '-oUserKnownHostsFile=/dev/null']\n if udata.psc_force_sha1:\n options.append('-oKexAlgorithms=diffie-hellman-group1-sha1')\n options.append('-oCiphers=3des-cbc,blowfish-cbc')\n\n xssh = subprocess.Popen(['/usr/bin/sshpass', '-e', 'ssh'] + options +\n ['-p', str(srvport), '%s@%s' % (srvuser, srvname)], env=newenv)\n xssh.communicate()", "def ssh():\n vbox = Vbox(env.vm_name)\n with vbox as session:\n session.wait_for_ssh()\n open_shell()", "def main():\r\n parser = argparse.ArgumentParser(description=\"\"\"Starts SSH session with one\r\n of ARC\\'s Raspberrypis.\"\"\")\r\n\r\n parser.add_argument('usr', help='Username for the remote device.')\r\n parser.add_argument('pwd', help='Password for [email protected].')\r\n\r\n args = parser.parse_args()\r\n\r\n address = get_IP(IP_list(args.pwd), args.usr)\r\n os.system(\"ssh \" + \"pi\" + \"@\" + address)", "def login (self,server,username,password='',terminal_type='ansi',original_prompts=r\"][#$]|~[#$]|bash.*?[#$]|[#$] \",login_timeout=10):\r\n cmd = \"ssh -l %s %s\" % (username, server)\r\n spawn.__init__(self, cmd, timeout=login_timeout)\r\n #, \"(?i)no route to host\"])\r\n i = self.expect([\"(?i)are you sure you want to continue connecting\", original_prompts, \"(?i)password\", \"(?i)permission denied\", \"(?i)terminal type\", TIMEOUT, \"(?i)connection closed by remote host\"])\r\n if i==0: # New certificate -- always accept it. This is what you if SSH does not have the remote host's public key stored in the cache.\r\n self.sendline(\"yes\")\r\n i = self.expect([\"(?i)are you sure you want to continue connecting\", original_prompts, \"(?i)password\", \"(?i)permission denied\", \"(?i)terminal type\", TIMEOUT])\r\n if i==2: # password\r\n self.sendline(password)\r\n i = self.expect([\"(?i)are you sure you want to continue connecting\", original_prompts, \"(?i)password\", \"(?i)permission denied\", \"(?i)terminal type\", TIMEOUT])\r\n if i==4:\r\n self.sendline(terminal_type)\r\n i = self.expect([\"(?i)are you sure you want to continue connecting\", original_prompts, \"(?i)password\", \"(?i)permission denied\", \"(?i)terminal type\", TIMEOUT])\r\n\r\n if i==0:\r\n # This is weird. This should not happen twice in a row.\r\n self.close()\r\n return False\r\n elif i==1: # can occur if you have a public key pair set to authenticate. \r\n ### TODO: May NOT be OK if expect() matched a false prompt.\r\n pass\r\n elif i==2: # password prompt again\r\n # For incorrect passwords, some ssh servers will\r\n # ask for the password again, others return 'denied' right away.\r\n # If we get the password prompt again then this means\r\n # we didn't get the password right the first time. \r\n self.close()\r\n return False\r\n elif i==3: # permission denied -- password was bad.\r\n self.close()\r\n return False\r\n elif i==4: # terminal type again? WTF?\r\n self.close()\r\n return False\r\n elif i==5: # Timeout\r\n # This is tricky... 
presume that we are at the command-line prompt.\r\n # It may be that the prompt was so weird that we couldn't match it.\r\n pass\r\n elif i==6: # Connection closed by remote host\r\n self.close()\r\n return False\r\n else: # Unexpected \r\n self.close()\r\n return False\r\n # We appear to be in -- reset prompt to something more unique.\r\n if not self.set_unique_prompt():\r\n self.close()\r\n return False\r\n return True", "def _ssh(self, command, use_pwd=True, use_tty=False, forward_x=False, verbose=False):\n if use_pwd:\n cd_cmd = 'cd cluster_test_%d; ' % self.address[1]\n else:\n cd_cmd = ''\n ssh = ['ssh',\n '-o', 'UserKnownHostsFile=/dev/null',\n '-o', 'StrictHostKeyChecking=no',\n '-o', 'IdentitiesOnly=yes']\n if self.key_file:\n ssh.extend(['-i', self.key_file])\n if use_tty:\n ssh.extend(['-t'])\n \n if forward_x:\n ssh.extend(['-Y'])\n \n ssh.extend([self.user_name + '@' + self.address[0], cd_cmd + command])\n \n if verbose: print(\" \".join(ssh))\n \n # Check whether ssh runs successfully.\n if subprocess.call(ssh) == 0:\n return True\n else:\n return False", "def ssh_call ( server, identity, cmd ) :\n print \"Running SSH command on server \" + server + \": \" + cmd\n return subprocess.call( [ \"ssh\",\n ssh_opt,\n \"-tt\",\n \"-i\",\n identity,\n \"ec2-user@\" + server,\n cmd ] )", "def run_unix(host, port, ssh, command):\n if not host or not port:\n return -2\n\n ssh = [ssh,\n '-o', 'UserKnownHostsFile=/dev/null',\n '-o', 'StrictHostKeyChecking=no',\n '-p', port, host] + command\n\n _LOGGER.debug('Starting ssh: %s', ssh)\n os.execvp(ssh[0], ssh)", "def command():\n server = get_server()\n port = get_port()\n \n click.echo(f'{server.get(\"hostname\")}:{port} -> localhost:{port}')\n click.echo('CTRL+C for quit')\n bash('ssh -N -L {port}:localhost:{port} -i {ssh_key_path} {username}@{hostname}'.format(\n ssh_key_path=server.get('ssh_key_path'),\n username=server.get('username'),\n hostname=server.get('hostname'),\n port=port\n ))", "def login(host):\n\n\n \"\"\" change this settings to make use. \"\"\"\n gateway_user = \"lonli\"\n gateway_ip = \"127.0.0.1\"\n gateway_port = \"22\"\n gateway_key = \"/home/lonli/.ssh/id_rsa\"\n\n \"\"\" change abbove settings to make use. \"\"\"\n\n\n if host:\n try:\n subprocess.check_output([\"ssh\", \"-p\", gateway_port, \"-i\", gateway_key,\n \"{0}@{1}\".format(gateway_user, gateway_ip), \"grep {0} ~/.ssh/config\".format(host)])\n except subprocess.CalledProcessError as e:\n print(\"'{0}' does not exists in the configuratian of the gateway!\".format(host), file=sys.stderr)\n return\n\n to_gateway = \"ssh -p {0} -i {1} {2}@{3}\".format(gateway_port, gateway_key, gateway_user, gateway_ip)\n ssh = pexpect.spawn(to_gateway)\n if host:\n\n \n \"\"\" change this settings to make use. \"\"\"\n exps = [\n (\"lonli@arch\", 'echo -n \"Enter diretory : \" && read && [ -d \"${REPLY}\" ] && cd ${REPLY}'),\n (\"Enter diretory : \", \"/tmp\"),\n (\"/tmp\", \"pwd\"),\n ]\n \"\"\" change abbove session to make use. 
\"\"\"\n\n\n for p, s in exps:\n # print(\"expect : {0}, then send : {1}\".format(p, s))\n ssh.expect(p)\n ssh.sendline(s)\n winch_handler = sigwinch_handler(ssh)\n signal.signal(signal.SIGWINCH, winch_handler)\n winch_handler(None, None)\n ssh.interact()", "def connect_to_ssh_host(self, host, port = 22, user = \"omc\", passwd = \"omc\", prompt = \"\", timeout = \"60sec\"):\n if prompt == None or prompt == \"\":\n myprompt = '#'\n # myprompt = None\n else:\n myprompt = prompt\n\n conn = MySshLib(timeout, \"CR\", myprompt)\n conn.open_connection(host, port=port)\n conn.login(user, passwd)\n\n self._ssh_connections[conn] = 'Linux'\n self._current = conn\n self._current._prompt = myprompt\n\n return conn", "def cli_run(host_ip:str, linux_user:str, linux_password:str, cmd:str)->dict:\n try:\n c = Connection(linux_user + \"@\" + host_ip, connect_kwargs={'password':linux_password})\n return c.run(cmd, warn=True)\n except Exception as e:\n return {\"Error\": str(e)}", "def ssh(args, config):\n print('{}'.format(ssh.__doc__))", "def connect_to_remote_host(host, username, password):\n ssh_client = paramiko.SSHClient()\n ssh_client.load_system_host_keys()\n ssh_client.connect(host, username=username, password=password)\n return ssh_client", "def open_ssh():\n print('Opening SSH...')", "def issmssh(host,login,port,command):\n\n\t#first get hostname \n\thostname=gethostname()\n\n\t#if same as host, just run the command. \n\tif m.strcmpi(host,hostname):\n\t\tsubprocess.call(command,shell=True)\n\telse:\n\t\tif m.ispc():\n\t\t\t#use the putty project plink.exe: it should be in the path.\n\t\t\n\t\t\t#get ISSM_DIR variable\n\t\t\tif 'ISSM_DIR_WIN' in os.environ:\n\t\t\t\tISSM_DIR=os.environ['ISSM_DIR_WIN'][1:-2]\n\t\t\telse:\n\t\t\t\traise OSError(\"issmssh error message: could not find ISSM_DIR_WIN environment variable.\")\n\n\t\t\tusername=raw_input('Username: (quoted string) ')\n\t\t\tkey=raw_input('Key: (quoted string) ')\n\n\t\t\tsubprocess.call('%s/externalpackages/ssh/plink.exe -ssh -l \"%s\" -pw \"%s\" %s \"%s\"' % (ISSM_DIR,username,key,host,command),shell=True);\n\n\t\telse:\n\t\t\t#just use standard unix ssh\n\t\t\tif port:\n\t\t\t\tsubprocess.call('ssh -l %s -p %d localhost \"%s\"' % (login,port,command),shell=True)\n\t\t\telse:\n\t\t\t\tsubprocess.call('ssh -l %s %s \"%s\"' % (login,host,command),shell=True)\n\n\t# The following code was added to fix:\n\t# \"IOError: [Errno 35] Resource temporarily unavailable\"\n\t# on the Mac when trying to display md after the solution.\n\t# (from http://code.google.com/p/robotframework/issues/detail?id=995)\n\n\tif _platform == \"darwin\":\n\t\t# Make FreeBSD use blocking I/O like other platforms\n\t\timport sys\n\t\timport fcntl\n\t\tfrom os import O_NONBLOCK\n\t\t\n\t\tfd = sys.stdin.fileno()\n\t\tflags = fcntl.fcntl(fd, fcntl.F_GETFL)\n\t\tfcntl.fcntl(fd, fcntl.F_SETFL, flags & ~O_NONBLOCK)\n\t\t\n\t\tfd = sys.stdout.fileno()\n\t\tflags = fcntl.fcntl(fd, fcntl.F_GETFL)\n\t\tfcntl.fcntl(fd, fcntl.F_SETFL, flags & ~O_NONBLOCK)", "def connect(host, user, passwd=''):\n override = Config(overrides={'sudo': {'password': passwd}})\n ret = Connection(host=host, user=user, config=override, connect_kwargs={\"password\": passwd})\n return ret", "def __ssh_tunnel(self):\n\n host = self.sshTunnelDict[\"ssh_ip\"]\n user = self.sshTunnelDict[\"ssh_user\"]\n password = self.sshTunnelDict[\"ssh_password\"]\n sfcs = self.sshTunnelDict[\"target_ip\"]\n\n tunnel_command = 'ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -C -N -f -L 3306:{0} 
{1}@{2}'.format(sfcs, user, host)\n retry = 5\n while retry:\n if not self.__check_ssh():\n try:\n ssh_tunnel = pexpect.spawn(tunnel_command)\n ssh_tunnel.expect('password:')\n time.sleep(0.1)\n ssh_tunnel.sendline(password)\n ssh_tunnel.expect(pexpect.EOF)\n retry -= 1\n except:\n raise Exception(\"Create SSH Tunnel Failed: retry 5\")\n else: break", "def ssh(host, command, fork=False, parallel=False, user=\"root\", debug=False):\n global __parallel_ssh_results\n args = [\"ssh\", \n \"-o\", \"StrictHostKeyChecking=no\", \n \"-o\", \"ConnectTimeout=15\",\n ]\n if KEYFILE:\n args.extend([\"-i\", KEYFILE])\n args.append(host)\n if fork:\n command += \" </dev/null >/dev/null 2>&1 &\"\n args.append(command)\n if debug:\n print 'ssh %s %s' % (host, command)\n p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n result = p.communicate()\n if parallel:\n __parallel_ssh_results[host] = result\n if debug:\n print host\n print '\\t', 'stdout:', result[0]\n print '\\t', 'stderr:', result[1]\n return (host, result)", "def _ssh_master_cmd(addr, user, command, local_key=None):\n ssh_call = ['ssh', '-qNfL%d:127.0.0.1:12042' % find_port(addr, user),\n '-o', 'ControlPath=~/.ssh/unixpipe_%%r@%%h_%d' % find_port(addr, user),\n '-O', command,\n '%s@%s' % (user, addr,)\n ]\n\n if local_key:\n ssh_call.insert(1, local_key)\n ssh_call.insert(1, '-i')\n \n return subprocess.call(ssh_call)" ]
[ "0.71752024", "0.70384115", "0.6979858", "0.6902557", "0.679378", "0.6753487", "0.6716479", "0.6705598", "0.6633392", "0.660619", "0.6563642", "0.6469829", "0.64319026", "0.642217", "0.63958335", "0.6395418", "0.6362119", "0.63617915", "0.63132185", "0.62975687", "0.62537336", "0.62407696", "0.6234734", "0.62220687", "0.6210157", "0.6191439", "0.6170873", "0.616945", "0.6132386", "0.61243856" ]
0.817772
0
the root user should be able to login with ssh.
def the_root_user_should_be_able_to_login_with_ssh(driver):
    assert ssh_result['result'], ssh_result['output']
    assert '..' in ssh_result['output'], ssh_result['output']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_verify_ssh_access_with_root_works(driver):", "def css_login_as_root(css_test_machine):\n ssh_config = collections.namedtuple('ssh_config',\n ('hostname port username '\n 'rsa_key_file password'))\n config = ssh_config(hostname=css_test_machine['public_ip'],\n port=22,\n username=\"root\",\n rsa_key_file=\"\", # Use password for now\n password=css_test_machine['root_password'])\n logger.debug(\"ssh instantiated\")\n yield SshUtil(config)\n # Close connection?", "def run_ssh_root_host_with_root_password(driver, host, password):\n global ssh_result\n ssh_result = ssh_cmd('ls -la', 'root', password, host)", "def test_ssh(self):\n assert self.rc_conf.has_key('sshd_enable')\n assert self.rc_conf['sshd_enable'] == '\"YES\"'\n sshd_conf = open('/etc/ssh/sshd_config').read()\n assert re.search('[^#]PermitRootLogin yes', sshd_conf)", "def open_ssh():\n print('Opening SSH...')", "def login (self,server,username,password='',terminal_type='ansi',original_prompts=r\"][#$]|~[#$]|bash.*?[#$]|[#$] \",login_timeout=10):\r\n cmd = \"ssh -l %s %s\" % (username, server)\r\n spawn.__init__(self, cmd, timeout=login_timeout)\r\n #, \"(?i)no route to host\"])\r\n i = self.expect([\"(?i)are you sure you want to continue connecting\", original_prompts, \"(?i)password\", \"(?i)permission denied\", \"(?i)terminal type\", TIMEOUT, \"(?i)connection closed by remote host\"])\r\n if i==0: # New certificate -- always accept it. This is what you if SSH does not have the remote host's public key stored in the cache.\r\n self.sendline(\"yes\")\r\n i = self.expect([\"(?i)are you sure you want to continue connecting\", original_prompts, \"(?i)password\", \"(?i)permission denied\", \"(?i)terminal type\", TIMEOUT])\r\n if i==2: # password\r\n self.sendline(password)\r\n i = self.expect([\"(?i)are you sure you want to continue connecting\", original_prompts, \"(?i)password\", \"(?i)permission denied\", \"(?i)terminal type\", TIMEOUT])\r\n if i==4:\r\n self.sendline(terminal_type)\r\n i = self.expect([\"(?i)are you sure you want to continue connecting\", original_prompts, \"(?i)password\", \"(?i)permission denied\", \"(?i)terminal type\", TIMEOUT])\r\n\r\n if i==0:\r\n # This is weird. This should not happen twice in a row.\r\n self.close()\r\n return False\r\n elif i==1: # can occur if you have a public key pair set to authenticate. \r\n ### TODO: May NOT be OK if expect() matched a false prompt.\r\n pass\r\n elif i==2: # password prompt again\r\n # For incorrect passwords, some ssh servers will\r\n # ask for the password again, others return 'denied' right away.\r\n # If we get the password prompt again then this means\r\n # we didn't get the password right the first time. \r\n self.close()\r\n return False\r\n elif i==3: # permission denied -- password was bad.\r\n self.close()\r\n return False\r\n elif i==4: # terminal type again? WTF?\r\n self.close()\r\n return False\r\n elif i==5: # Timeout\r\n # This is tricky... 
presume that we are at the command-line prompt.\r\n # It may be that the prompt was so weird that we couldn't match it.\r\n pass\r\n elif i==6: # Connection closed by remote host\r\n self.close()\r\n return False\r\n else: # Unexpected \r\n self.close()\r\n return False\r\n # We appear to be in -- reset prompt to something more unique.\r\n if not self.set_unique_prompt():\r\n self.close()\r\n return False\r\n return True", "def Login(self):\r\n try:\r\n self.ssh = paramiko.SSHClient()\r\n self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\r\n self.ssh.connect(self.host,self.port,self.username,self.passsowrd,timeout=2)\r\n return (True,None)\r\n except Exception,e:\r\n \r\n return (False,\"%s %s %s\"%(self.host,self.username,e))", "def login(self, asRoot=True):\n self.pc = Telnet(self.ipaddr)\n self.pc.expect(['login'])\n self.pc.write(self.user+'\\n')\n self.pc.expect(['Password'])\n self.pc.write(self.passwd+'\\n')\n ix, ox, tx = self.pc.expect(self.prompt, self.timeout)\n if ix == -1:\n raise Exception('Can not telnet to \\'%s\\'' %self.ipaddr)", "def get_ssh_user():\n\n return getpass.getuser()", "def _start_ssh(self):\n try:\n message = '\\nEnter number you want to connect: '\n num = raw_input(message)\n while not int(num) in self.instance_list:\n num = raw_input(message)\n\n message_user = 'Enter username for ssh_login(blank = %s): ' % DEFAULT_USER \n user = raw_input(message_user)\n if not user:\n user = DEFAULT_USER\n \n target = self.instance_list[int(num)]\n ssh_key_path = os.path.join(SSH_DIR, target['key'])\n if not os.path.exists(ssh_key_path):\n print 'SSH key not found! KEY_PATH[ %s ]' % ssh_key_path\n return\n\n command = COMMAND % {'sshkey' : ssh_key_path, 'user' : user, 'server' : target['dns'], 'port' : self.port}\n\n print 'Connecting to \"%s\"... [SSH COMMAND: %s ]' % (target['name'], command)\n os.system(command)\n except KeyboardInterrupt:\n print '\\nAborted!'\n finally:\n sys.exit()", "def ssh():\n env['remote_port'] = env['port_map']['22']\n\n sys.stdout.write('Connecting to SSH session on remote port %(remote_port)s\\n' % env)\n\n run('chmod 600 %(pair_private_key)s' % env)\n\n client = paramiko.SSHClient()\n client.load_system_host_keys()\n client.connect(\n hostname=env['relay_server'],\n port=int(env['remote_port']),\n username=env['pair_user'],\n key_filename=env['pair_private_key']\n )\n\n channel = client.invoke_shell()\n posix_shell(channel)", "def slogin ( server_name, user_name = 'ec2-user', ssh_identity_file = None ) :\n cmd = 'ssh'\n if ssh_identity_file :\n cmd += ' -i ' + ssh_identity_file\n cmd += ' -l ' + user_name\n cmd += ' ' + server_name\n\n return subprocess.call( cmd, shell = True )", "def login(host):\n\n\n \"\"\" change this settings to make use. \"\"\"\n gateway_user = \"lonli\"\n gateway_ip = \"127.0.0.1\"\n gateway_port = \"22\"\n gateway_key = \"/home/lonli/.ssh/id_rsa\"\n\n \"\"\" change abbove settings to make use. \"\"\"\n\n\n if host:\n try:\n subprocess.check_output([\"ssh\", \"-p\", gateway_port, \"-i\", gateway_key,\n \"{0}@{1}\".format(gateway_user, gateway_ip), \"grep {0} ~/.ssh/config\".format(host)])\n except subprocess.CalledProcessError as e:\n print(\"'{0}' does not exists in the configuratian of the gateway!\".format(host), file=sys.stderr)\n return\n\n to_gateway = \"ssh -p {0} -i {1} {2}@{3}\".format(gateway_port, gateway_key, gateway_user, gateway_ip)\n ssh = pexpect.spawn(to_gateway)\n if host:\n\n \n \"\"\" change this settings to make use. 
\"\"\"\n exps = [\n (\"lonli@arch\", 'echo -n \"Enter diretory : \" && read && [ -d \"${REPLY}\" ] && cd ${REPLY}'),\n (\"Enter diretory : \", \"/tmp\"),\n (\"/tmp\", \"pwd\"),\n ]\n \"\"\" change abbove session to make use. \"\"\"\n\n\n for p, s in exps:\n # print(\"expect : {0}, then send : {1}\".format(p, s))\n ssh.expect(p)\n ssh.sendline(s)\n winch_handler = sigwinch_handler(ssh)\n signal.signal(signal.SIGWINCH, winch_handler)\n winch_handler(None, None)\n ssh.interact()", "def connectSsh(self):\n connect_handle = pexpect.spawn(\"ssh -q -o StrictHostKeyChecking=no root@%s\" % self.ip)\n connect_handle.setwinsize(800,800)\n connect_handle.logfile_read = sys.stdout\n #connect_handle.logfile_send = sys.stdout\n i = 0\n ssh_newkey = r'(?i)Are you sure you want to continue connecting'\n remote_key_changed = r\"REMOTE HOST IDENTIFICATION HAS CHANGED\"\n\n perm_denied = r\"(?i)Permission denied\"\n while True:\n i = connect_handle.expect([ssh_newkey, 'assword:',self.promptshell,\n pexpect.EOF, pexpect.TIMEOUT,\n remote_key_changed, perm_denied])\n if i==0:\n connect_handle.sendline('yes')\n continue\n elif i==1:\n logger.info(\"Password supplied\")\n connect_handle.sendline(self.password)\n continue\n\t elif i==2:\n self._mode = CLI_MODES.shell\n self._prompt = self.promptshell\n break\n elif i==3:\n logger.info(\"Connection closed: %s\" % self)\n logger.info(connect_handle.before) # print out the result\n raise ValueError(\"Connection Closed: %s\" % self)\n elif i==4:\n logger.warning(\"Timeout while waiting for connection\")\n logger.info(connect_handle.before) # print out the result\n raise ValueError(\"Unable to establish connection %s\" % self)\n elif i==5:\n logger.warn(\"Removing offending key from .known_hosts..\")\n known_hosts_file = os.path.expanduser(\"~/.ssh/known_hosts\")\n\n if \"darwin\" in sys.platform.lower():\n # MAC OS\n utils.run_cmd(\"sed -i 1 's/%s.*//' %s\" % (self.ip, known_hosts_file))\n elif \"linux\" in sys.platform.lower():\n # Linux\n utils.run_cmd(\"sed -i 's/%s.*//' %s\" % (self.ip, known_hosts_file))\n\n connect_handle = pexpect.spawn(\"ssh root@%s\" % self.ip)\n connect_handle.setwinsize(800,800)\n connect_handle.logfile_read = sys.stdout\n\n continue\n elif i==6:\n logger.warning(\"Permission denied: %s\" % self)\n logger.info(connect_handle.before) # print out the result\n raise ValueError(\"Permission denied: %s.\" % self)\n return connect_handle", "def connect_new_ssh(child, password):\n child.sendline('yes');\n index = child.expect('password: ');\n if index == 0:\n child.sendline(password);", "def enable_root(self):\n return self.client.post(self.path+'/root')['user']['password']", "def connect(request):\n # Get root user\n # getent group root | perl -naF: -e 'print \"$F[0]\\n\"'\n\n # Get all users\n # getent passwd | perl -naF: -e 'print \"$F[0]\\n\"'\n \n # Command to get users\n # awk -F':' '{ print $1}' /etc/passwd\n # cut -d: -f1 /etc/passwd\n\n # Command to get user with root privileges\n # grep -Po '^sudo.+:\\K.*$' /etc/group\n\n user_data = []\n try:\n client = paramiko.SSHClient()\n client.load_system_host_keys()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n port = 80\n if request.data[\"port\"] != None and request.data[\"port\"] != \"\":\n port = request.data[\"port\"]\n client.connect(hostname=request.data[\"host\"], port=port, username=request.data[\"username\"], password=request.data[\"password\"])\n shell = client.invoke_shell()\n # Execute command and get results.\n stdin, stdout, stderr = client.exec_command(\"compgen 
-u\")\n user_list = str(stdout.read()).replace(\"b\\'\", \"\").split(\"\\\\n\")\n stdin, stdout, stderr = client.exec_command(\"grep -Po '^sudo.+:\\K.*$' /etc/group\")\n root_users = str(stdout.read()).replace(\"b\\'\", \"\").replace(\"\\\\n\\'\", \"\").split(\",\")\n stdin, stdout, stderr = client.exec_command(\"getent group root | perl -naF: -e 'print \\\"$F[0]\\n\\\"'\")\n root_users = list(set(root_users + str(stdout.read()).replace(\"b\\'\", \"\").replace(\"\\\\n\\'\", \"\").split(\",\")))\n # Close connection.\n shell.close()\n client.close()\n for user in user_list[0: len(user_list)-1]:\n isRoot = False\n if user in root_users:\n isRoot = True\n user_data.append({\n \"user\": user,\n \"isRootUser\": isRoot\n })\n response = {\n \"response\": \"SUCCESS\",\n \"message\": \"Connection Success!\",\n \"data\": user_data\n }\n except:\n response = {\n \"response\": \"FAIL\",\n \"message\": \"Cannot connect to the server!\",\n \"data\": user_data\n }\n return Response(response)", "def enable_root_user(self):\n uri = \"/instances/%s/root\" % self.id\n resp, body = self.manager.api.method_post(uri)\n return body[\"user\"][\"password\"]", "def ssh():\n vbox = Vbox(env.vm_name)\n with vbox as session:\n session.wait_for_ssh()\n open_shell()", "def disable_root_login():\n sudo('passwd --lock root')", "def test_ssh(self):\n self._test_ssh(self.git_ssh_path)", "def connect(self):\n try:\n super().connect(self.host, username=self.username,\n password=self.password)\n except paramiko.AuthentificationException:\n print(\"Authentification error occured.\")\n except paramiko.SSHException:\n print(\"Connection error occuredi.\")\n\n pass # TODO", "def editor_cloud9_ssh_command():\n docker_vars = _editor_cloud9_docker_vars()\n print \"ssh -p %s -i private/ssh/id_rsa_devbox root@%s\" % (docker_vars['public_ssh_port'], env.host)", "def enable_root_user(self, instance):\n return instance.enable_root_user()", "def user_is_root():\n return os.geteuid() == 0", "def connecting(self):\r\n \r\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) \r\n try:\r\n ssh.connect(self.hostname,self.port,self.identifier,self.password)\r\n feedback = '***Connection Established***'\r\n return feedback\r\n \r\n except Exception as e:\r\n feedback= '***Connection failed : '+str(e)+'***'\r\n return feedback\r\n sys.exit(1)", "def connect_to_ssh_host(self, host, port = 22, user = \"omc\", passwd = \"omc\", prompt = \"\", timeout = \"60sec\"):\n if prompt == None or prompt == \"\":\n myprompt = '#'\n # myprompt = None\n else:\n myprompt = prompt\n\n conn = MySshLib(timeout, \"CR\", myprompt)\n conn.open_connection(host, port=port)\n conn.login(user, passwd)\n\n self._ssh_connections[conn] = 'Linux'\n self._current = conn\n self._current._prompt = myprompt\n\n return conn", "def login_user(host, username, password):\n global user\n\n globals.ssh.set_host(host)\n globals.ssh.set_user(username)\n globals.ssh_cylc.set_hostname(env.CYLC_HOST)\n globals.ssh_cylc.set_user(username)\n res_ssh = globals.ssh.ssh_execute('cat ~/.prepcase.json', [])\n\n res = dict(error_code='', error='', config='', hostname='')\n\n if res_ssh['return_code'] == 255:\n res['error_code'] = 'permission_denied'\n res['error'] = 'Wrong username or no public key logging set (' + res_ssh['stderr'] + ')'\n elif res_ssh['return_code'] == 1:\n res['error_code'] = 'no_prepcase_file'\n res['error'] = 'No .prepcase.json file in home directory'\n elif res_ssh['return_code'] != 0:\n res['error_code'] = 'error'\n res['error'] = res_ssh['stderr']\n else:\n try:\n 
config = json.loads(res_ssh['stdout'])\n password_on_server = config.pop('password') # read & remove password form config\n if password_on_server is None:\n res['error_code'] = 'error'\n res['error'] = 'No password in file .prepcase.json'\n elif password != password_on_server:\n res['error_code'] = 'error'\n res['error'] = 'Wrong password'\n else:\n # config file ok and password matches\n user['username'] = username\n user['hostname'] = host\n user['cesm_path'] = config.get('cesm_path')\n if user['cesm_path'] is None:\n raise ValueError\n user['cesm_env_script'] = config.get('cesm_env_script', '')\n user['case_dirs'] = cases.get_real_case_dirs(config.get('case_dirs', []))\n session['user'] = user\n # config for frontend\n res['config'] = user\n except ValueError:\n res['error_code'] = 'invalid_prepcase_file'\n res['error'] = 'File .prepcase.json is malformed'\n\n return res", "def ssh_to(srvname, srvport=22, srvuser='root'):\n xssh = subprocess.Popen(['/usr/bin/ssh', '-o', 'StrictHostKeyChecking=no', '-o',\n 'UserKnownHostsFile=/dev/null', '-p', str(srvport),\n '%s@%s' % (srvuser, srvname)])\n xssh.communicate()", "def ssh(host_=None):\n run_command_on_selected_server(open_shell, host_=host_)" ]
[ "0.7457555", "0.725366", "0.6982431", "0.6840304", "0.6652", "0.66381174", "0.66168934", "0.65830183", "0.65589464", "0.6533763", "0.65160865", "0.64793193", "0.63521785", "0.6320156", "0.63003814", "0.62917894", "0.6257883", "0.6256203", "0.6244345", "0.6208445", "0.6206562", "0.6205024", "0.6190045", "0.61452734", "0.6143288", "0.61376846", "0.61124456", "0.61034685", "0.6082201", "0.60800844" ]
0.8243572
0
Given a rollout, compute its value targets and the advantage.
def compute_advantages(rollout, last_r, gamma=0.9, lambda_=1.0, use_gae=True):
    traj = {}
    trajsize = len(rollout["actions"])
    for key in rollout:
        traj[key] = np.stack(rollout[key])

    if use_gae:
        assert "vf_preds" in rollout, "Values not found!"
        vpred_t = np.concatenate([rollout["vf_preds"], np.array([last_r])])
        delta_t = traj["rewards"] + gamma * vpred_t[1:] - vpred_t[:-1]
        # This formula for the advantage comes
        # "Generalized Advantage Estimation": https://arxiv.org/abs/1506.02438
        traj["advantages"] = discount(delta_t, gamma * lambda_)
        traj["value_targets"] = (
            traj["advantages"] + traj["vf_preds"]).copy().astype(np.float32)
    else:
        rewards_plus_v = np.concatenate(
            [rollout["rewards"], np.array([last_r])])
        traj["advantages"] = discount(rewards_plus_v, gamma)[:-1]
        # TODO(ekl): support using a critic without GAE
        traj["value_targets"] = np.zeros_like(traj["advantages"])

    traj["advantages"] = traj["advantages"].copy().astype(np.float32)

    assert all(val.shape[0] == trajsize for val in traj.values()), \
        "Rollout stacked incorrectly!"
    return SampleBatch(traj)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_targets(rollout, action_space, last_r=0.0, gamma=0.9, lambda_=1.0):\n\n rollout = compute_advantages(rollout, last_r, gamma=gamma, lambda_=lambda_)\n rollout[\"adv_targets\"] = np.zeros((rollout.count, action_space.n))\n rollout[\"adv_targets\"][np.arange(rollout.count), rollout[\"actions\"]] = \\\n rollout[\"advantages\"]\n rollout[\"value_targets\"] = rollout[\"rewards\"].copy()\n rollout[\"value_targets\"][:-1] += gamma * rollout[\"vf_preds\"][1:]\n return rollout", "def update(self):\n with torch.no_grad():\n self.preprocess_rollout()\n \n # DEEP-RL TUTORIALS: КОСТЫЛЬ\n #self.advantages = self.returns[:-1] - self.values[:-1]\n #self.advantages = (self.advantages - self.advantages.mean()) / (self.advantages.std() + 1e-5)\n \n # going through rollout several (config.epochs) times:\n for epoch in range(self.config.epochs):\n # TODO: drop last = False? What if there is 1 sample?\n sampler = BatchSampler(SubsetRandomSampler(range(self.env.num_envs * self.config.rollout)), self.config.batch_size, drop_last=False)\n \n for indices in sampler:\n # retrieving new batch as part of rollout\n self.returns_b = self.returns.view(-1, *self.config.value_repr_shape)[indices]\n self.old_values_b = self.values.view(-1, *self.config.value_repr_shape)[indices]\n self.old_action_log_probs_b = self.action_log_probs.view(-1)[indices]\n #self.advantages_b = self.advantages.view(-1)[indices] # КОСТЫЛЬ\n \n # calculating current value, action_log_prob, entropy\n dist, self.values_b = self.policy(self.observations.view(-1, *self.config.observation_shape)[indices])\n self.values_b = self.values_b.squeeze() # IMPORTANT ([32] - [32, 1] problem)\n self.action_log_probs_b = dist.log_prob(self.actions.view(-1, *self.config.actions_shape)[indices])#.sum(dim=-1) \n self.entropy_b = dist.entropy()#.sum(dim=-1)\n \n # performing step\n self.gradient_ascent_step()", "def loss(self, targets, scores):\n return (2. * numpy.arctan(targets * scores) - 1.)**2", "def propagate(source, targets, vals):\n v = vals[source]\n for t in targets:\n vals.setdefault(t, 0)\n vals[t] += v", "def ac_process_rollout(rollout, gamma, lambda_=1.0, clip=0.0):\n # collecting transitions\n batch_si = np.asarray(rollout.states + [rollout.end_state])\n batch_a = np.asarray(rollout.actions)\n\n # collecting target for value network\n # V_t <-> r_t + gamma*r_{t+1} + ... 
+ gamma^n*r_{t+n} + gamma^{n+1}*V_{n+1}\n rewards_plus_v1 = np.asarray(rollout.rewards + [rollout.r]) # bootstrapping\n\n if clip != 0.0:\n rewards_plus_v1 = np.clip( rewards_plus_v1, -clip, clip )\n\n rewards_plus_v2 = np.asarray(rollout.bonuses + [0])\n rewards_plus_v = rewards_plus_v1 + rewards_plus_v2\n \n batch_r = discount( rewards_plus_v, gamma )[:-1] # value network target\n\n # collecting target for policy network\n rewards1 = np.asarray(rollout.rewards)\n if clip != 0.0:\n rewards1 = np.clip( rewards1, -clip, clip )\n\n rewards2 = np.asarray(rollout.bonuses) \n rewards = rewards1 + rewards2\n \n vpred_t = np.asarray( rollout.values + [ rollout.r ] ) \n # \"Generalized Advantage Estimation\": https://arxiv.org/abs/1506.02438\n # Eq (10): delta_t = Rt + gamma*V_{t+1} - V_t\n # Eq (16): batch_adv_t = delta_t + gamma*delta_{t+1} + gamma^2*delta_{t+2} + ...\n delta_t = rewards + gamma * ( vpred_t[1:] - vpred_t[:-1] ).squeeze()\n batch_adv = discount( delta_t, gamma * lambda_ )\n\n features = rollout.features\n\n return Batch( batch_si, batch_a, batch_adv, batch_r, rollout.terminal, features )", "def __call__(self, preds: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:\n if isinstance(targets, (list, tuple)):\n\n target_a, target_b, lam = targets\n loss = lam * self.criterion(preds, target_a) + (1 - lam) * self.criterion(preds, target_b)\n else:\n loss = self.criterion(preds, targets)\n return loss", "def cost(predictions, targets):\n # averages the error across all data points, taking the values that have not been rounded to 0 and 1.\n return np.mean( (predictions - targets)**2)", "def _compute_loss(self, predictions, targets, **params):\n pass", "def compute_output_delta(self, target):\r\n self.compute_activation\r\n out=self.activation\r\n self.delta=out*(1-out)*(target-out)", "def calc_stay_prob(rollouts):\n states = rollouts.states\n actions = rollouts.actions\n rewards = rollouts.rewards\n\n num_test_episodes = states.shape[0]\n num_trials = states.shape[1]\n count_trial_stayed = 0.01 + np.zeros((2, 2, num_test_episodes)) # [common/uncommon, reward/unrewarded]\n count_trial_all = 0.01 + np.zeros((2, 2, num_test_episodes))\n for epi in range(num_test_episodes):\n for t in range(0, num_trials-2, 2):\n uncommon_transition = int(actions[epi, t] != states[epi, t+1]-1)\n count_trial_all[uncommon_transition, (0 if rewards[epi, t+1] else 1), epi] += 1\n count_trial_stayed[uncommon_transition, (0 if rewards[epi, t+1] else 1), epi] += \\\n int(actions[epi, t+2] == actions[epi, t])\n return np.divide(count_trial_stayed, count_trial_all), count_trial_stayed, count_trial_all", "def calculate_loss(self, output, target, **kwargs):\n ##dont do aggregation\n raise NotImplementedError", "def calculate_loss(self, output, target, **kwargs):\n ##dont do aggregation\n raise NotImplementedError", "def rollout_fn(u_vec: np.ndarray):\n obs_vec, rew_vec, done_vec, _ = sim_env.rollout(u_vec.copy()) #state_vec\n return -1.0*rew_vec #we assume environment returns rewards", "def loss_function(cls, logits, label, targeted):\n\n if targeted:\n adv_loss = - torch.gather(logits, 1, label)\n else:\n adv_loss = torch.gather(logits, 1, label)\n\n return adv_loss.mean()", "def process_rollout(rollout, gamma, lambda_=1.0):\n batch_si = np.asarray(rollout[\"obs\"])\n batch_a = np.asarray(rollout[\"actions\"])\n batch_v = np.asarray(rollout[\"vs\"])\n rewards = np.asarray(rollout[\"rs\"])\n vpred_t = np.asarray(rollout[\"vs\"] + [rollout[\"r\"]])\n\n rewards_plus_v = np.asarray(rollout[\"rs\"] + 
[rollout[\"r\"]])\n batch_r = discount(rewards_plus_v, gamma)[:-1]\n delta_t = rewards + gamma * vpred_t[1:] - vpred_t[:-1]\n # This formula for the advantage comes \"Generalized Advantage Estimation\":\n # https://arxiv.org/abs/1506.02438\n batch_adv = discount(delta_t, gamma * lambda_)\n features = rollout[\"features\"][0]\n return Batch(batch_si, batch_a, batch_adv, batch_r, batch_v, rollout[\"terminal\"],\n features)", "def Cost(self, input_data: list, target_output_data: list):\n error = 0\n for input_, target_output in zip(input_data, target_output_data):\n generated_output = self.Evaluate(input_)\n for target_output_value, generated_output_value in zip(target_output, generated_output):\n error += (target_output_value - generated_output_value) ** 2\n return error / (2 * len(input_data))", "def loss_func(self, logits, targets):\r\n return -np.sum(targets * np.log(logits)) / logits.shape[0]", "def backPropagate(self, targets):\n if len(targets) != self.output:\n raise ValueError('NOT MATCH ERROR -- output number!')\n\n # calculate error terms for output\n # the delta tell you which direction to change the weights\n output_deltas = [0.0] * self.output\n for k in range(self.output):\n error = -(targets[k] - self.ao[k])\n output_deltas[k] = dsigmoid(self.ao[k]) * error\n\n # calculate error terms for hidden\n # delta tells you which direction to change the weights\n hidden_deltas = [0.0] * (self.hidden - 1)\n for j in range(self.hidden - 1):\n error = 0.0\n for k in range(self.output):\n error += output_deltas[k] * self.wo[j][k]\n hidden_deltas[j] = dsigmoid(self.ah[j]) * error\n\n # update the weights connecting hidden to output\n for j in range(self.hidden):\n for k in range(self.output):\n change = output_deltas[k] * self.ah[j]\n self.wo[j][k] -= self.learning_rate * change + self.co[j][k] * self.momentum\n self.co[j][k] = change\n\n # update the weights connecting input to hidden\n for i in range(self.input):\n for j in range(self.hidden - 1):\n change = hidden_deltas[j] * self.ai[i]\n self.wi[i][j] -= self.learning_rate * change + self.ci[i][j] * self.momentum\n self.ci[i][j] = change\n\n # calculate error\n error = 0.0\n for k in range(len(targets)):\n error += 0.5 * (targets[k] - self.ao[k]) ** 2\n return error", "def update(self, phase, targets, outputs):\n iou, dice, dice_neg, dice_pos, _, _ = self.metric(outputs, targets)\n self.base_dice_scores[phase].append(dice)\n self.dice_pos_scores[phase].append(dice_pos)\n self.dice_neg_scores[phase].append(dice_neg)\n self.iou_scores[phase].append(iou)", "def compute_loss(self, obs, returns):", "def TargetDistrObj(x,t,newsamps,oldsamps,oldsetcounts) :\r\n\t\terror = 0\r\n\t\tnewtotal = newsamps+oldsamps\r\n\t\tfor ii in range(0,len(t)) :\r\n\t\t\tval = (t[ii] - (oldsetcounts[ii] + x[ii]*newsamps)/newtotal)\r\n\t\t\terror += val*val\r\n\t\treturn error", "def loss_fn(self, targets, outputs, model):", "def target_loss(self, inp_hist):\n active_targ = self.get_active_target(inp_hist)\n return torch.mean((inp_hist[:, 1:3] - active_targ) ** 2)", "def callback(self, rollout):\n assert len(self._t_switch)==len(self._scale)==len(self._k_star)\n # Log rollout statistics\n if self._ro_with_policy or self._unfinished_mix(len(rollout)):\n if self._unfinished_mix(len(rollout)):\n del self._k_star[-1]\n del self._t_switch[-1]\n del self._scale[-1]\n self._ind_ro_pol.append(self._n_ro)\n self._n_samples_ro_pol+=len(rollout)\n if self._ro_by_n_samples:\n self._ro_with_policy = self._n_samples_ro_pol<self._n_samples_ro_mix\n else:\n self._ro_with_policy 
= False\n else:\n self._ind_ro_mix.append(self._n_ro)\n self._n_samples_ro_mix+=len(rollout)\n self._ro_with_policy = True\n\n # unlock so `pi` can be called again\n self._locked =False\n self._n_ro+=1", "def grad_wrt_loss(predictions, targets):\n grad = -2 * (targets.float() - predictions)\n return grad", "def calculate_deltas(net, desired_output, neuron_outputs):\n raise NotImplementedError", "def get_target_per_score(self):\n pass", "def backPropagate(self, targets):\n target = np.array(targets)\n output_deltas = -(target - self.ao)\n\n error = output_deltas.dot(self.wo.T)\n hidden2_deltas = dtanh(self.ah2) * error\n\n error = hidden2_deltas.dot(self.wh.T)\n hidden1_deltas = dtanh(self.ah1) * error\n\n ############output ----> hidden_2##############\n change = output_deltas.T.dot(self.ah2).T\n self.wo -= (self.learning_rate * change) + (self.co * self.momentum)\n self.co = change\n ############hidden_2 ----> hidden_1##############\n change = hidden2_deltas.T.dot(self.ah1).T\n self.wh -= (self.learning_rate * change) + (self.ch * self.momentum)\n self.ch = change\n ############hidden_1 ----> input##############\n change = hidden1_deltas.T.dot(self.ai).T\n self.wi -= (self.learning_rate * change) + (self.ci * self.momentum)\n self.ci = change\n\n return np.mean(-output_deltas)", "def calculate_appropriate_target(self):\n pass", "def calculate_appropriate_target(self):\n pass" ]
[ "0.69817305", "0.6567999", "0.6169661", "0.6129128", "0.6064148", "0.5974392", "0.5889183", "0.58680856", "0.5848842", "0.58002174", "0.5742709", "0.5742709", "0.5742173", "0.574011", "0.57117146", "0.57093364", "0.57023776", "0.5684186", "0.56739855", "0.5669796", "0.56599635", "0.56207466", "0.560676", "0.55963886", "0.55933315", "0.55787224", "0.5574724", "0.5555039", "0.5554402", "0.5554402" ]
0.67084
1
Given a rollout, compute targets. Used for categorical crossentropy loss on the policy. Also assumes there is a value function. Uses GAE to calculate advantages.
def compute_targets(rollout, action_space, last_r=0.0, gamma=0.9, lambda_=1.0):
    rollout = compute_advantages(rollout, last_r, gamma=gamma, lambda_=lambda_)
    rollout["adv_targets"] = np.zeros((rollout.count, action_space.n))
    rollout["adv_targets"][np.arange(rollout.count), rollout["actions"]] = \
        rollout["advantages"]
    rollout["value_targets"] = rollout["rewards"].copy()
    rollout["value_targets"][:-1] += gamma * rollout["vf_preds"][1:]
    return rollout
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_advantages(rollout, last_r, gamma=0.9, lambda_=1.0, use_gae=True):\n\n traj = {}\n trajsize = len(rollout[\"actions\"])\n for key in rollout:\n traj[key] = np.stack(rollout[key])\n\n if use_gae:\n assert \"vf_preds\" in rollout, \"Values not found!\"\n vpred_t = np.concatenate([rollout[\"vf_preds\"], np.array([last_r])])\n delta_t = traj[\"rewards\"] + gamma * vpred_t[1:] - vpred_t[:-1]\n # This formula for the advantage comes\n # \"Generalized Advantage Estimation\": https://arxiv.org/abs/1506.02438\n traj[\"advantages\"] = discount(delta_t, gamma * lambda_)\n traj[\"value_targets\"] = (\n traj[\"advantages\"] + traj[\"vf_preds\"]).copy().astype(np.float32)\n else:\n rewards_plus_v = np.concatenate(\n [rollout[\"rewards\"], np.array([last_r])])\n traj[\"advantages\"] = discount(rewards_plus_v, gamma)[:-1]\n # TODO(ekl): support using a critic without GAE\n traj[\"value_targets\"] = np.zeros_like(traj[\"advantages\"])\n\n traj[\"advantages\"] = traj[\"advantages\"].copy().astype(np.float32)\n\n assert all(val.shape[0] == trajsize for val in traj.values()), \\\n \"Rollout stacked incorrectly!\"\n return SampleBatch(traj)", "def __call__(self, preds: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:\n if isinstance(targets, (list, tuple)):\n\n target_a, target_b, lam = targets\n loss = lam * self.criterion(preds, target_a) + (1 - lam) * self.criterion(preds, target_b)\n else:\n loss = self.criterion(preds, targets)\n return loss", "def update(self):\n with torch.no_grad():\n self.preprocess_rollout()\n \n # DEEP-RL TUTORIALS: КОСТЫЛЬ\n #self.advantages = self.returns[:-1] - self.values[:-1]\n #self.advantages = (self.advantages - self.advantages.mean()) / (self.advantages.std() + 1e-5)\n \n # going through rollout several (config.epochs) times:\n for epoch in range(self.config.epochs):\n # TODO: drop last = False? What if there is 1 sample?\n sampler = BatchSampler(SubsetRandomSampler(range(self.env.num_envs * self.config.rollout)), self.config.batch_size, drop_last=False)\n \n for indices in sampler:\n # retrieving new batch as part of rollout\n self.returns_b = self.returns.view(-1, *self.config.value_repr_shape)[indices]\n self.old_values_b = self.values.view(-1, *self.config.value_repr_shape)[indices]\n self.old_action_log_probs_b = self.action_log_probs.view(-1)[indices]\n #self.advantages_b = self.advantages.view(-1)[indices] # КОСТЫЛЬ\n \n # calculating current value, action_log_prob, entropy\n dist, self.values_b = self.policy(self.observations.view(-1, *self.config.observation_shape)[indices])\n self.values_b = self.values_b.squeeze() # IMPORTANT ([32] - [32, 1] problem)\n self.action_log_probs_b = dist.log_prob(self.actions.view(-1, *self.config.actions_shape)[indices])#.sum(dim=-1) \n self.entropy_b = dist.entropy()#.sum(dim=-1)\n \n # performing step\n self.gradient_ascent_step()", "def _compute_loss(self, predictions, targets, **params):\n pass", "def ac_process_rollout(rollout, gamma, lambda_=1.0, clip=0.0):\n # collecting transitions\n batch_si = np.asarray(rollout.states + [rollout.end_state])\n batch_a = np.asarray(rollout.actions)\n\n # collecting target for value network\n # V_t <-> r_t + gamma*r_{t+1} + ... 
+ gamma^n*r_{t+n} + gamma^{n+1}*V_{n+1}\n rewards_plus_v1 = np.asarray(rollout.rewards + [rollout.r]) # bootstrapping\n\n if clip != 0.0:\n rewards_plus_v1 = np.clip( rewards_plus_v1, -clip, clip )\n\n rewards_plus_v2 = np.asarray(rollout.bonuses + [0])\n rewards_plus_v = rewards_plus_v1 + rewards_plus_v2\n \n batch_r = discount( rewards_plus_v, gamma )[:-1] # value network target\n\n # collecting target for policy network\n rewards1 = np.asarray(rollout.rewards)\n if clip != 0.0:\n rewards1 = np.clip( rewards1, -clip, clip )\n\n rewards2 = np.asarray(rollout.bonuses) \n rewards = rewards1 + rewards2\n \n vpred_t = np.asarray( rollout.values + [ rollout.r ] ) \n # \"Generalized Advantage Estimation\": https://arxiv.org/abs/1506.02438\n # Eq (10): delta_t = Rt + gamma*V_{t+1} - V_t\n # Eq (16): batch_adv_t = delta_t + gamma*delta_{t+1} + gamma^2*delta_{t+2} + ...\n delta_t = rewards + gamma * ( vpred_t[1:] - vpred_t[:-1] ).squeeze()\n batch_adv = discount( delta_t, gamma * lambda_ )\n\n features = rollout.features\n\n return Batch( batch_si, batch_a, batch_adv, batch_r, rollout.terminal, features )", "def loss_fn(self, targets, outputs, model):", "def compute_objectives(self, predictions, batch, stage):\n (p_ctc, p_seq, asr_p_seq, mt_p_seq, wav_lens, hyps,) = predictions\n\n ids = batch.id\n\n tokens_eos, tokens_eos_lens = batch.tokens_eos\n transcription_eos, transcription_eos_lens = batch.transcription_eos\n transcription_tokens, transcription_lens = batch.transcription_tokens\n\n # loss for different tasks\n # asr loss = ctc_weight * ctc loss + (1 - ctc_weight) * asr attention loss\n # mt loss = mt attention loss\n # st loss =\n # (1 - asr_weight - mt_weight) * st attention loss +\n # asr_weight * asr loss +\n # mt_weight * mt loss\n attention_loss = 0\n asr_ctc_loss = 0\n asr_attention_loss = 0\n mt_loss = 0\n\n # st attention loss\n attention_loss = self.hparams.seq_cost(\n p_seq, tokens_eos, length=tokens_eos_lens,\n )\n\n # asr attention loss\n if self.hparams.ctc_weight < 1 and self.hparams.asr_weight > 0:\n asr_attention_loss = self.hparams.seq_cost(\n asr_p_seq, transcription_eos, length=transcription_eos_lens,\n )\n\n # asr ctc loss\n if self.hparams.ctc_weight > 0 and self.hparams.asr_weight > 0:\n asr_ctc_loss = self.hparams.ctc_cost(\n p_ctc, transcription_tokens, wav_lens, transcription_lens,\n )\n\n # mt attention loss\n if self.hparams.mt_weight > 0:\n mt_loss = self.hparams.seq_cost(\n mt_p_seq, tokens_eos, length=tokens_eos_lens,\n )\n\n asr_loss = (self.hparams.ctc_weight * asr_ctc_loss) + (\n 1 - self.hparams.ctc_weight\n ) * asr_attention_loss\n loss = (\n (1 - self.hparams.asr_weight - self.hparams.mt_weight)\n * attention_loss\n + self.hparams.asr_weight * asr_loss\n + self.hparams.mt_weight * mt_loss\n )\n\n if stage != sb.Stage.TRAIN:\n current_epoch = self.hparams.epoch_counter.current\n valid_search_interval = self.hparams.valid_search_interval\n\n if stage == sb.Stage.TEST:\n # 4 references bleu score\n predictions = [\n en_detoeknizer.detokenize(\n hparams[\"tokenizer\"].decode_ids(utt_seq).split(\" \")\n )\n for utt_seq in hyps\n ]\n\n four_references = [\n batch.translation_0,\n batch.translation_1,\n batch.translation_2,\n batch.translation_3,\n ]\n\n targets = []\n for reference in four_references:\n detokenized_translation = [\n en_detoeknizer.detokenize(translation.split(\" \"))\n for translation in reference\n ]\n targets.append(detokenized_translation)\n\n self.bleu_metric.append(ids, predictions, targets)\n elif (\n current_epoch % 
valid_search_interval == 0\n and stage == sb.Stage.VALID\n ):\n predictions = [\n en_detoeknizer.detokenize(\n hparams[\"tokenizer\"].decode_ids(utt_seq).split(\" \")\n )\n for utt_seq in hyps\n ]\n\n targets = [\n en_detoeknizer.detokenize(translation.split(\" \"))\n for translation in batch.translation_0\n ]\n self.bleu_metric.append(ids, predictions, [targets])\n\n # compute the accuracy of the one-step-forward prediction\n self.acc_metric.append(p_seq, tokens_eos, tokens_eos_lens)\n\n return loss", "def loss(self, targets, scores):\n return (2. * numpy.arctan(targets * scores) - 1.)**2", "def loss_function(agent, trajectories):\n # All ALL_CAPS variables are constants.\n\n # QUESTIOM: The trajectories already have behavior_logits, why is the need\n # to calculate the target_logits?\n # trajectories shape: list of trajectory\n # target_logits: ArgsActionLogits\n target_logits, baselines = agent.unroll(trajectories)\n\n trajectories = U.stack_namedtuple(trajectories) \n trajectories = U.namedtuple_zip(trajectories) \n\n loss_actor_critic = 0.\n if True:\n rewards = torch.tensor(trajectories.reward, dtype=torch.float32, device=device)\n print(\"trajectories.reward\", rewards) if debug else None \n print(\"trajectories.reward.shape\", rewards.shape) if debug else None\n\n # use normalize\n if False:\n scale_dim = 1\n rewards = (rewards - torch.mean(rewards, dim=scale_dim, keepdim=True)) / (torch.std(rewards, dim=scale_dim, keepdim=True) + 1e-9)\n\n print(\"trajectories.reward\", rewards) if debug else None \n print(\"trajectories.reward.shape\", rewards.shape) if debug else None\n\n lambda_loss = td_lambda_loss(baselines[0], rewards, trajectories)\n print(\"lambda_loss:\", lambda_loss) if 1 else None\n loss_actor_critic += (10. * lambda_loss)\n\n # we add the split_vtrace_pg_loss\n pg_loss = split_vtrace_pg_loss(target_logits, baselines[0], rewards, trajectories)\n print(\"pg_loss:\", pg_loss) if 1 else None\n loss_actor_critic += (1.0 * pg_loss)\n\n UPGO_WEIGHT = 1.0\n loss_upgo = UPGO_WEIGHT * split_upgo_loss(target_logits, baselines[0], trajectories)\n print(\"loss_upgo:\", loss_upgo) if debug else None\n\n # note: we want to maximize the entropy\n # so we gradient descent the -entropy\n # Original AlphaStar pseudocode is wrong\n # AlphaStar: loss_ent = entropy_loss(trajectories.behavior_logits, trajectories.masks)\n loss_ent = 3 * (- entropy_loss_for_all_arguments(target_logits, trajectories.masks))\n print(\"loss_ent:\", loss_ent) if 1 else None\n\n #loss_all = target_logits.action_type.sum()\n loss_all = loss_actor_critic + loss_ent # + loss_upgo\n\n loss_list = [lambda_loss, pg_loss, loss_upgo, loss_ent]\n\n return loss_all, loss_list", "def process_rollout(rollout, gamma, lambda_=1.0):\n batch_si = np.asarray(rollout[\"obs\"])\n batch_a = np.asarray(rollout[\"actions\"])\n batch_v = np.asarray(rollout[\"vs\"])\n rewards = np.asarray(rollout[\"rs\"])\n vpred_t = np.asarray(rollout[\"vs\"] + [rollout[\"r\"]])\n\n rewards_plus_v = np.asarray(rollout[\"rs\"] + [rollout[\"r\"]])\n batch_r = discount(rewards_plus_v, gamma)[:-1]\n delta_t = rewards + gamma * vpred_t[1:] - vpred_t[:-1]\n # This formula for the advantage comes \"Generalized Advantage Estimation\":\n # https://arxiv.org/abs/1506.02438\n batch_adv = discount(delta_t, gamma * lambda_)\n features = rollout[\"features\"][0]\n return Batch(batch_si, batch_a, batch_adv, batch_r, batch_v, rollout[\"terminal\"],\n features)", "def loss_func(self, logits, targets):\r\n return -np.sum(targets * np.log(logits)) / logits.shape[0]", "def 
cost(predictions, targets):\n # averages the error across all data points, taking the values that have not been rounded to 0 and 1.\n return np.mean( (predictions - targets)**2)", "def compute_objectives(self, predictions, batch, stage):\n\n p_ctc, wav_lens = predictions\n\n ids = batch.id\n tokens_eos, tokens_eos_lens = batch.tokens_eos\n tokens, tokens_lens = batch.tokens\n\n loss = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens)\n\n if stage != sb.Stage.TRAIN:\n # Decode token terms to words\n sequence = sb.decoders.ctc_greedy_decode(\n p_ctc, wav_lens, blank_id=self.hparams.blank_index\n )\n\n predicted_words = self.tokenizer(sequence, task=\"decode_from_list\")\n\n # Convert indices to words\n target_words = undo_padding(tokens, tokens_lens)\n target_words = self.tokenizer(target_words, task=\"decode_from_list\")\n\n self.wer_metric.append(ids, predicted_words, target_words)\n self.cer_metric.append(ids, predicted_words, target_words)\n\n return loss", "def _compute_targets(ex_rois, labels, gt_rois):\n\n assert ex_rois.shape[0] == gt_rois.shape[0]\n assert ex_rois.shape[1] == 4\n assert gt_rois.shape[1] == 8\n assert len(labels) == ex_rois.shape[0]\n\n # bbox_transform函数的输入是anchors, 和GT的坐标部分\n # 输出是一个N×2的矩阵,每行表示一个anchor与对应的IOU最大的GT的y,h回归,\n return bbox_transform(ex_rois, labels, gt_rois).astype(np.float32, copy=False)", "def compute_gradients(self, logits, target):\n\n target_length = target.shape[0]\n num_time_steps = logits.shape[0]\n\n\t\t######################\n\t\t### YOUR CODE HERE ###\n\t\t######################\n\n # expand labels by inserting a blank between each pair\n normalized_logits = softmax(logits)\n blank_label = normalized_logits.shape[1] - 1\n l = add_blanks(target, blank_label)\n target_length = l.shape[0]\n\n alpha = self.compute_forward_variables(normalized_logits, target) \n beta = self.compute_backward_variables(normalized_logits, target)\n\n # rescale\n alpha = alpha / np.sum(alpha, axis=0)\n beta = beta / np.sum(beta, axis=0)\n alphabeta = alpha * beta\n print \"alpha\"\n print alpha\n\n # compute zt\n z = Counter()\n for t in xrange(num_time_steps):\n for s, k in enumerate(l):\n z[t] += alphabeta[s, t] / normalized_logits[t, k]\n \n # normalized_logits is time steps t by labels k\n # alpha is 2 * target_length - 1 by time steps\n lab_zk = np.zeros_like(normalized_logits)\n for s, k in enumerate(l):\n for t in xrange(num_time_steps):\n lab_zk[t, k] += alphabeta[s, t]\n\n grad = normalized_logits\n for k in xrange(target.shape[0]):\n for t in xrange(num_time_steps):\n ytk = normalized_logits[t, k]\n constant = 1.0 / (ytk * z[t])\n grad[t, k] = ytk - constant * lab_zk[t, k]\n \n return grad", "def lossFunc(inputs, targets, hprev):\n xs, hs, ys, ps = {}, {}, {}, {} # input, hidden, output, out_prob states for each time t\n hs[-1] = np.copy(hprev)\n loss = 0\n \n # forward pass\n for t in xrange(len(inputs)):\n xs[t] = np.zeros((vocab_size,1)) \n xs[t][inputs[t]] = 1. 
# convert input to one-hot\n hs[t] = np.tanh(np.dot(Wxh, xs[t]) + np.dot(Whh, hs[t-1]) + bh)\n ys[t] = np.dot(Why, hs[t]) + by\n ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t]))\n loss += -np.log(ps[t][targets[t],0])\n \n # backward pass\n dWxh, dWhh, dWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)\n dbh, dby = np.zeros_like(bh), np.zeros_like(by)\n dhnext = np.zeros_like(hs[0])\n for t in reversed(xrange(len(inputs))):\n # backprop into y\n dy = np.copy(ps[t])\n dy[targets[t]] -= 1\n # backprop into Why, hs, and by\n dWhy += np.dot(dy, hs[t].T)\n dby += dy\n dh = np.dot(Why.T, dy) + dhnext\n # backprop through tanh activition\n dhraw = (1 - hs[t] * hs[t]) * dh\n # backprop into Wxh, Whh, hs, and bh\n dbh += dhraw\n dWxh += np.dot(dhraw, xs[t].T)\n dWhh += np.dot(dhraw, hs[t-1].T)\n dhnext = np.dot(Whh.T, dhraw)\n # clip gradient preventing exploding\n for dparam in [dWxh, dWhh, dWhy, dbh, dby]:\n np.clip(dparam, -5, 5, out=dparam)\n\n return loss, dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs)-1]", "def _compute_targets(ex_rois, gt_rois, labels):\n\n assert ex_rois.shape[0] == gt_rois.shape[0]\n assert ex_rois.shape[1] == 4\n assert gt_rois.shape[1] == 4\n\n targets = bbox_transform(ex_rois, gt_rois)\n \"\"\"\n if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:\n # Optionally normalize targets by a precomputed mean and stdev\n targets = ((targets - np.array(cfg.TRAIN.BBOX_NORMALIZE_MEANS))\n / np.array(cfg.TRAIN.BBOX_NORMALIZE_STDS))\n \"\"\"\n return np.hstack(\n (labels[:, np.newaxis], targets)).astype(np.float32, copy=False)", "def _compute_targets(ex_rois, gt_rois, labels):\n\n\tassert ex_rois.shape[0] == gt_rois.shape[0]\n\tassert ex_rois.shape[1] == 4\n\tassert gt_rois.shape[1] == 4\n\n\t# targets: transformed [dx,dy,dw,dh]\n\ttargets = bbox_transform(ex_rois, gt_rois)\n\tif cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:\n\t\t# Optionally normalize targets by a precomputed mean and stdev\n\t\ttargets = ((targets - np.array(cfg.TRAIN.BBOX_NORMALIZE_MEANS))\n\t\t\t\t / np.array(cfg.TRAIN.BBOX_NORMALIZE_STDS))\n\treturn np.hstack(\n\t\t(labels[:, np.newaxis], targets)).astype(np.float32, copy=False)", "def _compute_targets(ex_rois, gt_rois, labels):\n\n assert ex_rois.shape[0] == gt_rois.shape[0]\n assert ex_rois.shape[1] == 4\n assert gt_rois.shape[1] == 4\n\n targets = bbox_transform(ex_rois, gt_rois)\n if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:\n # Optionally normalize targets by a precomputed mean and stdev\n targets = ((targets - np.array(cfg.TRAIN.BBOX_NORMALIZE_MEANS))\n / np.array(cfg.TRAIN.BBOX_NORMALIZE_STDS))\n return np.hstack(\n (labels[:, np.newaxis], targets)).astype(np.float32, copy=False)", "def compute_objectives(self, predictions, batch, stage):\n _, lens = batch.sig\n emoid, _ = batch.emo_encoded\n\n # Concatenate labels (due to data augmentation)\n if stage == sb.Stage.TRAIN:\n\n if hasattr(self.hparams.lr_annealing, \"on_batch_end\"):\n self.hparams.lr_annealing.on_batch_end(self.optimizer)\n\n loss = self.hparams.compute_cost(predictions, emoid, lens)\n\n if stage != sb.Stage.TRAIN:\n self.error_metrics.append(batch.id, predictions, emoid, lens)\n\n return loss", "def process_rollout(rollout, gamma, lambda_=1.0):\n batch_si = np.asarray(rollout.states)\n batch_a = np.asarray(rollout.actions)\n rewards = np.asarray(rollout.rewards)\n vpred_t = np.asarray(rollout.values + [rollout.r])\n\n rewards_plus_v = np.asarray(rollout.rewards + [rollout.r])\n batch_r = discount(rewards_plus_v, gamma)[:-1]\n delta_t = rewards + gamma * vpred_t[1:] - 
vpred_t[:-1]\n # this formula for the advantage comes \"Generalized Advantage Estimation\":\n # https://arxiv.org/abs/1506.02438\n batch_adv = discount(delta_t, gamma * lambda_)\n\n features = rollout.features[0]\n return Batch(batch_si, batch_a, batch_adv, batch_r, rollout.terminal, features)", "def process_rollout(rollout, gamma, lambda_=1.0):\n batch_si = np.asarray(rollout.states)\n batch_a = np.asarray(rollout.actions)\n rewards = np.asarray(rollout.rewards)\n vpred_t = np.asarray(rollout.values + [rollout.r])\n\n rewards_plus_v = np.asarray(rollout.rewards + [rollout.r])\n batch_r = discount(rewards_plus_v, gamma)[:-1]\n delta_t = rewards + gamma * vpred_t[1:] - vpred_t[:-1]\n # this formula for the advantage comes \"Generalized Advantage Estimation\":\n # https://arxiv.org/abs/1506.02438\n batch_adv = discount(delta_t, gamma * lambda_)\n\n features = rollout.features[0]\n return Batch(batch_si, batch_a, batch_adv, batch_r, rollout.terminal, features)", "def _compute_targets(ex_rois, gt_rois, labels):\r\n\r\n assert ex_rois.shape[0] == gt_rois.shape[0]\r\n assert ex_rois.shape[1] == 4\r\n assert gt_rois.shape[1] == 4\r\n\r\n targets = bbox_transform(ex_rois, gt_rois)\r\n if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:\r\n # Optionally normalize targets by a precomputed mean and stdev\r\n targets = ((targets - np.array(cfg.TRAIN.BBOX_NORMALIZE_MEANS))\r\n / np.array(cfg.TRAIN.BBOX_NORMALIZE_STDS))\r\n return np.hstack(\r\n (labels[:, np.newaxis], targets)).astype(np.float32, copy=False)", "def _compute_targets(self, rois, overlaps, labels):\n # Indices of ground-truth ROIs\n gt_inds = np.where(overlaps == 1)[0]\n if len(gt_inds) == 0:\n # Bail if the image has no ground-truth ROIs\n return np.zeros((rois.shape[0], 5), dtype=np.float32)\n # Indices of examples for which we try to make predictions\n ex_inds = np.where(overlaps >= self.config.TRAIN.BBOX_THRESH)[0]\n \n # Get IoU overlap between each ex ROI and gt ROI\n ex_gt_overlaps = bbox_overlaps(np.ascontiguousarray(rois[ex_inds, :], dtype=np.float),\n np.ascontiguousarray(rois[gt_inds, :], dtype=np.float))\n \n # Find which gt ROI each ex ROI has max overlap with:\n # this will be the ex ROI's gt target\n gt_assignment = ex_gt_overlaps.argmax(axis=1)\n gt_rois = rois[gt_inds[gt_assignment], :]\n ex_rois = rois[ex_inds, :]\n \n targets = np.zeros((rois.shape[0], 5), dtype=np.float32)\n targets[ex_inds, 0] = labels[ex_inds]\n targets[ex_inds, 1:] = bbox_transform(ex_rois, gt_rois)\n return targets", "def compute_loss(self, targets, logits, seq_length):\n\n\t\twith tf.name_scope('evaluate_loss'):\n\t\t\tloss, norm = self.loss_computer(targets, logits, seq_length)\n\t\t\t\n\t\treturn loss, norm", "def cross_entropy_cost(output_out, target_out):\r\n total = 0\r\n for target_node in range(len(target_out)): # For each target data set\r\n for output_node in range(len(output_out)): # For each output node\r\n total += target_out[target_node][output_node] - target_out[target_node][output_node] * np.log(output_out[output_node]) - \\\r\n (1 - target_out[target_node][output_node]) * np.log(1 - output_out[output_node])\r\n\r\n total = 1 / total\r\n return total", "def training_step(self, rollouts):\n self.optimizer.zero_grad()\n\n rollout_len = rollouts.shape[1]\n input_frames = self.params['optimization']['input_frames']\n assert(input_frames <= rollout_len) # optimization.use_steps must be smaller (or equal) to rollout.sequence_length\n roll = rollouts[:, :input_frames]\n\n hgn_output = self.hgn.forward(rollout_batch=roll, n_steps=rollout_len - 
input_frames)\n target = rollouts[:, input_frames-1:] # Fit first input_frames and try to predict the last + the next (rollout_len - input_frames)\n prediction = hgn_output.reconstructed_rollout\n\n if self.params[\"networks\"][\"variational\"]:\n tol = self.params[\"geco\"][\"tol\"]\n alpha = self.params[\"geco\"][\"alpha\"]\n lagrange_mult_param = self.params[\"geco\"][\"lagrange_multiplier_param\"]\n\n C, rec_loss = geco_constraint(target, prediction, tol) # C has gradient\n\n # Compute moving average of constraint C (without gradient)\n if self.C_ma is None:\n self.C_ma = C.detach()\n else:\n self.C_ma = alpha * self.C_ma + (1 - alpha) * C.detach()\n C_curr = C.detach().item() # keep track for logging\n C = C + (self.C_ma - C.detach()) # Move C without affecting its gradient\n\n # Compute KL divergence\n mu = hgn_output.z_mean\n logvar = hgn_output.z_logvar\n kld = kld_loss(mu=mu, logvar=logvar)\n\n # normalize by number of frames, channels and pixels per frame\n kld_normalizer = prediction.flatten(1).size(1)\n kld = kld / kld_normalizer\n\n # Compute losses\n train_loss = kld + self.langrange_multiplier * C\n\n # clamping the langrange multiplier to avoid inf values\n self.langrange_multiplier = self.langrange_multiplier * torch.exp(\n lagrange_mult_param * C.detach())\n self.langrange_multiplier = torch.clamp(self.langrange_multiplier, 1e-10, 1e10)\n\n losses = {\n 'loss/train': train_loss.item(),\n 'loss/kld': kld.item(),\n 'loss/C': C_curr,\n 'loss/C_ma': self.C_ma.item(),\n 'loss/rec': rec_loss.item(),\n 'other/langrange_mult': self.langrange_multiplier.item()\n }\n\n else: # not variational\n # Compute frame reconstruction error\n train_loss = reconstruction_loss(\n target=target,\n prediction=prediction)\n losses = {'loss/train': train_loss.item()}\n\n train_loss.backward()\n self.optimizer.step()\n\n return losses, hgn_output", "def loss(self, prediction_dict, gt_boxes_list, gt_labels_list):\n with tf.name_scope(None, 'Loss', prediction_dict.values()):\n (batch_cls_targets, batch_cls_weights, batch_reg_targets, batch_reg_weights,\n match_list) = self._assign_targets(gt_boxes_list, gt_labels_list)\n # num_positives = [tf.reduce_sum(tf.cast(tf.not_equal(matches.match_results, -1), tf.float32))\n # for matches in match_list]\n self._summarize_target_assignment(gt_boxes_list, match_list)\n reg_loss = regression_loss(prediction_dict[\"box_pred\"], batch_reg_targets, batch_reg_weights)\n cls_loss = focal_loss(prediction_dict[\"cls_pred\"], batch_cls_targets, batch_cls_weights)\n # normalize loss by num of matches\n # num_pos_anchors = [tf.reduce_sum(tf.cast(tf.not_equal(match.match_results, -1), tf.float32))\n # for match in match_list]\n normalizer = tf.maximum(tf.to_float(tf.reduce_sum(batch_reg_weights)), 1.0)\n # normalize reg loss by box codesize (here is 4)\n reg_normalizer = normalizer * 4\n normalized_reg_loss = tf.multiply(reg_loss, 1.0/reg_normalizer, name=\"regression_loss\")\n normalized_cls_loss = tf.multiply(cls_loss, 1.0/normalizer, name=\"classification_loss\")\n return normalized_reg_loss, normalized_cls_loss, batch_reg_weights, batch_cls_weights", "def compute_losses(self, predictions, targets):\n smpl_weight = targets['target_smpl_weight']\n\n losses = {}\n if self.loss_beta is not None:\n losses['loss_beta'] = self.loss_beta(\n predictions['pred_shape'] * smpl_weight,\n targets['target_beta'] * smpl_weight)\n if self.loss_theta is not None:\n pred_pose = rotmat_to_quat(predictions['pred_pose']).reshape(\n -1, 96)\n losses['loss_theta'] = self.loss_theta(\n 
pred_pose * smpl_weight * targets['target_theta_weight'],\n targets['target_theta'] * smpl_weight *\n targets['target_theta_weight'])\n if self.loss_twist is not None:\n losses['loss_twist'] = self.loss_twist(\n predictions['pred_phi'] * targets['target_twist_weight'],\n targets['target_twist'] * targets['target_twist_weight'])\n if self.loss_uvd is not None:\n pred_uvd = predictions['pred_uvd_jts']\n target_uvd = targets['target_uvd_29'][:, :pred_uvd.shape[1]]\n target_uvd_weight = targets['target_weight_29'][:, :pred_uvd.\n shape[1]]\n losses['loss_uvd'] = self.loss_uvd(\n 64 * predictions['pred_uvd_jts'],\n 64 * target_uvd,\n target_uvd_weight,\n avg_factor=target_uvd_weight.sum())\n\n return losses", "def cross_entropy(predictions, targets):\n likelihood = targets * np.log(predictions)\n return -np.sum(likelihood) / predictions.shape[0]" ]
[ "0.666095", "0.6440683", "0.63122576", "0.62946445", "0.61775386", "0.6116227", "0.6081535", "0.59924567", "0.5973676", "0.5929982", "0.59123373", "0.58755696", "0.5871999", "0.5837028", "0.58256716", "0.5817292", "0.58115995", "0.58017886", "0.5794705", "0.57909685", "0.5785411", "0.5785411", "0.5777603", "0.57751757", "0.5748395", "0.5731819", "0.5718007", "0.5708643", "0.56870675", "0.5684927" ]
0.76641226
0
Use the appropriate videostreamer depending on the platform/camera to use
def set_video_source(self):
    if self.config['camera_device_id'] == 'pi':
        # Raspberry Pi camera as video source
        # only import if needed because it requires specific packages!
        from raspicamera import RasPiCamera
        self.video_stream = RasPiCamera()
    elif self.config['camera_device_id'] == 'network':
        # External camera through network stream as video source
        # only import if needed because it requires specific packages!
        from networkcamera import NetworkCamera
        NetworkCamera.set_url(self.config['camera_stream_url'])
        self.video_stream = NetworkCamera()
    else:
        # Local webcam as video source
        # only import if needed because it requires specific packages!
        from opencvcamera import OpencvCamera
        OpencvCamera.set_video_source(self.config['camera_device_id'])
        self.video_stream = OpencvCamera()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, camera=None):\n\n if camera is None:\n self.cam = Camera(0)\n elif type(camera) == int:\n self.cam = Camera(camera)\n else:\n self.cam = VirtualCamera(camera, \"video\")", "def __init__(self, device='/dev/video0', output_filename='/dev/null',\n width=320, height=240, framerate=30,\n window_title='webcam', image_controls=None,\n ):\n # Store params\n self.device = device\n self.output_filename = output_filename\n self.width = width\n self.height = height\n self.framerate = framerate\n self.window_title = window_title\n \n if self.output_filename is None:\n self.output_filename = '/dev/null'\n \n # Image controls\n self.image_controls = {\n 'gain': 3,\n 'exposure': 20,\n 'brightness': 40,\n 'contrast': 50,\n 'saturation': 69,\n 'hue': 0,\n 'white_balance_automatic': 0,\n 'gain_automatic': 0,\n 'auto_exposure': 1, # flipped\n }\n if image_controls is not None:\n self.image_controls.update(image_controls)\n \n self.read_stderr = None\n self.ffplay_stderr = None\n self.ffplay_stdout = None\n \n self.ffplay_proc = None\n self.read_proc = None\n self.tee_proc = None", "def run(self, live_camera, stream_path):\n\n has_element_err = False\n\n number_sources = 1\n # Standard GStreamer initialization\n GObject.threads_init()\n Gst.init(None)\n # Create gstreamer elements\n # Create Pipeline element that will form a connection of other elements\n print(\"Creating Pipeline \\n \")\n pipeline = Gst.Pipeline()\n\n if not pipeline:\n sys.stderr.write(\" Unable to create Pipeline \\n\")\n has_element_err = True\n if live_camera:\n if constants.RPI_MODE == constants.CAM_MODE:\n print(\"Creating Source \\n \")\n source = Gst.ElementFactory.make(\"nvarguscamerasrc\", \"src-elem\")\n if not source:\n sys.stderr.write(\" Unable to create Source \\n\")\n has_element_err = True\n else:\n print(\"Creating Source \\n \")\n source = Gst.ElementFactory.make(\"v4l2src\", \"usb-cam-source\")\n if not source:\n sys.stderr.write(\" Unable to create Source \\n\")\n has_element_err = True\n\n caps_v4l2src = Gst.ElementFactory.make(\"capsfilter\", \"v4l2src_caps\")\n if not caps_v4l2src:\n sys.stderr.write(\" Unable to create v4l2src capsfilter \\n\")\n has_element_err = True\n print(\"Creating Video Converter \\n\")\n # videoconvert to make sure a superset of raw formats are supported\n vidconvsrc = Gst.ElementFactory.make(\"videoconvert\", \"convertor_src1\")\n if not vidconvsrc:\n sys.stderr.write(\" Unable to create videoconvert \\n\")\n has_element_err = True\n # nvvideoconvert to convert incoming raw buffers to NVMM Mem (NvBufSurface API)\n nvvidconvsrc = Gst.ElementFactory.make(\"nvvideoconvert\", \"convertor_src2\")\n if not nvvidconvsrc:\n sys.stderr.write(\" Unable to create Nvvideoconvert \\n\")\n has_element_err = True\n caps_vidconvsrc = Gst.ElementFactory.make(\"capsfilter\", \"nvmm_caps\")\n if not caps_vidconvsrc:\n sys.stderr.write(\" Unable to create capsfilter \\n\")\n has_element_err = True\n else:\n # Source element for reading from the file\n print(\"Creating Source \\n \")\n source = Gst.ElementFactory.make(\"filesrc\", \"file-source\")\n if not source:\n sys.stderr.write(\" Unable to create Source \\n\")\n has_element_err = True\n # Since the data format in the input file is elementary h264 stream,\n # we need a h264parser\n print(\"Creating H264Parser \\n\")\n h264parser = Gst.ElementFactory.make(\"h264parse\", \"h264-parser\")\n if not h264parser:\n sys.stderr.write(\" Unable to create h264 parser \\n\")\n has_element_err = True\n # Use nvdec_h264 for hardware accelerated 
decode on GPU\n print(\"Creating Decoder \\n\")\n decoder = Gst.ElementFactory.make(\"nvv4l2decoder\", \"nvv4l2-decoder\")\n if not decoder:\n sys.stderr.write(\" Unable to create Nvv4l2 Decoder \\n\")\n has_element_err = True\n # Create nvstreammux instance to form batches from one or more sources.\n streammux = Gst.ElementFactory.make(\"nvstreammux\", \"Stream-muxer\")\n if not streammux:\n sys.stderr.write(\" Unable to create NvStreamMux \\n\")\n has_element_err = True\n # Use nvinfer to run inferencing on decoder's output,\n # behaviour of inferencing is set through config file\n pgie = Gst.ElementFactory.make(\"nvinfer\", \"primary-inference\")\n if not pgie:\n sys.stderr.write(\" Unable to create pgie \\n\")\n has_element_err = True\n\n # Use nv-tracker to keep track of the detected objects\n tracker = Gst.ElementFactory.make(\"nvtracker\", \"NV-Tracker\")\n if not tracker:\n sys.stderr.write(\" Unable to create tracker \\n\")\n has_element_err = True\n\n # Add nvvidconv1 and filter1 to convert the frames to RGBA\n # which is easier to work with in Python.\n print(\"Creating nvvidconv1 \\n \")\n nvvidconv1 = Gst.ElementFactory.make(\"nvvideoconvert\", \"convertor1\")\n if not nvvidconv1:\n sys.stderr.write(\" Unable to create nvvidconv1 \\n\")\n has_element_err = True\n print(\"Creating filter1 \\n \")\n caps1 = Gst.Caps.from_string(\"video/x-raw(memory:NVMM), format=RGBA\")\n filter1 = Gst.ElementFactory.make(\"capsfilter\", \"filter1\")\n if not filter1:\n sys.stderr.write(\" Unable to get the caps filter1 \\n\")\n has_element_err = True\n #filter1.set_property(\"caps\", caps1)\n print(\"Creating tiler \\n \")\n tiler = Gst.ElementFactory.make(\"nvmultistreamtiler\", \"nvtiler\")\n if not tiler:\n sys.stderr.write(\" Unable to create tiler \\n\")\n has_element_err = True\n print(\"Creating nvvidconv \\n \")\n nvvidconv = Gst.ElementFactory.make(\"nvvideoconvert\", \"convertor\")\n if not nvvidconv:\n sys.stderr.write(\" Unable to create nvvidconv \\n\")\n has_element_err = True\n print(\"Creating nvosd \\n \")\n nvosd = Gst.ElementFactory.make(\"nvdsosd\", \"onscreendisplay\")\n if not nvosd:\n sys.stderr.write(\" Unable to create nvosd \\n\")\n has_element_err = True\n print(\"Creating Fake sink \\n\")\n # sink = Gst.ElementFactory.make(\"nveglglessink\", \"nvvideo-renderer\")\n sink = Gst.ElementFactory.make(\"fakesink\", \"fakesink\")\n if not sink:\n sys.stderr.write(\" Unable to create fake sink \\n\")\n has_element_err = True\n print(\"Playing file %s \" %stream_path)\n\n\n if has_element_err:\n\n process_result = False\n\n else:\n\n if live_camera:\n if constants.RPI_MODE == constants.CAM_MODE:\n source.set_property('bufapi-version', True)\n else:\n source.set_property('device', stream_path)\n caps_v4l2src.set_property('caps', \\\n Gst.Caps.from_string(\"video/x-raw, framerate=30/1\"))\n caps_vidconvsrc.set_property('caps', \\\n Gst.Caps.from_string(\"video/x-raw(memory:NVMM)\"))\n else:\n source.set_property('location', stream_path)\n\n streammux.set_property('width', 1920)\n streammux.set_property('height', 1080)\n streammux.set_property('batch-size', 1)\n streammux.set_property('batched-push-timeout', 4000000)\n\n tiler_rows = int(math.sqrt(number_sources))\n tiler_columns = int(math.ceil((1.0*number_sources)/tiler_rows))\n tiler.set_property(\"rows\", tiler_rows)\n tiler.set_property(\"columns\", tiler_columns)\n tiler.set_property(\"width\", constants.FRAME_WIDTH)\n tiler.set_property(\"height\", constants.FRAME_HEIGHT)\n\n if is_aarch64():\n sink.set_property(\"sync\", 
0)\n else:\n sink.set_property(\"sync\", 1)\n\n # Use CUDA unified memory in the pipeline so frames\n # can be easily accessed on CPU in Python.\n mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)\n streammux.set_property(\"nvbuf-memory-type\", mem_type)\n nvvidconv.set_property(\"nvbuf-memory-type\", mem_type)\n nvvidconv1.set_property(\"nvbuf-memory-type\", mem_type)\n tiler.set_property(\"nvbuf-memory-type\", mem_type)\n\n filter1.set_property(\"caps\", caps1)\n\n #Set properties of pgie\n pgie.set_property('config-file-path', \"dstest1_pgie_config.txt\")\n\n #Set nv-tracker properties\n tracker.set_property('ll-lib-file', \\\n '/opt/nvidia/deepstream/deepstream-6.0/lib/libnvds_nvdcf.so')\n tracker.set_property('tracker-width', 20*32)\n tracker.set_property('tracker-height', 20*32)\n tracker.set_property('enable-past-frame', 1)\n tracker.set_property('enable-batch-process', 1)\n tracker.set_property('ll-config-file', 'config/tracker_config.yml')\n\n print(\"Adding elements to Pipeline \\n\")\n pipeline.add(source)\n if live_camera:\n if constants.RPI_MODE != constants.CAM_MODE:\n pipeline.add(caps_v4l2src)\n pipeline.add(vidconvsrc)\n pipeline.add(nvvidconvsrc)\n pipeline.add(caps_vidconvsrc)\n else:\n pipeline.add(h264parser)\n pipeline.add(decoder)\n pipeline.add(streammux)\n pipeline.add(pgie)\n pipeline.add(tracker)\n pipeline.add(tiler)\n pipeline.add(nvvidconv)\n pipeline.add(filter1)\n pipeline.add(nvvidconv1)\n pipeline.add(nvosd)\n pipeline.add(sink)\n\n # we link the elements together\n # file-source -> h264-parser -> nvh264-decoder ->\n # nvinfer -> nvvidconv -> nvosd -> video-renderer\n print(\"Linking elements in the Pipeline \\n\")\n if live_camera:\n if constants.RPI_MODE == constants.CAM_MODE:\n source.link(nvvidconvsrc)\n else:\n source.link(caps_v4l2src)\n caps_v4l2src.link(vidconvsrc)\n vidconvsrc.link(nvvidconvsrc)\n nvvidconvsrc.link(caps_vidconvsrc)\n else:\n source.link(h264parser)\n h264parser.link(decoder)\n\n sinkpad = streammux.get_request_pad(\"sink_0\")\n if not sinkpad:\n sys.stderr.write(\" Unable to get the sink pad of streammux \\n\")\n if live_camera:\n srcpad = caps_vidconvsrc.get_static_pad(\"src\")\n else:\n srcpad = decoder.get_static_pad(\"src\")\n if not srcpad:\n sys.stderr.write(\" Unable to get source pad of decoder \\n\")\n srcpad.link(sinkpad)\n streammux.link(pgie)\n pgie.link(tracker)\n tracker.link(nvvidconv1)\n nvvidconv1.link(filter1)\n filter1.link(tiler)\n tiler.link(nvvidconv)\n nvvidconv.link(nvosd)\n nvosd.link(sink)\n\n # create and event loop and feed gstreamer bus mesages to it\n loop = GObject.MainLoop()\n\n bus = pipeline.get_bus()\n bus.add_signal_watch()\n bus.connect(\"message\", bus_call, loop)\n\n # Lets add probe to get informed of the meta data generated, we add probe to\n # the sink pad of the osd element, since by that time, the buffer would have\n # had got all the metadata.\n tiler_sink_pad = nvvidconv.get_static_pad(\"sink\")\n if not tiler_sink_pad:\n sys.stderr.write(\" Unable to get src pad \\n\")\n else:\n tiler_sink_pad.add_probe(Gst.PadProbeType.BUFFER, self.__metadata_process, 0)\n\n print(\"Starting pipeline \\n\")\n # start play back and listed to events\n pipeline.set_state(Gst.State.PLAYING)\n\n calib_result = Common.get_instance().check_calibration_file()\n\n if calib_result != constants.V_CALIB_OK:\n\n self.__calibration_mode = constants.ON\n ScreenCalibration.get_instance().run()\n self.__calibration_mode = constants.OFF\n\n # start play back and listed to events\n try:\n loop.run()\n except 
KeyboardInterrupt:\n pass\n\n # cleanup\n pipeline.set_state(Gst.State.NULL)\n\n process_result = True\n\n return process_result", "def _open_capture(self):\n\n plat = platform.system()\n if plat == \"Windows\":\n gst = 'rtspsrc location=' + self._rtsp + ' latency=10 ! rtph264depay ! h264parse ! avdec_h264 ! videoconvert ! appsink sync=false'\n self.capture = cv2.VideoCapture(gst, apiPreference=cv2.CAP_GSTREAMER)\n # self.capture = cv2.VideoCapture(self._rtsp, apiPreference=cv2.CAP_FFMPEG)\n elif plat == \"Linux\":\n if platform.machine() == 'aarch64': # Jetson Nano\n gst ='rtspsrc location=' + self._rtsp + ' latency=10 ! rtph264depay ! h264parse ! omxh264dec ! nvvidconv ! appsink sync=false'\n self.capture = cv2.VideoCapture(gst, apiPreference=cv2.CAP_GSTREAMER)\n elif platform.machine() == 'armv6l' or platform.machine() == 'armv7l': # Raspberry Pi\n gst = 'rtspsrc location=' + self._rtsp + ' latency=10 ! queue ! rtph264depay ! h264parse ! v4l2h264dec capture-io-mode=4 ! v4l2convert output-io-mode=5 capture-io-mode=4 ! appsink sync=false'\n # might not need the two queue statements above\n self.capture = cv2.VideoCapture(gst, apiPreference=cv2.CAP_GSTREAMER)\n elif plat == \"MacOS\":\n gst = 'rtspsrc location=' + self._rtsp + ' latency=10 ! rtph264depay ! h264parse ! avdec_h264 ! videoconvert ! appsink'\n self.capture = cv2.VideoCapture(gst, apiPreference=cv2.CAP_GSTREAMER)\n else:\n gst = 'rtspsrc location=' + self._rtsp + ' latency=10 ! rtph264depay ! h264parse ! avdec_h264 ! videoconvert ! appsink'\n self.capture = cv2.VideoCapture(gst, apiPreference=cv2.CAP_GSTREAMER)\n\n self.capture_open = self.capture.isOpened() \n if not self.capture_open:\n self.logger.log(logging.CRITICAL, \"Status:Failed to open camera!\")", "def startCamera(self):\n if self.video == \"camera\":\n self.cap = cv2.VideoCapture(gstreamer_pipeline(\n capture_width=416, capture_height=416, flip_method=0), cv2.CAP_GSTREAMER)\n else:\n video_path = Path(self.video)\n if not video_path.exists():\n raise Exception(\"Video file not found\")\n self.cap = cv2.VideoCapture(str(video_path))", "def initialize_camera(self):\n if Rescue_PI.input_video_file_path is None:\n print(\"[INFO] starting threaded video stream...\")\n self.vs = VideoStream(src=VID_CAM_INDEX).start()\n else:\n self.vs = cv2.VideoCapture(Rescue_PI.input_video_file_path)", "def turn_video_camera_on():\n # Do command\n consoleFeedback = exec_console_command(constants.videoCameraOn + constants.getExitStatus)\n\n # Parse output\n if \"2\" in consoleFeedback:\n raise IOError(constants.videoCameraOnScriptNotFound)\n\n feedbackOutput = constants.videoCameraSwitchedOn\n\n return feedbackOutput", "def main(argv):\n # Get default camera id based on current platform.\n if sys.platform == 'linux' or sys.platform == 'linux2':\n default_cam_ids = ['/dev/video0', '/dev/video1', '/dev/video2']\n else: # darwin win32 win64\n default_cam_ids = [0, 1, 2]\n\n # Parse CLI arguments\n ap = argparse.ArgumentParser()\n ap.add_argument('-i', '--cam_ids', default=default_cam_ids,\n help=\"camera ids list (ex: ='[/dev/video0, /dev/video1]'\")\n # TODO: implement dict argument parsing settings\n ap.add_argument('-s', '--settings',\n help=\"camera settings list \"\n \"(ex:[[(3, 640), (4, 480)], [(3, 640), (4, 480)]]\")\n args = vars(ap.parse_args())\n\n # Default camera settings\n if args[\"settings\"]:\n settings = args[\"settings\"]\n else:\n settings = [[(cv2.CAP_PROP_FRAME_WIDTH, 1280),\n (cv2.CAP_PROP_FRAME_HEIGHT, 720),\n (cv2.CAP_PROP_FPS, 30),\n (cv2.CAP_PROP_FOURCC, 
cv2.VideoWriter_fourcc(*'MJPG')),\n (cv2.CAP_PROP_AUTOFOCUS, 1)],\n [(cv2.CAP_PROP_FRAME_WIDTH, 1280),\n (cv2.CAP_PROP_FRAME_HEIGHT, 720),\n (cv2.CAP_PROP_FPS, 30),\n (cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG')),\n (cv2.CAP_PROP_AUTOFOCUS, 1)],\n [(cv2.CAP_PROP_FRAME_WIDTH, 1280),\n (cv2.CAP_PROP_FRAME_HEIGHT, 720),\n (cv2.CAP_PROP_FPS, 30),\n (cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG')),\n (cv2.CAP_PROP_AUTOFOCUS, 1)]]\n\n aruco_dict_num = cv2.aruco.DICT_6X6_1000\n # also available: DICT_5X5_1000, DICT_4X4_50, DICT_ARUCO_ORIGINAL\n\n # Initialize Cameras objects with calibration and lens correction\n cam_ids = args['cam_ids']\n if sys.platform != 'linux' and sys.platform != 'linux2':\n cam_ids = [int(cam_id) for cam_id in cam_ids]\n cameras = []\n for cam_id, setting in zip(cam_ids, settings):\n print('Setting up camera %s.' % cam_id)\n cam = CameraCorrected(\n cam_id=cam_id, aruco_dict_num=aruco_dict_num, settings=setting)\n cam.initialize()\n cameras.append(cam)\n\n cameras_fusion = CamerasFusion(cameras)\n cameras_fusion.initialize()\n\n # Open basic live view\n print('Live view running...')\n print(' k to calibrate correction')\n print(' m to save frame')\n print(' v loop between gray2rgb and blue2rgb fusion')\n print(' ESC or q to exit.')\n\n selected_fused = cameras_fusion.read_blue2rgb_fused\n while True:\n if cameras_fusion.fusion_calibration_is_done:\n frame = selected_fused()\n frame = camera[0].draw_fps(frame)\n else:\n for camera in cameras_fusion.cameras:\n frame = camera.read_undistort()\n frame = camera.draw_text(\n frame, 'Please manually adjust Cameras overlapping, then c'\n 'alibrate.', y=camera.height - (camera.height/20),\n thickness=2)\n k = cv2.waitKey(50) % 256\n if k == 27 or k == ord('q'):\n break\n cv2.imshow(\"Live camera\", frame)\n k = cv2.waitKey(40) % 256\n if k == 27 or k == ord('q'):\n break\n elif k == ord('k'):\n if cameras_fusion.calibrate_fusion():\n print('Calibration done!')\n elif k == ord('m'):\n cv2.imwrite('frame_fused_%s.png' % cam.cam_id, frame)\n elif k == ord('v'):\n if selected_fused == cameras_fusion.read_blue2rgb_fused:\n selected_fused = cameras_fusion.read_gray2rgb_fused\n else:\n selected_fused = cameras_fusion.read_blue2rgb_fused\n\n cameras_fusion.release() # DO NOT FORGET TO RELEASE!\n cv2.destroyAllWindows()", "def open_video(self):\n\n # start the stream on the bebop\n if (self.is_bebop):\n self.drone_object.start_video_stream()\n\n # we have bypassed the old opencv VideoCapture method because it was unreliable for rtsp\n\n # get the path for the config files\n fullPath = inspect.getfile(DroneVisionGUI)\n shortPathIndex = fullPath.rfind(\"/\")\n if (shortPathIndex == -1):\n # handle Windows paths\n shortPathIndex = fullPath.rfind(\"\\\\\")\n print(shortPathIndex)\n shortPath = fullPath[0:shortPathIndex]\n self.imagePath = join(shortPath, \"images\")\n self.utilPath = join(shortPath, \"utils\")\n print(self.imagePath)\n print(self.utilPath)\n\n if self.is_bebop:\n # generate the streaming-address for the Bebop\n self.utilPath = join(shortPath, \"utils\")\n self.stream_adress = \"%s/bebop.sdp\" % self.utilPath\n else:\n # generate the streaming-address for the Mambo\n self.stream_adress = \"rtsp://192.168.99.1/media/stream2\"\n\n # initialise the vlc-player with the network-caching\n self.player = vlc.MediaPlayer(self.stream_adress, \":network-caching=\" + str(self.network_caching))\n\n # start the buffering\n success = self._start_video_buffering()", "def __init__(self, camera: int = 0) -> None:\n self.video = 
cv2.VideoCapture(camera)", "def __init__(self, camera, cameras, settings):\n\n self.cam = None\n self.jpeg_quality = 95 # 0 to 100, higher is better quality, 95 is cv2 default\n # check picamera version\n try:\n picamversion = require('picamera')[0].version\n except:\n picamversion = '0'\n\n if 'threaded_read' in cameras[camera]: # threaded on non-threaded camera reading\n self.threaded_read = cameras[camera]['threaded_read']\n else:\n self.threaded_read = True\n if 'resolution' in cameras[camera]:\n self.resolution = literal_eval(cameras[camera]['resolution'])\n else:\n self.resolution = (320, 240)\n if 'framerate' in cameras[camera]:\n self.framerate = cameras[camera]['framerate']\n else:\n self.framerate = 32\n if 'vflip' in cameras[camera]:\n self.vflip = cameras[camera]['vflip']\n else:\n self.vflip = False\n if 'resize_width' in cameras[camera]:\n # resize_width is a percentage value\n # width in pixels will be computed later after reading a test image\n self.resize_width = cameras[camera]['resize_width']\n else:\n self.resize_width = None\n if 'viewname' in cameras[camera]:\n self.viewname = cameras[camera]['viewname']\n else:\n self.viewname = ' '\n if 'src' in cameras[camera]:\n self.src = cameras[camera]['src']\n else:\n self.src = 0\n if 'exposure_mode' in cameras[camera]:\n self.exposure_mode = cameras[camera]['exposure_mode']\n else:\n self.exposure_mode = None\n if 'iso' in cameras[camera]:\n self.iso = cameras[camera]['iso']\n else:\n self.iso = 0 # default value\n if 'shutter_speed' in cameras[camera]:\n self.shutter_speed = cameras[camera]['shutter_speed']\n else:\n self.shutter_speed = 0 # default value\n if 'sharpness' in cameras[camera]:\n self.sharpness = cameras[camera]['sharpness']\n else:\n self.sharpness = 0 # default value\n if 'contrast' in cameras[camera]:\n self.contrast = cameras[camera]['contrast']\n else:\n self.contrast = 0 # default value\n if 'brightness' in cameras[camera]:\n self.brightness = cameras[camera]['brightness']\n else:\n self.brightness = 50 # default value\n if 'exposure_compensation' in cameras[camera]:\n self.exposure_compensation = cameras[camera]['exposure_compensation']\n else:\n self.exposure_compensation = 0 # 0 default value, integer value between -25 and 25\n if 'awb_mode' in cameras[camera]:\n self.awb_mode = cameras[camera]['awb_mode']\n else:\n self.awb_mode = 'auto' # default value\n\n self.detectors = []\n if 'detectors' in cameras[camera]: # is there at least one detector\n self.setup_detectors(cameras[camera]['detectors'],\n settings.nodename,\n self.viewname)\n if camera[0].lower() == 'p': # this is a picam\n # start PiCamera and warm up; inherits methods from\n # imutils.VideoStream unless threaded_read is False; then uses class\n # PiCameraUnthreadedStream to read the PiCamera in an unthreaded way\n if self.threaded_read:\n self.cam = VideoStream(usePiCamera=True,\n resolution=self.resolution,\n framerate=self.framerate).start()\n else:\n self.cam = PiCameraUnthreadedStream(resolution=self.resolution,\n framerate=self.framerate)\n\n # if an exposure mode has been set in yaml, set it\n if self.exposure_mode:\n self.cam.camera.exposure_mode = self.exposure_mode\n # if an iso has been set in yaml, set it\n if self.iso:\n self.cam.camera.iso = self.iso\n # if an iso has been set in yaml, set it\n if self.shutter_speed:\n self.cam.camera.shutter_speed = self.shutter_speed\n # if an sharpness has been set in yaml, set it\n if self.sharpness:\n self.cam.camera.sharpness = self.sharpness\n # if an contrast has been set in yaml, 
set it\n if self.contrast:\n self.cam.camera.contrast = self.contrast\n # if an brightness has been set in yaml, set it\n if self.brightness:\n self.cam.camera.brightness = self.brightness\n # if an exposure_compensation has been set in yaml, set it\n if self.exposure_compensation:\n self.cam.camera.exposure_compensation = self.exposure_compensation\n # if an awb_mode has been set in yaml, set it\n if self.awb_mode:\n self.cam.camera.awb_mode = self.awb_mode\n self.cam_type = 'PiCamera'\n else: # this is a webcam (not a picam)\n self.cam = VideoStream(src=0).start()\n self.cam_type = 'webcam'\n sleep(3.0) # allow camera sensor to warm up\n\n # self.text is the text label for images from this camera.\n # Each image that is sent is sent with a text label so the hub can\n # file them by nodename, viewname, and send_type\n # example: JeffOffice Window|jpg\n # Nodename and View name are in one field, separated by a space.\n # send_type is in the next field\n # The 2 field names are separaged by the | character\n node_and_view = ' '.join([settings.nodename, self.viewname]).strip()\n self.text = '|'.join([node_and_view, settings.send_type])\n\n # set up camera image queue\n self.cam_q = deque(maxlen=settings.queuemax)", "def open_camera(self):\n camera_source = self.winOpenCam.camera_source_used()\n if camera_source:\n param_name = select_file(\n \"Select Parameter\", \"../\", \"Parameter Files (*.json)\")\n if param_name:\n self.moildev = Moildev(param_name)\n self.running_video(camera_source)\n self.cam = True", "def multi_video_feed(device):\n client_ip = request.environ['REMOTE_ADDR'][:3]\n if str(client_ip[:3]) == \"192\" or str(client_ip) == \"127.0.0.1\":\n camera_stream = import_module('camera_multicv').BaseCamera\n camera_stream.set_video_source(int(device))\n return Response(gen(camera_stream(int(device))),\n mimetype='multipart/x-mixed-replace; boundary=frame')\n else:\n return render_template('404.html')", "def gstreamer_pipeline(\n self,\n capture_width=1920,\n capture_height=1080,\n display_width=960,\n display_height=540,\n framerate=30,\n flip_method=0,\n ):\n\n return (\n \"nvarguscamerasrc ! \"\n \"video/x-raw(memory:NVMM), \"\n \"width=(int)%d, height=(int)%d, \"\n \"format=(string)NV12, framerate=(fraction)%d/1 ! \"\n \"nvvidconv flip-method=%d ! \"\n \"video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! \"\n \"videoconvert ! \"\n \"video/x-raw, format=(string)BGR ! 
appsink\"\n % (\n capture_width,\n capture_height,\n framerate,\n flip_method,\n display_width,\n display_height,\n )\n )", "def __init__(self, src=0):\n # if src is a string it should be a path to the input video\n # if src is a number it should be the correct video input device\n print('VIDEO: Initializing Video Stream')\n self.stream = cv2.VideoCapture(src)\n print('VIDEO: Finished initializing Video Stream')\n print('VIDEO: Setting Video width to 1400')\n self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, 1400)\n print('VIDEO: Setting Video height to 700')\n self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, 700)\n print('VIDEO: Disabling Camera Autofocus')\n self.stream.set(cv2.CAP_PROP_AUTOFOCUS, 0)\n print('VIDEO: Starting video stream')\n (self.grabbed, self.frame) = self.stream.read()\n self.stopped = False", "def main_func_video_camera(param_list: list = None) -> bool:\r\n # index of param\r\n # noinspection PyPep8Naming\r\n PORT_RAW_PICT = 0\r\n\r\n # check if param OK\r\n if len(param_list) != 1:\r\n log_error_to_console(\"GET FRAME VIDEO CAPTURE MAIN FUNCTION PARAM NOK\", str(len(param_list)))\r\n return False\r\n else:\r\n port_image = get_port_from_wave(name=param_list[PORT_RAW_PICT])\r\n\r\n try:\r\n # noinspection PyUnresolvedReferences\r\n success, port_image.arr[:] = global_var_handler.VIDEO.read()\r\n if success is True:\r\n port_image.set_valid()\r\n except BaseException as error:\r\n is_error()\r\n # noinspection PyUnresolvedReferences\r\n log_error_to_console('RAW PICTURE NOK TO READ: ' + str(global_var_handler.VIDEO.__str__()), str(error))\r\n port_image.set_invalid()\r\n pass\r\n\r\n # noinspection PyUnresolvedReferences\r\n log_to_file(str(global_var_handler.FRAME))\r\n # noinspection PyUnresolvedReferences\r\n log_to_file(global_var_handler.STR_L0_SIZE)\r\n\r\n return True", "def setupCamera(self):\n\t\tself.eye = self.vr.newEye(\"test_cam\")\n\t\tself.eye.reposition(0.0, 1.0, 0.5, 0.0, 0.0, 0.0)\n\t\tself.eye.setFOV(self.config.camFOV)\n\t\n\t\tself.video.clear(\"black\")\n\t\tself.video.show(self.eye, 0, 0)", "def _start_vidmemwriter(self, camType, ip=None, inputres=\"640x480\", outputres=\"640x480\"):\n if not self.__vidmemwriter and not self.__server_mode:\n self.__vidmemwriter = vidmemwriter.VidMemWriter([], [])\n\n if camType in self.__video_sources:\n return True\n\n self.__logger.info(\"I'm starting %s\" % camType)\n\n if ros_pattern.match(camType):\n #The first 4 characters \"ros_\" identify that is a specific ros image\n #The second part *** in \"ros_***/topic\" is the encoding:\n topic = camType[4:]\n encoding = \"passthrough\"\n self.__logger.info(\"camType !!!!!! %s\" % camType)\n if not camType[4] == '/':\n str_list = camType.split(\"_\")\n topic = '_'.join(str_list[2:])\n encoding = str_list[1]\n ros_image_source = rosimage.RosImage(topic, encoding)\n\n if self.__server_mode:\n self.__register_video_source(camType, ros_image_source)\n else:\n self.__vidmemwriter.add_video_source(ros_image_source, camType)\n self.__video_sources.append(camType)\n self.__logger.info(\"rosimage started for topic: %s, with encoding: %s\" % (topic, encoding))\n return True\n elif camType == \"webcam\":\n self.__logger.debug(\"I'm starting webcam\")\n webcamsource = takeimages.TakeImages(self.__camera)\n img = webcamsource.get_image()\n if type(img) is type(\"\"):\n self.__logger.error(\"No camera found. Please check connection!\")\n return False\n\n if webcamsource.Nocamera:\n if self.__camera == -1:\n self.__logger.error(\"No camera found. 
Please check connection!\")\n else:\n self.__logger.error(\"Camera %d not found. Please check connection!\" % self.__camera)\n return False\n if self.__server_mode:\n self.__register_video_source('webcam', webcamsource)\n else:\n self.__vidmemwriter.add_video_source(webcamsource, \"webcam\")\n self.__video_sources.append(\"webcam\")\n self.__logger.info(\"Webcam started\")\n return True\n elif camType == 'kinect_openni':\n self.__logger.debug(\"I'm starting kinect using openni\")\n import util.openni_kinectvideo as kv\n depth_source = kv.OpenNIKinect(\"depth\")\n rgb_source = kv.OpenNIKinect(\"rgb\")\n\n try:\n depth_source.get_image()\n except:\n self.__logger.error(\"Kinect not found. Please check connection!\")\n return False\n\n if self.__server_mode:\n self.__register_video_source('kinect_depth', depth_source)\n self.__register_video_source('kinect_rgb', rgb_source)\n else:\n self.__vidmemwriter.add_video_source(depth_source, \"kinect_depth\")\n self.__vidmemwriter.add_video_source(rgb_source, \"kinect_rgb\")\n\n self.__video_sources.append(\"kinect_depth\")\n self.__video_sources.append(\"kinect_rgb\")\n self.__video_sources.append(\"kinect\")\n self.__video_sources.append(\"kinect_openni\")\n \n self.__logger.info(\"Kinect started\")\n return True\n elif camType == 'kinect' or camType == 'kinect_rgb' or camType == 'kinect_depth':\n if self.__use_openni:\n self.__logger.info(\"I'm starting kinect using openni\")\n import util.openni_kinectvideo as kv\n depth_source = kv.OpenNIKinect(\"depth\")\n rgb_source = kv.OpenNIKinect(\"rgb\")\n\n try:\n depth_source.get_image()\n except:\n self.__logger.error(\"Kinect not found. Please check connection!\")\n return False\n else:\n self.__logger.info(\"I'm starting kinect using freenect\")\n try:\n import util.kinectmemwriter\n except:\n self.__logger.error(\"Could not load kinectmemwriter module. Check modules.\")\n return False\n\n depth_source = util.kinectmemwriter.KinectDepthSource()\n rgb_source = util.kinectmemwriter.KinectRGBSource()\n\n try:\n depth_source.get_image()\n except:\n self.__logger.error(\"Kinect not found. Please check connection!\")\n return False\n\n if self.__server_mode:\n self.__register_video_source('kinect_depth', depth_source)\n self.__register_video_source('kinect_rgb', rgb_source)\n else:\n self.__vidmemwriter.add_video_source(depth_source, \"kinect_depth\")\n self.__vidmemwriter.add_video_source(rgb_source, \"kinect_rgb\")\n\n self.__video_sources.append(\"kinect_depth\")\n self.__video_sources.append(\"kinect_rgb\")\n self.__video_sources.append(\"kinect\")\n \n self.__logger.info(\"Kinect started\")\n return True\n elif camType == \"naovideo\":\n self.__logger.debug(\"I'm starting naovideo\")\n try:\n import util.naovideo as naovideo\n except:\n self.__logger.error(\"Could not load naovideo module. 
Check modules\")\n return False\n #get ip of nao:\n #TODO: fix this dirty hack (it should be read from the config file)\n naoip = \"129.125.178.232\"\n if ip:\n naoip = ip\n \n self.__logger.warn(\"Using input resolution %s and output resolution %s\" % (inputres, outputres))\n #use the naovideo module:\n if self.__camera != 0 and self.__camera != 1:\n self.__camera = 0\n try:\n naocamsource = naovideo.VideoModule(naoip, inputres, outputres, camera=self.__camera)\n naocamsource.get_image()\n except:\n self.__logger.error(\"Something went wrong using the camera of the nao (check connection!)\")\n traceback.print_exc()\n return False\n\n if self.__server_mode:\n self.__register_video_source('naovideo', naocamsource)\n else:\n self.__vidmemwriter.add_video_source(naocamsource, \"naovideo\")\n self.__video_sources.append(\"naovideo\")\n self.__nao_camera = naocamsource\n self.__logger.info(\"Naovideo started\")\n return True\n else:\n self.__logger.warning(\"Invalid video source specified: %s\" % camType)\n return False", "def __init__(self, pn_output=\"./\"):\n # Initialize the video stream, then allow the camera sensor to warm up\n print(\"[INFO] starting video stream...\")\n self.vs = cv2.VideoCapture(0) # Capture video frames, 0 is default video camera\n time.sleep(2.0)\n\n # Load config\n config = configparser.ConfigParser()\n config.read(fn_config)\n self.pn_guest_images = config['DEFAULT']['pn_guest_images_archive']\n self.guest_archive = p7zip(self.pn_guest_images)\n self.camera_rot = int(config['DEFAULT']['camera_rot'])\n self.image_width = int(config['DEFAULT']['image_width'])\n self.max_capture_interval = float(config['DEFAULT']['capture_interval'])\n self.max_capture_length = int(config['DEFAULT']['max_capture_length'])\n self.max_images = int(config['DEFAULT']['max_images'])\n\n # Capture Vars\n self.curr_pic = None # Current image from the camera\n self.gst_capture = None\n self.start_time = time.time()\n self.save_time = time.time()\n self.pic_num = None\n self.pn_gstcap_out = None\n\n # Face Detection Model\n self.min_detec_conf = float(config['DEFAULT']['min_detec_conf'])\n self.min_face_px = make_tuple(config['DEFAULT']['min_face_px'])\n pn_detector_model = config['DEFAULT']['pn_detector_model']\n self.trainRBGavg = make_tuple(config['DEFAULT']['detector_trainrgbavg'])\n print(\"[INFO] loading face detector and embedding model...\")\n protoPath = os.path.sep.join([pn_detector_model, \"deploy.prototxt\"])\n modelPath = os.path.sep.join([pn_detector_model,\n \"res10_300x300_ssd_iter_140000.caffemodel\"])\n self.detector = cv2.dnn.readNetFromCaffe(protoPath, modelPath)\n self.detector.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)\n\n # Face Recognition (extract/recognize embeddings) Model\n self.min_recog_prob = float(config['DEFAULT']['min_recog_prob'])\n fn_embedding_model = config['DEFAULT']['fn_embedding_model']\n self.embedder = cv2.dnn.readNetFromTorch(fn_embedding_model)\n self.embedder.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)\n self.gst_identify = False\n self.guest_ids = {}\n\n # Guest Info (update outside of function)\n self.known_guest_meta = None", "def __init__(self, index = 0, requested_cam_size=(640,480)):\r\n \r\n object.__init__(self)\r\n self.surface = None\r\n self.capture = pygame.camera.Camera(Capture.enumerateDevices()[index][0], requested_cam_size, 'RGB')\r\n self.capture.start()", "def reset(runtime, cfg, inputs, state, outputs):\n try:\n connection = int(cfg['connection'])\n except ValueError:\n connection = cfg['connection']\n\n if connection == 
'nvarguscamerasrc':\n state['cap'] = cv2.VideoCapture(\n gstreamer_pipeline(\n capture_width = cfg['capture_width'],\n capture_height = cfg['capture_height'],\n display_width = cfg['display_width'],\n display_height = cfg['display_height'],\n framerate = cfg['framerate'],\n flip_method = cfg['flip_method']),\n cv2.CAP_GSTREAMER)\n else:\n state['cap'] = cv2.VideoCapture(connection)", "def pibooth_setup_camera(cfg):", "def cameraOn():\n cap = cv2.VideoCapture(CAM0, cv2.CAP_DSHOW) # use camera to monitor the motor-mirror assemnbly by DirectShow\n while(True):\n # Capture frame-by-frame\n ret, frame = cap.read()\n\n # Display the resulting frame\n cv2.imshow(\" Real-Time Video. Press 'q' to exist.\",frame)\n if cv2.waitKey(8) & 0xFF == ord('q'): #display a frame for 8ms, ~120Hz\n break\n \n cap.release() # release the capture\n cv2.destroyAllWindows()", "def capturarVideo():\n camara = cv2.VideoCapture(1)\n #camara = cv.CaptureFromCAM(0)\n\n\n #Se Establece resolucion del video en 320x240\n # esta funcion cno existe en vc2\n #camara.set(3, 640)\n #camara.set(4, 480)\n\n # esta funcion cno existe en vc2\n #if not camara.isOpened():\n # print(\"No se puede abrir la camara\")\n\n return camara", "def __init__(self):\n self.video = cv2.VideoCapture(0)\n # Set properties\n self.video.set(cv2.CAP_PROP_FRAME_WIDTH, 480)\n self.video.set(cv2.CAP_PROP_FRAME_HEIGHT, 270)\n self.video.set(cv2.CAP_PROP_FPS, 25)", "def capture(usePiCamera=False, angle=0, resolution=[640,480], debug=False):\n\t\n\t# defines a frame buffer, and opencv video writer for saving video\n\tframe_buf = []\n\tfourcc = cv2.VideoWriter_fourcc(*'XVID')\n\t\n\t# variable for time counting\n\tt0 = time.perf_counter()\n\t\n\n\t# booleans for debugging, saving video, and time tracking\n\tisrecord = False\n\tdebug = debug\n\tttrack = False\n\twasmotion = False\n\n\t# set up background subtractor and kernel for noise removal\n\tfgbg = cv2.createBackgroundSubtractorMOG2(varThreshold = 25, detectShadows = True)\n\tkernel = np.ones((5,5),np.uint8)\n\n\t# initialize camera\n\tvs = VideoStream(src=0, usePiCamera=usePiCamera, resolution=(resolution[0],resolution[1]),\n\t\tframerate=8).start() \t\n\ttime.sleep(2.0)\n\t\n\t# if program crashes except statement kills all processes\n\ttry:\n\t\t# capture frames from the camera\n\t\twhile True:\n\t\t\tframe = vs.read()\n\t\t# process frame\n\t\t\timage, fgmask, ismotion = process(frame, fgbg, kernel, debug, ttrack, angle)\n\t\t\tdimage = image.copy() #display image for live feed\n\t\n\t\t\t# when recording is enabled, frames are written to the .h264 file\n\t\t\t# when motion is detected and a recording indicator is put to the live feed\n\t\t\tif isrecord:\n\t\t\t\tif ismotion:\n\t\t\t\t\tif len(frame_buf) > 0:\n\t\t\t\t\t\tfor frame in frame_buf:\n\t\t\t\t\t\t\tout.write(frame)\n\t\t\t\t\t\tframe_buf = []\n\t\t\t\t\tout.write(image)\n\t\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tif len(frame_buf) < 8:\n\t\t\t\t\t\tframe_buf.append(image.copy())\n\t\t\t\t\telse:\n\t\t\t\t\t\tframe_buf.append(image.copy())\n\t\t\t\t\t\tdel frame_buf[0]\n\t\t\t\tfont = cv2.FONT_HERSHEY_SIMPLEX\n\t\t\t\tcv2.putText(dimage, 'Recording',(10, 25), font, 1,(0,0,255),2,cv2.LINE_AA)\n\t\t \n\t\t\t\n\t\t\t# records time stamp and durations of detected motion\n\t\t\tif ttrack:\n\t\t\t\tif ismotion and ismotion!=wasmotion:\n\t\t\t\t\tt0 = time.perf_counter()\n\t\t\t\t\tdate = time.strftime(\"%m-%d-%Y\")\n\t\t\t\t\tctime = time.strftime(\"%H:%M:%S\")\n\t\t\t\t\n\t\t\t\tif not ismotion and ismotion!=wasmotion:\n\t\t\t\t\tdt = 
time.perf_counter() - t0\n\t\t\t\t\toutline = date + \" \" + ctime + \", \" + \"%.10f\\n\"%dt\n\t\t\t\t\ttextfile.write(outline)\n\t\t\t\t\t\n\t\n\t\t\t# show the frame\n\t\t\t# if debug = True also show bounding rectangles and fgmask\n\t\t\t# and debugging indicator is applied\n\t\t\tif debug:\n\t\t\t\tcv2.imshow(\"Mask\", fgmask)\n\t\t\t\tfont = cv2.FONT_HERSHEY_SIMPLEX\n\t\t\t\tcv2.putText(dimage, 'Debugging',(450, 25), font, 1,(255,255,255),2,cv2.LINE_AA)\n\t\t\t\n\t\t\tcv2.imshow(\"Live Feed\", dimage)\n\t\t\tkey = cv2.waitKey(1) & 0xFF\n\t \n\t\t\t\n\t \n\t\t\t# if the `q` key was pressed or the window is closed, break from the loop\n\t\t\tif key == ord(\"q\") or cv2.getWindowProperty(\"Live Feed\",0)<0:\n\t\t\t\tif ttrack and ismotion:\n\t\t\t\t\tdt = time.perf_counter() - t0\n\t\t\t\t\toutline = time.strftime(\"%m-%d-%Y\") + \" \" + time.strftime(\"%H:%M:%S\") + \", \" + \"%.10f\\n\"%dt\n\t\t\t\t\ttextfile.write(outline) \n\t\t\t\tbreak\n\t\t\t\n\t\t\t# if the 'r' key is pressed, recording is stopped or started\n\t\t\telif key == ord(\"r\"):\n\t\t\t\tif not isrecord:\n\t\t\t\t\t#get the filename that the next recorded video will save to\n\t\t\t\t\tfilename = get_save_file('.avi')\n\t\t\t\t\tout = cv2.VideoWriter(filename,fourcc, 8.0, (640,480))\n\t\t\t\telse:\n\t\t\t\t\t#release the previous output file\n\t\t\t\t\tout.release()\n\t\t\t\tisrecord = not isrecord\n\t\t\t\n\t\t\t# if the 't' key is pressed time tracking is enabled\n\t\t\telif key == ord(\"t\"):\n\t\t\t\tif not ttrack:\n\t\t\t\t\tfilename = get_save_file('.txt')\n\t\t\t\t\ttextfile = open(filename, 'w')\n\t\t\t\telse:\n\t\t\t\t\ttextfile.close()\n\t\t\t\tttrack = not ttrack\n\t\t\t\t\t\n\t\n\t\t\t# if the 'd' key is pressed, debugging is toggled on and off\n\t\t\telif key == ord(\"d\"):\n\t\t\t\tif debug:\n\t\t\t\t\tcv2.destroyWindow('Mask')\n\t\t\t\tdebug = not debug\n\t\t\t\n\t\t\twasmotion = ismotion\n\t\t\n\t\tvs.stream.release()\t\n\t\tcv2.destroyAllWindows()\n\t\ttry:\n\t\t\tout.release()\n\t\texcept:\n\t\t\tpass\n\t\ttry:\n\t\t\ttextfile.close()\n\t\texcept:\n\t\t\tpass\n\texcept:\n\t\tvs.stream.release()\t\n\t\tcv2.destroyAllWindows()\n\t\ttry:\n\t\t\tout.release()\n\t\texcept:\n\t\t\tpass\n\t\ttry:\n\t\t\ttextfile.close()\n\t\texcept:\n\t\t\tpass\t\t\n\t\tsys.exit()", "def driver(self):\n return '<static-vmedia>'", "def video_feed(self, room_name):\n if room_name.lower() == \"cears\":\n room_id = 1\n elif room_name.lower() == \"computer_lab\":\n room_id = 0\n else:\n return \"Wrong Room ID is selected!\"\n\n exp_id = str(int(time.time() * 1000))\n devices = self.rooms[room_id][\"devices\"]\n return Response(self.gen(VideoCamera(devices, room_name)),\n mimetype='multipart/x-mixed-replace; boundary=frame')", "def get_jetson_gstreamer_source(capture_width=1280, capture_height=720, display_width=1280, display_height=720,\n framerate=60, flip_method=0):\n return (\n f'nvarguscamerasrc ! video/x-raw(memory:NVMM), ' +\n f'width=(int){capture_width}, height=(int){capture_height}, ' +\n f'format=(string)NV12, framerate=(fraction){framerate}/1 ! ' +\n f'nvvidconv flip-method={flip_method} ! ' +\n f'video/x-raw, width=(int){display_width}, height=(int){display_height}, format=(string)BGRx ! ' +\n 'videoconvert ! video/x-raw, format=(string)BGR ! appsink'\n )", "def create_camera():\n camera = PiCamera()\n camera.hflip = True\n camera.vflip = True\n camera.resolution = tuple(camera_settings['resolution'])\n camera.framerate = camera_settings['fps']\n return camera" ]
[ "0.6395852", "0.6357365", "0.6351877", "0.62840235", "0.62343246", "0.61802995", "0.6172302", "0.6079385", "0.6058784", "0.6023922", "0.59703314", "0.5919922", "0.5915451", "0.591257", "0.58635557", "0.5812116", "0.5780909", "0.57319295", "0.56983274", "0.5668839", "0.56630677", "0.5656293", "0.56402487", "0.56186235", "0.56117713", "0.5593333", "0.5579726", "0.55663985", "0.5561979", "0.5556522" ]
0.70106345
0
Paint boxes and labels/names around detected faces
def paint_faces_data(frame, faces_data):
    for face in faces_data:
        (top, right, bottom, left) = face['location']
        if face['identity'] is None:
            name = 'Unknown'
            color = (0, 0, 255)  # red
        else:
            name = face['identity']
            color = (0, 128, 0)  # dark green
        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), color, 2)
        # Draw a label with a name below the face
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), color, cv2.FILLED)
        cv2.putText(frame, name, (left + 6, bottom - 6), cv2.FONT_HERSHEY_DUPLEX, 1.0, (255, 255, 255), 1)
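A minimal usage sketch for the function above, assuming OpenCV (cv2) and NumPy are importable in the same module. The frame, the box coordinates, and the name 'Alice' are made-up placeholders for illustration, not values taken from this dataset row.

import numpy as np
import cv2

# A blank 480x640 BGR frame standing in for a camera capture.
frame = np.zeros((480, 640, 3), dtype=np.uint8)

# Each entry carries 'location' as (top, right, bottom, left) and 'identity'
# as either a name string or None, which is the shape the function expects.
faces_data = [
    {'location': (100, 400, 300, 200), 'identity': 'Alice'},
    {'location': (50, 150, 150, 50), 'identity': None},
]

paint_faces_data(frame, faces_data)   # draws the boxes and labels in place
cv2.imwrite('annotated.png', frame)   # write out the annotated frame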
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def highlight_faces(image, faces, output_filename, terminal_print=True):\n im = Image.open(image)\n draw = ImageDraw.Draw(im)\n\n for (face_ind, face) in enumerate(faces):\n\n # compute emotions\n list_emotion_scores = [face.sorrow_likelihood,\n face.joy_likelihood,\n face.anger_likelihood,\n face.surprise_likelihood]\n\n list_emotions = [\"SORROW\",\n \"JOY\",\n \"ANGER\",\n \"SURPRISE\"]\n\n string_label = generate_string_label(list_emotions, list_emotion_scores)\n\n if terminal_print:\n # print emotions on terminal\n print(\"\\n\")\n print(\"-----------------------\")\n print(\"Face {}\".format(face_ind))\n\n for (crrt_emotion, crrt_score) in zip(list_emotions, list_emotion_scores):\n print(\"{}: {}\".format(crrt_emotion, crrt_score))\n\n print(string_label)\n\n print(\"-----------------------\")\n\n # draw box around face\n box = [(vertex.x, vertex.y)\n for vertex in face.bounding_poly.vertices]\n draw.line(box + [box[0]], width=5, fill='#00ff00')\n\n # add legend in the face box\n fontsize = 35\n font = ImageFont.truetype(\"/usr/share/fonts/truetype/freefont/FreeMono.ttf\", fontsize)\n\n offset = 5\n heigth_text = 40\n length_text = box[1][0] - box[0][0] - 2 * offset\n draw.rectangle(((box[0][0] + offset, box[0][1] + offset), (box[0][0] + length_text + offset, box[0][1] + heigth_text + offset)), fill=\"black\")\n draw.text((box[0][0] + offset, box[0][1] + offset), string_label, font=font, fill=(255, 255, 255, 255))\n\n # highlight significant points\n point_nbr = 0\n half_width_sqare = 2\n\n list_point_coords = []\n\n for point in face.landmarks:\n x = point.position.x\n y = point.position.y\n\n list_point_coords.append((x, y))\n\n draw.rectangle(((x - half_width_sqare, y - half_width_sqare), (x + half_width_sqare, y + half_width_sqare)), fill=\"red\")\n\n # fontsize = 15\n # font = ImageFont.truetype(\"/usr/share/fonts/truetype/freefont/FreeMono.ttf\", fontsize)\n # draw.text((x, y), str(point_nbr), font=font, fill=(255, 255, 0, 0))\n\n point_nbr += 1\n\n all_lists_points = [\n [10, 11, 9],\n [10, 12, 11],\n [14, 7, 13, 15],\n [7, 6],\n [14, 6, 13, 7, 14],\n [16, 17, 18, 19],\n [21, 22, 23, 24],\n [30, 6],\n ]\n\n for crrt_list_points in all_lists_points:\n draw_line_list_points(draw, crrt_list_points, list_point_coords)\n\n draw_line_list_points(draw, [2, 26, 3], list_point_coords, close=False)\n draw_line_list_points(draw, [4, 27, 5], list_point_coords, close=False)\n draw_line_list_points(draw, [10, 8, 11], list_point_coords, close=False)\n\n im.save(output_filename)", "def display_names_and_boxes(img_array, names, detections):\n fig, ax = plt.subplots()\n ax.imshow(img_array)\n\n for i in range(len(detections)):\n det = detections[i]\n l, r, t, b = det.left(), det.right(), det.top(), det.bottom()\n ax.add_patch(patches.Rectangle((l, t), np.abs(l - r), np.abs(t - b), fill=False))\n ax.text(l, b, names[i], color=\"white\")", "def _display_face(draw, bounding_box, name):\n top, right, bottom, left = bounding_box\n draw.rectangle(((left, top), (right, bottom)), outline=BOUNDING_BOX_COLOR)\n text_left, text_top, text_right, text_bottom = draw.textbbox(\n (left, bottom), name\n )\n draw.rectangle(\n ((text_left, text_top), (text_right, text_bottom)),\n fill=BOUNDING_BOX_COLOR,\n outline=BOUNDING_BOX_COLOR,\n )\n draw.text(\n (text_left, text_top),\n name,\n fill=TEXT_COLOR,\n )", "def visualize(scores, faces):\n pc_min, pc_max = np.min(scores, 0), np.max(scores, 0)\n pc_scaled = (scores - pc_min) / (pc_max - pc_min) \n fig, ax = plt.subplots()\n for i in range(len(faces)):\n 
imagebox = offsetbox.OffsetImage(faces[i, :].reshape(64,64).T, cmap=plt.cm.gray, zoom=0.5)\n box = offsetbox.AnnotationBbox(imagebox, pc_scaled[i, 0:2])\n ax.add_artist(box)\n plt.show()", "def draw_face_box(data):\n head = extract_head(data);\n face_box = data['position_data']['face_box'][data['i']];\n sefs = data['sefs'][data['i']];\n if face_box is not None:\n cv2.rectangle(head, *face_box, (0, 255, 0));\n else:\n cv2.rectangle(head, *sefs, (0, 0, 255));\n return head;", "def draw_boxes_and_labels(img, localized_objs, obj_classes, box_color=(0, 255, 255)):\n img_h, img_w = img.shape[:2]\n font = cv2.FONT_HERSHEY_SIMPLEX\n font_size = 0.5\n font_color = (0, 0, 0)\n\n for (i, bbox_cv2) in localized_objs:\n # Draw the object boxes\n left, right, top, bottom = handle_bad_corners(bbox_cv2[0], bbox_cv2[1], bbox_cv2[2], bbox_cv2[3], img_w, img_h)\n cv2.rectangle(img, (left, top), (right, bottom), box_color, 4)\n # Draw a filled boxes on top of the bounding box (as the background for the labels)\n left1, top1, right1, _ = handle_bad_corners(left-2, top-40, right+2, bottom, img_w, img_h)\n cv2.rectangle(img, (left1, top1), (right1, top), box_color, -1, 1)\n # Output the labels that show the x and y coordinates of the bounding box center.\n text_label= obj_classes[i]\n top2 = 0 if top<25 else top-25\n cv2.putText(img, text_label, (left, top2), font, font_size, font_color, 1, cv2.LINE_AA)\n text_xy= 'x='+str((left+right)/2)+' y='+str((top+bottom)/2)\n cv2.putText(img, text_xy, (left,top2+20), font, 0.4, font_color, 1, cv2.LINE_AA)\n\n return img", "def show_faces(boxes, landmarks):\n count = 1\n for bb, _ in zip(boxes, landmarks):\n x1, y1, x2, y2 = int(bb[0]), int(bb[1]), int(bb[2]), int(bb[3])\n w = x2 - x1\n h = y2 - y1\n print(\"face ID: {}\".format(count))\n print(\"confidence= {}; x1= {}, y1= {}; w= {}, h={}\".format(float(bb[4]),x1,y1,w,h))\n count += 1", "def paint_detected_face_on_image(frame, location, name=None):\n\n # Unpack the coordinates from the location tuple\n top, right, bottom, left = location\n\n if name is None:\n name = 'unknown'\n color = (0, 0, 255) # Red for unrecognized face\n else:\n color = (0, 128, 0) # Dark green for recognized face\n\n # Draw a box around the face\n cv2.rectangle(frame, (left, top), (right, bottom), color, 2)\n\n # Draw a label with the name around the face\n cv2.rectangle(frame, (left, bottom - 35), (right, bottom), color, cv2.FILLED)\n cv2.putText(frame, name, (left + 6, bottom - 6), cv2.FONT_HERSHEY_DUPLEX, 1.0, (255, 255, 255), 1)", "def draw_boxes_v2(img_name, img, boxes, labels, scores, obj_list=None, figsize=(15,15)):\n fig,ax = plt.subplots(figsize=figsize)\n\n if isinstance(img, torch.Tensor):\n img = img.numpy().squeeze().transpose((1,2,0))\n # Display the image\n ax.imshow(img)\n\n # Create a Rectangle patch\n for box, label, score in zip(boxes, labels, scores):\n label = int(label)\n color = STANDARD_COLORS[label]\n x,y,w,h = box\n rect = patches.Rectangle((x,y),w,h,linewidth=1.5,edgecolor = color,facecolor='none')\n score = np.round(score, 3)\n if obj_list is not None:\n text = '{}: {}'.format(obj_list[label], str(score))\n else:\n text = '{}: {}'.format(label, str(score))\n plt.text(x, y-3,text, color = color, fontsize=15)\n # Add the patch to the Axes\n ax.add_patch(rect)\n plt.axis('off')\n plt.savefig(img_name,bbox_inches='tight')\n plt.close()", "def __draw_rec(self):\n # Draw the bounding boxes to the frame\n while True:\n # Save\n temp_categories = defaultdict(int)\n\n if (self.__detect_info is None) or \\\n (self.__img is 
None):\n continue\n\n # Get the size of image\n width, height = self.__size\n # Get the copy from self.__img\n self.__frame = np.copy(self.__img)\n for info in self.__detect_info:\n # Get only the person detection\n for i in range(len(self.__categories)):\n if info.name != self.__categories[i]:\n continue\n # Increase the amount of detected object\n temp_categories[info.name] += 1\n\n # Pick out the bounding vertices\n top_right_x, top_right_y = \\\n int(info.bounding_poly.normalized_vertices[0].x * width), \\\n int(info.bounding_poly.normalized_vertices[0].y * height)\n bottom_right_x, bottom_right_y = \\\n int(info.bounding_poly.normalized_vertices[2].x * width), \\\n int(info.bounding_poly.normalized_vertices[2].y * height)\n\n # Draw bounding boxes and put the text\n cv2.rectangle(self.__frame,\n (top_right_x, top_right_y),\n (bottom_right_x, bottom_right_y),\n self.__colors[i],\n 2)\n cv2.putText(self.__frame,\n f\"{info.name} {info.score:.2f}\",\n (top_right_x, top_right_y),\n cv2.FONT_HERSHEY_TRIPLEX,\n 0.5,\n self.__colors[i], 2)\n\n self.__show_categories = temp_categories", "def plot_boxes_cv2(img, trackers, boxes, colours, savename=None, class_names=None):\n img = np.copy(img)\n\n n_boats = 0\n for tracker in trackers:\n if class_names:\n x1 = int(tracker[0])\n y1 = int(tracker[1])\n x2 = int(tracker[2])\n y2 = int(tracker[3])\n\n # BGR color codes\n rgb = colours[int(tracker[4]) % 32]\n\n img = cv2.putText(\n img,\n \"boat (id: {0})\".format(tracker[4]),\n (x1, y1-6),\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.5,\n rgb,\n 1,\n cv2.LINE_AA,\n )\n img = cv2.rectangle(img, (x1, y1), (x2, y2), rgb, 2)\n n_boats += 1\n\n # Infographics box\n sub_img = img[10:60, 10:230]\n white_rect = np.ones(sub_img.shape, dtype=np.uint8) * 255\n res = cv2.addWeighted(sub_img, 0.5, white_rect, 0.5, 1.0)\n img[10:60, 10:230] = res\n\n # Display number of boxes\n img = cv2.putText(\n img,\n 'Number of boats: {0}'.format(n_boats),\n (20, 30),\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.5,\n (0, 0, 0),\n 1,\n cv2.LINE_AA,\n )\n\n if savename:\n print(\"save plot results to {}\".format(savename))\n cv2.imwrite(savename, img)\n return img", "def visualize_detection(self, image):\n\t\tH, W, _ = image.shape\n\t\tpos_list = self.apply_detection(image)\n\t\tdetections = {}\n\t\thasDetection = False\n\t\tfor i, L in enumerate(pos_list):\n\t\t\ttext, coordinates = L[0], L[1]\n\t\t\tCOLOR = COLORS[text]\n\t\t\tfor x, y, w, h in coordinates:\n\t\t\t\t# prune bad homography points\n\t\t\t\tif x < 0 or y < 0 or x + w > W or \\\n\t\t\t\t y + h > H or w <= 1 or h <= 1:\n\t\t\t\t\tcontinue\n\t\t\t\t# add the detection to the dict for tracking\n\t\t\t\tdetections[self.num_detect] = (x, y, w, h)\n\t\t\t\tself.detection_index[self.num_detect] = (x, y, w, h, self.num_save, text)\n\t\t\t\tself.num_detect += 1\n\t\t\t\thasDetection = True\n\t\t\t\t# if the detection is human\n\t\t\t\tif text == 'face':\n\t\t\t\t\tgender = self.genderDetect.classify(image[y:y+h, x:x+w, :])\n\t\t\t\t\tgender = 'female' if gender[0] < 0.5 else 'male'\n\t\t\t\t\tcv2.putText(image, gender, (x + w // 2 -10, y + h + 15),\n\t\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.6, COLOR, 2, cv2.LINE_AA)\n\n\t\t\t\timage = cv2.rectangle(image, (x, y), (x + w, y + h), COLOR, 2)\n\t\t\t\tcv2.putText(image, text, (x, y - 5),\n\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.6, COLOR, 2, cv2.LINE_AA)\n\t\tif hasDetection:\n\t\t\tself.detection_frames[self.num_save] = detections\n\t\tself.num_save +=1\n\t\treturn image", "def _draw_detections(frame, frame_detections):\n boxColor = (0,255,0)\n for box 
in frame_detections:\n cv2.rectangle(frame,(int(box[0]),int(box[1])),(int(box[2]),int(box[3])),boxColor,7)\n # cv2.rectangle(frame,(int(box[0]),int(box[1])),(int(box[2]),int(box[3])),boxColor,7)\n cv2.putText(frame,str(format(box[4],'.2f')),(int(box[0]),int(box[3]+20)),cv2.FONT_HERSHEY_SIMPLEX,0.6,boxColor,1,cv2.LINE_AA)\n\n return frame", "def draw_boxes_info(image, current_data):\n\n font_position1 = (50, 600)\n font_position2 = (50, 650)\n font_scale = .4\n font_thickness = 1\n\n locations = current_data[\"locations\"] #returns x1, y1, x2, y2\n frame_num = \"Frame Number: \" + str(current_data[\"frame_num\"])\n\n for box in locations:\n box_text = (\"Box locations are x1: {0}, y1: {1}, x2: {2}, y2: {3}\").format(box[1],box[3],box[0],box[2])\n\n cv2.rectangle(image, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 3)\n cv2.putText(image, box_text, font_position1, cv2.FONT_HERSHEY_SIMPLEX, font_scale, (255, 255, 255),\n font_thickness, cv2.LINE_AA)\n\n cv2.putText(image, frame_num, font_position2, cv2.FONT_HERSHEY_SIMPLEX, font_scale, (255, 255, 255),\n font_thickness, cv2.LINE_AA)\n\n return image", "def _draw(self, frame, boxes, probs, landmarks, name):\n try:\n print('drawing')\n for box, prob, ld, id in zip(boxes, probs, landmarks, name):\n # Draw rectangle on frame\n\n cv2.putText(frame, id, (200, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)\n\n\n except:\n print('not draw box')\n pass\n\n return frame", "def vis_detections(im, dets):\n im = im[:, :, (2, 1, 0)]\n fig, ax = plt.subplots(figsize=(12, 12))\n ax.imshow(im, aspect='equal')\n\n for det in dets:\n bbox = det[:4]\n score = det[-2]\n\n ax.add_patch(\n plt.Rectangle((bbox[0], bbox[1]),\n bbox[2] - bbox[0],\n bbox[3] - bbox[1], fill=False,\n edgecolor='red', linewidth=2)\n )\n ax.text(bbox[0], bbox[1] - 2,\n '{:s} {:.3f}'.format(det[-1], score),\n bbox=dict(facecolor='blue', alpha=0.5),\n fontsize=14, color='white')\n\n ax.set_title(('eye and pupil detections'), fontsize=14)\n plt.axis('off')\n plt.tight_layout()\n plt.draw()", "def vis_detections(im, class_name, dets, thresh=0.5):\n im = im[:, :, (2, 1, 0)]\n fig, ax = plt.subplots(figsize=(12, 12))\n ax.imshow(im, aspect='equal')\n for i, det in enumerate(dets):\n bbox = dets[i, :4]\n score = dets[i, -1]\n\n ax.add_patch(\n plt.Rectangle((bbox[0], bbox[1]),\n bbox[2] - bbox[0],\n bbox[3] - bbox[1], fill=False,\n edgecolor='red', linewidth=3.5)\n )\n ax.text(bbox[0], bbox[1] - 2,\n '{:s} {:.3f}'.format(class_name, score),\n bbox=dict(facecolor='blue', alpha=0.5),\n fontsize=14, color='white')\n\n ax.set_title(('{} detections with '\n 'p({} | box) >= {:.1f}').format(class_name, class_name,\n thresh),\n fontsize=14)\n plt.axis('off')\n plt.tight_layout()\n plt.draw()", "def vis_detections(im, class_name, dets, bord, thet, fp, thresh=0.5):\n para_dict={\n 'left': 20,\n 'right': -20,\n 'front': 40,\n 'back': 0,\n 'resolution': 0.05\n }\n inds = np.where(dets[:, -1] >= thresh)[0]\n if len(inds) == 0:\n return\n\n im = im[:, :, (2, 1, 0)]\n if save_image:\n fig, ax = plt.subplots(figsize=(12, 12))\n ax.imshow(im, aspect='equal')\n f = open('bbox.txt', 'w')\n for i in inds:\n bbox = dets[i, :4]\n score = dets[i, -1]\n center = np.array([[(dets[i, 0]+dets[i, 2])/2],[(dets[i, 1]+dets[i, 3])/2]])\n theta = thet[i, 0]\n l = bord[i, 0]\n w = bord[i, 1]\n h = bord[i, 2]\n tz = bord[i, 3]\n # theta = fix_theta(theta, l, w, (bbox[2] - bbox[0])*para_dict['resolution'], (bbox[3] - bbox[1])*para_dict['resolution'])\n p1 = box_rot(l, w, theta)/para_dict['resolution'] + center\n 
p2 = p1.transpose()\n\n \n f.write('%f %f %f %f\\n' % (dets[i, 0], dets[i, 1], dets[i, 2], dets[i, 3]))\n\n fp.write(\"%s %f %f %f %f %f %f %f %f\\n\" % (class_name,\n para_dict['front']-center[1,0]*para_dict['resolution'],\n para_dict['left']-center[0,0]*para_dict['resolution'],\n tz,theta,l,w,h,score))\n\n if save_image:\n\n ax.add_patch(\n plt.Polygon(p2,edgecolor='red',linewidth=2,fill=False)\n )\n ax.add_patch(\n plt.Rectangle((bbox[0], bbox[1]),\n bbox[2] - bbox[0],\n bbox[3] - bbox[1], fill=False,\n edgecolor='yellow', linewidth=2)\n )\n ax.text(bbox[0], bbox[1] - 2,\n '{:s} {:.3f} height {:.3f} tz {:.3f}'.format(class_name, score, h, tz),\n bbox=dict(facecolor='blue', alpha=0.5),\n fontsize=14, color='white')\n if save_image:\n ax.set_title(('{} detections with '\n 'p({} | box) >= {:.1f}').format(class_name, class_name,\n thresh),\n fontsize=14)\n plt.axis('off')\n plt.tight_layout()\n plt.draw()\n f.close()", "def plot_boxes(predictions, labels):\n visuals =[] \n num_det = predictions.shape[0]\n for i in range(num_det):\n box = predictions[i:i+1]#.numpy()\n label = labels[i]\n corner = center_to_corner_box3d(box[:, :3], box[:, 3:6], box[:, -1])[0].tolist()\n color = label2color(int(label) -1)\n visuals.append(corners_to_lines(corner, color))\n return visuals", "def draw_boxes(self, im, boxes):\n for bbox in boxes:\n l = [int(x) for x in bbox[\"coords\"]]\n l = self.scalebox(l)\n icon = self.classes_to_icons[bbox[\"label\"]]\n overlay_im_to_background(im, icon, l[0], l[1] - icon.shape[0] - 5)\n cv2.rectangle(im,(l[0],l[1]),(l[2],l[3]),self.color,2)", "def draw_rects(image, face_locations):\n\n # Placeholder\n emotions = ['Poker'] * len(face_locations)\n\n # Convert to grayscale and extract faces\n gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n faces = [process_face(gray_image, coords)\n for coords in face_locations]\n emotions = [predict_emotion(face, model)\n for face in faces]\n\n # Convert to PIL image\n pil_image = Image.fromarray(image[:, :, ::-1])\n\n # Create a Pillow ImageDraw Draw instance to draw with\n draw = ImageDraw.Draw(pil_image)\n\n for (top, right, bottom, left), emotion in zip(face_locations, emotions):\n\n # Draw bounding box around face\n draw.rectangle(((left, top), (right, bottom)), outline=rect_color)\n\n # Write emotion in caption box\n caption_box_height = 0.1 * (bottom - top)\n fnt = ImageFont.truetype('app/static/gillsans.ttf',\n size=int(caption_box_height))\n textwidth, textheight = draw.textsize(emotion, font=fnt)\n textwidth += 0.25 * textwidth\n\n # Draw Caption box\n draw.rectangle(((left, bottom + caption_box_height),\n (left + textwidth, bottom)),\n fill=rect_color, outline=rect_color)\n\n # Draw text\n draw.text((left + 0.10 * textwidth,\n bottom + (0.05 * caption_box_height)),\n emotion, font=fnt, fill=(255, 255, 255, 255))\n\n del draw\n\n return pil_image", "def draw_person_boxes(self, frame, boxes, probs):\n # convert color space for numpy\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n # for all the boxes:\n for (box, prob) in zip(boxes, probs):\n \n # extract the properties of the box and text:\n (startX, startY, endX, endY) = box.astype(\"int\")\n label = \"{}: {:.2f}%\".format(\"Person\", prob * 100)\n font = cv2.FONT_HERSHEY_SIMPLEX\n font_scale = 0.7\n thickness = 1\n text_size, _ = cv2.getTextSize(label, font, font_scale, thickness)\n text_w, text_h = text_size\n \n text_color_bg = (0,0,0) # black bg for text\n text_color = (255,255,255) # white text\n box_color = (255,0,0) # red box\n \n # draw the bb prediction on the 
frame\n cv2.rectangle(frame, (startX, startY), (endX, endY), box_color , 1)\n \n # include text:\n y = startY - text_h if startY - text_h > text_h else startY + text_h\n cv2.rectangle(frame, (startX, y - text_h), (startX + text_w, startY-1), text_color_bg, -1)\n cv2.putText(frame, label, (startX, y), font, font_scale, text_color, thickness)\n\n frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)\n return frame", "def plot_faces(img, w, h, B, hist_h, hist_hT, R, name_img, mode_color=\"RGB\", Q=256, g_mask=False, nb_angles=1, nb_scales=1):\n img_skin = convert_colors_probalities(img, hist_h, hist_hT, Q, mode_color)\n #print(img_skin.shape)\n set_face = recognition_function(img_skin, w, h, B, g_mask=g_mask, nb_angles=nb_angles, nb_scales=nb_scales)\n draw_faces(img, set_face, \"raw_\"+name_img, (212, 85, 186))\n if cluster_ellipse(set_face, R) != {}:\n set_face = cluster_ellipse(set_face, R)\n else:\n set_face = non_maximum_suppression(set_face, R)\n draw_faces(img, set_face, name_img, (212, 85, 186))", "def recognize_faces(image_file_path):\n image_pil = Image.open(image_file_path)\n draw = ImageDraw.Draw(image_pil)\n\n known_face_encodings_dict = get_known_face_encodings_dict()\n known_names = list(known_face_encodings_dict.keys())\n known_face_encodings = list(known_face_encodings_dict.values())\n\n del known_face_encodings_dict\n\n for face_location in face_detection.get_face_locations(image_file_path):\n face_encoding = get_face_encodings(\n image_file_path, known_face_locations=[face_location]\n )[0]\n\n recognition_flags = face_recognition.compare_faces(\n known_face_encodings, face_encoding\n )\n\n for flag, name in zip(recognition_flags, known_names):\n if not flag:\n continue\n\n top, right, bottom, left = face_location\n draw.rectangle((left, top, right, bottom), outline=\"#FF1493\")\n text_width, text_height = draw.textsize(name)\n draw.rectangle(\n (left, bottom, right, bottom + text_height + 10),\n fill=\"#FF1493\",\n outline=\"#FF1493\",\n )\n draw.text((left + 6, bottom + 5), name, fill=\"white\")\n\n del draw # conserve resources\n image_pil.show()", "def _box_face(image, face):\n draw = PIL.ImageDraw.Draw(image.image)\n draw.rectangle(face.as_box(), outline=\"yellow\")", "def vis_detections(im, class_name, dets, thresh=0.5, video= None,fid=0):\n dirname = os.path.dirname(__file__)\n show_dir = os.path.join(dirname, '..', 'show/%s' % os.path.basename(video))\n # print(show_dir)\n if not os.path.exists(show_dir):\n os.makedirs(show_dir)\n\n inds = np.where(dets[:, -1] >= thresh)[0]\n if len(inds) == 0:\n return\n\n im = im[:, :, (2, 1, 0)]\n fig, ax = plt.subplots(figsize=(12, 12))\n ax.imshow(im, aspect='equal')\n for i in inds:\n bbox = dets[i, :4]\n score = dets[i, -1]\n\n ax.add_patch(\n plt.Rectangle((bbox[0], bbox[1]),\n bbox[2] - bbox[0],\n bbox[3] - bbox[1], fill=False,\n edgecolor='red', linewidth=3.5)\n )\n ax.text(bbox[0], bbox[1] - 2,\n '{:s} {:.3f}'.format(class_name, score),\n bbox=dict(facecolor='blue', alpha=0.5),\n fontsize=14, color='white')\n\n ax.set_title(('{} detections with '\n 'p({} | box) >= {:.1f}').format(class_name, class_name,\n thresh),\n fontsize=14)\n plt.axis('off')\n plt.tight_layout()\n plt.draw()\n plt.savefig('%s/all_bboxes_%d.jpg' % (show_dir, fid))\n # plt.show()", "def draw_face_landmarks(data):\n head = extract_head(data);\n landmarks = data['position_data']['face_landmarks'][data['i']];\n face_box = data['position_data']['face_box'][data['i']];\n if face_box is not None:\n for p in landmarks:\n cv2.circle(head,p,3,(255,0,0));\n else:\n for 
p in landmarks:\n cv2.circle(head,p,3,(255,0,200));\n return head;", "def draw_bounding_box(objects,color):\n\n for i in range(len(objects)):\n x, y, w, h, d = objects[i].get_attributes()\n print(x, y, w, h, d)\n corr = get_correction(d, a, hfov, x)\n cv2.rectangle(color, (x-corr, y), (x+w-corr, y+h), (0, 255, 0), 4)\n\n try:\n real_x, real_y = get_dimensions(d, w, h, hfov, vfov, 640, 480)\n real_x = round(real_x, 3)\n real_y = round(real_y, 3)\n\n except:\n real_x, real_y = 'ERROR'\n\n cv2.putText(color, 'depth = ' + str(d) + 'm', (30, i*60 + 30) ,\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)\n cv2.putText(color, 'width = ' + str(real_x)+ 'm', (30, i*60 + 45) ,\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)\n cv2.putText(color, 'height = ' + str(real_y)+ 'm', (30, i*60 + 60) ,\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)\n\n if(i < len(objects)-1):\n ## distance between left and right object\n distance = round(distance_between_objects(objects[i], objects[i+1], hfov, 640), 3)\n if distance > l:\n textcolor = (0, 255, 0)\n else:\n textcolor = (0, 0, 255)\n\n cv2.putText(color, 'distance between objects = ' + str(distance) + 'm',\n (320, i*60 + 70) , cv2.FONT_HERSHEY_SIMPLEX, 0.5, textcolor, 1)", "def draw_box_label(img, bbox_cv2, bbox_class=\"diver\", box_color=(0, 0, 255), show_label=True):\n if not bbox_cv2 or bbox_cv2==[]: \n return img\n \n img_h, img_w = img.shape[:2]\n font = cv2.FONT_HERSHEY_SIMPLEX\n font_size = 0.5\n font_color = (0, 0, 0)\n left, right, top, bottom = handle_bad_corners(bbox_cv2[0], bbox_cv2[1], bbox_cv2[2], bbox_cv2[3], img_w, img_h)\n \n # Draw the bounding box and labels\n cv2.rectangle(img, (left, top), (right, bottom), box_color, 4) \n if show_label:\n # Draw a filled box on top of the bounding box (as the background for the labels)\n left1, top1, right1, _ = handle_bad_corners(left-2, top-40, right+2, bottom, img_w, img_h)\n cv2.rectangle(img, (left1, top1), (right1, top), box_color, -1, 1)\n # Output the labels that show the x and y coordinates of the bounding box center.\n text_label= bbox_class\n top2 = 0 if top<25 else top-25\n cv2.putText(img, text_label, (left, top2), font, font_size, font_color, 1, cv2.LINE_AA)\n text_xy= 'x='+str((left+right)/2)+' y='+str((top+bottom)/2)\n cv2.putText(img, text_xy, (left,top2+20), font, 0.4, font_color, 1, cv2.LINE_AA)\n \n return img", "def vis_gt_boxes(self):\n import cv2\n num_images = len(self.gt)\n for i in range(num_images):\n im = cv2.imread(self.image_path_at(i))\n im = im[:, :, (2, 1, 0)]\n plt.cla()\n plt.imshow(im)\n gt_image = self.gt[i]\n for j in range(len(gt_image['boxes'])):\n bbox = gt_image['boxes'][j]\n c = gt_image['gt_classes'][j] \n plt.gca().add_patch(plt.Rectangle((float(bbox[0]), float(bbox[1])),\n float(bbox[2]) - float(bbox[0]),\n float(bbox[3]) - float(bbox[1]), fill=False,\n edgecolor='r', linewidth=3))\n x = (bbox[0] + bbox[2])/2\n y = bbox[1]\n s = '{}'.format(self.classes[c])\n plt.text(x, y, s, fontsize=14,horizontalalignment='center',weight='bold',backgroundcolor=(1,1,1))\n plt.show()" ]
[ "0.74479693", "0.7110669", "0.7070581", "0.70114297", "0.6918849", "0.67359346", "0.6709659", "0.66393256", "0.65917903", "0.6556406", "0.65250915", "0.65222067", "0.64628524", "0.6459832", "0.6419342", "0.64013", "0.6392646", "0.63896686", "0.6386491", "0.6378196", "0.63775504", "0.63311374", "0.63309795", "0.63307166", "0.63272226", "0.6304253", "0.62912446", "0.62882566", "0.6280393", "0.62526804" ]
0.7864521
0
Return the real value instead of the %.
def real_value(val): return round(val/100*sum(euros), 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __float__(self):\n return self.num/self.denom", "def __float__(self):\n return self.num/self.denom", "def get_real_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_real_rating() / self.field.range)", "def __float__(self):\n return self.num / self.denom # result of / is of type float", "def unit_of_measurement(self):\n return \"%\"", "def value(self) -> float:", "def __float__(self) -> float:\n return float(self.p)", "def num (self):\n return self.value[0]/self.value[1]", "def __float__( self ):\r\n\t\tif ( types.ComplexType in ( type( self.numerator ), type( self.denominator ) ) ):\r\n\t\t\tn,d = self.numerator, self.denominator\r\n\t\t\tif ( type( n ) == types.ComplexType ): n = abs( n )\r\n\t\t\tif ( type( d ) == types.ComplexType ): d = abs( d )\r\n\t\t\treturn n / d\r\n\t\treturn float( self.numerator ) / self.denominator", "def __float__(self) -> float:\n val = self.numer_prod() / self.denom_prod()\n return -val if self.neg else val", "def rmdspe(self) -> float:\n return float(np.sqrt(np.median(np.square(self._percentage_error()))) * 100.0)", "def percent(value):\n return f\"{value:,.2f} %\"", "def __float__(self):\n return float(self.number)", "def get_as_float(self):\n return float(self.numerator / self.denominator)", "def get(self) -> float:\n ...", "def value(self):\n return super().value() / self._precision", "def pct(self):\n\t\treturn self.bottle.pct()", "def value_to_percent(value):\n return ...", "def to_float(self): \n return (self._num / self._den)", "def p() -> float:\n return 0.9", "def value(self) -> float:\n ...", "def value(self) -> float:\n ...", "def value(self) -> float:\n ...", "def rel_value(value, pct):\n return(value*(1+pct/100.0))", "def denominator(self, ???):", "def __round__(self, ???):", "def percentage_to_float(self, val):\n return float(val.strip('%'))/100", "def __float__(self):\n\t\toutput = 0.0\n\n\t\tfor index,value in enumerate(self):\n\t\t\tif value > 0:\n\t\t\t\toutput+=float(value) * (10 ** -index)\n\n\t\treturn output", "def __float__(self) -> float:\n float_number = self.numerator_a / self.denominator_b\n print(f'Fraction {self.fraction} float number is {float_number}')\n return float_number", "def short_percent_of_float(self):\n if self._short_percent_of_float == None:\n return 0.\n return self._short_percent_of_float" ]
[ "0.7050284", "0.7050284", "0.6976536", "0.68554485", "0.65986884", "0.6563995", "0.6494256", "0.64709556", "0.6425573", "0.64117116", "0.6388856", "0.6343258", "0.6331123", "0.6329049", "0.6311305", "0.6307815", "0.62896955", "0.6274282", "0.6263361", "0.62617683", "0.6260623", "0.6260623", "0.6260623", "0.625165", "0.6247114", "0.62277657", "0.622094", "0.62130666", "0.61988956", "0.6194634" ]
0.7619975
0
summary of the vibrational analysis
def vibrational_analysis_summary(self): print("Summary of the vibrational analysis:") #cols = [ "eigvals [a.u.]" , "w [a.u.]", "w [THz]", "w [cm^-1]", "T [a.u.]", "T [ps]","E [a.u.]", "n [a.u.]"] df = pd.DataFrame() eigvals = self.eigvals.copy() eigvals [ eigvals == MicroStatePrivate.smallest_float ] = np.nan df["eigvals [a.u.]"] = eigvals df["w [a.u.]"] = [ np.sqrt(i) if i > 0. else None for i in eigvals ] df["w [THz]"] = convert(df["w [a.u.]"],"frequency",_from="atomic_unit",_to="thz") df["w [cm^-1]"] = convert(df["w [a.u.]"],"frequency",_from="atomic_unit",_to="inversecm") df["T [a.u.]"] = 2*np.pi / df["w [a.u.]"] df["T [ps]"] = convert(df["T [a.u.]"],"time",_from="atomic_unit",_to="picosecond") if hasattr(self,"energy"): df["E [a.u.]"] = self.energy.mean(axis=0) if hasattr(self,"occupations"): df["n [a.u.]"] = self.occupations.mean(axis=0) return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def summary(self):\r\n self.base.summary()\r\n self.extra_layers.summary()\r\n self.detector.summary()", "def compute_statistics(self):", "def full_analysis(self):\n print('FULL ANALYSIS\\n' +\n '----------------------------------\\n')\n #print('Basic Statistics') # Remove this and run 'basic_stats'\n results.append('FULL ANALYSIS\\n' +\n '----------------------------------\\n')\n print('Basic Information\\n' +\n '----------------------------')\n results.append('Basic Information\\n' +\n '----------------------------')\n self.info_density()\n self.calc_total_rows()\n self.show_empty()\n self.calc_null()\n self.calc_col_len()\n self.calc_row_len()\n self.calc_col_info()\n self.regex_info()", "def summary(self):\r\n print(self.model.summary())", "def vif_cal(data, y):\n \n x_vars=data.drop([y], axis=1)\n xvar_names=x_vars.columns.tolist()\n x_var_col, vif_list = [], []\n str_gap = max([len(c) for c in xvar_names])+2\n\n # print(\"{:*^20s}\".format(\"VIF Summary\"))\n str_len = str_gap + 2 + 7 + 3 + 6 - len(' VIF Summary ')\n star_str = '*'*int(str_len/2)\n str_to_print = ''.join((star_str,' VIF Summary ',star_str))\n print(str_to_print)\n\n for xvar in xvar_names:\n y=xvar \n x=xvar_names.copy()\n x.remove(xvar)\n\n formula = \"{} ~ {} + 1\".format(y, ' + '.join(x))\n rsq=smf.ols(formula, data=x_vars).fit().rsquared \n if rsq==1: vif=np.inf\n else: vif=round(1/(1-rsq),10)\n x_var_col.append(xvar)\n vif_list.append(vif)\n print('vif of {:<{width}} = {:.6}'.format(xvar, vif, width=str_gap))\n\n str_len = str_gap + 2 + 7 + 3 + 6 - len(' VIF Summary END ')\n star_str = '*'*int(str_len/2)\n str_to_print = ''.join((star_str,' VIF Summary END ',star_str))\n print(str_to_print)\n\n vif_df = pd.DataFrame({'x_variable': x_var_col, 'vif': vif_list})\n vif_df = vif_df[['x_variable', 'vif']]\n return vif_df", "def summary(self):\n self.model.summary()", "def dataStats(self):\n print (\"Performing statistical analysis of the data\")\n # stuff to do", "def summary(self):\n print(self.model.summary())", "def advancedStats():", "def compute_analysis(self):\r\n def get_mean(self):\r\n \"\"\"\r\n Compute mean in all sensors\r\n \"\"\"\r\n for i in range(1,len(self.data[0])):\r\n self.prom.append(np.mean(self.data[:,i])) \r\n\r\n \r\n def get_stddev(self):\r\n \"\"\"\r\n Compute mean in all sensors\r\n \"\"\"\r\n for i in range(1,len(self.data[0])):\r\n self.stddev.append(np.std(self.data[:,i])) \r\n \r\n # Get the values\r\n get_mean(self)\r\n get_stddev(self)\r\n \r\n # Check condition\r\n [(self.out_of_3stddev.append(i)) \r\n for (i) in (self.data[:,0:4]) \r\n if (any(\r\n (i[1:4] > 3*np.array(self.stddev)+np.array(self.prom))|\r\n (i[1:4] < -3*np.array(self.stddev)+np.array(self.prom))\r\n ))]", "def show_all(self):\n self.explained_variance_score()\n self.max_error()\n self.mean_absolute_error()\n self.mean_squared_error()\n self.median_absolute_error()\n self.r2_score()\n self.mean_poisson_deviance()\n self.mean_gamma_deviance()\n self.feature_importance()\n self.learning_curve()", "def AllFreAverageV(self,):\n \t\tv_array = self.data\n \t\taaverage_v = np.average(v_array[:,1])\n \t\tprint('Whole frequency average group velocity:\\nVw=',aaverage_v/1000,'km/s')\n \t\treturn", "def print_info(self):\n\t\t\n\t\tweighted_snapshots = np.sqrt(self.weights)*self.snapshots.T\n\t\teigenvectors,eigenvalues,__ = np.linalg.svd(weighted_snapshots.T, full_matrices=False)\n\t\tself.pod_basis = np.transpose(np.power(self.weights,-0.5)*eigenvectors.T)\n\t\t\n\t\tself.cvt_handler = cvt.Cvt(self.mu_values, 
self.snapshots, self.pod_basis, self.weights)\n\t\tself.cvt_handler.add_new_point()\n\t\t\t\n\t\tprint ('Maximum error on the tassellation: ' + str(self.cvt_handler.max_error))\n\t\tprint ('New baricentric parameter value added to the triangulation ' + str(self.cvt_handler.mu_values[:,-1]) + '\\n')", "def stats(self):", "def print_summary(self):\n self.model.summary()", "def svm():", "def analyse(self):\n pass", "def summary(self):\n\n self.model.summary(print_fn=lambda x: logging.info(x))", "def calibV(self):\n # clear buffer in case of errors\n self.flushInput()\n \n if (self.model == 'GDS'):\n self.write(':CHAN'+str(ch)+':SCAL?\\n')\n # returns V/div, turn it into multiplicative factor\n # between digitizer and actual volts\n vmult = float(self.readline()) * 10./255.\n # GDS includes vertical offset in the data returned.\n voff = 0.\n elif (self.model == 'TDS'):\n self.write('WFMPre:YMUlt?\\n')\n # formula I am using later is from TDS manual, so this\n # is straightforward.\n vmult = float(self.readline())\n self.write('WFMPre:YOFf?\\n')\n voff = float(self.readline())\n \n # clear buffer in case of errors\n self.flushInput()\n\n return (vmult, voff)", "def get_summary_statistics(self):\n # Get log 10 total mutation count\n self.log_mut_count = np.log10(self.variant_df.shape[0])\n\n # Get the number of variants stratified by functional location of variant\n # E.g. Exon, Intron, 5'UTR, etc.\n self.functional_counts = pd.DataFrame(self.variant_df['Func.refGene'].value_counts())\n self.functional_counts.columns = [self.sample_name]\n \n # Get the number of variants stratified by exonic functional outcome of variant\n # E.g. Silent, Nonsense, Missense, etc.\n self.mutational_class_counts = (\n pd.DataFrame(self.variant_df['ExonicFunc.refGene'].value_counts())\n )\n self.mutational_class_counts.columns = [self.sample_name]\n \n # Get number of COSMIC curated events\n self.cosmic_variants = self.variant_df[self.variant_df['cosmic70'] != '.']\n self.cosmic_variants = self.cosmic_variants.assign(sample_name = self.sample_name,\n final_id = self.final_id)\n self.cosmic_variant_counts = self.cosmic_variants.shape[0]\n \n # Get depth summary\n self.depth_summary = pd.DataFrame(self.variant_df['depth'].astype(int).describe())\n self.depth_summary.columns = [self.sample_name]\n \n return self.functional_counts, self.mutational_class_counts, self.depth_summary", "def vis_survival_stats(data, outcomes, feature):\n pass", "def _analyze(self):\n self.sim_setup_name, self.sweep_name = self.renderer.initialize_drivenmodal(\n **self.setup)\n\n self.renderer.analyze_sweep(self.sweep_name, self.sim_setup_name)\n # TODO: return the impedance, admittance and scattering matrices for later use", "def print_inference_result(self):\n if (\n self.params.model_str == 'optfixedsig'\n or self.params.model_str == 'opt'\n or self.params.model_str == 'fixedparam'\n ):\n print('*ls pt est = ' + str(self.sample_list[0].ls) + '.')\n print('*alpha pt est = ' + str(self.sample_list[0].alpha) + '.')\n print('*sigma pt est = ' + str(self.sample_list[0].sigma) + '.')\n elif self.params.model_str == 'samp' or self.params.model_str == 'sampfixedsig':\n ls_arr = np.array([ns.ls for ns in self.sample_list])\n alpha_arr = np.array([ns.alpha for ns in self.sample_list])\n sigma_arr = np.array([ns.sigma for ns in self.sample_list])\n print('*ls mean = ' + str(ls_arr.mean()) + '.')\n print('*ls std = ' + str(ls_arr.std()) + '.')\n print('*alpha mean = ' + str(alpha_arr.mean()) + '.')\n print('*alpha std = ' + str(alpha_arr.std()) + 
'.')\n print('*sigma mean = ' + str(sigma_arr.mean()) + '.')\n print('*sigma std = ' + str(sigma_arr.std()) + '.')\n print('-----')", "def summary(self):\n raise NotImplementedError", "def test_get_vulnerability_occurrences_summary(self):\n pass", "def summary(self):\n return self.model.summary()", "def stats(self):\n return self._solution", "def stats(self):\n return self._solution", "def stats(self):\n return self._solution", "def analyzeIV(t, V, I, tw, thr):\n ntraces = numpy.shape(V)[0]\n vss = []\n vmin = []\n vm = []\n ic = []\n nspikes = []\n ispikes = []\n tmin = []\n fsl = []\n fisi = []\n for j in range(0, ntraces):\n ts = tw[0]\n te = tw[1]\n td = tw[2]\n ssv = measure('mean', t, V[j,:], te-td, te)\n ssi = measure('mean', t, I[j,:], te-td, te)\n rvm = measure('mean', t, V[j,:], 0.0, ts-1.0)\n minv = measure('min', t, V[j,:], ts, te)\n spk = findspikes(t, V[j,:], thr, t0=ts, t1=te)\n nspikes.append(count_spikes(spk)) # build spike list\n ispikes.append(ssi[0])\n if nspikes[-1] >= 1:\n fsl.append(spk[0])\n else:\n fsl.append(None)\n if nspikes[-1] >= 2:\n fisi.append(spk[1]-spk[0])\n else:\n fisi.append(None)\n vm.append(rvm[0])\n if ssi[0] < 0.0: # just for hyperpolarizing pulses...\n ic.append(ssi[0])\n vss.append(ssv[0]) # get steady state voltage\n vmin.append(minv[0]) # and min voltage\n tmin.append(minv[1]) # and min time\n\n return({'I': numpy.array(ic), 'Vmin': numpy.array(vmin), 'Vss': numpy.array(vss),\n 'Vm': numpy.array(vm), 'Tmin': numpy.array(tmin), \n 'Ispike': numpy.array(ispikes), 'Nspike': numpy.array(nspikes), \n 'FSL': numpy.array(fsl), 'FISI': numpy.array(fisi)})" ]
[ "0.6564602", "0.64965", "0.63673824", "0.6317777", "0.6268647", "0.6255099", "0.6235681", "0.62339216", "0.62134176", "0.6196753", "0.61504763", "0.6085174", "0.6070195", "0.6057506", "0.60011464", "0.59826547", "0.5980289", "0.5943046", "0.5934304", "0.5932248", "0.59235865", "0.5917871", "0.58857214", "0.58854204", "0.58198863", "0.5807645", "0.5803657", "0.5803657", "0.5803657", "0.5799697" ]
0.80188596
0
The `UniqueTogetherValidator` always forces an implied 'required' state on the fields it applies to.
def enforce_required_fields(self, attrs): if self.instance is not None: return # missing_items = { # field_name: self.missing_message # for field_name in self.fields # if field_name not in attrs # } # if missing_items: # raise ValidationError(missing_items, code='required')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean(self):\n super(RequireOneFormSet, self).clean()\n for error in self.errors:\n if error:\n return\n completed = 0\n for cleaned_data in self.cleaned_data:\n # form has data and we aren't deleting it.\n if cleaned_data and not cleaned_data.get('DELETE', False):\n completed += 1\n\n if completed < 1:\n raise forms.ValidationError(\"At least one %s is required.\" %\n self.model._meta.object_name.lower())", "def unique_together(self):\n if self._meta.unique_together:\n return self._meta.unique_together[0]\n return ()", "def test_unique_together(self):\n form = PriceForm({'price': '6.00', 'quantity': '1'})\n self.assertTrue(form.is_valid())\n form.save()\n form = PriceForm({'price': '6.00', 'quantity': '1'})\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 1)\n self.assertEqual(form.errors['__all__'], ['Price with this Price and Quantity already exists.'])", "def validation_required(self, validation_required):\n self._validation_required = validation_required", "def required_dict_validator(self, dict_fields, model_name, erp_required=[]):\n required_fields = self.env['settings.field'].sudo().search([('model_id.model', '=', model_name)])\n\n if required_fields:\n erp_required.extend(required_fields.required_field_ids.filtered(lambda x: x.id not in [er.id for er in erp_required]))\n\n for field in erp_required:\n if field.name in dict_fields and 'required' not in dict_fields[field.name]:\n dict_fields[field.name]['required'] = True\n dict_fields[field.name]['empty'] = False\n\n return dict_fields", "def validate_unique(self, exclude=None, **kwargs):\n return super().validate_unique(exclude=exclude, user=self.user)", "def unique_together(self):\n return self._unique_together", "def is_required(self, field):\n return field.scheme.is_required and not field.scheme.is_pk", "def get_multiple_choices_required(self):\n errors = []\n if self.required:\n for key, msg in self.error_messages.items():\n if key == 'required':\n errors.append(('$error.required', msg))\n return errors", "def apply_unique_together(self, unique_together):\n self.unique_together = unique_together\n self._unique_together_applied = True", "def isRequired(self):\n return not self.isOptional()\n # return self._field.label == FieldDescriptor.LABEL_REQUIRED", "def record_unique_together_applied(model_sig):\n model_sig['meta']['__unique_together_applied'] = True", "def validate_unique(self, exclude=None):\n qs_barcode = Product.objects.filter(barcode=self.barcode)\n qs_item_number = Product.objects.filter(item_number=self.item_number)\n qs_name = Product.objects.filter(name=self.name)\n if qs_barcode.filter(webshop_id=self.webshop_id).exclude(id=self.id).exists():\n raise ValidationError(detail='Barcode must be unique in one webshop')\n if qs_item_number.filter(webshop_id=self.webshop_id).exclude(id=self.id).exists():\n raise ValidationError(detail='Item number must be unique in one webshop')\n if qs_name.filter(webshop_id=self.webshop_id).exclude(id=self.id).exists():\n raise ValidationError(detail='Item Name must be unique in one webshop')", "def validation_required(self):\n return self._validation_required", "def unique_together(self, new_value):\n self._unique_together = self._normalize_together(new_value)", "def testRequiredAndRepeated(self):\n def action(field_class):\n field_class(1, required=True)\n field_class(1, repeated=True)\n self.assertRaises(messages.FieldDefinitionError,\n field_class,\n 1,\n required=True,\n repeated=True)\n self.ActionOnAllFieldClasses(action)", "def 
remove_validation_unique(self):\n fields = {}\n # extract unique validators\n for name, field in self.fields.items():\n fields[name] = []\n assert hasattr(field, 'validators'), \"no validators on {}\".format(field.__class__.__name__)\n for validator in field.validators:\n if isinstance(validator, UniqueValidator):\n fields[name].append(validator)\n for validator in fields[name]:\n field.validators.remove(validator)\n # extract unique_together validators\n fields['_'] = []\n for validator in self.validators:\n if isinstance(validator, UniqueTogetherValidator):\n fields['_'].append(validator)\n for validator in fields['_']:\n self.validators.remove(validator)\n return fields", "def validators_effect_required(self, field, func, *args, **kwargs):\n NOT_EXIST = '_MISSING_'\n original_required = getattr(field, 'required', NOT_EXIST)\n field.required = False\n func(*args, **kwargs)\n after_false = field.required\n field.required = True\n func(*args, **kwargs)\n after_true = field.required\n result = None\n if after_false and after_true:\n result = True\n elif not after_false and not after_true:\n result = False\n elif after_false and not after_true:\n result = 'Flip'\n field.required = original_required\n if original_required == NOT_EXIST:\n del field.required\n return result", "def get_required_fields(self) -> Iterable[fields.Field]:\n for model_field in self.get_fields():\n if model_field.required:\n yield model_field", "def _validate(self, queryset):\n values_distinct = queryset.values(\n *self._invoice_report_common_fields\n ).distinct()\n if values_distinct.count() != 1:\n raise ValidationError(self._get_non_unique_error(queryset))\n if not all(values_distinct[0].values()):\n raise ValidationError(\"None of {} can't be empty\".format(', '.join(\n self._invoice_report_common_fields\n )))", "def clean(self):\n\n self.cleaned_data = super(RoomForm, self).clean()\n\n cleaned_data = self.cleaned_data.copy()\n\n if cleaned_data.get(\"trial\") is None:\n self.cleaned_data[\"trial\"] = False\n else:\n cleaned_data.pop(\"trial\")\n\n if not all(cleaned_data.get(field) for field in cleaned_data.keys()):\n\n raise forms.ValidationError(\n message=\"You must fill in all the required fields!\",\n )", "def make_fields_unique(self, fields):\n ...", "def validate(self, attrs):\n\n unknown = set(self.initial_data) - set(self.fields)\n if unknown:\n raise ValidationError('Unknown field(s): {}'.format('', ''.join(unknown)))\n return attrs", "def clean(self):\n cleaned_data = super().clean()\n if all(val == \"\" for val in cleaned_data.values()):\n raise ValidationError(\"You must fill at least one field!\")\n\n return cleaned_data", "def validate(self, attrs):\n\n errors = {}\n order_obj = Order.objects.get(order_id=attrs['order_id'])\n if order_obj.courier_id.courier_id != attrs['courier_id'].courier_id:\n errors['order_id'] = f'Order with id {order_obj.order_id} is assigned to another courier.'\n unknown = set(self.initial_data) - set(self.fields)\n if unknown:\n errors['Unknown field(s)'] = ''.join(unknown)\n if order_obj.assign_time > attrs['complete_time']:\n errors['complete_time'] = 'complete_time cannot be greater then assign_time.'\n if errors:\n raise ValidationError(errors)\n return attrs", "def _verify_repeated(self):\n if self._repeated and (self._required or self._default is not None):\n raise ValueError(\n \"repeated is incompatible with required or default\"\n )", "def run_validators(self, value):\r\n for validator in self.validators:\r\n if isinstance(validator, 
validators.UniqueTogetherValidator):\r\n self.validators.remove(validator)\r\n super(ProfileSerializer, self).run_validators(value)", "def test_empty_required_only(self):\n val = DwcaValidator(yaml.load(self.empty4, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'required_to_be_empty': ''}\n self.assertTrue(val.validate(document))\n document = {'required_to_be_empty': 'tdwg'}\n self.assertFalse(val.validate(document))\n self.assertEqual(val.errors,\n {'required_to_be_empty': ['unallowed value tdwg']})", "def has_unique_together_changed(old_model_sig, new_model_sig):\n old_meta = old_model_sig['meta']\n new_meta = new_model_sig['meta']\n old_unique_together = old_meta['unique_together']\n new_unique_together = new_meta['unique_together']\n\n return (list(old_unique_together) != list(new_unique_together) or\n ((old_unique_together or new_unique_together) and\n not old_meta.get('__unique_together_applied', False)))", "def check_for_required_fields(cls, fields=[], dataDict={}):\n\n validateRequired = Validate.required(fields=fields, dataDict=dataDict)\n if validateRequired['status'] == False:\n res = jsonify(\n {'status': 400, 'error': validateRequired['message'], 'data': []})\n return abort(make_response(res, 400))\n return True" ]
[ "0.63508886", "0.62670535", "0.6224601", "0.5925875", "0.5883616", "0.5850091", "0.58260864", "0.58111674", "0.576753", "0.5755194", "0.56639725", "0.5650911", "0.564817", "0.5634258", "0.5611223", "0.5602472", "0.5597902", "0.5581856", "0.55527353", "0.55427045", "0.5531815", "0.5503411", "0.54604787", "0.5439105", "0.54022634", "0.53937715", "0.5372529", "0.5345599", "0.533668", "0.53354704" ]
0.6627792
0
Retrieve the destination Group.
def get_destination_group(self): sg = self.source_group dd = self.destination_directory while True: try: matches = dd.groups.search({'name': sg.name}) return matches[0] if len(matches) > 0 else None except StormpathError as err: logger.error('Failed to search for Group: {} in Directory: {} ({})'.format(sg.name.encode('utf-8'), dd.name.encode('utf-8'), err))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def destination_group_id(self):\n return self._destination_group_id", "def copy_group(self):\n dd = self.destination_directory\n sg = self.source_group\n dg = self.destination_group\n\n data = {\n 'description': sg.description,\n 'name': sg.name,\n 'status': sg.status,\n }\n\n # If this Group already exists, we'll just update it.\n if dg:\n for key, value in data.items():\n setattr(dg, key, value)\n\n while True:\n try:\n dg.save()\n return dg\n except StormpathError as err:\n logger.error('Failed to copy Group: {} into Directory: {} ({})'.format(sg.name.encode('utf-8'), dd.name.encode('utf-8'), err))\n\n # If we get here, it means we need to create the Group from scratch.\n while True:\n try:\n return dd.groups.create(data)\n except StormpathError as err:\n logger.error('Failed to copy Group: {} into Directory: {} ({})'.format(sg.name.encode('utf-8'), dd.name.encode('utf-8'), err))", "def get_group(self):\n return self._group", "def get(self):\n self._group = self._client.get(\n url=self._client.get_full_url(\n self.get_path(\n 'single', realm=self._realm_name, group_id=self._group_id\n )\n )\n )\n self._group_id = self._group[\"id\"]\n return self._group", "def get_group(self, wanted_group):\n if self.group_file:\n return self._get_group_from_file(wanted_group)\n return self._get_group_from_host(wanted_group)", "def migrate(self):\n self.destination_group = self.get_destination_group()\n self.destination_group = self.copy_group()\n self.copy_custom_data()\n\n logger.info('Successfully copied Group: {}'.format(self.destination_group.name.encode('utf-8')))\n return self.destination_group", "def get_group(self, group_name):\n\n return self._group[group_name]", "def getGroup(self):\n\t\treturn self.Group", "def _get_target_group(self):\n return self.__target_group", "def _get_target_group(self):\n return self.__target_group", "def _get_target_group(self):\n return self.__target_group", "def _get_target_group(self):\n return self.__target_group", "def _get_target_group(self):\n return self.__target_group", "def _get_target_group(self):\n return self.__target_group", "def _get_group(self):\n if self.resource.group is not None:\n try:\n return grp.getgrnam(self.resource.group).gr_gid\n except KeyError:\n raise error.InvalidGroup()", "def get_group(self, groupId):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/GroupV2/{groupId}/\"))", "def get_group(self) -> Optional[str]:\n return self.group", "def group(self):\n return self.properties.get('Group', None)", "def group(self):\n return self._group", "def group(self):\n return self._group", "def group(self):\n return self._group", "def get_group(self, group_id):\n return self.root.get(group_id)", "def get_group(self):\n\t\treturn self.variables.get('group')", "def target_group(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"target_group\")", "def get_group(self, group_id: str):\n\n return self._get(f\"cloudConnectorGroups/{group_id}\")", "def get_group(group):\n\n return ldapi.lookup(ld, 'cn', group, cfg['ldap_groups_base'])", "def get_group(self, group_id: str) -> dict:\n group = self.ms_client.http_request(method='GET', url_suffix=f'groups/{group_id}')\n return group", "def getGroup(self, group_id: int) -> 'Group':\n return self.sObj.getGroup(group_id)", "def get_address_group(self, group=None):\n return self.__get_addr_grp('address-group', group)", "def get_group(self, group_path=None):\n if group_path is not 
None:\n path = '/group/' + group_path\n else:\n path = '/group/%2F'\n try:\n response = self.__session.get(self.__api_base_url + path)\n response.raise_for_status()\n response = response.json()\n except (requests.HTTPError, requests.ConnectionError), error:\n raise Exception(error.message)\n\n return response" ]
[ "0.76878965", "0.6967713", "0.6963653", "0.6943798", "0.68907416", "0.6877535", "0.67855513", "0.6750913", "0.66631055", "0.66631055", "0.66631055", "0.66631055", "0.66631055", "0.66631055", "0.6655987", "0.66556984", "0.6610054", "0.6482741", "0.6440536", "0.6440536", "0.6440536", "0.6429192", "0.64258593", "0.63394755", "0.6329552", "0.6313585", "0.62991166", "0.62392616", "0.6184514", "0.6134198" ]
0.84470224
0
Copy the source Group over into the destination Directory.
def copy_group(self): dd = self.destination_directory sg = self.source_group dg = self.destination_group data = { 'description': sg.description, 'name': sg.name, 'status': sg.status, } # If this Group already exists, we'll just update it. if dg: for key, value in data.items(): setattr(dg, key, value) while True: try: dg.save() return dg except StormpathError as err: logger.error('Failed to copy Group: {} into Directory: {} ({})'.format(sg.name.encode('utf-8'), dd.name.encode('utf-8'), err)) # If we get here, it means we need to create the Group from scratch. while True: try: return dd.groups.create(data) except StormpathError as err: logger.error('Failed to copy Group: {} into Directory: {} ({})'.format(sg.name.encode('utf-8'), dd.name.encode('utf-8'), err))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def migrate(self):\n self.destination_group = self.get_destination_group()\n self.destination_group = self.copy_group()\n self.copy_custom_data()\n\n logger.info('Successfully copied Group: {}'.format(self.destination_group.name.encode('utf-8')))\n return self.destination_group", "def copy_to_se(self, src, dst, create_parent_directory=True):\n mgm, dst = self._safe_split_mgm(dst)\n dst = self._join_mgm_lfn(mgm, dst)\n if create_parent_directory:\n parent_directory = osp.dirname(dst)\n self.create_directory(parent_directory)\n logger.warning('Copying {0} to {1}'.format(src, dst))\n cmd = [ 'xrdcp', '-s', src, dst ]\n svj.core.utils.run_command(cmd)", "def copy(self, src, dst, label=None):\n self._tag(dst, label)\n self._mkdir_for(dst)\n shutil.copyfile(self._rootjoin(src), os.path.join(self.chroot, dst))", "def copy_one(self, src, dest):\n if dest.is_dir():\n shutil.rmtree(dest)\n elif dest.exists():\n dest.unlink()\n\n if not dest.parent.exists():\n self.log.debug(f\"creating folder {dest.parent}\")\n dest.parent.mkdir(parents=True)\n\n self.maybe_timestamp(dest.parent)\n\n if src.is_dir():\n shutil.copytree(src, dest)\n else:\n shutil.copy2(src, dest)\n\n self.maybe_timestamp(dest)", "def copy(self, src, dst, label=None):\r\n self._tag(dst, label)\r\n self._mkdir_for(dst)\r\n shutil.copyfile(self._rootjoin(src), os.path.join(self.chroot, dst))", "def copy(self, src, dst, label=None):\r\n self._tag(dst, label)\r\n self._mkdir_for(dst)\r\n shutil.copyfile(self._rootjoin(src), os.path.join(self.chroot, dst))", "def copy(self, src, dest):\n\n src = os.path.join(os.path.dirname(__file__), \"collections\", \"kitchensink\", src)\n dest = os.path.join(self.checkout, dest)\n if os.path.isdir(src):\n shutil.copytree(src, dest)\n else:\n shutil.copy(src, dest)\n return dest", "def copy_one(self, src, dest):\n if self.manager.no_sourcemaps and self.is_ignored_sourcemap(src.name):\n return\n\n if dest.is_dir():\n shutil.rmtree(dest)\n elif dest.exists():\n dest.unlink()\n\n if not dest.parent.exists():\n self.log.debug(f\"creating folder {dest.parent}\")\n dest.parent.mkdir(parents=True)\n\n self.maybe_timestamp(dest.parent)\n\n copytree_kwargs = {}\n\n if self.manager.no_sourcemaps:\n copytree_kwargs[\"ignore\"] = SOURCEMAP_IGNORE_PATTERNS\n\n if src.is_dir():\n shutil.copytree(src, dest, **copytree_kwargs)\n else:\n shutil.copy2(src, dest)\n\n self.maybe_timestamp(dest)", "def test_rsync_set_group(self):\n \n root = tempfile.mkdtemp(prefix=\"rsync_test_set_group_\")\n avail_groups = os.getgroups()\n exp_group = grp.getgrgid(avail_groups[random.randint(1,len(avail_groups))-1])[0]\n \n # Create some files to move\n to_copy = self._create_test_files(root)\n \n # Run rsync\n with open(os.devnull, 'w') as f:\n old_stdout = sys.stdout\n sys.stdout = f\n rsync_files(to_copy,sys.stdout,exp_group,False)\n sys.stdout = old_stdout\n \n # Verify the copy process set the correct group on created directories\n for ddir in set([d[1] for d in to_copy]):\n gid = os.stat(ddir).st_gid\n obs_group = grp.getgrgid(gid)[0]\n self.assertEqual(obs_group,\n exp_group,\n \"Failed to set group '{}' on directory. Group is {}\".format(exp_group,\n obs_group))\n \n # Verify the copy process set the correct group\n for src, ddir, dname in to_copy:\n dfile = os.path.join(ddir,dname)\n gid = os.stat(dfile).st_gid\n obs_group = grp.getgrgid(gid)[0]\n self.assertEqual(obs_group,\n exp_group,\n \"Failed to set group '{}' on file. 
Group is {}\".format(exp_group,\n obs_group))", "def copy_structure(self, other_directory):\n pass", "def copyGroupFrom(self, groupName, sourceDesign, sourceProject=None, sourceProjectPath=None):\n oName = self.project_name\n if sourceProject == oName or sourceProject is None:\n oSrcProject = self._desktop.GetActiveProject()\n else:\n self._desktop.OpenProject(sourceProjectPath)\n oSrcProject = self._desktop.SetActiveProject(sourceProject)\n\n oDesign = oSrcProject.SetActiveDesign(sourceDesign)\n oEditor = oDesign.SetActiveEditor(\"3D Modeler\")\n oEditor.Copy([\"NAME:Selections\", \"Selections:=\", groupName])\n\n self.modeler.oeditor.Paste()\n self.modeler.primitives.refresh_all_ids()\n self.materials._load_from_project()\n return True", "def copy_dir(src, dst):\n try:\n debug.log(\"copy dir from \"+ src, \"to \"+ dst)\n shutil.copytree(src, dst)\n except Exception as e:\n debug.log(\"Error: happened while copying!\\n%s\\n\"%e)", "def group_image(directory, image, group):\r\n\tif os.path.exists(directory + \"\\\\\" + group):\r\n\t\tpass\r\n\telse:\r\n\t\ttry:\r\n\t\t\tos.mkdir(directory + '\\\\' + group)\r\n\t\t\tprint(\"Successfully created directory\", group)\r\n\t\texcept OSError:\r\n\t\t\tprint(\"Creation of directory failed.\")\r\n\ttry:\r\n\t\tshutil.copy(str(directory + '\\\\' + image), str(directory + \"\\\\\" + group + \"\\\\\" + image))\r\n\texcept OSError as OSe:\r\n\t\tprint(OSe)", "def copy(self, src, dest, recursive=False, update=False):\n self.makedir(posixpath.dirname(dest))\n command = CommandBuilder.copy(src, dest, recursive, update)\n return self.execute_command(command)", "def _internal_copy(source, source_path, target, target_path, maintain_flag):\n if maintain_flag:\n try:\n target.create_group(target_path)\n except ValueError:\n pass # In case the copy_to() function failed previously and the group already exists.\n\n if target_path == \"/\":\n source.copy(target_path, \"/\") if source == target else source.copy(\n target_path, target\n )\n else:\n if maintain_flag:\n if dest_path != \"\":\n source.copy(source_path, target[dest_path])\n else:\n source.copy(source_path, target)\n else:\n group_name_old = source_path.split(\"/\")[-1]\n try:\n target.create_group(\"/tmp\")\n except ValueError:\n pass\n source.copy(source_path, target[\"/tmp\"])\n try:\n target.move(\"/tmp/\" + group_name_old, target_path)\n except ValueError:\n del target[dest_path]\n target.move(\"/tmp/\" + group_name_old, target_path)\n del target[\"/tmp\"]", "def copy(self, destination):\n destination = Path(destination)\n src_base = str(self.directory)\n if self.flatten:\n dst_base = destination\n else:\n dst_base = Path(destination.joinpath(self.directory.stem))\n\n for src in self.locations_to_copy:\n if src.is_dir():\n for dir_path, dir_names, file_names in os.walk(str(src)):\n if self.flatten:\n dst_dir = dst_base\n else:\n dst_dir = Path(dir_path.replace(src_base, str(dst_base)))\n if not dst_dir.exists():\n dst_dir.mkdir(parents=True)\n for file in file_names:\n shutil.copy2(os.path.join(dir_path, file), str(dst_dir))\n else:\n if self.flatten:\n dst_dir = dst_base\n else:\n dst_dir = Path(str(src.parent).replace(src_base, str(dst_base)))\n if not dst_dir.exists():\n dst_dir.mkdir(parents=True)\n shutil.copy2(str(src), str(dst_dir))", "def copy_folder(source, destination):\n\n try:\n shutil.copytree(source, destination)\n except (OSError, IOError):\n return False\n else:\n return True", "def copyDir(srcPath, destPath):\n shutil.copytree(srcPath, destPath)", "def copydir(self, destination, 
**kwargs):\n assert _os.path.isdir(self.__str__()) == True\n _shutil.copy(self.__str__(), destination, **kwargs)", "def copy(source, destination):\n if os.path.isdir(source):\n return __copytree(source, destination)\n else:\n return __copyfile2(source, destination)", "def copy_custom_data(self):\n sg = self.source_group\n dg = self.destination_group\n dd = self.destination_directory\n\n for key, value in sanitize(sg.custom_data).items():\n dg.custom_data[key] = value\n\n try:\n dg.custom_data.save()\n return dg.custom_data\n except StormpathError as err:\n logger.error('Failed to copy CustomData for Group: {} in Directory: {} ({})'.format(sg.name.encode('utf-8'), dd.name.encode('utf-8'), err))", "def _copy_dir(src, dst):\n if os.path.isdir(src):\n os.makedirs(dst, exist_ok=True)\n for item in os.listdir(src):\n s = os.path.join(src, item)\n d = os.path.join(dst, item)\n\n if os.path.isdir(s):\n _copy_dir(s, d)\n else:\n shutil.copy2(s, d)\n\n else:\n os.makedirs(os.path.dirname(dst), exist_ok=True)\n _delete_file(dst)\n shutil.copy2(src, dst)", "def cpr(src, dst):\n shutil.copytree(src, dst)", "def copy(source, destination):\r\n\r\n source_ = os.path.abspath(os.path.expanduser(source))\r\n destination_ = os.path.abspath(os.path.expanduser(destination))\r\n\r\n if not os.path.exists(destination_) and not os.path.isfile(source_):\r\n os.makedirs(destination_)\r\n\r\n def recurse(source, destination):\r\n for entry in os.listdir(source):\r\n entry_path = os.path.join(source, entry)\r\n if os.path.isdir(entry_path):\r\n entry_dest = os.path.join(destination, entry)\r\n if os.path.exists(entry_dest):\r\n if not os.path.isdir(entry_dest):\r\n raise IOError('Failed to copy {0} a directory.'\r\n .format(entry_dest))\r\n recurse(entry_path, entry_dest)\r\n else:\r\n shutil.copytree(entry_path, entry_dest)\r\n else:\r\n shutil.copy2(entry_path, destination)\r\n\r\n\r\n if os.path.isdir(source_):\r\n recurse(source_, destination_)\r\n\r\n elif os.path.isfile(source_):\r\n dest_dir = os.path.dirname(destination_)\r\n if not os.path.exists(dest_dir):\r\n os.makedirs(dest_dir)\r\n shutil.copy2(source_, destination_)\r\n logger.info('copying %s to %s' % (source_, destination_))\r\n else:\r\n logger.warning('skipped copy %s to %s' % (source_, destination_))", "def execute(self, context):\n\n # go to subtree, select all except input and output groups and mark nodes to be copied\n group_node = context.active_node\n sub_tree = group_node.group_tree\n\n if len(self.conflicts) > 0:\n self._resolve_conflicts(sub_tree, group_node.get_tree())\n\n bpy.ops.arm.edit_group_tree(node_index=group_node.get_id_str())\n [setattr(n, 'select', False) for n in sub_tree.nodes]\n group_nodes_filter = filter(lambda n: n.bl_idname not in {'LNGroupInputsNode', 'LNGroupOutputsNode'}, sub_tree.nodes)\n for node in group_nodes_filter:\n node.select = True\n node['sub_node_name'] = node.name # this will be copied within the nodes\n\n # the attribute should be empty in destination tree\n tree = context.space_data.path[-2].node_tree\n for node in tree.nodes:\n if 'sub_node_name' in node:\n del node['sub_node_name']\n\n # Frames can't be just copied because they do not have absolute location, but they can be recreated\n frame_names = {n.name for n in sub_tree.nodes if n.select and n.bl_idname == 'NodeFrame'}\n [setattr(n, 'select', False) for n in sub_tree.nodes if n.bl_idname == 'NodeFrame']\n\n if any(n for n in sub_tree.nodes if n.select): # if no selection copy operator will raise error\n # copy and past nodes into group tree\n 
bpy.ops.node.clipboard_copy()\n context.space_data.path.pop()\n bpy.ops.node.clipboard_paste() # this will deselect all and select only pasted nodes\n\n # move nodes in group node center\n tree_select_nodes = [n for n in tree.nodes if n.select]\n center = reduce(lambda v1, v2: v1 + v2,\n [Vector(ArmLogicTreeNode.absolute_location(n)) for n in tree_select_nodes]) / len(tree_select_nodes)\n [setattr(n, 'location', n.location - (center - group_node.location)) for n in tree_select_nodes]\n\n # recreate frames\n node_name_mapping = {n['sub_node_name']: n.name for n in tree.nodes if 'sub_node_name' in n}\n ArmAddGroupTreeFromSelected.recreate_frames(sub_tree, tree, frame_names, node_name_mapping)\n else:\n context.space_data.path.pop() # should exit from subtree anywhere\n\n # delete group node\n tree.nodes.remove(group_node)\n for node in tree.nodes:\n if 'sub_node_name' in node:\n del node['sub_node_name']\n\n tree.update()\n\n return {'FINISHED'}", "def copydir(self):\n pass", "def backup_directory(self, source_directory, destination_directory):\n pass", "def copytree(src, dest):\n shutil.copytree(src, dest)\n restorecon(dest, recursive=True)", "def copy_dir(src: Text, dst: Text) -> None:\n\n if tf.io.gfile.exists(dst):\n tf.io.gfile.rmtree(dst)\n tf.io.gfile.makedirs(dst)\n\n for dir_name, sub_dirs, leaf_files in tf.io.gfile.walk(src):\n for leaf_file in leaf_files:\n leaf_file_path = os.path.join(dir_name, leaf_file)\n new_file_path = os.path.join(dir_name.replace(src, dst, 1), leaf_file)\n tf.io.gfile.copy(leaf_file_path, new_file_path)\n\n for sub_dir in sub_dirs:\n tf.io.gfile.makedirs(os.path.join(dir_name.replace(src, dst, 1), sub_dir))", "def cp(self, src, dest):\r\n return self._call(\"-cp\", src, dest, suppress_output=True)" ]
[ "0.73172754", "0.67645866", "0.6629909", "0.6627596", "0.66050786", "0.66050786", "0.6604063", "0.6591996", "0.64791036", "0.6458609", "0.6448786", "0.6380771", "0.6378119", "0.63748825", "0.63348556", "0.6277222", "0.6262582", "0.6262367", "0.6216219", "0.61892563", "0.6181397", "0.6177455", "0.61645585", "0.61457515", "0.61413115", "0.611612", "0.6091829", "0.60893095", "0.6027161", "0.602191" ]
0.8197669
0
Copy CustomData to the destination Group.
def copy_custom_data(self): sg = self.source_group dg = self.destination_group dd = self.destination_directory for key, value in sanitize(sg.custom_data).items(): dg.custom_data[key] = value try: dg.custom_data.save() return dg.custom_data except StormpathError as err: logger.error('Failed to copy CustomData for Group: {} in Directory: {} ({})'.format(sg.name.encode('utf-8'), dd.name.encode('utf-8'), err))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def migrate(self):\n self.destination_group = self.get_destination_group()\n self.destination_group = self.copy_group()\n self.copy_custom_data()\n\n logger.info('Successfully copied Group: {}'.format(self.destination_group.name.encode('utf-8')))\n return self.destination_group", "def copy_group(self):\n dd = self.destination_directory\n sg = self.source_group\n dg = self.destination_group\n\n data = {\n 'description': sg.description,\n 'name': sg.name,\n 'status': sg.status,\n }\n\n # If this Group already exists, we'll just update it.\n if dg:\n for key, value in data.items():\n setattr(dg, key, value)\n\n while True:\n try:\n dg.save()\n return dg\n except StormpathError as err:\n logger.error('Failed to copy Group: {} into Directory: {} ({})'.format(sg.name.encode('utf-8'), dd.name.encode('utf-8'), err))\n\n # If we get here, it means we need to create the Group from scratch.\n while True:\n try:\n return dd.groups.create(data)\n except StormpathError as err:\n logger.error('Failed to copy Group: {} into Directory: {} ({})'.format(sg.name.encode('utf-8'), dd.name.encode('utf-8'), err))", "def copy_to(raw_data, obj):\n\n shutil.copyfileobj(raw_data, obj)", "def CopyData(self, p_int, vtkDataSetAttributes, p_int_1, vtkDataSetAttributes_1, p_int_2):\n ...", "def _copy_data_from(self, original):\n raise NotImplementedError()", "def save_object(self, data):\n return GroupAttribute(**data)", "def _copy_visitor(path, source, destination, labels):\n\n # Skip paths corresponding to excluded labels\n if path.split('/')[0] in labels:\n return\n\n # Copy everything else\n source_obj = source[path]\n if isinstance(source_obj, h5py.Group):\n dest_obj = destination.create_group(path)\n else:\n ds = source_obj\n dest_obj = destination.create_dataset(\n path,\n data=source_obj[()],\n chunks=ds.chunks,\n maxshape=ds.maxshape,\n compression=ds.compression,\n compression_opts=ds.compression_opts,\n scaleoffset=ds.scaleoffset,\n shuffle=ds.shuffle,\n fletcher32=ds.fletcher32,\n fillvalue=ds.fillvalue,\n )\n\n dest_obj.attrs.update(source_obj.attrs)", "def copy(self, data):\n required = {'token', 'container_id',\n \"container_path\", \"host_path\",\n \"host_to_container\"}\n api.validate(data, required)\n token = data['token']\n container_id = data['container_id']\n container_path = data[\"container_path\"]\n host_path = data[\"host_path\"]\n host_to_container = data[\"host_to_container\"]\n self.credentials_module.authorize_container(token,\n container_id)\n user_info = self.credentials_module.authorize_directory(\n token, host_path\n )\n uid = user_info[\"uid\"]\n gid = user_info[\"gid\"]\n if host_to_container:\n # TODO(jorgesece): compress to tar before send\n results = self.docker_module.copy_to_container(container_id,\n container_path,\n host_path,\n )\n else:\n # TODO(jorgesece): uncompress\n results = self.docker_module.copy_from_container(container_id,\n container_path,\n host_path,\n uid,\n gid\n )\n return results", "def CopyTo(self, *args, **kwargs):\n pass", "def with_transforms(self: TDataWTransform, group_name: str) -> TDataWTransform:\n datacopy = self._shallow_clone_dataset()\n datacopy._frozen_transform_groups.with_transform(group_name)\n datacopy._transform_groups.with_transform(group_name)\n return datacopy", "def with_transforms(self: TAvalancheDataset, group_name: str) -> TAvalancheDataset:\n datacopy = self._shallow_clone_dataset()\n datacopy._flat_data = datacopy._flat_data.with_transforms(group_name)\n return datacopy", "def execute(self, context):\n\n # go to subtree, select all 
except input and output groups and mark nodes to be copied\n group_node = context.active_node\n sub_tree = group_node.group_tree\n\n if len(self.conflicts) > 0:\n self._resolve_conflicts(sub_tree, group_node.get_tree())\n\n bpy.ops.arm.edit_group_tree(node_index=group_node.get_id_str())\n [setattr(n, 'select', False) for n in sub_tree.nodes]\n group_nodes_filter = filter(lambda n: n.bl_idname not in {'LNGroupInputsNode', 'LNGroupOutputsNode'}, sub_tree.nodes)\n for node in group_nodes_filter:\n node.select = True\n node['sub_node_name'] = node.name # this will be copied within the nodes\n\n # the attribute should be empty in destination tree\n tree = context.space_data.path[-2].node_tree\n for node in tree.nodes:\n if 'sub_node_name' in node:\n del node['sub_node_name']\n\n # Frames can't be just copied because they do not have absolute location, but they can be recreated\n frame_names = {n.name for n in sub_tree.nodes if n.select and n.bl_idname == 'NodeFrame'}\n [setattr(n, 'select', False) for n in sub_tree.nodes if n.bl_idname == 'NodeFrame']\n\n if any(n for n in sub_tree.nodes if n.select): # if no selection copy operator will raise error\n # copy and past nodes into group tree\n bpy.ops.node.clipboard_copy()\n context.space_data.path.pop()\n bpy.ops.node.clipboard_paste() # this will deselect all and select only pasted nodes\n\n # move nodes in group node center\n tree_select_nodes = [n for n in tree.nodes if n.select]\n center = reduce(lambda v1, v2: v1 + v2,\n [Vector(ArmLogicTreeNode.absolute_location(n)) for n in tree_select_nodes]) / len(tree_select_nodes)\n [setattr(n, 'location', n.location - (center - group_node.location)) for n in tree_select_nodes]\n\n # recreate frames\n node_name_mapping = {n['sub_node_name']: n.name for n in tree.nodes if 'sub_node_name' in n}\n ArmAddGroupTreeFromSelected.recreate_frames(sub_tree, tree, frame_names, node_name_mapping)\n else:\n context.space_data.path.pop() # should exit from subtree anywhere\n\n # delete group node\n tree.nodes.remove(group_node)\n for node in tree.nodes:\n if 'sub_node_name' in node:\n del node['sub_node_name']\n\n tree.update()\n\n return {'FINISHED'}", "def run(self, data, config=None, pipeline=None):\n data[self.dst] = self.copy_fn(data[self.src])\n return data", "def test_6c_copy_data_btw_containers(self):\n if (not GST.logged_in) or (not GST.data_testing_swift_mounted):\n raise unittest.SkipTest(\"Skipped for failed login or failed mounting container.\")\n elif (GST.default_folder_to_be_used):\n if not (default_folders_exists):\n raise unittest.SkipTest(\"Skipped for failed to prepare default directories\")\n elif (not GST.dir1_exists):\n raise unittest.SkipTest(\"Skipped for failed to prepare dir1\")\n elif not GST.copying_data_test_ready:\n raise unittest.SkipTest(\"Skipped for failed to prepare copying data tests.\")\n self.dismiss_dialogs()\n function = js_func[\"copy_file\"] % (GST.gs_file_paths[\"copy_to_container_target_path\"], GST.gs_file_paths[\"file_to_copy_source_path\"])\n try:\n self.send_request(function, \"copy_file()\")\n except Exception as e:\n raise CopyException(\"Failed to copy the file between containers. \\n\" + e.__str__())\n try:\n response = self.get_response()\n assert \"Success\" in response\n self.refresh_page()\n except AssertionError:\n raise CopyException(\"Failed to copy the file between containers. 
\\n\" + response)", "def preprocess(self, data_group):\n\n input_data = data_group.preprocessed_case\n\n # Split Channels\n if self.channels is None:\n channel_subset = np.copy(input_data)\n else:\n all_channels = set(range(input_data.shape[-1]))\n remaining_channels = list(all_channels.difference(set(self.channels)))\n reminaing_channel_subset = np.take(input_data, remaining_channels, axis=-1)\n channel_subset = np.take(input_data, self.channels, axis=-1)\n\n # Merge Target Channels\n if self.merge_method == 'maximum':\n channel_subset = np.max(channel_subset, axis=-1)[..., np.newaxis]\n\n # Join Channels\n if self.channels is None:\n output_data = channel_subset\n else:\n output_data = np.concatenate((reminaing_channel_subset, channel_subset), axis=-1)\n\n data_group.preprocessed_case = output_data\n self.output_data = output_data", "def copy(cls, group: 'Group') -> 'Group':\n from apps.enrollment.courses.models.term import Term\n from apps.schedulersync.models import TermSyncData\n\n def copy_term(t: Term) -> Term:\n classrooms = list(t.classrooms.all())\n term_sync_data: TermSyncData = list(t.termsyncdata_set.all())\n t.pk = None\n t.save()\n t.classrooms.set(classrooms)\n for tsd in term_sync_data:\n tsd.pk = None\n tsd.term = t\n tsd.save()\n return t\n\n copied_terms = [copy_term(t) for t in group.term.all()]\n copy = cls.objects.get(pk=group.pk)\n copy.pk = None\n copy.save()\n copy.term.set(copied_terms)\n return copy", "def copyGroupFrom(self, groupName, sourceDesign, sourceProject=None, sourceProjectPath=None):\n oName = self.project_name\n if sourceProject == oName or sourceProject is None:\n oSrcProject = self._desktop.GetActiveProject()\n else:\n self._desktop.OpenProject(sourceProjectPath)\n oSrcProject = self._desktop.SetActiveProject(sourceProject)\n\n oDesign = oSrcProject.SetActiveDesign(sourceDesign)\n oEditor = oDesign.SetActiveEditor(\"3D Modeler\")\n oEditor.Copy([\"NAME:Selections\", \"Selections:=\", groupName])\n\n self.modeler.oeditor.Paste()\n self.modeler.primitives.refresh_all_ids()\n self.materials._load_from_project()\n return True", "def CopyAllSubElementsTo(self, other_group, ignore):\n # pylint: disable=protected-access\n collections_to_update = [\n (self._groups_to_load, other_group._groups_to_load),\n (self._commands_to_load, other_group._commands_to_load)]\n\n for src, dst in collections_to_update:\n for name, info in src.iteritems():\n if name in ignore:\n continue\n (module_dir, module_path, name, unused_track) = info\n dst[name] = (module_dir, module_path, name,\n other_group.ReleaseTrack())", "def copy(self, copy_meta_data=False):\n if self.meta_data is not None:\n if copy_meta_data:\n new_meta_data = (self.meta_data[0].copy(),\n self.meta_data[1].copy())\n else:\n new_meta_data = self.meta_data\n else:\n new_meta_data = None\n return xndarray(self.data.copy(), self.axes_names[:],\n self.axes_domains.copy(),\n self.value_label, new_meta_data)", "def copy (a_data) :\n return a_data.copy()", "def preprocess(self, data_group):\n\n input_data = data_group.preprocessed_case\n output_shape = list(input_data.shape)\n output_shape[-1] = len(self.label_splits)\n output_data = np.zeros(output_shape)\n\n # Merge Target Channels\n if self.split_method == 'integer_levels':\n for label_idx, label in enumerate(self.label_splits):\n if type(label) is list:\n # This is a little clunky\n single_label_data = np.zeros(output_shape[0:-1])[..., np.newaxis]\n for index in label:\n single_label_data += np.where(input_data == index, 1, 0)\n single_label_data = 
np.where(single_label_data > 0, 1, 0)\n else:\n single_label_data = np.where(input_data == label, 1, 0)\n\n output_data[..., label_idx] = single_label_data[..., 0]\n\n data_group.preprocessed_case = output_data\n self.output_data = output_data", "def copyDataFrom (self, other):\n\n self.outErrorPackets=other.outErrorPackets\n self._myHasOutErrorPackets=other._myHasOutErrorPackets\n \n self.inErrorPackets=other.inErrorPackets\n self._myHasInErrorPackets=other._myHasInErrorPackets\n \n self.inDiscardPackets=other.inDiscardPackets\n self._myHasInDiscardPackets=other._myHasInDiscardPackets\n \n self.outUnicastPackets=other.outUnicastPackets\n self._myHasOutUnicastPackets=other._myHasOutUnicastPackets\n \n self.inMulticastPackets=other.inMulticastPackets\n self._myHasInMulticastPackets=other._myHasInMulticastPackets\n \n self.outBroadcastPackets=other.outBroadcastPackets\n self._myHasOutBroadcastPackets=other._myHasOutBroadcastPackets\n \n self.inBroadcastPackets=other.inBroadcastPackets\n self._myHasInBroadcastPackets=other._myHasInBroadcastPackets\n \n self.outMulticastPackets=other.outMulticastPackets\n self._myHasOutMulticastPackets=other._myHasOutMulticastPackets\n \n self.inUnknownProtocolPackets=other.inUnknownProtocolPackets\n self._myHasInUnknownProtocolPackets=other._myHasInUnknownProtocolPackets\n \n self.outDiscardPackets=other.outDiscardPackets\n self._myHasOutDiscardPackets=other._myHasOutDiscardPackets\n \n self.inUnicastPackets=other.inUnicastPackets\n self._myHasInUnicastPackets=other._myHasInUnicastPackets\n \n self.outOctets=other.outOctets\n self._myHasOutOctets=other._myHasOutOctets\n \n self.inOctets=other.inOctets\n self._myHasInOctets=other._myHasInOctets", "def copy(self):\n new_data_collection = DataCollection()\n for item in self.iteritems():\n new_data_collection.add_data(item)\n return new_data_collection", "def copyItem(self):\n # extract all selected item\n itms = []\n for item in self.scene.selectedItems():\n if isinstance(item, DiagramItem):\n itms.append(item.data)\n\n # pickle data\n mime = QMimeData()\n mime.setData( self.__mime__ , QByteArray(pickle.dumps(itms)) )\n\n # copy to clipboard\n QApplication.clipboard().setMimeData(mime,QClipboard.Clipboard)\n self.pasteAction.setEnabled(True)", "def cp(self, copy_from, copy_to, **kwargs):\n return self.exec_command('cp %s %s' % (copy_from, copy_to), **kwargs)", "def copy(self, event):\n return", "def copy_item(self, origin_item_id, target_app_id, field_conversor, extra_data = None, silent=False, hook=True):\n source_item = self.get_item(origin_item_id, external_id=False)\n if extra_data is None:\n destination_dict = {}\n else:\n destination_dict = extra_data\n try:\n for origin, destination in field_conversor:\n try:\n origin = int(origin)\n source_field = source_item[\"values\"][origin]\n except ValueError:\n related_id, field_id = origin.split('#')\n source_field = source_item[\"values\"][int(related_id)][\"value\"][\"values\"][int(field_id)]\n if source_field['type'] == \"image\":\n new_value = []\n for value in source_field['value']:\n new_value.append(self.copy_file(value[1])['file_id'])\n else:\n new_value = source_field['value']\n destination_dict[destination] = new_value\n except KeyError as e:\n self.comment(\n 'item',\n origin_item_id,\n {'value': 'Ha habido un error con la llave %s (IM lo sabe interpretar :) ) pero probablemente no están todos los campos que pide la aplicación nacional y por eso no se pudo crear.' 
% str(e)}\n )\n return 'Key Error: ' + str(e)\n new_item = self.create_item({\"fields\":destination_dict}, app_id = target_app_id)\n self.comment(\n 'item',\n origin_item_id,\n {'value': 'Se ha copiado el EP al espacio nuevo de PODIO exitosamente en la direccion %s' % new_item['link']}\n )\n return new_item\n #make new item\n #return return code", "def copy_with(\n self,\n *,\n bitrate = ...,\n name = ...,\n parent_id = ...,\n permission_overwrites = ...,\n position = ...,\n region = ...,\n user_limit = ...,\n ):\n # bitrate\n if bitrate is ...:\n bitrate = self.bitrate\n else:\n bitrate = validate_bitrate(bitrate)\n \n # region\n if region is ...:\n region = self.region\n else:\n region = validate_region(region)\n \n # user_limit\n if user_limit is ...:\n user_limit = self.user_limit\n else:\n user_limit = validate_user_limit(user_limit)\n \n # Construct\n new = ChannelMetadataGuildMainBase.copy_with(\n self,\n name = name,\n permission_overwrites = permission_overwrites,\n parent_id = parent_id,\n position = position,\n )\n new.bitrate = bitrate\n new.region = region\n new.user_limit = user_limit\n return new", "def _shallow_clone_dataset(self: TDataWTransform) -> TDataWTransform:\n dataset_copy = copy.copy(self)\n dataset_copy._transform_groups = copy.copy(dataset_copy._transform_groups)\n dataset_copy._frozen_transform_groups = copy.copy(\n dataset_copy._frozen_transform_groups\n )\n return dataset_copy", "def copy(self, src, dest):\n\n src = os.path.join(os.path.dirname(__file__), \"collections\", \"kitchensink\", src)\n dest = os.path.join(self.checkout, dest)\n if os.path.isdir(src):\n shutil.copytree(src, dest)\n else:\n shutil.copy(src, dest)\n return dest" ]
[ "0.7007891", "0.57221156", "0.54404587", "0.54385394", "0.5380675", "0.52697796", "0.51893926", "0.5166701", "0.516446", "0.513698", "0.51313716", "0.5121373", "0.50766444", "0.5023055", "0.5015248", "0.49674436", "0.4964484", "0.49641585", "0.4957558", "0.49224517", "0.49059352", "0.48993322", "0.48830914", "0.48807132", "0.4849667", "0.48391968", "0.48083588", "0.47845468", "0.47665167", "0.47636878" ]
0.804995
0
Migrates one Group to another Tenant =) Won't stop until the migration is complete.
def migrate(self):
    self.destination_group = self.get_destination_group()
    self.destination_group = self.copy_group()
    self.copy_custom_data()

    logger.info('Successfully copied Group: {}'.format(self.destination_group.name.encode('utf-8')))
    return self.destination_group
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_migration(self):\n step = \"Migrating Database\"\n try:\n self.slacker.send_thread_reply(step)\n self.kuber.run_migration(tag=self.tag, source=config.APP_MIGRATOR_SOURCE)\n self.migration_completed = True\n except Exception as e:\n self.raise_step_error(step=step, error=e)", "def create_organizer_group_migrations_wrapped(apps, schema_editor):\n # Group = apps.get_model(\"auth\", \"Group\")\n # Permission = apps.get_model(\"auth\", \"Permission\")\n create_organizer_group()", "def testMigrateAnInstanceInAnInstanceGroup(self):\n ### create test resources\n instance_name_1 = 'end-to-end-test-instance-1'\n instance_selfLink_1 = \\\n self.test_resource_creator.create_instance_using_template(\n instance_name_1,\n self.test_resource_creator.legacy_instance_template_selfLink)[\n 'targetLink']\n original_config = self.google_api_interface.get_instance_configs(\n instance_name_1)\n unmanaged_instance_group_name = 'end-to-end-test-unmanaged-instance-group-1'\n self.test_resource_creator.create_unmanaged_instance_group(\n unmanaged_instance_group_name,\n [instance_name_1])\n ### start migration\n selfLink_executor = SelfLinkExecutor(self.compute,\n instance_selfLink_1,\n self.test_resource_creator.network_name,\n self.test_resource_creator.subnetwork_name,\n False)\n\n with self.assertRaises(AmbiguousTargetResource):\n migration_handler = selfLink_executor.build_migration_handler()\n migration_handler.network_migration()\n ### check migration result\n # the migration never starts, so the config didn't change\n new_config = self.google_api_interface.get_instance_configs(\n instance_name_1)\n self.assertEqual(new_config, original_config)\n print('Pass the current test')", "def update_user_backward(apps, schema_editor):\n Group.objects.all().delete()", "def __upgrade_group(self, group_old: Group, group_new: str) -> None:\n def upgrade_permissions(permissions_list_1: list, permissions_list_2: list, action) -> list:\n permissions_to_change = [\n permission_change\n for permission_change in permissions_list_1\n if permission_change not in permissions_list_2\n ]\n return self.__upgrade_group_permissions(group_old, permissions_to_change, action)\n\n messages = [f'Group {group_new} permission changes']\n\n permissions_from_db = [p.codename for p in group_old.permissions.all()]\n permissions_from_file = main_app_groups[group_new]\n\n # in db but not in file -> remove\n messages += upgrade_permissions(permissions_from_db, permissions_from_file, REMOVE)\n # in file but not in db -> add\n messages += upgrade_permissions(permissions_from_file, permissions_from_db, ADD)\n\n if len(messages) > 1:\n self.__print_messages(messages)", "def migrate(self):\n\tpass", "def migration():", "def do_push_group(dbsync, group):\n pass", "def sync_to_group(self):\n \n try:\n self.connection.System.Session.set_active_folder(\"/Common\")\n for failover_group in self.connection.Management.DeviceGroup.get_list():\n if 'device-group-failover' in failover_group:\n device_group = failover_group.replace(\"/Common/\", '')\n self.connection.System.ConfigSync.synchronize_to_group_v2(\n group=device_group,device='',force=False\n )\n return \"Cluster Sync Complete\"\n except Exception as e:\n \n raise Exception(\"###############\\n Cluster Sync Failure\\n {} \\n ###############\\n\".format(e))", "def test_05_self_can_downgrade_group(self):\n meowers = self.meowers\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_group_with_user(meowers, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in meowers.gaccess.edit_users)\n 
self.assertTrue(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_group_unshare_users(meowers)))\n dog.uaccess.share_group_with_user(meowers, dog, PrivilegeCodes.VIEW)\n self.assertFalse(dog in meowers.gaccess.edit_users)\n self.assertTrue(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_group_unshare_users(meowers)))", "def test_serviceRunsMigrations(self):\n m1 = TestMigration(store=self.store)\n m2 = TestMigration(store=self.store)\n self.store.powerUp(m1)\n self.store.powerUp(m2)\n self.assertEquals(m1.ran, 0)\n self.assertEquals(m2.ran, 0)\n self.manager.startService()\n self.assertEquals(m1.ran, 1)\n self.assertEquals(m2.ran, 1)", "def test_success(self, clean_mongo_tenant_migration):\n tenantadm_cli = cli.CliTenantadm()\n self.logger.info(\"Starting `test_success`\")\n\n uuidv4 = str(uuid.uuid4())\n name, username, password = (\n \"test.mender.io-\" + uuidv4,\n \"some.user+\" + uuidv4 + \"@example.com\",\n \"secretsecret\",\n )\n tenant_id = tenantadm_cli.create_org(\n name=name, username=username, password=password\n )\n self.logger.debug(\"Tenant id: %s\" % tenant_id)\n\n # Retry login every second for 3 min\n for i in range(60 * 3):\n rsp = self.api_mgmt_useradm.call(\n \"POST\", api.useradm.URL_LOGIN, auth=(username, password)\n )\n if rsp.status_code == 200:\n break\n time.sleep(1)\n\n assert rsp.status_code == 200\n\n self.logger.info(\"`test_success` finished successfully.\")", "def test12(self):\n ###get a dataset to migrate from global dbs\n dest_datasets = set((dataset['dataset'] for dataset in self.api.listDatasets()))\n ###only dataset after last DBS2->3 because of the parentage issue in DBS 2 min_cdate=1368162000 =10May2013\n src_datasets = set((dataset['dataset'] for dataset in self.cmsweb_api.listDatasets(min_cdate=1368162000)))\n dataset_to_migrate = choice(list(src_datasets.difference(dest_datasets)))\n\n ###submit migration request\n toMigrate = {'migration_url': self.source_url,\n 'migration_input': dataset_to_migrate}\n migration_request = self.migration_api.submitMigration(toMigrate)\n self.assertTrue('migration_request_id' in migration_request['migration_details'])\n migration_request_id = migration_request['migration_details']['migration_request_id']\n print(\"____toMigrate___\")\n print(toMigrate)\n print(\"----------migration_request -----------\")\n print(migration_request)\n ###check migration status for max. 
300s (should be enough time to migrate the dataset)\n with Timeout(300):\n while True:\n request_status = self.migration_api.statusMigration(migration_rqst_id=migration_request_id)\n if request_status[0]['migration_status'] == 2:\n break\n\n ###validate dataset migration\n def check(input, output):\n non_comparable_keys = ('block_id', 'dataset_id', 'last_modification_date',\n 'parent_file_id', 'primary_ds_id')\n if isinstance(input, dict):\n for key, value in input.items():\n if key in non_comparable_keys:\n continue ###do not compare id's\n if key in ('processing_era',): ###do compare create_by, creation_date for re-used entries\n for key2remove in ('create_by', 'creation_date',):\n try:\n del input[key][key2remove]\n del output[key][key2remove]\n except KeyError:\n pass\n self.assertTrue(key in output)\n check(value, output[key])\n elif isinstance(input, list):\n for element_in, element_out in zip(sorted(remove_non_comparable_keys(input, non_comparable_keys)),\n sorted(remove_non_comparable_keys(output, non_comparable_keys))):\n check(element_in, element_out)\n else:\n self.assertEqual(str(input), str(output))\n\n for block_name in (block['block_name'] for block in self.cmsweb_api.listBlocks(dataset=dataset_to_migrate)):\n block_dump_src = self.cmsweb_api.blockDump(block_name=block_name)\n block_dump_dest = self.api.blockDump(block_name=block_name)\n check(block_dump_src, block_dump_dest)\n\n ###try to delete successfully executed migration request\n toDelete = {'migration_rqst_id': migration_request_id}\n self.assertRaises(HTTPError, self.migration_api.removeMigration, toDelete)", "def test_break_security_group_rollback_previous():", "def migrate_to_dest(self):\n self.executor.loader.build_graph()\n self.executor.migrate(self.migrate_to)", "def update_user_forward(apps, schema_editor):\n group = Group.objects.update_or_create(\n id=1,\n name=\"Administrator\"\n )\n Group.objects.update_or_create(\n id=2,\n name=\"Manager\"\n )\n Group.objects.update_or_create(\n id=3,\n name=\"Leader\"\n )\n Group.objects.update_or_create(\n id=4,\n name=\"Sale\"\n )", "def test_duplicate_organization(self, clean_mongo_tenant_migration):\n self.logger.debug(\"Starting `test_duplicate_username`\")\n tenantadm_cli = cli.CliTenantadm()\n\n uuidv4 = str(uuid.uuid4())\n name, username, password = (\n \"test.mender.io-\" + uuidv4,\n \"some.user+\" + uuidv4 + \"@example.com\",\n \"secretsecret\",\n )\n tenant_id = tenantadm_cli.create_org(\n name=name, username=username, password=password\n )\n self.logger.debug(\"Tenant id: %s\" % tenant_id)\n\n # Retry login every second for 2 min\n for _ in retrier(attempts=120, sleepscale=1, sleeptime=1):\n rsp = self.api_mgmt_useradm.call(\n \"POST\", api.useradm.URL_LOGIN, auth=(username, password)\n )\n if rsp.status_code == 200:\n self.logger.debug(\"Successfully logged into account\")\n break\n\n assert rsp.status_code == 200\n\n name, username, password = (\n \"test.mender.io-\" + uuidv4,\n \"some.other.user+\" + uuidv4 + \"@example.com\",\n \"secretsecret\",\n )\n tenant_id = tenantadm_cli.create_org(\n name=name, username=username, password=password\n )\n self.logger.debug(\"Tenant id: %s\" % tenant_id)\n\n # Retry login every second for 2 min\n for _ in retrier(attempts=120, sleepscale=1, sleeptime=1):\n rsp = self.api_mgmt_useradm.call(\n \"POST\", api.useradm.URL_LOGIN, auth=(username, password)\n )\n if rsp.status_code == 200:\n break\n assert rsp.status_code == 200\n\n self.logger.info(\"`test_duplicate_username` finished successfully.\")", "def 
test_migration_task_rollback(self):\n server, source_host, target_host = self._create_server()\n self._disable_target_host(target_host)\n self._stub_delete_server_during_scheduling(server)\n\n # Now start the cold migration which will fail due to NoValidHost.\n self.api.post_server_action(server['id'], {'migrate': None},\n check_response_status=[202])\n # We cannot monitor the migration from the API since it is deleted\n # when the instance is deleted so just wait for the failed instance\n # action event after the task rollback happens.\n # Note that we get InstanceNotFound rather than NoValidHost because\n # the NoValidHost handler in ComputeTaskManager._cold_migrate calls\n # _set_vm_state_and_notify which raises InstanceNotFound and masks\n # the NoValidHost error.\n self._assert_resize_migrate_action_fail(\n server, instance_actions.MIGRATE, 'InstanceNotFound')\n self._assert_no_allocations(server)", "def upgrade():\n try:\n op.drop_table(\"ggrc_gdrive_integration_alembic_version\")\n except sa.exc.OperationalError as e:\n code, _ = e.orig.args\n if code == 1051: # doesn't exist\n # we're in a new DB with no trace of the removed chain\n pass\n else:\n raise\n\n # The following duplicates a part of a gdrive-related migration,\n # since a bunch of old migrations in ggrc refer to meetings table.\n # This part is relevant only for db_reset (new databases), so we\n # shouldn't recreate this table in downgrade.\n try:\n op.drop_table(\"meetings\")\n except sa.exc.OperationalError as e:\n code, _ = e.orig.args\n if code == 1051: # doesn't exist\n # we're in an old DB where meetings has been dropped in the removed chain\n pass\n else:\n raise", "def migrate():\n if apply_migrations():\n click.echo(OK)\n else:\n sys.exit(1)", "def test_deploy_to_group(self, clean_mongo, test_case):\n self.logger.info(\"RUN: %s\", test_case[\"name\"])\n\n uuidv4 = str(uuid.uuid4())\n tenant, username, password = (\n \"test.mender.io-\" + uuidv4,\n \"some.user+\" + uuidv4 + \"@example.com\",\n \"secretsecret\",\n )\n tenant = create_org(tenant, username, password, \"enterprise\")\n create_roles(tenant.users[0].token, test_case[\"roles\"])\n test_case[\"user\"][\"name\"] = test_case[\"user\"][\"name\"].replace(\"UUID\", uuidv4)\n test_user = create_user(tid=tenant.id, **test_case[\"user\"])\n login(test_user, test_case[\"use_personal_access_token\"])\n\n # Initialize tenant's devices\n grouped_devices = setup_tenant_devices(tenant, test_case[\"device_groups\"])\n\n # Upload a bogus artifact\n artifact = Artifact(\"tester\", [\"qemux86-64\"], payload=\"bogus\")\n\n dplmnt_MGMT = ApiClient(deployments.URL_MGMT)\n rsp = dplmnt_MGMT.with_auth(tenant.users[0].token).call(\n \"POST\",\n deployments.URL_DEPLOYMENTS_ARTIFACTS,\n files=(\n (\n \"artifact\",\n (\"artifact.mender\", artifact.make(), \"application/octet-stream\"),\n ),\n ),\n )\n assert rsp.status_code == 201, rsp.text\n\n # Attempt to create deployment with test user\n rsp = dplmnt_MGMT.with_auth(test_user.token).call(\n \"POST\",\n deployments.URL_DEPLOYMENTS_GROUP.format(name=test_case[\"deploy_group\"]),\n body={\"artifact_name\": \"tester\", \"name\": \"dplmnt\"},\n )\n assert rsp.status_code == test_case[\"status_code\"], rsp.text\n self.logger.info(\"PASS: %s\" % test_case[\"name\"])", "def tearDownClass(cls):\n management.call_command(\"migrate\")", "def test_vm_migration_across_hosts(self):\n\n # Create security group for the server\n group_create_body_update, _ = self._create_security_group()\n\n # Create server with security group\n name = 
data_utils.rand_name('server-with-security-group')\n server_id = self._create_server_with_sec_group(\n name, self.network['id'],\n group_create_body_update['security_group']['id'])\n self.assertTrue(self.verify_portgroup(self.network['id'], server_id))\n device_port = self.ports_client.list_ports(device_id=server_id)\n port_id = device_port['ports'][0]['id']\n floating_ip = self._associate_floating_ips(port_id=port_id)\n self.assertTrue(self.ping_ip_address(\n floating_ip['floatingip']['floating_ip_address'],\n should_succeed=False))\n\n # Update security group rule for the existing security group\n self.security_group_rules_client.create_security_group_rule(\n security_group_id=group_create_body_update['security_group']['id'],\n protocol='icmp',\n direction='ingress',\n ethertype=self.ethertype\n )\n self.assertTrue(self.ping_ip_address(\n floating_ip['floatingip']['floating_ip_address'],\n should_succeed=True))\n cluster = cfg.CONF.VCENTER.cluster_in_use\n content = self._create_connection()\n host_dic = self._get_host_name(server_id)\n vm_host = host_dic['host_name']\n vm_host_ip = vm_host.name\n cluster_hosts = self._get_hosts_for_cluster(content, cluster)\n if len(cluster_hosts.host) < 2:\n msg = \"Min two hosts needed in cluster for Vmotion\"\n raise testtools.TestCase.skipException(msg)\n for host in cluster_hosts.host:\n if host.name != vm_host_ip:\n dest_host = host\n # Live Migration\n task = self._migrate_vm(content, server_id, dest_host)\n self._wait_for_task(task, content)\n self.assertTrue(self.ping_ip_address(\n floating_ip['floatingip']['floating_ip_address'],\n should_succeed=True))", "def migrate(ctx):\n connecter = ScalingoInterface(ctx.obj)\n connecter.manage_py(\"migrate\")", "def test13(self):\n ###get a block to migrate from global dbs\n dest_datasets = set((dataset['dataset'] for dataset in self.api.listDatasets()))\n ###only dataset after last DBS2->3 because of the parentage issue in DBS 2 min_cdate=1368162000 =10May2013\n src_datasets = set((dataset['dataset'] for dataset in self.cmsweb_api.listDatasets(min_cdate=1368162000)))\n dataset_to_migrate = choice(list(src_datasets.difference(dest_datasets)))\n block_to_migrate = choice([block['block_name']\n for block in self.cmsweb_api.listBlocks(dataset=dataset_to_migrate)])\n\n ###submit migration request\n toMigrate = {'migration_url': self.source_url,\n 'migration_input': block_to_migrate}\n migration_request = self.migration_api.submitMigration(toMigrate)\n self.assertTrue('migration_request_id' in migration_request['migration_details'])\n migration_request_id = migration_request['migration_details']['migration_request_id']\n print(\"____toMigrate___\")\n print(toMigrate)\n print(\"----------migration_request -----------\")\n print(migration_request) \n\n ###check migration status for max. 
300s (should be enough time to migrate the dataset)\n with Timeout(300):\n while True:\n request_status = self.migration_api.statusMigration(migration_rqst_id=migration_request_id)\n if request_status[0]['migration_status'] == 2:\n break\n\n ###validate block migration\n def check(input, output):\n non_comparable_keys = ('block_id', 'dataset_id', 'last_modification_date',\n 'parent_file_id', 'primary_ds_id')\n if isinstance(input, dict):\n for key, value in input.items():\n if key in non_comparable_keys:\n continue ###do not compare id's\n if key in ('processing_era',): ###do compare create_by, creation_date for re-used entries\n for key2remove in ('create_by', 'creation_date',):\n try:\n del input[key][key2remove]\n del output[key][key2remove]\n except KeyError:\n pass\n self.assertTrue(key in output)\n check(value, output[key])\n elif isinstance(input, list):\n for element_in, element_out in zip(sorted(remove_non_comparable_keys(input, non_comparable_keys)),\n sorted(remove_non_comparable_keys(output, non_comparable_keys))):\n check(element_in, element_out)\n else:\n self.assertEqual(str(input), str(output))\n\n block_dump_src = self.cmsweb_api.blockDump(block_name=block_to_migrate)\n block_dump_dest = self.api.blockDump(block_name=block_to_migrate)\n check(block_dump_src, block_dump_dest)\n\n ###try to delete successfully executed migration request\n toDelete = {'migration_rqst_id': migration_request_id}\n self.assertRaises(HTTPError, self.migration_api.removeMigration, toDelete)", "def model_post_migrate(*args, **kwargs):\n global IN_MIGRATIONS\n IN_MIGRATIONS = False", "def commit_backup(self, group, name):\n\n cur_path = self.backup_path(group, name, temp = True)\n new_path = self.backup_path(group, name)\n\n try:\n os.rename(cur_path, new_path)\n except Exception as e:\n raise Error(\"Unable to rename backup data directory '{}' to '{}': {}.\",\n cur_path, new_path, psys.e(e))\n\n self.__on_backup_created(group, name, new_path)", "def __check_new_groups(self) -> None:\n for group in main_app_groups:\n try:\n group_old = Group.objects.get(name=group)\n self.__upgrade_group(group_old, group)\n except ObjectDoesNotExist: # need to create new group\n self.__create_new_group(group)\n\n self.stdout.write(f'Added new group {group} with {main_app_groups[group]} permissions')", "def test_live_migration_task_rollback(self):\n server, source_host, target_host = self._create_server()\n self._disable_target_host(target_host)\n self._stub_delete_server_during_scheduling(server)\n\n # Now start the live migration which will fail due to NoValidHost.\n body = {'os-migrateLive': {'host': None, 'block_migration': 'auto'}}\n self.api.post_server_action(server['id'], body)\n # We cannot monitor the migration from the API since it is deleted\n # when the instance is deleted so just wait for the failed instance\n # action event after the task rollback happens.\n self._wait_for_action_fail_completion(\n server, instance_actions.LIVE_MIGRATION,\n 'conductor_live_migrate_instance')\n self._assert_no_allocations(server)", "def migrate_database():\n log('Migrating the keystone database.', level=INFO)\n service_stop(keystone_service())\n # NOTE(jamespage) > icehouse creates a log file as root so use\n # sudo to execute as keystone otherwise keystone won't start\n # afterwards.\n cmd = ['sudo', '-u', 'keystone', 'keystone-manage', 'db_sync']\n subprocess.check_output(cmd)\n service_start(keystone_service())\n time.sleep(10)\n peer_store('db-initialised', 'True')" ]
[ "0.60504246", "0.5771623", "0.57313025", "0.5703251", "0.56833225", "0.56692", "0.55300903", "0.552122", "0.5495515", "0.54355794", "0.542806", "0.54272103", "0.54244304", "0.5424313", "0.5424132", "0.53701836", "0.5363987", "0.53493965", "0.5324889", "0.5317418", "0.52856", "0.52290314", "0.522869", "0.5208446", "0.51933897", "0.5183253", "0.5176497", "0.5168474", "0.5156726", "0.5154087" ]
0.6369106
0
Compile a list of all allergens
For each allergen, find all ingredient lists that reference it
Get an intersection of those lists to find all candidates for that allergen
Return all ingredients not in the allergen list
def solve_part1(start):
    all_ilists = load_inputs()
    allergen_map = get_allergen_map(all_ilists)
    all_ingredients = get_all_ingredients(all_ilists)

    all_potential_bad_ingredients = set()
    for l in allergen_map.values():
        all_potential_bad_ingredients.update(l)

    safe_ingredients = [a for a in all_ingredients if a not in all_potential_bad_ingredients]

    safe_ingred_count = 0
    for ilist in all_ilists:
        this_ingredients = ilist.get_ingredients()
        this_safe_ingredients = [a for a in this_ingredients if a in safe_ingredients]
        safe_ingred_count += len(this_safe_ingredients)

    return safe_ingred_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_allergens(foods):\n\n # Create a dictionary mapping allergens to lists\n # of ingredients that may contain that allergen\n allergen_foods = {}\n for ingredients, allergens in foods:\n for allergen in allergens:\n allergen_foods.setdefault(allergen, []).append(set(ingredients))\n\n # For each allergen, compute the intersection of the lists\n # computed above. This will give us the set of ingredienta\n # that could contain that allergen\n candidate_ingredients = {}\n for allergen in allergen_foods:\n candidate_ingredients[allergen] = set.intersection(*allergen_foods[allergen])\n\n # Repeatedly find an allergen that can only be matched to a single\n # ingredient, and remove that ingredient from the list of candidate\n # ingredients for all the other allergens.\n allergens = {}\n while len(candidate_ingredients) > 0:\n\n for single_allergen, cings in candidate_ingredients.items():\n if len(cings) == 1:\n ingredient = cings.pop()\n allergens[single_allergen] = ingredient\n break\n\n del candidate_ingredients[single_allergen] \n\n for allergen in candidate_ingredients:\n if allergen != single_allergen:\n ingredient = allergens[single_allergen]\n candidate_ingredients[allergen].discard(ingredient)\n\n return allergens", "def get_beer_ingredients(beer):\n beer_ingredients = []\n for ing in beer['ingredients']:\n for item in beer['ingredients'][ing]:\n if 'name' in item:\n if item['name'] not in beer_ingredients:\n beer_ingredients.append(item['name'])\n\n return beer_ingredients", "def fullIn(C, g):\n for set in C:\n if not fullCmpSets(set, g):\n return 1", "def get_all_candidates(self) -> list:", "def filter_non_ingredient(ingredient_list):\n stop_words = set(stopwords.words('english'))\n \n filtered_list = []\n add_list = 0 #a dummy variable to add a text to filtered list\n for phrases in set(ingredient_list): #run through only one item in set (removes duplicates)\n\n for word in phrases:\n if word in stop_words:\n phrases.replace(word,'')\n\n #if one of the word in a phrase is ingredient, counts in to list\n for word in word_tokenize(phrases): #phrases can be phrase (run through phrases)\n \n is_ingredient = is_it_ingredient(word) #returns true if a word is ingridient\n \n if is_ingredient == True:\n add_list = 1\n else:\n add_list = 0\n\n ##if one of the word in a phrase is ingredient, counts in to list\n if add_list == 1 :\n\n filtered_list.append(phrases.capitalize())\n add_list = 0 \n\n return filtered_list", "def _get_invariom_list(self):\n self.invariom_list = []\n for molecule in self.values():\n for atom in molecule.atoms:\n for invariom in atom.invarioms:\n if not invariom in self.invariom_list:\n self.invariom_list.append(invariom)", "def get_all_ingredients(self) -> List[str]:\n return [ingredient for ingredient in self.inventory_availability]", "def gen_all_holds(hand):\n from_hand = [()]\n for item in hand:\n for subset in from_hand:\n from_hand = from_hand + [tuple(subset) + (item, )]\n \n return set(from_hand)", "def generateCandidates(self):\n\t\tprint(\"Candidate list:\\n\")\n\t\tkeys = list(self.prune_list.keys())\n\t\ttuple_count = len(keys[0])\n\t\tprune_list = {}\n\t\ttup = []\n\t\tfor v in comb(keys, 2):\n\t\t\ta = set(v[0])\n\t\t\tb = set(v[1])\n\t\t\t\n\t\t\t# If there's as many common element in a & b as one less than tuple_count\n\t\t\tif((len(a & b) == (tuple_count - 1)) and (tuple(a | b) not in tup)):\n\t\t\t\ttup.append(tuple(a | b))\n\t\t\t\tprint(tup[-1])\n\t\t\t\t# Update prune list\n\t\t\t\tcount = self.getSupport(tup[-1])\n\t\t\t\tif(count >= 
self.support):\n\t\t\t\t\tprune_list[tup[-1]] = count\n\t\treturn prune_list", "def get_source_candidates(all_data_epigen):\n candids = {s:\n [np.where(np.array(c[1])!=0)[0] for c in mdata[\"test\"] ]\n for s, mdata in all_data_epigen.items()}\n return candids", "def getLigandNbrs(resids: List[Residue], struct:Structure)->List[ResidueDict]:\n\n ns = NeighborSearch(list( struct.get_atoms() ))\n nbrs = []\n\n for r in resids:\n # a ligand consists of residues\n resatoms = r.child_list[0]\n # each residue has an atom plucked at random\n for nbrresidues in ns.search(resatoms.get_coord(), 5,level='R'):\n # we grab all residues in radius around that atom and extend the list of neighbors with those\n nbrs.extend([nbrresidues])\n\n # Filter out the residues that constitute the ligand itself\n filtered = [] \n for neighbor in nbrs:\n present = 0\n for constit in resids:\n if ResidueDict(constit)==ResidueDict( neighbor ):\n present = 1\n if present == 0:\n filtered.append(ResidueDict(neighbor))\n\n return [ * map(lambda x: addBanClass(x) , set(filtered) ) ]", "def get_refinements(self, agent):\n agents = set()\n for ag in self.model_agents:\n if not ag.matches(agent) and ag.refinement_of(agent, bio_ontology):\n agents.add(ag)\n return list(agents)", "def compareWithAll(lijst, previouslist, feedback = 0):\n\n global usedcombos\n\n results = []\n\n\n if feedback == 2: #to make sure there's a 2 letter combination with gaps\n for i in previouslist:\n for letter1, letter2 in lijst:\n if letter1 in i and letter2 in i:\n results.append(i)\n\n elif feedback == 3: #to make sure there's a 3 letter combination with gaps\n for i in previouslist:\n for letter1, letter2, letter3 in lijst:\n if letter1 in i and letter2 in i and letter3 in i:\n results.append(i)\n else:\n for i in previouslist:\n\n for j in range(len(lijst)):\n\n if lijst[j] in i:\n\n results.append(i)\n\n results = [item for item in results if item not in usedcombos]\n results = list(dict.fromkeys(results))\n\n print(f\"It seems I only {len(results)} options left!\")\n\n return AIguessing(results)", "def Allcombos():\n\n global allcombos\n\n allcombos = []\n\n results = product(\"ABCDEF\", repeat=4)\n\n allcombos = resulttolist(results)\n\n return AIguessing(allcombos)", "def collect_coexist(self):\r\n co_list = []\r\n ner_dictKeyList = list(self.ner_dict.keys())\r\n for words in self.ner_sents:\r\n co_ners = set(ner_dictKeyList).intersection(set(words))\r\n co_info = self.combination(list(co_ners))\r\n co_list += co_info\r\n if not co_list:\r\n return []\r\n return {i[0]: i[1] for i in Counter(co_list).most_common()}", "def consolidate_ingredients(breakfasts, lunches, dinners):\n total_ingredients = {}\n meals = [breakfasts, lunches, dinners]\n\n for meal in meals:\n for collection in meal:\n ingredients = fetch_ingredients(collection)\n for lst in ingredients:\n if lst[0] in total_ingredients:\n total_ingredients[lst[0]][0] += lst[1]\n total_ingredients[lst[0]][1].add(lst[2])\n else:\n total_ingredients[lst[0]] = [lst[1], set([lst[2]])]\n\n return total_ingredients", "def intersect(self, other_list):\n assert type(other_list) == type(self)\n \n# if len(self.vals) >= len(other_list.vals):\n# big = self.vals\n# small = other_list.vals\n# else:\n# small = self.vals\n# big = other_list.vals\n# \n# common_list = intSet()\n# for e in big:\n# if e in small:\n# common_list.insert(e)\n# return common_list\n\n common_list = intSet() \n for e in self.vals:\n if other_list.member(e): #if the current e is a member of other_list\n common_list.insert(e)\n 
return common_list", "def __get_ids_of_all_unrelaxed_candidates__(self):\n\n all_unrelaxed_ids = set([t.gaid for t in self.c.select(relaxed=0)])\n all_relaxed_ids = set([t.gaid for t in self.c.select(relaxed=1)])\n all_queued_ids = set([t.gaid for t in self.c.select(queued=1)])\n\n actually_unrelaxed = [gaid for gaid in all_unrelaxed_ids\n if (gaid not in all_relaxed_ids and\n gaid not in all_queued_ids)]\n\n return actually_unrelaxed", "def filter_to_candidate(self):\n filtered = { k: [] for k in self.annsets }\n for key, annset in self.annsets.items():\n for a in annset:\n if a.overlaps(self.candidate):\n filtered[key].append(a)\n self.annsets = filtered", "def solveAll(self) :\n return [g for g in self if not g.solveAll()]", "def get_matching_genes(self, other):\n innovs = {g.innov_num for g in other.link_genes}\n if not innovs:\n return []\n max_innov = max(innovs)\n return [g for g in self.link_genes\n if g.innov_num in innovs]", "def findIslands(self):\n\n # First lets find the shores.\n shoreList = self.findShores()\n\n # Initialize Blank Values.\n N, S, E, W = (None for i in range(4))\n\n # Next, we find all the furthest extremities among all shore lists.\n # In theory, the only extremities that can occur for shorelines that\n # Don't belong to the main pond body are along the map edge.\n for index, shore in enumerate(shoreList):\n extremityHash = shore.findExtremities()\n if index == 0:\n N, S, E, W = ([shore] for i in range(4))\n continue\n if extremityHash['N'][0].x < N[0].findExtremities()['N'][0].x:\n N = [shore]\n elif extremityHash['N'][0].x == N[0].findExtremities()['N'][0].x:\n N.append(shore)\n if extremityHash['S'][0].x > S[0].findExtremities()['S'][0].x:\n S = [shore]\n elif extremityHash['S'][0].x == S[0].findExtremities()['S'][0].x:\n S.append(shore)\n if extremityHash['E'][0].y > E[0].findExtremities()['E'][0].y:\n E = [shore]\n elif extremityHash['E'][0].y == E[0].findExtremities()['E'][0].y:\n E.append(shore)\n if extremityHash['W'][0].y < W[0].findExtremities()['W'][0].y:\n W = [shore]\n elif extremityHash['W'][0].y == W[0].findExtremities()['W'][0].y:\n W.append(shore)\n\n # Now, lets flatten the list of cardinal extremities\n flatList = [val for sublist in [N, S, E, W] for val in sublist]\n counter = Counter(flatList)\n\n # In theory, the main pond shore should have the most extremities\n probablyPond = counter.most_common(1)\n\n # Wow, what a piece of crap. 
I feel ashamed of the next 6 lines.\n if probablyPond[0][0] < 4:\n raise Exception(\"Largest Pond does not have 4 max points.\"\n \" Something is horribly Wrong.\")\n if len(probablyPond) != 1:\n raise Exception(\"Equal number of extremities in pond?\"\n \" How can that be?\")\n\n probablyPond = probablyPond[0][0]\n\n # Find any map edges and add them to the Plain Blob Object mapEdge.\n self.mapEdge = self.findMapEdge()\n\n # Well, this probably isn't an island, so drop it from the list.\n shoreList.remove(probablyPond)\n\n # Find any map edges for the island, and create Island Objects.\n islands = list()\n for island in shoreList:\n islands.append(Island(island.points,\n self.analyzeData,\n self.elevation))\n return islands", "def abelian_invariants(self):\n if self.is_trivial:\n return []\n gns = self.generators\n inv = []\n G = self\n H = G.derived_subgroup()\n Hgens = H.generators\n for p in primefactors(G.order()):\n ranks = []\n while True:\n pows = []\n for g in gns:\n elm = g**p\n if not H.contains(elm):\n pows.append(elm)\n K = PermutationGroup(Hgens + pows) if pows else H\n r = G.order()//K.order()\n G = K\n gns = pows\n if r == 1:\n break\n ranks.append(multiplicity(p, r))\n\n if ranks:\n pows = [1]*ranks[0]\n for i in ranks:\n for j in range(i):\n pows[j] = pows[j]*p\n inv.extend(pows)\n inv.sort()\n return inv", "def l_superoctads(contained, not_contained, partial, weight):\n l = superoctads(contained, not_contained, partial, weight)\n return [set24_to_list(v) for v in l]", "def all_genes_in_dataset(self):\n # the empty-set argument is needed in case there are no mutants in the dataset - set.union() with empty args is an error.\n return set.union(set(), *[set(genes) for N_mutants,genes \n in self.dataset.get_gene_dict_by_mutant_number(self.dataset_name).items() if N_mutants>0])", "def gen_all_holds(hand):\n \n answer_set = set([()])\n for dummy_idx in range(len(hand)):\n temp_set = set()\n for partial_sequence in answer_set:\n for item in range(1,len(hand)+1):\n new_sequence = list(partial_sequence)\n new_sequence.append(item)\n if set(tuple(new_sequence)).issubset(set(range(1,len(hand)+1))):\n temp_set.add(tuple(set(new_sequence)))\n answer_set = answer_set.union(temp_set)\n answer_set2 = set([()])\n for seq in answer_set:\n temp_seq = []\n for element in seq: \n temp_el = hand[element -1]\n temp_seq.append(temp_el)\n answer_set2.add(tuple(temp_seq))\n return answer_set2", "def unproductive(g):\n nonts = set(nonterminals(g))\n\n useful = {n for n in nonts if endings(g, n)}\n change = True\n\n while change:\n change = False\n\n for n in nonts.difference(useful):\n for prod in g.productions(n):\n if all(child in useful for child in children(g, prod)):\n useful.add(n)\n change = True\n break\n\n return nonts.difference(useful)", "def get_recipes_by_allergy(self, allergies_list, number_of_results):\n\n # Make sure that query is in the right format\n allergies_formatted = self._format_list_for_query(allergies_list)\n url = \"{}/recipes/search?intolerances={}&number={}&offset=0\".format(\n self.base_url, allergies_formatted, number_of_results\n )\n\n recipe_ids = self._get_recipe_ids(url)\n return [self.get_recipe_by_id(recipe_id) for recipe_id in recipe_ids]", "def getIngredients():\n ingredients = ['Whiskey', 'Tequila', 'Vodka', 'Blue Curacao', 'Orange Juice',\n 'Pineapple Juice', 'Cranberry Juice', 'Sour Mix']\n return ingredients", "def is_allergic_to(self, allergen):\n return allergen in self.list" ]
[ "0.7667913", "0.5850064", "0.5719783", "0.5694653", "0.5650647", "0.5642431", "0.5635721", "0.55769885", "0.55377215", "0.55338204", "0.5510489", "0.5507309", "0.54491836", "0.54376495", "0.5432545", "0.5425725", "0.53932744", "0.53638357", "0.53454673", "0.5335317", "0.53127813", "0.52993566", "0.5292101", "0.52689344", "0.5253799", "0.5232529", "0.5225024", "0.52164525", "0.52089554", "0.51961225" ]
0.6366038
1
Split the list of tags in attribute cols into sub lists grouped by level
def split_tags_to_levels(attribute_cols, max_depth):
    level_tags = []
    for i in range(0, max_depth):
        level = max_depth - i
        current_level_tags = []
        for att in attribute_cols:
            att_level = len(att.split('.')) - 1
            if att_level == level:
                current_level_tags += [att]
        level_tags.append(current_level_tags)
    return level_tags
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_unique_tags(df):\n tags = []\n\n for index, row in df.iterrows():\n tags = list(set(tags + ast.literal_eval(row.tags)))\n\n pdb.set_trace()", "def splitBy(data, attribute_id):\n \n col = getColumn(data, attribute_id)\n values = set(col)\n split_data = [] \n for i in values:\n subset = [row for row in data if row[attribute_id] == i]\n split_data.append(subset)\n \n return split_data", "def _tags(self):\n retval = []\n for of in self.tagnames:\n retval.append([of, self.get_datatype(of), self.get(of)])\n return retval", "def split_features(data):\n X = data.copy()\n #split nucleotid string (len=60) into a list of independent characters (DNA nucleotids)\n X['dna'] = X['dna'].map(lambda x : list(str(x).strip()))\n #create 60 new attributes (columns) for each DNA nucleotide index\n #each attribute has name dna_idx where idx is index (1-based) in the list above\n for idx in range(60):\n X['dna_%d' % (idx+1)] = X['dna'].map(lambda x : x[idx])\n #remove the old dna column (redundant information)\n del X['dna']\n #remove descriptor\n del X['id']\n \n return X", "def tags(self):\n return [column.tag if column else '' for column in self.columns]", "def _horiz_explode(df_in, column, drop_original=True):\n # expands list of columns\n df = df_in.copy()\n # expand df.tags into its own dataframe\n tags = df[column].apply(pd.Series)\n tags = tags.rename(columns=lambda x: column + '_' + str(x))\n df = pd.concat([df[:], tags[:]], axis=1)\n\n if drop_original:\n df.drop(columns=column, inplace=True)\n return df, tags.columns.to_list()", "def build_taglist(tags):\n taglist = []\n for tag in tags:\n taglist.append(tag['value'].lower())\n return taglist", "def splits_concept_levels(data: pd.DataFrame, type_col: Optional[str], concept_strings: List) -> List:\n\n con_string, anc_string = concept_strings\n data = data.copy().replace(r'^\\s*$', np.nan, regex=True)\n\n # extract relevant columns\n if type_col is not None:\n all_cols = [x for x in data.columns if type_col not in x]\n conc_type = [x for x in data.columns if con_string.upper() in x.upper() and type_col.upper() in x.upper()]\n conc_type_uri = [x for x in conc_type if x.upper().endswith('URI')][0]\n anc_type = [x for x in data.columns if anc_string.upper() in x.upper() and type_col.upper() in x.upper()]\n anc_type_uri = [x for x in anc_type if x.upper().endswith('URI')][0]\n # extract concept codes from ancestor codes\n concept = data[all_cols + conc_type].dropna(subset=conc_type, how='all').drop_duplicates()\n ancestor = data[all_cols + anc_type].dropna(subset=anc_type, how='all').drop_duplicates()\n # get counts of ontology concepts at each concept level\n concept_ont_codes = [i for j in [x.split(' | ') for x in list(concept[conc_type_uri])] for i in j]\n ancestor_ont_codes = [i for j in [x.split(' | ') for x in list(ancestor[anc_type_uri])] for i in j]\n else:\n concept = data[[x for x in data.columns if x.startswith(con_string)]].dropna(how='all').drop_duplicates()\n ancestor = data[[x for x in data.columns if x.startswith(anc_string)]].dropna(how='all').drop_duplicates()\n concept_ont_codes, ancestor_ont_codes = [], []\n\n return [(concept, concept_ont_codes), (ancestor, ancestor_ont_codes)]", "def process_group(row):\n splitted_name = row.name.split(extreme_separator)\n return sorted(splitted_name) + [row[2]]", "def chunked_tags(train):\n cfdist = nltk.ConditionalFreqDist()\n for t in train:\n for word, tag, chtag in tree2conlltags(t):\n if chtag == \"O\":\n cfdist[tag].inc(False)\n else:\n cfdist[tag].inc(True)\n return [tag for tag in 
cfdist.conditions() if cfdist[tag].max() == True]", "def column_tags(self, data):\n tag_list = list(map(self.get_tag_name, data[11]))\n return ', '.join(sorted(tag_list, key=glocale.sort_key))", "def tags(self):\n return tuple([x.strip() for x in self._dict.get('tags').split(',')])", "def transform_tags(self, instance):\n return instance.tags.split(',')", "def format_labels(self, data):\n ret = []\n for sentence, labels, attr in data:\n sentence_length = len(sentence)\n labels_copy = copy.deepcopy(labels)\n labels_copy = [label[0] for label in labels_copy if type(label) is list ]\n ret.append((sentence, labels_copy, attr))\n return ret", "def tag_words (lx, wds):\n if (wds == []):\n return [[]]\n else:\n tag_first = tag_word (lx, wds[0])\n tag_rest = tag_words (lx, wds[1:])\n return [[fst] + rst for fst in tag_first for rst in tag_rest]", "def tags(row):\n abstract = f\" {row.abstract.lower().replace('.', ' ').replace(',', ' ').replace(';', ' ')} \"\n\n return [\n tag\n for tag, terms in options[\"tag_rules\"].items()\n if any(f\" {term} \" in abstract for term in terms)\n ]", "def parse_and_flatten(df, field_name):\n\n # Parse and flatten the list\n lst = list(df[field_name])\n lst = [x.split('|') for x in lst]\n\n lst_flat = []\n for slist in lst:\n for x in slist:\n lst_flat.append(x)\n return lst_flat", "def normalized_pos_tags(self):\n pos_list = []\n for pos in self.pos_tags:\n pos_list.extend([i for i in re.split('[:;]', pos) if i != ''])\n return pos_list", "def dataset_tags(connection):\n assert connection\n query = \"\"\"select * from tags()\"\"\"\n result = sqlio.read_sql_query(query, connection)\n return [item.strip() for item in result['name']], [tag_id.strip() for tag_id in result['tag_id']]", "def tags_from_csv_field(tag_string):\n split_string = tag_string.split()\n out_list = []\n for tag in split_string:\n out_list.append(clean_tag(tag))\n\n return out_list", "def prepare_tags(self, obj):\n return [tag.name for tag in obj.tags.all()]", "def split_by_attribute(dbsession, group, attr):\n values = []\n for item in group.items:\n if attr in item.attributes and item.attributes[attr]:\n values.extend(item.attributes[attr])\n categories = [\n (v, c) for v, c in Counter(values).most_common() if c < len(group.items) * 0.6666 and c >= 15 # noqa: PLR2004\n ]\n if categories:\n category_values = [v for v, _ in categories]\n has_values = 0\n for item in group.items:\n found = False\n for value in item.attributes[attr]:\n if value in category_values:\n found = True\n break\n if found:\n has_values = has_values + 1\n if has_values / len(group.items) > 0.9: # noqa: PLR2004\n categories.reverse()\n for category in categories:\n new_group = Group(\n value=category[0], label=f\"{group.label} - {category[0]}\", parent=group, split=\"attribute\"\n )\n dbsession.add(new_group)\n for item in list(group.items):\n if category[0] in item.attributes[attr]:\n item.group = new_group\n new_group = Group(value=group.label, label=group.label, parent=group, split=\"attribute\")\n dbsession.add(new_group)\n for item in list(group.items):\n item.group = new_group\n return True\n return False", "def extractAttrs(data):\n\treturn [instance[1:] for instance in data]", "def split_corpus_tags(self, corpus):\n logging.info('Dividindo texto das tags')\n sentences = []\n tags = []\n dict_tags = {}\n for sentence in corpus:\n sentence_tmp = sentence.replace(\"\\n\", '')\n words_tmp = []\n tags_tmp = []\n words = sentence_tmp.split(\" \")\n for word in words:\n tag_word = word.split(\"_\")\n if tag_word[0] == \"\": 
pass\n else:\n words_tmp.append(tag_word[0])\n tags_tmp.append(tag_word[1])\n if not tag_word[1] in dict_tags.keys(): \n dict_tags[tag_word[1]] = {}\n dict_tags[tag_word[1]]['right'] = 0\n dict_tags[tag_word[1]]['pred'] = 0\n dict_tags[tag_word[1]]['pres'] = 1\n else: dict_tags[tag_word[1]]['pres'] += 1\n sentences.append(words_tmp)\n tags.append(tags_tmp)\n return sentences, tags, dict_tags", "def split_columns(l):\n return [l[:3], l[3:7], l[7:12], l[12:16], l[16:]]", "def tags():", "def get_lr_context(self):\n all_list = []\n\n for element in self.big_table[:3]:\n for all_element in element.\\\n select('.col_list > tbody > tr > td > span'):\n all_list.append(all_element.text)\n\n return all_list", "def get_annotation_values(nested_annotation_column1, nested_annotation_column2):\n flat_list1 = [item for sublist in nested_annotation_column1 for item in sublist]\n flat_list2 = [item for sublist in nested_annotation_column2 for item in sublist]\n uniques = set(flat_list1 + flat_list2)\n return(list(uniques))", "def get_tags(element):\n tags = []\n id_num = element.attrib['id']\n for child in element.iter('tag'):\n attr = child.attrib\n\n # check for problematic characters first and skip if matches\n if PROBLEMCHARS.search(attr['k']):\n continue\n\n child_dict = {}\n child_dict['id'] = id_num\n child_dict['value'] = attr['v']\n\n # stackoverflow.com/questions/6903557/splitting-on-first-occurrence\n child_dict['key'] = attr['k'].split(':', 1)[-1]\n\n # Check if the k tag has : in it and treat according to specs\n if LOWER_COLON.search(attr['k']):\n child_dict['type'] = attr['k'].split(':')[0]\n else:\n child_dict['type'] = default_tag_type\n\n # street name check (not all : matches are addr:)\n if child_dict['type'] == 'addr' & child_dict['key'] == 'street':\n child_dict['value'] = update_street_name(child_dict['value'])\n\n tags.append(child_dict)\n\n return tags", "def splitlines(self):\n bucket_shift = 6\n lines = [[] for _ in xrange((len(self) >> bucket_shift) + 1)]\n pos = 0\n new_lines = []\n line_count = 0\n find = self.find\n l = len(self)\n while pos < l:\n line_end = find(\"\\n\", pos)\n if line_end == -1:\n line_end = len(self) # - 1\n new_lines.append(AttrText(self[pos:line_end]))\n for line_no in xrange(pos >> bucket_shift, (line_end >> bucket_shift) + 1):\n lines[line_no].append((pos, line_end, line_count))\n line_count += 1\n pos = line_end + 1\n\n for start, end, attrs in self.attr_spans:\n for line_list in lines[start >> bucket_shift : (end >> bucket_shift) + 1]:\n for line_start, line_end, line_offset in line_list:\n line = new_lines[line_offset]\n line.attr_spans.append(\n (\n max(0, start - line_start),\n min(len(line), end - line_start),\n attrs,\n )\n )\n\n return new_lines" ]
[ "0.5686046", "0.5586532", "0.5547188", "0.5459328", "0.54577595", "0.53406495", "0.533561", "0.52994555", "0.5283539", "0.5202037", "0.51741797", "0.5152816", "0.515235", "0.5147258", "0.51467", "0.51462287", "0.51429564", "0.5140572", "0.5132887", "0.5115969", "0.5078673", "0.5054392", "0.50515646", "0.5035676", "0.50311923", "0.5026486", "0.5024388", "0.49479058", "0.49334708", "0.49264255" ]
0.7616157
0
stage three, create map of summary = column of same name in level above + all tags contained in the level
def create_level_maps(max_depth, level_tags, summary_tags):
    level_maps = []
    for i in range(0, max_depth - 1):
        level_map = dict()
        level = max_depth - i
        for summary in summary_tags[i]:
            all_sub_tags = [att for att in level_tags[i] if summary[:-4] == ".".join(att.split('.')[:-1])]
            matching_tag_above = [att for att in level_tags[i + 1] if summary[:-4] == att]
            level_map[summary] = all_sub_tags + matching_tag_above
        level_maps.append(level_map)
    return level_maps
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def populate_df_with_macro_cats(df, level_maps):\n for level_map in level_maps:\n for summary_att, atts in level_map.items():\n df[summary_att] = df[atts].sum(axis=1).apply(lambda e: 1 if e >= 1 else 0)\n\n return df", "def add_summary_mapping(otu_table,\r\n mapping,\r\n level,\r\n md_as_string=False,\r\n md_identifier='taxonomy'):\r\n counts_by_consensus, sample_map = sum_counts_by_consensus(otu_table,\r\n level,\r\n \"Other\",\r\n md_as_string,\r\n md_identifier)\r\n\r\n summary = defaultdict(list)\r\n for row in mapping:\r\n # grab otu idx if the sample exists, otherwise ignore it\r\n sample_id = row[0]\r\n if sample_id not in sample_map:\r\n continue\r\n otu_idx = sample_map[sample_id]\r\n\r\n for consensus, counts in sorted(counts_by_consensus.items()):\r\n summary[sample_id].append(counts[otu_idx])\r\n\r\n taxon_order = sorted(counts_by_consensus.keys())\r\n\r\n return summary, taxon_order", "def conclusion_summary_map(self):\n pass", "def format_add_taxa_summary_mapping(summary, tax_order, mapping, header,\r\n delimiter=';'):\r\n tax_order = [delimiter.join(tax) for tax in tax_order]\r\n header.extend(tax_order)\r\n yield \"#%s\\n\" % '\\t'.join(header)\r\n\r\n for row in mapping:\r\n sample_id = row[0]\r\n\r\n # only save samples we have summaries for\r\n if sample_id not in summary:\r\n continue\r\n\r\n # grab otu counts for each taxon\r\n row.extend(map(str, summary[sample_id]))\r\n yield \"%s\\n\" % '\\t'.join(row)", "def summary_levels(self):\n from geoid.core import names, descriptions\n\n sl = {}\n\n for sl_name, sl_no in names.items():\n sl[sl_no] = {\n 'number': sl_no,\n 'name': sl_name,\n 'desc': descriptions.get(sl_no)\n }\n\n return sl", "def summary(request, tag=''):\n if tag:\n if tag.startswith(\"@\"):\n target = Target.objects.get(id=tag[1:])\n summaries_by_value, consfield_summaries = target.getSummaries()\n elif tag.startswith(\"~\"):\n # TODO: is there a sensible analogous summary for users,\n # or does it look completely different? 
\n pass\n else:\n # TODO: other log types\n pass\n\n # replace fieldnames in tag_groups with fieldsummaries in grouped_summaries\n grouped_summaries = [ ( gtuple[0], [ summaries_by_value.pop(t) for t in gtuple[1] if t in summaries_by_value])\n for gtuple in tag_groups ]\n # add misc consensus fields\n grouped_summaries.append(('other consensus fields',\n [ summaries_by_value.pop(k) for k in summaries_by_value.keys()\n if summaries_by_value[k].is_consensus and summaries_by_value[k].fieldname ]))\n # add misc consensus labels\n grouped_summaries.append(('consensus labels',\n [ summaries_by_value.pop(k) for k in summaries_by_value.keys()\n if summaries_by_value[k].is_consensus]))\n grouped_summaries[-1][-1].sort(lambda x,y:cmp(x.count,y.count))\n # add misc adhoc fields\n grouped_summaries.append(('adhoc fields',\n [ summaries_by_value.pop(k) for k in summaries_by_value.keys()\n if summaries_by_value[k].fieldname ]))\n grouped_summaries[-1][-1].sort(lambda x,y:cmp(x.count,y.count))\n # add misc adhoc labels\n grouped_summaries.append(('adhoc labels',\n [ summaries_by_value.pop(k) for k in summaries_by_value.keys()]))\n grouped_summaries[-1][-1].sort(lambda x,y:cmp(x.count,y.count))\n \n return render_to_response('summary.html',{'grouped_summaries':grouped_summaries, 'consfield_summaries':consfield_summaries, 'tag':tag})", "def summary(self,attr='raw'):\n g = {}\n g['gid'] = map(lambda x : x.gid, self.taxonomies)\n g['sp'] = map(lambda x : x.presences.species , self.taxonomies)\n \n g['gns'] = map(lambda x : x.presences.genera , self.taxonomies) \n g['fam'] = map(lambda x : x.presences.families , self.taxonomies)\n g['ord'] = map(lambda x : x.presences.orders , self.taxonomies)\n g['cls'] = map(lambda x : x.presences.classes , self.taxonomies)\n g['phy'] = map(lambda x : x.presences.phyla , self.taxonomies)\n g['kng'] = map(lambda x : x.presences.kingdoms , self.taxonomies)\n #g['all'] = map(lambda x : (x.gid,int(x.presences.species),int(x.genera),int(x.families),int(x.orders),int(x.classes),int(x.phyla),int(x.kingdoms)),self.taxonomies)\n keys = settings.TAXONOMIC_TREE_KEYS\n if attr == 'int':\n for key in keys:\n g[key] = map(lambda p : int(p) ,g[key])\n elif attr == 'str':\n for key in keys:\n g[key] = map(lambda p : str(p) ,g[key]) \n elif attr == 'list':\n for key in keys:\n g[key] = map(lambda p : p.list ,g[key]) \n elif attr == 'mapping':\n for key in keys:\n g[key] = map(lambda p : p.map ,g[key]) \n elif attr == 'raw':\n return g\n else:\n logger.error(\"Wrong attribute selection\")\n return None\n \n return g", "def make_summary(otu_table,\r\n level,\r\n upper_percentage,\r\n lower_percentage,\r\n md_as_string=False,\r\n md_identifier=\"taxonomy\"):\r\n header = ['Taxon']\r\n header.extend(otu_table.SampleIds)\r\n\r\n counts_by_consensus, sample_map = sum_counts_by_consensus(otu_table,\r\n level,\r\n \"Other\",\r\n md_as_string,\r\n md_identifier)\r\n\r\n total_counts = float(sum([sum(i) for i in counts_by_consensus.values()]))\r\n taxonomy_summary = []\r\n for consensus, otu_counts in sorted(counts_by_consensus.items()):\r\n if lower_percentage is not None and \\\r\n otu_counts.sum() > lower_percentage * total_counts:\r\n continue\r\n elif upper_percentage is not None and \\\r\n otu_counts.sum() < upper_percentage * total_counts:\r\n continue\r\n new_row = [(consensus)]\r\n new_row.extend(otu_counts)\r\n taxonomy_summary.append(new_row)\r\n\r\n return taxonomy_summary, header", "def structure_by_package(mel):\n \"\"\"receives in a pandas dataframe\"\"\"\n string='K10024-'\n WP='00'\n 
l={}\n mel['Level 1','Level 2','Level 3','Level 4']=''\n mel['WP']=mel['Level'].str.replace('.','',regex=True) \n for i,row in mel.iterrows():\n print (WP)\n if (type(row['WP Activity/ Part No.']) is str) and (string in row['WP Activity/ Part No.']) :\n #new section starts:\n WP=row['WP Activity/ Part No.']\n l[row['Level']]=row['Equipment Description']\n \n mel.loc[i,'WP']=WP\n for key in l.keys():\n mel.loc[i,'Level ' +key]=l[key]\n \n mel.dropna(subset=['Delivery','WP'], inplace=True)\n \n mel['WP']=mel['WP'].str.replace('K10024-','',regex=False) \n mel['WP']=mel['WP'].str[:2]\n mel.drop(columns=['Level'],inplace=True) \n mel.to_excel('packages_MEL02.xlsx')\n return mel", "def label_map_gen(df_main):\n # Function to flatten a list of list\n flatten = lambda l: [item for sublist in l for item in sublist]\n labels = list(set(flatten([l.split(' ') for l in df_main['tags'].values])))\n\n # Create list of labels\n label_map = {l: i for i, l in enumerate(labels)}\n return label_map", "def table_summary():\n \n t = dict()\n t['name'] = get_names()\n t['Name'] = [get_properties(name)['label'] for name in t['name']]\n N = len(t['name'])\n \n # host\n t['host'] = ['Sagittarius', 'Sagittarius', 'none', 'Gaia-Sausage-Enceladus', 'Sagittarius', 'Sequoia / Arjuna / I\\'itoi', 'Sequoia / Arjuna', np.nan, np.nan, 'Sequoia / Arjuna', 'Gaia-Sausage-Enceladus', 'Sequoia / Arjuna', 'Helmi / Wukong', 'Helmi / Wukong', 'Sagittarius', 'in situ / Helmi / Wukong', 'Helmi / Wukong', 'Cetus', 'Cetus', 'Sagittarius', 'Sequoia / Arjuna / I\\'itoi', 'Cetus', 'Sequoia / Arjuna / I\\'itoi']\n \n # progenitor\n t['progenitor'] = [np.nan, np.nan, 'itself', 'NGC 5139', 'NGC 4590', np.nan, 'NGC 3201', '(Wukong / Helmi)', '(Wukong / Helmi)', np.nan, np.nan, np.nan, np.nan, 'NGC 5024', np.nan, 'NGC 5272', 'NGC 5024', 'NGC 5824', 'NGC 5824', np.nan, np.nan, np.nan, np.nan]\n \n # progenitor type\n t['type'] = ['DG' if name in ['elqui', 'indus', 'jhelum'] else 'GC' for name in t['name']]\n \n # metallicity\n t['feh'] = [-2.4, -2.4, -2.2, -1.5, -2.16, -2.3, -1.5, -2.1, -2.1, -1.6, -1.95, -1.6, -2.7, np.nan, -1.7, -1.1, -2.7, -1.9, np.nan, np.nan, -2.2, np.nan, -1.9]\n \n # associations\n t['friends'] = ['ATLAS', 'Aliqa Uma', np.nan, np.nan, np.nan, np.nan, np.nan, 'Jhelum', 'Indus', np.nan, np.nan, np.nan, np.nan, 'Sylgr', np.nan, np.nan, 'Ravi', 'Turbio', 'Triangulum', np.nan, np.nan, np.nan, np.nan]\n \n tout = Table(t)\n tout.pprint()\n tout.write('../data/stream_origin.fits', overwrite=True)", "def describe(self, index=None, columns=None, query=None, split_view_names=False):\r\n stack_tree = []\r\n for dk in self.keys():\r\n path_dk = [dk]\r\n filters = self[dk]\r\n\r\n for fk in filters.keys():\r\n path_fk = path_dk + [fk]\r\n xs = self[dk][fk]\r\n\r\n for sk in xs.keys():\r\n path_sk = path_fk + [sk]\r\n ys = self[dk][fk][sk]\r\n\r\n for tk in ys.keys():\r\n path_tk = path_sk + [tk]\r\n views = self[dk][fk][sk][tk]\r\n\r\n if views.keys():\r\n for vk in views.keys():\r\n path_vk = path_tk + [vk, 1]\r\n stack_tree.append(tuple(path_vk))\r\n else:\r\n path_vk = path_tk + ['|||||', 1]\r\n stack_tree.append(tuple(path_vk))\r\n \r\n column_names = ['data', 'filter', 'x', 'y', 'view', '#']\r\n description = pd.DataFrame.from_records(stack_tree, columns=column_names)\r\n if split_view_names:\r\n views_as_series = pd.DataFrame(\r\n description.pivot_table(values='#', columns='view', aggfunc='count')\r\n ).reset_index()['view']\r\n parts = ['xpos', 'agg', 'relation', 'rel_to', 'weights', \r\n 'shortname']\r\n description = 
pd.concat(\r\n (views_as_series,\r\n pd.DataFrame(views_as_series.str.split('|').tolist(),\r\n columns=parts)), axis=1)\r\n \r\n description.replace('|||||', np.NaN, inplace=True)\r\n if query is not None:\r\n description = description.query(query)\r\n if not index is None or not columns is None:\r\n description = description.pivot_table(values='#', index=index, columns=columns,\r\n aggfunc='count')\r\n return description", "def aggregate_tagged_graph(graph):\n def recursor(obj, scores):\n if obj['secondary_tag']!=None:\n if type(scores[obj['tag']]) is int:\n scores[obj['tag']] = defaultdict(int)\n scores[obj['tag']][obj['secondary_tag']] += obj['impact']\n for flow in obj['biosphere']:\n if type(scores[flow['tag']]) is int:\n scores[flow['tag']] = defaultdict(int)\n scores[flow['tag']][flow['secondary_tag']] += flow['impact']\n else:\n scores[obj['tag']] += obj['impact']\n for flow in obj['biosphere']:\n scores[flow['tag']] += flow['impact'] \n for exc in obj['technosphere']:\n scores = recursor(exc, scores)\n return scores\n\n scores = defaultdict(int)\n for obj in graph:\n scores = recursor(obj, scores)\n return scores", "def metadata_summary(idx):\n tax_per_cluster = []\n genomes_per_tax = []\n genes_per_genome = []\n for cluster_id,v in idx.items():\n tax_per_cluster.append(len(v.keys()))\n for tax,vv in v.items():\n genomes_per_tax.append(len(vv.keys()))\n for genomeID,gene_ids in vv.items():\n genes_per_genome.append(len(set(gene_ids)))\n sum_stats(tax_per_cluster, 'Clades per cluster')\n sum_stats(genomes_per_tax, 'Gemomes per clade')\n sum_stats(genes_per_genome, 'Genes per genome')", "def get_column_to_tags_mapping(\n self, config: cconfig.Config\n ) -> Optional[Dict[Any, List[str]]]:\n _ = self, config\n return None", "def calculate_level_contribution(df, columns, index_col, dateColDateFormat, value_col, max_time, meta_parser, pandas_flag):\n # print \"index_col\",index_col\n # print \"dateColDateFormat\",dateColDateFormat\n # print \"value_col\",value_col\n # print \"max_time\",max_time\n out = {}\n for column_name in columns:\n print(\"-\"*100)\n print(\"calculate_level_contribution for \",column_name)\n data_dict = {\n \"overall_avg\":None,\n \"excluding_avg\":None,\n \"minval\":None,\n \"maxval\":None,\n \"diff\":None,\n \"contribution\":None,\n \"growth\":None\n }\n try:\n column_levels = meta_parser.get_unique_level_names(column_name)\n except:\n if not pandas_flag:\n column_levels = [x[0] for x in df.select(column_name).distinct().collect()]\n # column_levels = df.agg((F.collect_set(column_name).alias(column_name))).first().asDict()[column_name]\n else:\n column_levels = list(df[column_name].unique())\n out[column_name] = dict(list(zip(column_levels,[data_dict]*len(column_levels))))\n # st = time.time()\n if not pandas_flag:\n pivotdf = df.groupBy(index_col).pivot(column_name).sum(value_col)\n else:\n pivotdf = df.pivot_table(\n values=value_col, index=index_col, columns=column_name, aggfunc='sum')\n pivotdf.reset_index(inplace=True)\n # print \"time for pivot\",time.time()-st\n # pivotdf = pivotdf.na.fill(0)\n # pivotdf = pivotdf.withColumn('total', sum([pivotdf[col] for col in pivotdf.columns if col != index_col]))\n # st=time.time()\n # print \"converting to pandas\"\n if not pandas_flag:\n k = pivotdf.toPandas()\n else:\n k = pivotdf.copy()\n # print \"time taken for pandas conversion of pivotdf\",time.time()-st\n k[\"total\"] = k.sum(axis=1)\n k[index_col] = k[index_col].apply(str)\n try:\n k[\"rank\"] = k[index_col].apply(lambda x: 
datetime.strptime(x,dateColDateFormat) if x != 'None' else None)\n except Exception as e:\n print(\"Exception in /bi/narratives/utils.py calculate_level_contribution: \", e)\n k[index_col] = pd.to_datetime(k[index_col])\n k[\"rank\"] = k[index_col].apply(lambda x: datetime.strftime(x, dateColDateFormat) if x != 'None' else None)\n\n k = k.sort_values(by=\"rank\", ascending=True)\n occurance_index = np.where(k[index_col] == max_time)\n # print \"occurance_index\",occurance_index\n # print \"max_time\",max_time\n if len(occurance_index[0]) > 0:\n max_index = occurance_index[0][0]\n else:\n max_index = None\n for level in column_levels:\n try:\n # print \"calculations for level\",level\n if level != None:\n data_dict = {\"overall_avg\":None,\"excluding_avg\":None,\"minval\":None,\"maxval\":None,\"diff\":None,\"contribution\":None,\"growth\":None}\n data_dict[\"contribution\"] = float(np.nansum(k[level]))*100/np.nansum(k[\"total\"])\n data = list(k[level])\n growth_data = [x for x in data if np.isnan(x) != True and x != 0]\n data_dict[\"growth\"] = old_div((growth_data[-1]-growth_data[0])*100,growth_data[0])\n k[\"percentLevel\"] = (old_div(k[level],k[\"total\"]))*100\n data = list(k[\"percentLevel\"])\n data_dict[\"overall_avg\"] = np.nanmean(data)\n data_dict[\"maxval\"] = np.nanmax(data)\n data_dict[\"minval\"] = np.nanmin(data)\n if max_index:\n del(data[max_index])\n data_dict[\"excluding_avg\"] = np.nanmean(data)\n data_dict[\"diff\"] = (data_dict[\"maxval\"] - data_dict[\"excluding_avg\"])*100/float(data_dict[\"excluding_avg\"])\n out[column_name][level] = data_dict\n except:\n pass\n return out", "def pivot_table_around_tags(df):\n\n # Get values where tag is not 'none'\n df = df.query('tag != \"none\"')\n\n # Call pivot_on_tag with each tag\n tags = list(df.tag.unique())\n frames = [_pivot_on_tag(df, t) for t in tags]\n final_df = pd.concat(frames, axis=1)\n final_df.index.name = frames[0].index.name\n return final_df.reset_index()", "def get_sample_info(lines):\r\n mapping_data, header, comments = parse_mapping_file(lines)\r\n labels = [\"from\", \"to\", \"eweight\", \"consensus_lin\"]\r\n node_labels = [\"node_name\", \"node_disp_name\", \"ntype\", \"degree\",\r\n \"weighted_degree\", \"consensus_lin\"]\r\n cat_by_sample = {}\r\n sample_by_cat = defaultdict(list)\r\n meta_dict = {}\r\n category_labels = header[1:-1]\r\n labels.extend(category_labels)\r\n node_labels.extend(category_labels)\r\n label_list = [[] for c in category_labels]\r\n for r in mapping_data:\r\n categories = r[0:len(category_labels) + 1]\r\n sample = categories[0]\r\n meta_dict[sample] = ['\\t'.join(categories[1:]), 0]\r\n\r\n cat_by_sample[sample] = [(l.strip(), c.strip())\r\n for l, c in zip(category_labels, categories[1:])]\r\n\r\n cat_list = []\r\n for i, (l, c) in enumerate(zip(category_labels, categories[1:])):\r\n if c not in label_list[i]:\r\n label_list[i].append(c)\r\n l = l.strip()\r\n c = c.strip()\r\n cat_list.append((l, c))\r\n sample_by_cat[(l, c)].append(sample)\r\n\r\n cat_by_sample[sample] = cat_list\r\n\r\n return cat_by_sample, sample_by_cat, len(category_labels), meta_dict,\\\r\n labels, node_labels, label_list", "def _compute_columns(log: EventLog, prefix_length: int, padding: bool) -> list:\n return [\"trace_id\"] + \\\n sorted(list({\n event['concept:name']\n for trace in log\n for event in trace[:prefix_length]\n })) + \\\n ['0'] if padding else [] + \\\n ['label']", "def _get_summary_struct(self):\n _features = _precomputed_field(\n 
_internal_utils.pretty_print_list(self.get('features')))\n _exclude = _precomputed_field(\n _internal_utils.pretty_print_list(self.get('excluded_features')))\n fields = [\n (\"Features\", _features),\n (\"Excluded features\", _exclude),\n (\"Output column name\", 'output_column_name'),\n (\"Max categories per column\", 'max_categories'),\n ]\n section_titles = ['Model fields']\n\n return ([fields], section_titles)", "def tags():", "def build_sample_map(flowcell):\n result = {}\n rows = [(lane, lib[\"name\"]) for lib in flowcell[\"libraries\"] for lane in lib[\"lanes\"]]\n i = 1\n for _, name in sorted(set(rows)):\n if name not in result:\n result[name] = \"S{}\".format(i)\n i += 1\n return result", "def aggregate_tags(terms=terms, enableWarnning=False):\n ordered_tags = load_map()\n\n tmp_tags = collections.OrderedDict() # All keys to lowe cases\n for k in ordered_tags.keys():\n tag = k.lower()\n tmp_tags[tag] = k\n\n for term, tags in terms.iteritems():\n for tag in tags:\n try:\n ind = tag.rindex('/')\n tmp = tag[ind+1:]\n except ValueError as e:\n tmp = tag\n\n tmp = tmp.lower()\n if tmp in tmp_tags:\n tmp_tags[tmp] = tag\n else:\n if enableWarnning:\n print('Unknown tag ignored:[%s] for term [%s].' % (tag, term))\n\n return tmp_tags", "def group(df):\r\n t = len(df.columns.values)\r\n ans = {}\r\n names = df.columns.values\r\n for index,row in df.iterrows(): \r\n for j in range(t-1): \r\n index = str(j) \r\n llaveCol = str(row[j])\r\n llaveClase = str(row[t-1]) \r\n if index not in ans:\r\n ans[index] = {}\r\n if llaveCol not in ans[index]:\r\n ans[index][llaveCol] = {} \r\n if llaveClase not in ans[index][llaveCol]:\r\n ans[index][llaveCol][llaveClase] = 0 \r\n ans[index][llaveCol][llaveClase]+=1 \r\n return ans", "def reportTagStats(stats, tag, lower):\n transcript_level_tags = ['stats', 'hasOkCopies', 'hasBadCopies',\n 'ok', 'not_ok']\n level = 0\n increment = 1\n if tag in transcript_level_tags:\n header = '%40s %7s %7s %15s' % ('tree', 'Tran.s',\n 'Annot.s', 'Trans. Tags')\n else:\n header = '%40s %7s %7s %15s' % ('tree', 'Tran.s',\n 'Annot.s', 'Annot. Tags')\n print header\n def printTree(level, t, lower):\n if t.tagTranscriptAnnotations > lower:\n if tag in transcript_level_tags:\n count = '%6d (%6.2f%%)' % (\n t.tagTranscripts,\n 100. * t.tagTranscripts / t.nodeTranscripts)\n elif tag.endswith('*'):\n count = '%16d' % (t.tagTranscriptAnnotations)\n else:\n count = '%6d (%6.2f%%)' % (\n t.tagTranscriptAnnotations,\n 100. * t.tagTranscriptAnnotations / t.nodeTranscriptAnnotations)\n s = '%7d, %7d, %s' % (\n t.nodeTranscripts, t.nodeTranscriptAnnotations, count)\n title = '%s%s' % ('| ' * level, t.nodeName)\n buff = '.' 
* (40 - len(title))\n print '%s%s%s' % (title, buff, s)\n t.children = sorted(t.children, key=lambda c: c.nodeTranscripts,\n reverse=True)\n for c in t.children:\n printTree(level + increment, c, lower)\n printTree(level, stats, lower)", "def tag(self):\n \n tag = super(self.__class__, self).tag();\n tag = als.tag_join(tag, als.stra(self.strain));\n tag = als.tag_join(tag, als.stra(self.dtype));\n tag = als.tag_join(tag, 'w=%s' % als.stra(self.wid)); \n tag = als.tag_join(tag, 's=%s' % als.stra(self.stage));\n #tag = analysis.tag_join(tag, 'l=%s' % analysis.stra(self.label)); \n\n return tag;", "def test_formatting() -> None:\n assert tupleElement(\n \"tags_key\",\n arrayJoin(\n \"snuba_all_tags\",\n zip_columns(\n Column(None, None, \"tags.key\"), Column(None, None, \"tags.value\"),\n ),\n ),\n Literal(None, 1),\n ).accept(ClickhouseExpressionFormatter()) == (\n \"(tupleElement((arrayJoin(arrayMap((x, y -> tuple(x, y)), \"\n \"tags.key, tags.value)) AS snuba_all_tags), 1) AS tags_key)\"\n )\n\n assert tupleElement(\n \"tags_key\",\n arrayJoin(\n \"snuba_all_tags\",\n filter_key_values(\n zip_columns(\n Column(None, None, \"tags.key\"), Column(None, None, \"tags.value\"),\n ),\n [Literal(None, \"t1\"), Literal(None, \"t2\")],\n ),\n ),\n Literal(None, 1),\n ).accept(ClickhouseExpressionFormatter()) == (\n \"(tupleElement((arrayJoin(arrayFilter((pair -> in(\"\n \"tupleElement(pair, 1), tuple('t1', 't2'))), \"\n \"arrayMap((x, y -> tuple(x, y)), tags.key, tags.value))) AS snuba_all_tags), 1) AS tags_key)\"\n )", "def tree_statistics(tree):\n all_elements = tree.findall('//')\n tag_counter = defaultdict(int)\n for element in all_elements:\n tag_counter[element.tag] += 1\n for (tag, counts) in tag_counter.items():\n print \"{0}: {1}\".format(tag, counts)", "def hacky_tagging(df):\n\n df['tag'] = 'none' # add tag column (set to 'none' by default)\n\n df_rtemp = df.query('name == \"ROOMTEMP\"') # tag 'room_temp'\n df.loc[df_rtemp.index, 'tag'] = 'room_temp'\n\n df_rvalve = df.query('name == \"R VALVE\"') # tag 'valve'\n df.loc[df_rvalve.index, 'tag'] = 'valve'\n\n return df", "def get_stories(df):\n categories = df.get_categorical().columns\n continuous = df.get_numerical().columns\n\n stories = []\n cat_copy = list(categories)\n for col in categories:\n # Remove the current col\n if col in cat_copy:\n cat_copy.remove(col)\n try:\n # Get comparison variable\n x = cat_copy.pop()\n d = pd.pivot_table(df.data, index=(col), values=[x],\\\n aggfunc='count').reset_index().sort_values(by=x, ascending=False)\n stories.append({\n 'question': \"%s with high count of %s\" %(col, x),\n 'question_html': \"<span class='tag is-primary is-light'>%s</span>\\\n with high count of <span class='tag is-success is-light'>%s</span>\" % (col, x),\n 'answer': d[col].head(1).values[0],\n 'misc': d\n })\n except IndexError as e:\n pass\n \n for num in continuous:\n d = pd.pivot_table(df.data, index=[col], values=[num],\\\n aggfunc=np.sum).reset_index().sort_values(by=num, ascending=False)\n stories.append({\n 'question': \"%s with sum of %s\" % (col, num),\n 'question_html': \"<span class='tag is-primary is-light'>%s</span>\\\n with sum of <span class='tag is-success is-light'>%s</span>\" % (col, num),\n 'answer': round(d[num].head(1).values[0]),\n 'misc': d\n })\n\n return stories" ]
[ "0.6095907", "0.5899178", "0.5657692", "0.52457756", "0.52168375", "0.5198422", "0.517395", "0.5151941", "0.51494884", "0.5133481", "0.5127067", "0.50493133", "0.500938", "0.4997392", "0.49971712", "0.49863157", "0.49833137", "0.4966695", "0.49348795", "0.49285176", "0.4926881", "0.49258542", "0.49092895", "0.48735845", "0.48633984", "0.48547497", "0.48546445", "0.48461473", "0.48226574", "0.47976303" ]
0.6325224
0
Prunes tensor corresponding to parameter called `name` in `module` by removing every other entry in the tensors. Modifies module in place (and also returns the modified module)
def foobar_unstructured(module, name):
    FooBarPruningMethod.apply(module, name)
    return module
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_weight_norm_and_equal_lr(module: Module,\n name: str = 'weight') -> Module:\n return remove_weight_lambda(module, 'norm_equal_lr', name)", "def remove_norms(module_: \"WN\") -> \"WN\":\n module_.start = torch.nn.utils.remove_weight_norm(module_.start_conv)\n module_.cond_layer = torch.nn.utils.remove_weight_norm(module_.cond_layer)\n for i, layer_ in enumerate(module_.in_layers):\n layer_ = DepthwiseSeparableConv1d.remove_batch_norm(layer_)\n module_.in_layers[i] = layer_\n for i, layer_ in enumerate(module_.res_skip_layers):\n layer_ = torch.nn.utils.remove_weight_norm(layer_)\n module_.res_skip_layers[i] = layer_\n return module_", "def retrieve_prune_modules(self, model):\n if (len(self.leaf_modules) == 0):\n self.retrieve_leaf_modules(model)\n self.prune_modules = []\n for name, m in self.leaf_modules:\n # Skip non-prunable layers\n if (hasattr(m, 'unprunable') and m.unprunable):\n continue\n if (m.__class__ in [nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d, nn.ConvTranspose2d, nn.ConvTranspose3d, Conv1d]):\n self.prune_modules.append(m)\n if (m.__class__ in [nn.RNN, nn.GRU, nn.LSTM]):\n self.prune_modules.append(m)\n return self.prune_modules", "def remove_weight_scale(module: Module, name: str = 'weight') -> Module:\n return remove_weight_lambda(module, 'scale', name)", "def prune():\n with tf.Graph().as_default() as g:\n # Input evaluation data\n images, labels = rn.inputs(eval_data=True)\n\n # inference model.\n logits = rn.inference(images, 15)\n\n # Calculate predictions.\n top_k_op = tf.nn.in_top_k(logits, labels, 1)\n\n # Create a saver\n saver = tf.train.Saver()\n\n # Create session to restore, and restore data\n sess = tf.InteractiveSession()\n\n # Queue runner\n tf.train.start_queue_runners()\n\n ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)\n if ckpt and ckpt.model_checkpoint_path:\n # Restores from checkpoint\n saver.restore(sess, ckpt.model_checkpoint_path)\n # extract global_step from it.\n global_step_num = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]\n else:\n print('No checkpoint file found')\n return\n\n precision = eval_once(sess, top_k_op)\n \n \"\"\"\n # Get all variables\n lst_variables = tf.global_variables()\n lst_values = sess.run(tf.global_variables())\n\n # Get the pruning information\n r = np.arange(0,0.2,0.01)\n p = []\n for reduce_factor in r:\n kernel_index, channel_to_delete_pack, pruning_number_pack = \\\n pru_cal(lst_variables, lst_values, reduce_factor=reduce_factor)\n print('reduce factor is %.3f' % reduce_factor)\n\n # Delete these variables\n counter = 0\n for i in kernel_index:\n for j in range(pruning_number_pack[counter]):\n sess.run(tf.assign(lst_variables[i][:, :, :, channel_to_delete_pack[counter][j]],\n tf.zeros(\n tf.shape(lst_variables[i][:, :, :, channel_to_delete_pack[counter][j]])),\n name=lst_variables[i][:, :, :, channel_to_delete_pack[counter][j]].name))\n counter = counter + 1\n\n # Real evaluation, after pruning\n p.append(eval_once(sess, top_k_op))\n\n return r, p\n \"\"\"", "def zero_module(module):\n for p in module.parameters():\n p.detach().zero_()\n return module", "def delModule(name):", "def remove_spectral_norm(module, name='weight'):\n for k, hook in module._forward_pre_hooks.items():\n if isinstance(hook, SpectralNorm) and hook.name == name:\n hook.remove(module)\n del module._forward_pre_hooks[k]\n return module\n\n raise ValueError(\"spectral_norm of '{}' not found in {}\".format(\n name, module))", "def on_prune(self, function_graph, node, reason):", "def remove(self, name):\n for 
var in self.inputs:\n if var.name == name:\n self.inputs.remove(var)\n return\n for var in self.outputs:\n if var.name == name:\n self.outputs.remove(var)\n return", "def remove_node(name):\n for file_path in config_files + [node_config_file]:\n with open(file_path, 'r') as f:\n lines = f.readlines()\n\n new_lines = []\n for line in lines:\n if name not in line:\n new_lines.append(line)\n\n with open(file_path, 'w') as f:\n f.writelines(new_lines)\n\n topology_file = Settings.CONF_TOPOLOGY_FILE\n if topology_file != \"\":\n topology_file_path = os.path.join(simulaqron_path, topology_file)\n else:\n topology_file_path = default_topology_file\n\n with open(topology_file_path, 'r') as f:\n topology = json.load(f)\n\n if name in topology:\n topology.pop(name)\n\n for node, neighbors in topology.items():\n if name in neighbors:\n neighbors.remove(name)\n\n with open(topology_file_path, 'w') as f:\n json.dump(topology, f)", "def clones(module, num_copies):\n return nn.ModuleList([copy.deepcopy(module) for _ in range(num_copies)])", "def unfreeze_up_to(self, module_name: Text) -> List[Text]:\n return self.__up_to(module_name, requires_grad=True)", "def clones(module, n):\n return nn.ModuleList([copy.deepcopy(module) for _ in range(n)])", "def clones(module, n):\n return nn.ModuleList([copy.deepcopy(module) for _ in range(n)])", "def clones(module, n):\n return nn.ModuleList([copy.deepcopy(module) for _ in range(n)])", "def _rescale_module(module):\n for sub in module.modules():\n if isinstance(sub, (nn.Conv1d, nn.ConvTranspose1d, nn.Conv2d, nn.ConvTranspose2d)):\n std = sub.weight.std().detach()\n scale = (std / 0.1) ** 0.5\n sub.weight.data /= scale\n if sub.bias is not None:\n sub.bias.data /= scale", "def unpool(value, name='unpool'):\n with tf.name_scope(name) as scope:\n sh = value.get_shape().as_list()\n dim = len(sh[1:-1])\n out = (tf.reshape(value, [-1] + sh[-dim:]))\n for i in range(dim, 0, -1):\n out = tf.concat(i, [out, tf.zeros_like(out)])\n out_size = [-1] + [s * 2 for s in sh[1:-1]] + [sh[-1]]\n out = tf.reshape(out, out_size, name=scope)\n return out", "def reparameterizables(self,\n variable_predicate=is_variable,\n module_predicate=None,\n with_path=False):\n if module_predicate is None:\n # Just return all the variables satisfying `variable_predicate`.\n return self._flatten(\n recursive=True, predicate=variable_predicate, with_path=with_path)\n\n else:\n # Need to additionally verify `module_predicate` of submodules that\n # directly or indirectly own the variable.\n variables_with_paths = self._flatten(\n recursive=True, predicate=variable_predicate, with_path=True)\n\n def path_prefixes(path):\n return (path[:i] for i in range(1, len(path)))\n\n # TODO(eringrant): This function wraps the return value from a call to\n # `tf.Module._flatten`, which only checks predicates of leaf objects and\n # therefore precludes checking containment relationships.\n # However, the performance could be improved by avoiding redoing the BFS\n # within `tf.Module._flatten`.\n def satisfies_module_predicate(variable_with_path):\n \"\"\"Return True if a module in the path satisfies `module_predicate`.\"\"\"\n # Check if this module satisfies `module_predicate`.\n if module_predicate(self):\n return True\n\n # Check if a submodule satisfies `module_predicate`.\n path, _ = variable_with_path\n for path_prefix in path_prefixes(path):\n prefix_module = chained_getattr(self, path_prefix)\n if isinstance(prefix_module,\n tf.Module) and module_predicate(prefix_module):\n return True\n\n # No modules or 
submodules satisfied `module_predicate`.\n return False\n\n filtered_variables = filter(satisfies_module_predicate,\n variables_with_paths)\n if with_path:\n return filtered_variables\n else:\n _, variables = zip(*filtered_variables)\n # De-duplicate.\n return [v_ref.deref() for v_ref in set(v.ref() for v in variables)]", "def removeModulesNotOnAPathExcluding( process, keepList=() ):\n allMods=set((x for x in process.producers_().iterkeys()))\n allMods.update((x for x in process.filters_().iterkeys()))\n allMods.update((x for x in process.analyzers_().iterkeys()))\n allMods.update((x for x in process.outputModules_().iterkeys()))\n \n modulesOnPaths = set()\n for p in process.paths_():\n modulesOnPaths.update( (x for x in getattr(process,p).moduleNames())) \n for p in process.endpaths_():\n modulesOnPaths.update( (x for x in getattr(process,p).moduleNames()))\n\n notOnPaths = allMods.difference(modulesOnPaths)\n \n keepModuleNames = set( (x.label_() for x in keepList) )\n \n getRidOf = notOnPaths.difference(keepModuleNames)\n \n for n in getRidOf:\n delattr(process,n)", "def remove_batchnorm(m: nn.Sequential) -> None:\n ms = list(m._modules.items())\n\n # transfer biases from BN to previous conv / Linear / Whatever\n for (name1, mod1), (name2, mod2) in zip(ms[:-1], ms[1:]):\n if isinstance(mod2, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):\n if mod1.bias is not None:\n continue\n\n if mod2.bias is not None:\n with torch.no_grad():\n mod1.bias = mod2.bias\n else:\n out_ch = len(mod2.running_mean)\n with torch.no_grad():\n mod1.bias = nn.Parameter(torch.zeros(out_ch))\n # remove bn\n for name, mod in ms:\n if isinstance(mod, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):\n delattr(m, name)", "def unpool(value, name='unpool'):\n with tf.name_scope(name) as scope:\n sh = value.get_shape().as_list()\n dim = len(sh[1:-1])\n out = (tf.reshape(value, [-1] + sh[-dim:]))\n for i in range(dim, 0, -1):\n out = tf.concat(i, [out, out])\n out_size = [-1] + [s * 2 for s in sh[1:-1]] + [sh[-1]]\n out = tf.reshape(out, out_size, name=scope)\n return out", "def unpool(value, name='unpool'):\n with tf.name_scope(name) as scope:\n sh = value.get_shape().as_list()\n dim = len(sh[1:-1])\n out = (tf.reshape(value, [-1] + sh[-dim:]))\n for i in range(dim, 0, -1):\n out = tf.concat(axis=i, values=[out, tf.zeros_like(out)])\n out_size = [-1] + [s * 2 for s in sh[1:-1]] + [sh[-1]]\n out = tf.reshape(out, out_size, name=scope)\n return out", "def discard_before(self, r):\n W = self.matrix[:, r:]\n s = self.n - r\n M = None\n mt = self._mult_tab\n if mt is not None:\n M = {}\n for u in range(s):\n M[u] = {}\n for v in range(u, s):\n M[u][v] = mt[r + u][r + v][r:]\n return Submodule(self.parent, W, denom=self.denom, mult_tab=M)", "def unpool(value, name='unpool'):\n with tf.name_scope(name) as scope:\n sh = value.get_shape().as_list()\n dim = len(sh[1:-1])\n out = (tf.reshape(value, [-1] + sh[-dim:]))\n for i in range(dim, 0, -1):\n out = tf.concat([out, tf.zeros_like(out)], i)\n out_size = [-1] + [s * 2 for s in sh[1:-1]] + [sh[-1]]\n out = tf.reshape(out, out_size, name=scope)\n return out", "def clones(module, N):\n return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])", "def clones(module, N):\n return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])", "def clones(module, N):\n return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])", "def clones(module, N):\n return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])", "def on_prune(self, fgraph, app, reason):\r\n #if app 
not in self.debug_all_apps: raise ProtocolError(\"prune without import\")\r\n #self.debug_all_apps.remove(app)\r\n\r\n #UPDATE self.clients\r\n for i, input in enumerate(OrderedSet(app.inputs)):\r\n del self.clients[input][app]\r\n\r\n if getattr(app.op, 'destroy_map', {}):\r\n self.destroyers.remove(app)\r\n\r\n # Note: leaving empty client dictionaries in the struct.\r\n # Why? It's a pain to remove them. I think they aren't doing any harm, they will be\r\n # deleted on_detach().\r\n\r\n #UPDATE self.view_i, self.view_o\r\n for o_idx, i_idx_list in getattr(app.op, 'view_map', {}).items():\r\n if len(i_idx_list) > 1:\r\n #destroying this output invalidates multiple inputs\r\n raise NotImplementedError()\r\n o = app.outputs[o_idx]\r\n i = app.inputs[i_idx_list[0]]\r\n\r\n del self.view_i[o]\r\n\r\n self.view_o[i].remove(o)\r\n if not self.view_o[i]:\r\n del self.view_o[i]\r\n\r\n self.stale_droot = True" ]
[ "0.60598844", "0.5909009", "0.5744272", "0.5732194", "0.5677878", "0.5647646", "0.5559865", "0.5466906", "0.5406585", "0.5399408", "0.53274816", "0.5233343", "0.5209073", "0.5150959", "0.5150959", "0.5150959", "0.51101005", "0.50904685", "0.5088666", "0.50826114", "0.50823766", "0.50332624", "0.5026033", "0.5008918", "0.5007886", "0.5005741", "0.5005741", "0.5005741", "0.5005741", "0.4992559" ]
0.6131159
0
Returns a dict with class name and bounding boxes. The key is the box number.
def return_boxes_class_as_dict(self) -> Dict[int, Dict]: boxes_dict = {} for index, sg_box in enumerate(self.root.iter('object')): boxes_dict[index] = {"name": sg_box.find("name").text, "xmin": int(sg_box.find("bndbox").find("xmin").text), "ymin": int(sg_box.find("bndbox").find("ymin").text), "xmax": int(sg_box.find("bndbox").find("xmax").text), "ymax": int(sg_box.find("bndbox").find("ymax").text)} return boxes_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_dict(self, round_boxes: bool = True) -> dict:\n npboxes = self.get_npboxes()\n if round_boxes and np.issubdtype(npboxes.dtype, np.floating):\n npboxes = npboxes.round(2)\n classes = self.get_class_ids()\n scores = self.get_scores().round(6)\n d = {\n Box.from_npbox(box): (class_id, score)\n for box, class_id, score in zip(npboxes, classes, scores)\n }\n return d", "def boxes(self) -> dict:\n return self.data[\"boxes\"]", "def parse_bboxes(ann, classes):\n\n names, xmins, ymins, xmaxs, ymaxs = [], [], [], [], []\n ann_root = ann.getroot()\n\n for name in ann_root.iter('name'):\n names.append(np.float32(classes.index(name.text)))\n\n for xmin in ann_root.iter('xmin'):\n xmins.append(np.float32(xmin.text))\n\n for ymin in ann_root.iter('ymin'):\n ymins.append(np.float32(ymin.text))\n\n for xmax in ann_root.iter('xmax'):\n xmaxs.append(np.float32(xmax.text))\n\n for ymax in ann_root.iter('ymax'):\n ymaxs.append(np.float32(ymax.text))\n\n return np.column_stack((xmins, ymins, xmaxs, ymaxs, names))", "def boxes_stats(self):\n all_boxes = []\n nb_detections = []\n convexities = []\n all_ids = set()\n for image_id in self.dataset_handler.image_ids:\n masks, ids = self.dataset_handler.load_mask(image_id)\n all_ids = all_ids.union(set(ids))\n boxes = utils.extract_bboxes(masks)\n all_boxes.append(boxes)\n nb_detections.append(boxes.shape[0])\n for mask_idx in range(masks.shape[2]):\n mask = masks[:, :, mask_idx]\n props = regionprops(mask.astype(np.int8))[0]\n convexities.append(props.filled_area/props.convex_area)\n\n self.nb_classes = len(all_ids) + 1\n\n convexities = np.array(convexities)\n self.convexity_stats = stats.describe(convexities)\n\n nb_detections = np.array(nb_detections)\n self.nb_detections_stats = stats.describe(nb_detections)\n\n all_boxes = np.concatenate(all_boxes, axis=0)\n heights = all_boxes[:, 2] - all_boxes[:, 0]\n widths = all_boxes[:, 3] - all_boxes[:, 1]\n\n self.height_stats = stats.describe(heights)\n self.width_stats = stats.describe(widths)\n\n ratios = widths / heights\n self.ratio_stats = stats.describe(ratios)\n\n mean_pixel = [np.mean(img, axis=(0, 1))\n for img in self.dataset_handler.images]\n self.mean_pixel = np.mean(np.array(mean_pixel), axis=0)", "def get_detection_bboxes(detector):\r\n with open('../datasets/AICity_data/train/S03/c010/det/det_' + detector + '.txt') as f:\r\n lines = f.readlines()\r\n bboxes = dict()\r\n num_of_instances = 0\r\n for line in lines:\r\n num_of_instances += 1\r\n line = (line.split(','))\r\n if line[0] in bboxes.keys():\r\n content = [int(float(elem)) for elem in line[1:6]]\r\n content.append(float(line[6])) # confidence score??????\r\n bboxes[line[0]].append(content)\r\n else:\r\n content = [int(float(elem)) for elem in line[1:6]]\r\n content.append(float(line[6]))\r\n bboxes[line[0]] = [content]\r\n return bboxes, num_of_instances", "def get_detect_result(self):\n\n resultdict = {'class_index' : self.class_index,\n 'obj_name' : self.obj_name,\n 'score' : self.score,\n 'bounding_box' : {\n 'x_min' : self.x_min,\n 'y_min' : self.y_min,\n 'width' : self.width,\n 'height' : self.height}\n }\n return resultdict", "def bbox2fields():\n bbox2label = {\n 'gt_bboxes': 'gt_labels',\n 'gt_bboxes_ignore': 'gt_labels_ignore'\n }\n bbox2mask = {\n 'gt_bboxes': 'gt_masks',\n 'gt_bboxes_ignore': 'gt_masks_ignore'\n }\n bbox2seg = {\n 'gt_bboxes': 'gt_semantic_seg',\n }\n return bbox2label, bbox2mask, bbox2seg", "def get_bounding_boxes(outputs, width: int, height: int):\n\n # detected bounding boxes, obtained confidences and 
class's number\n boxes = []\n scores = []\n classes = []\n\n # this is our threshold for keeping the bounding box\n probability_minimum = 0.5\n\n # iterating through all three outputs\n for result in outputs:\n # going through all bounding boxes from current output layer\n for detection in result:\n # getting class for current object\n scores_current = detection[5:]\n class_current = np.argmax(scores_current)\n\n # getting probability for current object\n probability_current = scores_current[class_current]\n\n # getting object confidence for current object\n object_confidence = detection[4]\n\n # eliminating weak predictions by minimum probability\n if probability_current > probability_minimum:\n # if probability_current*object_confidence > probability_minimum: # this is an alternative way\n\n # Scaling bounding box coordinates to the initial image size\n # by element-wise multiplying them with the width and height of the image\n box_current = np.array(detection[0:4]) * np.array([width, height, width, height])\n\n # YOLO data format keeps center of detected box and its width and height\n # here we reconstruct the top left and bottom right corner\n x_center, y_center, box_width, box_height = box_current.astype('int')\n x_min = int(x_center - (box_width / 2))\n y_min = int(y_center - (box_height / 2))\n x_max = int(x_center + (box_width / 2))\n y_max = int(y_center + (box_height / 2))\n\n # adding results into prepared lists\n boxes.append([x_min, y_min, x_max, y_max])\n scores.append(float(probability_current))\n classes.append(class_current)\n\n boxes = np.array(boxes)\n scores = np.array(scores)\n classes = np.array(classes)\n return boxes, scores, classes", "def _boxes_coordinates(self,\n image,\n boxes,\n classes,\n scores,\n max_boxes_to_draw=20,\n min_score_thresh=.5):\n\n if not max_boxes_to_draw:\n max_boxes_to_draw = boxes.shape[0]\n number_boxes = min(max_boxes_to_draw, boxes.shape[0])\n final_boxes = []\n final_scores = []\n for i in range(number_boxes):\n if self.category_index[classes[i]]['name'] not in \\\n self.classes_to_detect:\n continue\n if scores is None or scores[i] > min_score_thresh:\n box = tuple(boxes[i].tolist())\n ymin, xmin, ymax, xmax = box\n\n im_height, im_width, _ = image.shape\n left, right, top, bottom = [int(z) for z in\n (xmin * im_width, xmax * im_width,\n ymin * im_height,\n ymax * im_height)]\n\n final_boxes.append([top, left, bottom, right])\n final_scores.append(scores[i])\n return final_boxes, final_scores", "def boundingRect(cnt):\n\tx, y, w, h = cv2.boundingRect(cnt)\n\treturn {\"x\":x, \"y\": y, \"w\": w, \"h\": h}", "def parse_labelbox_boxes(IMAGE_PATH, ANNOTATION_FILEPATH, CLASS_NAMES):\n IMAGE_PATH = make_abs_path(IMAGE_PATH)\n annotation_data = json.loads(open(ANNOTATION_FILEPATH).read())\n images = []\n boxes = {name:[] for name in CLASS_NAMES}\n for image_data in annotation_data:\n image_file = image_data.get('External ID')\n images.append(io.imread(IMAGE_PATH + image_file))\n image_boxes = {name: [] for name in CLASS_NAMES}\n for obj in image_data.get(\"Label\",{}).get(\"objects\",[]):\n object_name = obj.get('title','').lower()\n if object_name not in CLASS_NAMES:\n continue\n label_box = obj.get('bbox',{})\n image_boxes[object_name].append(\n dlib.rectangle(left=long(label_box.get(\"left\")), top=long(label_box.get(\"top\")), right=long(label_box.get(\"width\")), bottom=long(label_box.get(\"height\")))\n )\n for class_name in CLASS_NAMES:\n boxes[class_name].append(image_boxes[class_name])\n\n return images, boxes", "def 
get_best_bbox_of_class(bboxes, class_index):\n obj_to_find_score, obj_to_find_bbox = 0., None\n new_bboxes = []\n filtered_bboxes = []\n for bbox in bboxes:\n if bbox.get_label() == class_index and bbox.get_score() > obj_to_find_score:\n obj_to_find_score = bbox.get_score()\n obj_to_find_bbox = bbox\n elif bbox.get_label() == class_index and bbox.get_score() <= obj_to_find_score:\n filtered_bboxes.append(bbox)\n else:\n new_bboxes.append(bbox)\n if obj_to_find_bbox:\n new_bboxes.append(obj_to_find_bbox)\n\n return new_bboxes, obj_to_find_bbox, filtered_bboxes", "def visualize_bbox(img, bbox, class_name, color=(255, 0, 0) , thickness=2):\n BOX_COLOR = (255, 0, 0) # Red\n TEXT_COLOR = (255, 255, 255) # White\n\n x_min, y_min, x_max, y_max = bbox\n\n cv2.rectangle(img, (x_min, y_min), (x_max, y_max), color=color, thickness=thickness)\n\n ((text_width, text_height), _) = cv2.getTextSize(class_name, cv2.FONT_HERSHEY_SIMPLEX, 0.35, 1)\n cv2.rectangle(img, (x_min, y_min - int(1.3 * text_height)), (x_min + text_width, y_min), BOX_COLOR, -1)\n cv2.putText(\n img,\n text=class_name,\n org=(x_min, y_min - int(0.3 * text_height)),\n fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=0.35,\n color=TEXT_COLOR,\n lineType=cv2.LINE_AA,\n )\n return img", "def draw_bbox(image, bboxes, classes_file_path, show_label = True, show_confidence = True, Text_colors = (255,255,0), \n rectangle_colors = '', tracking = False):\n \n # obtain list of classes name \n classes = read_class_names(classes_file_path)\n \n # obtain length of classes \n num_classes = len(classes)\n \n # obtain shape of image\n image_h, image_w, _ = image.shape\n \n # obtain list of unique hsv (hue, saturation, value) for each class\n hsv_tuples = [(1.0 * x / num_classes, 1., 1.) for x in range(num_classes)]\n \n # obtain unique rgb tuples from hsv tuples\n colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))\n \n # scale rgb from 0-1 to 0-255 \n colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))\n \n # shuffle colors list with same seed\n random.seed(0)\n random.shuffle(colors)\n random.seed(None)\n \n # iterate over bbox in bboxes\n for i, bbox in enumerate(bboxes):\n \n # obtain coordinates of bbox\n coor = np.array(bbox[:4], dtype = np.int32)\n \n # obtain objectiveness score\n score = bbox[4]\n \n # obtain class index\n class_ind = int(bbox[5])\n \n # choose rectangle color if none is given, else chose from tuple\n bbox_color = rectangle_colors if rectangle_colors != '' else colors[class_ind]\n \n # obtain thickness of bboxes\n bbox_thick = int(0.6 * (image_h + image_w) / 1000)\n if bbox_thick < 1: bbox_thick = 1\n \n # obtain font scale\n fontScale = 0.75 * bbox_thick\n \n # obtain tuples of min and max coordinates\n (x1, y1), (x2, y2) = (coor[0], coor[1]), (coor[2], coor[3])\n\n # generate bbox\n cv2.rectangle(image, (x1, y1), (x2, y2), bbox_color, bbox_thick * 2)\n \n # if show label is true\n if show_label:\n \n # get objectiveness score label\n score_str = \" {:.2f}\".format(score) if show_confidence else \"\"\n \n # if tracking show whole score without rounding\n if tracking: score_str = \" \" + str(score)\n \n # obtain label of class name with objectiveness score\n label = \"{}\".format(classes[class_ind]) + score_str\n \n # get text size \n (text_width, text_height), baseline = cv2.getTextSize(label, cv2.FONT_HERSHEY_COMPLEX_SMALL,\n fontScale, thickness = bbox_thick)\n # put filled text rectangle\n cv2.rectangle(image, (x1, y1), (x1 + text_width, y1 - text_height - baseline), 
bbox_color, \n thickness = cv2.FILLED)\n\n # put text above rectangle\n cv2.putText(image, label, (x1, y1 - 4), cv2.FONT_HERSHEY_COMPLEX_SMALL,\n fontScale, Text_colors, bbox_thick, lineType = cv2.LINE_AA)\n\n return image", "def get_instance_bounding_box(img, bounding_boxes, instance):\n mask = np.zeros(img.shape, dtype=np.uint16)\n mask[img == instance] = 1\n ret, threshed = cv.threshold(mask, 0, 2 ** 16, cv.THRESH_BINARY)\n compressed = threshed.astype(np.uint8)\n contours, hierarchy = cv.findContours(compressed, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)\n x, y, w, h = cv.boundingRect(contours[0])\n cv.rectangle(bounding_boxes, (x, y), (x + w, y + h), (randint(25, 255), randint(25, 255), randint(25, 255)), 3)\n img2 = contours = hierarchy = mask = None", "def detect_class_onpic(boxes, allowed_classes):\n object_class = \"all\"\n highest_prob = 0\n for box in boxes:\n box_prob = float(box[1].strip('%')) / 100.0\n if box[0] in allowed_classes and box_prob > highest_prob:\n highest_prob = box_prob\n object_class = box[0]\n return object_class, highest_prob", "def get_bounding_boxes(dets):\n bounding_boxes = []\n for box in dets:\n bounding_box = {'top_left_x': box.left(),\n 'top_left_y': box.top(),\n 'bottom_right_x': box.right(),\n 'bottom_right_y': box.bottom()}\n bounding_boxes.append(bounding_box)\n return bounding_boxes", "def _load_bboxes_depth(self, results):\n results[\"centers2d\"] = results[\"ann_info\"][\"centers2d\"]\n results[\"depths\"] = results[\"ann_info\"][\"depths\"]\n return results", "def plt_bboxes(img, classes, scores, bboxes, figsize=(10,10), linewidth=1.5):\n fig = plt.figure(figsize=figsize)\n plt.imshow(img)\n height = img.shape[0]\n width = img.shape[1]\n colors = dict()\n for i in range(classes.shape[0]):\n cls_id = int(classes[i])\n if cls_id >= 0:\n score = scores[i]\n if cls_id not in colors:\n colors[cls_id] = (random.random(), random.random(), random.random())\n ymin = int(bboxes[i, 0] * height)\n xmin = int(bboxes[i, 1] * width)\n ymax = int(bboxes[i, 2] * height)\n xmax = int(bboxes[i, 3] * width)\n# crop_img = img[xmin:(xmax - xmin),xmax:(ymax - ymin)]\n# misc.imsave('1.jpg', crop_img)\n rect = plt.Rectangle((xmin, ymin), xmax - xmin,\n ymax - ymin, fill=False,\n edgecolor=colors[cls_id],\n linewidth=linewidth)\n plt.gca().add_patch(rect)\n class_name = CLASSES[cls_id]\n plt.gca().text(xmin, ymin - 2,\n '{:s} | {:.3f}'.format(class_name, score),\n bbox=dict(facecolor=colors[cls_id], alpha=0.5),\n fontsize=12, color='white')\n plt.show()", "def annotation_to_dict(self):\r\n out_dict = {'bbox_center': self.bbox_center,\r\n 'bbox_size': np.array(self.bbox_size, dtype=\"float64\").tolist(),\r\n 'rotation': self.get_box_angle(),\r\n 'label': self.classname}\r\n return out_dict", "def get_box_data(index, hdf5_data):\n meta_data = dict()\n meta_data['height'] = []\n meta_data['label'] = []\n meta_data['left'] = []\n meta_data['top'] = []\n meta_data['width'] = []\n\n def print_attrs(name, obj):\n vals = []\n if obj.shape[0] == 1:\n vals.append(obj[0][0])\n else:\n for k in range(obj.shape[0]):\n vals.append(int(hdf5_data[obj[k][0]][0][0]))\n meta_data[name] = vals\n\n box = hdf5_data['/digitStruct/bbox'][index]\n hdf5_data[box[0]].visititems(print_attrs)\n return meta_data", "def get_bounding_boxes(frame):\n\n blob = cv2.dnn.blobFromImage(frame,1/255,(320,320),(0,0,0),1,crop=False)\n net.setInput(blob)\n\n output_layer_names = net.getUnconnectedOutLayersNames()\n layer_outputs = net.forward(output_layer_names)\n\n all_boxes, confidences = 
get_all_boxes(layer_outputs)\n\n indexes=cv2.dnn.NMSBoxes(all_boxes,confidences,0.5,0.3)\n\n return indexes, confidences, all_boxes", "def get_human_box_detection(boxes,scores,classes,height,width):\n\t# print(boxes)\n\tarray_boxes = list() # Create an empty list\n\tfor i in range(boxes.shape[1]):\n\t\t# If the class of the detected object is 1 and the confidence of the prediction is > 0.75\n\t\tif int(classes[i]) == 0 and scores[i] > 0.5:\n\t\t\t# Multiply the X coordonnate by the height of the image and the Y coordonate by the width\n\t\t\t# To transform the box value into pixel coordonate values.\n\t\t\tbox = [boxes[0,i,0],boxes[0,i,1],boxes[0,i,2],boxes[0,i,3]] * np.array([height, width, height, width])\n\t\t\t# Add the results converted to int\n\t\t\tarray_boxes.append((int(box[0]),int(box[1]),int(box[2]),int(box[3])))\n\treturn array_boxes", "def plt_bboxes(img, classes, scores, bboxes, figsize=(17.78,10), linewidth=1.5):\n fig = plt.figure(figsize=figsize, frameon=False)\n ax = fig.add_axes([0, 0, 1, 1])\n ax.axis('off')\n plt.imshow(img)\n height = img.shape[0]\n width = img.shape[1]\n print (\"original height width\", height, width)\n if (classes.shape[0] > 0):\n print (\"This frame has class\")\n for i in range(classes.shape[0]):\n cls_id = int(classes[i])\n if cls_id >= 0:\n score = scores[i]\n if cls_id not in colors:\n colors[cls_id] = (random.random(), random.random(), random.random())\n ymin = int(bboxes[i, 0] * height)\n xmin = int(bboxes[i, 1] * width)\n ymax = int(bboxes[i, 2] * height)\n xmax = int(bboxes[i, 3] * width)\n rect = plt.Rectangle((xmin, ymin), xmax - xmin,\n ymax - ymin, fill=False,\n edgecolor=colors[cls_id],\n linewidth=linewidth)\n plt.gca().add_patch(rect)\n class_name = pascal_classes[cls_id]\n plt.gca().text(xmin, ymin - 2,\n '{:s} | {:.3f}'.format(class_name, score),\n bbox=dict(facecolor=colors[cls_id], alpha=0.5),\n fontsize=12, color='white')\n fig.canvas.draw()\n data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')\n data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n plt.close()\n print(\"Processed data with shape, \", data.shape)\n return data", "def _load_bboxes_3d(self, results):\n results[\"gt_bboxes_3d\"] = results[\"ann_info\"][\"gt_bboxes_3d\"]\n results[\"bbox3d_fields\"].append(\"gt_bboxes_3d\")\n return results", "def _nms_boxes(self, boxes, scores):\n all_boxes = [[] for _ in range(self.num_classes)]\n # skip j = 0, because it's the background class\n for class_id in range(1, self.num_classes):\n # Whether to use only the top class for each box or\n # all classes over a certain threshhold.\n if self.top_class_only:\n detection_criterion = (np.argmax(scores, axis=1) == class_id)\n else:\n detection_criterion = (\n scores[:, class_id] > self.class_detection_thresh)\n class_detected_indexes = np.where(detection_criterion)[0]\n\n cls_scores = scores[class_detected_indexes, class_id]\n class_box_start = class_id * 4\n class_box_end = class_box_start + 4\n cls_boxes = boxes[class_detected_indexes,\n class_box_start:class_box_end]\n\n cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])\n ).astype(np.float32, copy=False)\n\n if len(cls_dets) > 1:\n keep = nms(cls_dets, self.nms_thresh, force_cpu=True)\n cls_dets = cls_dets[keep, :]\n all_boxes[class_id] = cls_dets\n return all_boxes", "def getbbox(self):\n pass", "def _predict_boxes_and_classes(self, feature_map):\n\n spatial_averaged_feature_map = tf.reduce_mean(\n feature_map, [1, 2], keepdims=True, name='AvgPool')\n\n flattened_feature_map = 
tf.squeeze(spatial_averaged_feature_map)\n with slim.arg_scope(self._fc_hyperparams_fn()):\n box_encodings = slim.fully_connected(\n flattened_feature_map,\n self._num_classes * self._box_code_size,\n activation_fn=None,\n scope='BoxEncodingPredictor')\n class_predictions = slim.fully_connected(\n flattened_feature_map,\n self._num_classes + 1,\n activation_fn=None,\n scope='ClassPredictor')\n\n box_encodings = tf.reshape(\n box_encodings, [-1, 1, self._num_classes, self._box_code_size])\n class_predictions = tf.reshape(\n class_predictions, [-1, 1, self._num_classes + 1])\n\n return box_encodings, class_predictions", "def bounds(self):\n return self._bboxes[0][0] #TODO: merge all coverages", "def draw_box(image, boxes, class_name, score, max_boxes=10, min_score=0.1):\n colors = list(ImageColor.colormap.values())\n font = ImageFont.load_default()\n\n ymin, xmin, ymax, xmax = tuple(boxes)\n display_str = \"{}: {}%\".format(class_name, int(100 * score))\n color = colors[hash(class_name) % len(colors)]\n draw_bounding_box_on_image(\n image,ymin,xmin,ymax,xmax,color,font,display_str_list=[display_str]\n )" ]
[ "0.6812948", "0.6808152", "0.6690509", "0.65427005", "0.6401556", "0.63900846", "0.61920977", "0.6154504", "0.614809", "0.6109047", "0.6102897", "0.6070463", "0.605929", "0.59976906", "0.5995844", "0.5992272", "0.5982084", "0.5980321", "0.596288", "0.59605664", "0.5936286", "0.5930418", "0.59075755", "0.5895757", "0.5863435", "0.5857319", "0.58441603", "0.5838924", "0.58349484", "0.5824699" ]
0.79432786
0
Create data object storing the synapse id. Id is none if
def __init__(self, synid=None, data_uri=None):
    self.synid = synid
    self.data_uri = data_uri
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, id, data={}):\n\t\tself.__id = id\n\t\tself.__dStore = data", "def payload_creation(self, id, data):\n\n payload = {\n 'UUID': self.uuid,\n 'ID': id,\n 'RATE': self.rate,\n 'GPIO': data[2],\n 'DDL': self.ddl,\n 'VALUE': data[1],\n 'TIME': data[0]\n }\n return payload", "def create(self, identity, data=None, record=None, **kwargs):\n record.metadata = data.get('metadata', {})", "def __init__(self, id):\n \n self.id = id", "def __init__(self, _id: Generator[str, None, None], data: dict, obs: OBS) -> None:\r\n\r\n self.id: Generator[str, None, None] = _id\r\n self.data: dict = data\r\n self.obs: OBS = obs", "def __init__(self,\n id: str) -> None:\n self.id = id", "def __init__(self,\n id: str) -> None:\n self.id = id", "def __init__(self,\n id: str) -> None:\n self.id = id", "def __init__(self,\n id: str) -> None:\n self.id = id", "def __init__(__self__, *,\n id: Optional[str] = None):\n if id is not None:\n pulumi.set(__self__, \"id\", id)", "def __init__(__self__, *,\n id: Optional[str] = None):\n if id is not None:\n pulumi.set(__self__, \"id\", id)", "def __init__(self, id: str):\n self.id = id", "def __init__(__self__, *,\n id: str):\n pulumi.set(__self__, \"id\", id)", "def __init__(__self__, *,\n id: str):\n pulumi.set(__self__, \"id\", id)", "def __init__(__self__, *,\n id: Optional[pulumi.Input[str]] = None):\n if id is not None:\n pulumi.set(__self__, \"id\", id)", "def __init__(__self__, *,\n id: Optional[pulumi.Input[str]] = None):\n if id is not None:\n pulumi.set(__self__, \"id\", id)", "def __init__(__self__, *,\n id: Optional[pulumi.Input[str]] = None):\n if id is not None:\n pulumi.set(__self__, \"id\", id)", "def __init__(__self__, *,\n id: Optional[pulumi.Input[str]] = None):\n if id is not None:\n pulumi.set(__self__, \"id\", id)", "def __init__(__self__, *,\n id: Optional[pulumi.Input[str]] = None):\n if id is not None:\n pulumi.set(__self__, \"id\", id)", "def __init__(__self__, *,\n id: Optional[pulumi.Input[str]] = None):\n if id is not None:\n pulumi.set(__self__, \"id\", id)", "def __init__(__self__, *,\n id: Optional[pulumi.Input[str]] = None):\n if id is not None:\n pulumi.set(__self__, \"id\", id)", "def instantiate(cls, data_store, identifier):\n pass", "def create_ds_config_ds(self):\n\n\n sqlObj = _DS_config_DS_SQL()\n value_id_ds_conf_ds = sqlObj.insert_DS_conf_DS_return_id_if_not_exists(self.value_configuration, self.FK_id_configuration_DCT_DCD, self.FK_id_dataset_DS_DCD)\n \n self.id_ds_conf_ds = value_id_ds_conf_ds\n return value_id_ds_conf_ds", "def __init__(__self__, *,\n dataset_id: Optional[pulumi.Input[str]] = None):\n if dataset_id is not None:\n pulumi.set(__self__, \"dataset_id\", dataset_id)", "def create(self, data):\n raise NotImplementedError", "def id(self):\n return self.data.id", "def test_data_source_soaps_id_dynamic_datas_post(self):\n pass", "def __init__(__self__, *,\n id: str,\n name: str,\n system_data: 'outputs.SystemDataResponse',\n type: str,\n properties: Optional['outputs.AzureDevOpsOrgPropertiesResponse'] = None):\n pulumi.set(__self__, \"id\", id)\n pulumi.set(__self__, \"name\", name)\n pulumi.set(__self__, \"system_data\", system_data)\n pulumi.set(__self__, \"type\", type)\n if properties is not None:\n pulumi.set(__self__, \"properties\", properties)", "def test_data_source_soaps_id_put(self):\n pass", "def __init__(__self__, *,\n id: pulumi.Input[str],\n parameters: Optional[pulumi.Input[Mapping[str, Any]]] = None):\n pulumi.set(__self__, \"id\", id)\n if parameters is not None:\n pulumi.set(__self__, 
\"parameters\", parameters)" ]
[ "0.6683341", "0.5950109", "0.5892375", "0.5882798", "0.5797706", "0.57971925", "0.57971925", "0.57971925", "0.57971925", "0.57885116", "0.57885116", "0.5745124", "0.574388", "0.574388", "0.56933784", "0.56933784", "0.56933784", "0.56933784", "0.56933784", "0.56933784", "0.56933784", "0.567264", "0.56624305", "0.5616885", "0.5583593", "0.55654424", "0.5559687", "0.5521137", "0.55210364", "0.55145526" ]
0.6004968
1
Retrieves data from data_uri and stores it in the local cache.
def getData(self, local_cache):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _retrieveCachedData(self):", "def read_data_cache(self):\n if os.path.exists(self.cache_filename):\n return self.read_data_cache_file()\n else:\n data = self._empty_data()\n self.write_data_cache(data)\n return data", "def get_uri(\n uri,\n ignore_fail=False,\n cache_data=False,\n cacheage=900,\n cachedir=\".\"\n):\n if pyversion(\"3\"):\n import urllib, urllib.error, urllib.request\n URLError = urllib.error.URLError\n urlopen = urllib.request.urlopen\n else:\n import urllib2 as urllib\n URLError = urllib.URLError\n urlopen = urllib.urlopen\n import os, time\n if cache_data:\n dcachedir = os.path.join( os.path.expanduser(cachedir), \"datacache\" )\n if not os.path.exists(dcachedir):\n try: os.makedirs(dcachedir)\n except (IOError, OSError): pass\n dcache_fn = os.path.join(\n dcachedir,\n uri.split(\":\")[1].replace(\"/\",\"_\")\n )\n now = time.time()\n if cache_data and os.access(dcache_fn, os.R_OK) \\\n and now-cacheage < os.stat(dcache_fn).st_mtime <= now:\n dcache_fd = open(dcache_fn)\n data = dcache_fd.read()\n dcache_fd.close()\n else:\n try:\n if pyversion(\"3\"): data = urlopen(uri).read().decode(\"utf-8\")\n else: data = urlopen(uri).read()\n except URLError:\n if ignore_fail: return \"\"\n else:\n import os, sys, traceback\n message = \"%s error: failed to retrieve\\n %s\\n %s\" % (\n os.path.basename( sys.argv[0] ),\n uri,\n traceback.format_exception_only(\n sys.exc_type,\n sys.exc_value\n )[0]\n )\n sys.stderr.write(message)\n sys.exit(1)\n if cache_data:\n try:\n import codecs\n dcache_fd = codecs.open(dcache_fn, \"w\", \"utf-8\")\n dcache_fd.write(data)\n dcache_fd.close()\n except (IOError, OSError): pass\n return data", "def fetch_data(data_url):\n return requests.get(data_url).content", "def set_to_cache(self, url, data):\n cache_key, cache_lookup = self.get_cacheable_info(url)\n MEM_CACHE[cache_key][cache_lookup] = (data, time.time())", "def fetch(self):\r\n if not self._fetched:\r\n self._fetched = True\r\n self.data = query_cache.get(self.iden) or []", "def _load_for_cache(self, parsed_uri, session):\n remote_uri = \"{}://{}/{}\".format(parsed_uri.scheme, parsed_uri.netloc, parsed_uri.path)\n if self.verbose:\n print(\"Loading URI {}\".format(remote_uri), file=sys.stderr)\n response = session.get(remote_uri)\n try:\n response.raise_for_status()\n except HTTPError as e:\n raise RefResolutionException(\n \"Could not load file {}\".format(parsed_uri.geturl())\n ) from e\n remote_json = self._load_json(response)\n return remote_json", "def _load_for_cache(self, doc_uri, doc, parsed_uri):\n remote_uri = '{}://{}/{}'.format(\n parsed_uri.scheme, parsed_uri.netloc, parsed_uri.path)\n if self.verbose:\n print('Loading URI {}'.format(remote_uri), file=sys.stderr)\n response = self.session.get(remote_uri)\n try:\n response.raise_for_status()\n except HTTPError as e:\n raise RefResolutionException(\n 'Could not load file {}'.format(parsed_uri.geturl()))\n remote_json = self._load_json(response)\n return remote_json", "def cache_data(name, data):\n cache_path = get_cachefile('%s.cache' % name)\n with open(cache_path, 'wb') as f:\n pickle.dump(data, f)", "def reload_cache(self):\n self.data = self.read_data_cache()", "def get_data(self, name):\n assert name, \"Must input a valid dataset name.\"\n try:\n return self.data[\"dataset\"][name]\n except KeyError:\n raise KeyError(\"The dataset \\'{}\\' does not exist in the cache.\".format(name))", "def fetch_and_cache(data_url, file, data_dir=\"data\", force=False):\n data_dir = Path(data_dir)\n data_dir.mkdir(exist_ok = True)\n 
file_path = data_dir / Path(file)\n if force and file_path.exists():\n file_path.unlink()\n if force or not file_path.exists():\n print('Downloading...', end=' ')\n resp = requests.get(data_url)\n with file_path.open('wb') as f:\n f.write(resp.content)\n print('Done!')\n last_modified_time = time.ctime(file_path.stat().st_mtime)\n else:\n last_modified_time = time.ctime(file_path.stat().st_mtime)\n print(\"Using cached version that was downloaded (UTC):\", last_modified_time)\n return file_path", "def _load_data(self):\n path = os.path.join(self._cache_path, '%s.data' % self._name)\n\n if not os.path.exists(path):\n raise IOError('Data cache missing at %s' % path)\n\n f = bz2.BZ2File(path)\n data = pickle.loads(f.read())\n f.close()\n\n return data", "def data(self):\n if self._data is None:\n try:\n with open(self.storage_path, 'r') as cache_file:\n self._data = json.load(cache_file)\n except FileNotFoundError:\n self._data = {}\n return self._data", "def read_data_cache_file(self):\n with open(self.cache_filename, 'r') as json_data:\n return json.load(json_data)", "def cache_matrio_data(filename):\n prefix = \"https://data.matr.io/3/api/v1/file\"\n key = MATRIO_DATA_KEYS[filename]\n if not os.path.isfile(filename):\n cache_download(\"{}/{}/download\".format(prefix, key), filename)", "def _fetch_data(self):\n pass", "def cache_data(self):\n # Initialize key variables\n result = self.data['cache_data']\n return result", "def refresh(self):\r\n data = super(Share, self)._fetch()\r\n self.data_set = data[self.symbol]\r\n return data[self.symbol]", "def get_data(self, url):\n return self.get(url).get('data', [])", "def load_url_data(url, fname=None, cache_time=0, nretry=3, sleeptime=60):\n\n content = None\n if url and is_file_expired(fname, cache_time): # load data into temporary cache file\n for trial in range(nretry):\n if content:\n break\n try:\n if os.path.isfile(url):\n logger.info('[attempt=%s] loading data from file=%s' % (trial, url))\n with open(url, \"r\") as f:\n content = f.read()\n else:\n logger.info('[attempt=%s] loading data from url=%s' % (trial, url))\n content = urllib2.urlopen(url, timeout=20).read()\n\n if fname: # save to cache\n with open(fname, \"w+\") as f:\n f.write(content)\n logger.info('saved data from \"%s\" resource into file=%s, length=%.1fKb' %\n (url, fname, len(content) / 1024.))\n return content\n except Exception as e: # ignore errors, try to use old cache if any\n logger.warning('failed to load data from url=%s, error: %s .. 
trying to use data from cache=%s' %\n (url, e, fname))\n # will try to use old cache below\n if trial < nretry - 1:\n logger.info(\" -- DEPRECATED-- will try again after %ss..\" % sleeptime)\n from time import sleep\n sleep(sleeptime)\n\n if content is not None: # just loaded\n return content\n\n try:\n with open(fname, 'r') as f:\n content = f.read()\n except Exception as e:\n logger.warning(\"%s (will try different source)\" % e)\n return None\n\n return content", "def get_fred_data(url):\n pass", "def fetch_data():\n data.fetch_data()\n data.start_updating()", "def read_cache(self):\n with open(self.get_cache_filename(), 'rb') as f:\n data = pickle.loads(f.read())\n self.timestamp = data['timestamp']\n self.cache = data['cache']", "def download_data(overwrite=False):\n\n filename = _data_url.split('/')[-1]\n base_dir = get_cachedir()\n if base_dir is not None:\n dest = base_dir / filename\n else:\n print('No cache dir found, not downloading anything.')\n return\n\n if (dest.exists() and (overwrite is False)):\n print(\"Destination path {} already exists, use overwrite=True \"\n \"to force an overwrite.\".format(dest))\n return\n\n print(\"Trying to fetch {}\".format(_data_url))\n with _TqdmUpTo(unit='B', unit_scale=True, miniters=1,\n desc=filename) as t:\n urlretrieve(_data_url, filename=str(dest), reporthook=t.update_to)\n\n _decompress_data()", "def get_data(self, path):\n\n if path == self.original_path:\n cache = self._2to3_cache_path(path)\n data = self._load_cached_2to3(path, cache)\n if data is None:\n output, encoding = self._refactor_2to3(path)\n data = bytearray(output, encoding or sys.getdefaultencoding())\n self.set_data(cache, data)\n return data\n\n else:\n return super().get_data(path)", "def download_data(self, format = 'srt'):\n resp, content = httplib2.Http(\".cache\").request(self.url, \"GET\")\n suburl = json.loads(content)['url']\n resp, content = httplib2.Http(\".cache\").request(suburl, \"GET\")\n\n return content", "def get_data(self):\n\n self.set_query_string()\n self.realtime_data = super().get_data()\n self.set_coordinate()\n return self.realtime_data", "def retrieve(self, url, filename, reporthook=None, data=None, cont=None):\n url = urllib.unwrap(urllib.toBytes(url))\n if self.tempcache and url in self.tempcache:\n return self.tempcache[url]\n type, url1 = urllib.splittype(url)\n if filename is None and (not type or type == 'file'):\n try:\n fp = self.open_local_file(url1)\n hdrs = fp.info()\n del fp\n return urllib.url2pathname(urllib.splithost(url1)[1]), hdrs\n except IOError, msg:\n pass\n bs = 1024*8\n size = -1\n read = 0\n blocknum = 0\n if cont:\n localsize = self.continue_file(filename)\n read = localsize\n blocknum = localsize / bs\n fp = self.open(url, data)\n headers = fp.info()\n if cont:\n if (self.fetcher.proto == self.fetcher.PROTO_HTTP and\n not (headers.dict.get(\"content-range\") or\n headers.dict.get(\"Content-Range\"))):\n raise ResumeNotSupported\n tfp = open(filename, 'rb+')\n tfp.seek(-self.checksum_size, os.SEEK_END)\n local = tfp.read(self.checksum_size)\n remote = fp.read(self.checksum_size)\n if not local == remote:\n raise ResumeChecksumFailed\n else:\n tfp = open(filename, 'wb')\n result = filename, headers\n if self.tempcache is not None:\n self.tempcache[url] = result\n if reporthook:\n if \"content-length\" in headers:\n size = int(headers[\"Content-Length\"])\n if cont and self.fetcher.proto == self.fetcher.PROTO_HTTP:\n size = size + localsize - self.checksum_size\n reporthook(blocknum, bs, size)\n while 1:\n block = 
fp.read(bs)\n if block == \"\":\n break\n read += len(block)\n tfp.write(block)\n blocknum += 1\n if reporthook:\n reporthook(blocknum, bs, size)\n fp.close()\n tfp.close()\n del fp\n del tfp\n\n # raise exception if actual size does not match content-length header\n if size >= 0 and read < size:\n raise urllib.ContentTooShortError(\"retrieval incomplete: got only %i out \"\n \"of %i bytes\" % (read, size), result)\n\n return result", "def get_data(self):\n data_i = sqlite3.connect('data::memory:', check_same_thread=False)\n data_cursor = data_i.cursor()\n data_cursor.execute('SELECT * FROM localdata')\n item = data_cursor.fetchall()\n data_i.commit()\n data_i.close()\n return self.generate_data_list(item)" ]
[ "0.6769925", "0.6302919", "0.61490214", "0.613357", "0.61221176", "0.61186427", "0.6081297", "0.6054003", "0.60285616", "0.59756505", "0.58839726", "0.5862857", "0.5859773", "0.5847547", "0.580898", "0.5805113", "0.57414836", "0.57271916", "0.56812966", "0.56810653", "0.56489444", "0.5625874", "0.56190884", "0.5611498", "0.5586663", "0.5584763", "0.5582992", "0.55769825", "0.5576833", "0.5565975" ]
0.66638345
1
Changing retraction speed for a specific source file. The user has to define the initial speed and the layer distance, which specifies the number of layers between steps.
def change_retraction_speed(gcode_source=None,
                            gcode_target=None,
                            initial_retraction_speed=None,
                            retraction_speed_steps=None,
                            layer_distance=None):
    current_retraction_speed_at = initial_retraction_speed
    current_layer_at = None
    currently_extruder_at = None
    lines = gcode_source.readlines()
    for line in lines:

        if get_current_layer(line) is not None:
            current_layer_at = get_current_layer(line)
            log_layer_line(current_layer_at)
            # We increment retraction distance if required
            if not_initial_layer(current_layer_at) and have_to_change_variable_at_layer(current_layer_at, layer_distance):
                current_retraction_speed_at += retraction_speed_steps

        if current_layer_at is not None and currently_extruder_at is not None:
            # Changing the retraction setting derived from the original
            if is_changing_only_extruder(line):
                feed_rate = get_feed_rate(line)
                log_retraction_speed_change(current_retraction_speed_at=current_retraction_speed_at,
                                            feed_rate=feed_rate,
                                            line=line)
                line = line.replace(str(feed_rate), str(current_retraction_speed_at))

        if is_printing(line):
            currently_extruder_at = get_extruder_position(line)

        gcode_target.writelines(line)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def change_retraction_distance(gcode_source=None,\r\n gcode_target=None,\r\n initial_retraction_distance=None,\r\n retraction_distance_step=None,\r\n layer_distance=None):\r\n current_retraction_distance_at = initial_retraction_distance\r\n current_layer_at = None\r\n currently_extruder_at = None\r\n lines = gcode_source.readlines()\r\n for line in lines:\r\n\r\n if get_current_layer(line) is not None:\r\n current_layer_at = get_current_layer(line)\r\n log_layer_line(current_layer_at)\r\n # We increment retraction distance if required\r\n if not_initial_layer(current_layer_at) and have_to_change_variable_at_layer(current_layer_at, layer_distance):\r\n current_retraction_distance_at += retraction_distance_step\r\n\r\n if current_layer_at is not None and currently_extruder_at is not None:\r\n # Changing the retraction setting derived from the original\r\n if is_changing_only_extruder(line):\r\n new_extruder_at = get_extruder_position(line)\r\n # Check if this is a real extraction\r\n # TODO do we need to change negative extractions?\r\n if is_not_negative_extrusion(new_extruder_at) and is_retraction(new_extruder_at, currently_extruder_at):\r\n # We recalculate the extrusion value\r\n retracted_extruder_at = round(currently_extruder_at - current_retraction_distance_at, 5)\r\n log_retraction_distance_change(new_extruder_at=new_extruder_at,\r\n retracted_extruder_at=retracted_extruder_at,\r\n current_retraction_distance_at=current_retraction_distance_at,\r\n current_layer_at=current_layer_at,\r\n line=line)\r\n line = line.replace(str(new_extruder_at), str(retracted_extruder_at))\r\n\r\n if is_printing(line):\r\n currently_extruder_at = get_extruder_position(line)\r\n\r\n gcode_target.writelines(line)", "def set_speed():\n pass", "def change_speed(self, action):\r\n if action == \"faster\":\r\n self.speed += 1\r\n else:\r\n if self.speed > 1:\r\n self.speed -= 1", "def increase_speed(self):\n self.state['speed_boost'] = True\n self.speed = self.maze.block_size / 8", "def setSpeed(self, v):\n\t\tconverted = self.convertSpeed(v)\n\t\tprint(converted)\n\t\t# set both stage speeds\n\t\tself.zaberSend(self.translation[\"hor\"], self.cmd[\"setTargetSpeed\"], data = converted)\n\t\tself.zaberSend(self.translation[\"ver\"], self.cmd[\"setTargetSpeed\"], data = converted)", "def on_speed_change(self, event) -> None:\r\n\r\n speed_level = int(self.speed_scale.get())\r\n self.animator.time_per_gen = self.TIMES_PER_GEN[speed_level]", "def set_speed(self, new_speed):\n self.__x_speed, self.__y_speed = new_speed", "def step(self):\n if self.change_rate != 0:\n self.speed += stats.norm(loc=0, scale=self.change_rate).rvs()\n\n if self.speed < 0.5 * self._initial_speed:\n self.speed = 0.5 * self._initial_speed\n if self.speed > 2.0 * self._initial_speed:\n self.speed = 2.0 * self._initial_speed\n else:\n pass", "def increase_speed(self):\n self.target_speed *= self.speedup_scale\n self.bullet_speed_factor *= self.speedup_scale", "def speed(self, s=0):", "def increase_car_speed(self):\r\n self.car_speed += 5", "def _set_target_speed(self, target_speed: int):\n self._target_speed = target_speed\n self._local_planner.set_speed(target_speed)", "def increment_speed(self):\n self.speed += 0.0004", "def set_speed(self,speed):\n self.speed = speed", "def set_custom_speed(self, bytes_per_second):\n self._custom_speed = bytes_per_second", "def setSpeedEngine1(speed: int):\n pass", "def increase_speed(self):\n self.ship_speed_factor *= self.speed_up_scale\n self.bullet_speed_factor *= self.speed_up_scale\n 
self.alien_speed_factor *= self.speed_up_scale", "def set_speed(self,speed):\n self.speed_p = speed", "def set_speed(self, ratio):\n self._speed = ratio", "def smooth_drive(self, distance, linear_speed):\n ### EXTRA CREDIT\n # TODO\n pass # delete this when you implement your code", "def set_speed(self, speed):\n self._kernel.set_speed(float(speed))", "def setSpeedEngine2(speed: int):\n pass", "def set_speed(self, SHIP_MOVEMENT):\n self._speed = SHIP_MOVEMENT", "def increase_speed(self, character):\n character.speed = min(character.max_steps/4, character.speed * 1.25)", "def increase_speed(self):\n self.ship_speed_factor *= self.speedup_scale\n self.bullet_speed_factor *= self.speedup_scale\n self.alien_speed_factor *= self.speedup_scale\n self.alien_points = int(self.alien_points * self.score_scale)", "def optimization_step(self):\n \n if \"CSS\" in self.algorithm:\n \n input_dict = {self.x: self.train_inputs[self.minibatch_set,:]}\n \n var_list = [self.x_tilda, self.minibatch_set]\n \n if (self.num_samples > 0) and (not self.mixture):\n \n if ((self.mf_steps > 0) and self.alpha >0) or\\\n self.gibbs_steps > 0: \n \n var_list.append(self.sampler_theta)\n \n elif \"CD\" in self.algorithm:\n \n input_dict = {self.x : self.train_inputs[self.minibatch_set,:]} \n \n var_list = [self.minibatch_set]\n \n var_list.append(self.learning_rate)\n \n if self.use_momentum:\n \n var_list.append(self.momentum)\n \n output_vars = [self.pseudo_cost]\n \n if self.report_p_tilda:\n \n output_vars.append(self.p_tilda)\n \n else:\n \n output_vars.append(theano.shared(0))\n \n opt_step = theano.function(inputs = var_list,\n outputs = output_vars,\n updates = self.updates,\n givens = input_dict,\n on_unused_input='warn')\n \n return opt_step", "def changespeed(self, x1, y1):\n self.change_x += x1\n self.change_y += y1", "def setSpeedEngine4(speed: int):\n pass", "def set_speed(self, speed):\n self.speed = speed", "def walk(self):\n self.speed = self.speed + (0.2 * self.legs)" ]
[ "0.68603784", "0.59780365", "0.59389067", "0.5706731", "0.56527776", "0.5626594", "0.55819356", "0.55043226", "0.54412353", "0.5432877", "0.543281", "0.5414724", "0.5414351", "0.53777224", "0.53676", "0.53567505", "0.5352132", "0.5328321", "0.52560544", "0.5254133", "0.5247845", "0.52370787", "0.52312267", "0.5223482", "0.5201517", "0.51968646", "0.5192958", "0.51886225", "0.51873213", "0.5186836" ]
0.7475537
0