query: string (lengths 9 to 9.05k)
document: string (lengths 10 to 222k)
metadata: dict
negatives: sequence (length 30)
negative_scores: sequence (length 30)
document_score: string (lengths 4 to 10)
document_rank: string (2 distinct values)
Write an HTML report. `Cluster_No` and `IsRepr` have to be present in the DataFrame. In the current setting, the largest clusters are at the top of the report, and similar clusters (determined by the chemical similarity of their representative structures) are grouped together. Writes the report to disk as `Clusters.html`. Used in `projects/paint3_anal/190328_cpd_clustering.ipynb`.
def report(
    df: pd.DataFrame,
    id_col: str = "Compound_Id",
    columns: List[str] = ["Compound_Id", "Smiles"],
    title: str = "Cluster Report",
    intro: str = "Large clusters first, similar clusters together.",
):
    def add_cluster(cl_no, sim_to=None):
        if sim_to is None:
            sim_to = ""
            html.append("<hr>")
        else:
            sim_to = f"(similar to {sim_to})"
        mf_cl = mf.MolFrame(df.query("Cluster_No == @cl_no")[columns])
        mf_cl = mf_cl.add_mols()
        html.append(
            f"<br><h2>Cluster {cl_no} ({len(mf_cl.data)} Members)&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;{sim_to}</h2><br>"
        )
        grid = mfv.html_grid(mf_cl.data, id_col="Compound_Id")
        html.append(grid)

    if id_col not in columns:
        columns = [id_col] + columns
    if "Smiles" not in columns:
        columns.append("Smiles")
    df_repr = df.query("IsRepr == 'Yes'").reset_index().drop("index", axis=1)
    chem_sim = {}
    for idx, rec0 in df_repr.iterrows():
        for _, rec1 in df_repr.iloc[idx + 1 :].iterrows():
            cl0 = rec0["Cluster_No"]
            cl1 = rec1["Cluster_No"]
            sim = mf.chem_sim(rec0["Smiles"], rec1["Smiles"])
            chem_sim[(cl0, cl1)] = sim
            chem_sim[(cl1, cl0)] = sim
    cl_sizes = (
        df[["Cluster_No", "Compound_Id"]]
        .groupby(by="Cluster_No")
        .count()
        .reset_index()
        .rename(columns={"Compound_Id": "Size"})
    )
    cl_sizes = cl_sizes.sort_values("Size", ascending=False)
    cl_order = {x: True for x in cl_sizes["Cluster_No"].values}
    html = [f"<h1>{title}</h1><br>{intro}<br><br>"]
    while len(cl_order) > 0:
        cl_no = list(cl_order.keys())[0]
        add_cluster(cl_no)
        cl_order.pop(cl_no)
        to_remove = []
        for sim_cl in cl_order:
            if chem_sim[(cl_no, sim_cl)] > 0.45:
                add_cluster(sim_cl, cl_no)
                to_remove.append(sim_cl)
        for x in to_remove:
            cl_order.pop(x)
    mfht.write(mfht.page("\n".join(html)), "Clusters.html")
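A minimal usage sketch for `report` (hedged: the data values below are invented, and the `mf`, `mfv`, and `mfht` helpers are assumed to be the project's own MolFrame modules, already imported alongside pandas as the function body requires):

import pandas as pd

# Hypothetical clustered data; Cluster_No and IsRepr must be present,
# with exactly one representative row (IsRepr == "Yes") per cluster.
df = pd.DataFrame({
    "Compound_Id": [101, 102, 201, 202],
    "Smiles": ["CCO", "CCN", "c1ccccc1", "Cc1ccccc1"],
    "Cluster_No": [1, 1, 2, 2],
    "IsRepr": ["Yes", "No", "Yes", "No"],
})
report(df, title="Demo Cluster Report")  # writes Clusters.html to the working directory

Clusters whose representative structures exceed the 0.45 chemical-similarity threshold are rendered directly after the cluster they resemble instead of in pure size order.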
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_output_html(tmp_path):\n\n # This will be the file to\n temporary_file = os.path.join(tmp_path, 'test-cluster.html')\n\n # Run clustering on small dummy data (see test_clustering.py)\n cluster = TextClustering(embedding_random_state=42,\n reducer_random_state=43,\n clustering_random_state=44)\n\n X = ['Wellcome Trust',\n 'The Wellcome Trust',\n 'Sir Henry Wellcome',\n 'Francis Crick',\n 'Crick Institute',\n 'Francis Harry Crick']\n\n cluster.fit(X)\n\n # Run the visualisation function with output_file=temporary_file\n visualize_clusters(clustering=cluster, output_file_path=temporary_file, radius=0.01,\n alpha=0.5, output_in_notebook=False)\n\n # Assert that the html was generated correctly\n assert os.path.exists(temporary_file)", "def cluster_classification_tex(f,browsing_matrix,diversifying_matrix, weblog,session_data_threshold,cluster_type,classification_column_diversity,classification_wanted_transaction):\n divpat_classification_wanted_transaction = classification_wanted_transaction\n divpat_N_classification_wanted_transaction=len(divpat_classification_wanted_transaction)\n f.write(\"\\n% 6. Cluster Classification\")\n columns_latex = '|'+'c|'*len(session_data_threshold[cluster_type].unique())\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('DivColumnsLatex',columns_latex)) \n columns_blank = ' ' + '& '*(len(session_data_threshold[cluster_type].unique()) -1)\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('DivColumnsBlank',columns_blank)) \n cluster_list = []\n ieuc_clusters = []\n star_chain_like_clusters = []\n length_clusters = []\n browsing_pattern_1 = []\n browsing_pattern_2 = []\n browsing_pattern_3 = []\n diversifying_pattern_1 = []\n diversifying_pattern_2 = []\n diversifying_pattern_3 = []\n cluster_ids = session_data_threshold[cluster_type].unique()\n cluster_ids.sort()\n for cluster_id in cluster_ids:\n cluster_list.append(str(cluster_id))\n \n cluster_session_list=session_data_threshold[session_data_threshold[cluster_type]==cluster_id].session_id.values\n temp_cluster_weblog=weblog[weblog.session_id.isin(cluster_session_list)]\n pa,pa_names = proportional_abundance(temp_cluster_weblog,'requested_'+classification_column_diversity)\n cluster_entropy=ShannonEntropy(pa,normalize=True)\n \n ieuc_clusters.append(str(round(np.power(2.0,cluster_entropy),2)))\n star_chain_like_clusters.append(star_chain_str(session_data_threshold[session_data_threshold[cluster_type]==cluster_id].star_chain_like.mean()))\n length_clusters.append(length(session_data_threshold[session_data_threshold[cluster_type]==cluster_id].requests.mean()))\n # Browsing patterns\n r,c=np.unravel_index(browsing_matrix[cluster_id][:-1,:-1].argsort(axis=None)[::-1][:3],dims=(divpat_N_classification_wanted_transaction,divpat_N_classification_wanted_transaction))\n browsing_pattern_1.append('%.1f\\%%: %s$\\\\rightarrow$%s'%(100.0*browsing_matrix[cluster_id][r[0],c[0]],divpat_classification_wanted_transaction[r[0]],divpat_classification_wanted_transaction[c[0]]))\n browsing_pattern_2.append('%.1f\\%%: %s$\\\\rightarrow$%s'%(100.0*browsing_matrix[cluster_id][r[1],c[1]],divpat_classification_wanted_transaction[r[1]],divpat_classification_wanted_transaction[c[1]]))\n browsing_pattern_3.append('%.1f\\%%: %s$\\\\rightarrow$%s'%(100.0*browsing_matrix[cluster_id][r[2],c[2]],divpat_classification_wanted_transaction[r[2]],divpat_classification_wanted_transaction[c[2]]))\n \n # Diversifying patterns\n 
r,c=np.unravel_index(np.nan_to_num(diversifying_matrix[cluster_id])[:-1,:-1].argsort(axis=None)[::-1][:3],dims=(divpat_N_classification_wanted_transaction,divpat_N_classification_wanted_transaction))\n diversifying_pattern_1.append('%.1f\\%%: %s$\\\\rightarrow$%s'%(100.0*diversifying_matrix[cluster_id][r[0],c[0]],divpat_classification_wanted_transaction[r[0]],divpat_classification_wanted_transaction[c[0]]))\n diversifying_pattern_2.append('%.1f\\%%: %s$\\\\rightarrow$%s'%(100.0*diversifying_matrix[cluster_id][r[1],c[1]],divpat_classification_wanted_transaction[r[1]],divpat_classification_wanted_transaction[c[1]]))\n diversifying_pattern_3.append('%.1f\\%%: %s$\\\\rightarrow$%s'%(100.0*diversifying_matrix[cluster_id][r[2],c[2]],divpat_classification_wanted_transaction[r[2]],divpat_classification_wanted_transaction[c[2]]))\n\n del temp_cluster_weblog\n \n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('DivClusterList',' & '.join(cluster_list)))\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('DivIEUCClusters',' & '.join(ieuc_clusters)))\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('StarChainClusters',' & '.join(star_chain_like_clusters)))\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('LengthClusters',' & '.join(length_clusters)))\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('BrowsingPatternClustersOne',' & '.join(browsing_pattern_1)))\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('BrowsingPatternClustersTwo',' & '.join(browsing_pattern_2)))\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('BrowsingPatternClustersThree',' & '.join(browsing_pattern_3)))\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('DiversifyingPatternClustersOne',' & '.join(diversifying_pattern_1)))\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('DiversifyingPatternClustersTwo',' & '.join(diversifying_pattern_2)))\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('DiversifyingPatternClustersThree',' & '.join(diversifying_pattern_3)))\n\n return f;", "def cluster_and_render(conf, dbname, outname=\"./text.html\", nclusters=8):\n\n\n db = sqlite3.connect(dbname)\n r = db.execute(\"select min(year), max(year) from counts where conf=?\", (conf,))\n minyear, maxyear = r.fetchone()\n\n # total words per year for normalization purposes\n r = db.execute(\"select year, count(*) from counts where conf=? 
order by year\", (conf,))\n year2c = dict([(year, c) for year, c in r])\n yearcounts = dict2arr(year2c, range(minyear, maxyear+1), 1)\n\n\n def add_content(subcluster, content, suffix):\n \"\"\"\n Render the cluster as an image\n \"\"\"\n\n fname = './plots/%s_%s.png' % (conf, suffix)\n\n # pick the top 10 terms\n subcluster = sorted(subcluster, key=lambda t: max(t[1:].astype(float)), reverse=True)\n subcluster = subcluster[:10]\n\n words = np.array(subcluster)[:,0]\n ys = np.array(subcluster)[:,1:].astype(float)\n mean = [np.mean(ys[:,i]) for i in xrange(ys.shape[1])]\n maxmean = max(mean)\n idx = mean.index(maxmean)\n\n # this is used to make the top-k list in the HTML later\n content.append(('', words, fname, idx))\n\n\n data = []\n for arr in subcluster:\n word = arr[0]\n for x, y in enumerate(map(float, arr[1:])):\n data.append(dict(\n group=\"normal\",\n word=word,\n x=xs[x],\n y=y, \n alpha=0.3\n ))\n\n # add a line for the mean\n for x, y in enumerate(mean):\n data.append(dict(group=\"aggregate\", word='___mean___', x=xs[x], y=y, alpha=1))\n\n if 1:\n maxy = max(10, max(pluckone(data, 'y')))\n if maxy <= 10:\n breaks = [0, 5, 10]\n\n\n # pygg lets you write ggplot2 syntax in python\n p = ggplot(data, aes(x='x', y='y', group='word', color='group', alpha='alpha'))\n p += geom_line(size=1)\n p += scale_color_manual(values=\"c('normal' = '#7777dd','aggregate' = 'black')\", guide=\"FALSE\")\n p += scale_alpha_continuous(guide=\"FALSE\")\n if 1:\n if maxy <= 10:\n p += scale_y_continuous(lim=[0, maxy], breaks=breaks, labels = \"function (x) as.integer(x)\")\n else:\n p += scale_y_continuous(lim=[0, maxy], labels = \"function (x) as.integer(x)\")\n p += legend_bottom\n p += theme(**{\n \"axis.title\":element_blank()\n })\n ggsave(fname, p, width=10, height=4, libs=['grid'])\n \n\n\n def vectors():\n \"\"\"\n Extract a matrix of term count vectors\n\n Return: [\n [word, count1, count2, ...],\n ...\n ]\n \"\"\"\n r = db.execute(\"select word, year, c from counts where conf=? order by word, year\", (conf,))\n vects = defaultdict(dict)\n for w,y,c in r:\n l = vects[w]\n l[y] = float(c) \n\n\n ret = []\n for w in vects:\n d = vects[w]\n\n # if word is super uncommon, skip it\n if (max(d.values()) <= 3):\n continue\n if (max([v / (1.+year2c.get(y,0)) for y, v in d.items()]) < .1): \n continue\n\n # some years may not have the word\n counts = dict2arr(d, xrange(minyear, maxyear+1), 1.0)\n\n \n # naive window averaging smoothing over the trend curve\n smooth = []\n for i in xrange(len(counts)):\n smooth.append(np.mean(counts[max(0,i-2):i+2]))\n if max(smooth) > 2:\n ret.append([w] + smooth)\n return np.array(ret)\n\n\n vects = vectors()\n # dimensions: words (row) x year (col)\n data = vects[:,1:].astype(float)\n\n # there's a bajillion ways to normalize the counts before clustering.\n # we do the following:\n\n # 1. divide by the total number of words in that year\n # (normalize by column)\n for idx, base in enumerate(yearcounts):\n data[:,idx] /= float(base)\n\n # 2. 
ensure zero mean and 1 std\n # (normalize by row)\n data = np.array([(l - np.mean(l)) / (max(l)) for l in data ])\n\n\n clusterer = KMeans(nclusters, n_init=50, init='k-means++')\n clusterer.fit(data) \n labels = clusterer.labels_\n xs = np.array(range(minyear, maxyear+1))\n\n content = []\n\n # each label is a cluster\n for label in set(labels):\n idxs = labels == label\n cluster = vects[idxs]\n\n # sort the words/clusters by their max count\n cluster = sorted(cluster, key=lambda t: max(t[1:].astype(float)), reverse=True)\n if not len(cluster): continue\n cluster = np.array(cluster)\n words = cluster[:,0]\n words = list(words)\n\n add_content(cluster, content, label)\n\n content.sort(key=lambda c: c[-1])\n\n\n\n # make HTML\n from jinja2 import Template\n template = Template(file('./clustertemplate.html').read())\n\n with file(outname, 'w') as f:\n f.write( template.render(content=content))", "def _repr_html_(self):\n return (\n f'<b>GalaxyCluster:</b> {self.unique_id} '\n f'(ra={self.ra}, dec={self.dec}) at z={self.z}'\n f'<br>> <b>with columns:</b> {self._str_colnames()}'\n f'<br>> {len(self.galcat)} source galaxies'\n f'<br>{self.galcat._html_table()}'\n )", "def __report(self):\n dataframe = pd.read_csv(os.path.join(self.report_path, \"cyclomatic-complexity.csv\"),\n names=[\"NLOC\", \"CCN\", \"Token\", \"Param\", \"Length\", \"Location\",\n \"Path\", \"Function\", \"Args\", \"Row\", \"Col\"],\n sep=',')\n dataframe.drop(['Path', 'Function', 'Row', 'Col'], axis=1, inplace=True)\n dataframe.sort_values('CCN', ascending=False, inplace=True)\n dataframe[\"Location\"] = dataframe[\"Location\"].str.replace('\\\\', '/')\n self.report_html(os.path.join(self.report_path,\n \"cyclomatic-complexity-report.html\"), dataframe,\n \"Cyclomatic Complexity report\")", "def create_html_layout(self):\n page = \"\"\"<!DOCTYPE html>\n <!doctype html>\n <html lang=\"en\">\n <head>\n <meta charset=\"utf-8\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1, shrink-to-fit=no\">\n </head>\n </html>\n <head>\n \t<meta charset=\"UTF-8\">\n </head>\n <body>\n <div class=\"container\">\n <div class=\"row\">\n <div class=\"col-sm\">\n <h4>eda report: Exploratory data analysis</h4>\n </div>\n <div class=\"col-sm\">\n <h3>Inspecting dataframe of size: {size}\n </div>\n </div>\n </div>\n \t<table class=\"table table-hover\" style=\".table\">\n <thead>\n <tr style=\"font-size: 15px;\">\n <th width=\"5%\" align=\"left\" scope=\"col\">Variable Name</th>\n <th width=\"12%\" align=\"left\" scope=\"col\">Data Type</th>\n <th width=\"15%\" align=\"left\" scope=\"col\">Histogram</th>\n <th width=\"11%\" align=\"left\" scope=\"col\">Stats</th>\n <th width=\"7%\" align=\"left\" scope=\"col\">Missing NA</th>\n <th width=\"5%\" align=\"left\" scope=\"col\">Outliers</th>\n </tr>\n </thead>\n <tbody>\"\"\".format(size=self.df.size)\n\n end_page = \"\"\" \n </tbody>\n </table>\n </body>\n \"\"\"\n rows_html = []\n for i, column in enumerate(self.df.columns):\n Summary = ColumnSummary(data=self.df[column])\n datatype = Summary.data_type()\n missing = Summary.missing_values()\n stats = Summary.statistic_summary()\n outliers = Summary.outliers()\n Summary.create_histogram(i)\n html = f\"\"\"\n <tr>\n <td style=\"font-size: 15px;\" width=\"10%\" align=\"left\"> {column}</td>\n <td style=\"font-size: 15px;\"width=\"10%\" align=\"left\"> {datatype}</td>\n <td><img class=\"img-fluid\" src=\"hist_images/histogram{i}.png?{random.randint(0,\n 2e9)}\" style=\"width:800px\"> </td>\n <td style=\"font-size: 15px;\">mean: 
{stats.mean}<br>\n mode: {stats.mode}<br><br>\n min: {stats.min}<br>\n max: {stats.max}<br><br>\n lower-bound: {stats.lower}<br>\n upper-bound: {stats.upper}<b</td>\n <td style=\"font-size: 15px;\">{missing}</td>\n <td style=\"font-size: 15px;\">{outliers}</td>\n </tr>\n \"\"\"\n rows_html.append(html)\n\n merged_html = page + \"\".join(rows_html) + end_page\n return merged_html", "def disp(df):\n display(HTML(df.to_html(index=False)))", "def print_df(cluster_df):\n import sys\n\n cluster_df.to_string(sys.stdout, index=False, header=True)\n # Print an empty line to finish\n print()", "def write_index_html(wk_dir,region_dict,metrics_filename,ext=\"png\"):\n # Make lists of the metrics and figure files to display\n metrics_dir = os.path.join(wk_dir,metrics_dir_name)\n metric_list = sorted([\n f for f in os.listdir(metrics_dir) if f.endswith('_summary.csv')])\n plot_list=[]\n fig_list=sorted([f for f in os.listdir(wk_dir+'/'+figure_dir_name)])\n for keyword in ['lag','correlations','twodpdf']:\n plot_list.append([f for f in fig_list if (keyword in f)]) # sort datasets\n subtitle_list=['Autocorrelation','2D Histograms','Correlation maps']\n\n # Start working on html text. Each line is appened to a list that\n # is then written to file.\n html_file=['<html>\\n',\n '<body>','<head><title>ASoP-Coherence</title></head>\\n',\n '<br><h1>ASoP-Coherence results</h1>\\n','<h2>Contents</h2>\\n',\n '<dl>\\n','<dt><a href=\"#Metrics\">Metrics</a></dt>\\n',\n '<dt><a href=\"#Figures\">Figures</a></dt>\\n',\n '<dd><a href=\"#Autocorrelation\">Autocorrelation</a></dd>\\n',\n '<dd><a href=\"#2D-Histograms\">2D Histograms</a></dd>\\n',\n '<dd><a href=\"#Correlation-maps\">Correlation Maps</a></dd>\\n',\n '</dl>\\n''<section id=\"Metrics\">\\n','<br><h2>Metrics</h2>\\n']\n html_file.append('<h3>Intermittency Metrics</h3>\\n')\n\n # Display metrics JSON in dashboard option\n metrics_json = os.path.basename(metrics_filename)\n metrics_relocated = os.path.join(metrics_dir_name,metrics_json)\n tmp='<p><a href=\"'+metrics_relocated+'\" target=\"_blank\">'+metrics_json+'</a></p>\\n'\n html_file.append(tmp)\n\n # Link CSV tables for download\n html_file.append('<h3>Tables</h3>\\n')\n for metric_file in metric_list:\n metric_path = os.path.join(metrics_dir_name,metric_file)\n html_file.append('<p><a href=\"{0}\">{1}</a></p>\\n'.format(metric_path,metric_file))\n html_file.append('<br>\\n')\n html_file.append('</section>\\n')\n\n # Add figures\n html_file.append('<section id=\"Figures\">\\n')\n html_file.append('<h2>Figures</h2>\\n')\n for title,category in zip(subtitle_list,plot_list):\n html_file.append('<section id='+title.replace(' ','-')+'>\\n')\n html_file.append('<h3>{0}</h3>\\n'.format(title))\n # Adjust figure width for autocorrelation\n fwidth = \"647\"\n if title==\"Autocorrelation\":\n fwidth=\"450\"\n for region in region_dict:\n html_file.append('<h4>{0}</h4>\\n'.format(region.replace('_',' ')))\n region_fig = [f for f in category if (region.replace(\" \",\"_\") in f)]\n for fig in region_fig:\n tmp = '<p><a href=\"{0}\" target=\"_blank\" alt={0}>' + \\\n '<img src=\"{0}\" width={1} alt=\"{0}\"></a></p>\\n'\n html_file.append(\n tmp.format(os.path.join(figure_dir_name,fig),fwidth))\n html_file.append('</section>\\n')\n html_file.append('</section>\\n')\n\n html_file.append('</body>\\n</html>\\n')\n filename=wk_dir+'/index.html'\n with open(filename,'w') as html_page:\n html_page.writelines(html_file)", "def generateHtml(self):\n # only the master processor needs to do this\n if not self.master: return\n\n 
for page in self.layout.pages:\n \n # build the metric dictionary\n metrics = {}\n page.models = []\n for fname in glob.glob(os.path.join(self.output_path,\"*.nc\")):\n with Dataset(fname) as dataset:\n mname = dataset.getncattr(\"name\")\n if mname != \"Benchmark\": page.models.append(mname)\n if not dataset.groups.has_key(page.name): continue\n group = dataset.groups[page.name]\n\n # if the dataset opens, we need to add the model (table row)\n metrics[mname] = {}\n \n # each model will need to have all regions\n for region in self.regions: metrics[mname][region] = {}\n \n # columns in the table will be in the scalars group\n if not group.groups.has_key(\"scalars\"): continue\n \n # we add scalars to the model/region based on the region\n # name being in the variable name. If no region is found,\n # we assume it is the global region.\n grp = group.groups[\"scalars\"]\n for vname in grp.variables.keys():\n found = False\n for region in self.regions:\n if region in vname: \n found = True\n var = grp.variables[vname]\n name = vname.replace(region,\"\")\n metrics[mname][region][name] = Variable(name = name,\n unit = var.units,\n data = var[...])\n if not found:\n var = grp.variables[vname]\n metrics[mname][\"global\"][vname] = Variable(name = vname,\n unit = var.units,\n data = var[...])\n page.setMetrics(metrics)\n \n # write the HTML page\n f = file(os.path.join(self.output_path,\"%s.html\" % (self.name)),\"w\")\n f.write(str(self.layout))\n f.close()", "def df_to_html(df, percentage_columns=None): # pragma: no cover\n big_dataframe_setup()\n try:\n res = \"<br><h2> {} </h2>\".format(df.name)\n except AttributeError:\n res = \"\"\n df.style.set_properties(**{\"text-align\": \"center\"})\n res += df.to_html(\n formatters=_formatters_dict(\n input_df=df, percentage_columns=percentage_columns\n )\n )\n res += \"<br>\"\n return res", "def create_html_report():\r\n\r\n #Sample DataFrame\r\n df = pd.DataFrame(np.random.randn(7,4)\r\n ,columns=['one','two','three','four']\r\n ,index=['a','b','c','d','e','f','g'])\r\n\r\n #Formatting rule\r\n def color_negative_red(val):\r\n color = 'red' if val<0 else 'black'\r\n return f'color: {color}'\r\n\r\n styler = df.style.applymap(color_negative_red)\r\n\r\n #Chart plotting\r\n filename = \"\".join([APP_ROOT, \"\\\\static\\\\images\\\\\" , \"plot.svg\"])\r\n #Plot\r\n ax = df.plot.bar()\r\n fig = ax.get_figure()\r\n fig.savefig(filename)\r\n\r\n #Template handling\r\n env = jinja2.Environment(loader=jinja2.FileSystemLoader(searchpath='./templates/'))\r\n template = env.get_template('template.html')\r\n\r\n filename = \"file:///\" + filename\r\n html = template.render(my_table=styler.render(), img_url=filename)\r\n\r\n return html", "def write_html(self):\n html_exporter = HTMLExporter(template_file=os.path.join(config[\"templates_dir\"], \"notebook.tpl\"))\n for nb in self.notebooks:\n (body, resources) = html_exporter.from_notebook_node(nb.content)\n body = re.sub('{github_user_name}', config[\"github_user_name\"], body)\n body = re.sub('{github_repo_name}', config[\"github_repo_name\"], body)\n html_path = os.path.join(self.dst_dir, os.path.splitext(nb.filename)[0] + \".html\")\n print(f\"- writing {html_path}\")\n with open(html_path, 'w') as f:\n f.write(body)", "def gen_html_report(landmarks_list, usage_flag, output_folder):\n labelled_landmarks = landmarks_list[0]\n\n if usage_flag == 2:\n detected_landmarks = landmarks_list[1]\n assert len(labelled_landmarks.keys()) == len(detected_landmarks.keys())\n\n # sort the labelled landmarks according to 
detection error\n error_summary = error_analysis(labelled_landmarks, detected_landmarks)\n\n landmark_name_list = labelled_landmarks[list(labelled_landmarks.keys())[0]].keys()\n for landmark_idx, landmark_name in enumerate(landmark_name_list):\n print(\"Generating html report for landmark {}: {}.\".format(landmark_idx, landmark_name))\n image_link_template = r\"<div class='content'><img border=0 src= '{0}' hspace=1 width={1} class='pic'></div>\"\n error_info_template = r'<b>Labelled</b>: [{0:.2f}, {1:.2f}, {2:.2f}];'\n document_text = r'\"<h1>check predicted coordinates:</h1>\"'\n document_text += \"\\n\"\n\n if usage_flag == 1:\n image_list = list(labelled_landmarks.keys())\n for image_idx, image_name in enumerate(image_list):\n label_landmark_world = labelled_landmarks[image_name][landmark_name]\n document_text = \\\n gen_row_for_html(usage_flag, image_link_template, error_info_template,\n document_text, image_list, image_idx, landmark_idx,\n [label_landmark_world], None)\n \n elif usage_flag == 2:\n image_list = error_summary.all_cases[landmark_name]\n error_sorted_index = error_summary.error_sorted_index\n for image_idx in error_sorted_index[landmark_name]:\n image_name = image_list[image_idx]\n label_landmark_world = labelled_landmarks[image_name][landmark_name]\n detected_landmark_world = detected_landmarks[image_name][landmark_name]\n error_info_template = r'<b>Labelled</b>: [{0:.2f}, {1:.2f}, {2:.2f}];'\n error_info_template += r'<b>Detected</b>: [{3:.2f}, {4:.2f}, {5:.2f}]; '\n error_info_template += r'<b>Type</b>: {6};'\n error_info_template += r'<b>Error</b>: x:{7:.2f}; y:{8:.2f}; z:{9:.2f}; L2:{10:.2f};'\n document_text = \\\n gen_row_for_html(usage_flag, image_link_template, error_info_template,\n document_text, image_list, image_idx, landmark_name,\n [label_landmark_world, detected_landmark_world],\n error_summary)\n\n else:\n raise ValueError('Undefined usage flag!')\n\n if usage_flag == 1:\n analysis_text = gen_analysis_text(len(image_list), usage_flag,\n labelled_landmarks, landmark_name, None)\n\n elif usage_flag == 2:\n analysis_text = gen_analysis_text(len(image_list), usage_flag,\n labelled_landmarks, landmark_name, error_summary)\n\n else:\n raise ValueError('Undefined usage float!')\n\n html_report_name = 'result_analysis.html'\n html_report_folder = os.path.join(output_folder, 'lm{}'.format(landmark_idx))\n if not os.path.isdir(html_report_folder):\n os.makedirs(html_report_folder)\n \n html_report_path = os.path.join(html_report_folder, html_report_name)\n write_html_report_for_single_landmark(document_text, analysis_text, html_report_path, width=200)\n\n if usage_flag == 2:\n summary_csv_report_name = 'summary.csv'\n summary_csv_path = os.path.join(output_folder, summary_csv_report_name)\n write_summary_csv_report_for_all_landmarks(error_summary, summary_csv_path)", "def _generate_report(self):\n\n _LOG.info(\"Generating the HTML report.\")\n\n # Make sure the output directory exists.\n try:\n self.outdir.mkdir(parents=True, exist_ok=True)\n except OSError as err:\n raise Error(f\"failed to create directory '{self.outdir}': {err}\")\n\n raw_stats_paths, descr_paths = self._copy_raw_data()\n\n # Find the styles and templates paths.\n templdir = FSHelpers.search_for_app_data(\"wult\", Path(\"templates\"),\n pathdescr=\"HTML report Jinja2 templates\")\n csspath = FSHelpers.search_for_app_data(\"wult\", Path(\"css/style.css\"),\n pathdescr=\"HTML report CSS file\")\n\n # Copy the styles file to the output directory.\n dstpath = self.outdir.joinpath(\"style.css\")\n 
try:\n shutil.copyfile(csspath, dstpath)\n except OSError as err:\n raise Error(f\"failed to copy CSS file from '{csspath}' to '{dstpath}':\\n{err}\")\n\n # The summary table is only included into the main HTML page.\n sum_tbl = self._prepare_summary_table(raw_stats_paths, descr_paths)\n links_tbl = self._prepare_links_table()\n\n # Each column name gets its own HTML page.\n for colname, pinfos in self._pinfos.items():\n stats_tbl = self._prepare_stats_table(pinfos)\n\n # Render the template.\n jenv = Jinja2.build_jenv(templdir, trim_blocks=True, lstrip_blocks=True)\n jenv.globals[\"stats_tbl\"] = stats_tbl\n jenv.globals[\"pinfos\"] = pinfos\n jenv.globals[\"colname\"] = colname\n jenv.globals[\"title_descr\"] = self.title_descr\n jenv.globals[\"toolname\"] = self._refinfo[\"toolname\"]\n\n if sum_tbl:\n jenv.globals[\"sum_tbl\"] = sum_tbl\n jenv.globals[\"links_tbl\"] = links_tbl\n templfile = outfile = \"index.html\"\n sum_tbl = None\n else:\n templfile = \"metric.html\"\n outfile = links_tbl[colname][\"fname\"]\n\n Jinja2.render_template(jenv, Path(templfile), outfile=self.outdir.joinpath(outfile))", "def to_html(self, result_dir):\n png_path = self.png_path(result_dir)\n data_table = self.html_data_table()\n return \"XXX figure html\"", "def make_pdf_reports(df, path):\n with PdfPages(path) as pdf:\n # settings for the file\n base = 10 # threshold for grouping points\n page_size = (11, 8.5)\n point_size = 1.5 # scatter plot point size\n\n df[\"color\"] = df.db.apply(rand_color) # adjacency color\n df[\"fuzzy_y\"] = df.y.apply(my_round) # horizontal group color\n df[\"y_color\"] = df.fuzzy_y.apply(rand_color)\n df[\"fuzzy_x\"] = df.x.apply(my_round) # vertical group color\n df[\"x_color\"] = df.fuzzy_x.apply(rand_color)\n\n # Add title and axis names\n plt.figure(figsize=page_size)\n plt.title('Horizontal Grouping Scatter Plot')\n plt.xlabel('x distance')\n plt.ylabel('y distance')\n plt.scatter(df.x, df.y, c=df.y_color, s=point_size)\n pdf.savefig() # saves the current figure into a pdf page\n plt.close()\n\n plt.figure(figsize=page_size)\n plt.title('Vertical Grouping Scatter Plot')\n plt.xlabel('x distance')\n plt.ylabel('y distance')\n plt.scatter(df.x, df.y, c=df.x_color, s=point_size)\n pdf.savefig() # saves the current figure into a pdf page\n plt.close()\n\n plt.figure(figsize=page_size)\n plt.title('Block Adjacency Grouping Scatter Plot')\n plt.xlabel('x distance')\n plt.ylabel('y distance')\n plt.scatter(df.x, df.y, c=df.color, s=point_size)\n pdf.savefig() # saves the current figure into a pdf page\n plt.close()\n\n data1 = df[[\"floor\", \"swing_drop\", \"name\"]]\n data = data1.groupby([\"floor\", \"swing_drop\"]).count()\n data = data.reset_index()\n data.head()\n data = data.fillna(0)\n pivot = data.pivot(index=\"floor\", columns=\"swing_drop\", values=\"name\")\n pivot = pivot.fillna(0)\n order = sorted(df.floor.unique(), reverse=True)\n pivot = pivot.reindex(order)\n plt.figure(figsize=page_size)\n ax = sns.heatmap(pivot, cmap=\"BuPu\")\n ax.set_title(\"Block Qty Heatmap\")\n pdf.savefig()\n plt.close()\n\n # bar chart\n plt.rcParams.update({'font.size': 5})\n plt.figure(figsize=page_size)\n plt.title('Block Style Bar Graph')\n plt.xlabel('Names')\n plt.xticks(rotation=90)\n plt.ylabel('Quantities')\n dd = df[['name', \"guid\"]].groupby(\"name\").count()\n dd = dd.reset_index()\n dd = dd.sort_values(\"guid\")\n plt.bar(dd.name, dd.guid)\n # plt.show()\n pdf.savefig()\n plt.close()\n\n # We can also set the file's metadata via the PdfPages object:\n d = pdf.infodict()\n 
d['Title'] = 'Multipage PDF Example'\n d['Author'] = 'Matthew Kreidler'\n d['Subject'] = 'How to create a multipage pdf file and set its metadata'\n d['Keywords'] = 'PdfPages multipage keywords author title subject'\n d['CreationDate'] = datetime.datetime.today()\n d['ModDate'] = datetime.datetime.today()\n\n print(\"Graphs and Charts finished!\")\n return path", "def to_multiple_htmls(self):\n self.error_throw('output')\n \n if self.rank_method == methods_of_ranking[3]: #'diversified_ranking'\n self.output_div('multiple_htmls')\n else:\n self.output('multiple_htmls')", "def output_html(self, path):\n if path is None:\n return\n import os\n fout = codecs.open(os.path.abspath(path), 'w', encoding='utf-8')\n fout.write('<html><body><table>')\n for data in self.datas:\n fout.write('<tr><td>%s</td><td>%s</td><td>%s</td></tr>' % (data['url'], data['title'], data['summary']))\n self.datas.remove(data)\n fout.write('</table></body></html>')\n fout.close()", "def print_cluster(self):\n print('Cluster', self.number)\n for pattern in self.patterns:\n pattern.print_pattern()", "def plot_samples(self, df_clusters, n_trajectories_per_cluster=5, path_to_save='', file_name='', save=True):\n\n\n # create colormap for trajectories\n self.clusters = df_clusters['cluster'].unique()\n\n # cluster loop\n for c in self.clusters:\n\n self.fig = plt.figure(figsize=(13,15))\n self.ax = self.fig.add_subplot(1,1,1)\n\n # plot background map\n self.plot()\n\n df_clus = df_clusters[df_clusters['cluster'] == c]\n # Separate medoids. Always plot medoids. Medoids are cool.\n df_med_clus = df_clus[df_clus['medoids'] == 1]\n\n trip = list(map(int, df_med_clus['locations_list'].tolist()[0]))\n df_trip = pd.DataFrame(data={'pro_com': trip})\n # get centroids of each trip\n self.df_trip_entroids = self.get_centroids_trip(df_trip)\n self.plot_single_trajectory()\n\n\n # trajectory loop\n if n_trajectories_per_cluster > 1:\n\n df_sample = df_clus[df_clus['medoids'] == 0][:n_trajectories_per_cluster-1]\n\n for t in range(n_trajectories_per_cluster-1):\n\n trip = list(map(int, df_sample.iloc[[t], :]['locations_list'].tolist()[0]))\n df_trip = pd.DataFrame(data={'pro_com': trip})\n # get centroids of each trip\n self.df_trip_entroids = self.get_centroids_trip(df_trip)\n self.plot_single_trajectory()\n \n if save:\n cluster_wise_plot_path = path_to_save+\"clusterwise_trajectories/\"\n if not os.path.exists(cluster_wise_plot_path):\n os.makedirs(cluster_wise_plot_path)\n plt.savefig(cluster_wise_plot_path+file_name+'_clusterwise_trajectories_'+str(c)+'.png')", "def write_report(self):\n\n # Create progress meter bar\n self.progress = ProgressMeter(_(\"Liste Eclair\"), '')\n\n # Write the title line. 
Set in INDEX marker so that this section will be\n # identified as a major category if this is included in a Book report.\n\n title = _(\"Liste Eclair\")\n mark = IndexMark(title, INDEX_TYPE_TOC, 1) \n self.doc.start_paragraph(\"Eclair-ReportTitle\")\n self.doc.write_text(title, mark)\n self.doc.end_paragraph()\n self.__write_all_places()\n\n # Close the progress meter\n self.progress.close()", "def to_single_html(self):\n self.error_throw('output')\n \n if self.rank_method == methods_of_ranking[3]: #'diversified_ranking'\n self.output_div('single_html')\n else:\n self.output('single_html')", "def md_writer(clf, features, outcome, eval_folder,\n config_file, summary_df, i=''):\n if config_file.endswith('.xlsx'):\n config = pd.read_excel(config_file, sheetname='Sheet1')\n elif config_file.endswith('.csv'):\n config = pd.read_csv(config_file)\n\n clf_params = clf.get_params()\n clf_name = str(clf)[:str(clf).index('(')]\n clf_img = clf_name+str(i)\n\n file_name = clf_name+str(i)+'_Evaluation.md'\n\n save_file = open(eval_folder+file_name, 'w')\n\n def new_line():\n save_file.write('\\n')\n\n save_file.write('<link rel=\"stylesheet\" href=\"style.css\" type=\"text/css\" />\\n')\n save_file.write('# Model Evaluation Report\\n')\n new_line()\n\n save_file.write('## Data Configuration:\\n')\n new_line()\n save_file.write(config.to_html(na_rep='', index=False).replace('NaT', ''))\n new_line()\n\n save_file.write('## Classifier Parameters: '+clf_name+'\\n')\n new_line()\n for elem in clf_params:\n save_file.write('* {}: {}\\n'.format(elem, clf_params[elem]))\n new_line()\n\n summary_df = summary_df.T\n summary_df.columns = ['value']\n\n save_file.write('## Evaluation Metrics; Summary\\n')\n new_line()\n save_file.write(summary_df.to_html())\n new_line()\n\n save_file.write('## ROC Curve\\n')\n new_line()\n save_file.write('![mis](images/ROC_Curve_'+clf_img+'.png)\\n')\n new_line()\n\n save_file.write('## Precision-Recall Curve\\n')\n new_line()\n save_file.write('![mis](images/PR_Curve_'+clf_img+'.png)\\n')\n new_line()\n\n save_file.write('## Precision, Recall vs % Population\\n')\n new_line()\n save_file.write('![mis](images/PRATN_Curve_'+clf_img+'.png)\\n')\n\n if clf_name in ['LogisticRegression']:\n save_file.write('## Coefficients\\n')\n new_line()\n for i,coef in enumerate(clf.coef_[0]):\n save_file.write('*<b>{}: {}</b>\\n'.format(features[i], round(coef,4)))\n new_line()\n\n if clf_name in ['WeightedQuestions']:\n save_file.write('## Weights\\n')\n new_line()\n for i,wt in enumerate(clf.weights):\n save_file.write('*<b>{}: {}</b>\\n'.format(features[i], wt))\n new_line()\n\n save_file.close()\n\n def markdown_to_html(md_file, out_file_name=None):\n import markdown\n\n with open(md_file, 'r') as f:\n html = markdown.markdown(f.read())\n\n if out_file_name is None:\n out_file_name = md_file.split('.')[0]+'.html'\n with open(out_file_name, 'w') as f:\n f.write(html)\n\n markdown_to_html(eval_folder+file_name)", "def _neighbours_html(self):\n self._make_svg_script()\n\n ret = {\n 'rt_label': self.rt_label,\n 'uri': self.uri,\n 'uri_encoded': self.uri_encoded,\n 'label': self.label,\n 'nid': self.nid,\n 'gat': self.gat,\n 'rs_encoded': self.rs_encoded,\n 'rs_label': self.rs_label,\n 'sa': self.sa,\n 'ea': self.ea,\n 'script': self.script\n }\n\n return render_template(\n 'class_report.html',\n report=ret\n )", "def all_cluster_report_fn(self):\n return op.join(self.combined_dir, 'all.cluster_report.csv')", "def table_to_csv(output_table, cat_column, method, out_csv_names, debug):\n p_df = 
df_to_pandas(output_table)\n no_of_prod = len(p_df)\n head_df = pd.DataFrame()\n head_df[\"Cluster Name\"] = p_df.reset_index()[cat_column]\n head_df_list = head_df[\"Cluster Name\"].tolist()\n try:\n cluster_matrix = hierarical_clustering(p_df, method)\n except Exception as e:\n raise Exception(\"Distance matrix has some issue:\"+str(e))\n # head_df.sort(\"Cluster Name\", inplace=True) # original\n head_df = head_df.sort_values([\"Cluster Name\"]) # changed by mukul\n head_df[\"Cluster Number\"] = range(1, no_of_prod + 1)\n head_df = change_column_order(head_df, \"Cluster Number\", 0)\n p_df = pd.DataFrame(cluster_matrix, columns=[\"Idj1\", \"Idj2\", \"SemipartialRSq\", \"priority\"])\n p_df[\"NumberOfClusters\"] = range(len(p_df),0,-1)\n p_df = format_column(p_df, \"Idj1\", no_of_prod, \"NumberOfClusters\")\n p_df = format_column(p_df, \"Idj2\", no_of_prod, \"NumberOfClusters\") \n p_df.drop(\"priority\", axis=1, inplace=True)\n p_df = change_column_order(p_df, \"NumberOfClusters\", 0)\n if not debug:\n p_df.to_excel(out_csv_names[0], index=False)\n head_df.to_excel(out_csv_names[1], index=False)\n return head_df, p_df, head_df_list, cluster_matrix", "def render(self, project=None, total_records=None):\n # TODO check for index column in df other than the default numbering\n table = json.dumps(self.to_backgrid_dict())\n if total_records is None:\n total_records = self.shape[0]\n uuids = [str(uuid.uuid4()) for i in range(3)]\n juuids, jproject = json.dumps(uuids), json.dumps(project)\n html = f'<div id=\"{uuids[0]}\"></div>'\n html += f'<div id=\"{uuids[1]}\" style=\"width:100%;\"></div>'\n html += f'<div id=\"{uuids[2]}\"></div>'\n html += f'<script>render_table({{\\\n total_records: {total_records}, project: {jproject},\\\n uuids: {juuids}, table: {table}\\\n }})</script>'\n return html", "def __repr__(self):\n rep = \"alg_cluster.Cluster(\"\n rep += str(self._fips_codes) + \", \"\n rep += str(self._horiz_center) + \", \"\n rep += str(self._vert_center) + \", \"\n rep += str(self._total_population) + \", \"\n rep += str(self._averaged_risk) + \")\"\n return rep", "def __repr__(self):\n rep = \"alg_cluster.Cluster(\"\n rep += str(self._fips_codes) + \", \"\n rep += str(self._horiz_center) + \", \"\n rep += str(self._vert_center) + \", \"\n rep += str(self._total_population) + \", \"\n rep += str(self._averaged_risk) + \")\"\n return rep" ]
[ "0.650531", "0.6185467", "0.61722493", "0.6105052", "0.60469466", "0.593786", "0.5836839", "0.573391", "0.5733406", "0.5732567", "0.56860524", "0.56389797", "0.5630503", "0.54823095", "0.5481751", "0.5479838", "0.5461374", "0.5451356", "0.540365", "0.540203", "0.54001427", "0.5396499", "0.5387077", "0.53570706", "0.5341553", "0.5339647", "0.53321725", "0.5297732", "0.5278862", "0.5278862" ]
0.73103774
0
Register an ffmpeg process/device.
def async_register_device(self, device):
    self._entities.append(device)

    @asyncio.coroutine
    def async_shutdown(event):
        """Stop ffmpeg process."""
        yield from device.async_stop_ffmpeg()

    self.hass.bus.async_listen_once(
        EVENT_HOMEASSISTANT_STOP, async_shutdown)

    # start on startup
    if device.initial_state:
        @asyncio.coroutine
        def async_start(event):
            """Start ffmpeg process."""
            yield from device.async_start_ffmpeg()
            yield from device.async_update_ha_state()

        self.hass.bus.async_listen_once(
            EVENT_HOMEASSISTANT_START, async_start)
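A hedged registration sketch (the `FFmpegCamera` class and `config` names are illustrative stand-ins; `hass.data[DATA_FFMPEG]` is where the component's setup stores the manager):

# Inside a platform's async setup (hypothetical entity class):
manager = hass.data[DATA_FFMPEG]
camera = FFmpegCamera(hass, config)    # an ffmpeg base-object subclass
manager.async_register_device(camera)

Registration wires the device's start/stop coroutines to the EVENT_HOMEASSISTANT_START and EVENT_HOMEASSISTANT_STOP bus events, so the ffmpeg process follows the Home Assistant lifecycle; note the start hook is only attached when `device.initial_state` is true.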
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start(self) -> None:\r\n self._spawn_ffmpeg()", "def _spawn_ffmpeg(self) -> None:\r\n if self.ffmpeg_proc is not None:\r\n raise RuntimeError('_spawn_ffmpeg called when ffmpeg_proc is '\r\n + f'{self.ffmpeg_proc} (not None)')\r\n\r\n args = ['ffmpeg', '-f', 'rawvideo', '-vcodec', 'rawvideo',\r\n '-s', f'{self.frame_size[0]}x{self.frame_size[1]}',\r\n '-pix_fmt', 'rgba', '-r', str(self.fps),\r\n '-loglevel', 'quiet',\r\n '-i', 'pipe:0',\r\n '-vcodec', 'h264', '-pix_fmt', 'yuv420p',\r\n '-movflags', '+faststart']\r\n\r\n if self.bitrate > 0:\r\n args.extend(['-b', f'{self.bitrate}k'])\r\n args.extend(['-y', self.outfile])\r\n\r\n create_flags = sp.CREATE_NO_WINDOW if 'nt' in os.name else 0\r\n self.ffmpeg_proc = sp.Popen(args, shell=False, stdout=None, stderr=None,\r\n stdin=sp.PIPE, creationflags=create_flags)", "def async_start_ffmpeg(self):\n raise NotImplementedError()", "def start_ffmpeg_record(stream, stream_url, formatted_date):\n filename = stream + '_' + formatted_date\n save_video_dir = 'rover_stream/' + stream\n subprocess.Popen(['mkdir rover_stream'], shell=True)\n subprocess.Popen(['mkdir ' + save_video_dir], shell=True)\n proc_video[stream] = subprocess.Popen(['ffmpeg -i ' + stream_url + ' -acodec copy -vcodec copy ' + save_video_dir + '/' + filename + '.mp4'], stdin=PIPE, shell=True)", "def __init__(self, ffmpeg_path=None, ffprobe_path=None):\n\n def which(name):\n path = os.environ.get_parser('PATH', os.defpath)\n for d in path.split(':'):\n fpath = os.path.join(d, name)\n if os.path.exists(fpath) and os.access(fpath, os.X_OK):\n return fpath\n return None\n\n if ffmpeg_path is None:\n ffmpeg_path = 'ffmpeg'\n\n if ffprobe_path is None:\n ffprobe_path = 'ffprobe'\n\n if '/' not in ffmpeg_path:\n ffmpeg_path = which(ffmpeg_path) or ffmpeg_path\n if '/' not in ffprobe_path:\n ffprobe_path = which(ffprobe_path) or ffprobe_path\n\n self.ffmpeg_path = ffmpeg_path\n self.ffprobe_path = ffprobe_path\n\n if not os.path.exists(self.ffmpeg_path):\n raise FFMpegError(\"ffmpeg binary not found: \" + self.ffmpeg_path)\n\n if not os.path.exists(self.ffprobe_path):\n raise FFMpegError(\"ffprobe binary not found: \" + self.ffprobe_path)\n\n self.hwaccels = []\n\n self.encoders = []\n self.decoders = []\n\n self._getcapabilities()", "def ffmpeg(*options):\n\tffmpeg_command = [\"ffmpeg\"] + list(options)\n\tprint(\"Calling FFMPEG:\", \" \".join(ffmpeg_command))\n\n\tprocess = subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE)\n\t(cout, cerr) = process.communicate()\n\texit_code = process.wait()\n\tif exit_code != 0: #0 is success.\n\t\traise Exception(\"Calling FFmpeg failed with exit code {exit_code}. CERR: {cerr} . 
COUT: {cout}\".format(exit_code=exit_code, cerr=str(cerr), cout=str(cout)))", "def _have_ffmpeg(self):\n from sage.misc.sage_ostools import have_program\n return have_program('ffmpeg')", "def register_proc(self, pid: int):\n self.processes.add(pid)", "def register(self, dbus_path, uuid, codec, capabilities):\n self._media_proxy.proxy.RegisterEndpoint(\n dbus_path,\n {\n \"UUID\": uuid,\n \"Codec\": Byte(codec),\n \"Capabilities\": Array(capabilities, signature=\"y\")\n })", "def register_with_mpf(machine):\n return 'playlist', MpfPlaylistPlayer(machine)", "def __init__(self):\n \n app_name = 'FFMPEG_info'\n app_author = 'sksound'\n \n # The package \"appdirs\" allows an OS-independent implementation\n user_data_dir = appdirs.user_data_dir(app_name, app_author)\n if not os.path.exists(user_data_dir):\n os.makedirs(user_data_dir)\n self.config_file = os.path.join(user_data_dir, 'ffmpeg.json')\n \n if not os.path.exists(self.config_file):\n \n # Check if it is in the system path\n try:\n completed_process = subprocess.run('ffmpeg')\n completed_process = subprocess.run('ffplay')\n self.ffmpeg = 'ffmpeg'\n self.ffplay = 'ffplay'\n except FileNotFoundError:\n self.set()\n else:\n with open(self.config_file, 'r') as in_file:\n info = json.load(in_file)\n self.ffmpeg = info['ffmpeg']\n self.ffplay = info['ffplay']", "def register():\n \n global _registered\n if not _registered:\n _registered = True\n sys.path_hooks.insert(0, VFSImporter)", "def set(self):\n \n ffmpeg_installed = misc.askquestion(DialogTitle='FFMPEG Check',\n Question='Is FFMPEG installed?')\n \n if ffmpeg_installed:\n ffmpeg_dir = misc.get_dir(DialogTitle='Please select the directory where FFMPEG (binary) is installed:')\n \n if sys.platform=='win32':\n self.ffmpeg = os.path.join(ffmpeg_dir, 'ffmpeg.exe')\n self.ffplay = os.path.join(ffmpeg_dir, 'ffplay.exe')\n else:\n self.ffmpeg = os.path.join(ffmpeg_dir, 'ffmpeg')\n self.ffplay = os.path.join(ffmpeg_dir, 'ffplay')\n \n if not os.path.exists(self.ffmpeg):\n print('Sorry, {0} does not exist!'.format(self.ffmpeg))\n return\n \n if not os.path.exists(self.ffplay):\n print('Sorry, {0} does not exist!'.format(self.ffplay))\n return\n \n else:\n self.ffmpeg = None\n self.ffplay = None\n \n # Save them to the default config file\n info = {'ffmpeg':self.ffmpeg, 'ffplay': self.ffplay}\n try:\n with open(self.config_file, 'w') as outFile:\n json.dump(info, outFile)\n print('Config information written to {0}'.format(os.path.abspath(self.config_file)))\n except PermissionError as e:\n curDir = os.path.abspath(os.curdir)\n print('Current directory: {0}'.format(curDir))\n print('Error: {0}'.format(e))\n \n return", "def __init__(\n self, executable=\"ffmpeg\", global_options=None, inputs=None, outputs=None\n ):\n self.executable = executable\n self._cmd = [executable]\n\n global_options = global_options or []\n if _is_sequence(global_options):\n normalized_global_options = []\n for opt in global_options:\n normalized_global_options += shlex.split(opt)\n else:\n normalized_global_options = shlex.split(global_options)\n\n self._cmd += normalized_global_options\n self._cmd += _merge_args_opts(inputs, add_input_option=True)\n self._cmd += _merge_args_opts(outputs)\n\n self.cmd = subprocess.list2cmdline(self._cmd)\n self.process = None", "def registerProducer(producer, streaming):", "def registerProducer(producer, streaming):\n pass", "def start(self, print_ffplay_proc_stderr=False, print_read_proc_stderr=False):\n # Set the image controls\n self.set_controls()\n \n # Create a process to read from 
the webcam\n # stdin should be pipe so it doesn't suck up keypresses (??)\n # stderr should be null, so pipe doesn't fill up and block\n # stdout will go to downstream process\n if print_read_proc_stderr:\n read_proc_stderr = None\n else:\n read_proc_stderr = open(os.devnull, 'w')\n read_proc_cmd_l = ['ffmpeg',\n '-f', 'video4linux2',\n '-i', self.device,\n '-vcodec', 'libx264',\n '-qp', '0',\n '-vf', 'format=gray',\n '-preset', 'ultrafast',\n '-f', 'rawvideo', '-',\n ] \n self.read_proc = subprocess.Popen(read_proc_cmd_l, stdin=subprocess.PIPE, \n stdout=subprocess.PIPE, stderr=read_proc_stderr)\n \n # Sometimes the read_proc fails because the device is busy or \"Input/ouput error\"\n # but the returncode isn't set or anything so I don't know how to\n # detect this.\n\n # Tee the compressed output to a file\n self.tee_proc = subprocess.Popen(['tee', self.output_filename], \n stdin=self.read_proc.stdout,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n # Play the output\n if print_ffplay_proc_stderr:\n ffplay_proc_stderr = None\n else:\n ffplay_proc_stderr = open(os.devnull, 'w') \n self.ffplay_proc = subprocess.Popen([\n 'ffplay', \n #~ '-fflags', 'nobuffer', # not compatible with analyzeduration or probesize?\n '-analyzeduration', '500000', # 500 ms delay in starting\n '-window_title', self.window_title,\n '-',\n ], \n stdin=self.tee_proc.stdout,\n stdout=subprocess.PIPE, stderr=ffplay_proc_stderr)\n\n # This is supposed to allow SIGPIPE\n # https://docs.python.org/2/library/subprocess.html#replacing-shell-pipeline\n self.read_proc.stdout.close()\n self.tee_proc.stdout.close()", "def test_ffmpeg_in_path(self) -> None:\n self.assertIsNotNone(which('ffmpeg'))", "def start_gst(self, config=None):\n\n if not config:\n config = \\\n [\n 'videotestsrc ! decodebin',\n '! videoconvert ! video/x-raw,format=(string)BGR ! videoconvert',\n '! appsink'\n ]\n\n command = ' '.join(config)\n self.video_pipe = Gst.parse_launch(command)\n self.video_pipe.set_state(Gst.State.PLAYING)\n self.video_sink = self.video_pipe.get_by_name('appsink0')", "def main():\n input_video = sys.argv[1]\n input_audio = sys.argv[2]\n output_video = sys.argv[3]\n set_audio(input_video, input_audio, output_video)", "def watch_ffmpeg(channel_id: str) -> Response:\n log.info(\n f\"Watching channel {channel_id} on {host_and_port} for {locast_service.city} using ffmpeg\")\n uri = locast_service.get_station_stream_uri(channel_id)\n\n ffmpeg = config.ffmpeg or 'ffmpeg'\n\n # Start ffmpeg as a subprocess to extract the mpeg stream and copy it to the incoming\n # connection. 
ffmpeg will take care of demuxing the mpegts stream and following m3u directions\n ffmpeg_cmd = [ffmpeg, \"-i\", uri, \"-codec\",\n \"copy\", \"-f\", \"mpegts\", \"pipe:1\"]\n\n ffmpeg_proc = subprocess.Popen(\n ffmpeg_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n # use a signal to indicate threads running or not\n signal = RunningSignal(True)\n\n # Start a thread that reads ffmpeg stderr and logs it to our logger.\n t = threading.Thread(target=_log_output, args=(\n config, ffmpeg_proc.stderr, signal))\n t.setDaemon(True)\n t.start()\n\n return Response(_stream_ffmpeg(config, ffmpeg_proc, signal), content_type='video/mpeg; codecs=\"avc1.4D401E')", "def __init__(self, executable=\"ffprobe\", global_options=\"\", inputs=None):\n super(FFprobe, self).__init__(\n executable=executable, global_options=global_options, inputs=inputs\n )", "def start_recording(codec, filename=time.strftime(\"%Y-%m-%d_%H-%M-%S\")):\n global video_writer\n folder = 'video_out/' # eventually replace this with the SD card folder\n # TODO: also include branch name and/or commit ID\n path = folder + filename + '.' + filetype\n print \"Saving video to: %s\" % path\n\n height = videoinput.frame_height\n if settings.sidebyside:\n width = 2*videoinput.frame_width\n else:\n width = videoinput.frame_width\n\n try:\n video_writer = cv2.VideoWriter(path, codec, 30, (width, height))\n except:\n print \"Failed to open video file for writing!\"", "def run(self):\n\n self.start_gst(\n [\n self.video_source,\n self.video_codec,\n self.video_decode,\n self.video_sink_conf\n ])\n\n self.video_sink.connect('new-sample', self.callback)", "def async_setup(hass, config):\n conf = config.get(DOMAIN, {})\n\n manager = FFmpegManager(\n hass,\n conf.get(CONF_FFMPEG_BIN, DEFAULT_BINARY),\n conf.get(CONF_RUN_TEST, DEFAULT_RUN_TEST)\n )\n\n descriptions = yield from hass.loop.run_in_executor(\n None, load_yaml_config_file,\n os.path.join(os.path.dirname(__file__), 'services.yaml'))\n\n # register service\n @asyncio.coroutine\n def async_service_handle(service):\n \"\"\"Handle service ffmpeg process.\"\"\"\n entity_ids = service.data.get(ATTR_ENTITY_ID)\n\n if entity_ids:\n devices = [device for device in manager.entities\n if device.entity_id in entity_ids]\n else:\n devices = manager.entities\n\n tasks = []\n for device in devices:\n if service.service == SERVICE_START:\n tasks.append(device.async_start_ffmpeg())\n elif service.service == SERVICE_STOP:\n tasks.append(device.async_stop_ffmpeg())\n else:\n tasks.append(device.async_restart_ffmpeg())\n\n if tasks:\n yield from asyncio.wait(tasks, loop=hass.loop)\n\n tasks.clear()\n for device in devices:\n tasks.append(device.async_update_ha_state())\n\n if tasks:\n yield from asyncio.wait(tasks, loop=hass.loop)\n\n hass.services.async_register(\n DOMAIN, SERVICE_START, async_service_handle,\n descriptions[DOMAIN].get(SERVICE_START), schema=SERVICE_FFMPEG_SCHEMA)\n\n hass.services.async_register(\n DOMAIN, SERVICE_STOP, async_service_handle,\n descriptions[DOMAIN].get(SERVICE_STOP), schema=SERVICE_FFMPEG_SCHEMA)\n\n hass.services.async_register(\n DOMAIN, SERVICE_RESTART, async_service_handle,\n descriptions[DOMAIN].get(SERVICE_RESTART),\n schema=SERVICE_FFMPEG_SCHEMA)\n\n hass.data[DATA_FFMPEG] = manager\n return True", "def add_media_file(self, input_file: str, output_file: str) -> None:\n if not os.path.exists(input_file):\n raise FFmpegNormalizeError(f\"file {input_file} does not exist\")\n\n ext = os.path.splitext(output_file)[1][1:]\n if (\n self.audio_codec is None or \"pcm\" in 
self.audio_codec\n ) and ext in PCM_INCOMPATIBLE_EXTS:\n raise FFmpegNormalizeError(\n f\"Output extension {ext} does not support PCM audio. \"\n \"Please choose a suitable audio codec with the -c:a option.\"\n )\n\n self.media_files.append(MediaFile(self, input_file, output_file))\n self.file_count += 1", "def run(self):\n\n # Start the video stream process\n self._process.start()", "def _RegisterProcess(self, process):\n if process is None:\n raise ValueError('Missing process.')\n\n if process.pid in self._processes_per_pid:\n raise KeyError(\n 'Already managing process: {0!s} (PID: {1:d})'.format(\n process.name, process.pid))\n\n self._processes_per_pid[process.pid] = process", "def connect_plugin(sound_file):\n global play_cmd # pylint: disable=global-statement\n\n ext = os.path.splitext(sound_file)[-1]\n try:\n return _subprocess(play_cmd[ext], sound_file)\n except (KeyError, AttributeError):\n pass\n\n programs = ['gst123', 'gst-play-1.0']\n if ext == '.wav':\n programs.append('aplay')\n elif ext == '.mp3':\n programs += ['mpg123', 'mpg321', 'mpg321-mpg123']\n for cmd in programs:\n try:\n _subprocess(cmd, sound_file)\n except OSError:\n pass # log here!\n else:\n play_cmd[ext] = cmd\n break", "def add_audio_to_video(audio_path: Union[str, Path],\n video_path: Union[str, Path],\n out_video_path: [str, Path]) -> Path:\n command = 'ffmpeg -loglevel warning -y -i \"{}\" -i \"{}\" -c:v copy -c:a copy -shortest {}'.format(\n video_path.as_posix(),\n audio_path.as_posix(),\n out_video_path.as_posix(),\n )\n run_command(command)\n return out_video_path" ]
[ "0.6310389", "0.6125284", "0.6104799", "0.5966454", "0.5877694", "0.58489907", "0.5754165", "0.56728053", "0.5621524", "0.5486249", "0.5477552", "0.5379064", "0.5299416", "0.5282166", "0.5268385", "0.5261999", "0.5171462", "0.5142859", "0.51377225", "0.5101035", "0.5090891", "0.5069568", "0.50671893", "0.5041392", "0.501444", "0.5010919", "0.5009473", "0.49955896", "0.49705008", "0.4962509" ]
0.65415466
0
Initialize ffmpeg base object.
def __init__(self, initial_state=True):
    self.ffmpeg = None
    self.initial_state = initial_state
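A minimal subclass sketch on top of this base (everything here is hypothetical: the `FFmpegBase` class name, the `ExampleDevice` entity, and the start/stop bodies, which in a real integration would attach an ffmpeg worker to `self.ffmpeg`). The old-style `@asyncio.coroutine` / `yield from` idiom is kept to match the document:

import asyncio

class ExampleDevice(FFmpegBase):
    """Hypothetical ffmpeg entity."""

    @asyncio.coroutine
    def async_start_ffmpeg(self):
        # A real implementation would spawn the ffmpeg worker here
        # and assign it to self.ffmpeg.
        yield from asyncio.sleep(0)

    @asyncio.coroutine
    def async_stop_ffmpeg(self):
        # A real implementation would close the worker here.
        self.ffmpeg = None
        yield from asyncio.sleep(0)

device = ExampleDevice(initial_state=False)  # do not auto-start on startup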
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, ffmpeg_path=None, ffprobe_path=None):\n\n def which(name):\n path = os.environ.get_parser('PATH', os.defpath)\n for d in path.split(':'):\n fpath = os.path.join(d, name)\n if os.path.exists(fpath) and os.access(fpath, os.X_OK):\n return fpath\n return None\n\n if ffmpeg_path is None:\n ffmpeg_path = 'ffmpeg'\n\n if ffprobe_path is None:\n ffprobe_path = 'ffprobe'\n\n if '/' not in ffmpeg_path:\n ffmpeg_path = which(ffmpeg_path) or ffmpeg_path\n if '/' not in ffprobe_path:\n ffprobe_path = which(ffprobe_path) or ffprobe_path\n\n self.ffmpeg_path = ffmpeg_path\n self.ffprobe_path = ffprobe_path\n\n if not os.path.exists(self.ffmpeg_path):\n raise FFMpegError(\"ffmpeg binary not found: \" + self.ffmpeg_path)\n\n if not os.path.exists(self.ffprobe_path):\n raise FFMpegError(\"ffprobe binary not found: \" + self.ffprobe_path)\n\n self.hwaccels = []\n\n self.encoders = []\n self.decoders = []\n\n self._getcapabilities()", "def __init__(\n self, executable=\"ffmpeg\", global_options=None, inputs=None, outputs=None\n ):\n self.executable = executable\n self._cmd = [executable]\n\n global_options = global_options or []\n if _is_sequence(global_options):\n normalized_global_options = []\n for opt in global_options:\n normalized_global_options += shlex.split(opt)\n else:\n normalized_global_options = shlex.split(global_options)\n\n self._cmd += normalized_global_options\n self._cmd += _merge_args_opts(inputs, add_input_option=True)\n self._cmd += _merge_args_opts(outputs)\n\n self.cmd = subprocess.list2cmdline(self._cmd)\n self.process = None", "def __init__(self):\n \n app_name = 'FFMPEG_info'\n app_author = 'sksound'\n \n # The package \"appdirs\" allows an OS-independent implementation\n user_data_dir = appdirs.user_data_dir(app_name, app_author)\n if not os.path.exists(user_data_dir):\n os.makedirs(user_data_dir)\n self.config_file = os.path.join(user_data_dir, 'ffmpeg.json')\n \n if not os.path.exists(self.config_file):\n \n # Check if it is in the system path\n try:\n completed_process = subprocess.run('ffmpeg')\n completed_process = subprocess.run('ffplay')\n self.ffmpeg = 'ffmpeg'\n self.ffplay = 'ffplay'\n except FileNotFoundError:\n self.set()\n else:\n with open(self.config_file, 'r') as in_file:\n info = json.load(in_file)\n self.ffmpeg = info['ffmpeg']\n self.ffplay = info['ffplay']", "def __init__(self, *args):\n _snap.TChAV_swiginit(self, _snap.new_TChAV(*args))", "def __init__(self, executable=\"ffprobe\", global_options=\"\", inputs=None):\n super(FFprobe, self).__init__(\n executable=executable, global_options=global_options, inputs=inputs\n )", "def __init__(self, filename, check_integrity=False, force_framerate=0):\n super(FMFCapture, self).__init__()\n\n self._mov = fmf.FlyMovie(filename, check_integrity)\n\n self._frame_timestamp = 0.0\n self._frame_number = -1\n if force_framerate > 0:\n self._frame_delay = 1./float(force_framerate)\n else:\n self._frame_delay = None\n\n #CaptureBase attributes\n self.frame_count = self._mov.n_frames\n self.frame_width = self._mov.width\n self.frame_height = self._mov.height\n self.is_video_file = True\n self.filename = filename", "def __init__(self,vid_path:str,num_frames:int=None,vid_flow_direction:str='left'):\n \n self.num_frames=num_frames\n if vid_path.split('.')[-1]=='cine' or vid_flow_direction!='left':\n #This is a cine file or needs to be rotated, convert to mp4\n print('Converting .cine file to mp4 (lossless)')\n #detect platform so we can correct file paths for ffmpeg\n is_win=re.compile('.*[Ww]in.*')\n if 
is_win.match(sys.platform):\n corrected_vid_path='\"'+vid_path+'\"'\n else:\n #Put escape characters in front of spaces in file name\n corrected_vid_path=[]\n for c in vid_path:\n if c==' ':\n corrected_vid_path.append('\\\\')\n corrected_vid_path.append(c)\n corrected_vid_path=''.join(corrected_vid_path)\n if vid_flow_direction=='up':\n rotate='-vf \"transpose=2\" '\n elif vid_flow_direction=='left':\n rotate=''\n elif vid_flow_direction=='right':\n rotate='-vf \"transpose=2,transpose=2\" '\n else:\n raise Exception(\"vid_flow_direction must be 'up', 'left' or 'right'\")\n if num_frames!=None:\n frames='-frames:v {0} '.format(num_frames)\n else:\n frames=''\n os_handle,new_file_path=tempfile.mkstemp(suffix='.mp4')\n #close file, we don't work with it directly\n os.close(os_handle)\n ffmpeg_command='ffmpeg -y -i {orig_file} {frames}{rotate}-f mp4 -crf 0 {new_file}'.format(orig_file=corrected_vid_path,rotate=rotate,new_file=new_file_path,frames=frames)\n print(ffmpeg_command)\n list(os.popen(ffmpeg_command))\n self.vid_path=new_file_path\n self.delete_file=True\n stats=os.stat(new_file_path)\n if stats.st_size==0:\n raise Exception('File conversion failed, check that ffmpeg is on PATH')\n else:\n #Not a cine\n self.vid_path=vid_path\n self.delete_file=False", "def __init__(self):\n super().__init__(interface.Audio, DEFAULT_PRIORITIES)", "def __init__(self, inFile = None, inData = None, inRate = None):\n \n # Information about FFMPEG\n self.ffmpeg_info = FFMPEG_info()\n \n if inData is not None:\n if inRate is None:\n print('Set the \"rate\" to the default value (8012 Hz).')\n rate = 8012.0\n self.generate_sound(inData, inRate)\n else: \n if inFile is None:\n inFile = self._selectInput()\n if inFile == 0:\n return\n try:\n self.source = inFile\n self.read_sound(self.source)\n except FileNotFoundError as err:\n print(err)\n inFile = self._selectInput()\n self.source = inFile\n self.read_sound(self.source)", "def __init__(self):\n self._start = None\n self._end = None\n self._num_frames = 0", "def __init__(self):\n log.msg(\"Initializing Twitch parser.\")\n\n # initialize our data members\n self.streams = tuple()\n self.crc32 = 0", "def __init__(self):\n # Attributes from GUI or caller\n self.file_path = None\n self.override = None\n self.start_frame = None\n self.render_length = None\n self.timecode_in = None\n self.timecode_out = None\n\n # Attributes from Pico file in memory\n self.file_buffer = None\n\n self.header = None\n self.base_timecode = None\n\n self.channels = None # Not entirely sure we need this - Scratch that, we do need it.\n self.jam_timecode = None\n self.raw_fps = None\n self.frame_in = None\n self.frame_out = None\n self.frame_zero = None\n self.frame_offset = None # This one eventually turns into the frame index\n self.frame_start = None # This one is not used anymore as the value comes from the GUI\n self.frame_padding = None\n self.total_frames = None\n\n # Attributes for render action\n self.output_name = None\n self.render_fps = None\n self.ref_timecode = None", "def start(self) -> None:\r\n self._spawn_ffmpeg()", "def __init__(self, buffer_size=DEFAULT_STREAM_BUFFER_SIZE):\n self._reader = cv2.VideoCapture()\n self._queue = deque(maxlen=buffer_size)\n self._stop_event = threading.Event()\n self._video_info = {}", "def __init__(self, input_file_path, convert_to_bgr=False):\n self.__yuv_video = YuvDecoder(input_file_path, convert_to_bgr=True)\n print('After INSTANTIATION')\n self.__yuv_video.start()", "def __init__(self, embed_size, sequence_size, encoder_name, 
n_classes=400, input_size=224, pretrained=True,\n mode='rfbdiff', layer_norm=True):\n super().__init__()\n self.mode = mode\n motion_sequence_size = sequence_size\n input_channels = 3\n if self.mode == \"flow\":\n input_channels = 2\n elif self.mode == \"rgbdiff\":\n motion_sequence_size = motion_sequence_size - 1\n self.rgb_diff = RGBDiff()\n else:\n raise Exception(\"Unsupported mode \" + self.mode)\n\n self.motion_decoder = VideoTransformer(embed_size, motion_sequence_size, encoder_name, n_classes=n_classes,\n input_size=input_size, pretrained=pretrained,\n input_channels=input_channels, layer_norm=layer_norm)", "def __init__(self):\n\t\tself._logger = None\n\t\tself._instanciate_logger()\n\t\tself._video_manager = VideoManager(self, self._logger)\n\t\tself._video_thread = None\n\t\tself._audio_manager = AudioManager(self, self._logger)\n\t\tself._audio_thread = None\n\t\tself._input_thread = None\n\t\tself._trigger_manager = None\n\t\tself.is_running = False", "def __init__(self, seq_length=80, class_limit=None, image_shape=(224, 224, 3)):\n self.seq_length = seq_length\n self.class_limit = class_limit\n self.sequence_path = os.path.join('data', 'demo_sequences')\n \n self.max_frames = 8000 # max number of frames a video can have for us to use it\n\n # Get the data.\n self.data = self.get_data()\n\n # Get the classes.\n self.classes = self.get_classes()\n\n # Now do some minor data cleaning.\n self.data = self.clean_data()\n\n self.image_shape = image_shape", "def __init__(self, *args):\n _ida_fpro.qfile_t_swiginit(self, _ida_fpro.new_qfile_t(*args))", "def __init__(self, input: Union[BinaryIO, str, os.PathLike], skip_frames: bool = False):\n self.start = None\n self.frames = []\n self.end = None\n self.metadata = None\n self.metadata_raw = None\n\n parse(input, {\n ParseEvent.START: lambda x: setattr(self, 'start', x),\n ParseEvent.FRAME: self._add_frame,\n ParseEvent.END: lambda x: setattr(self, 'end', x),\n ParseEvent.METADATA: lambda x: setattr(self, 'metadata', x),\n ParseEvent.METADATA_RAW: lambda x: setattr(self, 'metadata_raw', x)},\n skip_frames)", "def __init__(\n self,\n *,\n video_format: str,\n constant_rate_factor: int = 28,\n channels_first: bool = False,\n apply_fit: bool = False,\n apply_predict: bool = True,\n verbose: bool = False,\n ):\n super().__init__(is_fitted=True, apply_fit=apply_fit, apply_predict=apply_predict)\n self.video_format = video_format\n self.constant_rate_factor = constant_rate_factor\n self.channels_first = channels_first\n self.verbose = verbose\n self._check_params()", "def __init__(self, *args, **kwargs):\n super(Decoder, self).__init__(*args, **kwargs)\n self.stream.seek(0)\n self._code = 0", "def __init__(self, link, stop, outQueue, framerate, logger):\n\n super().__init__()\n self.logger = logger\n self.streamLink = link\n self.stop = stop\n self.outQueue = outQueue\n self.framerate = framerate\n self.currentState = StreamMode.INIT_STREAM\n self.pipeline = None\n self.source = None\n self.metadata_sink = None\n self.frame_sink = None\n self.image_arr = None\n self.newImage = False\n self.newMetadata = False\n self.num_unexpected_tot = 40\n self.unexpected_cnt = 0\n self.flag = True\n self.counter_frame_fps = 0\n self.fps = 0\n self.last_frame_time = time.time()", "def __init__(self, subtitle_zip_files_dir, target_dir, temp_storage_dir):\n self._video_formats = ('avi', 'mp4', 'mov', 'mkv', 'mk3d', 'webm', \\\n 'ts', 'mts', 'm2ts', 'ps', 'vob', 'evo', 'mpeg', 'mpg', \\\n 'm1v', 'm2p', 'm2v', 'm4v', 'movhd', 'movx', 'qt', \\\n 'mxf', 'ogg', 
'ogm', 'ogv', 'rm', 'rmvb', 'flv', 'swf', \\\n 'asf', 'wm', 'wmv', 'wmx', 'divx', 'x264', 'xvid')\n\n self.subtitle_zip_files_dir = pathlib.Path(subtitle_zip_files_dir)\n self.target_dir = pathlib.Path(target_dir)\n self.temp_storage_dir = pathlib.Path(temp_storage_dir)", "def init_video(self):\n\n assert self.container is None\n\n retry = 3\n while self.container is None and 0 < retry:\n retry -= 1\n try:\n self.container = av.open(self.tello.get_video_stream())\n except av.AVError as ave:\n print(ave)\n print('retry...')\n\n\n assert self.container is not None", "def __init__(self):\n self.detector = dlib.get_frontal_face_detector()", "def __init__(self, quality: int = 7, bitrate: int = 64):\n self._output = mp3.Mp3(quality, bitrate)\n self._output.add_callback(self._enqueue)\n self._socket = None\n self._source = None\n self._endpoint = None\n self._password = None\n # Icecast doesn't actually support chunked encoding\n self._chunk = False", "def __init__(self):\n\n # the path to the file locally\n self.path = None\n # the file extension\n self.ext = None\n # image|video\n self.type = None\n ##\n # file title reference\n self.title = None\n # [image, gallery, video, performer]\n self.category = None\n # file size\n self.size = None", "def __init__(self):\n # zu Beginn ist noch kein Modus gesetzt\n self.mode = None\n # zu Beginn sind noch keine Channels/ Pins konfiguriert\n self.channels = {}\n # es sind zu Beginn auch noch keine callbacks fuer events hinzugefuegt\n self.events = []", "def main():\n print(\"This is a library for reading video sequences into python via ffmpeg. \")\n print(\"Provides the 'Video_Reader' iterator class. \")\n print(\"Requires ffmpeg be installed. \")" ]
[ "0.73991567", "0.6756737", "0.6681672", "0.66100085", "0.642642", "0.6374926", "0.6355954", "0.6349292", "0.6311904", "0.62431127", "0.6241929", "0.62038124", "0.61841667", "0.61685425", "0.61664706", "0.6157815", "0.6156973", "0.61419606", "0.6110978", "0.61021984", "0.60942507", "0.6088486", "0.6066316", "0.6016537", "0.6006725", "0.6001366", "0.5980384", "0.59620976", "0.5935669", "0.59011155" ]
0.7267754
1
Start an ffmpeg process. This method must be run in the event loop and returns a coroutine.
def async_start_ffmpeg(self): raise NotImplementedError()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start(self) -> None:\r\n self._spawn_ffmpeg()", "def _spawn_ffmpeg(self) -> None:\r\n if self.ffmpeg_proc is not None:\r\n raise RuntimeError('_spawn_ffmpeg called when ffmpeg_proc is '\r\n + f'{self.ffmpeg_proc} (not None)')\r\n\r\n args = ['ffmpeg', '-f', 'rawvideo', '-vcodec', 'rawvideo',\r\n '-s', f'{self.frame_size[0]}x{self.frame_size[1]}',\r\n '-pix_fmt', 'rgba', '-r', str(self.fps),\r\n '-loglevel', 'quiet',\r\n '-i', 'pipe:0',\r\n '-vcodec', 'h264', '-pix_fmt', 'yuv420p',\r\n '-movflags', '+faststart']\r\n\r\n if self.bitrate > 0:\r\n args.extend(['-b', f'{self.bitrate}k'])\r\n args.extend(['-y', self.outfile])\r\n\r\n create_flags = sp.CREATE_NO_WINDOW if 'nt' in os.name else 0\r\n self.ffmpeg_proc = sp.Popen(args, shell=False, stdout=None, stderr=None,\r\n stdin=sp.PIPE, creationflags=create_flags)", "def async_restart_ffmpeg(self):\n yield from self.async_stop_ffmpeg()\n yield from self.async_start_ffmpeg()", "def run(self):\n\n # Start the video stream process\n self._process.start()", "def ffmpeg(*options):\n\tffmpeg_command = [\"ffmpeg\"] + list(options)\n\tprint(\"Calling FFMPEG:\", \" \".join(ffmpeg_command))\n\n\tprocess = subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE)\n\t(cout, cerr) = process.communicate()\n\texit_code = process.wait()\n\tif exit_code != 0: #0 is success.\n\t\traise Exception(\"Calling FFmpeg failed with exit code {exit_code}. CERR: {cerr} . COUT: {cout}\".format(exit_code=exit_code, cerr=str(cerr), cout=str(cout)))", "def _stream_ffmpeg(config: Configuration, ffmpeg_proc: subprocess.Popen, signal: RunningSignal):\n while True:\n try:\n yield ffmpeg_proc.stdout.read(config.bytes_per_read)\n except:\n ffmpeg_proc.terminate()\n ffmpeg_proc.communicate()\n signal.stop()\n break", "def start(self, print_ffplay_proc_stderr=False, print_read_proc_stderr=False):\n # Set the image controls\n self.set_controls()\n \n # Create a process to read from the webcam\n # stdin should be pipe so it doesn't suck up keypresses (??)\n # stderr should be null, so pipe doesn't fill up and block\n # stdout will go to downstream process\n if print_read_proc_stderr:\n read_proc_stderr = None\n else:\n read_proc_stderr = open(os.devnull, 'w')\n read_proc_cmd_l = ['ffmpeg',\n '-f', 'video4linux2',\n '-i', self.device,\n '-vcodec', 'libx264',\n '-qp', '0',\n '-vf', 'format=gray',\n '-preset', 'ultrafast',\n '-f', 'rawvideo', '-',\n ] \n self.read_proc = subprocess.Popen(read_proc_cmd_l, stdin=subprocess.PIPE, \n stdout=subprocess.PIPE, stderr=read_proc_stderr)\n \n # Sometimes the read_proc fails because the device is busy or \"Input/ouput error\"\n # but the returncode isn't set or anything so I don't know how to\n # detect this.\n\n # Tee the compressed output to a file\n self.tee_proc = subprocess.Popen(['tee', self.output_filename], \n stdin=self.read_proc.stdout,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n # Play the output\n if print_ffplay_proc_stderr:\n ffplay_proc_stderr = None\n else:\n ffplay_proc_stderr = open(os.devnull, 'w') \n self.ffplay_proc = subprocess.Popen([\n 'ffplay', \n #~ '-fflags', 'nobuffer', # not compatible with analyzeduration or probesize?\n '-analyzeduration', '500000', # 500 ms delay in starting\n '-window_title', self.window_title,\n '-',\n ], \n stdin=self.tee_proc.stdout,\n stdout=subprocess.PIPE, stderr=ffplay_proc_stderr)\n\n # This is supposed to allow SIGPIPE\n # https://docs.python.org/2/library/subprocess.html#replacing-shell-pipeline\n self.read_proc.stdout.close()\n self.tee_proc.stdout.close()", "def 
start(self):\n # start a thread to read frames from the file video stream\n self.thread.start()\n return self", "def play_async(path):\n p = Process(target=play, args=(path,))\n p.start()", "async def example_use_case(path_file_input: Path, path_file_output: Path) -> None:\n with ProcessTaskPoolExecutor(max_workers=1, cancel_tasks_when_shutdown=True) as executor:\n task = executor.create_process_task(\n FFmpegCoroutineFactory.create().execute,\n CreateStreamSpecCoroutineFilter(path_file_input, path_file_output).create,\n )\n LocalSocket.send(\"Ready\")\n await task", "def _ffmpeg_loop(cls, ffmpeg: subprocess.Popen) -> Iterable[Progress]:\n while ffmpeg.poll() is None:\n rlist, _, _ = select((ffmpeg.stderr, ffmpeg.stdout), (), ())\n # Read logs from stdin\n if ffmpeg.stderr in rlist:\n status = cls.process_logs(ffmpeg.stderr.read().splitlines())\n if status:\n yield status\n # ignore stdout\n if ffmpeg.stdout in rlist:\n ffmpeg.stdout.read()", "def start(self):\n if hasattr(self, 'process'):\n err = \"video '{}' Frames Extraction has already \" \\\n \"started.\".format(self.video_file)\n print err\n raise Exception(err)\n\n process_number = subprocess.Popen(self.start_frames, stdout=subprocess.PIPE)\n process_number.wait()\n return process_number", "def start(self):\n self._proc = self._get_subprocess()\n self._pid = self._proc.pid\n self._return_code = None", "def start_ffmpeg_record(stream, stream_url, formatted_date):\n filename = stream + '_' + formatted_date\n save_video_dir = 'rover_stream/' + stream\n subprocess.Popen(['mkdir rover_stream'], shell=True)\n subprocess.Popen(['mkdir ' + save_video_dir], shell=True)\n proc_video[stream] = subprocess.Popen(['ffmpeg -i ' + stream_url + ' -acodec copy -vcodec copy ' + save_video_dir + '/' + filename + '.mp4'], stdin=PIPE, shell=True)", "def run(self):\n\n self.start_gst(\n [\n self.video_source,\n self.video_codec,\n self.video_decode,\n self.video_sink_conf\n ])\n\n self.video_sink.connect('new-sample', self.callback)", "def run(self, ff: FFMPEG) -> None:\n return_code, error = ff.run()\n if error or return_code != 0:\n # Check return code and error messages\n error = error or f\"invalid ffmpeg return code {return_code}\"\n raise TranscodeError(error)", "def async_stop_ffmpeg(self):\n return self.ffmpeg.close()", "def _have_ffmpeg(self):\n from sage.misc.sage_ostools import have_program\n return have_program('ffmpeg')", "def start(self):\n\t\twhile self.capture_status:\n\t\t\t_, frame = self.cap.read()\n\t\t\tc_frame = frame[self.width / 2 - self.face_width / 2: self.width / 2 + self.face_width / 2,\n\t\t\t self.height / 2 - self.face_width / 2: self.height / 2 + self.face_height / 2, :]\n\t\t\tif not self.in_processing:\n\t\t\t\tself.frame = frame\n\t\t\t\tself.in_processing = True\n\t\t\tsleep(0.2)\n\t\tyield cv2.imdecode('png', c_frame)", "def run(self):\n self.process.start()", "def Start(self):\n\n\n\n assert not self._process, 'Start() can only be called once'\n self._process = subprocess.Popen(self._args)", "def start(self):\r\n return self.start_subprocess()", "def start(self):\n control_process = mp.Process(target = self._start, args = [])\n control_process.start()", "def main():\n print(\"This is a library for reading video sequences into python via ffmpeg. \")\n print(\"Provides the 'Video_Reader' iterator class. \")\n print(\"Requires ffmpeg be installed. 
\")", "async def start(self):\n # Avoid being rate limited by Twitter when restarting the stream with the same follow list.\n if self.sub_process and not set(self.sub_process.follows) != set(self.get_follows()):\n return\n\n # Kill the current stream before starting a new one\n self.stop()\n\n # No need to start a stream if we're not following anyone\n if not self.conf.follows:\n return\n\n # Create a new multi-processes queue, a new stream object and a new Process\n log.info('Creating new sub-process.')\n self.mp_queue = multiprocessing.Queue()\n self.mp_queue.cancel_join_thread()\n self.sub_process = SubProcessStream(self.mp_queue, self.conf.credentials, self.get_follows())\n log.info('Created new sub-process.')\n\n # Schedule the polling daemon (it will take care of starting the child process)\n self.daemon = asyncio.ensure_future(self._run())", "def watch_ffmpeg(channel_id: str) -> Response:\n log.info(\n f\"Watching channel {channel_id} on {host_and_port} for {locast_service.city} using ffmpeg\")\n uri = locast_service.get_station_stream_uri(channel_id)\n\n ffmpeg = config.ffmpeg or 'ffmpeg'\n\n # Start ffmpeg as a subprocess to extract the mpeg stream and copy it to the incoming\n # connection. ffmpeg will take care of demuxing the mpegts stream and following m3u directions\n ffmpeg_cmd = [ffmpeg, \"-i\", uri, \"-codec\",\n \"copy\", \"-f\", \"mpegts\", \"pipe:1\"]\n\n ffmpeg_proc = subprocess.Popen(\n ffmpeg_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n # use a signal to indicate threads running or not\n signal = RunningSignal(True)\n\n # Start a thread that reads ffmpeg stderr and logs it to our logger.\n t = threading.Thread(target=_log_output, args=(\n config, ffmpeg_proc.stderr, signal))\n t.setDaemon(True)\n t.start()\n\n return Response(_stream_ffmpeg(config, ffmpeg_proc, signal), content_type='video/mpeg; codecs=\"avc1.4D401E')", "def start(self, _=False):\n if not self._stop:\n self._current_execution += 1\n flags = self.flags\n if '--write' not in flags:\n flags.extend(['--write', self.writepath])\n if '--output-format' not in flags:\n flags.extend(['--output-format', 'csv'])\n line = [\"airodump-ng\"] + flags + self.arguments + [self.interface]\n self._proc = Popen(line, bufsize=0,\n env={'PATH': os.environ['PATH']},\n stderr=DEVNULL, stdin=DEVNULL, stdout=DEVNULL)\n os.system('stty sane')\n\n time.sleep(5)\n watcher = threading.Thread(target=self.watch_process)\n watcher.start()", "def run(self):\r\n\r\n p = pyaudio.PyAudio()\r\n\r\n stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),\r\n channels=wf.getnchannels(),\r\n rate=wf.getframerate(),\r\n output=True)\r\n\r\n musicdata = wf.readframes(CHUNK)\r\n\r\n while playing:\r\n if self.streamnum == 1:\r\n stream.write(musicdata)\r\n musicdata = wf.readframes(CHUNK)\r\n else:\r\n stream.write(musicdata)\r\n musicdata = wf2.readframes(CHUNK)\r\n if len(musicdata) < CHUNK or musicdata == '':\r\n if self.streamnum == 1:\r\n self.streamnum = 2\r\n else:\r\n self.streamnum = 1\r\n self.next = False\r\n if self.pause:\r\n while True:\r\n if not playing:\r\n return\r\n elif not self.pause:\r\n break\r\n\r\n stream.stop_stream()\r\n stream.close()\r\n\r\n p.terminate()", "def run(self):\n self.p = pyaudio.PyAudio()\n self.stream = self.p.open(format=pyaudio.paFloat32, channels=self.CHANNELS, rate=self.RATE, input=True,\n output=False, stream_callback=self.callback)\n self.stream.start_stream()\n self.stop.setSingleShot(True)\n self.stop.start()", "def test_ffmpeg_in_path(self) -> None:\n 
self.assertIsNotNone(which('ffmpeg'))" ]
[ "0.7942734", "0.73018295", "0.67428803", "0.6742118", "0.6471932", "0.626873", "0.60460156", "0.60184485", "0.59303683", "0.5854912", "0.57571197", "0.57371753", "0.5723993", "0.5672959", "0.5659358", "0.56507385", "0.5628994", "0.55856615", "0.5585128", "0.5443574", "0.5424701", "0.5401878", "0.53723574", "0.5320829", "0.53059137", "0.5271473", "0.52050406", "0.5197798", "0.5193479", "0.5168334" ]
0.76078135
1
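The positive document in the record above is an abstract hook, so no concrete body is shown. As a point of comparison, here is a minimal sketch of how such a coroutine could be written with only the standard library; the function name mirrors the query, but the arguments, ffmpeg flags, and URLs are illustrative assumptions, not taken from the dataset:

import asyncio

async def async_start_ffmpeg(input_url: str, output_path: str) -> asyncio.subprocess.Process:
    # Spawn ffmpeg without blocking the event loop. The coroutine resolves
    # as soon as the child process exists, not when transcoding finishes.
    process = await asyncio.create_subprocess_exec(
        "ffmpeg", "-y", "-i", input_url, "-codec", "copy", output_path,
        stdout=asyncio.subprocess.DEVNULL,
        stderr=asyncio.subprocess.DEVNULL,
    )
    return process

# Usage (assumes an ffmpeg binary on PATH and a reachable input):
# asyncio.run(async_start_ffmpeg("rtsp://host/stream", "out.mp4"))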
Stop an ffmpeg process. This method must be run in the event loop and returns a coroutine.
def async_stop_ffmpeg(self): return self.ffmpeg.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stop(self):\n if not self.ffmpeg:\n raise RuntimeError(\"ffmpeg is not running\")\n self.ffmpeg.send_signal(signal.SIGINT)", "def stop(self):\n if self._proc_is_alive():\n\n if os.name == 'nt':\n # os.killpg is not available on Windows\n # See: https://bugs.python.org/issue5115\n self._proc.kill()\n\n # When we kill the child process on Windows the return code\n # gets set to 1, so we want to reset the return code back to 0\n # in order to avoid creating logging output in the download(...)\n # method\n self._proc.returncode = 0\n else:\n os.killpg(self._proc.pid, signal.SIGKILL)\n\n self._set_returncode(self.STOPPED)", "def stop(self):\n try:\n self.process.terminate()\n self.process = None\n except AttributeError:\n return", "async def async_media_stop(self) -> None:\n await self._volumio.stop()", "async def stop(self) -> None:\n stop_fun = getattr(self, '_stop')\n if inspect.iscoroutinefunction(stop_fun):\n await stop_fun()\n return\n await run_sync(stop_fun)", "def stop(self, timeout=1):\n if self.async:\n self.queue.put((STOP_ASYNC_PRODUCER, None))\n self.proc.join(timeout)\n\n if self.proc.is_alive():\n self.proc.terminate()", "def async_restart_ffmpeg(self):\n yield from self.async_stop_ffmpeg()\n yield from self.async_start_ffmpeg()", "def stop(self):\n if self._process is not None:\n self._process.terminate()", "def stopit(self):\n\n self.stop.stop()\n self.stream.close()\n self.p.terminate()\n self.p = None\n\n print(\"Recording terminated!\")", "def stop_media(self):\n self.stdin_queue.put(\"stop\")", "async def stop_rtsp_livestream(self):\n await self.api.stop_rtsp_livestream(self.product_type, self.serial_no)", "def close_video(self):\n\n # the helper threads look for this variable to be true\n self.vision_running = False\n\n self.player.stop()\n\n # send the command to kill the vision stream (bebop only)\n if (self.is_bebop):\n self.drone_object.stop_video_stream()", "def _cleanup_ffmpeg(self) -> None:\r\n self.ffmpeg_proc.communicate()\r\n self.ffmpeg_proc = None", "def _stop_process(self):\n self.stdin_queue.put_nowait(\"quit\")\n ExternalProcess._stop_process(self)", "def timer_ffmpeg_process_timeout():\n try:\n if not self.ffmpeg_process_ps.is_alive():\n timer_ffmpeg_process.stop()\n self.w.hide()\n del (self.w)\n self.ffmpeg_process_ps = None\n except:\n pass", "def stop_streamer(params) -> None:\n print(\"Stopping streamer...\")\n try:\n with open(params['pid'], 'r') as f:\n pid = f.read()\n cmd = \"kill \" + str(pid) + \" >/dev/null 2>&1\"\n if os.system(cmd) == 0:\n print(\" Success !\")\n else:\n print(\" Failure !\")\n os.remove(params['pid'])\n except Exception as e:\n print(\" Failure !\")", "def stop(self) -> None:\n self._stream.stop()", "def stop(self):\n if self.running:\n log.info('Stopping sub process (pid {}).'.format(self.sub_process.pid))\n self.sub_process.terminate()\n self.sub_process.join()\n log.info('Stopped sub process (pid {}).'.format(self.sub_process.pid))\n self.daemon.cancel()\n log.info('Cancelled polling daemon for sub process {}.'.format(self.sub_process.pid))\n\n # Cleanup the stream\n log.info('Cleaning sub-process (pid {}).'.format(self.sub_process.pid))\n self.mp_queue.close()\n self.mp_queue = None\n self.sub_process = None\n self.daemon = None", "def stop(self):\n if not self.process_pid:\n raise Exception('why is this being called? 
%s' % self.server_name)\n\n if self.stop_kill:\n os.kill(self.process_pid, signal.SIGTERM)\n rc = wait_for_fork(self.process_pid, raise_error=False)\n return (rc, '', '')", "def stop(self):\n self.stream.stop()\n self.running = False", "def stop(self):\n\n with open(self.pidfile, 'r') as pidfile:\n pid = int(pidfile.read().strip())\n\n proc = psutil.Process(pid)\n proc.terminate()", "async def stop(self):\n await self.pause()\n return await self.send_command_and_read_reply(\n Protocol1Command(command=\"\", execution_command=\"V\")\n )", "def stop(self):\n\t\tif self.__logging: self.__logger.debug('Terminating processes.')\n\t\t#terminate Threaded queue mode seperately\n\t\tif self.__threaded_queue_mode and not(self.__queue is None):\n\t\t\tif len(self.__queue)>0: self.__queue.clear()\n\t\t\tself.__threaded_queue_mode = False\n\t\t\tself.frame = None\n\n\t\t# indicate that the thread should be terminate\n\t\tself.__terminate = True\n\n\t\t# wait until stream resources are released (producer thread might be still grabbing frame)\n\t\tif self.__thread is not None:\n\t\t\tself.__thread.join()\n\t\t\t#properly handle thread exit\n\t\t\tif self.__youtube_mode:\n\t\t\t\t# kill thread-lock in youtube mode\n\t\t\t\tself.__thread = None", "def on_vader_stop(ob, message):\n text='\"Processing\"'\n subprocess.call('espeak '+ text, shell=True)\n logging.debug(\"Processing...\")\n\n # pause pipeline to not break our file\n pipe.set_state(gst.STATE_PAUSED)\n\n # get content of the file\n flacfile = file(FLACFILE, 'r')\n\n try:\n result = googleSpeech(flacfile)\n print(result)\n jarvis = threading.Thread(None, send2jarvis, None, (result, ))\n jarvis.start()\n except:\n logging.error(\"An error occured...\")\n\n file(FLACFILE, 'w').write('')\n\n #file is empty, continue to listen\n pipe.set_state(gst.STATE_PLAYING)", "async def stop_livestream(self):\n await self.api.stop_livestream(self.product_type, self.serial_no)\n if self.p2p_stream_thread.is_alive() is True:\n await self.p2p_stream_handler.stop()", "def stop(self):\n os.system(\"taskkill /T /F /PID %s\" % self.process.pid)\n self.running = False", "async def stop(self):", "def stop_recording():\n do_command('PlayStop')\n print('Stopped')", "def stop(self):\n # trying this instead of SIGTERM\n # http://stackoverflow.com/a/6659191/3380530\n # self._process.send_signal(SIGINT)\n # Or not. SIGINT doesn't exist on Windows\n self._process.terminate()", "def stop(self):\n if self.is_running():\n self._stdin_queue.put_nowait(None) # Ask to stop the stdin_thread\n try:\n self._popen.terminate() # Send SIGTERM to the player, asking to stop\n log.debug('SIGTERM ' + self.name)\n except:\n pass\n self._watchdog.join(timeout=0.2) # Waiting maximum of 250 ms before killing brutaly the processus\n if self._watchdog.is_alive():\n self._popen.kill() # Send SIGNKILL to brutaly kill the process\n log.warning('KILLED ' + self.name)\n unregister_thread(self)\n self.join() # Wait for watchdog thread to terminate" ]
[ "0.68899935", "0.6307504", "0.61911714", "0.61535424", "0.60140026", "0.6010047", "0.5976443", "0.597304", "0.59643036", "0.5912363", "0.5874809", "0.586981", "0.5869279", "0.5859912", "0.57923543", "0.57812506", "0.5770701", "0.5748853", "0.57472503", "0.5726703", "0.56687826", "0.5655419", "0.5618101", "0.561691", "0.5594852", "0.55947006", "0.55612195", "0.5545258", "0.5506351", "0.5490583" ]
0.7927968
0
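The stop coroutine in this record simply closes a wrapped process object. A self-contained sketch of the usual terminate-then-kill shutdown sequence for a process started as in the previous sketch (the timeout value is an assumption):

import asyncio

async def async_stop_ffmpeg(process: asyncio.subprocess.Process, timeout: float = 5.0) -> None:
    # Ask ffmpeg to exit cleanly, escalating to SIGKILL if it hangs.
    if process.returncode is not None:
        return  # the process has already exited
    process.terminate()  # SIGTERM on POSIX
    try:
        await asyncio.wait_for(process.wait(), timeout)
    except asyncio.TimeoutError:
        process.kill()
        await process.wait()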
Returns an immutable collection of CellAddress instances (singletons) corresponding to all 81 cells of the Sudoku grid.
def get_all_cell_addresses() -> Tuple[CellAddress, ...]: return _all_cell_addresses
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addrCells(self, addr):\n return self.int_to_cells(addr, self.addr_cells)", "def get_all_neighbors(self):\n m, n = self.board.shape\n return as_strided(self.expanded_board,\n shape = (m,n,3,3), \n strides = self.expanded_board.strides + self.expanded_board.strides)", "def cells(self) -> List[Tuple[int, int]]:\n return self._cells", "def cells(self):\n return copy.deepcopy(self._cells)", "def solved_cells(self) -> Set[Cell]:\n\t\treturn set(self.iter_solved_cells())", "def solved_cells(self) -> Set[Cell]:\n\t\treturn set(self.iter_solved_cells())", "def get_cells(self):\n cell_list = []\n for cell_row in self.board:\n for current_cell in cell_row:\n if current_cell is not None:\n cell_list.append(current_cell)\n return cell_list", "def GLDAS025Cellgrid():\n return GLDAS025Grids(only_land=False)", "def get_neighbors(self):\n return list(map(self.game.square, [self.position - self.game.rules[\"row_len\"], self.position + 1, self.position + self.game.rules[\"row_len\"], self.position - 1]))", "def cell_list(self):\n lst_of_idx = []\n height = self.__height\n width = self.__width\n for i in range(width):\n for j in range(height):\n lst_of_idx.append((i,j))\n lst_of_idx.append((3,7))\n return lst_of_idx", "def cells_list(self):\n xx, yy = np.meshgrid(self.x_spacings, self.y_spacings)\n return np.vstack([yy.ravel(), xx.ravel()]).transpose()", "def create_sudoku(self)->list:\n grid = [[None for x in range(9)] for row in range(9)]\n for row in range(0,9):\n for column in range(0,9):\n if row <= 2 and column <=2:\n grid[row][column] = cell.Cell(0)\n elif row <= 2 and 3 <= column <= 5:\n grid[row][column] = cell.Cell(1)\n elif row <= 2 and 6 <= column <= 8:\n grid[row][column] = cell.Cell(2)\n elif 3 <= row <= 5 and column <= 2:\n grid[row][column] = cell.Cell(3)\n elif 3 <= row <= 5 and 3 <= column <= 5:\n grid[row][column] = cell.Cell(4)\n elif 3 <= row <= 5 and 6 <= column <= 8:\n grid[row][column] = cell.Cell(5)\n elif 6 <= row <= 8 and column <= 2:\n grid[row][column] = cell.Cell(6)\n elif 6 <= row <= 8 and 3 <= column <= 5:\n grid[row][column] = cell.Cell(7)\n elif 6 <= row <= 8 and 6 <= column <= 8:\n grid[row][column] = cell.Cell(8)\n return grid", "def get_neighbours(self, cell: Position) -> Iterable[Position]:\n x, y = cell\n\n return [\n (x - 1, y - 1), (x, y - 1), (x + 1, y - 1),\n (x - 1, y), (x + 1, y),\n (x - 1, y + 1), (x, y + 1), (x + 1, y + 1),\n ]", "def cells(self):\r\n return Cells(self)", "def cell_neighbours(self, x, y):\n if self.maze_map[y][x]:\n return set()\n neighbours = set()\n for (direction, ((i, j), dummy)) in MazeGraph.DIRECTIONS.items():\n xi, yj = (x + i) % self.width, (y + j) % self.height\n if not self.maze_map[yj][xi]:\n neighbours.add((direction, (xi, yj)))\n return neighbours", "def get_neighbours(self, cell):\n\t\tx,y = cell.find_id()\n\t\tlength = self.space.shape[1]\n\t\twidth = self.space.shape[0]\n\t\tif (length == 0 or width == 0 or x < 0 or x >= length or y < 0 or y >= width):\n\t\t\treturn []\n\t\tneighs = [(i,j) for i in range(y-1,y+2) if 0<=i<width for j in range(x-1,x+2) if 0<=j<length]\n\t\tneighbours = []\n\t\tfor neigh in neighs:\n\t\t\tneighbours.append(self.space[neigh[0],neigh[1]])\n\t\treturn neighbours", "def getEquateAddresses(self) -> ghidra.program.model.address.AddressIterator:\n ...", "def get_singletons(self):\n singleton_list = [] # List holding singleton positions (row, col)\n for row in range(9):\n for col in range(9):\n if len(self.possible_values[row][col]) == 1 and self.final_values[row][col] == 0:\n 
singleton_list.append((row, col)) # If it is vacant and has only one possible value append it\n return singleton_list", "def CPUAddrCells(self, addr):\n return self.int_to_cells(addr, self.cpu_cells)", "def initialize_cells(self):\n for loc in np.ndindex(*self.shape): # TODO: see if nested for loop is faster than this\n c = Cell(loc, self)\n self.cells.append(c)", "def addrCellsProperty(self):\n return FdtPropertyWords(\"#address-cells\", self.addr_cells)", "def create_possible_symbols_to_cells_mapping(self):\r\n possibles_to_cells = defaultdict(set)\r\n for cell in self.iterate_empty_cells():\r\n possibles_to_cells[frozenset(cell.get_possible_symbols())].add(cell)\r\n return possibles_to_cells", "def symbol_table_addresses(self):\n all_address = []\n for node in self.all_nodes[0]:\n all_address.extend(node['addresses'])\n return all_address", "def all_cells(self):\n \"\"\"\n Note that we use the convention that the first cell is (1,1)\n \"\"\"\n spart_star = self.circle_star()\n part = Partition(list(spart_star))\n coordinates = part.cells()\n coordinates = [(x+1, y+1) for x, y in coordinates]\n return coordinates", "def iter_solved_cells(self) -> Iterable[Cell]:\n\t\treturn (\n\t\t\tcell\n\t\t\tfor cell in self\n\t\t\tif cell.value()\n\t\t)", "def get_all_neighbor_coords(tiles):\n return [add(tile, neighbor) for tile in tiles for neighbor in NEIGHBORS]", "def get_neighbors(self) -> List['games.saloon.tile.Tile']:\n neighbors = []\n\n for direction in Tile.directions:\n neighbor = getattr(self, \"tile_\" + direction.lower())\n if neighbor:\n neighbors.append(neighbor)\n\n return neighbors", "def solve_puzzle(grid):\n solutions = []\n if not grid.valid():\n return solutions\n # Backtracking, iterating over (first) smallest list of candidates for empty vertices\n candidates = grid.candidate_map()\n min_number_of_candidates = min([9] + [len(candidates[ln][rw]) for ln in range(9) for rw in range(9) if grid.grid[ln][rw] is None])\n for (line, row) in [(ln, rw) for ln in range(9) for rw in range(9)]:\n if grid.grid[line][row] is None and len(candidates[line][row]) == min_number_of_candidates:\n for guess in candidates[line][row]:\n grid.grid[line][row] = guess\n for solution in solve_puzzle(grid):\n solutions.append(solution)\n grid.grid[line][row] = None\n break\n else:\n solutions.append(Sudoku(grid.__str__()))\n return solutions", "def solveSudoku(self, board: List[List[str]]) -> None:\n # initialize the hashmaps\n for row in range(self.size):\n for col in range(self.size):\n value = board[row][col]\n if value != '.':\n self.rows[row].add(value)\n self.cols[col].add(value)\n self.cells[self.cell_idx(row, col)].add(value)\n \n # start backtracking at the first field\n self.backtrack(board, 0)\n return board", "def get_neighbours(self):\n return []" ]
[ "0.64614326", "0.6399312", "0.6301142", "0.62914723", "0.6212155", "0.6212155", "0.61569643", "0.6042829", "0.60215265", "0.60164094", "0.60013884", "0.59763795", "0.59708726", "0.59201807", "0.5903456", "0.58943486", "0.58770293", "0.5867237", "0.58435106", "0.582699", "0.5818135", "0.5787382", "0.575843", "0.57482064", "0.5743943", "0.5709828", "0.5699752", "0.5690755", "0.5690479", "0.5666041" ]
0.67733836
0
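The positive document here returns a module-level tuple whose construction is not shown in the record. A plausible supporting definition, under the assumption that CellAddress is a plain row/column record (the NamedTuple shape is an illustration, not taken from the dataset):

from typing import NamedTuple, Tuple

class CellAddress(NamedTuple):
    row: int
    column: int

# One immutable, row-major tuple of all 81 addresses, built once so the
# accessor can keep handing out the same singleton instances.
_all_cell_addresses: Tuple[CellAddress, ...] = tuple(
    CellAddress(row, column) for row in range(9) for column in range(9)
)

def get_all_cell_addresses() -> Tuple[CellAddress, ...]:
    return _all_cell_addresses

assert len(get_all_cell_addresses()) == 81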
Returns the cell address singleton for the given cell coordinates.
def get_cell_address(row: int, column: int) -> CellAddress: return _all_cell_addresses[9 * row + column]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cell_from_xy(self,x,y):\n return self.cell_array.item((x,y))", "def get_cell_by_coords(self, coords):\n try:\n cell = GameCell.objects.get(row=coords[0], col=coords[1], game=self)\n return cell\n except GameCell.DoesNotExist:\n return None", "def cell_at(self, x, y):\n\n return self.maze_map[x][y]", "def get_cell(self, location: Hashable) -> Cell:\n\t\treturn self._location_to_cell_map.get(location, None)", "def get_all_cell_addresses() -> Tuple[CellAddress, ...]:\n return _all_cell_addresses", "def coords_from_cell(cell, lon = [-8.73, -8.50], lat = [41.10, 41.25], N = 100, M = 75):\n lon_step = (lon[1] - lon[0]) / N \n lat_step = (lat[1] - lat[0]) / M\n \n middle_lon = lon[0] + cell[0] * lon_step + lon_step / 2\n middle_lat = lat[0] + cell[1] * lat_step + lat_step / 2\n \n return [middle_lon, middle_lat]", "def getCellFromPosition(self, xPos, yPos):\n for cell in self.cells:\n if(xPos == cell.x and yPos == cell.y):\n return cell\n return False", "def getAddressAtIndex(self, index: int) -> ghidra.program.model.address.Address:\n ...", "def cell_coord(id, Nx):\n nx = id // (Nx**2)\n ny = (id - nx * Nx**2) // Nx\n nz = id - nx * Nx**2 - ny * Nx\n return np.array([nx, ny, nz])", "def neighbour(self, position):\n if not self._linked:\n return False\n addr = Address(self._address)\n if type(position) == tuple:\n addr = addr + position\n # TODO: this does not work if position is a list...\n elif type(position) == str:\n if \"right\" in position:\n addr[1] += 1\n if \"left\" in position:\n addr[1] -= 1\n if \"top\" in position:\n addr[0] -= 1\n if \"bottom\" in position:\n addr[0] += 1\n try:\n ncell = self._worksheet.cell(addr)\n except IncorrectCellLabel:\n raise CellNotFound\n return ncell", "def get_cell(self, point):\n return self._grid[point.x][point.y]", "def Address(self) -> _n_5_t_0:", "def locationByCoordinate(latitude, longitude) :\n geoLoc = Nominatim(user_agent=\"GetLoc\")\n coordinateString = f\"{latitude}, {longitude}\"\n locationCoordinates = geoLoc.reverse(coordinateString)\n return locationCoordinates.address", "def get_address(self, symbol):\n return self.table[symbol]", "def get_cell(self, location):\n if 0 <= location[0] < self.boardSize and 0 <= location[1] < self.boardSize:\n return self.board[location[0]][location[1]]\n else:\n raise Exception(\"There is no cell at the given location\")", "def get_address(self, address=None):\n return self.__get_addr_grp('address', address)", "def getCell(self, idx = None, cell = 1, base_1 = None, base_2 = None):\n\n if idx is None: idx = np.arange(self.atoms.shape[0])\n if isinstance(idx, (int, np.integer)): idx = [idx]\n\n if cell == 1:\n if base_1 is None:\n return self.cell_1[idx, :, :]\n else:\n return np.matmul(base_1[:2, :2], self.rep_1[idx, :, :])\n elif cell == 2:\n if base_2 is None:\n return self.cell_2[idx, :, :]\n else:\n return np.matmul(base_2[:2, :2], self.rep_2[idx, :, :])", "def get_cell(self, row, column):\n return self.sudoku_matrix[row][column]", "def get_peer_addresses(cell_address: CellAddress) -> Tuple[CellAddress, ...]:\n return _peer_addresses[cell_address.row][cell_address.column]", "def get_address(self):\n if self.address:\n return self.address", "def get_cell(self, x, y):\n if y < 0 or y >= len(self.g): return None\n if x < 0 or x >= len(self.g[y]): return None\n return self.g[y][x]", "def addrCellsProperty(self):\n return FdtPropertyWords(\"#address-cells\", self.addr_cells)", "def get_cell(self, business):\n x = self.longitudes.searchsorted(business.longitude) - 1\n y = 
self.latitudes.searchsorted(business.latitude) - 1\n return x, y", "def cell(self, pos):\n\t\tpos = Point(pos)\n\t\tif not self.valid(pos):\n\t\t\traise KeyError('Invalid cell position: {0}'.format(pos))\n\t\treturn self.data[pos.x + pos.y * self.dims.width]", "def query(self, cell: Tuple[int, int]):\n return self._board[cell[0]][cell[1]]", "def address(self) -> tuple[str, int]:", "def get_cell(self, uuid):\n\n try:\n return Cell.from_cell(self._cells[uuid])\n except KeyError:\n error_str = \"Trying to get an non-existing cell with uuid: {}\"\n raise ValueError(error_str.format(uuid))", "def get_cell(self, x, y):\n x1, y1 = self.transpose_coordinates(x, y)\n if self.is_in_field(x1, y1):\n return self._cells[y1][x1]\n return None", "def getAbsoluteAddress(program: ghidra.program.model.listing.Program, address: ghidra.program.model.address.Address) -> ghidra.program.model.address.Address:\n ...", "def __get_cell_index(self, x, y) -> int:\n # \"The map data, in row-major order, starting with (0,0)\"\n return x + y * self.occupancy_map.info.width" ]
[ "0.65568703", "0.63951373", "0.6334845", "0.62861043", "0.6188849", "0.5850484", "0.58057034", "0.5799113", "0.57931775", "0.5784899", "0.577949", "0.57663363", "0.5765267", "0.57350516", "0.5727795", "0.5704463", "0.5692549", "0.56801885", "0.567737", "0.5655627", "0.5651809", "0.56509525", "0.5617782", "0.56094426", "0.5606064", "0.5582703", "0.55775166", "0.5564565", "0.5559779", "0.5550622" ]
0.75046825
0
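The lookup in this record relies on row-major indexing into the 81-element tuple. The arithmetic in isolation, runnable on its own:

def cell_index(row: int, column: int) -> int:
    # Row-major layout of a 9x9 grid: each full row occupies 9 consecutive slots.
    return 9 * row + column

assert cell_index(0, 0) == 0
assert cell_index(4, 7) == 43   # 4 full rows (36 cells) plus 7
assert cell_index(8, 8) == 80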
Returns an immutable collection of addresses of all cells which are peers of the cell with the given cell address. Cells residing in the same row, or in the same column, or in the same region as the given cell are considered its peers.
def get_peer_addresses(cell_address: CellAddress) -> Tuple[CellAddress, ...]: return _peer_addresses[cell_address.row][cell_address.column]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getEquateAddresses(self) -> ghidra.program.model.address.AddressIterator:\n ...", "def peers_for_cell(self, coords, include_self=False):\n for c in self.row_for_cell(coords, include_self=include_self):\n yield c\n for c in self.col_for_cell(coords, include_self=False):\n yield c\n for c in self.square_for_cell(coords, include_self=False):\n if c[0] != coords[0] and c[1] != coords[1]:\n yield c", "def get_all_cell_addresses() -> Tuple[CellAddress, ...]:\n return _all_cell_addresses", "def get_neighbours(self, cell: Position) -> Iterable[Position]:\n x, y = cell\n\n return [\n (x - 1, y - 1), (x, y - 1), (x + 1, y - 1),\n (x - 1, y), (x + 1, y),\n (x - 1, y + 1), (x, y + 1), (x + 1, y + 1),\n ]", "def nearby_cells(self, cell):\n cells = set()\n\n for i in range(cell[0] - 1, cell[0] + 2):\n for j in range(cell[1] - 1, cell[1] + 2):\n\n if (i, j) == cell:\n continue\n\n if 0 <= i < self.height and 0 <= j < self.width:\n cells.add((i, j))\n\n return cells", "def neighbors(self, cell):\n x = cell.x\n y = cell.y\n for new_x, new_y in [(x, y - 1), (x, y + 1), (x - 1, y), (x + 1, y)]:\n neighbor = self[new_x, new_y]\n if neighbor is not None:\n yield neighbor", "def addrCells(self, addr):\n return self.int_to_cells(addr, self.addr_cells)", "def get_neighbours(self, cell):\n\t\tx,y = cell.find_id()\n\t\tlength = self.space.shape[1]\n\t\twidth = self.space.shape[0]\n\t\tif (length == 0 or width == 0 or x < 0 or x >= length or y < 0 or y >= width):\n\t\t\treturn []\n\t\tneighs = [(i,j) for i in range(y-1,y+2) if 0<=i<width for j in range(x-1,x+2) if 0<=j<length]\n\t\tneighbours = []\n\t\tfor neigh in neighs:\n\t\t\tneighbours.append(self.space[neigh[0],neigh[1]])\n\t\treturn neighbours", "def getNeighbors(cell, all_living_cells, test=False, test_color=8):\n neighbors = []\n NeighborCellGrid = [ # all possible neighbor positions\n [cell.x - 1, cell.y - 1], # top left\n [cell.x, cell.y - 1], # top\n [cell.x + 1, cell.y - 1], # top right\n [cell.x - 1, cell.y], # left\n [cell.x + 1, cell.y], # right\n [cell.x - 1, cell.y + 1], # bottom left\n [cell.x, cell.y + 1], # bottom\n [cell.x + 1, cell.y + 1] # bottom right\n ]\n count = 0\n for i in all_living_cells:\n count+=1\n if i.id != cell.id and i.alive == True: # not self and pixel is alive\n if [i.x, i.y] in NeighborCellGrid: # next to\n neighbors.append(i)\n if test:\n for i in NeighborCellGrid:\n g = simCell(i[0], i[1], color=test_color)\n test_cells.append(g)\n return neighbors", "def accounts_with_address(self):\n return [account for account in self if account.address]", "def getEquateAddresses(self, asv: ghidra.program.model.address.AddressSetView) -> ghidra.program.model.address.AddressIterator:\n ...", "def get_neighbors(self, cell, count):\n row, col = cell\n # get all the neighbors\n neighbors = set([(min(self.height - 1, max(row + i, 0)), min(self.width - 1, max(col + j, 0))) \n for i in range(-1, 2)\n for j in range(-1, 2)])\n\n for neighbor in deepcopy(neighbors):\n if neighbor in self.safes or neighbor == cell:\n neighbors.remove(neighbor)\n elif neighbor in self.mines:\n neighbors.remove(neighbor)\n count -= 1\n\n return neighbors, count", "def get_adjcells(self,cell):\n adj_cells = []\n cells_xy = []\n if cell.x > 0:\n adj_cells.append(self.cell_array.item((cell.x-1,cell.y)))\n if cell.x < self.grid_size - 1:\n adj_cells.append(self.cell_array.item((cell.x+1,cell.y)))\n if cell.y > 0:\n adj_cells.append(self.cell_array.item((cell.x,cell.y-1)))\n if cell.y < self.grid_size - 1:\n 
adj_cells.append(self.cell_array.item((cell.x,cell.y+1)))\n return adj_cells", "def get_further_neighbours(self, cell):\n\t\tneighs = self.get_neighbours(cell)\n\t\ti, j = cell.find_id()\n\t\tneighbours = []\n\t\tfor neigh in neighs:\n\t\t\tx, y = neigh.find_id()\n\t\t\tif abs(x-i)+abs(y-j) > 1 or abs(x-i)+abs(y-j) == 0: \n\t\t\t\tneighbours.append(self.space[y,x])\n\t\treturn neighbours", "def cell_to_adjacent_boundary_cells(self,c):\n j_boundary=[j for j in self.cell_to_edges(c)\n if self.is_boundary_edge(j)]\n\n adj_edges=[]\n for j in j_boundary:\n adj_edges+=list(self.edge_to_edges(j))\n adj_edges=filter(lambda jj: self.is_boundary_edge(jj) and jj!=j,adj_edges)\n cells=[c\n for j in adj_edges\n for c in self.edge_to_cells(j) if c>=0 ]\n return cells", "def getReferencesFrom(self, address: ghidra.program.model.address.Address) -> List[ghidra.program.model.symbol.Reference]:\n ...", "def getEquateAddresses(self, start: ghidra.program.model.address.Address) -> ghidra.program.model.address.AddressIterator:\n ...", "def solve_maze(self) -> List[Cell]:\n self.prev_cells = {}\n self.junction_graph = self.construct_junction_graph()\n self.junction_graph.depth_first_search(self.maze.start_cell, self.junction_visitor)\n\n end_cell = self.maze.end_cell\n path = [end_cell]\n prev_cell = end_cell\n cell = self.prev_cells.get(end_cell)\n\n while path[-1] != self.maze.start_cell:\n if Direction.between(prev_cell, cell) is None:\n # fill in corridors\n direction = self.junction_direction(prev_cell, cell)\n neighbor = self.maze.neighbor(prev_cell, direction)\n while neighbor != cell:\n path.append(neighbor)\n neighbor = self.maze.neighbor(neighbor, direction)\n path.append(cell)\n prev_cell = cell\n cell = self.prev_cells.get(cell)\n\n return list(reversed(path))", "def find_valid_neighbours(self, cell):\n\n delta = [('W', (-1, 0)),\n ('E', (1, 0)),\n ('S', (0, 1)),\n ('N', (0, -1))]\n neighbours = []\n for direction, (dx, dy) in delta:\n x2, y2 = cell.x + dx, cell.y + dy\n if (0 <= x2 < self.nx) and (0 <= y2 < self.ny):\n neighbour = self.cell_at(x2, y2)\n if neighbour.has_all_walls():\n neighbours.append((direction, neighbour))\n return neighbours", "def get_nearest_neighbours(self, cell):\n\t\tneighs = self.get_neighbours(cell)\n\t\ti, j = cell.find_id()\n\t\tneighbours = []\n\t\tfor neigh in neighs:\n\t\t\tx, y = neigh.find_id()\n\t\t\tif abs(x-i)+abs(y-j) <= 1: \n\t\t\t\tneighbours.append(self.space[y,x])\n\t\treturn neighbours", "def symbol_table_addresses(self):\n all_address = []\n for node in self.all_nodes[0]:\n all_address.extend(node['addresses'])\n return all_address", "def get_cells(self):\n cell_list = []\n for cell_row in self.board:\n for current_cell in cell_row:\n if current_cell is not None:\n cell_list.append(current_cell)\n return cell_list", "def neighbors(node, topology):\n return [n for n in topology[node]]", "def addrCellsProperty(self):\n return FdtPropertyWords(\"#address-cells\", self.addr_cells)", "def get_canidates(self, row=-1, col=-1, cell=-1):\n\n if row != -1 and col != -1 and cell == -1:\n _row,_col = row,col\n _cell = (_row,_col)\n\n elif row == -1 and col == -1 and type(cell) == tuple:\n _row,_col = cell\n _cell = cell\n\n else:\n raise Exception(\"you must provide row and col or a cell tuple\")\n\n group = self.calc_group(_row, _col)\n if self.board[_row][_col] == self.empty_cell_flag :\n ret = self.rows[_row] & self.columns[_col] & self.groups[group]\n\n if _cell in self.cell:\n ret -= self.cell[_cell]\n\n return ret\n\n else:\n return set([])", "def 
find_neighbors(self):\n #checked#\n ###your code here###\n for address in self.homes:\n for i in range(-1, 2):\n for j in range(-1,2):\n neighbor_address=(address[0]+i, address[1]+j)\n if neighbor_address in self.homes and neighbor_address!=address:\n self.homes[address].neighbors.append(self.homes[neighbor_address])", "def get_open_cells(field: MineField, cell: Cell) -> List[Cell]:\n # TODO re-implement iteratively to safe stack space (especially for large boards)\n if cell.is_flag or cell.is_mine or not field.cell_is_safe(cell):\n return [cell]\n \n if cell.visited:\n return\n\n open_cells: list = list()\n cell.visited = True\n open_cells.append(cell)\n\n for cell in field.surrounding_cells(cell):\n open_cells.append(cell)\n if cell.is_flag or cell.visited or not field.cell_is_safe(cell):\n continue\n\n\n open_cells += get_open_cells(field, cell)\n\n return open_cells", "def connected_component(region):\n conn = [list(region[0])] # first cell of connected component\n cell_idx = 0\n while cell_idx < len(conn): \n cell = conn[cell_idx] \n neighbours = find_neighbours(cell, region) # find neighbours of cell in region\n for neighbour in neighbours:\n if not neighbour in conn:\n conn.append(neighbour) # add found neighbours to connected component\n cell_idx += 1\n return conn", "def neighbours(self):\n seen = set()\n return [l.other(self) for l in self.dovetails \\\n if id(l) not in seen and not seen.add(id(l))]", "def get_neighbors(self, atom):\n return self._graph.get_connected_vertices(atom)" ]
[ "0.64621055", "0.6259143", "0.60996354", "0.59649587", "0.5957195", "0.59179574", "0.59147054", "0.577808", "0.57189125", "0.57143325", "0.56766814", "0.56755906", "0.5634444", "0.5549997", "0.5474342", "0.5458419", "0.5452087", "0.5439986", "0.5412348", "0.5412165", "0.53678375", "0.5364099", "0.53510237", "0.53356093", "0.53285766", "0.5306321", "0.5282511", "0.5279239", "0.52777743", "0.52648276" ]
0.7298695
0
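The record above only exposes a precomputed peer table. A self-contained sketch of how the peer set of a cell can be derived from scratch, matching the row/column/region definition in the query (the names are illustrative):

from typing import Set, Tuple

def peer_coordinates(row: int, column: int) -> Set[Tuple[int, int]]:
    # Peers share the row, the column, or the 3x3 region; the cell itself is excluded.
    same_row = {(row, c) for c in range(9)}
    same_column = {(r, column) for r in range(9)}
    top, left = 3 * (row // 3), 3 * (column // 3)
    same_region = {(top + r, left + c) for r in range(3) for c in range(3)}
    return (same_row | same_column | same_region) - {(row, column)}

# 8 row peers + 8 column peers + 4 region peers not already counted = 20.
assert all(len(peer_coordinates(r, c)) == 20 for r in range(9) for c in range(9))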
Returns the same formula, but with clauses expressed as sets of tuples rather than lists of tuples. The formula remains a list.
def convert_formula(formula): return [set(clause) for clause in formula]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lf(formula):\n lfset = set()\n nameObj = formula.getName()\n if nameObj in doubles:\n first = formula.getFirst()\n second = formula.getSec()\n # call for function\n if nameObj == '|':\n firstForm = lf(first)\n secondForm = lf(second)\n lfset = firstForm.union(secondForm)\n elif nameObj == 'U':\n setUntil = lf(second)\n secSet = caseUntil(first, second)\n lfset = lfset.union(setUntil, secSet)\n elif nameObj == 'R':\n secSet = release(first, second)\n lfset = lfset.union(secSet)\n elif nameObj == '&':\n secSet = caseAnd(first, second)\n lfset = lfset.union(secSet)\n elif nameObj == 'V':\n secSet = release(first, second)\n firstSet = \"NOTRELEVANT\"\n lfset.union(firstSet, secSet)\n elif nameObj in singles:\n if nameObj == 'X':\n tup = caseNext(formula.getFirst())\n lfset = lfset.union(tup)\n else:\n # appeal of helpfunction for new definiton\n tup = caseLiteral(nameObj, formula)\n lfset = lfset.union(tup)\n flatten(lfset)\n return lfset", "def expand(formula,var,val):\n return [clause - {(var, not val)} # group 2 modified here\n for clause in formula \n if (var,val) not in clause] # group 1 removed here", "def formula(self):\n terms = []\n for ff in self.formulae:\n terms += list(ff.terms)\n return Formula(terms)", "def list_formulae():\n return _list_tindyb_unique_values(\"formula\", dbpath=__dbpath__)", "def get_formula_in_list(self):\n return tree_to_string(self.expression)", "def solver(formula):\n # dictionary initializing output solution\n assignments={}\n\n # check and simplify unit clauses\n for clause in formula:\n # if clause is a unit clause\n if len(clause)==1:\n # extract random literal from clause\n var,val=get_from_set(clause)\n # make assignment such that unit clause is true\n assignments[var] = val\n # update rest of the formula with such assignment\n formula = expand(formula,var,val)\n\n # RECURSION BASE CASE 1: found one of possible solutions\n # NOTE: since I eliminate clauses once satisfied, list is \n # empty when all clauses are satisfied. 
\n if not formula:\n return assignments\n\n # RECURSION BASE CASE 2: impossible due to contradiction\n # NOTE: if any of the clauses is false, then no solution\n if not all(formula):\n return None\n\n # CORE OF RECURSION: recursive simplification of CNF formula\n var, val = get_from_set(formula[0])\n for attempt in (val, not val): # e.g try True, if no success try False \n assignments[var] = attempt\n new_assignments = solver(expand(formula,var,attempt))\n if new_assignments is not None:\n assignments.update(new_assignments)\n return assignments\n\n # if we get to this line, neither attempt yields a solution\n return None", "def flatten(formula):\n if not isinstance(formula, CompoundFormula) or formula.connective not in (Connective.And, Connective.Or):\n return formula\n return CompoundFormula(formula.connective, _flatten(formula, formula.connective))", "def defSix(my, ny):\n if type(my) == tuple or type(ny) == tuple:\n if type(my) == tuple:\n my = list(my)\n if type(ny) == tuple:\n ny = list(ny)\n else:\n if type(my) != frozenset:\n my = {my}\n if type(ny) != frozenset:\n ny = {ny}\n total = list(my) + list(ny)\n doubleNeg = False\n for i in total:\n for j in total:\n if (i.getName() == j.getName() and i.getNeg() != j.getNeg()):\n doubleNeg = True\n if(list(my)[0].getName() == 'ff' or list(ny)[0].getName() == 'ff'):\n return lFormula('ff')\n elif(doubleNeg is True):\n return lFormula('ff')\n else:\n solution = set()\n for x in list(my):\n solution.add(x)\n for x in list(ny):\n solution.add(x)\n return frozenset(solution)", "def extract_variables(cnf_formula: str) -> list[str]:\n variables = set()\n cnf_notation = identify_notation(cnf_formula)\n\n and_symbol_pattern = ' ' + cnf_notation.value[CNFLogicConnective.AND] + ' '\n clauses = list(map(lambda c: c[1:len(c)-1], cnf_formula.split(and_symbol_pattern))) # Remove initial and final parenthesis\n\n # Remove final parenthesis of last clause (because of the possible end of line: '\\n')\n if ')' in clauses[len(clauses)-1]:\n clauses[len(clauses)-1] = clauses[len(clauses)-1][:-1] \n\n for c in clauses:\n tokens = c.split(' ')\n tokens = list(filter(lambda t: t != cnf_notation.value[CNFLogicConnective.OR], tokens))\n for feature in tokens:\n if feature == cnf_notation.value[CNFLogicConnective.NOT]:\n continue\n elif feature.startswith(cnf_notation.value[CNFLogicConnective.NOT]):\n variables.add(feature.replace(cnf_notation.value[CNFLogicConnective.NOT], '', 1))\n else:\n variables.add(feature)\n return list(variables)", "def _flatten(formula, parent_connective):\n if not isinstance(formula, CompoundFormula) or formula.connective != parent_connective:\n return (formula,) # (returns a tuple)\n return tuple(itertools.chain.from_iterable(_flatten(sub, parent_connective) for sub in formula.subformulas))", "def solution(i, literals, clauses):\n valuation_list = binary_list(i, literals)\n num_true_clauses = 0\n\n for c in clauses:\n num_true_clauses += is_clause_satisfied(valuation_list, c)\n\n return valuation_list, num_true_clauses", "def getFormula(self):\n dataDict = self.__dict__\n # get formula dictionary\n dd = {}\n for ca in self.chemAtoms:\n if isinstance(ca, ChemAtom):\n ss = ca.elementSymbol\n ii = dd.get(ss)\n if ii is None:\n dd[ss] = 1\n else:\n dd[ss] = ii + 1\n \n # generate formula\n ll = [None] * (len(dd)*2)\n next = 0\n for ss in ('C', 'H'):\n ii = dd.get(ss)\n if ii is not None:\n ll[next] = ss\n ll[next+1] = str(ii)\n del dd[ss]\n next = next + 2\n \n items = dd.items()\n items.sort()\n for ss, ii in items:\n ll[next] = ss\n 
ll[next+1] = str(ii)\n next = next + 2\n #\n result = ''.join(ll)\n return result", "def satisfying_assignment(formula):\n # convert the formula to a list of sets.\n formula = [set(i) for i in formula]\n\n # call the helper starting with the givne formula and an empty assignments\n # dictionary.\n result = sat_helper(formula, {})\n if result[0]:\n return result[1] # result[1] will be the dictionary of assignments.\n else:\n return None", "def as_relational(self, symbol):\n return And(*[set.as_relational(symbol) for set in self.args])", "def parseFormula(*args):\n return _libsbml.parseFormula(*args)", "def AndLTL(formulas: List[LTL]) -> LTL:\n if len(formulas) > 1:\n vars = formulas[0].variables\n for i in range(1, len(formulas)):\n vars += formulas[i].variables\n conj = ' & '.join(s.formula for s in formulas)\n return LTL(\"(\" + conj + \")\", vars)\n elif len(formulas) == 1:\n return formulas[0]\n else:\n raise Exception(\"List of formulas is empty\")", "def get_symbols(formula, acc_list=None, acc_str=None):\r\n\r\n # To avoid mutable default errors\r\n if acc_list is None:\r\n acc_list = []\r\n if acc_str is None:\r\n acc_str = \"\"\r\n\r\n # Base case is when formula is an empty string\r\n if len(formula) == 0:\r\n # Adds the accumulated string to acc_list if there is any\r\n if len(acc_str) != 0:\r\n acc_list.append(acc_str)\r\n # Return acc_list as the final result\r\n return acc_list\r\n\r\n # Otherwise there are characters left in formula\r\n else:\r\n # Set char equal to the first character in formula\r\n char = formula[0]\r\n\r\n # If the character is a parenthesis\r\n if char in operators or char in ['(', ')']:\r\n # Adds the accumulated string to acc_list if there is any\r\n if len(acc_str) != 0:\r\n acc_list.append(acc_str)\r\n # Append the parenthesis to acc_list\r\n acc_list.append(char)\r\n # Call get_symbols recrusively on the rest of formula and reset acc_str to empty\r\n return get_symbols(formula[1:], acc_list, \"\")\r\n\r\n # If the character is whitespace or an equal sign (the beginning of a formula)\r\n elif char == ' ' or char == '=':\r\n # Adds the accumulated string to acc_list if there is any\r\n if len(acc_str) != 0:\r\n acc_list.append(acc_str)\r\n # Call get_symbols recrusively on the rest of formula and reset acc_str to empty\r\n # ignoring char since it was either whitespace or the initial equals sign\r\n return get_symbols(formula[1:], acc_list, \"\")\r\n\r\n # If the character is not whitespace (cell or number) or '='\r\n else:\r\n # Call get_symbols recursively on the rest of formula\r\n # and concatenate char onto acc_str\r\n return get_symbols(formula[1:], acc_list, acc_str + char)", "def _calculate_loss_formula(self) -> List[List[List[Tuple]]]:\n self._formulas = []\n for i in trange(len(self.explanations), desc=\"Setting explanations \"):\n explanation = self.explanations[i]\n if not is_dnf(explanation):\n explanation = str(to_dnf(explanation))\n formulas_terms = []\n terms = explanation.split(\"|\")\n assert len(terms) > 0, f\"Error in explanation formulation. 
Expected DNF formula but it is {explanation}\"\n for term in terms:\n # excluding trivial explanation\n if term == \"False\" or term == \"True\":\n continue\n # excluding parenthesis\n term = term.replace(\" \", \"\")\n if term[0] == \"(\":\n term = term[1:-1]\n formula_term = []\n attributes = term.split(\"&\")\n for attribute in attributes:\n if attribute[0] == \"~\":\n negated = True\n attribute = attribute[1:]\n else:\n negated = False\n idx = self.attributes.index(attribute)\n formula_term.append((idx, negated))\n formulas_terms.append(formula_term)\n self._formulas.append(formulas_terms)\n return self._formulas", "def tupleize_answers(self, answer, var_dict_list):\r\n _ = self.capa_system.i18n.ugettext\r\n\r\n out = []\r\n for var_dict in var_dict_list:\r\n try:\r\n out.append(evaluator(\r\n var_dict,\r\n dict(),\r\n answer,\r\n case_sensitive=self.case_sensitive,\r\n ))\r\n except UndefinedVariable as err:\r\n log.debug(\r\n 'formularesponse: undefined variable in formula=%s',\r\n cgi.escape(answer)\r\n )\r\n raise StudentInputError(\r\n _(\"Invalid input: {bad_input} not permitted in answer.\").format(bad_input=err.message)\r\n )\r\n except ValueError as err:\r\n if 'factorial' in err.message:\r\n # This is thrown when fact() or factorial() is used in a formularesponse answer\r\n # that tests on negative and/or non-integer inputs\r\n # err.message will be: `factorial() only accepts integral values` or\r\n # `factorial() not defined for negative values`\r\n log.debug(\r\n ('formularesponse: factorial function used in response '\r\n 'that tests negative and/or non-integer inputs. '\r\n 'Provided answer was: %s'),\r\n cgi.escape(answer)\r\n )\r\n raise StudentInputError(\r\n _(\"factorial function not permitted in answer \"\r\n \"for this problem. 
Provided answer was: \"\r\n \"{bad_input}\").format(bad_input=cgi.escape(answer))\r\n )\r\n # If non-factorial related ValueError thrown, handle it the same as any other Exception\r\n log.debug('formularesponse: error %s in formula', err)\r\n raise StudentInputError(\r\n _(\"Invalid input: Could not parse '{bad_input}' as a formula.\").format(\r\n bad_input=cgi.escape(answer)\r\n )\r\n )\r\n except Exception as err:\r\n # traceback.print_exc()\r\n log.debug('formularesponse: error %s in formula', err)\r\n raise StudentInputError(\r\n _(\"Invalid input: Could not parse '{bad_input}' as a formula\").format(\r\n bad_input=cgi.escape(answer)\r\n )\r\n )\r\n return out", "def satisfying_assignment(formula):\n return solver(convert_formula(formula))", "def relations(self):\n\n def functions_helper(returned_set: set()):\n if is_relation(self.root): # Populate self.root and self.arguments\n returned_set.add((self.root, len(self.arguments)))\n\n elif is_equality(self.root): # Populate self.first and self.second\n return\n elif is_quantifier(self.root): # Populate self.variable and self.predicate\n returned_set.update(self.predicate.relations())\n\n elif is_unary(self.root): # Populate self.first\n returned_set.update(self.first.relations())\n\n else: # Populate self.first and self.second\n returned_set.update(self.first.relations())\n returned_set.update(self.second.relations())\n return\n\n \"\"\" Return a set of pairs (function_name, arity) for all function names\n that appear in this formula \"\"\"\n returned_set = set()\n functions_helper(returned_set)\n return returned_set\n\n # Ex12", "def sat_apply_assignment(self, assignment):\n # YOUR CODE HERE\n o = set()\n print(s)\n print({x.simplify(assignment) for x in self.clauses if not isinstance(x.simplify(assignment), bool)})\n for x in s.clauses:\n if not isinstance(x.simplify(assignment), bool):\n o.add(x.simplify(assignment))\n print(\"ASSIGN SET\", o)\n\n return SAT(o)\n # return SAT({x.simplify(assignment) for x in self.clauses if not isinstance(x.simplify(assignment), bool)})", "def sat_generate_candidate_assignments(self):\n # YOUR CODE HERE\n short = min(len(c) for c in self.clauses)\n for c in self.clauses:\n if len(c) == short:\n return set(c.literals)\n # return (set(x.literals) for x in self.clauses if len(x) == min(len(c) for c in self.clauses))", "def pull_out_quantifications_across_binary_operator(formula: Formula) -> \\\r\n Tuple[Formula, Proof]:\r\n assert has_uniquely_named_variables(formula)\r\n assert is_binary(formula.root)\r\n # Task 11.8\r\n\r\n\r\n prover = Prover(Prover.AXIOMS.union(ADDITIONAL_QUANTIFICATION_AXIOMS))\r\n\r\n # if is_quantifier_free(formula):\r\n # ccl = equivalence_of(formula, formula)\r\n # prover.add_tautology(ccl)\r\n # return formula, prover.qed()\r\n\r\n left_f, left_p = pull_out_quantifications_from_left_across_binary_operator(formula)\r\n\r\n right_f, right_p = __across_helper(left_f)\r\n\r\n step1 = prover.add_proof(left_p.conclusion, left_p)\r\n step2 = prover.add_proof(right_p.conclusion, right_p)\r\n\r\n prover.add_tautological_implication(equivalence_of(formula,right_f), [step2, step1])\r\n\r\n return right_f, prover.qed()", "def sudoku_clauses():\n res = []\n # for all cells, ensure that the each cell:\n for i in range(1, 10):\n for j in range(1, 10):\n # denotes (at least) one of the 9 digits (1 clause)\n res.append([v(i, j, d) for d in range(1, 10)])\n # does not denote two different digits at once (36 clauses)\n for d in range(1, 10):\n for dp in range(d + 1, 10):\n res.append([-v(i, j, 
d), -v(i, j, dp)])\n\n def valid(cells):\n # Append 324 clauses, corresponding to 9 cells, to the result.\n # The 9 cells are represented by a list tuples. The new clauses\n # ensure that the cells contain distinct values.\n for i, xi in enumerate(cells):\n for j, xj in enumerate(cells):\n if i < j:\n for d in range(1, 10):\n res.append([-v(xi[0], xi[1], d), -v(xj[0], xj[1], d)])\n\n # ensure rows and columns have distinct values\n for i in range(1, 10):\n valid([(i, j) for j in range(1, 10)])\n valid([(j, i) for j in range(1, 10)])\n # ensure 3x3 sub-grids \"regions\" have distinct values\n for i in 1, 4, 7:\n for j in 1, 4 ,7:\n valid([(i + k % 3, j + k // 3) for k in range(9)])\n\n assert len(res) == 81 * (1 + 36) + 27 * 324\n return res", "def get_all_basic_subformula(formula, rename_flag=False):\n basic_expr = list()\n for i in range(len(formula.assert_cmd)):\n exps, typ = get_basic_subterms(formula.assert_cmd[i], i, rename_flag)\n basic_expr += exps\n return basic_expr", "def graph_by_clauses(clauses: List[Tuple[int, int]]) -> DefaultDict[int, List]:\n graph = defaultdict(list)\n for clause in clauses:\n x, y = clause # encoded literals\n graph[negate(x)].append(y)\n graph[negate(y)].append(x)\n graph[x].extend([])\n graph[y].extend([])\n return graph", "def find_solution(formula):\n #if formula is empty or if there is a contradiction between clauses\n if not formula or disqualifier(formula):\n return {}\n \n solution = get_one_unit_clause(formula)\n #if there are no unit clauses, move on to non-unit clauses\n if not solution:\n solution = get_non_unit_clause(formula)\n #if there are contradictions with literals on non-unit clauses, backtrack, get rid of that contradicting literal, and try again\n if disqualifier(reduce_expression(formula, solution)):\n solution = get_non_unit_clause(formula, True)\n updatedForm = reduce_expression(formula, solution)\n #double asterisks allow any number of keywords to be passed as an argument\n return {**find_solution(updatedForm), **{solution[0]: solution[1]}}", "def make_real_clauses(sv, pair, user_set):\r\n for x in pair:\r\n for cause in get_subtree_list(sv, x): # n.b. there may be duplicates \r\n solve_user_calls(sv, cause, user_set) # analyze each object RECURSIVE\r", "def allargs(symbol, fact, expr):\n return And(*[fact.subs(symbol, arg) for arg in expr.args])" ]
[ "0.6366528", "0.633642", "0.6149193", "0.5893506", "0.58395267", "0.57534564", "0.5645297", "0.56093985", "0.5568409", "0.55423987", "0.5500192", "0.5485004", "0.54606825", "0.5426495", "0.54117817", "0.5408915", "0.54063284", "0.5405922", "0.5393781", "0.53883916", "0.53843576", "0.5384056", "0.5328089", "0.5271918", "0.5269742", "0.52627057", "0.5261925", "0.52591515", "0.5251911", "0.523315" ]
0.7646575
0
Extracts an element from a set without modifying the set itself, as set.pop() otherwise would.
def get_from_set(set_):
    for e in set_:
        return e
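A quick usage sketch (illustrative, not part of the record): the element is read without being removed, which set.pop() would not guarantee, and an empty set falls through the loop and returns None.

    s = {1, 2, 3}
    e = get_from_set(s)   # e is some element of s; which one is unspecified
    assert e in s         # s still has all three elements
    assert get_from_set(set()) is None  # empty set: loop body never runs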
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_one(_set):\r\n assert _set # _set is not empty\r\n return next(iter(_set))", "def peek(set_):\n ensure_set(set_)\n if not set_:\n raise KeyError(\"peek into an empty set\")\n return next(iter(set_))", "def removeFromSet(_session, _el, _set):\n it = _session.create_iterator(_session.sc_constraint_new(sc_constants.CONSTR_3_f_a_f,\n _set,\n sc.SC_ARC,\n _el), True)\n while not it.is_over():\n _session.erase_el(it.value(1))\n it.next()", "def remove_elements_from_set(s: set, *args) -> set:\n for _ in args:\n s.remove(_)\n return s", "def pop(self, last=True):\r\n if not self:\r\n raise KeyError('set is empty')\r\n key = reversed(self).next() if last else iter(self).next()\r\n self.discard(key)\r\n return key", "def pop(self):\r\n it = iter(self)\r\n try:\r\n value = next(it)\r\n except StopIteration:\r\n raise KeyError\r\n self.discard(value)\r\n return value", "def remove(self, pset):\n self._sets.remove(pset)", "def pop(self) -> Optional[T]:\n try:\n elem = heapq.heappop(self._heap).val\n self._unique_values.remove(elem)\n except IndexError:\n return None\n return elem", "def toggle(collection: set[_T], item: _T | None) -> set[_T]:\n\n if item is None:\n return collection\n\n if item in collection:\n return collection - {item}\n else:\n return collection | {item}", "def remove_subset(set_, subset):\n ensure_set(set_)\n ensure_iterable(subset)\n\n for elem in subset:\n set_.remove(elem)", "def pop(self):\r\n try:\r\n key = heapq.heappop(self.heap)\r\n return self.elements[key]\r\n except:\r\n raise StopIteration", "def __sub__(self, other):\n if not isinstance(other, (list, Set)):\n raise TypeError(\"only sets can be removed from sets\")\n\n new_set = self._clone()\n\n for element in other:\n new_set.delete(element)\n\n return new_set", "def pop(self):\n \n assert not self.empty()\n return self._s.pop()", "def pop(self, index):\n self._sets.pop(index)", "def trivial_partition(set_):\n ensure_countable(set_)\n\n result = ((x,) for x in set_)\n return _harmonize_subset_types(set_, result)", "def _pop_first(self) -> Any:\n if self.is_empty():\n raise IndexError\n return self.pop(0)", "def pop(self):\n if self.items:\n return self.items.pop()\n return None", "def findSetWithElement(self, element, \n is_present=True):\n sets = [s for s in self.sets if element in s]\n if len(sets) > 1:\n raise RuntimeError(\"Should have at most instance of values.\")\n if len(sets) == 0:\n if is_present:\n raise ValueError(\"%s is not present\" % str(element))\n result = cn.NULL_SET \n else:\n result = sets[0]\n return result", "def test_set(self):\n a = set()\n a.add('b')\n a.add('c')\n a.add('a')\n b = list(a)\n b.sort()\n self.assertEqual(b, ['a', 'b', 'c'])\n a.remove('b')\n b = list(a)\n b.sort()\n self.assertEqual(b, ['a', 'c'])\n\n a.discard('d')\n\n b = set(['r', 's'])\n d = a.union(b)\n b = list(d)\n b.sort()\n self.assertEqual(b, ['a', 'c', 'r', 's'])", "def pop(self):\n\n if self.items:\n return self.items.pop()\n\n return None", "def pop(self):\n return self.popleft()", "def difference(stack):\n assertArity(stack, 2)\n rhs, lhs = stack.pop(), stack.pop()\n assertType(lhs, Set)\n assertType(rhs, Set)\n return Set(lhs - rhs)", "def remove(self) -> object:\n return self._contains.pop()", "def pop():", "def pop(self):\n return self.remove(0)", "def pop(self):\n return self.s1.pop()", "def pop(self, key: T) -> Optional[U]:\n if key in self._store:\n return self._store.pop(key)\n return None", "def pop(self):\n return self.ll.delete_first()", "def notInSet(stack):\n assertArity(stack, 2)\n rhs, lhs 
= stack.pop(), stack.pop()\n assertType(rhs, Set)\n return lhs not in rhs", "def union(stack):\n assertArity(stack, 2)\n rhs, lhs = stack.pop(), stack.pop()\n assertType(lhs, Set)\n assertType(rhs, Set)\n return Set(lhs | rhs)" ]
[ "0.72956246", "0.7101703", "0.6618571", "0.6581533", "0.6568849", "0.60777617", "0.5827025", "0.5797772", "0.57287544", "0.56769073", "0.5629646", "0.5607694", "0.5606581", "0.5581183", "0.55715925", "0.5543389", "0.5521011", "0.55131763", "0.55004513", "0.5433363", "0.5429391", "0.54279345", "0.5421606", "0.54106987", "0.540234", "0.53722113", "0.53637433", "0.53531265", "0.53403443", "0.5335643" ]
0.76785475
0
Convert a quiz room scheduling problem into a Boolean formula.
def boolify_scheduling_problem(student_preferences, session_capacities):
    # gather all rules (i.e. functions) defined below
    rules = (only_desired_sessions, only_one_session, no_oversuscribed_sessions)
    # combine them in one formula
    out = []
    for rule in rules:
        out += rule(student_preferences, session_capacities)
    return out
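The three rule helpers are defined elsewhere in the source and are not part of this record. As a purely hypothetical sketch of their shape, assuming the CNF convention visible in the first negative below (a clause is a list of ('variable', 'True'/'False') literal pairs), one rule could look like:

    def only_desired_sessions(student_preferences, session_capacities):
        # hypothetical: one clause per student, "name sits in at least one preferred session"
        cnf = []
        for name, preferences in student_preferences.items():
            cnf.append([(str(name) + '_' + str(session), 'True') for session in preferences])
        return cnf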
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def boolify_scheduling_problem(student_preferences, session_capacities):\n #first constraint for assignment\n #clause will consist of name_preference = True (or)\n cnf = []\n names = student_preferences.keys()\n for name in names:\n clause = []\n preferences = student_preferences[name]\n for preference in preferences:\n literal = (str(name) + '_' + str(preference)), 'True'\n clause.append(literal)\n cnf.append(clause)\n #concatenate other 2 constraints, as detailed by the helper functions below\n cnf += one_session(student_preferences, session_capacities)\n cnf += oversubscribed(student_preferences, session_capacities)\n return cnf", "def is_satisfiable(formula: Formula) -> bool:\n # Task 2.5c\n variables = list(sorted(formula.variables()))\n assignment_dict = all_models(list(variables))\n for val in truth_values(formula, assignment_dict):\n if val:\n return True\n return False", "def proof_of_formula_eq_formula(formula):\n formula = str(formula)\n prover = Prover(DEFAULT_PROOF_ASSUMPTIONS, EQUIVALENCE_FORMAT.format(formula, formula))\n prover.add_tautology(EQUIVALENCE_FORMAT.format(formula, formula))\n return prover.proof", "def to_implies_false(formula: Formula) -> Formula:\r\n # Task 3.6d\r\n convert_implies = to_implies_not(formula)\r\n map_false = {'~': Formula('->', Formula('p'), Formula('F'))}\r\n return convert_implies.substitute_operators(map_false)", "def evaluate(formula, model):\r\n # Task 2.1\r\n if is_unary(formula.root):\r\n return not evaluate(formula.first, model)\r\n elif is_ternary(formula.root):\r\n if evaluate(formula.first,model):\r\n return evaluate(formula.second,model)\r\n else:\r\n return evaluate(formula.third,model)\r\n elif is_binary(formula.root):\r\n if formula.root == '&':\r\n return evaluate(formula.first, model) and evaluate(formula.second, model)\r\n elif formula.root == '|':\r\n return evaluate(formula.first, model) or evaluate(formula.second, model)\r\n elif formula.root == '<->':\r\n return not (evaluate(formula.first, model) ^ evaluate(formula.second, model))\r\n elif formula.root == '-&':\r\n return not (evaluate(formula.first, model) and evaluate(formula.second, model))\r\n elif formula.root == '-|':\r\n return not (evaluate(formula.first, model) or evaluate(formula.second, model))\r\n else:\r\n return (not evaluate(formula.first, model)) or evaluate(formula.second, model)\r\n elif is_constant(formula.root):\r\n if formula.root == 'T':\r\n return True\r\n else:\r\n return False\r\n else:\r\n return model[formula.root]\r\n #\r\n # if is_constant(formula.root):\r\n # if formula.root == 'T':\r\n # return True\r\n # elif formula.root == 'F':\r\n # return False\r\n # elif is_variable(formula.root):\r\n # return model[formula.root]\r\n #\r\n # elif is_unary(formula.root):\r\n # return not evaluate(formula.first, model)\r\n # assert (type(formula.first) is Formula) and (type(formula.second) is Formula)\r\n # if is_binary(formula.root):\r\n # if formula.root == '&':\r\n # return evaluate(formula.first, model) and evaluate(formula.second, model)\r\n # elif formula.root == '|':\r\n # return evaluate(formula.first, model) or evaluate(formula.second, model)\r\n # elif formula.root == '->':\r\n # if evaluate(formula.first, model) and not evaluate(formula.second, model):\r\n # return False\r\n # else:\r\n # return True\r\n # elif formula.root == '<->':\r\n # if (not evaluate(formula.first, model) and not evaluate(formula.second, model)) or (\r\n # evaluate(formula.first, model) and evaluate(formula.second, model)):\r\n # return True\r\n # else:\r\n # return 
False\r\n # elif formula.root == '-&':\r\n # if evaluate(formula.first, model) and evaluate(formula.second, model):\r\n # return False\r\n # else:\r\n # return True\r\n # elif formula.root == '-|':\r\n # if not evaluate(formula.first, model) and not evaluate(formula.second, model):\r\n # return True\r\n # else:\r\n # return False\r\n # else:\r\n # if evaluate(formula.first, model):\r\n # return evaluate(formula.second, model)\r\n # else:\r\n # return evaluate(formula.third, model)\r", "def is_satisfiable(formula: Formula) -> bool:\r\n # satisfiable - if it gets the value True at least once\r\n # Task 2.5c\r\n all_models_local = all_models(list(formula.variables()))\r\n for bool_val in truth_values(formula, all_models_local):\r\n if bool_val:\r\n return True\r\n return False", "def satisfying_assignment(formula):\n return solver(convert_formula(formula))", "def evaluate(formula: Formula, model: Model) -> bool:\n assert is_model(model)\n assert formula.variables().issubset(variables(model))\n # Task 2.1\n if is_constant(formula.root):\n return True if formula.root == 'T' else False\n elif is_variable(formula.root):\n return model[formula.root]\n elif is_unary(formula.root):\n return not evaluate(formula.first, model)\n elif is_binary(formula.root):\n return eval_binary(evaluate(formula.first, model), evaluate(formula.second, model), formula.root)", "def evaluate(formula: Formula, model: Model) -> bool:\r\n assert is_model(model)\r\n assert formula.variables().issubset(variables(model))\r\n # Task 2.1\r\n if is_unary(formula.root):\r\n return not evaluate(formula.first, model)\r\n elif is_constant(formula.root):\r\n if formula.root == 'T':\r\n return True\r\n return False # if its not 'T' than it must be 'F'\r\n elif is_variable(formula.root):\r\n return model.get(formula.root)\r\n else:\r\n # if we got here, than it must be binary operation\r\n assert (is_binary(formula.root))\r\n return evaluate_binary_operation_handler(formula, model)", "def isFormula(string):\r\n string = string.replace(' ', '')\r\n if string == '':\r\n return True\r\n elif re.sub(r\"\\w|\\d|->|_|\\(|\\)|~\", '', string):\r\n return False\r\n elif re.findall(r\"(?<!\\w_)\\d+|(?<!\\w)\\d+|->->\", string):\r\n return False\r\n else:\r\n string1 = string.replace('~', '').replace('->', '+')\r\n info = re.findall(r'\\w_\\d+|\\w\\d*', string1)\r\n for part in info:\r\n string1 = string1.replace(part, '(-1)')\r\n try:\r\n eval(string1)\r\n except:\r\n return False\r\n string2 = string.replace('~', '-').replace('->', '+')\r\n info = re.findall(r'\\w_\\d+|\\w\\d*', string2)\r\n for part in info:\r\n string2 = string2.replace(part, '(-1)')\r\n try:\r\n eval(string2)\r\n except:\r\n return False\r\n return True", "def boolean_func(experiment):", "def to_not_and(formula: Formula) -> Formula:\r\n # Task 3.6a\r\n map_operators = {'->': Formula.parse('~(~~p&~q)'),\r\n '+': Formula.parse('~(~(p&~q)&~(~p&q))'),\r\n '<->': Formula.parse('~~(~(p&~q)&~(~p&q))'),\r\n '-&': Formula.parse('~(p&q)'),\r\n '-|': Formula.parse('~~(~p&~q)'),\r\n 'F': Formula.parse('(p&~p)'),\r\n 'T': Formula.parse('~(p&~p)'),\r\n '|': Formula.parse('~(~p&~q)')}\r\n return formula.substitute_operators(map_operators)", "def _toBoolean (iraf_flag):\n\n return (iraf_flag == iraf.yes)", "def valid(formula):\r\n\r\n try:\r\n return not re.search(r'\\b0[0-9]', formula) and eval((formula) is True\r\n #except ArithmeticError:\r\n #return False\r\n except:\r\n return False", "def solve(self, question):\n # check if the question matches binary operation\n match = 
self.BINARY_OP_REGEX.match(question)\n if match:\n # read LHS operand\n op1 = self.get_number(match.group(1))\n if op1 is None:\n return\n # read operator\n operator = match.group(2)\n # read RHS operand\n op2 = self.get_number(match.group(3))\n if op2 is None:\n return\n # calculate the operation\n self.handle_binary_operator(op1, operator, op2)\n return\n # check match of unary operation\n match = self.UNARY_OP_REGEX.match(question)\n if match:\n # read operator\n operator = match.group(1).upper()\n # read operand\n op = self.get_number(match.group(2))\n if op is None:\n return\n # calculate the operation\n self.handle_unary_operator(operator, op)\n return\n # no match found\n print(\"Invalid question!\")", "def evaluateBoolean(compiled_expression):", "def is_atomic(formula):\n return isinstance(formula, Symbol) or isinstance(formula, Predicate)", "def print_truth_table(formula: Formula) -> None:\n # Task 2.4\n variables = list(sorted(formula.variables()))\n assignment_dict = all_models(list(variables))\n assignment_results = list(truth_values(formula, assignment_dict))\n arr = []\n for i, assignment in enumerate(assignment_dict):\n vals = list(assignment.values())\n vals.append(assignment_results[i])\n vals = ['T' if i == True else 'F' for i in vals]\n arr.append(vals)\n\n variables.append(str(formula))\n table_printer(variables, arr)", "def eval_act_logic(self, act_logic_str):\n if self.ignore_act_logic:\n return True\n # return true for empty string\n if len(act_logic_str) == 0:\n return True\n eval_str = act_logic_str\n # switch to Python boolean operators\n for (op, py_op) in (('!', ' not '), ('&&', ' and '), ('||', ' or ')):\n eval_str = eval_str.replace(op, py_op)\n # TODO: guard against malicious code in eval() call?\n result = eval(eval_str, self.ZZ_EVAL_VARS)\n return result", "def format_bool(b):\n return \"YES\" if b else \"NO\"", "def to_boolean(self,string):\n if self.debug:\n print('to_boolean'+lineno())\n # FIXME\n sys.exit(1)\n #string.to_s.casecmp('true').zero?", "def _string_to_bool(string_representation_of_bool):\n return bool(strtobool(string_representation_of_bool))", "def test_staff_inputs_expressions_legacy(self):\r\n problem = self.build_problem(answer=\"1+1j\", tolerance=1e-3)\r\n self.assert_grade(problem, '1+j', 'correct')", "def truth_values(formula: Formula, models: Iterable[Model]) -> Iterable[bool]:\n # Task 2.3\n arr = []\n for model in models:\n arr.append(evaluate(formula, model))\n return arr", "def check_formula(self, expected, given, samples):\r\n var_dict_list = self.randomize_variables(samples)\r\n student_result = self.tupleize_answers(given, var_dict_list)\r\n instructor_result = self.tupleize_answers(expected, var_dict_list)\r\n\r\n correct = all(compare_with_tolerance(student, instructor, self.tolerance)\r\n for student, instructor in zip(student_result, instructor_result))\r\n if correct:\r\n return \"correct\"\r\n else:\r\n return \"incorrect\"", "def is_operator(formula):\n return is_binary_operator(formula) or isinstance(formula, Not)", "def answer(self) -> bool:", "def is_correct_answer(self):\n if not self.is_answered():\n return None\n\n if self.question_type == 6: # ArithmeticTask\n return self.option_id == self.sub_question_type % 3 + 1\n elif self.question_type == 8: # ImageTask\n return self.option_id == self.sub_question_type % 3 + 1\n\n return None", "def is_evaluated(evalassignment):\n if evalassignment.assignment.document.name == '' or evalassignment.\\\n assignment.assignmentype.deadline_submission > timezone.now():\n return 
-30\n else:\n if evalassignment.is_questions_graded:\n if evalassignment.grade_evaluation:\n return evalassignment.grade_evaluation\n else:\n return -10\n else:\n return -20", "def isTrue(self, hard=False):\n global debug_solver_count\n \n if( self.cond == CT.TRUE ):\n return True\n elif( self.cond == CT.FALSE ):\n return False\n \n if( hard == False ):\n res = self.customSimplify()\n return ( res == CE.TRUE )\n else:\n condZ3 = self.simplifyZ3()\n self.checked = True\n v = Solver()\n v.add(Not(condZ3))\n signal.signal(signal.SIGALRM, TimeOutRaiser)\n #signal.setitimer(signal.ITIMER_REAL, 1) \n try: \n res = str(v.check())\n except Exception as e:\n res = \"sat\"\n debug_solver_count = debug_solver_count + 1\n if( str(res) == \"unsat\" ):\n self.setTrue()\n return True\n else:\n return False" ]
[ "0.58110166", "0.54149693", "0.5408767", "0.5338128", "0.5260459", "0.52410215", "0.5120409", "0.5098295", "0.50793344", "0.5055923", "0.5044053", "0.49960268", "0.4963661", "0.4947366", "0.49394703", "0.4920284", "0.49053076", "0.48932275", "0.4886718", "0.48580343", "0.48452845", "0.4836662", "0.4836456", "0.4835529", "0.48200974", "0.48093018", "0.4807912", "0.48012653", "0.47985828", "0.4798251" ]
0.60213345
0
Given an array, returns a set of all unique N-long combinations of its elements.
def combos(array, n=2):
    # base case
    if n == 0:
        yield frozenset()
        return
    # core recursion
    for c in set(combos(array, n - 1)):
        for i in array:
            # added this to avoid duplicate combos
            if i not in c:
                # add element i to combo c
                yield frozenset({i}) | c
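Illustrative usage (names assumed): the same combination can still be built in several orders, so the generator may yield it more than once; callers collect the results into a set as the docstring suggests.

    pairs = set(combos([1, 2, 3], n=2))
    # pairs == {frozenset({1, 2}), frozenset({1, 3}), frozenset({2, 3})}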
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def powerSetNaive(array):\n\n res = [[d] for d in array]\n res.append([])\n array_ = []\n skip = 1\n while skip <=len(array)-1:\n\n for x in range(0,len(array),skip):\n array_.append(array[x])\n for y in range(len(array_[0:x+skip+1])):\n toAppend = array_[y:x+1]\n if toAppend not in res:\n res.append(toAppend)\n toAppend = array_[0:x]\n if toAppend not in res:\n res.append(toAppend)\n array_=[]\n skip = skip + 1\n\n return res", "def powerset(iterable):\n return map(set, chain.from_iterable(\n combinations(iterable, r) for r in range(len(iterable) + 1)))", "def sets(elements, set_size):\n return combinations(elements, set_size)", "def subsets(arr):\n return chain(*[combinations(arr, i + 1) for i, a in enumerate(arr)])", "def subsets(arr):\n return chain(*[combinations(arr, i + 1) for i, a in enumerate(arr)])", "def get_uniques(array):\n uniques = []\n for element in array:\n if element not in uniques:\n uniques.append(element)\n return uniques", "def get_all_comb(array, r=None):\n if r is None:\n r = len(array)\n return [_ for i in range(1, r + 1) for _ in itertools.combinations(array, i)]", "def powerset(iterable):\n s = list(iterable)\n return itertools.chain.from_iterable( itertools.combinations(s, r)\n for r in range(len(s)+1) )", "def power_set(A):\n\n L = list()\n for i in range(len(A) + 1):\n L.extend([set(j) for j in itertools.combinations(A, i)])\n return L\n\n raise NotImplementedError(\"Problem 4 Incomplete\")", "def powerset(iterable):\n s = list(iterable)\n return chain.from_iterable(combinations(s, r) for r in range(1, len(s)+1))", "def powerset(iterable):\n\tset_list = list(iterable)\n\treturn list(chain.from_iterable(combinations(set_list, r)\n\t\t\t\t\t\t\t\tfor r in range(len(set_list)+1)))", "def find_all_ngon_sols():\n ngon = [None for _ in range(N)] \n ngon_set = set()\n numbers = set(range(1, (2 * N) + 1))\n\n for triplet in permutations(numbers, 3):\n ngon[0] = tuple(triplet)\n total = sum(triplet)\n next_ngon_set = set()\n fill_ngon(ngon, numbers - set(triplet), 1, next_ngon_set, total)\n ngon_set |= next_ngon_set\n\n return ngon_set", "def powerset(iterable):\n s = list(iterable)\n return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))", "def powerset(iterable):\n s = list(iterable)\n return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))", "def powerset(iterable):\n s = list(iterable)\n return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))", "def powerset(n):\n # chain r-combinations generator for r=0, 1,..., n\n return chain.from_iterable(combinations(range(n), r) for r in range(n+1))", "def powerset(iterable):\n\n s = list(iterable)\n\n return chain.from_iterable(combinations(s, r) for r in range(2, len(s) + 1))", "def powerset(iterable):\n s = list(iterable)\n return chain.from_iterable(combinations(s, r) for r in range(2, len(s)+1))", "def powerset(iterable):\r\n \r\n s = list(iterable)\r\n subsets = chain.from_iterable(combinations(s, r) for r in range(len(s)+1))\r\n tuples=list(subsets)\r\n tuples.remove(())\r\n return tuples", "def powerset(a):\n if len(a) == 0:\n return set([frozenset()])\n accumulator = set()\n a = set(a)\n element = a.pop()\n for subset in powerset(a):\n accumulator.add(subset)\n accumulator.add(frozenset(set([element]) | subset))\n return accumulator", "def subsets(n):\n binary = lambda x: x>0 and binary(x>>1) + [x&1] or []\n pad = lambda l: [0]*(n-len(l)) + l #Always returns a list of length 'n'\n return [pad(binary(i)) for i in range(1, 2**n)]", "def powerset(iterable):\n xs 
= list(iterable)\n # note we return an iterator rather than a list\n return chain.from_iterable(combinations(xs,n) for n in range(len(xs)+1))", "def combinations(arrays):\n return np.array(np.meshgrid(*arrays)).T.reshape(-1, len(arrays))", "def powerset(iterable):\n\n \"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)\"\n s = list(iterable)\n return itertools.chain.from_iterable(\n itertools.combinations(s, r) for r in range(1, len(s) + 1)\n )", "def powerset(xs):\n cards = list(reversed(xrange(len(xs)))) + [len(xs)]\n return list(chain.from_iterable(combinations(xs, n) for n in cards))", "def unique_of_an_array(an_array):\n temp_array=an_array.copy()\n u=[]\n for i in range(temp_array.shape[0]):\n for j in range(temp_array.shape[1]):\n if temp_array[i][j] not in u:\n u.append(temp_array[i][j])\n return sorted(u)", "def unique_permutations(items):\n return set(permutations(items))", "def powerset(s):\n return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))", "def bruteForcePopulation(N):\n return list(itertools.permutations(range(N), N))", "def distinct(n):\n if n <= 10:\n return [_distincts[2*i] for i in range(0, n)]\n elif n <= 20:\n return _distincts[:n]\n else:\n return _distincts[:]" ]
[ "0.6779107", "0.6662315", "0.66218275", "0.6608261", "0.6608261", "0.6602481", "0.6584448", "0.64823306", "0.64609313", "0.63868445", "0.6363319", "0.6362756", "0.636253", "0.636253", "0.636253", "0.63586986", "0.6320948", "0.62981164", "0.62563735", "0.6245234", "0.62420666", "0.6193457", "0.61906254", "0.61488044", "0.6130323", "0.6089576", "0.6065588", "0.60137486", "0.5984431", "0.59795815" ]
0.7121797
0
Creates a new node at the front of the list and assigns to it the passed value. Increments the list size by 1.
def push_front(self, value):
    new_node = self.Node(value)

    # Edge case: list is empty
    if self._size == 0:
        self._tail = new_node
        self._head = new_node
        self._size += 1
        return

    new_node.next = self._head
    self._head.prev = new_node
    self._head = new_node
    self._size += 1
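This method, together with the peek_front and set records below, assumes an inner doubly linked Node class on the list; a minimal sketch inferred from the attribute accesses (not taken from the record):

    class Node:
        def __init__(self, value):
            self.value = value  # payload read by peek_front/set
            self.next = None    # link toward the tail
            self.prev = None    # link toward the head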
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_first(self, value):\n self.head = Node(value, self.head)", "def push_front(self, val):\n new_node = Node(val, self.head)\n if self.is_empty():\n self.tail = new_node\n self.head = new_node\n self.size += 1", "def add_to_head(self, value):\n\n new_node = ListNode(value)\n if self.size == 0:\n self.head = new_node\n self.tail = new_node\n\n else:\n new_node.next = self.head\n self.head.prev = new_node\n new_node.next = self.head\n self.head = new_node\n\n # increments the size attribute after adding node to list\n self.size += 1", "def push_front(self, item):\n new_node = Node(item)\n # if the list is empty, make it head\n if self.head is None:\n self.head = new_node\n # else, \n else:\n new_node.next = self.head # new node points to current head\n self.head = new_node # current head points to new_node\n self.n += 1", "def prepend(self, value):\r\n if self.head is None:\r\n self.head = Node(value)\r\n return\r\n new_node = Node(value)\r\n new_node.next = self.head\r\n self.head = new_node", "def push_front(self, value):\n node = DLLNode(value)\n if self.head is None:\n self.tail = node \n else: \n self.head.prev_node = node \n node.next_node = self.head\n self.head = node", "def add_front(self, key, value):\n new_node = SLNode(key, value)\n new_node.next = self.head\n self.head = new_node\n self.size = self.size + 1", "def add_front(self, key, value):\n new_node = SLNode(key, value)\n new_node.next = self.head\n self.head = new_node\n self.size = self.size + 1", "def add_front(self, key, value):\r\n\t\tnew_node = SLNode(key, value)\r\n\t\tnew_node.next = self.head\r\n\t\tself.head = new_node\r\n\t\tself.size = self.size + 1", "def insert(self, value):\n old_head = self.head\n self.head = Node(value, old_head)\n if self.count > 0: # if any Nodes: set tail previous to current Node\n old_head.next = self.head\n else: # adding to an empty, than define front\n self.tail = self.head\n self.count += 1", "def push(self, value):\n new_node = Node(value)\n new_node.next = self.head\n self.head = new_node\n self.count += 1\n return new_node", "def push_front(self, val: Generic[T]) -> None:\n first_node = self.node.next\n\n self.node.next = Node(val)\n latest_first = self.node.next\n\n latest_first.prev = self.node #pushes the node to the front\n latest_first.next = first_node\n first_node.prev = latest_first #rearranges the list", "def __add_first(self, value):\n node = self.Node(value, self.__head)\n if self.__head == None: # when this is the first element being added,\n self.__last = node # set the last pointer to this new node\n self.__head = node\n self.__length += 1", "def add_to_head(self, value):\n node = Node(value)\n if self.head is not None:\n node.set_next(self.head)\n\n self.head = node", "def addAtHead(self, val):\n new_node = ListNode(val)\n new_node.next = self.head\n self.head = new_node\n self.length += 1", "def addAtHead(self, val):\n new_node = Node(val)\n new_node.next = self.head\n self.head = new_node\n self.length += 1", "def insert(self, value):\n self.head = Node(value, self.head)", "def addAtHead(self, val: int) -> None:\n if(self.head == None):\n self.head = Node(val)\n else:\n new_node = Node(val)\n new_node.next = self.head \n self.head = new_node", "def addAtHead(self, val):\n new_head = Node(val)\n if self._size == 0:\n self._head = new_head\n self._tail = self._head\n else:\n new_head.next = self._head\n self._head = new_head\n self._size += 1", "def push_back(self, value):\n\n # Edge Case : List is empty\n # Behave just like push_front()\n if self._size == 
0:\n self.push_front(value)\n return\n\n new_node = self.Node(value)\n new_node.prev = self._tail\n self._tail.next = new_node\n self._tail = new_node\n self._size += 1", "def insert(self, value):\n\n # create new node\n\n # self.head =new_node\n current = self.head\n if current == None:\n self.head = Node(value, self.head)\n return\n while current.next != None:\n current=current.next\n new_node=Node(value)\n\n current.next=new_node", "def addAtHead(self, val):\n node = ListNode(val)\n if self.head == None:\n self.head = node\n else:\n node.next = self.head\n self.head = node", "def addAtHead(self, val):\n node = Node(val)\n node.next = self.head\n self.head = node\n\n self.size += 1", "def addAtHead(self, val):\n node = Node(val)\n node.next = self.head\n self.head = node\n\n self.size += 1", "def addAtHead(self, val: int) -> None:\n if self.head:\n temp_node = MyListNode(val, next_node=self.head, prev_node=None)\n self.head.prev = temp_node\n self.head = temp_node\n self.node_count += 1\n else:\n self.head = MyListNode(val)\n self.node_count += 1", "def insert(self, index, value):\n # check the validity of index\n if index < 0 or index > self.n: # larger than no. of items\n print(\"Index Error; please input valid index\")\n return\n # if index==0, same as push_front\n if index==0:\n self.push_front(value)\n return\n # else,\n new_node = Node(value)\n temp_node = self.head\n for _ in range(index-1):\n temp_node = temp_node.next # traverse the list\n new_node.next = temp_node.next # temp_node is index-1 node\n temp_node.next = new_node\n self.n += 1", "def addAtHead(self, val):\n tmp = Node(val)\n tmp.nxt = self.head\n self.head = tmp\n if not self.tail:\n self.tail = self.head", "def addNodeBefore(self, new_value, before_node): # Class O(n)\r\n if not isinstance(new_value, Node):\r\n if new_value % 1 != 0: raise ValueError(\"Please, insert an integer\")\r\n if before_node > self.length(): raise ValueError(\"Invalid position\")\r\n if before_node == 1:\r\n self.head = Node(new_value, self.head)\r\n else:\r\n self.addNodeAfter(new_value, before_node - 1)", "def push(self, value: int):\n new_node = Node(value)\n new_node._next = self._head\n self._head = new_node\n self._len += 1\n print(f\"value: {value}, head: {self._head}, len: {self._len}, new_node: {new_node}\")", "def push(self, value):\n if self.head == None:\n self.head = Node(value)\n self.tail = self.head\n self.current = self.head\n else:\n newNode = Node(value)\n newNode.prev = self.tail\n self.tail.next = newNode\n self.tail = self.tail.next\n self.length = self.length + 1" ]
[ "0.7686634", "0.7617703", "0.76045", "0.7579039", "0.7531801", "0.74938774", "0.74524677", "0.74524677", "0.744536", "0.7401989", "0.73863536", "0.7339731", "0.7316137", "0.7264587", "0.71709114", "0.71600443", "0.7120181", "0.7105105", "0.7083401", "0.707433", "0.70651096", "0.7039383", "0.70221007", "0.70221007", "0.6999319", "0.6976969", "0.69622105", "0.69571316", "0.6942871", "0.6919192" ]
0.79621935
0
Returns the value stored by the node at the front of the list. Returns None if the list is empty.
def peek_front(self):
    if self._size == 0:
        return None
    return self._head.value
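A short usage sketch, assuming these methods live on a hypothetical DoublyLinkedList class:

    dll = DoublyLinkedList()         # hypothetical class name
    assert dll.peek_front() is None  # empty list case
    dll.push_front(7)
    dll.push_front(3)
    assert dll.peek_front() == 3     # most recently pushed value sits at the front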
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def front_value(self):\n if self.is_empty():\n return None\n return self.head.value", "def peek(self):\n if self.is_empty():\n return None\n return self.list.head.data", "def first(self):\n if self.is_empty():\n raise Empty('list is empty')\n return self._head._next._element # front aligned with head of list", "def first_value(self):\n if not self.is_empty():\n return self.data[self.head]\n return None", "def first(self):\r\n if self.head == None: #check if first(head) node is empty\r\n return 'null' #if yes, then return null\r\n else: #if it is not empty\r\n return self.head.data #return the data of head node\r", "def first(self):\n return self.head and self.head.value or None", "def peek(self):\n if self.is_empty():\n return None\n\n return self.linked_list.head.data", "def peek_first(self):\n if self.is_empty(): raise RuntimeError(\"Empty list\")\n return self.head.data", "def first(self):\n if self.is_empty():\n raise Empty('list is empty')\n return self._head._element # front aligned with head of list", "def front_node(self):\n return self.sentinel.next if self.N != 0 else None", "def hd(lst):\n return lst[0] if lst else None", "def pop_front(self):\n if self.head is None:\n raise IndexError('pop_front from empty list')\n node = self.head \n if node.next_node is None:\n self.tail = None \n else: \n node.next_node.prev_node = None \n self.head = node.next_node\n return node.value", "def peek_front(self):\n\n if self.items:\n return self.items[0]\n return None", "def front(self):\n heap = self.heap\n if len(heap) == 0:\n return None\n item = heap[0]\n to_node = item[self.TO_NODE]\n from_node = item[self.FROM_NODE]\n value = item[self.VALUE]\n return from_node, to_node, value", "def pop_front(self):\n if (self._size == 0):\n return None\n\n output_value = self._head.value\n\n self._head = self._head.next\n self._head.prev = None\n self._size -= 1\n\n # Edge case, list is now empty\n if (self._size == 0):\n self._tail = None\n\n return output_value", "def first(self):\n if self.is_empty():\n raise Empty('Queue is empty')\n return self._head._element # front aligned with head of list", "def pop_front(self):\n if self.n==0:\n print(\"Error; empty list\")\n return\n else:\n temp = self.head # retrieve front node\n self.head = temp.next # assign head to the second node\n self.n -= 1\n return temp.val", "def peek(self):\n if self.is_empty():\n return None\n list_length = len(self.list) - 1\n return self.list[list_length]", "def front(self):\n if self.empty():\n return \"Linked List is Empty\"\n return self.head.data", "def peek(self):\n # TODO: Return top item, if any\n print('self.is_empty()', self.is_empty())\n if self.is_empty():\n return None\n print('self.top', self.list.head.data)\n return self.list.head.data", "def peek(self):\n return self.list.head", "def peek(self):\n # TODO: Return top item, if any\n print(\"self.list P\", self.list)\n print(\"length\", self.length())\n if self.is_empty():\n return None\n else:\n return self.list[self.length()-1]\n # do n-1\n # return self.list[-]", "def pop_front(self):\n if self.is_empty():\n return None\n val = self.head.value\n # Update head and size\n self.head = self.head.next_node\n self.size -= 1\n # If the only node was removed, also need to update tail\n if self.is_empty():\n self.tail = None\n return val", "def peek(self):\n size = self._list.size()\n if size == 0:\n return None\n return self._list.tail.data", "def front(self):\n if self.size() < 1:\n return None\n else:\n # TODO: Return min item from heap, if any\n ...", "def 
peek(self):\n\n if self.is_empty():\n return None\n\n return self._list[-1]", "def value_at(self, index):\n if index==0:\n return self.head.val\n\n temp_node = self.head\n for _ in range(index):\n temp_node = temp_node.next\n return temp_node.val", "def front(self):\n return self.sentinel.next.item", "def top(self):\n if self.is_empty():\n return None\n return self._head.value", "def get_first(self):\n if self.is_empty():\n raise self.NoSuchNodeException()\n\n return self.head.data" ]
[ "0.7704693", "0.72550946", "0.72465354", "0.7224682", "0.7140524", "0.7129547", "0.7127332", "0.71250117", "0.71125025", "0.7022299", "0.6988882", "0.6959299", "0.6956622", "0.69185114", "0.6831163", "0.68272775", "0.6825421", "0.68179774", "0.6813925", "0.67642015", "0.67405164", "0.66976714", "0.6685376", "0.6671277", "0.6643668", "0.6641608", "0.66257054", "0.6623245", "0.66158587", "0.66093796" ]
0.73432696
1
Sets the value of the node at the given index to the passed value.
def set(self, value, index=0):
    # Error case: index out of acceptable range.
    # RangeError is presumably a custom exception defined alongside this
    # class; it is not a Python builtin.
    if index < 0 or index >= self._size:
        raise RangeError("index out of range.")

    i = 0
    current_node = self._head
    while i < index:
        current_node = current_node.next
        i += 1
    current_node.value = value
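Continuing the hypothetical DoublyLinkedList sketch above:

    dll.set(99, index=1)  # overwrite the value of the second node
    # dll.set(0, index=5) on a two-node list would raise the list's RangeError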
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_value_at(self, index, value):\n node = self._get_node_at(index)\n if node is None:\n raise IndexError('List index out of range.')\n node.value = value", "def set_node(self, n, value):\n node = self.get_node(n)\n if node:\n node.value = value", "def set(self, node, value):\n self.val[node] = value", "def set_at_index(self, index: int, value: object) -> None:\n self.data[index] = value", "def set_node(self, index, node):\r\n self.loc.coord[index] = node", "def setvalue(self, index, value):\n self._checkIndex(index)\n self._items[index].value = value", "def setData(self, index, value):\n \n self.state[index.row()][index.column()] = value\n return value", "def update(self, value, index):\n\n length = self.get_length()\n if type(index) is int:\n if index > length:\n # The index value is out of range and prompts and exits\n print(\"Index is out of range.\")\n return\n else:\n this_node = Node(data=value)\n if index == 0:\n this_node.next = self.head.next\n this_node.prev =None\n self.head = this_node\n else:\n cur = self.head\n while index - 1:\n cur = cur.next\n index -= 1\n this_node.next = cur.next.next\n this_node.prev = cur.next.prev\n cur.next = this_node\n return\n else:\n print(\"Index value is not int.\")\n return", "def update(self, idx, value):\n idx = self.__capacity - 1 + idx\n self.__tree[idx] = value\n self.__update(idx)", "def __setitem__(self, index, value):\n self.elem[index] = value", "def set_value ( self, object, value ):\n object[ self.index ] = value", "def set_config_value(self, value, index=None):", "def __setitem__(self, index, value):\n if isinstance(index, slice):\n del self[index]\n offset = 0\n if len(self) == 0:\n for x in value:\n self.append(x)\n else:\n for x in xrange(*index.indices(len(self))):\n self.__insert(x + offset, value)\n offset += value.length\n if not index.step:\n break\n return\n\n self.__verify_index(index)\n\n if index < 0:\n index += self.length\n\n index, prev_node, cur_node = self.__find_node_index(index)\n cur_node.data_list[index] = value", "def __setitem__(self, index: int, value: object) -> None:\n self.set_at_index(index, value)", "def __setitem__(self, index, value):\n self._update_value_at(index, value)", "def __setitem__(self, idx, value):\n if not isinstance(value, nodes.Node):\n raise NotImplementedError(\"setitem with non-blaze rhs\")\n result = self.getitem(idx, context='set')\n result = Assign('assign', [result, value])\n result.eval()", "def setitem(obj, idx, value):\n obj.__setitem__(idx, value)", "def set(self, index, data):\n self.data[index] = data", "def set_offset(self, index, value):\n if self.is_leaf():\n raise TerminalNodeException\n if value is None:\n return\n else:\n self.offsets[index] = value", "def __setitem__(self, index, value):\n self.position[index] = value", "def set_dna_value(self, value: int, index: int):\n self.dna[index] = value", "def __setitem__(self, index, value):\n if not isinstance(index, numbers.Integral):\n raise TypeError(\"Input index must be integer\")\n if index >= len(self._fsm.get(self._id)):\n raise ValueError(\"Input index is out of boundary\")\n ts = self._fsm.get(self._id)\n ts[index] = value\n self._fsm.store(self._id, ts)", "def set_child(self, child_index, node):\n try:\n self.children[child_index] = node #Set the node to be the child at the provided index.\n except: #If the index is invalid,\n pass #Make no changes", "def set_v_item(self, vindex, new_val):\n\n i = [((0, 0),),\n ((1, 1),),\n ((2, 2),),\n ([1, 2], [2, 1]),\n ([2, 0], [0, 2]),\n ([0, 1], [1, 0])]\n\n for 
j, k in i[vindex]:\n self[j, k] = new_val", "def setByPathAndIndex(self, keys, index, value):\n self.getByPath(keys[:-1])[keys[-1]][index] = value", "def set_register(self, index, value):\n if index < 0 or index > 32:\n raise Exception('Register out of index')\n\n self.register[index].set_value(str(value))", "def set_new_item(self, item, index):\n\n current = self.head\n previous = None\n for i in range(index):\n previous = current\n current = current.next\n if previous is None:\n self.head = Node(item)\n self.head.next = current\n else:\n previous.next = Node(item)\n previous.next.next = current\n self.size += 1", "def __setitem__(self, i, value):\n try:\n self.__getIthNode(i).setPayload(value)\n except IndexError:\n print(\"ERROR: Index value out of range.\")", "def __setitem__(self, index, value):\n # attempt to\n try:\n # cast {index} to an integer\n index = int(index)\n # if this fails\n except TypeError:\n # let my tile do the rest\n self.data[self.tile.offset(index)] = value\n # otherwise\n else:\n # set the item directly in my container\n self.data[index] = value\n # all done\n return", "def __setitem__(self, index, value):\n if isinstance(index, tuple):\n list.__getitem__(self, index[0])[index[1]] = value\n elif isinstance(index, int):\n self.pop(index)\n self.insert(index, value)\n else:\n raise TypeError, \"Table indices must be int or tuple\"" ]
[ "0.8241105", "0.7841289", "0.7668498", "0.7593661", "0.74415594", "0.73957735", "0.72437423", "0.7243659", "0.7214338", "0.7166695", "0.7081666", "0.7077546", "0.70689046", "0.7057119", "0.7034722", "0.70014685", "0.69267786", "0.6924723", "0.6884724", "0.6881682", "0.68104035", "0.67967546", "0.67901385", "0.6732409", "0.6717543", "0.6708706", "0.67019457", "0.6685106", "0.6680678", "0.66519433" ]
0.8198925
1
Install accounting on a new Odoo database. This function is not meant to be executed more than once, but it is safe to run again. The action mirrors exactly what a person would do in the web client.
def install_accounting(session, logger):
    acc_set = session.registry('account.config.settings')
    chart_template = session.registry('account.chart.template')
    # TODO: use odoo config file to have options configurable with the buildout
    chart_syscohada = chart_template.search(
        session.cr, UID,
        [('name', '=', u'SYSCOHADA - Plan de compte')])[0]
    wiz_field = acc_set.default_get(session.cr, UID, [])
    wiz_field.update(dict(chart_template_id=chart_syscohada))
    onchange_value = acc_set.onchange_chart_template_id(
        session.cr, UID, [], chart_syscohada)
    wiz_field.update(onchange_value['value'])
    wiz_field.update(dict(date_start="2015-01-01",
                          date_stop="2015-12-31",
                          period="month"))
    wiz_id = acc_set.create(session.cr, UID, wiz_field)
    acc_set.execute(session.cr, UID, [wiz_id])
    logger.info("Accounting installed (l10n_syscohada)")
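UID is taken from the enclosing module and is not shown in this record; in OpenERP 7 / Odoo 8 era scripts it is typically the superuser id. A hedged sketch of the assumed module-level setup:

    # assumed surroundings, not part of the record:
    import logging
    from openerp import SUPERUSER_ID as UID  # superuser id, common for setup scripts

    logger = logging.getLogger(__name__)
    # `session` is expected to expose .registry() and .cr as used above,
    # e.g. the session object provided by an anybox.recipe.odoo buildout script.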
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def install(self):\n # This installs the packages defined in self.packages\n super().install()\n # Do any other installation work that is needed. If a license key is\n # required then use the custom_assess_status_check() function below to\n # determine whether it is needed.\n # This assess_status() will determine what status the charm is at after\n # install.\n self.assess_status()", "def _install(self):\n\n pass", "def action_invoice_create(self, cr, uid, ids, context=None):\n res = False\n\n journal_obj = self.pool.get('account.journal')\n inv_obj = self.pool.get('account.invoice')\n inv_line_obj = self.pool.get('account.invoice.line')\n fiscal_obj = self.pool.get('account.fiscal.position')\n\n for order in self.browse(cr, uid, ids, context=context):\n# pay_acc_id = order.partner_id.property_account_payable.id\n #use a new method to get the account_id\n pay_acc_id = self._get_inv_pay_acc_id(cr,uid,order) \n journal_ids = journal_obj.search(cr, uid, [('type', '=','purchase'),('company_id', '=', order.company_id.id)], limit=1)\n if not journal_ids:\n raise osv.except_osv(_('Error!'),\n _('Define purchase journal for this company: \"%s\" (id:%d).') % (order.company_id.name, order.company_id.id))\n\n # generate invoice line correspond to PO line and link that to created invoice (inv_id) and PO line\n inv_lines = []\n for po_line in order.order_line:\n #check if this line have quantity to generate invoice, by johnw\n if po_line.product_qty <= po_line.invoice_qty:\n continue \n# if po_line.product_id:\n# acc_id = po_line.product_id.property_account_expense.id\n# if not acc_id:\n# acc_id = po_line.product_id.categ_id.property_account_expense_categ.id\n# if not acc_id:\n# raise osv.except_osv(_('Error!'), _('Define expense account for this company: \"%s\" (id:%d).') % (po_line.product_id.name, po_line.product_id.id,))\n# else:\n# acc_id = property_obj.get(cr, uid, 'property_account_expense_categ', 'product.category').id \n #use a new method to get the account_id, by johnw \n acc_id = self._get_inv_line_exp_acc_id(cr,uid,order,po_line)\n fpos = order.fiscal_position or False\n acc_id = fiscal_obj.map_account(cr, uid, fpos, acc_id)\n\n inv_line_data = self._prepare_inv_line(cr, uid, acc_id, po_line, context=context)\n #update the quantity to the quantity, by johnw\n inv_line_data.update({'quantity':(po_line.product_qty - po_line.invoice_qty)})\n inv_line_id = inv_line_obj.create(cr, uid, inv_line_data, context=context)\n inv_lines.append(inv_line_id)\n\n po_line.write({'invoiced':True, 'invoice_lines': [(4, inv_line_id)]}, context=context)\n \n #if no lines then return direct, by johnw\n if len(inv_lines) == 0:\n continue\n \n # get invoice data and create invoice\n inv_data = {\n 'name': order.partner_ref or order.name,\n 'reference': order.partner_ref or order.name,\n 'account_id': pay_acc_id,\n 'type': 'in_invoice',\n 'partner_id': order.partner_id.id,\n 'currency_id': order.pricelist_id.currency_id.id,\n 'journal_id': len(journal_ids) and journal_ids[0] or False,\n 'invoice_line': [(6, 0, inv_lines)],\n 'origin': order.name,\n 'fiscal_position': order.fiscal_position.id or False,\n 'payment_term': order.payment_term_id.id or False,\n 'company_id': order.company_id.id,\n }\n inv_id = inv_obj.create(cr, uid, inv_data, context=context)\n\n # compute the invoice\n inv_obj.button_compute(cr, uid, [inv_id], context=context, set_total=True)\n\n # Link this new invoice to related purchase order\n order.write({'invoice_ids': [(4, inv_id)]}, context=context)\n res = inv_id\n return res", "def 
do_post_install(self, context):\n pass", "def test_install(self):\n\n\n adminuser,adminpass = self.testdata.find_account_for('toolmanager')\n\n self.utils.account.login_as(adminuser,adminpass)\n\n self.contribtool.install(TOOLNAME,adminuser,adminpass)", "def do(self):\r\n parameters = ParametersParserStr(self.args_parameters).get()\r\n self.core.install(self.product_names, parameters, with_dependencies=True)", "def install(self, parent):\r\n pass", "def execute(self):\n\n super(BasicInstaller, self).execute()", "def execute(self):\n\n super(BasicInstaller, self).execute()", "def on_install(self, event):\n unit = self.model.unit\n\n # Install your software and its dependencies\n\n unit.status = ActiveStatus()", "def action_invoice_create(self, cr, uid, ids, context=None):\n res = False\n\n journal_obj = self.pool.get('account.journal')\n inv_obj = self.pool.get('account.invoice')\n inv_line_obj = self.pool.get('account.invoice.line')\n fiscal_obj = self.pool.get('account.fiscal.position')\n property_obj = self.pool.get('ir.property')\n\n for order in self.browse(cr, uid, ids, context=context):\n pay_acc_id = order.partner_id.property_account_payable.id\n journal_ids = journal_obj.search(cr, uid, [('type', '=','purchase'),('company_id', '=', order.company_id.id)], limit=1)\n if not journal_ids:\n raise osv.except_osv(_('Error !'),\n _('There is no purchase journal defined for this company: \"%s\" (id:%d)') % (order.company_id.name, order.company_id.id))\n\n # generate invoice line correspond to PO line and link that to created invoice (inv_id) and PO line\n inv_lines = []\n for po_line in order.order_line:\n if po_line.product_id:\n acc_id = po_line.product_id.product_tmpl_id.property_account_expense.id\n if not acc_id:\n acc_id = po_line.product_id.categ_id.property_account_expense_categ.id\n if not acc_id:\n raise osv.except_osv(_('Error !'), _('There is no expense account defined for this product: \"%s\" (id:%d)') % (po_line.product_id.name, po_line.product_id.id,))\n else:\n acc_id = property_obj.get(cr, uid, 'property_account_expense_categ', 'product.category').id\n fpos = order.fiscal_position or False\n acc_id = fiscal_obj.map_account(cr, uid, fpos, acc_id)\n\n inv_line_data = self._prepare_inv_line(cr, uid, acc_id, po_line, context=context)\n inv_line_id = inv_line_obj.create(cr, uid, inv_line_data, context=context)\n inv_lines.append(inv_line_id)\n\n po_line.write({'invoiced':True, 'invoice_lines': [(4, inv_line_id)]}, context=context)\n\n # get invoice data and create invoice\n inv_data = {\n 'name': order.partner_ref or order.name,\n 'reference': order.partner_ref or order.name,\n 'account_id': pay_acc_id,\n 'type': 'in_invoice',\n 'partner_id': order.partner_id.id,\n 'currency_id': order.pricelist_id.currency_id.id,\n 'address_invoice_id': order.partner_address_id.id,\n 'address_contact_id': order.partner_address_id.id,\n 'journal_id': len(journal_ids) and journal_ids[0] or False,\n 'invoice_line': [(6, 0, inv_lines)], \n 'origin': order.name,\n 'fiscal_position': order.fiscal_position.id or order.partner_id.property_account_position.id,\n 'payment_term': order.partner_id.property_payment_term and order.partner_id.property_payment_term.id or False,\n 'company_id': order.company_id.id,\n 'add_disc': order.add_disc or 0.0\n }\n inv_id = inv_obj.create(cr, uid, inv_data, context=context)\n\n # compute the invoice\n inv_obj.button_compute(cr, uid, [inv_id], context=context, set_total=True)\n\n # Link this new invoice to related purchase order\n order.write({'invoice_ids': [(4, 
inv_id)]}, context=context)\n res = inv_id\n return res", "def pre_approve(self, cr, uid, ids, context={}):\n \tfor voucher in self.browse(cr, uid, ids, context=context):\n \t if not voucher.department_id.analytic_account_id:\n \t raise osv.except_osv(_('Configration Check!'), _(\"Please add cost center for your department!\"))\n \t periods = self.pool.get('account.period').search(cr, uid, [('date_start','<=',voucher.date),('date_stop','>=',voucher.date),('company_id','=',voucher.company_id.id)], context=context)\n\n\n res=0.0\n if voucher.purpose:\n if not voucher.purpose.account_id: raise osv.except_osv(_('Warning!'), _('Please configure account for this purpose!')) \n voucher_line = {\n \t\t'voucher_id': voucher.id,\n \t\t'partner_id': voucher.partner_id.id,\n \t\t'untax_amount': voucher.amount,\n \t\t'amount': voucher.amount,\n 'name': voucher.narration,\n \t\t'type': 'dr',\n \t\t'account_analytic_id': voucher.department_id.analytic_account_id and voucher.department_id.analytic_account_id.id,\n 'account_id': voucher.purpose.account_id.id,\n \t }\n new_amount = res and res or voucher.amount \n voucher_line.update({'amount':new_amount,'untax_amount':new_amount})\n \t if voucher.line_ids :\n for line in voucher.line_ids:\n \t\t self.pool.get('account.voucher.line').write(cr, uid, line.id, {\n \t\t'voucher_id': voucher.id,\n \t\t'partner_id': voucher.partner_id.id,\n \t\t'untax_amount': res or line.amount,\n \t\t'amount': line.amount,\n 'name': voucher.narration,\n \t\t'type': 'dr',\n \t\t'account_analytic_id': line.account_analytic_id and line.account_analytic_id.id or voucher.department_id.analytic_account_id.id,\n 'account_id': voucher.purpose.account_id.id or line.account_id.id,\n \t }, context=context)\n \t else:\n\n \t\t new_voucher_line = self.pool.get('account.voucher.line').create(cr, uid, voucher_line, context=context)\n context.update({'purchase':True})\n self.create_budget_confirmation(cr, uid, [voucher.id], context)\n \tself.write(cr, uid, ids,{'state': 'preapprove','type':'purchase','ratification':True}, context=context)\n #cxt = context.copy()\n #cxt.update({'type':'ratification'})\n if not super(account_voucher, self).create_budget_confirmation(cr, uid, ids, context=context):\n self.write(cr, uid, ids, {'state': 'approved'}, context=context)\n\n \t'''self.write(cr, uid, ids, {'state': 'preapprove'})\n if not super(account_voucher, self).create_budget_confirmation(cr, uid, ids, context=context):\n self.write(cr, uid, ids, {'state': 'approve','type':'purchase','ratification':True}, context=context)'''\n return True", "def install(self):\n raise NotImplementedError", "async def install(self) -> None:\n self.add_to_output(\"Starting install, please wait...\")\n # outsource installer process\n proc = await asyncio.create_subprocess_shell(\n f'{os.path.join(os.getcwd(), \"files\", \"bos-toolbox\", \"bos-toolbox.bat\")} install {self.ip} --pool-user UpstreamDataInc.test --no-keep-pools --psu-power-limit 900 --no-nand-backup --feeds-url file:./feeds/',\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE)\n # get stdout of the install\n while True:\n # stderr = await proc.stderr.readuntil(b'\\r')\n stdout = await proc.stderr.readuntil(b'\\r')\n if stdout == b'':\n break\n # self.add_to_output(stderr.decode(\"utf-8\").strip())\n self.add_to_output(stdout.decode(\"utf-8\").strip())\n self.add_to_output(\"Rebooting...\")\n await proc.wait()\n self.add_to_output(\"Install complete...\")\n while not await self.ping_http():\n await asyncio.sleep(3)\n await 
asyncio.sleep(5)", "def install_step(self):\n\n cmd = \"./INSTALL -noroot -silent -install_dir %s\" % self.installdir\n run_cmd(cmd, log_all=True, simple=True)\n\n adjust_permissions(self.installdir, stat.S_IWOTH, add=False)", "def install():\n PackCommandExecutor().pack()\n InstallCommandExecutor().install()", "def install(self):\n return self._process('install')", "def _install(self, host):\n pass", "def install(self):\n conn = sqlite3.connect(self.__DB)\n cursor = conn.cursor()\n\n # creating tables...\n\n cursor.execute('''\n CREATE TABLE users (\n id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\n agency TEXT NOT NULL,\n account TEXT NOT NULL,\n password TEXT NOT NULL,\n balance REAL NOT NULL\n );\n ''')\n\n cursor.execute('''\n CREATE TABLE history (\n id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\n register TEXT NOT NULL,\n owner INTEGER NOT NULL\n );\n ''')\n\n # inserting a few users by default (there isn't 'sign up' requirement for this app)...\n\n hasher = User('', '', '')\n users_data = [\n ('A1', '00000-0', hasher.str_to_hash('pass0'), 1500),\n ('A1', '11111-1', hasher.str_to_hash('pass1'), 400),\n ('A2', '22222-2', hasher.str_to_hash('pass2'), 260),\n ('A3', '33333-3', hasher.str_to_hash('pass3'), 380),\n ('A2', '44444-4', hasher.str_to_hash('pass4'), 240),\n ]\n\n cursor.executemany('''\n INSERT INTO users (agency, account, password, balance)\n VALUES (?, ?, ?, ?);\n ''', users_data)\n\n conn.commit()\n conn.close()\n\n self.load_users()", "def create_landlord_invoice(self):\n if self.tenancy_id.is_landlord_rent:\n account_jrnl_obj = self.env['account.journal'].search(\n [('type', '=', 'purchase')], limit=1)\n inv_lines_values = {\n # 'origin': 'tenancy.rent.schedule',\n 'name': 'Rent Cost for' + self.tenancy_id.name,\n 'quantity': 1,\n 'price_unit': self.amount or 0.00,\n 'account_id':\n self.tenancy_id.property_id.account_depreciation_expense_id.id or False,\n 'analytic_account_id': self.tenancy_id.id or False,\n }\n owner_rec = self.tenancy_id.property_owner_id\n invo_values = {\n 'partner_id': self.tenancy_id.property_owner_id.id or False,\n 'type': 'in_invoice',\n 'invoice_line_ids': [(0, 0, inv_lines_values)],\n 'property_id': self.tenancy_id.property_id.id or False,\n 'invoice_date': self.start_date or False,\n # 'account_id': owner_rec.property_account_payable_id.id,\n # 'schedule_id': self.id,\n 'new_tenancy_id': self.tenancy_id.id,\n 'journal_id': account_jrnl_obj.id or False\n }\n\n acc_id = self.env['account.move'].with_context({'default_type': 'in_invoice'}).create(invo_values)\n self.write({'invc_id': acc_id.id, 'inv': True})\n wiz_form_id = self.env['ir.model.data'].get_object_reference(\n 'account', 'view_move_form')[1]\n return {\n 'view_type': 'form',\n 'view_id': wiz_form_id,\n 'view_mode': 'form',\n 'res_model': 'account.move',\n 'res_id': self.invc_id.id,\n 'type': 'ir.actions.act_window',\n 'target': 'current',\n 'context': self._context,\n }", "def call_transfer_fund(self):\n ## 1) Create expense line for current student\n ## 2) Create Deposite lines for oney transfer student\n\n ## 1\n student_pool = self.env['op.student']\n partner_obj = self.env['res.partner']\n employee_pool = self.env['hr.employee']\n\n if not self.pin_varification:\n raise except_orm(_('Warning!'),\n _(\"Enter Valid PIN to proceed!\"))\n\n\n student_id = student_pool.search([('user_id', '=', self._uid)])\n\n ## Validate Enter PIN\n if student_id:\n self.validate_current_user_pin(student_id)\n\n expense_vals = {\n 'name': student_id.id,\n 'amount': self.amount_transfer,\n 'date': 
datetime.datetime.now(),\n 'source': \"Transfer Amount of %s to account no %s (%s) on date %s - %s\" % (self.amount_transfer, self.account_no, self.name, datetime.datetime.now(), self.description),\n 'create_invoice': False,\n # 'student_id': student_id.id,\n }\n\n student_expenses_id = self.env['student.expenses'].sudo().create(expense_vals)\n self.total_expense_balance = student_id.stud_balance_amount\n\n ## Get employee form account id\n employee_id = employee_pool.sudo().search([('ean13', '=', self.account_no)])\n\n ## Search EMployee By Employee ID\n search_by_id_employee_id = employee_pool.sudo().search([('identification_id', '=', self.account_no)])\n\n ## Search by student matrix ID\n search_by_id_student_id = student_pool.sudo().search([('gr_no', '=', self.account_no)])\n\n if not self.account_no:\n ## Logic for search by User Name\n employee_id = self.pass_employee_id.sudo()\n student_id = self.pass_student_id.sudo()\n else:\n ## Get partner form account id\n student_id = student_pool.sudo().search([('ean13', '=', self.account_no)])\n if student_id:\n deposite_vals = {\n 'name': student_id.id,\n # 'amount': self.amount_to_transfer,\n 'paid_amount': self.amount_transfer,\n 'date': datetime.datetime.now(),\n 'create_invoice': True,\n }\n student_deposite_id = self.env['student.deposits'].sudo().create(deposite_vals)\n if not self.account_no:\n trans_student_id = student_id.sudo()\n else:\n trans_student_id = student_pool.sudo().search([('ean13', '=', self.account_no)])\n if trans_student_id:\n self.total_deposite_balance = trans_student_id.stud_balance_amount\n elif employee_id:\n deposite_vals = {\n 'name': employee_id.id,\n 'employee_id': employee_id.identification_id,\n 'paid_amount': self.amount_transfer,\n 'date': datetime.datetime.now(),\n 'create_invoice': True,\n 'source': \"Transfer Amount of %s to account no %s (%s) on date %s - %s \" % (self.amount_transfer, self.account_no, self.name, datetime.datetime.now(), self.description),\n }\n employee_deposite_id = self.env['employee.deposits'].sudo().create(deposite_vals)\n self.total_deposite_balance = employee_id.available_balance\n\n elif search_by_id_employee_id:\n deposite_vals = {\n 'name': search_by_id_employee_id.id,\n 'employee_id': search_by_id_employee_id.identification_id,\n 'paid_amount': self.amount_transfer,\n 'date': datetime.datetime.now(),\n 'create_invoice': True,\n 'source': \"Transfer Amount of %s to account no %s (%s) on date %s - %s \" % (self.amount_transfer, self.account_no, self.name, datetime.datetime.now(), self.description),\n }\n employee_deposite_id = self.env['employee.deposits'].sudo().create(deposite_vals)\n self.total_deposite_balance = search_by_id_employee_id.available_balance\n\n elif search_by_id_student_id:\n deposite_vals = {\n 'name': search_by_id_student_id.id,\n 'employee_id': search_by_id_student_id.gr_no,\n 'paid_amount': self.amount_transfer,\n 'date': datetime.datetime.now(),\n 'create_invoice': True,\n 'source': \"Transfer Amount of %s to account no %s (%s) on date %s - %s \" % (self.amount_transfer, self.account_no, self.name, datetime.datetime.now(), self.description),\n }\n student_deposite_id = self.env['student.deposits'].sudo().create(deposite_vals)\n self.total_deposite_balance = search_by_id_student_id.stud_balance_amount\n\n # return True\n compose_form = self.env.ref('deposite_management.transfer_confirmation_popup_view', False)\n\n try:\n template_id = self.env.ref('deposite_management.email_template_student_fund_transfer', False)\n except ValueError:\n template_id = 
False\n values = self.env['email.template'].generate_email(template_id.id, self.id)\n\n ## Append Student email id to send mail\n if values and 'email_to' in values:\n values['email_to'] = student_id.sudo().email\n mail_id = self.env['mail.mail'].sudo().create(values)\n if mail_id:\n mail_send_id = mail_id.send()\n\n try:\n template_id_new = self.env.ref('deposite_management.email_template_student_fund_transfer_self_notification', False)\n except ValueError:\n template_id_new = False\n values_new = self.env['email.template'].generate_email(template_id_new.id, self.id)\n ## Append email id to send mail\n if values_new and 'email_to' in values_new:\n if student_id and trans_student_id:\n values_new['email_to'] = trans_student_id.email\n elif employee_id:\n values_new['email_to'] = employee_id.sudo().work_email\n mail_id_new = self.env['mail.mail'].sudo().create(values_new)\n if mail_id_new:\n mail_send_id = mail_id_new.send()\n ## return wizard after click on Fund Transfer Button\n return {\n 'name': _('Fund Transfer Done'),\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'fund.confirmation.msg',\n 'view_id': compose_form.id,\n 'target': 'new',\n }", "def done(self, cr, uid, ids, context=None):\n \n voucher_obj = self.pool.get('account.voucher')\n voucher_line_obj = self.pool.get('account.voucher.line')\n admin_affairs_model_obj = self.pool.get('admin.affairs.model')\n affairs_account_obj = self.pool.get('admin_affairs.account') \n model_id = admin_affairs_model_obj.search(cr, uid, [('model','=','environment.and.safety')], context=context)[0] \n affairs_account = affairs_account_obj.search(cr, uid, [('model_id','=',model_id)], context=context)\n if not affairs_account:\n raise osv.except_osv(_('Warning !'), _('Please insert account configuration For Environment and safety'))\n affairs_account_id = affairs_account[0]\n \n affairs_account_record = affairs_account_obj.browse(cr, uid, affairs_account_id,context=context) \n for record in self.browse(cr, uid, ids, context=context):\n if not record.allowances_lines_after :\n raise osv.except_osv(_('Partner Amount !'), _('Sorry no partner Amount After Rate To Transfer!'))\n notes = _(\"Enviroment and Safety allowances Contract: %s\")%(record.name)\n \n journal_id = affairs_account_record.journal_id\n analytic_id = affairs_account_record.analytic_id\n account_id = affairs_account_record.account_id\n\n\t\t# Creating Voucher / Ratitication\n voucher_id = voucher_obj.create(cr, uid, {\n 'amount': record.amount_total,\n 'type': 'ratification',\n 'date': time.strftime('%Y-%m-%d'),\n 'partner_id': record.partner_id.id,\n 'journal_id': journal_id and journal_id.id , \n 'state': 'draft',\n\t\t\t\t\t 'notes':record.notes,\n\t\t\t\t\t 'narration':notes ,\n \t 'company_id':record.company_id.id,\n })\n \t# Creating Voucher / Ratitication Lines\n for line in record.allowances_lines_after:\n '''account_id =line.category_id.account_id\n if not account_id:\n account_id = line.category_id.parent_id.account_id\n \n if not account_id:\n account_id = affairs_account_record.account_id \n\n if not account_id:\n raise osv.except_osv(_('Invalid action !'), _('Please insert Account configuration For Environment and safety Service')) ''' \n \n account_analytic_id =line.category_id.analytic_id\n if not account_analytic_id:\n account_analytic_id = line.category_id.parent_id.analytic_id \n \n if not account_analytic_id:\n account_analytic_id = affairs_account_record.analytic_id\n \n vocher_line_id = voucher_line_obj.create(cr, uid, 
{\n 'amount': record.amount_total,\n 'voucher_id': voucher_id,\n\t\t\t\t\t 'account_id':account_id and account_id.id,\n\t\t\t\t\t 'account_analytic_id':account_analytic_id and account_analytic_id.id ,\n 'type': 'dr',\n 'name':'environment and Safety allowances :' + record.name,\n })\n\t\t\n\t\t# Selecting Voucher Number / Refernece \n\n voucher_number = self.pool.get('account.voucher').browse(cr,uid,voucher_id)\n\n copy_attachments(self,cr,uid,[record.id],'services.contracts.archive',voucher_id,'account.voucher', context)\n self.write(cr, uid, ids, {'state':'done','transfer':True,'voucher_no':voucher_number.number}) \n return True", "def install(i):\n\n cm_kernel.print_for_con('***********************************************')\n cm_kernel.print_for_con('Installing code ...')\n\n # Check vars\n if 'target_os_uoa' not in i: return {'cm_return':1, 'cm_error':'\"target_os_uoa\" is not defined in \"code install\"'}\n\n # Create entry\n ii={'cm_run_module_uoa':ini['cm_module_uid'],\n 'cm_action':'update'}\n if 'install_data_uid' in i and i['install_data_uid']!='': \n ii['cm_data_uid']=i['install_data_uid']\n if 'install_data_alias' in i and i['install_data_alias']!='': \n ii['cm_data_uoa']=i['install_data_alias']\n if 'install_data_display_as_alias' in i: \n ii['cm_display_as_alias']=i['install_data_display_as_alias']\n if 'install_module_uoa' in i and i['install_module_uoa']!='':\n ii['cm_run_module_uoa']=i['install_module_uoa']\n if 'cm_array' in i and len(i['cm_array'])>0: ii['cm_array']=i['cm_array']\n if 'install_repo_uoa' in i and i['install_repo_uoa']!='': \n ii['cm_repo_uoa']=i['install_repo_uoa']\n r=cm_kernel.access(ii)\n if r['cm_return']>0: return r\n\n target_path=r['cm_path']\n target_uid=r['cm_uid']\n target_alias=r['cm_alias']\n\n # Prepare script\n rx=get_env({'cm_data_uoa':target_uid,\n 'os_uoa':i['target_os_uoa']})\n if rx['cm_return']>0: return rx\n\n script=rx['cm_string']\n\n ii={'script_name':script,\n 'skip_extension':'yes',\n 'target_os_uoa':i['target_os_uoa'],\n 'cm_path':target_path}\n if 'code_deps' in i and i.get('skip_code_deps','')!='yes':\n ii['code_deps']=i['code_deps']\n\n # Add remark about how code was built\n if 'add_rem_to_script' in i:\n run_commands_before=[]\n run_commands_before.append('')\n for x in i['add_rem_to_script']:\n run_commands_before.append(x)\n ii['run_commands_before']=run_commands_before\n\n rx=prepare_script(ii)\n if rx['cm_return']>0: return rx\n\n r['script_name']=rx['cm_path']\n r['script_filename']=script\n\n return r", "def test_install(self):\n pass", "def test_website_companies_install_additions(self):\n pass", "def _setup_account_general(insid, start_date, rate_dict, counterparty,\n prf_name, account_name, reinvest,\n funding_instype, external_id=None):\n calendar = acm.FCalendar['ZAR Johannesburg']\n next_bus_day = calendar.AdjustBankingDays(acm.Time.DateToday(), 1)\n day_after_start_date = calendar.AdjustBankingDays(start_date, 1)\n # Make sure that two conditions are met:\n # 1. End date doesn't lie in the past.\n # 2. 
Start date predates end date.\n end_date = max(next_bus_day, day_after_start_date)\n\n deposit = acm.FInstrument[insid]\n if deposit:\n LOGGER.info(\"The instrument {} already exists\".format(insid))\n if deposit.ExternalId1():\n LOGGER.info(\"Updating the external id from {} to {}\".format(\n deposit.ExternalId1(), external_id))\n deposit.ExternalId1(external_id)\n deposit.Commit()\n return None\n\n LOGGER.info('Creating %s...', insid)\n acm.BeginTransaction()\n try:\n # Instrument\n deposit = acm.FDeposit()\n deposit.Currency(CURRENCY)\n deposit.Name(insid)\n deposit.DayCountMethod(DAY_COUNT_METHOD)\n deposit.SpotBankingDaysOffset(0)\n # this sets the exp_time, which has a higher priority over exp_day,\n # which is set when calling re_rate(...) from ael. If the exp_time\n # is not set, acm (trading manager) uses the exp_day.\n # deposit.ExpiryDate(end_date)\n deposit.ContractSize(1)\n deposit.Quotation('Clean')\n deposit.QuoteType('Clean')\n deposit.OpenEnd('Open End')\n deposit.MinimumPiece(MINIMUM_PIECE)\n deposit.PayOffsetMethod('Business Days')\n if external_id:\n deposit.ExternalId1(external_id)\n\n # Leg\n leg = deposit.CreateLeg(1)\n leg.LegType('Call Fixed Adjustable')\n leg.Decimals(11)\n leg.StartDate(start_date)\n leg.EndDate(end_date)\n leg.EndPeriodUnit('Days')\n leg.DayCountMethod(DAY_COUNT_METHOD)\n if rate_dict['type'] == 'fixed':\n leg.FixedRate(rate_dict['rate'])\n leg.ResetDayOffset(0)\n leg.ResetType('Weighted')\n leg.ResetPeriod('1d')\n leg.ResetDayMethod('Following')\n leg.Currency(CURRENCY)\n leg.NominalFactor(1)\n leg.Rounding('Normal')\n leg.RollingPeriod('1m')\n leg.RollingPeriodBase(acm.Time.FirstDayOfMonth(acm.Time.DateAddDelta(\n start_date, 0, 1, 0)))\n leg.PayDayMethod('Following')\n leg.PayCalendar(calendar)\n leg.FixedCoupon(True)\n leg.NominalAtEnd(True)\n leg.FloatRateFactor(1)\n leg.FixedCoupon(True)\n leg.StartPeriod('-1d')\n leg.Reinvest(reinvest)\n if rate_dict['type'] == 'float':\n deposit.AddInfoValue('CallFloatRef', rate_dict['ref'])\n deposit.AddInfoValue('CallFloatSpread', rate_dict['spread'])\n deposit.Commit() # Commits both the instrument and the leg.\n\n # Trade\n trade = acm.FTrade()\n trade.Instrument(deposit)\n trade.Counterparty(counterparty)\n trade.Acquirer('PRIME SERVICES DESK')\n trade.AcquireDay(start_date)\n trade.ValueDay(start_date)\n trade.Quantity(1)\n trade.TradeTime(start_date)\n trade.Currency(CURRENCY)\n trade.Price(0)\n trade.Portfolio(acm.FPhysicalPortfolio[prf_name])\n trade.Type('Normal')\n trade.TradeTime(start_date)\n trade.Status('Simulated') # To allow for delete in case of rollback.\n trade.AddInfoValue('Funding Instype', funding_instype)\n trade.AddInfoValue('Call_Region', 'BB SANDTON')\n trade.AddInfoValue('Account_Name', account_name)\n trade.Commit()\n \n acm.CommitTransaction()\n except Exception as e:\n acm.AbortTransaction()\n LOGGER.exception(\"Could not create call/loan account {}\".format(insid))\n raise e\n\n deposit = acm.FInstrument[insid]\n if deposit:\n trades = deposit.Trades()\n if trades:\n LOGGER.info('The following trade has been created:{}\\n'.format(trades[0].Oid()))\n else:\n raise RuntimeError('Could not create trade!')\n else:\n raise RuntimeError('Could not create deposit!')", "def perform_transaction_install(target_userspace_info, storage_info, used_repos, tasks, plugin_info, xfs_info):\n\n stage = 'upgrade'\n\n # These bind mounts are performed by systemd-nspawn --bind parameters\n bind_mounts = [\n '/:/installroot',\n '/dev:/installroot/dev',\n '/proc:/installroot/proc',\n 
'/run/udev:/installroot/run/udev',\n ]\n\n if get_target_major_version() == '8':\n bind_mounts.append('/sys:/installroot/sys')\n else:\n # the target major version is RHEL 9+\n # we are bindmounting host's \"/sys\" to the intermediate \"/hostsys\"\n # in the upgrade initramdisk to avoid cgroups tree layout clash\n bind_mounts.append('/hostsys:/installroot/sys')\n\n already_mounted = {entry.split(':')[0] for entry in bind_mounts}\n for entry in storage_info.fstab:\n mp = entry.fs_file\n if not os.path.isdir(mp):\n continue\n if mp not in already_mounted:\n bind_mounts.append('{}:{}'.format(mp, os.path.join('/installroot', mp.lstrip('/'))))\n\n if os.path.ismount('/boot'):\n bind_mounts.append('/boot:/installroot/boot')\n\n if os.path.ismount('/boot/efi'):\n bind_mounts.append('/boot/efi:/installroot/boot/efi')\n\n with _prepare_transaction(used_repos=used_repos,\n target_userspace_info=target_userspace_info,\n binds=bind_mounts\n ) as (context, target_repoids, _unused):\n # the below nsenter command is important as we need to enter sysvipc namespace on the host so we can\n # communicate with udev\n cmd_prefix = ['nsenter', '--ipc=/installroot/proc/1/ns/ipc']\n\n disable_plugins = []\n if plugin_info:\n for info in plugin_info:\n if stage in info.disable_in:\n disable_plugins += [info.name]\n\n # we have to ensure the leapp packages will stay untouched\n # Note: this is the most probably duplicate action - it should be already\n # set like that, however seatbelt is a good thing.\n dnfconfig.exclude_leapp_rpms(context, disable_plugins)\n\n if get_target_major_version() == '9':\n _rebuild_rpm_db(context, root='/installroot')\n _transaction(\n context=context, stage='upgrade', target_repoids=target_repoids, plugin_info=plugin_info,\n xfs_info=xfs_info, tasks=tasks, cmd_prefix=cmd_prefix\n )\n\n # we have to ensure the leapp packages will stay untouched even after the\n # upgrade is fully finished (it cannot be done before the upgrade\n # on the host as the config-manager plugin is available since rhel-8)\n dnfconfig.exclude_leapp_rpms(mounting.NotIsolatedActions(base_dir='/'), disable_plugins=disable_plugins)", "def install():\n execute(generate)\n execute(upload)", "def install(self,name,destFiles,data_sizeCrcDate,progress=None):\n pass", "def payment(self, **post):\n cr, uid, context = request.cr, request.uid, request.context\n payment_obj = request.registry.get('payment.acquirer')\n sale_order_obj = request.registry.get('sale.order')\n\n order = request.website.sale_get_order(context=context)\n order.write({'usersess': request.session['webcalc_session_id']})\n #order.env.cr.commit()\n redirection = self.checkout_redirection(order)\n if redirection:\n return redirection\n\n shipping_partner_id = False\n if order:\n if order.partner_shipping_id.id:\n shipping_partner_id = order.partner_shipping_id.id\n else:\n shipping_partner_id = order.partner_invoice_id.id\n\n values = {\n 'order': request.registry['sale.order'].browse(cr, SUPERUSER_ID, order.id, context=context),\n 'usersess': request.session['webcalc_session_id']\n }\n values['errors'] = sale_order_obj._get_errors(cr, uid, order, context=context)\n values.update(sale_order_obj._get_website_data(cr, uid, order, context))\n\n if not values['errors']:\n acquirer_ids = payment_obj.search(cr, SUPERUSER_ID, [('website_published', '=', True), ('company_id', '=', order.company_id.id)], context=context)\n values['acquirers'] = list(payment_obj.browse(cr, uid, acquirer_ids, context=context))\n render_ctx = dict(context, submit_class='btn 
btn-primary', submit_txt=_('Завершить оформление'))\n for acquirer in values['acquirers']:\n acquirer.button = payment_obj.render(\n cr, SUPERUSER_ID, acquirer.id,\n '/',\n order.amount_total,\n order.pricelist_id.currency_id.id,\n partner_id=shipping_partner_id,\n tx_values={\n 'return_url': '/shop/payment/validate',\n },\n context=render_ctx)\n #vips_shop\n return request.website.render(\"vips_shop.payment\", values)" ]
[ "0.6092755", "0.5978062", "0.59750295", "0.5958811", "0.593691", "0.5921626", "0.5819296", "0.57570124", "0.57570124", "0.5737443", "0.5695904", "0.56551325", "0.5596055", "0.55873203", "0.55854464", "0.5569766", "0.55693704", "0.5526181", "0.55224955", "0.5478638", "0.5468129", "0.5449577", "0.5419112", "0.53857744", "0.53762865", "0.5375288", "0.53656715", "0.53539383", "0.53403085", "0.53391314" ]
0.7017036
0
Flush the serial IO.
def flush(self):
    if self.serial:
        self.serial.flush()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def flush(self):\n self.ser.flushInput()", "def close_serial(self):\n if(self.serial):\n self.serial.flush()\n self.serial.close()\n self.serial = False", "def flush(self):\n self.out.flush()", "def flush(self):\n timeout = self.serial.timeout\n try:\n self.serial.timeout = 0.1\n while True:\n c = self.serial.read()\n if not c:\n return\n finally:\n self.serial.timeout = timeout", "def flush(self):\n self.f.flush()", "def _Flush(self):\n self._ignore_width = False\n if self._fill:\n self._out.write('\\n')\n self._blank = False\n self._fill = 0", "def flush(self):\n self._stream.flush()", "def flush(self):\n self.write(self.ASCII_FF)", "def flush(self):\n self._write()", "def flush(self):\n if self.out is not None:\n self.out.flush()", "def flush(self):\n if self.out is not None:\n self.out.flush()", "def flush(self):\n self.fileobj.flush()", "def flush(self):\n self.out_file.flush()", "def _flush(self):\n pass", "def _write_terminator(self,):\n #It is critical that this function blocks and write the outstanding\n #records to the disc, as without that the whole protocol comes unstuck\n\n self.fd.write(b\"\\0\")\n self.fd.flush()\n self.fd.seek(-1,1)\n fdatasync(self.fd.fileno())", "def flush(self):\n if self._writable:\n with self._seek_lock:\n self._flush_raw_or_buffered()\n self._write_buffer = bytearray(self._buffer_size)\n self._buffer_seek = 0", "def close(self) -> None:\r\n\r\n # Close the serial port\r\n if self._serial_handle.read():\r\n self._serial_handle.flushInput()\r\n self._serial_handle.flushOutput()\r\n self._serial_handle.close()", "def _flush(self):", "def _flush(self):\r\n\r\n fh = open(self.filename, \"a\")\r\n fh.write(\"\".join(self.buffer))\r\n fh.close()\r\n\r\n self.buffer = []", "def flush(self) -> None:\r\n if self.file is not None:\r\n self.file.flush()\r\n\r\n self.stdout.flush()", "def flush(self):\n self._send()", "def close(self):\n self.flush()", "def flush(self):\n if self.index < self.bufsize:\n self.writer(\n self.linesep.join(self.read1_batch[0:self.index]),\n self.linesep.join(self.read2_batch[0:self.index]))\n else:\n self.writer(\n self.linesep.join(self.read1_batch),\n self.linesep.join(self.read2_batch))\n self.writer(self.linesep, self.linesep)\n self.index = 0", "def flush(self, mode=None):\r\n pass", "def flush(self):\n pass", "def flush(self):\n pass", "def flush(self):\n pass", "def flush(self):\n pass", "def flush(self):\r\n # this flush method is needed for python 3 compatibility.\r\n # this handles the flush command by doing nothing.\r\n pass", "def flush(self, data):" ]
[ "0.7911663", "0.7459787", "0.7403486", "0.7286804", "0.7205834", "0.7152669", "0.713177", "0.71284574", "0.71226877", "0.7080503", "0.7080503", "0.7020482", "0.70039046", "0.685848", "0.6739498", "0.669237", "0.66759765", "0.66632384", "0.66274196", "0.66020226", "0.6602012", "0.6563315", "0.65295994", "0.65203214", "0.6520264", "0.6520264", "0.6520264", "0.6520264", "0.6503941", "0.64948595" ]
0.8194595
0
Return True if connected to serial port.
def connected(self):
    return bool(self.serial)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_connected(self):\n return self.serial_connection.isOpen()", "def Connected(self):\r\n return self.Port.is_open", "def connected(self):\n return self.port.is_open", "def connect(self):\n try:\n self.ser = Serial(\n self.comport, \n self.baudrate,\n timeout=self.timeout)\n assert self.ser.isOpen(), \"Port is not open.\"\n return True\n except AssertionError as e1:\n print(\"Error:\", e1)\n return False\n except SerialException as e2:\n print(\"SerialException:\", e2)\n return False", "def is_connected(self):\n return self._port.is_connected()", "def is_connected(self):\n try:\n if PY3:\n self.__gen_serial_id()\n cmnd = \"#{} {}\".format(self.serial_id, protocol.GET_FIRMWARE_VERSION)\n cmndString = bytes(cmnd + \"\\n\", encoding='ascii')\n self.__serial.write(cmndString)\n response = str(self.__serial.readline(),encoding='ascii')\n else:\n self.__gen_serial_id()\n cmnd = \"#{} {}\".format(self.serial_id, protocol.GET_FIRMWARE_VERSION)\n cmndString = bytes(cmnd + \"\\n\")\n self.__serial.write(cmndString)\n response = self.__serial.readline()\n except serial.serialutil.SerialException:\n self.__isConnected = False\n if self.__serial.isOpen() and self.__isConnected:\n return True\n else:\n return False", "def detect():\n try:\n s = serial.Serial(port = 0, baudrate = 19200, parity = 'O', timeout=1)\n except Exception, e:\n log = logging.getLogger('root')\n log.exception(e)\n return False\n else:\n return True\n finally:\n s.close()", "def connect(self):\n\n # in Windows even if the device is detected it\n # may be not ready to be opened yet\n while not self.serial.is_open:\n try:\n self.serial.open()\n except SerialException:\n pass\n \n return True", "def check_port(self):\r\n\t\treturn(self.connect.is_open)", "def serial_ok(self) -> bool:\r\n return self.ser is not None", "def is_open(self):\n return self.__port.isOpen()", "def is_connected(self):\n if self._socket:\n return True\n else:\n return False", "def is_connected(self) -> bool:\n return self.arduino is not None", "def getIsConnected(self):\n if self._socket == None:\n return False\n\n # Assume we are still connected. 
TODO: Do a test receive?\n return True", "def is_connected(self):\n\t\treturn bool(call_sdk_function('PrlVmDev_IsConnected', self.handle))", "def is_connected_to_device(self):\n is_connected_to_device = ctypes.c_bool()\n\n result = self._lib.NRFJPROG_is_connected_to_device(ctypes.byref(is_connected_to_device))\n if result != NrfjprogdllErr.SUCCESS:\n raise APIError(result)\n\n return is_connected_to_device.value", "def connected(self):\n return self.opened() and bool(self._dll.JLINKARM_EMU_IsConnected())", "def is_connected(self):\n return self._connection and self._connection.is_open", "def is_connected(self):\n\t\tif self._connection is None:\n\t\t\treturn False\n\n\t\treturn True", "def available(self):\n if self._ser is not None:\n return self._ser.dtr\n else:\n return True", "def is_connected(self):\n return self._current_protocol is not None", "def is_connected(self):\n\t\treturn call_sdk_function('PrlSrv_IsConnected', self.handle)", "def isConnected(self):\n\n return self._connection is not None", "def available(self) -> bool:\n return self._device.is_connected", "def is_connected(self):\n return self._socket is not None", "def is_connected(self) -> bool:\n\n return self.send(self.cmd.GET_SYSTEMLINE) == self.cmd.DEFAULT_SYSTEM_LINE", "def connect_to_device(self):\n \n try:\n # Recreate the serial interface\n self.interface = serial.Serial(self.address,\n self.baudrate,\n timeout=0.1,\n xonxoff=0, \n rtscts=0,\n interCharTimeout=None)\n \n # Test that data can be read from connection\n field_string = self.interface.readline().decode(\"utf-8\")\n self.connected = True\n \n except Exception as err:\n print(\"No connection established to magnetometer | {}\".format(err))\n self.connected = False\n \n return self.connected", "def isConnected(self):\n return self.transport is not None and self.started", "def is_connected(self):\n output = self.run_commands(['q'])\n return output.find('Info: Found {0}'.format(self._connected)) != -1", "def connected(self):\n\n if self._connection:\n if self._connection.is_closed == True:\n return False\n else:\n return True\n else:\n return False" ]
[ "0.83128446", "0.79970634", "0.78823143", "0.78717333", "0.7867433", "0.77403134", "0.7522227", "0.7484234", "0.74747556", "0.7370798", "0.73271185", "0.7299317", "0.72478753", "0.7147081", "0.71386373", "0.70849586", "0.7061378", "0.6995808", "0.6990348", "0.6989641", "0.69825673", "0.6964839", "0.6918633", "0.69026834", "0.6869545", "0.6862416", "0.68613005", "0.68591934", "0.6850078", "0.6848983" ]
0.8235554
1
Test case for add_category_to_asset
def test_add_category_to_asset(self):
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_remove_category_from_asset(self):\n pass", "def test_add_category(self):\n self.add_success(self.test_data['pants'])", "def test_create_category(self):\n pass", "def test_already_added_asset(self):\n # assign the asset to the story\n self.story.assets.add(self.asset)\n self.story.save()\n # confirm the asset is added to the story\n self.assertTrue(self.asset in self.story.assets.select_subclasses())\n # Assign the asset to the section\n container = Container.objects.get(name='left')\n section_asset = SectionAsset(section=self.section, asset=self.asset, container=container)\n section_asset.save()\n # Confirm the asset is in the section's list\n self.assertTrue(self.asset in self.section.assets.select_subclasses())\n # Confirm that the asset is in the story's list\n self.assertTrue(self.asset in self.story.assets.select_subclasses())", "def test_update_category(self):\n pass", "def test_add_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n self.dashboard()\n rv = self.category('Breakfast')\n self.assertIn(b'Category created', rv.data)", "def test_add_category_success(self):\n category = json.dumps({\n 'name': 'Asian',\n })\n response = self.client.post('/category', data=category,\n headers={\"Authorization\": self.token})\n self.assertEqual(response.status_code, 201)\n self.assertIn('asian', response.data.decode())", "def test_already_added_asset(self):\n # Create a story\n title = ('Transportation Challenges Limit Education Choices for '\n 'Denver Parents')\n summary = \"\"\"\n Many families in the Denver metro area use public\n transportation instead of a school bus because for them, a\n quality education is worth hours of daily commuting. Colorado's\n school choice program is meant to foster educational equity,\n but the families who benefit most are those who have time and\n money to travel. 
Low-income families are often left in a lurch.\n \"\"\"\n byline = \"Mile High Connects\"\n story = create_story(title=title, summary=summary, byline=byline)\n # create a HtmlAsset\n asset = HtmlAsset()\n asset.save()\n translation = HtmlAssetTranslation(title='Test Asset', asset=asset)\n translation.save()\n # assign the asset to the story\n story.assets.add(asset)\n story.save()\n # confirm the asset is added to the story\n self.assertTrue(asset in story.assets.select_subclasses())\n # create a Section\n section = create_section(title=\"Test Section 1\", story=story)\n # Assign the asset to the section\n section_asset = SectionAsset(section=section, asset=asset, weight=0)\n section_asset.save()\n # Confirm the asset is in the section's list\n self.assertTrue(asset in section.assets.select_subclasses())\n # Confirm that the asset is in the story's list\n self.assertTrue(asset in story.assets.select_subclasses())", "def test_create_category(self):\n res = self.client().post('/categories/', data=self.category)\n self.assertEqual(res.status_code, 201)\n self.assertIn('Stews', str(res.data))", "def test_0005_create_categories(self):\n self.create_category(name='Test 0060 Workflow Features', description='Test 0060 - Workflow Features')", "def test_category_manip_pipeline(self):\n raise NotImplementedError(\"\")", "def test_add_asset_type_assignment_rule(self):\n pass", "def test_add_new_asset(self):\n self.assertEqual(self.all_assets.count(), 1)\n new_asset = Asset(asset_code=\"IC002\",\n serial_number=\"SN0045\",\n model_number=self.test_assetmodel,\n assigned_to=self.user)\n new_asset.save()\n self.assertEqual(self.all_assets.count(), 2)", "def test_category_mixed(self):\n self.go200('minus_upload')\n self.formfile('minus_upload', 'file', AUDIO_FILE)\n self.fv('minus_upload', 'categories', 'onecat')\n self.fv('minus_upload', 'add_category', 'yuppie')\n self.submit200()\n minus = MinusRecord.objects.all()[0]\n self.assert_equal(minus.categories.count(), 2)\n self.assert_equal(minus.categories.all()[0].name, 'onecat')\n self.assert_equal(minus.categories.all()[1].name, 'yuppie')", "def test_delete_category(self):\n pass", "def test_auto_add_assets_to_story(self):\n # Confirm that the story has no assets\n self.assertEqual(self.story.assets.count(), 0)\n # Assign the asset to the section\n container = Container.objects.get(name='left')\n section_asset = SectionAsset(section=self.section, asset=self.asset, container=container)\n section_asset.save()\n # Confirm the asset is in the section's list\n self.assertTrue(self.asset in self.section.assets.select_subclasses())\n # Confirm that the asset is in the story's list\n self.assertTrue(self.asset in self.story.assets.select_subclasses())", "def test_up_assign_categories(self):\n self.go200('minus_upload')\n self.formfile('minus_upload', 'file', AUDIO_FILE)\n self.fv('minus_upload', 'categories', 'onecat')\n self.submit200()\n minus = MinusRecord.objects.all()[0]\n self.url('minus_detail', [minus.author, minus.id])\n self.assert_equal(minus.categories.count(), 1)\n self.assert_equal(minus.categories.all()[0].name, 'onecat')", "def test_create_recipe_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n self.dashboard()\n self.category('JunkFood')\n self.dashboard()\n self.recipe_dashboard()\n rv = self.create_recipe('cakes', 'blah, blah, blah....mix ingredient, heat')\n self.assertIn(b'Recipe created', rv.data)", "def test_category_addition(self):\n login = 
self.autheniticate()\n token = json.loads(login.data.decode()).get('token')\n res = self.app.post(category_url,\n data=json.dumps(self.data),\n headers=dict(Authorization=\"Bearer \" + token),\n content_type='application/json')\n res1 = json.loads(res.data.decode())\n self.assertEqual(res1['status'], 'Success!')\n self.assertEqual(res.status_code, 201)", "def add_category(self, category):\n raise NotImplementedError()", "def test_dashboard_recipe_created_with_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n self.dashboard()\n self.category('JunkFood')\n self.dashboard()\n rv = self.recipe_dashboard()\n self.assertIn(b'JunkFood', rv.data)", "def test_add_assets_signal(self):\n story = create_story(title=\"Test Story\", summary=\"Test Summary\",\n byline=\"Test Byline\", status='published')\n asset = create_html_asset(type='text', title='Test Asset', \n body='Test content')\n self.assertEqual(story.assets.count(), 0)\n story.featured_assets.add(asset)\n story.save()\n self.assertEqual(story.assets.count(), 1)", "def test_add_category_to_product(self):\n login = self.autheniticate()\n token = json.loads(login.data.decode()).get('token')\n self.app.post(product_url,\n data=json.dumps(self.product_data),\n headers=dict(Authorization=\"Bearer \" + token),\n content_type='application/json')\n self.app.post(category_url,\n data=json.dumps(self.data),\n headers=dict(Authorization=\"Bearer \" + token),\n content_type='application/json')\n res = self.app.post(productcategory_url,\n headers=dict(Authorization=\"Bearer \" + token),\n content_type='application/json')\n res1 = json.loads(res.data.decode())\n self.assertEqual(res1['status'], 'Updated!')\n self.assertEqual(res.status_code, 200)", "def test_add_category_existing_name(self):\n category = json.dumps({\n 'name': 'Asian',\n })\n self.client.post('/category', data=category,\n headers={\"Authorization\": self.token})\n response = self.client.post('/category', data=category,\n headers={\"Authorization\": self.token})\n self.assertEqual(response.status_code, 409)\n self.assertIn('Name Asian exists', response.data.decode())", "def test_category_mixed_on_edit(self):\n self.go200('minus_upload')\n self.formfile('minus_upload', 'file', AUDIO_FILE)\n self.fv('minus_upload', 'categories', 'onecat')\n self.submit200()\n minus = MinusRecord.objects.all()[0]\n self.assert_equal(minus.categories.count(), 1)\n self.go200('minus_edit', [self.superuser, minus.id])\n self.fv('minus_upload', 'add_category', 'yuppie')\n self.submit200()\n self.assert_equal(minus.categories.count(), 2)\n self.assert_equal(minus.categories.all()[0].name, 'onecat')\n self.assert_equal(minus.categories.all()[1].name, 'yuppie')", "def test_create_system_asset(self):\n pass", "def test_add_same_category(self):\n response = self.client.post('/api/v1/categories',\n data=json.dumps(category[0]),\n content_type='application/json',\n headers=self.admin_headers)\n self.assertEqual(response.status_code, 409)\n self.assertIn('category with name already exist',\n str(response.data))", "def test_add_asset_share_feed(self):\n pass", "def test_add_child_category(self):\n self.add_success(self.test_data['pants'])\n rv = self.get('pants')\n assert in_response(rv, 'This category is empty.')\n self.add_success(self.test_data['jeans'])\n rv = self.get('pants')\n assert not in_response(rv, 'This category is empty.')\n assert in_response(rv, 'Jeans')", "def test_category_url(self):\n data = self.data1\n # response = 
self.client.post(\n # reverse('recipe:category_list', args=[data.slug]))\n # self.assertEqual(response.status_code, 200)\n self.assertTrue(isinstance(data, Category))" ]
[ "0.81523687", "0.764425", "0.7570311", "0.6889236", "0.6887647", "0.6819343", "0.67514527", "0.6708014", "0.6673375", "0.6647989", "0.65606", "0.64768696", "0.647213", "0.6441778", "0.642443", "0.642372", "0.63488626", "0.63487214", "0.63214856", "0.6315992", "0.631255", "0.6299389", "0.62952477", "0.6287453", "0.6250629", "0.6197039", "0.6187445", "0.61579037", "0.6152437", "0.6144456" ]
0.95660377
0
Test case for add_network
def test_add_network(self):
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_register_network(self):\n pass", "def test_create_network():\n _network = Network()", "def test_networking_project_network_create(self):\n pass", "def test_get_network(self):\n pass", "def test_add_node(num_mutations):\n net = WeightAgnosticNetwork(10, 2, 0.5)\n for _ in range(num_mutations):\n net.mutate()\n\n num_connections_pre, num_neurons_pre, num_layers_pre = get_network_stats(net)\n net.add_node()\n assert net.get_num_connections() == num_connections_pre + 1\n assert net.num_neurons == num_neurons_pre + 1\n assert len(net.neurons_in_layer) == num_layers_pre or len(\n net.neurons_in_layer) == num_layers_pre + 1", "def test_get_networks(self):\n pass", "def test_create_cluster_network(self):\n pass", "def test_create_network_and_subnet(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 254\n self.__create_network_and_subnet_test_helper__(network_name, network_cidr)", "def test_api_use_royal_network_post(self):\n body = Network()\n response = self.client.open(\n '/api/use/royal-network/',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_03_network_create(self):\n # Validate the following\n # 1. Create a project.\n # 2. Add virtual/direct network resource to the project. User shared\n # network resource for the project\n # 3. Verify any number of Project level Virtual/Direct networks can be\n # created and used for vm deployment within the project.\n # 4. Verify shared networks (zone and domain wide) from outside the\n # project can also be used in a project.\n\n # Create project as a domain admin\n project = Project.create(\n self.apiclient,\n self.services[\"project\"],\n account=self.account.name,\n domainid=self.account.domainid\n )\n # Cleanup created project at end of test\n self.cleanup.append(project)\n self.debug(\"Created project with domain admin with ID: %s\" %\n project.id)\n\n network_offerings = list_network_offerings(\n self.apiclient,\n projectid=project.id,\n supportedServices='SourceNat',\n type='isolated',\n state='Enabled'\n )\n self.assertEqual(\n isinstance(network_offerings, list),\n True,\n \"Check for the valid network offerings\"\n )\n network_offering = network_offerings[0]\n\n self.debug(\"creating a network with network offering ID: %s\" %\n network_offering.id)\n self.services[\"network\"][\"zoneid\"] = self.zone.id\n network = Network.create(\n self.apiclient,\n self.services[\"network\"],\n networkofferingid=network_offering.id,\n projectid=project.id\n )\n self.debug(\"Created network with ID: %s\" % network.id)\n networks = Network.list(\n self.apiclient,\n projectid=project.id,\n listall=True\n )\n self.assertEqual(\n isinstance(networks, list),\n True,\n \"Check for the valid network list response\"\n )\n\n self.debug(\"Deploying VM with network: %s\" % network.id)\n\n virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n networkids=[str(network.id)],\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n self.debug(\"Deployed VM with ID: %s\" % virtual_machine.id)\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n network_offerings = list_network_offerings(\n self.apiclient,\n state='Enabled',\n guestiptype='Shared',\n name='DefaultSharedNetworkOffering',\n 
displaytext='Offering for Shared networks'\n )\n self.assertEqual(\n isinstance(network_offerings, list),\n True,\n \"Check for the valid network offerings\"\n )\n network_offering = network_offerings[0]\n\n self.debug(\"creating a shared network in domain: %s\" %\n self.domain.id)\n\n # Getting physical network and free vlan in it\n physical_network, vlan = get_free_vlan(self.apiclient, self.zone.id)\n\n self.services[\"domain_network\"][\"vlan\"] = vlan\n self.services[\"domain_network\"][\"physicalnetworkid\"] = physical_network.id\n\n # Generating random subnet number for shared network creation\n shared_network_subnet_number = random.randrange(1,254)\n\n self.services[\"domain_network\"][\"gateway\"] = \"172.16.\"+str(shared_network_subnet_number)+\".1\"\n self.services[\"domain_network\"][\"startip\"] = \"172.16.\"+str(shared_network_subnet_number)+\".2\"\n self.services[\"domain_network\"][\"endip\"] = \"172.16.\"+str(shared_network_subnet_number)+\".20\"\n\n domain_network = Network.create(\n self.apiclient,\n self.services[\"domain_network\"],\n domainid=self.domain.id,\n networkofferingid=network_offering.id,\n zoneid=self.zone.id\n )\n self.cleanup.append(domain_network)\n self.debug(\"Created network with ID: %s\" % domain_network.id)\n\n virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n networkids=[str(domain_network.id)],\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n self.debug(\"Deployed VM with ID: %s\" % virtual_machine.id)\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n # Delete VM before network gets deleted in cleanup\n virtual_machine.delete(self.apiclient, expunge=True)\n return", "def test_networking_project_network_tag_create(self):\n pass", "def test_api_use_web_network_post(self):\n body = Network()\n response = self.client.open(\n '/api/use/web-network/',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_add_connection(num_mutations):\n net = WeightAgnosticNetwork(10, 2, 0.5)\n for _ in range(num_mutations):\n net.mutate()\n\n # Raw net without hidden layers.\n num_connections_pre, num_neurons_pre, num_layers_pre = get_network_stats(net)\n net.add_connection()\n assert net.get_num_connections() == num_connections_pre + 1\n assert net.num_neurons == num_neurons_pre\n assert len(net.neurons_in_layer) == num_layers_pre", "def addNetwork(self, session: Session, network: Network) -> int:\n try:\n return NetworkManager().addNetwork(session, network)\n except TortugaException as ex:\n raise\n except Exception as ex:\n self._logger.exception(str(ex))\n raise TortugaException(exception=ex)", "def add_network(self, network, distance):\n self.networks[network] = distance", "def test_delete_network(self):\n pass", "def test_api_use_virtual_network_post(self):\n body = Network()\n response = self.client.open(\n '/api/use/virtual-network/',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_add_00():\n\n a_shape = (1, 1, 3, 4)\n b_shape = (1, 2, 3, 1)\n out_shape = (1, 2, 3, 4)\n\n class AddTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"Add\", inputs=[\"A\", \"B\"], outputs=[\"C\"])\n inputs = [\n info(\"A\", TensorProto.FLOAT, 
a_shape),\n info(\"B\", TensorProto.FLOAT, b_shape),\n ]\n outputs = [info(\"C\", TensorProto.FLOAT, out_shape)]\n\n graph = make_graph([node], \"add_graph\", inputs, outputs)\n model = make_model(graph)\n return model\n\n a = np.random.rand(*a_shape).astype(np.float32)\n b = np.random.rand(*b_shape).astype(np.float32)\n\n outputs = [\"C\"]\n AddTester({\"A\": a, \"B\": b}, outputs).run()", "def test_networking_project_network_update(self):\n pass", "def test_create_network(self):\n network = vertigo.create_network(\"test\")\n self.assert_equals(\"test\", network.address)\n network.address = \"foo\"\n self.assert_equals(\"foo\", network.address)\n network.enable_acking()\n self.assert_true(network.acking_enabled())\n network.disable_acking()\n self.assert_false(network.acking_enabled())\n network.num_ackers = 10\n self.assert_equals(10, network.num_ackers)\n network.ack_expire = 50000\n self.assert_equals(50000, network.ack_expire)\n component = network.from_verticle('test_feeder_verticle', main='test_feeder_verticle.py')\n self.assert_equals('test_feeder_verticle', component.name)\n self.assert_equals('test_feeder_verticle.py', component.main)\n component.workers = 4\n self.assert_equals(4, component.workers)\n component2 = component.to_verticle('test_worker_verticle')\n component2.main = 'test_worker_verticle.py'\n self.assert_equals('test_worker_verticle.py', component2.main)\n self.complete()", "def _build_network(self):\n pass", "def test_create_router_no_external_network_and_add_network_port(self):\n # Create Router\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n router_name = TEST_ROUTER_PREFIX + \"_ports_\" + suffix\n router_id = self.__create_router_test_helper__(router_name)\n\n # Create Network with only one subnet\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 253\n network_id, subnet_id = self.__create_network_and_subnet_test_helper__(network_name, network_cidr)\n\n port_id = self.neutron_operations.add_interface_router(router_id, subnet_id)\n self.test_world['ports'].append(port_id)", "def test_networking_project_network_get(self):\n pass", "def test_add_01():\n\n a_shape = (1, 2, 3, 4)\n b_shape = (1, 1, 3, 1)\n out_shape = (1, 2, 3, 4)\n\n class AddTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"Add\", inputs=[\"A\", \"B\"], outputs=[\"C\"])\n inputs = [info(\"A\", TensorProto.FLOAT, a_shape)]\n outputs = [info(\"C\", TensorProto.FLOAT, out_shape)]\n\n B = np.random.rand(*b_shape).astype(np.float32)\n\n b_init = from_array(B, \"B\")\n graph = make_graph([node], \"add_graph\", inputs, outputs, initializer=[b_init])\n model = make_model(graph)\n return model\n\n a = np.random.rand(*a_shape).astype(np.float32)\n\n outputs = [\"C\"]\n AddTester({\"A\": a}, outputs).run()", "def add_network(self, router, network, distance):\n self.routers[router].add_network(network, distance)", "def create_network(self, body=None):\r\n return self.post(self.networks_path, body=body)", "def test_add_02():\n\n a_shape = (1, 1, 3, 4)\n b_shape = (1, 2, 3, 1)\n out_shape = (1, 2, 3, 4)\n\n class AddTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"Add\", inputs=[\"A\", \"B\"], outputs=[\"C\"])\n inputs = [info(\"A\", TensorProto.FLOAT, a_shape)]\n outputs = [info(\"C\", TensorProto.FLOAT, out_shape)]\n\n B = np.random.rand(*b_shape).astype(np.float32)\n\n b_init = from_array(B, \"B\")\n graph = make_graph([node], \"add_graph\", inputs, outputs, initializer=[b_init])\n model = 
make_model(graph)\n return model\n\n a = np.random.rand(*a_shape).astype(np.float32)\n\n outputs = [\"C\"]\n AddTester({\"A\": a}, outputs).run()", "def test_add_03():\n\n a_shape = (1, 2, 3, 4)\n b_shape = (3, 4)\n out_shape = (1, 2, 3, 4)\n\n class AddTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"Add\", inputs=[\"A\", \"B\"], outputs=[\"C\"])\n inputs = [info(\"A\", TensorProto.FLOAT, a_shape)]\n outputs = [info(\"C\", TensorProto.FLOAT, out_shape)]\n\n B = np.random.rand(*b_shape).astype(np.float32)\n\n b_init = from_array(B, \"B\")\n graph = make_graph([node], \"add_graph\", inputs, outputs, initializer=[b_init])\n model = make_model(graph)\n return model\n\n a = np.random.rand(*a_shape).astype(np.float32)\n\n outputs = [\"C\"]\n AddTester({\"A\": a}, outputs).run()", "def test_networking_project_network_tag_put(self):\n pass", "def test_get_default_network(self):\n pass" ]
[ "0.8056717", "0.7881254", "0.7629789", "0.7488789", "0.7199418", "0.70610476", "0.7000087", "0.6990182", "0.69499254", "0.68895066", "0.6878308", "0.68459576", "0.6842636", "0.68235344", "0.6696582", "0.6688259", "0.6661542", "0.661923", "0.66191167", "0.657747", "0.65758955", "0.65507615", "0.6527314", "0.6522319", "0.65201306", "0.64961785", "0.64953184", "0.64836156", "0.64770204", "0.6434002" ]
0.9333787
0
Test case for add_project
def test_add_project(self):
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_project(self):\n pass", "def test_create_project(self):\n pass", "def test_create_project(self):\n pass", "def test_create_project_request(self):\n pass", "def test_add_project_member(self):\n pass", "def test_get_project(self):\n pass", "def test_project_creation(self):\n title = 'Project title'\n code = 'SCW-12345'\n project = self.create_project(\n title=title,\n code=code,\n institution=self.institution,\n tech_lead=self.project_owner,\n category=self.category,\n funding_source=self.funding_source,\n )\n self.assertTrue(isinstance(project, Project))\n self.assertEqual(project.__str__(), code + ' - ' + title)\n self.assertEqual(project.status, Project.AWAITING_APPROVAL)\n self.assertEqual(project.title, title)\n self.assertEqual(project.code, code)\n self.assertTrue(project.awaiting_approval())", "def test_update_project(self):\n pass", "def test_update_project(self):\n pass", "def test_patch_project(self):\n pass", "def test_add_trusted_project(self):\n pass", "def test_new_project_existing(self):\n\n project = fake_clients.FakeProject(name=\"test_project\")\n\n setup_identity_cache(projects=[project])\n\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": \"test_project_id\",\n \"roles\": \"admin,member\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n new_task = Task.objects.all()[0]\n url = \"/v1/tasks/\" + new_task.uuid\n response = self.client.post(\n url, {\"approved\": True}, format=\"json\", headers=headers\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.json(), {\"errors\": [\"actions invalid\"]})", "def test_replace_project(self):\n pass", "def test_create_project(self):\n yield self.nodes[0].overlay.create_project(\"test\", \"specpointer\", \"01-02-03\", 300, \"EUR\", 5)\n yield self.deliver_messages()\n\n # Node 2 should know about this app request now\n projects = self.nodes[1].overlay.persistence.get_projects()\n self.assertTrue(projects)\n self.assertEqual(projects[0]['id'], 1)", "def test_remove_project(self):\n pass", "def test_add_submission_service_to_project(self):\n pass", "def test_get_projects(self):\n pass", "def add_project(project):\n print('add_project: ' + str(project))\n try_insert_or_update(models.projects.insert(), # pylint: disable=no-value-for-parameter\n [dict(\n name=project['name'], path=project['name'], active=True, user_id=current_user.id)])\n return", "def test_new_project(self):\n\n setup_identity_cache()\n\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": \"test_project_id\",\n \"roles\": \"admin,member\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n new_task = Task.objects.all()[0]\n url = \"/v1/tasks/\" + new_task.uuid\n response = self.client.post(\n url, {\"approved\": True}, format=\"json\", headers=headers\n )\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), 
{\"notes\": [\"created token\"]})\n\n new_project = fake_clients.identity_cache[\"new_projects\"][0]\n self.assertEqual(new_project.name, \"test_project\")\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n data = {\"password\": \"testpassword\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_list_project(self):\n pass", "def test_create_project(self):\n self.assertEqual(Project.objects.count(), 2)\n\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 201, msg=response.content)\n self.assertEqual(Project.objects.count(), 3)\n new_project = Project.objects.get(title=NEW_PROJECT_TITLE)\n model_dict = model_to_dict(new_project)\n model_dict['readme'] = model_dict['readme'].raw\n expected = {\n 'id': new_project.pk,\n 'title': new_project.title,\n 'type': new_project.type,\n 'parent': self.category.pk,\n 'description': new_project.description,\n 'readme': new_project.readme.raw,\n 'public_guest_access': False,\n 'archive': False,\n 'full_title': self.category.title + ' / ' + new_project.title,\n 'has_public_children': False,\n 'sodar_uuid': new_project.sodar_uuid,\n }\n self.assertEqual(model_dict, expected)\n self.assertEqual(\n RoleAssignment.objects.filter(\n project=new_project, user=self.user, role=self.role_owner\n ).count(),\n 1,\n )\n expected = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': new_project.description,\n 'readme': new_project.readme.raw,\n 'public_guest_access': False,\n 'sodar_uuid': str(new_project.sodar_uuid),\n }\n self.assertEqual(json.loads(response.content), expected)", "def test_create_project_target_enabled(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 201, msg=response.content)\n self.assertEqual(Project.objects.count(), 3)", "def test_read_project(self):\n pass", "def test_read_project(self):\n pass", "def test_add_trusted_project1(self):\n pass", "def test_create_project_root(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': None,\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(Project.objects.count(), 2)", "def test_fields_on_new_project(new_project) -> None:\n\n # assert isinstance(new_project.id, int) # not created yet, hasnt been committed\n assert isinstance(new_project.token, uuid.UUID)\n assert new_project.title == \"Lord of the Rings\"\n assert 
new_project.email == \"J. R. R. Tolkien\"\n assert new_project.phone == \"5558675309\"\n assert new_project.verification is None", "def test_projects_post(self):\n project = Project()\n response = self.client.open('/project-tracker/projects',\n method='POST',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def test_create_project_from_template(self):\n project_new = self.project_template.take_template()\n\n self.assertTrue(project_new)", "def test_save_method(self):\r\n self.project.save_project()\r\n projects = Project.objects.all()\r\n self.assertTrue(len(projects) > 0)" ]
[ "0.866761", "0.866761", "0.866761", "0.83126116", "0.79430395", "0.7570318", "0.7561691", "0.7533521", "0.7533521", "0.74828166", "0.74306893", "0.7343142", "0.7331581", "0.7305114", "0.72570556", "0.7224564", "0.72043514", "0.71749264", "0.71729624", "0.7158761", "0.7094995", "0.7076792", "0.70390415", "0.70390415", "0.70389056", "0.7030905", "0.70239234", "0.70157087", "0.6991088", "0.6953694" ]
0.94154257
0
Test case for add_project_member
def test_add_project_member(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_add_role_to_project_member(self):\n pass", "def test_add_team_member(self):\n pass", "def test_add_project(self):\n pass", "def test_remove_project_member(self):\n pass", "def test_get_member(self):\n user_new = self.make_user('user_new')\n self.make_assignment(self.project, user_new, self.role_contributor)\n url = reverse('projectroles:api_project_list')\n response = self.request_knox(url, token=self.get_token(user_new))\n self.assertEqual(response.status_code, 200)\n response_data = json.loads(response.content)\n self.assertEqual(len(response_data), 1)\n self.assertEqual(\n response_data[0]['sodar_uuid'], str(self.project.sodar_uuid)\n )", "def test_create_project_request(self):\n pass", "def test_create_project(self):\n pass", "def test_create_project(self):\n pass", "def test_create_project(self):\n pass", "def test_add_member_to_group(client):\n group = client.add_members_to_group(TEAM_ID, GROUP_ID, 35555)\n assert group.team_id == TEAM_ID\n assert group.group_id == GROUP_ID\n assert 35555 in group.members", "async def on_project_member_add(self, guild_id: int, project: dict,\n members: list) -> discord.Message:\n # print(project)\n guild = (await self.bot.fetch_guild(guild_id))\n channel = await self.bot.fetch_channel(int(project.get(\"channel\")))\n members = [(await guild.fetch_member(member)) for member in members]\n count = len(members)\n if count == 1:\n member = members[0]\n return await channel.send(f\"**> Member Update:** `{member}` was\"\n \" added to this project.\")\n if count == 2:\n return await channel.send(f\"**> Member Update:** `{members[0]} `\"\n f\"and `{members[1]}\"\n \" were added to this project.\"\n )\n else:\n last_member = members[count - 1]\n members = members.pop(count - 1)\n string = \"`\"\n members = string + \"`, \".join(str(x) for x in members) + string\n members = members + f\" and `{last_member}`\"\n return await channel.send(f\"**> Member Update:** {members} were \"\n \"added to this project.\")", "def test_create_member(self):\r\n resource = 'member'\r\n cmd = member.CreateMember(test_cli20.MyApp(sys.stdout), None)\r\n address = '10.0.0.1'\r\n port = '8080'\r\n tenant_id = 'my-tenant'\r\n my_id = 'my-id'\r\n pool_id = 'pool-id'\r\n args = ['--address', address, '--protocol-port', port,\r\n '--tenant-id', tenant_id, pool_id]\r\n position_names = ['address', 'protocol_port', 'tenant_id', 'pool_id',\r\n 'admin_state_up']\r\n position_values = [address, port, tenant_id, pool_id, True]\r\n self._test_create_resource(resource, cmd, None, my_id, args,\r\n position_names, position_values,\r\n admin_state_up=None)", "def test_teams_invite_member(self):\n pass", "async def add(self, ctx, project_name: str,\n members: commands.Greedy[discord.Member]) -> None:\n project = project_name\n if not ctx.projects.find_project(project_name):\n await ctx.send(\"This project doesn't exist.\")\n return\n if str(ctx.author.id) != ctx.projects.find_project(project).get(\n \"owner\"):\n await ctx.send(\"You can't add members to this project.\")\n return\n members = members if len(members) > 0 else [ctx.author]\n count = len(members)\n channel = ctx.guild.get_channel(\n int(ctx.projects.find_project(project).get(\"channel\")))\n for member in members:\n await channel.set_permissions(member, read_messages=True,\n send_messages=False)\n ctx.projects.add_project_members(project, [x.id for x in members])\n if members == ctx.author:\n await ctx.send(f\"You're already a member.\")\n if count == 1:\n member = members[0]\n await ctx.send(f\"`{member}` is now a 
member.\")\n if count == 2:\n await ctx.send(f\"`{members[0]}` and `{members[1]} `\"\n \"are now members.\")\n else:\n last_member = members[count - 1]\n members = members.pop(count - 1)\n string = \"`\"\n members = string + \", \".join(str(x) for x in members) + string\n members = members + f\" and `{last_member}`\"\n await ctx.send(f\"{members} are now members of your project.\")", "def test_handle_add_not_admin(self):\n test_user = User(\"userid\")\n test_user.github_username = \"githubuser\"\n team = Team(\"BRS\", \"brs\", \"web\")\n team.github_team_id = \"githubid\"\n self.db.retrieve.return_value = test_user\n self.db.query.return_value = [team]\n self.assertTupleEqual(self.testcommand.handle(\"team add brs ID\", user),\n (self.testcommand.permission_error, 200))\n self.db.store.assert_not_called()\n self.gh.add_team_member.assert_not_called()", "def test_add_members_to_group(client):\n group = client.add_members_to_group(TEAM_ID, GROUP_ID, [52911, 35555])\n assert group.team_id == TEAM_ID\n assert group.group_id == GROUP_ID\n assert 52911 in group.members\n assert 35555 in group.members", "def test_add_member_by_id_to_group1(self):\n pass", "def test_add_member_by_id_to_group(self):\n pass", "def create_memberships_project():\n client = RequestManager()\n client.set_method(\"POST\")\n client.set_endpoint(\"/projects/{0}/memberships\".format(STORED_ID['project_id']))\n body = {\"person_id\": CONFIG_DATA['member_id'], \"role\": 'member'}\n client.set_body(json.dumps(body))\n client.execute_request()", "def test_add_trusted_project(self):\n pass", "def test_project_creation(self):\n title = 'Project title'\n code = 'SCW-12345'\n project = self.create_project(\n title=title,\n code=code,\n institution=self.institution,\n tech_lead=self.project_owner,\n category=self.category,\n funding_source=self.funding_source,\n )\n self.assertTrue(isinstance(project, Project))\n self.assertEqual(project.__str__(), code + ' - ' + title)\n self.assertEqual(project.status, Project.AWAITING_APPROVAL)\n self.assertEqual(project.title, title)\n self.assertEqual(project.code, code)\n self.assertTrue(project.awaiting_approval())", "def test_add_submission_service_to_project(self):\n pass", "def test_fields_on_new_project(new_project) -> None:\n\n # assert isinstance(new_project.id, int) # not created yet, hasnt been committed\n assert isinstance(new_project.token, uuid.UUID)\n assert new_project.title == \"Lord of the Rings\"\n assert new_project.email == \"J. R. R. 
Tolkien\"\n assert new_project.phone == \"5558675309\"\n assert new_project.verification is None", "def test_add_trusted_project2(self):\n pass", "def test_000_add_user(self):\n # This case is always passed because it's tested in setup_module,\n # If setup module fails, this case will never run\n pass", "def test_add_trusted_project1(self):\n pass", "def test_user_can_create_a_project(self):\n self.assertEqual(project_model.Project.objects.get(user=self.test_user).pk, self.test_project.pk)", "def test_remove_role_from_project_member(self):\n pass", "def test_add_user(self):\n pass", "def test_handle_add(self):\n test_user = User(\"userid\")\n test_user.permissions_level = Permissions.admin\n test_user.github_username = \"githubuser\"\n team = Team(\"BRS\", \"brs\", \"web\")\n team.github_team_id = \"githubid\"\n add_user = User(\"anotheruser\")\n add_user.github_username = \"myuser\"\n add_user.github_id = \"otherID\"\n self.db.retrieve.side_effect = [test_user, add_user]\n self.db.query.return_value = [team]\n with self.app.app_context():\n resp, code = self.testcommand.handle(\"team add brs ID\", user)\n team_attach = team.get_attachment()\n expect = {'attachments': [team_attach],\n 'text': 'Added User to brs'}\n self.assertDictEqual(resp, expect)\n self.assertEqual(code, 200)\n self.db.store.assert_called_with(team)\n assert team.has_member(\"otherID\")\n self.gh.add_team_member.assert_called_once_with(\"myuser\", \"githubid\")" ]
[ "0.82126886", "0.81758904", "0.76732403", "0.75444025", "0.7147261", "0.7115712", "0.7073174", "0.7073174", "0.7073174", "0.6956787", "0.6917595", "0.68717015", "0.67915183", "0.67410284", "0.6646233", "0.6636679", "0.6630381", "0.66245466", "0.66177475", "0.6607113", "0.6558036", "0.65242535", "0.6508377", "0.6507073", "0.64973134", "0.6486643", "0.6462954", "0.6459677", "0.6456442", "0.64529574" ]
0.9535099
0
Test case for add_recurring_schedule
def test_add_recurring_schedule(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_remove_recurring_schedule(self):\n pass", "def test_add_schedule(self):\n body = Schedule()\n response = self.client.open('/v1/schedule',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def test_list_schedules(self):\n pass", "def _create_schedules(self):\n\n ''''''", "def test_calendar_query_partial_recurring(self):\n raise SkipTest(\"test unimplemented\")", "def test_calendar_query_expanded_recurring(self):\n raise SkipTest(\"test unimplemented\")", "def mock_recurring_another_day_schedule() \\\n -> Generator[SwitcherV2Schedule, Any, None]:\n schedule_patch = patch(\n 'aioswitcher.schedules.SwitcherV2Schedule',\n recurring=True,\n start_time=create_random_time(),\n days=[WEEKDAY_TUP[get_weekday_for_day_delta(3)]])\n\n schedule = schedule_patch.start()\n yield schedule\n schedule_patch.stop()", "def test_cron_workflow_service_create_cron_workflow(self):\n pass", "def test_schedule(self):\n\n year = 2019\n month = 3\n day = 25 \n hour = 0\n\n mydate = datetime.datetime(year, month, day,hour)\n\n lower_boundary = mydate - timedelta(hours=3)\n upper_boundary = mydate + timedelta(hours=3)\n\n s4 = Schedule()\n s4.interval = 60*30\n\n s5 = Schedule()\n s5.interval = 60*45\n\n r = number_expected([s4,s5],lower_boundary,upper_boundary)\n\n self.assertEqual(r, 16 )", "def test_schedule(self):\n\n year = 2019\n month = 3\n day = 25 \n hour = 0\n\n mydate = datetime.datetime(year, month, day,hour)\n\n lower_boundary = mydate - timedelta(hours=3)\n upper_boundary = mydate + timedelta(hours=3)\n\n s4 = Schedule()\n s4.interval = 60*30\n\n s5 = Schedule()\n s5.interval = 60*45\n\n r = number_expected([s4,s5],lower_boundary,upper_boundary)\n\n self.assertEqual(r, 16 )", "def test_schedule(self):\n\n year = 2019\n month = 3\n day = 25 \n hour = 0\n\n mydate = datetime.datetime(year, month, day,hour)\n\n lower_boundary = mydate - timedelta(hours=3)\n upper_boundary = mydate + timedelta(hours=3)\n\n s = Schedule()\n s.hour_from = 0\n s.min_from = 0\n s.hour_to = 21\n s.min_to = 59\n s.interval = 60*60*3 \n\n r = number_expected([s,],lower_boundary,upper_boundary)\n\n self.assertEqual(r, 2 )", "def test_schedule(self):\n\n year = 2019\n month = 3\n day = 25 \n hour = 0\n\n mydate = datetime.datetime(year, month, day,hour)\n\n lower_boundary = mydate - timedelta(hours=3)\n upper_boundary = mydate + timedelta(hours=3)\n\n s1 = Schedule()\n s1.hour_from = 0\n s1.min_from = 0\n s1.hour_to = 21\n s1.min_to = 59\n s1.interval = 60*60*3 \n\n s2 = Schedule()\n s2.hour_from = 0\n s2.min_from = 0\n s2.hour_to = 21\n s2.min_to = 59\n s2.interval = 60*60*3 \n\n s3 = Schedule()\n s3.hour_from = 0\n s3.min_from = 0\n s3.hour_to = 21\n s3.min_to = 59\n s3.interval = 60*60*3 \n\n\n r = number_expected([s1,s2,s3],lower_boundary,upper_boundary)\n\n self.assertEqual(r, 2 )", "def test_schedule(self):\n\n year = 2019\n month = 3\n day = 25 \n hour = 0\n\n mydate = datetime.datetime(year, month, day,hour)\n\n lower_boundary = mydate - timedelta(hours=3)\n upper_boundary = mydate + timedelta(hours=3)\n\n s1 = Schedule()\n s1.hour_from = 0\n s1.min_from = 30\n s1.hour_to = 23\n s1.min_to = 30\n s1.interval = 60*30\n\n s2 = Schedule()\n s2.hour_from = 0\n s2.min_from = 30\n s2.hour_to = 23\n s2.min_to = 30\n s2.interval = 60*60\n\n s3 = Schedule()\n s3.hour_from = 22\n s3.min_from = 0\n s3.hour_to = 23\n s3.min_to = 30\n s3.interval = 60*5\n\n\n r = 
number_expected([s1,s2,s3],lower_boundary,upper_boundary)\n\n self.assertEqual(r, 25 )", "def test_schedule(self):\n\n year = 2019\n month = 3\n day = 25 \n hour = 0\n\n mydate = datetime.datetime(year, month, day,hour)\n\n lower_boundary = mydate - timedelta(hours=3)\n upper_boundary = mydate + timedelta(hours=3)\n\n s = Schedule()\n s.hour_from = 3\n s.min_from = 0\n s.hour_to = 3\n s.min_to = 59\n s.interval = 60*60*6 \n\n r = number_expected([s,],lower_boundary,upper_boundary)\n\n self.assertEqual(r, 0 )", "def add_schedule(self):\r\n\r\n # Take the schedule entires from TOML file\r\n entries = self.cfg.get('payload',{}).get('schedule')\r\n # Check for valid entires\r\n if entries:\r\n # Construct payload \r\n for payload in entries:\r\n # Parse schedule payload\r\n ready = self.construct_payload(parse = copy.deepcopy(payload), dele = 'link')\r\n # Check the entry vs a json schema\r\n check.check_entry(path='schemas/schedule.json', test=ready)\r\n # Post request\r\n b1 = self.add_post(ready, API.url_schl, self.schedules)\r\n if 'link' in payload.keys() and payload['link'] != [{}]:\r\n b2 = self.link(self.schedules[-1].get('id'))\r\n else:\r\n return b1\r\n if b2 != None:\r\n return b1 and b2\r\n else:\r\n return False", "def _add_schedule_items(self):\n\n schedules = [\n {\n 'start_time': '9:30 AM',\n 'end_time': '10:00 AM',\n 'title': 'Daily Scrum',\n 'location': 'Hogwarts',\n 'day': self.day,\n 'user': self.user.user.rolllistuser,\n },\n {\n 'start_time': '10:30 AM',\n 'end_time': '11:00 AM',\n 'title': 'Engineering Interview',\n 'location': 'Narnia',\n 'day': self.day,\n 'user': self.user.user.rolllistuser,\n },\n {\n 'start_time': '12:00 PM',\n 'end_time': '12:30 PM',\n 'title': 'Lunch',\n 'location': 'Kitchen',\n 'day': self.day,\n 'user': self.user.user.rolllistuser,\n },\n {\n 'start_time': '2:00 PM',\n 'end_time': '2:30 PM',\n 'title': 'Workout',\n 'location': 'Gym',\n 'day': self.day,\n 'user': self.user.user.rolllistuser,\n },\n ]\n\n recurring_item_data = {\n 'start_time': '3:00 PM',\n 'end_time': '3:30 PM',\n 'title': 'Recurring thing',\n 'location': 'asdf',\n 'day': self.day,\n 'user': self.user.user.rolllistuser,\n }\n\n schedule_items = []\n\n schedule_dict = {i['start_time']: i for i in schedules}\n\n for schedule in schedules:\n save_data = schedule\n save_data['start_time'] = get_relevant_time_id(schedule['start_time'])\n save_data['end_time'] = get_relevant_time_id(schedule['end_time'])\n new_schedule_item = ScheduleItem(**save_data)\n new_schedule_item.save()\n schedule_items.append(new_schedule_item)\n\n save_data = recurring_item_data\n save_data['start_time'] = get_relevant_time_id(recurring_item_data['start_time'])\n save_data['end_time'] = get_relevant_time_id(recurring_item_data['end_time'])\n new_schedule_item = ScheduleItem(**save_data)\n new_schedule_item.save()\n new_schedule_item.make_recurring([0])\n schedule_items.append(new_schedule_item)\n\n return schedule_items, schedule_dict", "def test_schedule(self):\n\n year = 2019\n month = 3\n day = 25 \n hour = 0\n\n mydate = datetime.datetime(year, month, day,hour)\n\n lower_boundary = mydate - timedelta(hours=3)\n upper_boundary = mydate + timedelta(hours=3)\n\n s = Schedule()\n s.hour_from = 22\n s.min_from = 0\n s.hour_to = 21\n s.min_to = 59\n s.interval = 60*60\n\n r = number_expected([s,],lower_boundary,upper_boundary)\n\n self.assertEqual(r, 6 )", "def test_schedule(self):\n\n year = 2019\n month = 3\n day = 25 \n hour = 0\n\n mydate = datetime.datetime(year, month, day,hour)\n\n lower_boundary = 
mydate - timedelta(hours=3)\n upper_boundary = mydate + timedelta(hours=3)\n\n s1 = Schedule()\n s1.hour_from = 0\n s1.min_from = 0\n s1.hour_to = 21\n s1.min_to = 59\n s1.interval = 60*60*3 \n\n s2 = Schedule()\n s2.hour_from = 0\n s2.min_from = 0\n s2.hour_to = 21\n s2.min_to = 59\n s2.interval = 60*60*3 \n\n r = number_expected([s1,s2],lower_boundary,upper_boundary)\n\n self.assertEqual(r, 2 )", "def test_create_sugar_reminder_task(self):\n LOG.debug('test_create_sugar_reminder_task')\n business = Business.objects.get(id=114)\n subject = 'Test task'\n offset_days = 120\n create_sugar_reminder_task(self.sugar, business, subject, offset_days)\n # get parent_id from get_entry_list email = \n module = \"Accounts\"\n query = build_recent_entry_query(module=module, test_mode=True, \n get_modified=False, start=None)\n sugar_list = self.sugar.get_entry_list(module, query)\n query = \"tasks.name = '%s' and tasks.parent_id = '%s'\" % (subject, \n sugar_list[0]['id'])\n sugar_list = self.sugar.get_entry_list(module='Tasks', query=query)\n if sugar_list and len(sugar_list) == 1:\n self.assertTrue(True)\n else:\n self.assertTrue(False)", "def test_cron_workflow_service_update_cron_workflow(self):\n pass", "def test_get_cron_expression_before_create(\n self,\n mock_schedule_service_create,\n mock_schedule_service_get,\n mock_schedule_bucket_exists,\n job_spec,\n mock_load_yaml_and_json,\n ):\n aiplatform.init(\n project=_TEST_PROJECT,\n staging_bucket=_TEST_GCS_BUCKET_NAME,\n location=_TEST_LOCATION,\n credentials=_TEST_CREDENTIALS,\n )\n\n job = pipeline_jobs.PipelineJob(\n display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,\n template_path=_TEST_TEMPLATE_PATH,\n parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,\n input_artifacts=_TEST_PIPELINE_INPUT_ARTIFACTS,\n enable_caching=True,\n )\n\n pipeline_job_schedule = preview_pipeline_job_schedules.PipelineJobSchedule(\n pipeline_job=job,\n display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,\n )\n\n with pytest.raises(RuntimeError) as e:\n pipeline_job_schedule.cron_expression\n\n assert e.match(regexp=r\"PipelineJobSchedule resource has not been created.\")\n\n pipeline_job_schedule.create(\n cron_expression=_TEST_PIPELINE_JOB_SCHEDULE_CRON,\n max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,\n max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,\n service_account=_TEST_SERVICE_ACCOUNT,\n network=_TEST_NETWORK,\n create_request_timeout=None,\n )\n\n pipeline_job_schedule.cron_expression", "def mock_recurring_tommorow_schedule() \\\n -> Generator[SwitcherV2Schedule, Any, None]:\n schedule_patch = patch(\n 'aioswitcher.schedules.SwitcherV2Schedule',\n recurring=True,\n start_time=create_random_time(),\n days=[WEEKDAY_TUP[get_weekday_for_day_delta()]])\n\n schedule = schedule_patch.start()\n yield schedule\n schedule_patch.stop()", "def test_add(self):\n result = CalculateDueDate.add(self.test_time, self.test_turn_time)\n self.assertEqual(dt.datetime(2021, 6, 18, 15, 0, 0), result)", "def test_add(self):\n sched = Schedule()\n sched.append(Play(Waveform(np.ones(5)), DriveChannel(0)), inplace=True)\n inst_map = InstructionScheduleMap()\n\n inst_map.add(\"u1\", 1, sched)\n inst_map.add(\"u1\", 0, sched)\n\n self.assertIn(\"u1\", inst_map.instructions)\n self.assertEqual(inst_map.qubits_with_instruction(\"u1\"), [0, 1])\n self.assertTrue(\"u1\" in inst_map.qubit_instructions(0))\n\n with self.assertRaises(PulseError):\n inst_map.add(\"u1\", (), sched)\n with self.assertRaises(PulseError):\n inst_map.add(\"u1\", 1, \"not a 
schedule\")", "def test_get_next_n_schedule(self):\n expected_list = [datetime.datetime(2021, 8, 7, 8, 46, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 9, 0, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 9, 23, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 9, 46, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 10, 0, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 10, 23, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 10, 46, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 11, 0, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 11, 23, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 11, 46, tzinfo=datetime.timezone.utc)]\n\n from_dt = datetime.datetime(2021, 8, 7, 8, 30, 57, tzinfo=datetime.timezone.utc)\n result = AWSCron.get_next_n_schedule(10, from_dt, '0/23 * * * ? *')\n self.assertEqual(str(expected_list), str(result))", "def test_schedule_across_dst(self):\n self.mockTicketAddMessage()\n # start five hours from now\n params = self._getNowAsDict(add_hours=266) # 11 days 2 hours from now\n target = self._getNowAsDict(add_hours=271) # this implies a maintenance scheduled at CST(-6 UTC) into CDT (-5 CDT) \n target_maintcal_datetime = MaintcalDatetime(\n int(target['start_year']),\n int(target['start_month']),\n int(target['start_day']),\n int(target['start_hour']), \n int(params['start_minute']),0) \n params['tzname'] = 'America%2FChicago'\n params['is_dst'] = '1'\n response = self.app.post(url_for(controller='maintenances', action='schedule', id=3),\n params=params)\n self.assert_(response.body)\n this_maint = db_sess.query(ScheduledMaintenance).get(3)\n self.assertEqual(this_maint.services[0].start_time,target_maintcal_datetime)", "def test_get_cron_before_create(\n self,\n mock_schedule_service_create,\n mock_schedule_service_get,\n mock_schedule_bucket_exists,\n job_spec,\n mock_load_yaml_and_json,\n ):\n aiplatform.init(\n project=_TEST_PROJECT,\n staging_bucket=_TEST_GCS_BUCKET_NAME,\n location=_TEST_LOCATION,\n credentials=_TEST_CREDENTIALS,\n )\n\n job = pipeline_jobs.PipelineJob(\n display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,\n template_path=_TEST_TEMPLATE_PATH,\n parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,\n input_artifacts=_TEST_PIPELINE_INPUT_ARTIFACTS,\n enable_caching=True,\n )\n\n pipeline_job_schedule = pipeline_job_schedules.PipelineJobSchedule(\n pipeline_job=job,\n display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,\n )\n\n with pytest.raises(RuntimeError) as e:\n pipeline_job_schedule.cron\n\n assert e.match(regexp=r\"PipelineJobSchedule resource has not been created.\")\n\n pipeline_job_schedule.create(\n cron=_TEST_PIPELINE_JOB_SCHEDULE_CRON,\n max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,\n max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,\n service_account=_TEST_SERVICE_ACCOUNT,\n network=_TEST_NETWORK,\n create_request_timeout=None,\n )\n\n pipeline_job_schedule.cron", "async def test_modify_schedule_type(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n interval_schedule = IntervalSchedule()\n interval_schedule.name = 'sleep10'\n interval_schedule.process_name = 'sleep10'\n interval_schedule.repeat = datetime.timedelta(seconds=0)\n\n await scheduler.save_schedule(interval_schedule)\n\n manual_schedule = ManualSchedule()\n manual_schedule.schedule_id = 
interval_schedule.schedule_id\n manual_schedule.name = 'manual'\n manual_schedule.process_name = 'sleep10'\n manual_schedule.repeat = datetime.timedelta(seconds=0)\n\n await scheduler.save_schedule(manual_schedule)\n\n # Assert: only 1 task is running\n schedule = await scheduler.get_schedule(manual_schedule.schedule_id)\n\n assert isinstance(schedule, ManualSchedule)\n\n await self.stop_scheduler(scheduler)", "def test_call_schedule_service_create_with_different_timezone(\n self,\n mock_schedule_service_create,\n mock_schedule_service_get,\n mock_schedule_bucket_exists,\n job_spec,\n mock_load_yaml_and_json,\n ):\n aiplatform.init(\n project=_TEST_PROJECT,\n staging_bucket=_TEST_GCS_BUCKET_NAME,\n location=_TEST_LOCATION,\n credentials=_TEST_CREDENTIALS,\n )\n\n job = pipeline_jobs.PipelineJob(\n display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,\n template_path=_TEST_TEMPLATE_PATH,\n parameter_values=_TEST_PIPELINE_PARAMETER_VALUES,\n input_artifacts=_TEST_PIPELINE_INPUT_ARTIFACTS,\n enable_caching=True,\n )\n\n pipeline_job_schedule = pipeline_job_schedules.PipelineJobSchedule(\n pipeline_job=job,\n display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,\n )\n\n test_pipeline_job_schedule_cron_tz_expression = \"TZ=America/New_York * * * * *\"\n pipeline_job_schedule.create(\n cron=test_pipeline_job_schedule_cron_tz_expression,\n max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,\n max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,\n service_account=_TEST_SERVICE_ACCOUNT,\n network=_TEST_NETWORK,\n create_request_timeout=None,\n )\n\n expected_runtime_config_dict = {\n \"gcsOutputDirectory\": _TEST_GCS_BUCKET_NAME,\n \"parameterValues\": _TEST_PIPELINE_PARAMETER_VALUES,\n \"inputArtifacts\": {\"vertex_model\": {\"artifactId\": \"456\"}},\n }\n runtime_config = gca_pipeline_job.PipelineJob.RuntimeConfig()._pb\n json_format.ParseDict(expected_runtime_config_dict, runtime_config)\n\n job_spec = yaml.safe_load(job_spec)\n pipeline_spec = job_spec.get(\"pipelineSpec\") or job_spec\n\n # Construct expected request\n expected_gapic_pipeline_job_schedule = gca_schedule.Schedule(\n display_name=_TEST_PIPELINE_JOB_SCHEDULE_DISPLAY_NAME,\n cron=test_pipeline_job_schedule_cron_tz_expression,\n max_concurrent_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_CONCURRENT_RUN_COUNT,\n max_run_count=_TEST_PIPELINE_JOB_SCHEDULE_MAX_RUN_COUNT,\n create_pipeline_job_request={\n \"parent\": _TEST_PARENT,\n \"pipeline_job\": {\n \"runtime_config\": runtime_config,\n \"pipeline_spec\": dict_to_struct(pipeline_spec),\n \"service_account\": _TEST_SERVICE_ACCOUNT,\n \"network\": _TEST_NETWORK,\n },\n },\n )\n\n mock_schedule_service_create.assert_called_once_with(\n parent=_TEST_PARENT,\n schedule=expected_gapic_pipeline_job_schedule,\n timeout=None,\n )\n\n assert pipeline_job_schedule._gca_resource == make_schedule(\n gca_schedule.Schedule.State.COMPLETED\n )", "def test_friday_8_hours(self):\n test_time = dt.datetime(2021, 6, 18, 15, 0, 0)\n test_turn_time = 8\n result = CalculateDueDate.add(test_time, test_turn_time)\n self.assertEqual(dt.datetime(2021, 6, 21, 15, 0, 0), result)" ]
[ "0.8349968", "0.7243144", "0.6945285", "0.68674254", "0.68189013", "0.6725856", "0.67006576", "0.6662517", "0.65366554", "0.65366554", "0.6528251", "0.65199625", "0.6493642", "0.64925885", "0.645706", "0.643178", "0.6423171", "0.63880414", "0.63795733", "0.63702244", "0.63447154", "0.6343539", "0.63374776", "0.63152426", "0.62877154", "0.62813365", "0.62335527", "0.61574984", "0.6142115", "0.6116433" ]
0.94817495
0
Test case for add_role_to_project_member
def test_add_role_to_project_member(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_add_role(self):\n pass", "def test_add_role_simple(self):\n pass", "def test_remove_role_from_project_member(self):\n pass", "def test_add_project_member(self):\n pass", "def test_add_role_simple_post(self):\n pass", "def test_has_role(self):\n self.make_assignment(self.project, self.user_bob, self.role_contributor)\n self.assertFalse(self.category.has_role(self.user_bob))\n self.assertTrue(self.project.has_role(self.user_bob))", "def setup_test_role(self):\n self.test_role = rand_name('role')\n resp, self.role = self.client.create_role(self.test_role)\n self.roles.append(self.role)", "def test_add_team_member(self):\n pass", "def test_user_id_role_put(self):\n pass", "def test_ipam_roles_create(self):\n pass", "def test_add_user_existing_with_role(self):\n\n project = fake_clients.FakeProject(name=\"test_project\")\n\n user = fake_clients.FakeUser(\n name=\"[email protected]\", password=\"123\", email=\"[email protected]\"\n )\n\n assignment = fake_clients.FakeRoleAssignment(\n scope={\"project\": {\"id\": project.id}},\n role_name=\"member\",\n user={\"id\": user.id},\n )\n\n setup_identity_cache(\n projects=[project], users=[user], role_assignments=[assignment]\n )\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": project.id,\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n data = {\n \"email\": \"[email protected]\",\n \"roles\": [\"member\"],\n \"project_id\": project.id,\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})\n\n tasks = Task.objects.all()\n self.assertEqual(1, len(tasks))\n self.assertTrue(tasks[0].completed)", "def test_create_namespaced_role(self):\n pass", "def test_edit_role_add_new_role(self):\n # Add node with controller role\n Nodes().nodes_discovered[0].checkbox.click()\n RolesPanel().controller.click()\n Nodes().apply_changes.click()\n time.sleep(1)\n # Add cinder role\n with Nodes() as n:\n n.nodes[0].checkbox.click()\n n.edit_roles.click()\n RolesPanel().cinder.click()\n Nodes().apply_changes.click()\n time.sleep(1)\n with Nodes() as n:\n self.assertIn(ROLE_CONTROLLER, n.nodes[0].roles.text,\n 'Controller role')\n self.assertIn(ROLE_CINDER, n.nodes[0].roles.text,\n 'Cinder role')", "def assign_member(self, project_id, user_id, role_id):\n resp = {}\n path = '/projects/%s/users/%s/roles/%s' % (project_id, user_id, role_id)\n res = self.client.call(path, 'PUT', data='', \n token=self.manager.identity.token) \n \n self.logger.debug('Grant role %s to user %s on project %s' % \n (project_id, user_id, role_id))\n return True", "def test_create_role_existing(self):\n self.assertEqual(\n RoleAssignment.objects.filter(project=self.project).count(), 1\n )\n\n url = reverse(\n 'projectroles:api_role_create',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'role': PROJECT_ROLE_CONTRIBUTOR,\n 'user': str(self.assign_user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 201, msg=response.content)\n self.assertEqual(\n RoleAssignment.objects.filter(project=self.project).count(), 2\n )\n post_data = {\n 'role': PROJECT_ROLE_GUEST,\n 'user': str(self.assign_user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', 
data=post_data)\n\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(\n RoleAssignment.objects.filter(project=self.project).count(), 2\n )", "def test_list_role(self):\n pass", "def test_create_role_success(self) -> None:\n\n role_code = \"TestRoleCode\"\n\n try:\n self.delete_role(role_code)\n except Exception as e:\n print(e)\n\n # Create a role creation request (using the access model)\n access_role_creation_request = create_access_role_creation_request(role_code)\n\n # Create a role using the LPT create_role method\n iam.roles.create_role(self.api_factory, access_role_creation_request)\n\n # Check that the role was correctly created through the access API\n access_role = self.get_access_role(role_code)\n self.assertEqual(first=access_role.id.code, second=role_code)\n\n # Check that the role was correctly created through the identity API\n identity_role = self.get_identity_role(role_code)\n self.assertEqual(first=identity_role.role_id.code, second=role_code)", "def test_remove_project_member(self):\n pass", "def test_replace_roles(self):\n pass", "def test_ipam_roles_update(self):\n pass", "def test_create_cluster_role(self):\n pass", "async def add_role_member(request, role_id):\n required_fields = [\"id\"]\n utils.validate_fields(required_fields, request.json)\n txn_key, txn_user_id = await utils.get_transactor_key(request)\n proposal_id = str(uuid4())\n batch_list = Role().member.propose.batch_list(\n signer_keypair=txn_key,\n signer_user_id=txn_user_id,\n proposal_id=proposal_id,\n role_id=role_id,\n pack_id=request.json.get(\"pack_id\"),\n next_id=request.json.get(\"id\"),\n reason=request.json.get(\"reason\"),\n metadata=request.json.get(\"metadata\"),\n )\n batch_status = await utils.send(\n request.app.config.VAL_CONN,\n batch_list,\n request.app.config.TIMEOUT,\n request.json.get(\"tracker\") and True,\n )\n if request.json.get(\"tracker\"):\n return utils.create_tracker_response(\"batch_status\", batch_status)\n return json({\"proposal_id\": proposal_id})", "def test_delete_role(self):\n pass", "def test_has_role_public(self):\n self.project.set_public()\n self.assertFalse(self.category.has_role(self.user_bob))\n self.assertTrue(self.project.has_role(self.user_bob))", "def test_patch_namespaced_role(self):\n pass", "def test_create_contributor(self):\n self.assertEqual(\n RoleAssignment.objects.filter(project=self.project).count(), 1\n )\n url = reverse(\n 'projectroles:api_role_create',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'role': PROJECT_ROLE_CONTRIBUTOR,\n 'user': str(self.assign_user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 201, msg=response.content)\n self.assertEqual(\n RoleAssignment.objects.filter(project=self.project).count(), 2\n )\n role_as = RoleAssignment.objects.filter(\n project=self.project,\n role=self.role_contributor,\n user=self.assign_user,\n ).first()\n self.assertIsNotNone(role_as)\n expected = {\n 'project': str(self.project.sodar_uuid),\n 'role': PROJECT_ROLE_CONTRIBUTOR,\n 'user': str(self.assign_user.sodar_uuid),\n 'sodar_uuid': str(role_as.sodar_uuid),\n }\n self.assertEqual(json.loads(response.content), expected)", "def test_additional_emails_roles(self):\n\n # NOTE(amelia): sending this email here is probably not the intended\n # case. 
It would be more useful in utils such as a quota update or a\n # child project being created that all the project admins should be\n # notified of\n\n project = fake_clients.FakeProject(name=\"test_project\")\n\n user = fake_clients.FakeUser(\n name=\"[email protected]\", password=\"123\", email=\"[email protected]\"\n )\n\n user2 = fake_clients.FakeUser(\n name=\"[email protected]\", password=\"123\", email=\"[email protected]\"\n )\n\n user3 = fake_clients.FakeUser(\n name=\"[email protected]\", password=\"123\", email=\"[email protected]\"\n )\n\n assignments = [\n fake_clients.FakeRoleAssignment(\n scope={\"project\": {\"id\": project.id}},\n role_name=\"member\",\n user={\"id\": user.id},\n ),\n fake_clients.FakeRoleAssignment(\n scope={\"project\": {\"id\": project.id}},\n role_name=\"project_admin\",\n user={\"id\": user.id},\n ),\n fake_clients.FakeRoleAssignment(\n scope={\"project\": {\"id\": project.id}},\n role_name=\"member\",\n user={\"id\": user2.id},\n ),\n fake_clients.FakeRoleAssignment(\n scope={\"project\": {\"id\": project.id}},\n role_name=\"project_admin\",\n user={\"id\": user2.id},\n ),\n fake_clients.FakeRoleAssignment(\n scope={\"project\": {\"id\": project.id}},\n role_name=\"member\",\n user={\"id\": user3.id},\n ),\n fake_clients.FakeRoleAssignment(\n scope={\"project\": {\"id\": project.id}},\n role_name=\"project_mod\",\n user={\"id\": user3.id},\n ),\n ]\n\n setup_identity_cache(\n projects=[project], users=[user, user2, user3], role_assignments=assignments\n )\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": project.id,\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n\n data = {\n \"email\": \"[email protected]\",\n \"roles\": [\"member\"],\n \"project_id\": project.id,\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})\n\n self.assertEqual(len(mail.outbox), 2)\n\n self.assertEqual(len(mail.outbox[0].to), 2)\n self.assertEqual(set(mail.outbox[0].to), set([user.email, user2.email]))\n self.assertEqual(mail.outbox[0].subject, \"invite_user_to_project_additional\")\n\n # Test that the token email gets sent to the other addresses\n self.assertEqual(mail.outbox[1].to[0], \"[email protected]\")\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n\n data = {\"confirm\": True, \"password\": \"1234\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def can_set_role(userid, role, group):", "def test_create_role_for_all_namespaces(self):\n pass", "def test_get_member(self):\n user_new = self.make_user('user_new')\n self.make_assignment(self.project, user_new, self.role_contributor)\n url = reverse('projectroles:api_project_list')\n response = self.request_knox(url, token=self.get_token(user_new))\n self.assertEqual(response.status_code, 200)\n response_data = json.loads(response.content)\n self.assertEqual(len(response_data), 1)\n self.assertEqual(\n response_data[0]['sodar_uuid'], str(self.project.sodar_uuid)\n )" ]
[ "0.841619", "0.82518697", "0.8169925", "0.8058054", "0.80104566", "0.7648387", "0.7197095", "0.71273685", "0.7111336", "0.70626056", "0.7048378", "0.703866", "0.691923", "0.68821615", "0.68614334", "0.685862", "0.6786615", "0.6769088", "0.67576975", "0.6705574", "0.6684479", "0.664449", "0.6619348", "0.6568197", "0.6547175", "0.6525681", "0.6469101", "0.6429095", "0.64103645", "0.6396507" ]
0.9625025
0
Test case for add_submission_service_to_project
def test_add_submission_service_to_project(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_remove_submission_service_from_project(self):\n pass", "def test_update_submission_service(self):\n pass", "def test_submit_asset_to_submission_service(self):\n pass", "def test_list_submission_serivces_for_project(self):\n pass", "def test_add_project(self):\n pass", "def test_get_submission(self):\n # creating a submission\n sub_register = register_ok_submission(self, self.token)\n response_data = json.loads(sub_register.data.decode())\n self.assertTrue(response_data['status']=='success')\n\n # getting it from the service\n get_response = get_submissions(self, self.token)\n response_data = json.loads(get_response.data.decode())\n self.assertTrue(response_data['data'][0]['text_count']==2)\n self.assertTrue(isinstance(response_data['data'][0]['texts'], list))", "def test_create_project_request(self):\n pass", "def test_question_submission_successfully(self):\n with self.client:\n \n response = self.add_question(\"1\",\"hello\",\"hello world\",\"java\",\"kenneth\")\n self.assertEqual(response.status_code, 201)", "def test_create_valid_submission(self):\n with self.client:\n # valid submission registration\n sub_response = register_ok_submission(self, self.token)\n response_data = json.loads(sub_response.data.decode())\n self.assertTrue(response_data['status']=='success')", "def test_add_project_member(self):\n pass", "def test_issue_add_subscription(self):\n pass", "def testServicePost(self):\n\n text = \"This is a test sentence. And another sentence to split.\"\n results = self.client.post(\"workflow\", json={\"name\": \"post\", \"elements\": [text]}).json()\n\n self.assertEqual(len(results), 1)\n self.assertEqual(len(results[0]), 2)", "def test_add_trusted_project(self):\n pass", "def test_create_confirm_service_details(self):\n pass", "def test_task_add():\n pytest.fail('Not implemented yet.')", "def test_workflows_post(self):\n pass", "def test_add_trusted_project7(self):\n pass", "def test_tests():\n submission = SubmissionBuilder(\"t\", \"b\", [\"anything\"]).build()\n assert submission.get(\"results\") == [\"anything\"], submission", "def test_01_service_offerings(self):\n # Validate the following\n # 1. Create a project.\n # 2. List service offerings for the project. 
All SO available in the\n # domain can be used for project resource creation.\n\n # Create project as a domain admin\n project = Project.create(\n self.apiclient,\n self.services[\"project\"],\n account=self.account.name,\n domainid=self.account.domainid\n )\n # Cleanup created project at end of test\n self.cleanup.append(project)\n self.debug(\"Created project with domain admin with ID: %s\" %\n project.id)\n\n self.debug(\n \"Deploying VM instance for project: %s & service offering: %s\" % (\n project.id,\n self.service_offering.id\n ))\n virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n return", "def test_cron_workflow_service_create_cron_workflow(self):\n pass", "def test_submit_for_endorsement(self):", "def test_task_add_invalid_form():\n pytest.fail('Not implemented yet.')", "def test_create_submission(self):\n yield self.nodes[0].overlay.create_project(\"test\", \"specpointer\", \"01-02-03\", 300, \"EUR\", 5)\n yield self.deliver_messages()\n\n # Test making a submission for an unknown project\n self.assertRaises(RuntimeError, self.nodes[1].overlay.create_submission, 'a', 3, 'test')\n\n # Node 2 now makes a submission\n project = self.nodes[1].overlay.persistence.get_projects()[0]\n yield self.nodes[1].overlay.create_submission(project['public_key'].decode('hex'), project['id'], 'test')\n yield self.deliver_messages()\n\n # Node 1 should have received this submission and added it to the database\n submissions = self.nodes[0].overlay.persistence.get_submissions_for_project(project['public_key'].decode('hex'), project['id'])\n self.assertTrue(submissions)", "def test_add_trusted_project6(self):\n pass", "def test_update_submission(self):\n sub_response_register = register_ok_submission(self, self.token)\n response_data = json.loads(sub_response_register.data.decode())\n self.assertTrue(response_data['status']=='success')\n\n sub = [sub for sub in Submission.query(hash_key=self.new_user.username, range_key_condition=Submission.sort.startswith('SUBMISSION_'))][0]\n sub_response_update = self.client.put(\n '/submission/{}'.format(str(sub.public_id)),\n headers=dict(\n Authorization=\"Token {}\".format(self.token)\n ),\n data=json.dumps(dict(\n submitted_texts=['updated_text1']\n )),\n content_type='application/json'\n )\n update_data = json.loads(sub_response_update.data.decode())\n upd_sub = Submission.get(hash_key=sub.username, range_key=sub.sort)\n self.assertTrue(update_data['status']=='success')\n self.assertTrue(upd_sub.text_count == 1)", "def test_create_project(self):\n pass", "def test_create_project(self):\n pass", "def test_create_project(self):\n pass", "def test_submissions(self):\r\n # Basic case, things go well.\r\n attempt = self.create_and_submit()\r\n assert_equals(attempt.status, \"submitted\")\r\n\r\n # We post, but Software Secure doesn't like what we send for some reason\r\n with patch('verify_student.models.requests.post', new=mock_software_secure_post_error):\r\n attempt = self.create_and_submit()\r\n assert_equals(attempt.status, \"must_retry\")\r\n\r\n # We try to post, but run into an error (in this case a newtork connection error)\r\n with patch('verify_student.models.requests.post', new=mock_software_secure_post_unavailable):\r\n attempt = self.create_and_submit()\r\n assert_equals(attempt.status, 
\"must_retry\")", "def test_publish_deployment_run(self):\n pass" ]
[ "0.80246717", "0.7933138", "0.7063919", "0.67945546", "0.67048573", "0.65570164", "0.64308405", "0.6261419", "0.6186436", "0.61797863", "0.6114426", "0.6111672", "0.60713226", "0.60485977", "0.6019403", "0.60181093", "0.6015916", "0.60029244", "0.5983504", "0.5976141", "0.59062797", "0.5893177", "0.5887293", "0.58706033", "0.5869949", "0.5868668", "0.5868668", "0.5868668", "0.5830798", "0.57978195" ]
0.96346676
0
Test case for add_team_manager_to_team
def test_add_team_manager_to_team(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_remove_team_manager_from_team(self):\n pass", "def test_add_team_member(self):\n pass", "def test_assign_managing_team(self):\n pass", "def test_create_team(self):\n pass", "def test_update_team(self):\n pass", "def test_teams_add_user_to_team_v2(self):\n pass", "def test_teams_create(self):\n pass", "def test_teams_add_user_to_team_v1(self):\n pass", "def test_handle_assign_as_team_lead(self):\r\n def facade_retrieve_side_effect(*args, **kwargs):\r\n \"\"\"Return a side effect for the mock facade.\"\"\"\r\n if args[0] == Project:\r\n return Project(\"\", [])\r\n else:\r\n calling_user = User(user)\r\n return calling_user\r\n self.mock_facade.retrieve.side_effect = facade_retrieve_side_effect\r\n team = Team(\"GTID\", \"team-name\", \"display-name\")\r\n team.team_leads.add(user)\r\n self.mock_facade.query.return_value = [team]\r\n self.assertTupleEqual(\r\n self.testcommand.handle(\"project assign ID team-name\",\r\n user),\r\n (\"Project successfully assigned!\", 200))", "def add_user_to_team(self, for_user, to_manager):\n for_user = User.get_user_by_username(for_user)\n manager = User.get_user_by_username(to_manager)\n # @Todo test inheritance of get_user_by_username\n self.access_handler.check_add_user_to_team(for_user, manager)\n manager.add_user_to_team(for_user)", "def test_retrieve_team(self):\n pass", "def test_handle_add(self):\n test_user = User(\"userid\")\n test_user.permissions_level = Permissions.admin\n test_user.github_username = \"githubuser\"\n team = Team(\"BRS\", \"brs\", \"web\")\n team.github_team_id = \"githubid\"\n add_user = User(\"anotheruser\")\n add_user.github_username = \"myuser\"\n add_user.github_id = \"otherID\"\n self.db.retrieve.side_effect = [test_user, add_user]\n self.db.query.return_value = [team]\n with self.app.app_context():\n resp, code = self.testcommand.handle(\"team add brs ID\", user)\n team_attach = team.get_attachment()\n expect = {'attachments': [team_attach],\n 'text': 'Added User to brs'}\n self.assertDictEqual(resp, expect)\n self.assertEqual(code, 200)\n self.db.store.assert_called_with(team)\n assert team.has_member(\"otherID\")\n self.gh.add_team_member.assert_called_once_with(\"myuser\", \"githubid\")", "def test_handle_create_multiple_team_lookup_error(self):\r\n team1 = Team(\"GTID1\", \"team-name1\", \"name1\")\r\n team2 = Team(\"GTID2\", \"team-name2\", \"name2\")\r\n team1.team_leads.add(user)\r\n team2.team_leads.add(user)\r\n self.mock_facade.query.return_value = [team1, team2]\r\n self.assertTupleEqual(\r\n self.testcommand.handle(\"project create repo-link team-name\",\r\n user),\r\n (\"2 teams found with GitHub team name team-name\", 200))", "def test_teams_save_team_member_v1(self):\n pass", "def test_createteam(self):\n p1, p2, p3 = self.create3persons()\n t = model.Team(name='Tigers', persons=[p1, p2, p3])\n id = t.store()\n t2 = model.Team(id=id)\n self.assertEqual(t.name, t2.name)\n self.assertEqual(t.persons, t2.persons)", "def test_register_team_already_team(self):\n result = self.client.post(\"/teams\", data={\"already_team\": \"Killers\"}, follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n # self.assertIn(b\"Sorry! 
That team name is already in use!\", result.data) #error:not in /teams, but should be in createTeam", "def test_handle_force_assign(self):\r\n self.mock_facade.retrieve.return_value = Project(\"GTID\", [])\r\n team = Team(\"GTID\", \"team-name\", \"display-name\")\r\n team.team_leads.add(user)\r\n self.mock_facade.query.return_value = [team]\r\n self.assertTupleEqual(\r\n self.testcommand.handle(\"project assign ID team-name -f\",\r\n user),\r\n (\"Project successfully assigned!\", 200))", "def test_handle_add_not_admin(self):\n test_user = User(\"userid\")\n test_user.github_username = \"githubuser\"\n team = Team(\"BRS\", \"brs\", \"web\")\n team.github_team_id = \"githubid\"\n self.db.retrieve.return_value = test_user\n self.db.query.return_value = [team]\n self.assertTupleEqual(self.testcommand.handle(\"team add brs ID\", user),\n (self.testcommand.permission_error, 200))\n self.db.store.assert_not_called()\n self.gh.add_team_member.assert_not_called()", "def test_teams_save_team_v1(self):\n pass", "def test_teams_invite_member(self):\n pass", "def test_handle_assign_project_team_lookup_error(self):\r\n self.mock_facade.retrieve.return_value = Project(\"\", [])\r\n self.mock_facade.query.return_value = []\r\n self.assertTupleEqual(\r\n self.testcommand.handle(\"project assign ID team-name\",\r\n user),\r\n (\"0 teams found with GitHub team name team-name\", 200))", "def test_handle_create(self):\n test_user = User(\"userid\")\n test_user.permissions_level = Permissions.admin\n test_user.github_username = \"githubuser\"\n self.db.retrieve.return_value = test_user\n self.gh.org_create_team.return_value = \"team_id\"\n inputstring = \"team create b-s --name 'B S'\"\n outputstring = \"New team created: b-s, name: B S, \"\n self.assertTupleEqual(self.testcommand.handle(inputstring, user),\n (outputstring, 200))\n inputstring += \" --platform web\"\n outputstring += \"platform: web, \"\n self.assertTupleEqual(self.testcommand.handle(inputstring, user),\n (outputstring, 200))\n self.gh.org_create_team.assert_called()\n self.gh.add_team_member.assert_called_with('githubuser', 'team_id')\n inputstring += \" --channel 'channelID'\"\n outputstring += \"added channel, \"\n self.sc.get_channel_users.return_value = ['someID', 'otherID']\n self.assertTupleEqual(self.testcommand.handle(inputstring, user),\n (outputstring, 200))\n self.sc.get_channel_users.assert_called_once_with(\"channelID\")\n self.db.retrieve.assert_called_with(User, 'otherID')\n self.gh.add_team_member.assert_called()\n inputstring += \" --lead 'someID'\"\n outputstring += \"added lead\"\n self.gh.has_team_member.return_value = False\n print(self.testcommand.handle(inputstring, user))\n self.assertTupleEqual(self.testcommand.handle(inputstring, user),\n (outputstring, 200))\n self.db.store.assert_called()", "def team_add(token_user):\n if not json_param_exists('name') or \\\n not json_param_exists('type'):\n abort(400, \"one or more required parameter is missing\")\n name = request.json['name']\n team_type = TeamType.query.filter_by(name=request.json['type']).first()\n if not team_type:\n abort(400, \"invalid team type\")\n\n if team_type.name == 'other_team':\n if not token_user.has_permission('team.create') and \\\n not token_user.has_permission('team.create.elevated'):\n abort(403, 'team creation is not permitted')\n else: # creating any team other than 'other_team' requires elevated\n if not token_user.has_permission('team.create.elevated'):\n abort(403, 'insufficient permissions to create a team of this type')\n\n team = 
Team(name=name)\n team.team_type = team_type\n\n try:\n get_db().add(team)\n get_db().commit()\n except IntegrityError:\n abort(409, 'team name is already in use')\n\n return '', 201", "def test_teams_list(self):\n pass", "def test_returns_200_if_user_org_manager(self):\n # Arrange\n self.test_user.role = UserRole.MAPPER.value # Make sure user role is Mapper\n self.test_user.save()\n add_manager_to_organisation(self.test_project.organisation, self.test_user)\n # Act\n response = self.client.get(\n self.url, headers={\"Authorization\": self.user_session_token}\n )\n # Assert\n self.assertEqual(response.status_code, 200)\n TestGetProjectsRestAPI.assert_project_response(\n response.json, self.test_project, assert_type=\"notasks\"\n )", "def test_get_teams(self):\n pass", "def test_get_teams(self):\n pass", "def test_update_team_state(self):\n pass", "def test_handle_create_no_team_lookup_error(self):\r\n self.mock_facade.query.return_value = []\r\n self.assertTupleEqual(\r\n self.testcommand.handle(\"project create repo-link team-name\",\r\n user),\r\n (\"0 teams found with GitHub team name team-name\", 200))", "def test_handle_assign_assign_error(self):\r\n self.mock_facade.retrieve.return_value = Project(\"GTID\", [])\r\n team = Team(\"GTID\", \"team-name\", \"display-name\")\r\n team.team_leads.add(user)\r\n self.mock_facade.query.return_value = [team]\r\n self.assertTupleEqual(\r\n self.testcommand.handle(\"project assign ID team-name\",\r\n user),\r\n (self.testcommand.assigned_error, 200))" ]
[ "0.80295914", "0.78750473", "0.786846", "0.7462127", "0.72877383", "0.72226095", "0.7068232", "0.6994096", "0.696774", "0.6940582", "0.6848767", "0.6813016", "0.6809541", "0.6751459", "0.6727695", "0.67230606", "0.66943234", "0.6686662", "0.6621173", "0.6589685", "0.6564817", "0.6516438", "0.64684075", "0.6465651", "0.64553314", "0.64287436", "0.64287436", "0.64216435", "0.6388669", "0.6367974" ]
0.9565149
0
Test case for add_trusted_project
def test_add_trusted_project(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_add_trusted_project1(self):\n pass", "def test_add_trusted_project3(self):\n pass", "def test_add_trusted_project4(self):\n pass", "def test_add_trusted_project2(self):\n pass", "def test_add_trusted_project7(self):\n pass", "def test_add_trusted_project6(self):\n pass", "def test_add_trusted_project5(self):\n pass", "def test_add_project(self):\n pass", "def test_remove_trusted_project(self):\n pass", "def test_remove_trusted_project1(self):\n pass", "def test_remove_trusted_project2(self):\n pass", "def test_remove_trusted_project3(self):\n pass", "def test_remove_trusted_project4(self):\n pass", "def test_create_project_request(self):\n pass", "def test_add_project_member(self):\n pass", "def test_remove_trusted_project5(self):\n pass", "def test_create_project(self):\n pass", "def test_create_project(self):\n pass", "def test_create_project(self):\n pass", "def test_remove_trusted_project7(self):\n pass", "def test_remove_trusted_project6(self):\n pass", "def test_add_submission_service_to_project(self):\n pass", "def test_create_project_target_enabled(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 201, msg=response.content)\n self.assertEqual(Project.objects.count(), 3)", "def test_patch_project(self):\n pass", "def test_get_project(self):\n pass", "def test_replace_project(self):\n pass", "def test_project_creation(self):\n title = 'Project title'\n code = 'SCW-12345'\n project = self.create_project(\n title=title,\n code=code,\n institution=self.institution,\n tech_lead=self.project_owner,\n category=self.category,\n funding_source=self.funding_source,\n )\n self.assertTrue(isinstance(project, Project))\n self.assertEqual(project.__str__(), code + ' - ' + title)\n self.assertEqual(project.status, Project.AWAITING_APPROVAL)\n self.assertEqual(project.title, title)\n self.assertEqual(project.code, code)\n self.assertTrue(project.awaiting_approval())", "def test_create_project_target_disabled(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 403, msg=response.content)\n self.assertEqual(Project.objects.count(), 2)", "def test_update_project(self):\n pass", "def test_update_project(self):\n pass" ]
[ "0.9206295", "0.9119349", "0.91069734", "0.91037977", "0.905265", "0.89352095", "0.89024603", "0.77335167", "0.7688007", "0.7375554", "0.7316886", "0.73087144", "0.72394705", "0.71377397", "0.7135141", "0.71177024", "0.71154004", "0.71154004", "0.71154004", "0.7090552", "0.7040931", "0.6838585", "0.6543693", "0.65070045", "0.64818627", "0.63962567", "0.62683785", "0.62496704", "0.6186432", "0.6186432" ]
0.95238775
0
Test case for add_trusted_project1
def test_add_trusted_project1(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_add_trusted_project3(self):\n pass", "def test_add_trusted_project2(self):\n pass", "def test_add_trusted_project4(self):\n pass", "def test_add_trusted_project7(self):\n pass", "def test_add_trusted_project6(self):\n pass", "def test_add_trusted_project(self):\n pass", "def test_add_trusted_project5(self):\n pass", "def test_remove_trusted_project1(self):\n pass", "def test_remove_trusted_project3(self):\n pass", "def test_remove_trusted_project2(self):\n pass", "def test_remove_trusted_project4(self):\n pass", "def test_remove_trusted_project(self):\n pass", "def test_remove_trusted_project5(self):\n pass", "def test_remove_trusted_project6(self):\n pass", "def test_remove_trusted_project7(self):\n pass", "def test_add_project(self):\n pass", "def test_add_project_member(self):\n pass", "def test_create_project_request(self):\n pass", "def test_add_submission_service_to_project(self):\n pass", "def test_create_project(self):\n pass", "def test_create_project(self):\n pass", "def test_create_project(self):\n pass", "def test_create_project_target_enabled(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 201, msg=response.content)\n self.assertEqual(Project.objects.count(), 3)", "def test_patch_project(self):\n pass", "def test_new_project_existing_project_new_user(self):\n setup_identity_cache()\n\n # create signup#1 - project1 with user 1\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n\n # Create signup#2 - project1 with user 2\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n\n headers = {\n \"project_name\": \"admin_project\",\n \"project_id\": \"admin_project_id\",\n \"roles\": \"admin,member\",\n \"username\": \"admin\",\n \"user_id\": \"admin_id\",\n \"authenticated\": True,\n }\n # approve signup #1\n new_task1 = Task.objects.all()[0]\n url = \"/v1/tasks/\" + new_task1.uuid\n response = self.client.post(\n url, {\"approved\": True}, format=\"json\", headers=headers\n )\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"created token\"]})\n\n # Attempt to approve signup #2\n new_task2 = Task.objects.all()[1]\n url = \"/v1/tasks/\" + new_task2.uuid\n response = self.client.post(\n url, {\"approved\": True}, format=\"json\", headers=headers\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.json(), {\"errors\": [\"actions invalid\"]})", "def test_create_project_target_disabled(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': 
str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 403, msg=response.content)\n self.assertEqual(Project.objects.count(), 2)", "def test_get_project(self):\n pass", "def test_replace_project(self):\n pass", "def test_registered_user_can_create_project(self):\n user = self._create_user({\"username\":\"user2\",\"email\":\"[email protected]\"})\n testproject = self._create_comicsite_in_admin(user,\"user1project\") \n testpage1 = create_page_in_admin(testproject,\"testpage1\")\n testpage2 = create_page_in_admin(testproject,\"testpage2\")\n \n self._test_page_can_be_viewed(user,testpage1)\n self._test_page_can_be_viewed(self.root,testpage1)", "def test_add_role_to_project_member(self):\n pass" ]
[ "0.9311907", "0.92969507", "0.92683226", "0.91964453", "0.9178843", "0.9174212", "0.9161285", "0.7790664", "0.7617954", "0.76131535", "0.7518393", "0.7481766", "0.74765563", "0.74038017", "0.73679364", "0.7182775", "0.67843926", "0.670129", "0.6545574", "0.6509795", "0.6509795", "0.6509795", "0.6263747", "0.60647535", "0.6051312", "0.6034336", "0.59757495", "0.5968817", "0.5900419", "0.5890914" ]
0.952116
0
Test case for add_trusted_project2
def test_add_trusted_project2(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_add_trusted_project1(self):\n pass", "def test_add_trusted_project3(self):\n pass", "def test_add_trusted_project4(self):\n pass", "def test_add_trusted_project6(self):\n pass", "def test_add_trusted_project7(self):\n pass", "def test_add_trusted_project(self):\n pass", "def test_add_trusted_project5(self):\n pass", "def test_remove_trusted_project2(self):\n pass", "def test_remove_trusted_project1(self):\n pass", "def test_remove_trusted_project3(self):\n pass", "def test_remove_trusted_project4(self):\n pass", "def test_remove_trusted_project(self):\n pass", "def test_remove_trusted_project5(self):\n pass", "def test_remove_trusted_project6(self):\n pass", "def test_remove_trusted_project7(self):\n pass", "def test_add_project(self):\n pass", "def test_add_project_member(self):\n pass", "def test_create_project_request(self):\n pass", "def test_create_project(self):\n pass", "def test_create_project(self):\n pass", "def test_create_project(self):\n pass", "def test_add_submission_service_to_project(self):\n pass", "def test_create_project_target_enabled(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 201, msg=response.content)\n self.assertEqual(Project.objects.count(), 3)", "def test_create_project_target_disabled(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 403, msg=response.content)\n self.assertEqual(Project.objects.count(), 2)", "def test_patch_project(self):\n pass", "def test_new_project_existing_project_new_user(self):\n setup_identity_cache()\n\n # create signup#1 - project1 with user 1\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n\n # Create signup#2 - project1 with user 2\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n\n headers = {\n \"project_name\": \"admin_project\",\n \"project_id\": \"admin_project_id\",\n \"roles\": \"admin,member\",\n \"username\": \"admin\",\n \"user_id\": \"admin_id\",\n \"authenticated\": True,\n }\n # approve signup #1\n new_task1 = Task.objects.all()[0]\n url = \"/v1/tasks/\" + new_task1.uuid\n response = self.client.post(\n url, {\"approved\": True}, format=\"json\", headers=headers\n )\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"created token\"]})\n\n # Attempt to approve signup #2\n new_task2 = Task.objects.all()[1]\n url = \"/v1/tasks/\" + new_task2.uuid\n response = self.client.post(\n 
url, {\"approved\": True}, format=\"json\", headers=headers\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.json(), {\"errors\": [\"actions invalid\"]})", "def test_replace_project(self):\n pass", "def test_get_project(self):\n pass", "def test_registered_user_can_create_project(self):\n user = self._create_user({\"username\":\"user2\",\"email\":\"[email protected]\"})\n testproject = self._create_comicsite_in_admin(user,\"user1project\") \n testpage1 = create_page_in_admin(testproject,\"testpage1\")\n testpage2 = create_page_in_admin(testproject,\"testpage2\")\n \n self._test_page_can_be_viewed(user,testpage1)\n self._test_page_can_be_viewed(self.root,testpage1)", "def test_add_role_to_project_member(self):\n pass" ]
[ "0.93524945", "0.91748667", "0.9085901", "0.90064704", "0.89825124", "0.89697266", "0.8951871", "0.7865752", "0.77204573", "0.7566949", "0.7415315", "0.7366751", "0.7352189", "0.7314308", "0.72421914", "0.70771676", "0.67682683", "0.66284674", "0.6467487", "0.6467487", "0.6467487", "0.6345986", "0.6318252", "0.62060624", "0.6117776", "0.6101255", "0.6070521", "0.6025127", "0.6004796", "0.59860563" ]
0.9483128
0
Test case for add_trusted_project3
def test_add_trusted_project3(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_add_trusted_project4(self):\n pass", "def test_add_trusted_project1(self):\n pass", "def test_add_trusted_project7(self):\n pass", "def test_add_trusted_project6(self):\n pass", "def test_add_trusted_project5(self):\n pass", "def test_add_trusted_project2(self):\n pass", "def test_add_trusted_project(self):\n pass", "def test_remove_trusted_project3(self):\n pass", "def test_remove_trusted_project4(self):\n pass", "def test_remove_trusted_project1(self):\n pass", "def test_remove_trusted_project5(self):\n pass", "def test_remove_trusted_project2(self):\n pass", "def test_remove_trusted_project6(self):\n pass", "def test_remove_trusted_project7(self):\n pass", "def test_remove_trusted_project(self):\n pass", "def test_add_project(self):\n pass", "def test_add_project_member(self):\n pass", "def test_create_project_request(self):\n pass", "def test_create_project(self):\n pass", "def test_create_project(self):\n pass", "def test_create_project(self):\n pass", "def test_add_submission_service_to_project(self):\n pass", "def test_create_project_target_enabled(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 201, msg=response.content)\n self.assertEqual(Project.objects.count(), 3)", "def test_patch_project(self):\n pass", "def test_create_project_target_disabled(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 403, msg=response.content)\n self.assertEqual(Project.objects.count(), 2)", "def test_get_project(self):\n pass", "def test_replace_project(self):\n pass", "def test_add_role_to_project_member(self):\n pass", "def test_update_project(self):\n pass", "def test_update_project(self):\n pass" ]
[ "0.9225511", "0.9183113", "0.91391516", "0.9127116", "0.90698624", "0.90442437", "0.89806354", "0.794116", "0.7600252", "0.75882703", "0.7511307", "0.7496523", "0.7474205", "0.7434741", "0.7419216", "0.7074395", "0.67137897", "0.6623791", "0.64187926", "0.64187926", "0.64187926", "0.63423043", "0.6285568", "0.60483027", "0.60114306", "0.5971926", "0.5892267", "0.58888113", "0.5884186", "0.5884186" ]
0.95350647
0
Test case for add_trusted_project4
def test_add_trusted_project4(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_add_trusted_project3(self):\n pass", "def test_add_trusted_project7(self):\n pass", "def test_add_trusted_project6(self):\n pass", "def test_add_trusted_project1(self):\n pass", "def test_add_trusted_project5(self):\n pass", "def test_add_trusted_project(self):\n pass", "def test_add_trusted_project2(self):\n pass", "def test_remove_trusted_project4(self):\n pass", "def test_remove_trusted_project3(self):\n pass", "def test_remove_trusted_project5(self):\n pass", "def test_remove_trusted_project1(self):\n pass", "def test_remove_trusted_project6(self):\n pass", "def test_remove_trusted_project7(self):\n pass", "def test_remove_trusted_project(self):\n pass", "def test_remove_trusted_project2(self):\n pass", "def test_add_project(self):\n pass", "def test_add_project_member(self):\n pass", "def test_create_project_request(self):\n pass", "def test_add_submission_service_to_project(self):\n pass", "def test_create_project(self):\n pass", "def test_create_project(self):\n pass", "def test_create_project(self):\n pass", "def test_create_project_target_enabled(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 201, msg=response.content)\n self.assertEqual(Project.objects.count(), 3)", "def test_patch_project(self):\n pass", "def test_get_project(self):\n pass", "def test_replace_project(self):\n pass", "def test_create_project_target_disabled(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 403, msg=response.content)\n self.assertEqual(Project.objects.count(), 2)", "def test_add_role_to_project_member(self):\n pass", "def test_update_project(self):\n pass", "def test_update_project(self):\n pass" ]
[ "0.9321345", "0.9283545", "0.9263834", "0.92287755", "0.9220919", "0.9139585", "0.91044164", "0.7866021", "0.7758492", "0.76730657", "0.76374555", "0.76202387", "0.7591135", "0.7579618", "0.755832", "0.71346456", "0.6817681", "0.6675366", "0.66077816", "0.6471094", "0.6471094", "0.6471094", "0.6209337", "0.61482567", "0.6059523", "0.6029146", "0.59504557", "0.5949568", "0.5900302", "0.5900302" ]
0.94952035
0
Test case for add_trusted_project5
def test_add_trusted_project5(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_add_trusted_project4(self):\n pass", "def test_add_trusted_project6(self):\n pass", "def test_add_trusted_project7(self):\n pass", "def test_add_trusted_project3(self):\n pass", "def test_add_trusted_project1(self):\n pass", "def test_add_trusted_project(self):\n pass", "def test_add_trusted_project2(self):\n pass", "def test_remove_trusted_project5(self):\n pass", "def test_remove_trusted_project3(self):\n pass", "def test_remove_trusted_project4(self):\n pass", "def test_remove_trusted_project6(self):\n pass", "def test_remove_trusted_project1(self):\n pass", "def test_remove_trusted_project7(self):\n pass", "def test_remove_trusted_project2(self):\n pass", "def test_remove_trusted_project(self):\n pass", "def test_add_project(self):\n pass", "def test_add_project_member(self):\n pass", "def test_create_project_request(self):\n pass", "def test_add_submission_service_to_project(self):\n pass", "def test_create_project(self):\n pass", "def test_create_project(self):\n pass", "def test_create_project(self):\n pass", "def test_create_project_target_enabled(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 201, msg=response.content)\n self.assertEqual(Project.objects.count(), 3)", "def test_patch_project(self):\n pass", "def test_create_project_target_disabled(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 403, msg=response.content)\n self.assertEqual(Project.objects.count(), 2)", "def test_get_project(self):\n pass", "def test_replace_project(self):\n pass", "def test_add_role_to_project_member(self):\n pass", "def test_update_project(self):\n pass", "def test_update_project(self):\n pass" ]
[ "0.93548214", "0.93354976", "0.9304582", "0.9299463", "0.9238341", "0.90903604", "0.9074475", "0.79082066", "0.77504534", "0.7750323", "0.77044415", "0.7654292", "0.76301885", "0.75398386", "0.75297606", "0.7008189", "0.6709826", "0.65403694", "0.6507049", "0.63173455", "0.63173455", "0.63173455", "0.6092695", "0.6036938", "0.5878462", "0.58645284", "0.5854738", "0.58314735", "0.5760265", "0.5760265" ]
0.94613916
0
Test case for add_trusted_project6
def test_add_trusted_project6(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_add_trusted_project7(self):\n pass", "def test_add_trusted_project4(self):\n pass", "def test_add_trusted_project3(self):\n pass", "def test_add_trusted_project5(self):\n pass", "def test_add_trusted_project1(self):\n pass", "def test_add_trusted_project2(self):\n pass", "def test_add_trusted_project(self):\n pass", "def test_remove_trusted_project6(self):\n pass", "def test_remove_trusted_project5(self):\n pass", "def test_remove_trusted_project3(self):\n pass", "def test_remove_trusted_project7(self):\n pass", "def test_remove_trusted_project4(self):\n pass", "def test_remove_trusted_project1(self):\n pass", "def test_remove_trusted_project2(self):\n pass", "def test_remove_trusted_project(self):\n pass", "def test_add_project(self):\n pass", "def test_add_project_member(self):\n pass", "def test_add_submission_service_to_project(self):\n pass", "def test_create_project_request(self):\n pass", "def test_create_project(self):\n pass", "def test_create_project(self):\n pass", "def test_create_project(self):\n pass", "def test_patch_project(self):\n pass", "def test_create_project_target_enabled(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 201, msg=response.content)\n self.assertEqual(Project.objects.count(), 3)", "def test_replace_project(self):\n pass", "def test_add_role_to_project_member(self):\n pass", "def test_get_project(self):\n pass", "def test_update_project(self):\n pass", "def test_update_project(self):\n pass", "def test_000_add_user(self):\n # This case is always passed because it's tested in setup_module,\n # If setup module fails, this case will never run\n pass" ]
[ "0.9414184", "0.93354887", "0.92838407", "0.92823344", "0.918894", "0.90624726", "0.9032232", "0.79031926", "0.77913874", "0.7782476", "0.77823997", "0.7780702", "0.7660113", "0.75795764", "0.75311136", "0.7077016", "0.6871449", "0.66426206", "0.6561147", "0.6376654", "0.6376654", "0.6376654", "0.6240475", "0.6132141", "0.6044225", "0.5972813", "0.59562683", "0.5954621", "0.5954621", "0.590457" ]
0.94977355
0
Test case for add_trusted_project7
def test_add_trusted_project7(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_add_trusted_project6(self):\n pass", "def test_add_trusted_project4(self):\n pass", "def test_add_trusted_project3(self):\n pass", "def test_add_trusted_project5(self):\n pass", "def test_add_trusted_project1(self):\n pass", "def test_add_trusted_project(self):\n pass", "def test_add_trusted_project2(self):\n pass", "def test_remove_trusted_project7(self):\n pass", "def test_remove_trusted_project3(self):\n pass", "def test_remove_trusted_project6(self):\n pass", "def test_remove_trusted_project4(self):\n pass", "def test_remove_trusted_project5(self):\n pass", "def test_remove_trusted_project1(self):\n pass", "def test_remove_trusted_project(self):\n pass", "def test_remove_trusted_project2(self):\n pass", "def test_add_project(self):\n pass", "def test_add_project_member(self):\n pass", "def test_add_submission_service_to_project(self):\n pass", "def test_create_project_request(self):\n pass", "def test_create_project(self):\n pass", "def test_create_project(self):\n pass", "def test_create_project(self):\n pass", "def test_patch_project(self):\n pass", "def test_create_project_target_enabled(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 201, msg=response.content)\n self.assertEqual(Project.objects.count(), 3)", "def test_replace_project(self):\n pass", "def test_create_project_target_disabled(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 403, msg=response.content)\n self.assertEqual(Project.objects.count(), 2)", "def test_update_project(self):\n pass", "def test_update_project(self):\n pass", "def test_get_project(self):\n pass", "def test_add_role_to_project_member(self):\n pass" ]
[ "0.9328864", "0.9296538", "0.92676723", "0.918818", "0.91760397", "0.9110821", "0.90390265", "0.7747253", "0.7673277", "0.76582974", "0.7655529", "0.76154494", "0.7546863", "0.75040823", "0.74613756", "0.70946646", "0.68021613", "0.6726306", "0.65829325", "0.6375963", "0.6375963", "0.6375963", "0.6149214", "0.61490226", "0.5951143", "0.59361756", "0.5897513", "0.5897513", "0.58941966", "0.5893867" ]
0.94752955
0
Test case for allocate_virtualization_realm
def test_allocate_virtualization_realm(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_virtualization_realm(self):\n pass", "def test_register_virtualization_realm(self):\n pass", "def test_set_virtualization_realm_active(self):\n pass", "def test_get_virtualization_realm_resources(self):\n pass", "def test_update_virtualization_realm(self):\n pass", "def test_get_virtualization_realms(self):\n pass", "def test_determine_valid_virtualization_realms(self):\n pass", "def test_get_deployment_runs_in_virtualization_realm(self):\n pass", "def test_deallocate_virt_realm(self):\n pass", "def test_list_virtualization_realm_templates(self):\n pass", "def test_set_project_default_virtualization_realm(self):\n pass", "def test_get_templates_in_virtualization_realm(self):\n pass", "def test_update_virt_realm(self):\n pass", "def test_create_virtual_account(self):\n pass", "def test_enable_virt_realm_remote_access(self):\n pass", "def test_01_dedicated_cluster_allocation(self):\n\n # Step 1\n dedicateCmd = dedicateCluster.dedicateClusterCmd()\n dedicateCmd.clusterid = self.clusters[0].id\n dedicateCmd.domainid = self.domain.id\n dedicateCmd.account = self.account_1.name\n self.apiclient.dedicateCluster(dedicateCmd)\n\n afcmd = listAffinityGroups.listAffinityGroupsCmd()\n afcmd.account = self.account_1.name\n afcmd.domainid = self.account_1.domainid\n affinitygr_list = self.apiclient.listAffinityGroups(afcmd)\n\n # Step 2\n self.vm = VirtualMachine.create(\n self.userapiclient_1,\n self.testdata[\"small\"],\n templateid=self.template.id,\n accountid=self.account_1.name,\n domainid=self.account_1.domainid,\n serviceofferingid=self.service_offering.id,\n affinitygroupids=[affinitygr_list[0].id],\n zoneid=self.zone.id,\n mode=self.zone.networktype\n )\n # Steps to verify if VM is created on dedicated account\n vmlist = list_virtual_machines(self.apiclient,\n id=self.vm.id)\n\n hostlist = list_hosts(self.apiclient,\n id=vmlist[0].hostid)\n\n self.assertEqual(hostlist[0].clusterid,\n self.clusters[0].id,\n \"check if vm gets deployed on dedicated clusture\"\n )\n # Step 3\n self.vm_1 = VirtualMachine.create(\n self.userapiclient_2,\n self.testdata[\"small\"],\n templateid=self.template.id,\n accountid=self.account_2.name,\n domainid=self.account_2.domainid,\n serviceofferingid=self.service_offering.id,\n zoneid=self.zone.id,\n mode=self.zone.networktype\n )\n\n # Steps to verify if VM is created on dedicated account\n vmlist_1 = list_virtual_machines(self.apiclient,\n id=self.vm_1.id)\n\n hostlist_1 = list_hosts(self.apiclient,\n id=vmlist_1[0].hostid)\n\n self.assertNotEqual(hostlist_1[0].clusterid,\n self.clusters[0].id,\n \"check if vm gets deployed on correct clusture\"\n )\n\n # Step 4\n routerList = list_routers(self.apiclient,\n clusterid=self.clusters[0].id,\n networkid=self.vm_1.nic[0].networkid\n )\n self.assertEqual(\n routerList,\n None,\n \"Check Dedicated cluster is used for virtual routers \\\n that belong to non-dedicated account\")\n\n return", "def test_remove_virt_realm(self):\n pass", "def _create_vm(self):\n self._create_instance_in_the_db()\n self.type_data = db.instance_type_get_by_name(None, 'm1.large')\n self.conn.spawn(self.context, self.instance, self.network_info)\n self._check_vm_record()", "def test_create_virtual_account_transfer(self):\n pass", "def test_get_valid_networks_for_virtualization_realm(self):\n pass", "def test_aws_service_api_vm_management_get(self):\n pass", "def create_vm(self):\n\t\treturn handle_to_object(call_sdk_function('PrlSrv_CreateVm', self.handle))", "def test_get_team_owned_or_managed_virtualization_realms(self):\n pass", 
"def test_invalidate_template_cache_in_virtualization_realm(self):\n pass", "def test_create_hyperflex_vcenter_config_policy(self):\n pass", "def virtual_memory():\n mem = cext.virtual_mem()\n totphys, availphys, totsys, availsys = mem\n #\n total = totphys\n avail = availphys\n free = availphys\n used = total - avail\n percent = usage_percent((total - avail), total, round_=1)\n return svmem(total, avail, percent, used, free)", "def pre_virtual_machine_create(self, resource_dict):\n pass", "def test_get_virtual_accounts(self):\n pass", "def test_verify_enterprise_reinit(self):\n proj_obj, fabric_obj, pr_obj = self._create_prerequisites()\n esi_id = '00:11:22:33:44:55:66:77:88:99'\n vlan_1 = 42\n\n esi_id = '00:11:22:33:44:55:66:77:88:99'\n vlan_1 = 42\n vlan_2 = '4094'\n vlan_3 = 4093\n pi_name = self.id() + '_physical_interface1'\n pi = PhysicalInterface(name=pi_name,\n parent_obj=pr_obj,\n ethernet_segment_identifier=esi_id)\n pi_uuid = self._vnc_lib.physical_interface_create(pi)\n pi_obj = self._vnc_lib.physical_interface_read(id=pi_uuid)\n\n fabric_name = fabric_obj.get_fq_name()\n pi_fq_name = pi_obj.get_fq_name()\n\n # Create VPG\n vpg_name = \"vpg-1\"\n vpg = VirtualPortGroup(vpg_name, parent_obj=fabric_obj)\n vpg_uuid = self.api.virtual_port_group_create(vpg)\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_uuid)\n vpg_name = vpg_obj.get_fq_name()\n\n # Create single VN\n vn1 = VirtualNetwork('vn1-%s' % (self.id()), parent_obj=proj_obj)\n self.api.virtual_network_create(vn1)\n\n # Create a VMI that's attached to vpg-1 and having reference\n # to vn1\n vmi_obj_1 = VirtualMachineInterface(self.id() + \"1\",\n parent_obj=proj_obj)\n vmi_obj_1.set_virtual_network(vn1)\n\n # Create KV_Pairs for this VMI\n kv_pairs = self._create_kv_pairs(pi_fq_name,\n fabric_name,\n vpg_name)\n\n vmi_obj_1.set_virtual_machine_interface_bindings(kv_pairs)\n\n vmi_obj_1.set_virtual_machine_interface_properties(\n VirtualMachineInterfacePropertiesType(\n sub_interface_vlan_tag=vlan_1))\n vmi_uuid_1 = self.api.virtual_machine_interface_create(vmi_obj_1)\n vpg_obj.add_virtual_machine_interface(vmi_obj_1)\n self.api.virtual_port_group_update(vpg_obj)\n\n mock_zk = self._api_server._db_conn._zk_db\n # Verify if Znode are created for VMI1\n tagged_validation_node1 = os.path.join(\n _DEFAULT_ZK_FABRIC_ENTERPRISE_PATH_PREFIX,\n 'virtual-port-group:%s' % vpg_obj.uuid,\n 'virtual-network:%s' % vn1.uuid)\n znode_vlan_1_id = mock_zk._zk_client.read_node(\n tagged_validation_node1)\n validation_node1 = os.path.join(\n _DEFAULT_ZK_FABRIC_ENTERPRISE_PATH_PREFIX,\n 'virtual-port-group:%s' % vpg_obj.uuid,\n 'vlan:%s' % znode_vlan_1_id)\n\n # Read Znode\n znode_vmi_1_uuid = mock_zk._zk_client.read_node(validation_node1)\n # Verify if correct Znodes are created\n assert znode_vmi_1_uuid == vmi_uuid_1, \\\n \"Znode for VMI_1 (%s) doesn't exist\" % vmi_uuid_1\n\n # Attach Second VMI with untagged vlan\n vn2 = VirtualNetwork('vn2-%s' % (self.id()), parent_obj=proj_obj)\n self.api.virtual_network_create(vn2)\n\n # Create first untagged VMI and attach it to Virtual Port Group\n vmi_obj_2 = VirtualMachineInterface(self.id() + \"2\",\n parent_obj=proj_obj)\n vmi_obj_2.set_virtual_network(vn2)\n\n # Create KV_Pairs for this VMI with an untagged VLAN\n # If tor_port_vlan_id is set, then it signifies a untagged VMI\n kv_pairs = self._create_kv_pairs(pi_fq_name,\n fabric_name,\n vpg_name,\n tor_port_vlan_id=vlan_2)\n\n vmi_obj_2.set_virtual_machine_interface_bindings(kv_pairs)\n vmi_uuid_2 = 
self.api.virtual_machine_interface_create(vmi_obj_2)\n vpg_obj = self.api.virtual_port_group_read(id=vpg_obj.uuid)\n vpg_obj.add_virtual_machine_interface(vmi_obj_2)\n self.api.virtual_port_group_update(vpg_obj)\n\n validation_node2 = os.path.join(\n _DEFAULT_ZK_FABRIC_ENTERPRISE_PATH_PREFIX,\n 'virtual-port-group:%s' % vpg_obj.uuid,\n 'untagged')\n\n # Read Znode\n znode_vmi_2_uuid = mock_zk._zk_client.read_node(validation_node2)\n # Verify if correct Znodes are created\n assert znode_vmi_2_uuid == vmi_uuid_2, \\\n \"Znode for VMI_2 (%s) doesn't exist\" % vmi_uuid_2\n\n # Create another third VN with second tagged VMI\n vn3 = VirtualNetwork('vn3-%s' % (self.id()), parent_obj=proj_obj)\n self.api.virtual_network_create(vn3)\n\n # Create a VMI that's attached to vpg-1 and having reference\n # to vn3\n vmi_obj_3 = VirtualMachineInterface(self.id() + \"3\",\n parent_obj=proj_obj)\n vmi_obj_3.set_virtual_network(vn3)\n\n # Create KV_Pairs for this VMI\n kv_pairs = self._create_kv_pairs(pi_fq_name,\n fabric_name,\n vpg_name)\n\n vmi_obj_3.set_virtual_machine_interface_bindings(kv_pairs)\n\n vmi_obj_3.set_virtual_machine_interface_properties(\n VirtualMachineInterfacePropertiesType(\n sub_interface_vlan_tag=vlan_3))\n vmi_uuid_3 = self.api.virtual_machine_interface_create(vmi_obj_3)\n vpg_obj = self.api.virtual_port_group_read(id=vpg_obj.uuid)\n vpg_obj.add_virtual_machine_interface(vmi_obj_3)\n self.api.virtual_port_group_update(vpg_obj)\n vpg_obj = self.api.virtual_port_group_read(id=vpg_obj.uuid)\n\n tagged_validation_node3 = os.path.join(\n _DEFAULT_ZK_FABRIC_ENTERPRISE_PATH_PREFIX,\n 'virtual-port-group:%s' % vpg_obj.uuid,\n 'virtual-network:%s' % vn3.uuid)\n znode_vlan_3_id = mock_zk._zk_client.read_node(\n tagged_validation_node3)\n validation_node3 = os.path.join(\n _DEFAULT_ZK_FABRIC_ENTERPRISE_PATH_PREFIX,\n 'virtual-port-group:%s' % vpg_obj.uuid,\n 'vlan:%s' % znode_vlan_3_id)\n\n # Read Znode\n znode_vmi_3_uuid = mock_zk._zk_client.read_node(validation_node3)\n # Verify if correct Znodes are created\n assert znode_vmi_3_uuid == vmi_uuid_3, \\\n \"Znode for VMI_3 (%s) doesn't exist\" % vmi_uuid_3\n\n # Delete all Znodes for VMI1, VMI2, VMI3\n mock_zk._zk_client.delete_node(validation_node1, True)\n mock_zk._zk_client.delete_node(validation_node2, True)\n mock_zk._zk_client.delete_node(validation_node3, True)\n\n # manually setting contrail_version to 21.4\n # so db_resync is run as part of upgrade scenario\n self._api_server._args.contrail_version = '21.4'\n\n self._api_server._db_conn._db_resync_done.clear()\n # API server DB reinit\n self._api_server._db_init_entries()\n self._api_server._db_conn.wait_for_resync_done()\n\n # Verify if Znodes are added back\n znode_vmi_1_uuid = mock_zk._zk_client.read_node(validation_node1)\n znode_vmi_2_uuid = mock_zk._zk_client.read_node(validation_node2)\n znode_vmi_3_uuid = mock_zk._zk_client.read_node(validation_node3)\n\n # Verify if correct Znodes are created\n assert znode_vmi_1_uuid == vmi_uuid_1, \\\n \"Znode for VMI_1 (%s) doesn't exist\" % vmi_uuid_1\n assert znode_vmi_2_uuid == vmi_uuid_2, \\\n \"Znode for VMI_2 (%s) doesn't exist\" % vmi_uuid_2\n assert znode_vmi_3_uuid == vmi_uuid_3, \\\n \"Znode for VMI_3 (%s) doesn't exist\" % vmi_uuid_3\n\n # Delete VMIs from VPG\n self.api.virtual_machine_interface_delete(id=vmi_uuid_1)\n self.api.virtual_machine_interface_delete(id=vmi_uuid_2)\n self.api.virtual_machine_interface_delete(id=vmi_uuid_3)\n self.api.virtual_port_group_delete(id=vpg_obj.uuid)\n 
self.api.physical_interface_delete(id=pi_uuid)\n self.api.physical_router_delete(id=pr_obj.uuid)\n self.api.fabric_delete(id=fabric_obj.uuid)\n # adding back zknode to original version\n # so other test cases runs from the begining\n mock_zk._zk_client.update_node(PATH_SYNC, '2011')", "def test_create_virtual_account_beneficiary(self):\n pass" ]
[ "0.81738967", "0.81207174", "0.7674074", "0.7670276", "0.73524356", "0.7052354", "0.7019888", "0.6925424", "0.68759066", "0.66911906", "0.66594386", "0.6524497", "0.62494606", "0.61741275", "0.6134701", "0.5982555", "0.59148824", "0.5906454", "0.5885194", "0.5815983", "0.5792344", "0.57624984", "0.57461774", "0.5719141", "0.5692324", "0.5677442", "0.56714654", "0.56710345", "0.56687653", "0.56599766" ]
0.94568443
0
Test case for assign_managing_team
def test_assign_managing_team(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_team(self):\n pass", "def test_handle_force_assign(self):\r\n self.mock_facade.retrieve.return_value = Project(\"GTID\", [])\r\n team = Team(\"GTID\", \"team-name\", \"display-name\")\r\n team.team_leads.add(user)\r\n self.mock_facade.query.return_value = [team]\r\n self.assertTupleEqual(\r\n self.testcommand.handle(\"project assign ID team-name -f\",\r\n user),\r\n (\"Project successfully assigned!\", 200))", "def test_update_team(self):\n pass", "def test_handle_assign_assign_error(self):\r\n self.mock_facade.retrieve.return_value = Project(\"GTID\", [])\r\n team = Team(\"GTID\", \"team-name\", \"display-name\")\r\n team.team_leads.add(user)\r\n self.mock_facade.query.return_value = [team]\r\n self.assertTupleEqual(\r\n self.testcommand.handle(\"project assign ID team-name\",\r\n user),\r\n (self.testcommand.assigned_error, 200))", "def test_add_team_manager_to_team(self):\n pass", "def test_add_team_member(self):\n pass", "def test_unassign_managing_team(self):\n pass", "def test_teams_create(self):\n pass", "def test_handle_assign_as_team_lead(self):\r\n def facade_retrieve_side_effect(*args, **kwargs):\r\n \"\"\"Return a side effect for the mock facade.\"\"\"\r\n if args[0] == Project:\r\n return Project(\"\", [])\r\n else:\r\n calling_user = User(user)\r\n return calling_user\r\n self.mock_facade.retrieve.side_effect = facade_retrieve_side_effect\r\n team = Team(\"GTID\", \"team-name\", \"display-name\")\r\n team.team_leads.add(user)\r\n self.mock_facade.query.return_value = [team]\r\n self.assertTupleEqual(\r\n self.testcommand.handle(\"project assign ID team-name\",\r\n user),\r\n (\"Project successfully assigned!\", 200))", "def test_handle_assign_project_team_lookup_error(self):\r\n self.mock_facade.retrieve.return_value = Project(\"\", [])\r\n self.mock_facade.query.return_value = []\r\n self.assertTupleEqual(\r\n self.testcommand.handle(\"project assign ID team-name\",\r\n user),\r\n (\"0 teams found with GitHub team name team-name\", 200))", "def test_retrieve_team(self):\n pass", "def test_teams_invite_member(self):\n pass", "def test_teams_save_team_member_v1(self):\n pass", "def test_update_team_state(self):\n pass", "def test_createteam(self):\n p1, p2, p3 = self.create3persons()\n t = model.Team(name='Tigers', persons=[p1, p2, p3])\n id = t.store()\n t2 = model.Team(id=id)\n self.assertEqual(t.name, t2.name)\n self.assertEqual(t.persons, t2.persons)", "def test_teams_save_team_v1(self):\n pass", "def test_get_teams(self):\n pass", "def test_get_teams(self):\n pass", "def test_pre_fill_and_assign(self):\n users = []\n for i in range(1, 50):\n users.append(User.objects.create_user(username=\"u{0}\".format(i)))\n pre_fill.main([\"--managers\", \"--workshift\"])\n utils.make_workshift_pool_hours(semester=self.semester)\n # Assign manager shifts beforehand\n for user, manager in zip(users, Manager.objects.all()):\n manager.incumbent = UserProfile.objects.get(user=user)\n manager.save()\n unfinished = utils.auto_assign_shifts(self.semester)\n self.assertEqual([], unfinished)", "def test_teams_add_user_to_team_v2(self):\n pass", "def test_teams_list(self):\n pass", "def test_teams_partial_update(self):\n pass", "def _test_pre_fill_and_assign_humor(self):\n for i in range(1, 50):\n User.objects.create_user(username=\"u{0}\".format(i))\n pre_fill.main([\"--managers\", \"--workshift\"])\n utils.make_workshift_pool_hours(semester=self.semester)\n # Assign manager shifts beforehand\n manager_shifts = RegularWorkshift.objects.filter(\n pool=self.p1, 
workshift_type__auto_assign=False,\n )\n profiles = WorkshiftProfile.objects.all()\n for profile, shift in zip(profiles, manager_shifts):\n shift.current_assignees.add(profile)\n shift.save()\n unfinished = utils.auto_assign_shifts(\n self.semester, pool=WorkshiftPool.objects.get(title=\"Humor Shift\")\n )\n self.assertEqual([], unfinished)", "def assign_team(self, nickname, team):\n cmd = '{}assignTeam \"{}\" {}'.format(self.console, Commands.aquote(nickname), Commands.get_team(team))\n self.write_command(cmd)", "def test_teams_save_workgroup_member_v1(self):\n pass", "def test_post_team(self):\n response = self.client.post(url_for('teams'),\n data={\n 'name': 'test team',\n 'capacity': 11,\n 'number_players': 6,\n 'pitch_postcode': 'E1 6LT',\n 'time': '2019-01-01 13:00'\n })\n self.assertEqual(response.status_code, 201)\n self.assertIn(b'Team created successfully', response.data)\n self.assertEqual(db.session.query(Team).count(), 1)", "def creat_team(self):\n te = Teams()\n per = Persons()\n teamlist = []\n for one in per.find({'role':'leader'},{'team_name'}):\n if one['team_name'] not in teamlist:\n teamlist.append(one['team_name'])\n # print len(teamlist)\n for team in teamlist:\n tmp = {'name': '', 'leader_email': '', 'person_emails': []}\n tmp['name'] = team\n tmp['leader_email'] = per.get_one({'team_name':team,'role':'leader'})['email']\n for one in per.find({'team_name':team},{'email'}):\n tmp['person_emails'].append(one['email'])\n print tmp\n search_t = te.get_one({'name':team})\n if search_t is None:\n te.insert_one(tmp)\n else:\n te.update_one({'name':team,'leader_email':'','person_emails':''},tmp,cover=True)", "def test_teams_add_user_to_team_v1(self):\n pass", "def assign_everyone(self, team):\n team_number = Commands.get_team(team)\n for player in self.players.all_nicknames():\n cmd = '{}assignTeam \"{}\" {}'.format(self.console, Commands.aquote(player), team_number)\n self.write_command(cmd)", "def test_posting_a_teammate(self):\n response = self.client.post(\n '/team/all/', {'name': 'New Name',\n 'email': '[email protected]',\n 'slackhandle': '@NewTeam'},\n format='json')\n self.assertEqual(response.data, {'status': 201,\n \"data\": {'id': 1, 'name': 'New Name',\n 'email': '[email protected]',\n 'slackhandle': '@NewTeam'}})" ]
[ "0.77974325", "0.762171", "0.7576154", "0.74665797", "0.74637526", "0.74606645", "0.74169934", "0.7250529", "0.7208695", "0.7173935", "0.7055683", "0.7006886", "0.6909342", "0.68885905", "0.68568605", "0.6763962", "0.67095834", "0.67095834", "0.66458046", "0.6631514", "0.66257656", "0.6598994", "0.6531301", "0.65131325", "0.650717", "0.6493509", "0.6452119", "0.6421599", "0.64131445", "0.63845575" ]
0.9487738
0
Test case for clone_deployment
def test_clone_deployment(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_clone_scenario(self):\n pass", "def test_clone_system(self):\n pass", "def test_create_deployment(self):\n pass", "def test_create_deployment_entire(self):\n pass", "def test_11_clone(self):\n # Test basic operation of cloning repo which contains one\n # publisher to repo which contains same publisher\n self.pkgrecv(self.durl1, \"--clone -d {0}\".format(self.dpath2))\n\n ret = subprocess.call([\"/usr/bin/gdiff\", \"-Naur\", \"-x\", \n \"index\", \"-x\", \"trans\", self.dpath1, self.dpath2])\n self.assertTrue(ret==0)\n\n # Test that packages in dst which are not in src get removed.\n self.pkgsend_bulk(self.durl2, (self.amber30))\n self.pkgrecv(self.durl1, \"--clone -d {0}\".format(self.dpath2))\n ret = subprocess.call([\"/usr/bin/gdiff\", \"-Naur\", \"-x\", \n \"index\", \"-x\", \"trans\", self.dpath1, self.dpath2])\n self.assertTrue(ret==0)\n\n # Test that clone reports publishers not in the dest repo.\n amber = self.amber10.replace(\"open \", \"open pkg://test2/\")\n self.pkgsend_bulk(self.durl1, amber)\n self.pkgrecv(self.durl1, \"--clone -d {0}\".format(self.dpath2), exit=1)\n\n # Test that clone adds new publishers if requested.\n amber = self.amber10.replace(\"open \", \"open pkg://test2/\")\n self.pkgsend_bulk(self.durl1, amber)\n self.pkgrecv(self.durl1, \"--clone -d {0} -p test2\".format(self.dpath2))\n ret = subprocess.call([\"/usr/bin/gdiff\", \"-Naur\", \"-x\", \n \"index\", \"-x\", \"trans\", self.dpath1,\n self.dpath2])\n self.assertTrue(ret==0)\n\n # Test that clone removes all packages if source is empty\n self.pkgrecv(self.durl3, \"--clone -d {0}\".format(self.dpath2))\n self.pkgrepo(\"-s {0} list -H -p test2\".format(self.dpath2))\n self.assertEqualDiff(\"\", self.output)\n\n # Test that clone works fine with mulitple publishers\n amber = self.amber10.replace(\"open \", \"open pkg://test2/\")\n self.pkgsend_bulk(self.durl1, amber)\n\n path = os.path.join(self.dpath2, \"publisher/test1\")\n shutil.rmtree(path)\n path = os.path.join(self.dpath2, \"publisher/test2\")\n shutil.rmtree(path)\n self.pkgrecv(self.durl1, \"--clone -d {0} -p test2 -p test1\".format(\n self.dpath2))\n ret = subprocess.call([\"/usr/bin/gdiff\", \"-Naur\", \"-x\",\n \"index\", \"-x\", \"trans\", self.dpath1, self.dpath2])\n self.assertTrue(ret==0)\n\n # Test that clone fails if --raw is specified.\n self.pkgrecv(self.durl1, \"--raw --clone -d {0} -p test2\".format(\n self.dpath2), exit=2)\n\n # Test that clone fails if -c is specified.\n self.pkgrecv(self.durl1, \"-c /tmp/ --clone -d {0} -p test2\".format(\n self.dpath2), exit=2)\n\n # Test that clone fails if -a is specified.\n self.pkgrecv(self.durl1, \"-a --clone -d {0} -p test2\".format(\n self.dpath2), exit=2)\n\n # Test that clone fails if --newest is specified.\n self.pkgrecv(self.durl1, \"--newest --clone -d {0} -p test2\".format(\n self.dpath2), exit=2)", "def test_execute_deployment(self):\n pass", "def test_launch_deployment(self):\n pass", "def test_clone_repository(m_check):\n m_check.return_value = 0\n assert clone_repository(\"test\", \"test\", \"test\") == 0", "def test_get_deployment(self):\n pass", "def test_get_deployment(self):\n pass", "def test_update_deployment(self):\n pass", "def test_clone_repository(koan, assert_cloned_repo_exists):\n koan.shell('')", "def test_create_copy(self):\n\n config = {\n 'version': '2.0',\n 'input_files': {\n 'INPUT_1': [{\n 'id': 1234,\n 'type': 'PRODUCT',\n 'workspace_name': 'wksp-name',\n 'workspace_path': 'the/workspace/path/file.json',\n 'local_file_name': 'file_abcdfeg.json',\n 
'is_deleted': False,\n }]\n },\n 'output_workspaces': {\n 'OUTPUT_1': 'WORKSPACE_1'\n },\n 'tasks': [\n {\n 'task_id': 'task-1234',\n 'type': 'main',\n 'resources': {'cpu': 1.0},\n 'args': 'foo ${INPUT_1} ${JOB_OUTPUT_DIR}',\n 'env_vars': {'ENV_VAR_NAME': 'ENV_VAR_VALUE'},\n 'workspaces': {'WORKSPACE_NAME': {'mode': 'ro'}},\n 'mounts': {'MOUNT_NAME': 'MOUNT_VOLUME_NAME'},\n 'settings': {'SETTING_NAME': 'SETTING_VALUE'},\n 'volumes': {\n 'VOLUME_NAME_1': {\n 'container_path': '/the/container/path',\n 'mode': 'ro',\n 'type': 'host',\n 'host_path': '/the/host/path'\n },\n 'VOLUME_NAME_2': {\n 'container_path': '/the/other/container/path',\n 'mode': 'rw',\n 'type': 'volume',\n 'driver': 'SUPER_DRIVER_5000',\n 'driver_opts': {'turbo': 'yes-pleez'}\n }\n },\n 'docker_params': [{'flag': 'hello', 'value': 'scale'}]\n }\n ]\n }\n exe_config = ExecutionConfiguration(config)\n\n copy = exe_config.create_copy()\n self.assertDictEqual(copy.get_dict(), config)", "def test_retest_deployment_run(self):\n pass", "def test_publish_deployment_run(self):\n pass", "def test_get_deployment_runs1(self):\n pass", "def test_get_deployment_run(self):\n pass", "def test_delete_deployment(self):\n pass", "def test_deployment(self):\n config = {'example.com': {\n 'applications': {\n 'site-hybridcluster': {\n 'image': 'unknown',\n },\n 'mysql-hybridcluster': {\n 'image': 'unknown',\n }\n },\n 'version': 1\n }}\n expected = Deployment(nodes=frozenset([\n Node(hostname='example.com', applications=frozenset([\n Application(\n name='mysql-hybridcluster',\n image=DockerImage.from_string('unknown'),\n ports=frozenset(),\n ),\n Application(\n name='site-hybridcluster',\n image=DockerImage.from_string('unknown'),\n )]))]))\n self.assertEqual(expected,\n current_from_configuration(config))", "def test_delete_deployment_run(self):\n pass", "def test_release_deployment_run(self):\n pass", "def test_create_namespaced_build_request_clone(self):\n pass", "def test_redeploy_container_asset(self):\n pass", "def test_create_cloned_volume(self, mock_ghn):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n orig = {'id': '1', 'name': 'volume1', 'display_name': '',\n 'volume_type_id': type_ref['id'], 'size': 10,\n 'provider_id': 'space_orig'}\n clone = {'id': '2', 'name': 'clone1', 'display_name': '',\n 'volume_type_id': type_ref['id'], 'size': 10}\n pid = self.driver.create_cloned_volume(clone, orig)\n # We must copy entier underlying storage, ~12GB, not just 10GB\n self.assertEqual(11444 * units.Mi, self.dd_count)\n self.assertEqual('1M', self.bs)\n # Check space-create command\n expected = {'redundancy': '0', 'group': 'xanadu',\n 'name': 'clone1', 'mode': '0777',\n 'user': 'kane', 'net': 'net1',\n 'storageserver': 'stor1:gbd0,stor2:gbd0,',\n 'size': '12'}\n self.assertDictMatch(expected, self.created)\n # Check the returned provider\n expected_pid = {'provider_id': 'clone1'}\n self.assertDictMatch(expected_pid, pid)", "def test_deploy_instance_with_new_network_and_metadata(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_metadata_\" + suffix\n instance_meta = {\"test_item\": \"test_value\"}\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 251\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr,\n metadata=instance_meta)", "def test_redeploy(self):\n pass", "def 
test_create_namespaced_deployment_config(self):\n pass", "def catalog_clone(self, args):\n try:\n catalog = self.server.connect_ermrest(args.id)\n print(\"Attempting to clone catalog %s into new catalog. Please wait...\" % args.id)\n dest_cat = catalog.clone_catalog(copy_data=args.no_copy_data,\n copy_annotations=args.no_copy_annotations,\n copy_policy=args.no_copy_policy,\n truncate_after=args.no_truncate_after,\n exclude_schemas=args.exclude_schemas)\n print(\"Catalog successfully cloned into new catalog: %s\" % dest_cat.catalog_id)\n except HTTPError as e:\n if e.response.status_code == requests.codes.not_found:\n raise ResourceException('Catalog not found', e)\n else:\n raise e", "def test_create_namespaced_deployment_config_rollback(self):\n pass", "def test_deploy_instance_with_new_network(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_\" + suffix\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 252\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr)" ]
[ "0.7720832", "0.76868254", "0.7543187", "0.7406014", "0.704627", "0.6974044", "0.69144845", "0.6802333", "0.67807245", "0.67807245", "0.6773744", "0.6770297", "0.676037", "0.66459686", "0.6630639", "0.65027314", "0.64937353", "0.6485087", "0.645322", "0.6439337", "0.643094", "0.6396998", "0.6332362", "0.6330075", "0.63132286", "0.62643373", "0.6254787", "0.6246752", "0.6239403", "0.6210062" ]
0.94979024
0
Test case for clone_scenario
def test_clone_scenario(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_clone_system(self):\n pass", "def test_clone_deployment(self):\n pass", "def test_clone_change_param(self, cosmo):\n pass", "def test_clone_repository(m_check):\n m_check.return_value = 0\n assert clone_repository(\"test\", \"test\", \"test\") == 0", "def clone(self):", "def test_clone(self):\n f0 = 5 * (np.random.rand(10, 5) - 0.5)\n ga = population.Evolver(f0, eval_one_max)\n ga.clone()\n\n # should have created a new generation\n self.assertEqual(len(ga.generations), 2)\n\n # should have copied fitness\n self.assertFalse(ga.generations[-1].new)", "def test_clone_identical(self, cosmo):\n assert cosmo.clone() is cosmo", "def test_11_clone(self):\n # Test basic operation of cloning repo which contains one\n # publisher to repo which contains same publisher\n self.pkgrecv(self.durl1, \"--clone -d {0}\".format(self.dpath2))\n\n ret = subprocess.call([\"/usr/bin/gdiff\", \"-Naur\", \"-x\", \n \"index\", \"-x\", \"trans\", self.dpath1, self.dpath2])\n self.assertTrue(ret==0)\n\n # Test that packages in dst which are not in src get removed.\n self.pkgsend_bulk(self.durl2, (self.amber30))\n self.pkgrecv(self.durl1, \"--clone -d {0}\".format(self.dpath2))\n ret = subprocess.call([\"/usr/bin/gdiff\", \"-Naur\", \"-x\", \n \"index\", \"-x\", \"trans\", self.dpath1, self.dpath2])\n self.assertTrue(ret==0)\n\n # Test that clone reports publishers not in the dest repo.\n amber = self.amber10.replace(\"open \", \"open pkg://test2/\")\n self.pkgsend_bulk(self.durl1, amber)\n self.pkgrecv(self.durl1, \"--clone -d {0}\".format(self.dpath2), exit=1)\n\n # Test that clone adds new publishers if requested.\n amber = self.amber10.replace(\"open \", \"open pkg://test2/\")\n self.pkgsend_bulk(self.durl1, amber)\n self.pkgrecv(self.durl1, \"--clone -d {0} -p test2\".format(self.dpath2))\n ret = subprocess.call([\"/usr/bin/gdiff\", \"-Naur\", \"-x\", \n \"index\", \"-x\", \"trans\", self.dpath1,\n self.dpath2])\n self.assertTrue(ret==0)\n\n # Test that clone removes all packages if source is empty\n self.pkgrecv(self.durl3, \"--clone -d {0}\".format(self.dpath2))\n self.pkgrepo(\"-s {0} list -H -p test2\".format(self.dpath2))\n self.assertEqualDiff(\"\", self.output)\n\n # Test that clone works fine with mulitple publishers\n amber = self.amber10.replace(\"open \", \"open pkg://test2/\")\n self.pkgsend_bulk(self.durl1, amber)\n\n path = os.path.join(self.dpath2, \"publisher/test1\")\n shutil.rmtree(path)\n path = os.path.join(self.dpath2, \"publisher/test2\")\n shutil.rmtree(path)\n self.pkgrecv(self.durl1, \"--clone -d {0} -p test2 -p test1\".format(\n self.dpath2))\n ret = subprocess.call([\"/usr/bin/gdiff\", \"-Naur\", \"-x\",\n \"index\", \"-x\", \"trans\", self.dpath1, self.dpath2])\n self.assertTrue(ret==0)\n\n # Test that clone fails if --raw is specified.\n self.pkgrecv(self.durl1, \"--raw --clone -d {0} -p test2\".format(\n self.dpath2), exit=2)\n\n # Test that clone fails if -c is specified.\n self.pkgrecv(self.durl1, \"-c /tmp/ --clone -d {0} -p test2\".format(\n self.dpath2), exit=2)\n\n # Test that clone fails if -a is specified.\n self.pkgrecv(self.durl1, \"-a --clone -d {0} -p test2\".format(\n self.dpath2), exit=2)\n\n # Test that clone fails if --newest is specified.\n self.pkgrecv(self.durl1, \"--newest --clone -d {0} -p test2\".format(\n self.dpath2), exit=2)", "def test_clone_fail_unexpected_arg(self, cosmo):\n with pytest.raises(TypeError, match=\"unexpected keyword argument\"):\n newclone = cosmo.clone(not_an_arg=4)", "def test_clone(self):\n mock_query = 
MagicMock(return_value=\"\")\n with patch(\n \"salt.cloud.clouds.proxmox._get_properties\", MagicMock(return_value=[])\n ), patch(\"salt.cloud.clouds.proxmox.query\", mock_query):\n vm_ = {\n \"technology\": \"qemu\",\n \"name\": \"new2\",\n \"host\": \"myhost\",\n \"clone\": True,\n \"clone_from\": 123,\n }\n\n # CASE 1: Numeric ID\n result = proxmox.create_node(vm_, ANY)\n mock_query.assert_called_once_with(\n \"post\",\n \"nodes/myhost/qemu/123/clone\",\n {\"newid\": ANY},\n )\n assert result == {}\n\n # CASE 2: host:ID notation\n mock_query.reset_mock()\n vm_[\"clone_from\"] = \"otherhost:123\"\n result = proxmox.create_node(vm_, ANY)\n mock_query.assert_called_once_with(\n \"post\",\n \"nodes/otherhost/qemu/123/clone\",\n {\"newid\": ANY},\n )\n assert result == {}", "def test_copy(self):\n p = hw.create_tile_puzzle(3, 3)\n p2 = p.copy()\n self.assertTrue(p.get_board() == p2.get_board())\n p2.perform_move('up')\n self.assertFalse(p.get_board() == p2.get_board())", "def clone_rand(self):", "def test_clone_repository(koan, assert_cloned_repo_exists):\n koan.shell('')", "def clone(self):\n raise NotImplementedError", "def sanitize_clone(self):\n pass", "def test_create_cloned_volume(self, mock_ghn):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n orig = {'id': '1', 'name': 'volume1', 'display_name': '',\n 'volume_type_id': type_ref['id'], 'size': 10,\n 'provider_id': 'space_orig'}\n clone = {'id': '2', 'name': 'clone1', 'display_name': '',\n 'volume_type_id': type_ref['id'], 'size': 10}\n pid = self.driver.create_cloned_volume(clone, orig)\n # We must copy entier underlying storage, ~12GB, not just 10GB\n self.assertEqual(11444 * units.Mi, self.dd_count)\n self.assertEqual('1M', self.bs)\n # Check space-create command\n expected = {'redundancy': '0', 'group': 'xanadu',\n 'name': 'clone1', 'mode': '0777',\n 'user': 'kane', 'net': 'net1',\n 'storageserver': 'stor1:gbd0,stor2:gbd0,',\n 'size': '12'}\n self.assertDictMatch(expected, self.created)\n # Check the returned provider\n expected_pid = {'provider_id': 'clone1'}\n self.assertDictMatch(expected_pid, pid)", "def onClone(self):\n pass", "def clone(context, request):\n if request.has_permission('create'):\n return {\n 'name': 'clone',\n 'title': 'Clone',\n 'profile': '/profiles/{ti.name}.json'.format(ti=context.type_info),\n 'href': '{item_uri}#!clone'.format(item_uri=request.resource_path(context)),\n }", "def clone(repo, src, dest, shallow):\n print('Repo: %s' % repo)\n print('Source: %s' % src)\n print('Destination: %s' % dest)\n print('Shallow: %s' % shallow)", "def test_new(self):", "def test_new(self):", "def _mock_git_clone(self, args: List[str]) -> None:\n cloned_repo_root = args[-1]\n\n # Create \"cloned\" directory and subfolders.\n if cloned_repo_root.endswith('test-repo1'):\n self.fs.create_file(os.path.join(cloned_repo_root, 'yara', 'cloned.yara'))\n self.fs.create_file(os.path.join(cloned_repo_root, 'not_included.yara'))\n elif cloned_repo_root.endswith('test-repo2'):\n self.fs.create_file(os.path.join(cloned_repo_root, 'yara', 'cloned.yara'))\n self.fs.create_file(os.path.join(cloned_repo_root, 'yara', 'exluded_mobile.yara'))\n self.fs.create_file(os.path.join(cloned_repo_root, 'windows', 'excluded.yara'))\n elif cloned_repo_root.endswith('test-repo3'):\n self.fs.create_file(os.path.join(cloned_repo_root, 'yara', 'cloned.yara'))", "def component_clone ( same ) : \n if isinstance ( same , str ) \\\n and same.strip().lower() in ( 'clone' , 'cloned' , 
'same' ) : return True \n return False", "def test_pvc_to_pvc_clone(self, interface_type, teardown_factory):\n logger.info(f\"Running IO on pod {self.pod_obj.name}\")\n file_name = self.pod_obj.name\n logger.info(f\"File created during IO {file_name}\")\n self.pod_obj.run_io(storage_type=\"fs\", size=\"500M\", fio_filename=file_name)\n\n # Wait for fio to finish\n self.pod_obj.get_fio_results()\n logger.info(f\"Io completed on pod {self.pod_obj.name}.\")\n\n # Verify presence of the file\n file_path = pod.get_file_path(self.pod_obj, file_name)\n logger.info(f\"Actual file path on the pod {file_path}\")\n assert pod.check_file_existence(\n self.pod_obj, file_path\n ), f\"File {file_name} does not exist\"\n logger.info(f\"File {file_name} exists in {self.pod_obj.name}\")\n\n # Calculate md5sum of the file.\n orig_md5_sum = pod.cal_md5sum(self.pod_obj, file_name)\n\n # Create a clone of the existing pvc.\n sc_name = self.pvc_obj.backed_sc\n parent_pvc = self.pvc_obj.name\n clone_yaml = constants.CSI_RBD_PVC_CLONE_YAML\n namespace = self.pvc_obj.namespace\n if interface_type == constants.CEPHFILESYSTEM:\n clone_yaml = constants.CSI_CEPHFS_PVC_CLONE_YAML\n cloned_pvc_obj = pvc.create_pvc_clone(\n sc_name, parent_pvc, clone_yaml, namespace\n )\n teardown_factory(cloned_pvc_obj)\n helpers.wait_for_resource_state(cloned_pvc_obj, constants.STATUS_BOUND)\n cloned_pvc_obj.reload()\n\n # Create and attach pod to the pvc\n clone_pod_obj = helpers.create_pod(\n interface_type=interface_type,\n pvc_name=cloned_pvc_obj.name,\n namespace=cloned_pvc_obj.namespace,\n pod_dict_path=constants.NGINX_POD_YAML,\n )\n # Confirm that the pod is running\n helpers.wait_for_resource_state(\n resource=clone_pod_obj, state=constants.STATUS_RUNNING\n )\n clone_pod_obj.reload()\n teardown_factory(clone_pod_obj)\n\n # Verify file's presence on the new pod\n logger.info(\n f\"Checking the existence of {file_name} on cloned pod \"\n f\"{clone_pod_obj.name}\"\n )\n assert pod.check_file_existence(\n clone_pod_obj, file_path\n ), f\"File {file_path} does not exist\"\n logger.info(f\"File {file_name} exists in {clone_pod_obj.name}\")\n\n # Verify Contents of a file in the cloned pvc\n # by validating if md5sum matches.\n logger.info(\n f\"Verifying that md5sum of {file_name} \"\n f\"on pod {self.pod_obj.name} matches with md5sum \"\n f\"of the same file on restore pod {clone_pod_obj.name}\"\n )\n assert pod.verify_data_integrity(\n clone_pod_obj, file_name, orig_md5_sum\n ), \"Data integrity check failed\"\n logger.info(\"Data integrity check passed, md5sum are same\")\n\n logger.info(\"Run IO on new pod\")\n clone_pod_obj.run_io(storage_type=\"fs\", size=\"100M\", runtime=10)\n\n # Wait for IO to finish on the new pod\n clone_pod_obj.get_fio_results()\n logger.info(f\"IO completed on pod {clone_pod_obj.name}\")", "def test_clone_change_param(self, cosmo):\n super().test_clone_change_param(cosmo)\n\n # don't change any values\n kwargs = cosmo._init_arguments.copy()\n kwargs.pop(\"name\", None) # make sure not setting name\n c = cosmo.clone(**kwargs)\n assert c.__class__ == cosmo.__class__\n assert c.name == cosmo.name + \" (modified)\"\n assert c.is_equivalent(cosmo)\n\n # change ``H0``\n # Note that H0 affects Ode0 because it changes Ogamma0\n c = cosmo.clone(H0=100)\n assert c.__class__ == cosmo.__class__\n assert c.name == cosmo.name + \" (modified)\"\n assert c.H0.value == 100\n for n in (\"Om0\", \"Ode0\", \"Tcmb0\", \"Neff\", \"m_nu\", \"Ok0\", \"Ob0\"):\n v = getattr(c, n)\n if v is None:\n assert v is getattr(cosmo, 
n)\n continue\n assert u.allclose(v, getattr(cosmo, n), atol=1e-4 * getattr(v, \"unit\", 1))\n assert not u.allclose(c.Ogamma0, cosmo.Ogamma0)\n assert not u.allclose(c.Onu0, cosmo.Onu0)\n\n # change multiple things\n c = cosmo.clone(name=\"new name\", H0=100, Tcmb0=2.8, meta=dict(zz=\"tops\"))\n assert c.__class__ == cosmo.__class__\n assert c.name == \"new name\"\n assert c.H0.value == 100\n assert c.Tcmb0.value == 2.8\n assert c.meta == {**cosmo.meta, **dict(zz=\"tops\")}\n for n in (\"Om0\", \"Ode0\", \"Neff\", \"m_nu\", \"Ok0\", \"Ob0\"):\n v = getattr(c, n)\n if v is None:\n assert v is getattr(cosmo, n)\n continue\n assert u.allclose(v, getattr(cosmo, n), atol=1e-4 * getattr(v, \"unit\", 1))\n assert not u.allclose(c.Ogamma0, cosmo.Ogamma0)\n assert not u.allclose(c.Onu0, cosmo.Onu0)\n assert not u.allclose(c.Tcmb0.value, cosmo.Tcmb0.value)", "def test_copy(self):\n data = [[0, 1], [1, 0]]\n b1 = Board(data)\n b2 = b1.copy()\n # test if proper copy\n self.assertListEqual(b1.data, b2.data)\n # test if not just a shallow copy\n b1.data[0][0] = 1\n self.assertNotEqual(b1.data[0][0], b2.data[0][0])", "def test_clone_name(self, cosmo):\n # test changing name. clone treats 'name' differently (see next test)\n c = cosmo.clone(name=\"cloned cosmo\")\n assert c.name == \"cloned cosmo\" # changed\n # show name is the only thing changed\n c._name = cosmo.name # first change name back\n assert c == cosmo\n assert c.meta == cosmo.meta\n\n # now change a different parameter and see how 'name' changes\n c = cosmo.clone(meta={})\n assert c.name == cosmo.name + \" (modified)\"", "def clone( m, orig):\r\n if m.ObjType not in (1, 6): return\r\n if not orig: return\r\n \r\n if m.ObjType == 6: # Target is a Folder\r\n if orig.ObjType == 6: cloned = m.CopyFolderDisp( orig) # Orig is Folder too\r\n else: cloned = m.CopyFCODisp( orig) # Orig is FCO\r\n elif m.ObjType == 1:\r\n cloned = m.CopyFCODisp( orig, metaRole( orig)) # Target is Model, Orig is FCO\r\n \r\n if cloned:\r\n \tcloned.Name = \"Cloned\" + orig.Name\r\n return cloned", "def GetClone(self, *args, **kwargs):\n pass", "def test_copy(self):\n\n # Copy the 'orig' data pipe to the 'new' data pipe.\n pipes.copy('orig', 'new')\n\n # Test that the new data pipe exists.\n self.assert_('new' in ds)\n\n # Test that the new data pipe has the object 'x' and that its value is 1.\n self.assertEqual(ds['new'].x, 1)\n\n # Change the value of x.\n ds['new'].x = 2\n\n # Test that the two values are different.\n self.assert_(ds['orig'].x != ds['new'].x)\n\n # Test that the new data pipe has the object 'mol[0].res[0].spin[0].num' and that its value is 1.\n self.assertEqual(ds['new'].mol[0].res[0].spin[0].num, 1)\n\n # Change the spin system number.\n ds['new'].mol[0].res[0].spin[0].num = 2\n\n # Test that the original spin system number hasn't changed.\n self.assertEqual(ds['orig'].mol[0].res[0].spin[0].num, 1)" ]
[ "0.7862457", "0.73369557", "0.72778666", "0.7237186", "0.71096575", "0.7100466", "0.70886004", "0.70301014", "0.69382375", "0.6869478", "0.6818836", "0.6739689", "0.6681371", "0.65595156", "0.65106905", "0.6453624", "0.6449878", "0.64137197", "0.6380553", "0.6289498", "0.6289498", "0.6276729", "0.6275772", "0.6249315", "0.62373435", "0.6213653", "0.61955845", "0.61808825", "0.617438", "0.6171129" ]
0.94704384
0
Test case for clone_system
def test_clone_system(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_clone_scenario(self):\n pass", "def test_clone_repository(m_check):\n m_check.return_value = 0\n assert clone_repository(\"test\", \"test\", \"test\") == 0", "def test_clone_deployment(self):\n pass", "def test_11_clone(self):\n # Test basic operation of cloning repo which contains one\n # publisher to repo which contains same publisher\n self.pkgrecv(self.durl1, \"--clone -d {0}\".format(self.dpath2))\n\n ret = subprocess.call([\"/usr/bin/gdiff\", \"-Naur\", \"-x\", \n \"index\", \"-x\", \"trans\", self.dpath1, self.dpath2])\n self.assertTrue(ret==0)\n\n # Test that packages in dst which are not in src get removed.\n self.pkgsend_bulk(self.durl2, (self.amber30))\n self.pkgrecv(self.durl1, \"--clone -d {0}\".format(self.dpath2))\n ret = subprocess.call([\"/usr/bin/gdiff\", \"-Naur\", \"-x\", \n \"index\", \"-x\", \"trans\", self.dpath1, self.dpath2])\n self.assertTrue(ret==0)\n\n # Test that clone reports publishers not in the dest repo.\n amber = self.amber10.replace(\"open \", \"open pkg://test2/\")\n self.pkgsend_bulk(self.durl1, amber)\n self.pkgrecv(self.durl1, \"--clone -d {0}\".format(self.dpath2), exit=1)\n\n # Test that clone adds new publishers if requested.\n amber = self.amber10.replace(\"open \", \"open pkg://test2/\")\n self.pkgsend_bulk(self.durl1, amber)\n self.pkgrecv(self.durl1, \"--clone -d {0} -p test2\".format(self.dpath2))\n ret = subprocess.call([\"/usr/bin/gdiff\", \"-Naur\", \"-x\", \n \"index\", \"-x\", \"trans\", self.dpath1,\n self.dpath2])\n self.assertTrue(ret==0)\n\n # Test that clone removes all packages if source is empty\n self.pkgrecv(self.durl3, \"--clone -d {0}\".format(self.dpath2))\n self.pkgrepo(\"-s {0} list -H -p test2\".format(self.dpath2))\n self.assertEqualDiff(\"\", self.output)\n\n # Test that clone works fine with mulitple publishers\n amber = self.amber10.replace(\"open \", \"open pkg://test2/\")\n self.pkgsend_bulk(self.durl1, amber)\n\n path = os.path.join(self.dpath2, \"publisher/test1\")\n shutil.rmtree(path)\n path = os.path.join(self.dpath2, \"publisher/test2\")\n shutil.rmtree(path)\n self.pkgrecv(self.durl1, \"--clone -d {0} -p test2 -p test1\".format(\n self.dpath2))\n ret = subprocess.call([\"/usr/bin/gdiff\", \"-Naur\", \"-x\",\n \"index\", \"-x\", \"trans\", self.dpath1, self.dpath2])\n self.assertTrue(ret==0)\n\n # Test that clone fails if --raw is specified.\n self.pkgrecv(self.durl1, \"--raw --clone -d {0} -p test2\".format(\n self.dpath2), exit=2)\n\n # Test that clone fails if -c is specified.\n self.pkgrecv(self.durl1, \"-c /tmp/ --clone -d {0} -p test2\".format(\n self.dpath2), exit=2)\n\n # Test that clone fails if -a is specified.\n self.pkgrecv(self.durl1, \"-a --clone -d {0} -p test2\".format(\n self.dpath2), exit=2)\n\n # Test that clone fails if --newest is specified.\n self.pkgrecv(self.durl1, \"--newest --clone -d {0} -p test2\".format(\n self.dpath2), exit=2)", "def test_clone_repository(koan, assert_cloned_repo_exists):\n koan.shell('')", "def test_clone(self):\n mock_query = MagicMock(return_value=\"\")\n with patch(\n \"salt.cloud.clouds.proxmox._get_properties\", MagicMock(return_value=[])\n ), patch(\"salt.cloud.clouds.proxmox.query\", mock_query):\n vm_ = {\n \"technology\": \"qemu\",\n \"name\": \"new2\",\n \"host\": \"myhost\",\n \"clone\": True,\n \"clone_from\": 123,\n }\n\n # CASE 1: Numeric ID\n result = proxmox.create_node(vm_, ANY)\n mock_query.assert_called_once_with(\n \"post\",\n \"nodes/myhost/qemu/123/clone\",\n {\"newid\": ANY},\n )\n assert result == {}\n\n # CASE 2: host:ID 
notation\n mock_query.reset_mock()\n vm_[\"clone_from\"] = \"otherhost:123\"\n result = proxmox.create_node(vm_, ANY)\n mock_query.assert_called_once_with(\n \"post\",\n \"nodes/otherhost/qemu/123/clone\",\n {\"newid\": ANY},\n )\n assert result == {}", "def test_clone_repository_into_other_directory(koan, assert_cloned_repo_exists_in_other_directory):\n koan.shell('')", "def _mock_git_clone(self, args: List[str]) -> None:\n cloned_repo_root = args[-1]\n\n # Create \"cloned\" directory and subfolders.\n if cloned_repo_root.endswith('test-repo1'):\n self.fs.create_file(os.path.join(cloned_repo_root, 'yara', 'cloned.yara'))\n self.fs.create_file(os.path.join(cloned_repo_root, 'not_included.yara'))\n elif cloned_repo_root.endswith('test-repo2'):\n self.fs.create_file(os.path.join(cloned_repo_root, 'yara', 'cloned.yara'))\n self.fs.create_file(os.path.join(cloned_repo_root, 'yara', 'exluded_mobile.yara'))\n self.fs.create_file(os.path.join(cloned_repo_root, 'windows', 'excluded.yara'))\n elif cloned_repo_root.endswith('test-repo3'):\n self.fs.create_file(os.path.join(cloned_repo_root, 'yara', 'cloned.yara'))", "def test_create_cloned_volume(self, mock_ghn):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n orig = {'id': '1', 'name': 'volume1', 'display_name': '',\n 'volume_type_id': type_ref['id'], 'size': 10,\n 'provider_id': 'space_orig'}\n clone = {'id': '2', 'name': 'clone1', 'display_name': '',\n 'volume_type_id': type_ref['id'], 'size': 10}\n pid = self.driver.create_cloned_volume(clone, orig)\n # We must copy entire underlying storage, ~12GB, not just 10GB\n self.assertEqual(11444 * units.Mi, self.dd_count)\n self.assertEqual('1M', self.bs)\n # Check space-create command\n expected = {'redundancy': '0', 'group': 'xanadu',\n 'name': 'clone1', 'mode': '0777',\n 'user': 'kane', 'net': 'net1',\n 'storageserver': 'stor1:gbd0,stor2:gbd0,',\n 'size': '12'}\n self.assertDictMatch(expected, self.created)\n # Check the returned provider\n expected_pid = {'provider_id': 'clone1'}\n self.assertDictMatch(expected_pid, pid)", "def test_clone_identical(self, cosmo):\n assert cosmo.clone() is cosmo", "def do_clone(self, arg):\n checkLocalGitLocation()\n teamorindividual = input(\"Is this a team or individual (t or i):\")\n if teamorindividual == 'i':\n for student in returnAllStudents():\n os.system(\"cd %s && git clone https://github.ccs.neu.edu/%s\" %\n (localgitlocation, 'cs5500/' + student))\n else:\n for team in returnAllTeams():\n os.system(\"cd %s && git clone https://github.ccs.neu.edu/%s/%s\" %\n (localgitlocation, githuborg, team))", "def test_clone_change_param(self, cosmo):\n pass", "def clone_repository():\n try:\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(hostname=exec_server_address, username=exec_server_username, password=exec_server_password)\n command = 'cd ' + exec_server_working_directory + '; rm -rf ' + robot_tests_directory\n ssh.exec_command(command=command, timeout=180)\n command = 'cd ' + exec_server_working_directory + '; git clone ' + bitbucket_repository_url\n ssh.exec_command(command=command, timeout=1800)\n ssh.close()\n except Exception as error:\n print(\"Failed to connect to execution server \" + exec_server_address)", "def test_clone_nonexistent(tmpdir):\n repo_url = 'git://github.com/Tinche/a-repo-i-will-never-make'\n with pytest.raises(gitwrapper.GitException):\n gitwrapper.clone_from(repo_url, tmpdir)", "def 
test_create_system_entire(self):\n pass", "def _standby_clone():\n # manually:\n # $ mkdir -p /var/lib/postgresql/9.1/testscluster/\n # $ rsync -avz --rsh='ssh -p2222' [email protected]:/var/lib/postgresql/9.1/testscluster/ /var/lib/postgresql/9.1/testscluster/\n\n with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):\n puts(green('Start cloning the master'))\n repmgr_clone_command = 'repmgr -D %(slave_pgdata_path)s -d %(sync_db)s -p %(cluster_port)s -U %(sync_user)s -R postgres --verbose standby clone %(pgmaster_ip)s' % env\n puts(green(repmgr_clone_command))\n puts(\"-\" * 40)\n res = sudo(repmgr_clone_command, user='postgres')\n if 'Can not connect to the remote host' in res or 'Connection to database failed' in res:\n puts(\"-\" * 40)\n puts(green(repmgr_clone_command))\n puts(\"-\" * 40)\n puts(\"Master server is %s reachable.\" % red(\"NOT\"))\n puts(\"%s you can try to CLONE the slave manually [%s]:\" % (green(\"BUT\"), red(\"at your own risk\")))\n puts(\"On the slave server:\")\n puts(\"$ sudo -u postgres rsync -avz --rsh='ssh -p%(master_ssh_port)s' postgres@%(pgmaster_ip)s:%(master_pgdata_path)s %(slave_pgdata_path)s --exclude=pg_xlog* --exclude=pg_control --exclude=*.pid\" % env)\n puts(\"Here:\")\n puts(\"$ fab <cluster_task_name> finish_configuring_slave\")\n abort(\"STOP...\")", "def clone(repo, src, dest, shallow):\n print('Repo: %s' % repo)\n print('Source: %s' % src)\n print('Destination: %s' % dest)\n print('Shallow: %s' % shallow)", "def test_clone_fail_unexpected_arg(self, cosmo):\n with pytest.raises(TypeError, match=\"unexpected keyword argument\"):\n newclone = cosmo.clone(not_an_arg=4)", "def _clone(self):\n subprocess.call(['git', 'clone', self.source, self.path])\n self._set_chmod()\n return self.check(force=False)", "def repos_clone_steps(self):\n platform = self.platform\n # required by coho tools to correctly resolve repo location\n if platform == \"blackberry10\":\n platform = \"blackberry\"\n return [\n ShellCommand(command=[\"node\", \"medic/checkout.js\", \"--path=medic/repos.json\", \"--cat=CORDOVA-MSPEC\", \"--releasebranch=\" + CONFIG.branch_release], workdir='build', haltOnFailure=True, description='Clone Mobilespec'),\n ShellCommand(command=[\"node\", \"medic/checkout.js\", \"--path=medic/repos.json\", \"--cat=CORDOVA-PLUGIN\", \"--releasebranch=\" + CONFIG.branch_release], workdir='build', haltOnFailure=True, description='Clone Plugins'),\n ShellCommand(command=[\"node\", \"medic/checkout.js\", \"--path=medic/repos.json\", \"--cat=CORDOVA-\" + platform, \"--releasebranch=\" + CONFIG.branch_release], workdir='build', haltOnFailure=True, description='Clone Platform'),\n ShellCommand(command=[\"node\", \"medic/checkout.js\", \"--path=medic/repos.json\", \"--cat=CORDOVA-JS\", \"--releasebranch=\" + CONFIG.branch_release], workdir='build', haltOnFailure=True, description='Clone JS'),\n ]", "def clone_execution_system(self, host_system_id, new_system_id, alloc):\n\n clone_body = {\n 'action': 'CLONE',\n 'id': new_system_id\n }\n\n cloned_sys = self.client.systems.manage(body=clone_body, systemId=host_system_id)\n\n sys = self.validate_exec_system(cloned_sys['id'], alloc)\n\n return sys", "def setUpClass(cls):\n Git.clone('test-clone/', 'https://github.com/ChielBruin/Gitcovery.git')\n cls.root = Git.checkout('ede9c381daf318a87a58ed9607549132e150f145')", "def create_repo_clone(self, path, https):\n _, _, login, remote_dir = path.split('/', 3) # 3 x '/' before real path\n remote_dir = os.path.dirname(remote_dir) # final segment from 
clone\n print remote_dir\n cmd = ['ssh', login, 'mkdir', '-p', remote_dir]\n print cmd\n check_output(cmd)\n cmd = ['ssh', login, 'cd', remote_dir, ';', 'hg', 'clone', https]\n #cmd = ['ssh', login, 'cd {} ; hg clone {}'.format(remote_dir, path.replace('ssh:', 'https:'))]\n print cmd\n check_output(cmd)", "def test_13_output(self):\n\n # Now attempt to receive from a repository.\n self.pkgrepo(\"create {0}\".format(self.tempdir))\n self.pkgrecv(self.dpath1, \"-d {0} -n -v \\*\".format(self.tempdir))\n expected = \"\"\"\\\nRetrieving packages (dry-run) ...\n Packages to add: 9\n Files to retrieve: 17\nEstimated transfer size: 528.00 B\n\"\"\"\n self.assert_(expected in self.output, self.output)\n for s in self.published:\n self.assert_(fmri.PkgFmri(s).get_fmri(anarchy=True,\n include_scheme=False) in self.output)\n\n # Clean up for next test.\n shutil.rmtree(self.tempdir)\n\n # Now attempt to receive from a repository to a package archive.\n self.pkgrecv(self.dpath1, \"-a -d {0} -n -v \\*\".format(self.tempdir))\n expected = \"\"\"\\\nArchiving packages (dry-run) ...\n Packages to add: 9\n Files to retrieve: 17\nEstimated transfer size: 528.00 B\n\"\"\"\n self.assert_(expected in self.output, self.output)\n for s in self.published:\n self.assert_(fmri.PkgFmri(s).get_fmri(anarchy=True,\n include_scheme=False) in self.output)\n\n # Now attempt to clone a repository.\n self.pkgrepo(\"create {0}\".format(self.tempdir))\n self.pkgrecv(self.dpath1, \"--clone -d {0} -p \\* -n -v\" \\\n .format(self.tempdir))\n expected = \"\"\"\\\nRetrieving packages (dry-run) ...\n Packages to add: 9\n Files to retrieve: 17\nEstimated transfer size: 528.00 B\n\"\"\"\n self.assert_(expected in self.output, self.output)\n for s in self.published:\n self.assert_(fmri.PkgFmri(s).get_fmri(anarchy=True,\n include_scheme=False) in self.output)\n\n # Test that output is correct if -n is not specified.\n self.pkgrecv(self.dpath1, \"-d {0} -v \\*\".format(self.tempdir))\n self.assert_(\"dry-run\" not in self.output)", "def cloneDB():\n print(\"::cloning db\")\n filepath = confighome+\"config\"\n\n # open config to get credentials for ssh \n with open(filepath,mode='r', encoding='utf-8') as f:\n jconfig = json.load(f)\n creds=jconfig[0]\n\n # locally clone the \"db\"\n cmd_full=\"git clone \"+creds['db']['username']+\"@\"+creds['db']['host']+\":swrss_database\"\n print(\"::cmd=\",cmd_full)\n retval= os.system(cmd_full)\n if (retval==0):\n print(\"::synced successfully\")\n\n print(\"::system returned \",retval)", "def clone(c):\n\n for p in get_config().get('packages', []):\n try:\n c.run(f\"git clone {p}\")\n except UnexpectedExit as e:\n pass", "def clone_ex(self, new_vm_name, new_vm_root_path, nFlags):\n\t\treturn Job(SDK.PrlVm_CloneEx(self.handle, new_vm_name, new_vm_root_path, nFlags)[0])", "def _clone_to(vcs, location):\n try:\n subprocess.Popen([vcs.type, \"clone\", vcs.url], cwd=location)\n except:\n return location", "def test_init_on_cloned_repo(isolated_runner, data_repository, project_init):\n data, commands = project_init\n\n new_project = Path(data[\"test_project\"])\n import shutil\n\n shutil.copytree(str(data_repository.working_dir), str(new_project))\n assert new_project.exists()\n\n # try to create in a dirty folder\n result = isolated_runner.invoke(cli, commands[\"init_test\"] + commands[\"id\"], commands[\"confirm\"])\n assert 0 == result.exit_code, format_result_exception(result)\n assert new_project.exists()\n assert (new_project / \".renku\").exists()\n assert (new_project / \".renku\" / 
\"renku.ini\").exists()\n assert (new_project / \".renku\" / \"metadata\").exists()", "def clone(self):" ]
[ "0.8047334", "0.76347864", "0.7441855", "0.7439768", "0.7325142", "0.6886465", "0.67456806", "0.6739956", "0.6657189", "0.64944553", "0.6468549", "0.64062333", "0.6359524", "0.62620795", "0.6221049", "0.6213149", "0.6211276", "0.6207467", "0.616226", "0.61290413", "0.6113984", "0.6089901", "0.6056109", "0.60434306", "0.6031003", "0.60102445", "0.59852016", "0.5982926", "0.5978433", "0.59725744" ]
0.93936086
0
Test case for create_category
def test_create_category(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_0005_create_categories(self):\n self.create_category(name='Test 0060 Workflow Features', description='Test 0060 - Workflow Features')", "def test_create_category(self):\n res = self.client().post('/categories/', data=self.category)\n self.assertEqual(res.status_code, 201)\n self.assertIn('Stews', str(res.data))", "def test_create_recipe_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n self.dashboard()\n self.category('JunkFood')\n self.dashboard()\n self.recipe_dashboard()\n rv = self.create_recipe('cakes', 'blah, blah, blah....mix ingredient, heat')\n self.assertIn(b'Recipe created', rv.data)", "def test_add_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n self.dashboard()\n rv = self.category('Breakfast')\n self.assertIn(b'Category created', rv.data)", "def test_create(self):\n self.assertTrue(Category.objects.exists())", "def test_create_category(self):\n payload = {\n 'name': 'Houses',\n }\n res = self.client.post(CATEGORY_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n category = Category.objects.get(id=res.data['id'])\n serializer = CategorySerializer(category)\n self.assertEqual(serializer.data['name'], payload['name'])", "def create_category():\n category = Category(name='testcategory', description=\"\", fee=DEFAULT_FEE)\n category.save()\n return category", "def test_add_category(self):\n self.add_success(self.test_data['pants'])", "def test_create_cat_object():\n from .scripts.initializedb import create_cat_object\n cat_object = create_cat_object(\"a\", \"b\", \"c\", \"c\")\n assert isinstance(cat_object, Category)", "def test_create_category_with_existing_name(self):\n sample_category()\n res = self.client.post(CATEGORY_URL, {\"name\": \"place\"})\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(\n res.data['errors']['name'][0],\n 'This field must be unique.')", "def test_dashboard_recipe_created_with_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n self.dashboard()\n self.category('JunkFood')\n self.dashboard()\n rv = self.recipe_dashboard()\n self.assertIn(b'JunkFood', rv.data)", "def test_project_category_creation(self):\n name = 'A project category name'\n description = 'A project category description'\n project_category = self.create_project_category(\n name=name,\n description=description,\n )\n self.assertTrue(isinstance(project_category, ProjectCategory))\n self.assertEqual(project_category.__str__(), project_category.name)\n self.assertEqual(project_category.name, name)\n self.assertEqual(project_category.description, description)", "def sample_category(name='place'):\n return Category.objects.create(name=name)", "def test_delete_category(self):\n pass", "def test_add_category_success(self):\n category = json.dumps({\n 'name': 'Asian',\n })\n response = self.client.post('/category', data=category,\n headers={\"Authorization\": self.token})\n self.assertEqual(response.status_code, 201)\n self.assertIn('asian', response.data.decode())", "def create_category(name):\n return Category.objects.create(name=name)", "def test_create_category(self):\n self.assertEqual(Project.objects.count(), 2)\n\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_CATEGORY_TITLE,\n 'type': PROJECT_TYPE_CATEGORY,\n 'parent': '',\n 'description': 
'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 201, msg=response.content)\n self.assertEqual(Project.objects.count(), 3)\n # Assert object content\n new_category = Project.objects.get(title=NEW_CATEGORY_TITLE)\n model_dict = model_to_dict(new_category)\n model_dict['readme'] = model_dict['readme'].raw\n expected = {\n 'id': new_category.pk,\n 'title': new_category.title,\n 'type': new_category.type,\n 'parent': None,\n 'description': new_category.description,\n 'readme': new_category.readme.raw,\n 'public_guest_access': False,\n 'archive': False,\n 'full_title': new_category.title,\n 'has_public_children': False,\n 'sodar_uuid': new_category.sodar_uuid,\n }\n self.assertEqual(model_dict, expected)\n # Assert role assignment\n self.assertEqual(\n RoleAssignment.objects.filter(\n project=new_category, user=self.user, role=self.role_owner\n ).count(),\n 1,\n )\n # Assert API response\n expected = {\n 'title': NEW_CATEGORY_TITLE,\n 'type': PROJECT_TYPE_CATEGORY,\n 'parent': None,\n 'description': new_category.description,\n 'readme': new_category.readme.raw,\n 'public_guest_access': False,\n 'sodar_uuid': str(new_category.sodar_uuid),\n }\n self.assertEqual(json.loads(response.content), expected)", "def test_can_create_job_category(self):\n\t\tself.job_category.save()\n\t\tjob_category_instance = JobCategory.objects.get(pk=1)\n\t\tself.assertEqual(\n\t\t\tself.category,\n\t\t\tjob_category_instance.category,\n\t\t\t\"Job categories don't match.\"\n\t\t)", "def test_update_category(self):\n pass", "def test_add_category_to_asset(self):\n pass", "def test_add_category_with_perms(self):\n self.client.login(username='hodor', password='hodor')\n Perms.objects.create(user=self.user, access_level=2).save()\n response = self.client.post('/categories/add/', {'categoryType': 'tr0npr0n'})\n self.assertRedirects(response, '/categories/')", "def test_23_admin_add_category(self):\r\n self.create()\r\n category = {'name': 'cat', 'short_name': 'cat',\r\n 'description': 'description'}\r\n # Anonymous user\r\n url = '/admin/categories'\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Anonymous users should be redirected to sign in\"\r\n assert dom.find(id='signin') is not None, err_msg\r\n\r\n # Authenticated user but not admin\r\n self.signin(email=self.email_addr2, password=self.password)\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n err_msg = \"Non-Admin users should get 403\"\r\n assert res.status_code == 403, err_msg\r\n self.signout()\r\n\r\n # Admin\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n err_msg = \"Category should be added\"\r\n assert \"Category added\" in res.data, err_msg\r\n assert category['name'] in res.data, err_msg\r\n\r\n category = {'name': 'cat', 'short_name': 'cat',\r\n 'description': 'description'}\r\n\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n err_msg = \"Category form validation should work\"\r\n assert \"Please correct the errors\" in res.data, err_msg", "def test_create_category_nested(self):\n self.assertEqual(Project.objects.count(), 2)\n\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': 
NEW_CATEGORY_TITLE,\n 'type': PROJECT_TYPE_CATEGORY,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 201, msg=response.content)\n self.assertEqual(Project.objects.count(), 3)\n new_category = Project.objects.get(title=NEW_CATEGORY_TITLE)\n model_dict = model_to_dict(new_category)\n model_dict['readme'] = model_dict['readme'].raw\n expected = {\n 'id': new_category.pk,\n 'title': new_category.title,\n 'type': new_category.type,\n 'parent': self.category.pk,\n 'description': new_category.description,\n 'readme': new_category.readme.raw,\n 'public_guest_access': False,\n 'archive': False,\n 'full_title': self.category.title + ' / ' + new_category.title,\n 'has_public_children': False,\n 'sodar_uuid': new_category.sodar_uuid,\n }\n self.assertEqual(model_dict, expected)\n self.assertEqual(\n RoleAssignment.objects.filter(\n project=new_category, user=self.user, role=self.role_owner\n ).count(),\n 1,\n )\n expected = {\n 'title': NEW_CATEGORY_TITLE,\n 'type': PROJECT_TYPE_CATEGORY,\n 'parent': str(self.category.sodar_uuid),\n 'description': new_category.description,\n 'readme': new_category.readme.raw,\n 'public_guest_access': False,\n 'sodar_uuid': str(new_category.sodar_uuid),\n }\n self.assertEqual(json.loads(response.content), expected)", "def test_create_category_with_invalid_details_fails(self):\n res = self.client.post(CATEGORY_URL, {})\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(\n res.data['errors']['name'][0],\n 'This field is required.')", "def test_create_post(self):\n self.test_category = Category.objects.create(name='django')\n self.testuser1 = User.objects.create_superuser(\n username='test_user1', password='123456789')\n # self.testuser1.is_staff = True\n\n self.client.login(username=self.testuser1.username,\n password='123456789')\n\n data = {\"title\": \"new\", \"author\": 1,\n \"excerpt\": \"new\", \"content\": \"new\"}\n url = reverse('blog_api:listcreate')\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)", "def test_new_category_data(db_session):\n new_cat = Category(\n label=\"test_label\",\n desc=\"test_desc\"\n )\n db_session.add(new_cat)\n category = db_session.query(Category).all()\n assert category[0].label == \"test_label\"\n assert category[0].desc == \"test_desc\"", "def test_get_categories(self):\n pass", "def create_category(self, category):\n\n super().new_entry()\n\n return Categories.objects.create(\n name=category['id'].split(':')[1],\n name_fr=category['name'],\n url=category['url']\n )", "def test_category_addition(self):\n login = self.autheniticate()\n token = json.loads(login.data.decode()).get('token')\n res = self.app.post(category_url,\n data=json.dumps(self.data),\n headers=dict(Authorization=\"Bearer \" + token),\n content_type='application/json')\n res1 = json.loads(res.data.decode())\n self.assertEqual(res1['status'], 'Success!')\n self.assertEqual(res.status_code, 201)", "def test_create_finder_category(self):\n self.assertEqual(\n RoleAssignment.objects.filter(project=self.category).count(), 1\n )\n url = reverse(\n 'projectroles:api_role_create',\n kwargs={'project': self.category.sodar_uuid},\n )\n post_data = {\n 'role': PROJECT_ROLE_FINDER,\n 'user': str(self.assign_user.sodar_uuid),\n }\n response = 
self.request_knox(\n url,\n method='POST',\n data=post_data,\n token=self.get_token(self.user_owner_cat),\n )\n self.assertEqual(response.status_code, 201, msg=response.content)\n self.assertEqual(\n RoleAssignment.objects.filter(project=self.category).count(), 2\n )\n role_as = RoleAssignment.objects.filter(\n project=self.category, role=self.role_finder, user=self.assign_user\n ).first()\n self.assertIsNotNone(role_as)" ]
[ "0.8499705", "0.8336913", "0.8146069", "0.8084576", "0.8025293", "0.80050004", "0.7836217", "0.7816334", "0.77271616", "0.77140254", "0.7675138", "0.7601237", "0.75862074", "0.7489063", "0.7486537", "0.7441543", "0.74232167", "0.7394554", "0.731856", "0.73099464", "0.7226296", "0.7165073", "0.7127406", "0.7095373", "0.7076141", "0.70662236", "0.70600027", "0.70363665", "0.70060426", "0.6991024" ]
0.9331137
0
Test case for create_deployment_entire
def test_create_deployment_entire(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_deployment(self):\n pass", "def test_execute_deployment(self):\n pass", "def test_get_deployment(self):\n pass", "def test_get_deployment(self):\n pass", "def test_launch_deployment(self):\n pass", "def test_publish_deployment_run(self):\n pass", "def test_get_deployment_run(self):\n pass", "def test_create_namespaced_deployment_config(self):\n pass", "def test_get_deployment_runs1(self):\n pass", "def test_update_deployment(self):\n pass", "def test_deployment(self):\n config = {'example.com': {\n 'applications': {\n 'site-hybridcluster': {\n 'image': 'unknown',\n },\n 'mysql-hybridcluster': {\n 'image': 'unknown',\n }\n },\n 'version': 1\n }}\n expected = Deployment(nodes=frozenset([\n Node(hostname='example.com', applications=frozenset([\n Application(\n name='mysql-hybridcluster',\n image=DockerImage.from_string('unknown'),\n ports=frozenset(),\n ),\n Application(\n name='site-hybridcluster',\n image=DockerImage.from_string('unknown'),\n )]))]))\n self.assertEqual(expected,\n current_from_configuration(config))", "def test_get_deployment_resource(self):\n pass", "def test_clone_deployment(self):\n pass", "def test_create_namespaced_deployment_request_instantiate(self):\n pass", "def test_release_deployment_run(self):\n pass", "def test_generate_namespaced_deployment_config(self):\n pass", "def test_get_deployment_resources(self):\n pass", "def test_create_namespaced_deployment_config_rollback(self):\n pass", "def test_get_deployment_runs(self):\n pass", "def test_delete_deployment(self):\n pass", "def test_delete_deployment_run(self):\n pass", "def test_create_deployment_config_for_all_namespaces(self):\n pass", "def test_get_deployments(self):\n pass", "def test_get_deployments(self):\n pass", "def test_retest_deployment_run(self):\n pass", "def create_deployment(StackId=None, AppId=None, InstanceIds=None, LayerIds=None, Command=None, Comment=None, CustomJson=None):\n pass", "def test_create_namespaced_deployment_config_rollback_rollback(self):\n pass", "def _create_deployment(self) -> Optional[str]:\n LOG.debug(\"%sTrying to create a deployment through client\", self.log_prefix)\n response_dep = cast(\n Dict, self._api_client.create_deployment(restApiId=self._api_physical_id, description=\"Created by SAM Sync\")\n )\n new_dep_id = response_dep.get(\"id\")\n LOG.debug(\"%sCreate Deployment Result: %s\", self.log_prefix, response_dep)\n return new_dep_id", "def test_get_deployment_resource_data(self):\n pass", "def _create_deployment(self) -> aws.apigateway.Stage:\n deployment = aws.apigateway.Deployment(\n f\"{self.rest_api._name}-deployment\",\n rest_api=self.rest_api.id,\n # TODO: Still want to have a triggers function\n opts=pulumi.ResourceOptions(\n parent=self, depends_on=[p.lambda_integration for p in self.proxies]\n ),\n )\n\n stage = aws.apigateway.Stage(\n f\"{self.rest_api._name}-prod-stage\",\n deployment=deployment.id,\n rest_api=self.rest_api.id,\n stage_name=\"prod\",\n opts=pulumi.ResourceOptions(parent=self),\n )\n\n return stage" ]
[ "0.88904864", "0.7746057", "0.7681598", "0.7681598", "0.75200987", "0.74070346", "0.73362505", "0.73342454", "0.7310437", "0.7308697", "0.7112917", "0.710564", "0.7103114", "0.7078024", "0.7074777", "0.69106066", "0.6890238", "0.68823415", "0.68766534", "0.68764615", "0.6770871", "0.6769607", "0.6735776", "0.6735776", "0.66634583", "0.6643798", "0.66042817", "0.6594571", "0.6579576", "0.65762484" ]
0.9384189
0
Test case for create_project
def test_create_project(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_project_request(self):\n pass", "def test_add_project(self):\n pass", "def test_project_creation(self):\n title = 'Project title'\n code = 'SCW-12345'\n project = self.create_project(\n title=title,\n code=code,\n institution=self.institution,\n tech_lead=self.project_owner,\n category=self.category,\n funding_source=self.funding_source,\n )\n self.assertTrue(isinstance(project, Project))\n self.assertEqual(project.__str__(), code + ' - ' + title)\n self.assertEqual(project.status, Project.AWAITING_APPROVAL)\n self.assertEqual(project.title, title)\n self.assertEqual(project.code, code)\n self.assertTrue(project.awaiting_approval())", "def test_create_project(self):\n yield self.nodes[0].overlay.create_project(\"test\", \"specpointer\", \"01-02-03\", 300, \"EUR\", 5)\n yield self.deliver_messages()\n\n # Node 2 should know about this app request now\n projects = self.nodes[1].overlay.persistence.get_projects()\n self.assertTrue(projects)\n self.assertEqual(projects[0]['id'], 1)", "def test_create_project_from_template(self):\n project_new = self.project_template.take_template()\n\n self.assertTrue(project_new)", "def test_create_project_root(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': None,\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(Project.objects.count(), 2)", "def test_create_project(client, session, tokens):\n response = client.post(\n \"/projects\",\n json={\n \"name\": \"New Project\",\n \"organizations\": [],\n \"teams\": [],\n \"users\": [],\n },\n headers={\"Authorization\": f\"Bearer {tokens['write']}\"},\n )\n assert response.status_code == 201\n project_id = response.json[\"id\"]\n assert Project.query.filter(Project.id == project_id).count() == 1", "def project_create(project):\n client.project.create(project)", "def test_get_project(self):\n pass", "def _create_project(self):\n request = {\n \"project\": {\n \"description\": \"description\",\n \"enabled\": True,\n \"name\": uuid.uuid4().hex,\n \"domain_id\": \"default\",\n }\n }\n response = self.client.post(PROJECT_PATH, data=json.dumps(request),\n headers=HEADERS)\n\n if response.status_code == 201:\n return response.json()\n else:\n raise SystemExit(\"Failed to create project.\")", "def testSessionCreate(self):\n success = False\n project = None\n\n try:\n project = self.session.create_project()\n\n success = True\n except Exception:\n pass\n\n self.failUnless(success)\n self.failIf(project is None)", "def test_create_project_target_enabled(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 201, msg=response.content)\n self.assertEqual(Project.objects.count(), 3)", "def test_new_project_existing(self):\n\n project = fake_clients.FakeProject(name=\"test_project\")\n\n setup_identity_cache(projects=[project])\n\n url = 
\"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": \"test_project_id\",\n \"roles\": \"admin,member\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n new_task = Task.objects.all()[0]\n url = \"/v1/tasks/\" + new_task.uuid\n response = self.client.post(\n url, {\"approved\": True}, format=\"json\", headers=headers\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.json(), {\"errors\": [\"actions invalid\"]})", "def test_new_project(self):\n\n setup_identity_cache()\n\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": \"test_project_id\",\n \"roles\": \"admin,member\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n new_task = Task.objects.all()[0]\n url = \"/v1/tasks/\" + new_task.uuid\n response = self.client.post(\n url, {\"approved\": True}, format=\"json\", headers=headers\n )\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"created token\"]})\n\n new_project = fake_clients.identity_cache[\"new_projects\"][0]\n self.assertEqual(new_project.name, \"test_project\")\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n data = {\"password\": \"testpassword\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_create_project(self):\n self.assertEqual(Project.objects.count(), 2)\n\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 201, msg=response.content)\n self.assertEqual(Project.objects.count(), 3)\n new_project = Project.objects.get(title=NEW_PROJECT_TITLE)\n model_dict = model_to_dict(new_project)\n model_dict['readme'] = model_dict['readme'].raw\n expected = {\n 'id': new_project.pk,\n 'title': new_project.title,\n 'type': new_project.type,\n 'parent': self.category.pk,\n 'description': new_project.description,\n 'readme': new_project.readme.raw,\n 'public_guest_access': False,\n 'archive': False,\n 'full_title': self.category.title + ' / ' + new_project.title,\n 'has_public_children': False,\n 'sodar_uuid': new_project.sodar_uuid,\n }\n self.assertEqual(model_dict, expected)\n self.assertEqual(\n RoleAssignment.objects.filter(\n project=new_project, user=self.user, role=self.role_owner\n ).count(),\n 1,\n )\n expected = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': new_project.description,\n 'readme': new_project.readme.raw,\n 'public_guest_access': False,\n 'sodar_uuid': 
str(new_project.sodar_uuid),\n }\n self.assertEqual(json.loads(response.content), expected)", "def test_create_account_project(self, create):\n row = {'PROJ_NAME1': 'Some Proj', 'PROJ_NO': '121-212',\n 'SECTOR': 'IT'}\n sync.create_account(row, None)\n self.assertTrue(create.called)\n account, row, issue_map = create.call_args[0]\n self.assertEqual(account.name, 'Some Proj')\n self.assertEqual(account.code, '121-212')\n self.assertEqual(account.category, Account.PROJECT)\n self.assertEqual(0, len(Account.objects.filter(pk=account.pk)))", "def test_user_can_create_a_project(self):\n self.assertEqual(project_model.Project.objects.get(user=self.test_user).pk, self.test_project.pk)", "def _create_dummy_project(self,projectname=\"testproject\"):\n # Create three types of users that exist: Root, can do anything, \n # projectadmin, can do things to a project he or she owns. And logged in\n # user \n \n #created in _create_main_project_and_root.\n root = self.root\n # non-root users are created as if they signed up through the project, \n # to maximize test coverage. \n \n # A user who has created a project\n projectadmin = self._create_random_user(\"projectadmin_\")\n \n testproject = self._create_comicsite_in_admin(projectadmin,projectname)\n create_page_in_admin(testproject,\"testpage1\")\n create_page_in_admin(testproject,\"testpage2\")\n \n # a user who explicitly signed up to testproject\n participant = self._create_random_user(\"participant_\")\n self._register(participant,testproject)\n \n # a user who only signed up but did not register to any project\n registered_user = self._create_random_user(\"comicregistered_\")\n \n #TODO: How to do this gracefully? \n return [testproject,root,projectadmin,participant,registered_user]", "def _create_project(org, project_name):\n project = Project(\n org=org,\n name=project_name\n )\n project.save()\n return project", "def create_project(self, **kwargs):\n save = kwargs.get('save', True) \n if kwargs.has_key('save'):\n del(kwargs['save'])\n\n index = self.object_index()\n defaults = dict(slug = \"test-project-%s\" % index,\n basecamp_url = \"https://foo.basecamphq.com/projects/%s/log\" % index)\n defaults.update(kwargs)\n p = Project(**defaults)\n\n if save:\n p.save()\n self.assert_(p.id)\n return p", "def test_empty_project_create(self):\n\n responses.add(\n responses.POST,\n self.host + \"/manager\",\n json={'message': \"Project name cannot be empty.\", 'status':\"error\"},\n status=200\n )\n\n with self.assertRaises(CreateError):\n self.azk.create('', 'description')", "def run(opts, args):\n create_new_project()", "def create_project():\n client = RequestManager()\n project_name = \"\".join(choices(string.ascii_letters + string.digits, k=10))\n client.set_method(\"POST\")\n client.set_endpoint(\"/projects\")\n body = {\"name\": project_name}\n client.set_body(json.dumps(body))\n response = client.execute_request()\n STORED_ID['project_id'] = response.json()['id']", "def CreateProject(projectName='project'):\r\n projectName = input('''The project's name: ''')\r\n if not os.path.exists(projectName):\r\n os.mkdir(projectName)\r\n else:\r\n print('There is a file with the same name.')\r\n\r\n for dir in ['OPT', 'SCF', 'PHO']:\r\n if not os.path.exists(projectName + os.sep + dir):\r\n os.mkdir(projectName + os.sep + dir)", "def setup_project(client, project_template, do_auth=True):\n client = deepcopy(client)\n email = \"[email protected]\"\n password = \"test\"\n urls = URLS()\n project_config = project_template()\n\n # we work in empty database, so 
let's create business user and login\n user = User.objects.create(email=email)\n user.set_password(password) # set password without hash\n\n create_business(user)\n org = Organization.create_organization(created_by=user, title=user.first_name)\n user.active_organization = org\n user.save()\n\n if do_auth:\n\n assert signin(client, email, password).status_code == 302\n # create project\n with requests_mock.Mocker() as m:\n m.register_uri('POST', re.compile(r'ml\\.heartex\\.net/\\d+/validate'), text=json.dumps({'status': 'ok'}))\n m.register_uri('GET', re.compile(r'ml\\.heartex\\.net/\\d+/health'), text=json.dumps({'status': 'UP'}))\n r = client.post(urls.project_create, data=project_config)\n print('Project create with status code:', r.status_code)\n assert r.status_code == 201, f'Create project result should be redirect to the next page'\n\n # get project id and prepare url\n project = Project.objects.filter(title=project_config['title']).first()\n urls.set_project(project.pk)\n print('Project id:', project.id)\n\n client.project = project\n\n client.user = user\n client.urls = urls\n client.project_config = project_config\n client.org = org\n return client", "def test_create_project_from_template(svc_client_templates_creation):\n from git import Repo\n\n from renku.service.serializers.headers import RenkuHeaders\n from renku.service.utils import CACHE_PROJECTS_PATH\n\n svc_client, headers, payload, rm_remote = svc_client_templates_creation\n\n # NOTE: fail: remote authentication\n anonymous_headers = deepcopy(headers)\n anonymous_headers[\"Authorization\"] = \"Bearer None\"\n response = svc_client.post(\"/templates.create_project\", data=json.dumps(payload), headers=anonymous_headers)\n\n assert response\n assert response.json[\"error\"]\n assert \"Cannot push changes\" in response.json[\"error\"][\"reason\"]\n\n # NOTE: fail: missing parameters\n if len(payload[\"parameters\"]) > 0:\n payload_without_parameters = deepcopy(payload)\n payload_without_parameters[\"parameters\"] = []\n response = svc_client.post(\n \"/templates.create_project\", data=json.dumps(payload_without_parameters), headers=headers\n )\n assert response\n assert response.json[\"error\"]\n assert RENKU_EXCEPTION_ERROR_CODE == response.json[\"error\"][\"code\"]\n assert \"missing parameter\" in response.json[\"error\"][\"reason\"]\n\n # NOTE: successfully push with proper authentication\n response = svc_client.post(\"/templates.create_project\", data=json.dumps(payload), headers=headers)\n\n assert response\n assert {\"result\"} == set(response.json.keys())\n stripped_name = normalize_to_ascii(payload[\"project_name\"])\n assert stripped_name == response.json[\"result\"][\"slug\"]\n expected_url = \"{0}/{1}/{2}\".format(payload[\"project_repository\"], payload[\"project_namespace\"], stripped_name)\n assert expected_url == response.json[\"result\"][\"url\"]\n\n # NOTE: assert correct git user is set on new project\n user_data = RenkuHeaders.decode_user(headers[\"Renku-User\"])\n project_path = (\n CACHE_PROJECTS_PATH\n / user_data[\"user_id\"]\n / response.json[\"result\"][\"project_id\"]\n / payload[\"project_namespace\"]\n / stripped_name\n )\n repo = Repo(project_path)\n reader = repo.config_reader()\n assert reader.get_value(\"user\", \"email\") == user_data[\"email\"]\n assert reader.get_value(\"user\", \"name\") == user_data[\"name\"]\n\n # NOTE: successfully re-use old name after cleanup\n assert rm_remote() is True\n sleep(1) # NOTE: sleep to make sure remote isn't locked\n response = 
svc_client.post(\"/templates.create_project\", data=json.dumps(payload), headers=headers)\n assert response\n assert {\"result\"} == set(response.json.keys())\n assert expected_url == response.json[\"result\"][\"url\"]\n assert rm_remote() is True", "def test_create_project_unknown_user(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': INVALID_UUID,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(Project.objects.count(), 2)", "def test_read_project(self):\n pass", "def test_read_project(self):\n pass", "def test_project_generation(cookies, context, context_combination):\n result = cookies.bake(extra_context={**context, **context_combination})\n assert result.exit_code == 0\n assert result.exception is None\n assert result.project.basename == context[\"project_slug\"]\n assert result.project.isdir()\n\n paths = build_files_list(str(result.project))\n assert paths\n check_paths(paths)" ]
[ "0.87766117", "0.8437002", "0.841835", "0.80077946", "0.7994402", "0.7775029", "0.77226585", "0.76863635", "0.7676147", "0.76569086", "0.7646558", "0.7646026", "0.76438665", "0.7638706", "0.76039904", "0.74945164", "0.7460312", "0.7429228", "0.74266595", "0.7354817", "0.7349462", "0.7287748", "0.7284829", "0.7261214", "0.72552454", "0.7198857", "0.71860194", "0.7158664", "0.7158664", "0.7150631" ]
0.94393003
1
Test case for create_scenario
def test_create_scenario(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_scenario1(self):\n pass", "def test_get_scenario(self):\n pass", "def test_get_scenarios(self):\n pass", "def create_scenarios(self, params, num_scenarios, random_seed):\n return None", "def test_create_run(self):\n pass", "def post(self):\n data = request.json\n create_testing_scenario(data)\n return None, 201", "def test_pytest_bdd_scenario(self):\n self.testdir.makefile(\n \".feature\",\n simple=_SIMPLE_SCENARIO,\n )\n py_file = self.testdir.makepyfile(\n \"\"\"\n from pytest_bdd import scenario, given, then, when\n\n @scenario(\"simple.feature\", \"Simple scenario\")\n def test_simple():\n pass\n\n BAR = None\n\n @given(\"I have a bar\")\n def bar():\n global BAR\n BAR = 1\n\n @when(\"I eat it\")\n def eat():\n global BAR\n BAR -= 1\n\n @then(\"I don't have a bar\")\n def check():\n assert BAR == 0\n \"\"\"\n )\n file_name = os.path.basename(py_file.strpath)\n self.inline_run(\"--ddtrace\", file_name)\n spans = self.pop_spans()\n\n assert len(spans) == 7\n assert spans[0].get_tag(\"component\") == \"pytest\"\n assert spans[0].get_tag(\"test.name\") == \"Simple scenario\"\n assert spans[0].span_type == \"test\"\n assert spans[1].resource == \"I have a bar\"\n assert spans[1].name == \"given\"\n assert spans[2].resource == \"I eat it\"\n assert spans[2].name == \"when\"\n assert spans[3].resource == \"I don't have a bar\"\n assert spans[3].name == \"then\"", "def test_create_goal(self):\n pass", "def test_create(self):\n pass", "def test_pytest_bdd_scenario_with_failed_step(self):\n self.testdir.makefile(\n \".feature\",\n simple=_SIMPLE_SCENARIO,\n )\n py_file = self.testdir.makepyfile(\n \"\"\"\n from pytest_bdd import scenario, given, then, when\n\n @scenario(\"simple.feature\", \"Simple scenario\")\n def test_simple():\n pass\n\n BAR = None\n\n @given(\"I have a bar\")\n def bar():\n global BAR\n BAR = 1\n\n @when(\"I eat it\")\n def eat():\n global BAR\n BAR -= 1\n\n @then(\"I don't have a bar\")\n def check():\n assert BAR == -1\n \"\"\"\n )\n file_name = os.path.basename(py_file.strpath)\n self.inline_run(\"--ddtrace\", file_name)\n spans = self.pop_spans()\n\n assert len(spans) == 7\n assert spans[3].name == \"then\"\n assert spans[3].get_tag(ERROR_MSG)", "def _run_scenario(self, cls, method_name, context, args, config):", "def test_pytest_bdd_scenario_with_parameters(self):\n self.testdir.makefile(\n \".feature\",\n parameters=\"\"\"\n Feature: Parameters\n Scenario: Passing scenario\n Given I have 0 bars\n When I eat it\n Then I have -1 bars\n\n Scenario: Failing scenario\n Given I have 2 bars\n When I eat it\n Then I have 0 bar\n\n Scenario: Failing converter\n Given I have no bar\n \"\"\",\n )\n py_file = self.testdir.makepyfile(\n \"\"\"\n from pytest_bdd import scenarios, given, then, when, parsers\n\n scenarios(\"parameters.feature\")\n\n BAR = None\n\n @given(parsers.re(\"^I have (?P<bars>[^ ]+) bar$\")) # loose regex\n def have_simple(bars):\n global BAR\n BAR = bars\n\n @given(parsers.re(\"^I have (?P<bars>\\\\d+) bars$\"), converters=dict(bars=int))\n def have(bars):\n global BAR\n BAR = bars\n\n @when(\"I eat it\")\n def eat():\n global BAR\n BAR -= 1\n\n @then(parsers.parse(\"I have {bars:d} bar\"))\n def check_parse(bars):\n assert BAR == bars\n\n @then(parsers.cfparse(\"I have {bars:d} bars\"))\n def check_cfparse(bars):\n assert BAR == bars\n \"\"\"\n )\n file_name = os.path.basename(py_file.strpath)\n self.inline_run(\"--ddtrace\", file_name)\n spans = self.pop_spans()\n\n assert len(spans) == 13 # 3 scenarios + 7 steps + 1 module\n assert 
json.loads(spans[1].get_tag(test.PARAMETERS)) == {\"bars\": 0}\n assert json.loads(spans[3].get_tag(test.PARAMETERS)) == {\"bars\": -1}\n assert json.loads(spans[5].get_tag(test.PARAMETERS)) == {\"bars\": 2}\n assert json.loads(spans[7].get_tag(test.PARAMETERS)) == {\"bars\": 0}\n assert json.loads(spans[9].get_tag(test.PARAMETERS)) == {\"bars\": \"no\"}", "def test_create_team(self):\n pass", "def run_scenario(self, run, run_id):\n\n raise NotImplementedError", "def test_create_activity_template(self):\n pass", "def test_teams_create(self):\n pass", "def test_pytest_bdd_with_missing_step_implementation(self):\n self.testdir.makefile(\n \".feature\",\n simple=_SIMPLE_SCENARIO,\n )\n py_file = self.testdir.makepyfile(\n \"\"\"\n from pytest_bdd import scenario, given, then, when\n\n @scenario(\"simple.feature\", \"Simple scenario\")\n def test_simple():\n pass\n \"\"\"\n )\n file_name = os.path.basename(py_file.strpath)\n self.inline_run(\"--ddtrace\", file_name)\n spans = self.pop_spans()\n\n assert len(spans) == 4\n assert spans[0].get_tag(ERROR_MSG)", "def test_2nd_scenario():\n start_entered_scenario(config.MERAKI_CAMERAS[0][\"serial\"])\n return \"ok\"", "def test_create_activity(self):\n pass", "def test_create10(self):\n pass", "def start_scenario(self, _, scenario, **kwargs):\n if scenario.tags and \"skip\" in scenario.tags:\n scenario.skip(\"Marked with @skip\")\n self._scenario_id = self._rp.start_test_item(\n name=scenario.name,\n start_time=timestamp(),\n item_type=\"STEP\",\n parent_item_id=self._feature_id,\n code_ref=self._code_ref(scenario),\n attributes=self._attributes(scenario),\n parameters=self._get_parameters(scenario),\n description=self._item_description(scenario),\n test_case_id=self._test_case_id(scenario),\n **kwargs,\n )\n self._log_fixtures(scenario, \"BEFORE_TEST\", self._scenario_id)\n self._log_item_id = self._scenario_id", "def test_create_course(self):\r\n self.assert_created_course()", "def test_create_record(self):\n pass", "def test_client_risk_assessment_create(self):\n pass", "def test_create_activity_occurrence(self):\n pass", "def test_0_0_create(self):\n\n self.assertTrue(self.b1)", "def test_scenario(self):\n scenario_ids = list(scenarios.get_scenarios().keys())\n\n for scenario_id in scenario_ids:\n url = reverse('workbench_show_scenario', kwargs={'scenario_id': scenario_id})\n client = Client()\n response = client.get(url, follow=True)\n assert response.status_code == 200, scenario_id\n\n # Be sure we got the whole scenario. Again, we can't know what to expect\n # here, but at the very least, if there are verticals, they should not be\n # empty. That would be a sign that some data wasn't loaded properly while\n # rendering the scenario.\n html = lxml.html.fromstring(response.content)\n for vertical_tag in html.xpath('//div[@class=\"vertical\"]'):\n # No vertical tag should be empty.\n assert list(vertical_tag), u\"Scenario {}: Empty <vertical> shouldn't happen!\".format(scenario_id)", "def ConstructStage(self):\n raise NotImplementedError(self, \"ConstructStage: Implement in your test\")", "def test_create_story(self):\n title = ('Transportation Challenges Limit Education Choices for '\n 'Denver Parents')\n summary = \"\"\"\n Many families in the Denver metro area use public\n transportation instead of a school bus because for them, a\n quality education is worth hours of daily commuting. Colorado's\n school choice program is meant to foster educational equity,\n but the families who benefit most are those who have time and\n money to travel. 
Low-income families are often left in a lurch.\n \"\"\"\n byline = \"Mile High Connects\"\n with self.assertRaises(Story.DoesNotExist):\n Story.objects.get(storytranslation__title=title)\n story = create_story(title=title, summary=summary, byline=byline)\n self.assertEqual(story.title, title)\n self.assertEqual(story.summary, summary)\n self.assertEqual(story.byline, byline)\n retrieved_story = Story.objects.get(pk=story.pk)\n self.assertEqual(retrieved_story.title, title)\n self.assertEqual(retrieved_story.summary, summary)\n self.assertEqual(retrieved_story.byline, byline)", "def test_create_story(self):\n title = ('Transportation Challenges Limit Education Choices for '\n 'Denver Parents')\n summary = \"\"\"\n Many families in the Denver metro area use public\n transportation instead of a school bus because for them, a\n quality education is worth hours of daily commuting. Colorado's\n school choice program is meant to foster educational equity,\n but the families who benefit most are those who have time and\n money to travel. Low-income families are often left in a lurch.\n \"\"\"\n byline = \"Mile High Connects\"\n with self.assertRaises(Story.DoesNotExist):\n Story.objects.get(storytranslation__title=title)\n story = create_story(title=title, summary=summary, byline=byline)\n self.assertEqual(story.title, title)\n self.assertEqual(story.summary, summary)\n self.assertEqual(story.byline, byline)\n retrieved_story = Story.objects.get(pk=story.pk)\n self.assertEqual(retrieved_story.title, title)\n self.assertEqual(retrieved_story.summary, summary)\n self.assertEqual(retrieved_story.byline, byline)" ]
[ "0.8869541", "0.7671374", "0.7262993", "0.7245442", "0.7218788", "0.71558136", "0.69229096", "0.68585736", "0.6857396", "0.66705066", "0.65117943", "0.6506439", "0.6440637", "0.6375267", "0.637377", "0.63317627", "0.63309366", "0.6323892", "0.6315476", "0.6173005", "0.6121736", "0.6120932", "0.6077051", "0.60669845", "0.6062456", "0.60490924", "0.6045699", "0.60422564", "0.60348254", "0.60348254" ]
0.9296185
0
Test case for create_scenario1
def test_create_scenario1(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_scenario(self):\n pass", "def test_create_run(self):\n pass", "def test_get_scenario(self):\n pass", "def create_scenarios(self, params, num_scenarios, random_seed):\n return None", "def test_get_scenarios(self):\n pass", "def test_create(self):\n pass", "def post(self):\n data = request.json\n create_testing_scenario(data)\n return None, 201", "def test_create_goal(self):\n pass", "def test_create10(self):\n pass", "def test_2nd_scenario():\n start_entered_scenario(config.MERAKI_CAMERAS[0][\"serial\"])\n return \"ok\"", "def test_0_0_create(self):\n\n self.assertTrue(self.b1)", "def test_create_activity_template(self):\n pass", "def test_pytest_bdd_scenario(self):\n self.testdir.makefile(\n \".feature\",\n simple=_SIMPLE_SCENARIO,\n )\n py_file = self.testdir.makepyfile(\n \"\"\"\n from pytest_bdd import scenario, given, then, when\n\n @scenario(\"simple.feature\", \"Simple scenario\")\n def test_simple():\n pass\n\n BAR = None\n\n @given(\"I have a bar\")\n def bar():\n global BAR\n BAR = 1\n\n @when(\"I eat it\")\n def eat():\n global BAR\n BAR -= 1\n\n @then(\"I don't have a bar\")\n def check():\n assert BAR == 0\n \"\"\"\n )\n file_name = os.path.basename(py_file.strpath)\n self.inline_run(\"--ddtrace\", file_name)\n spans = self.pop_spans()\n\n assert len(spans) == 7\n assert spans[0].get_tag(\"component\") == \"pytest\"\n assert spans[0].get_tag(\"test.name\") == \"Simple scenario\"\n assert spans[0].span_type == \"test\"\n assert spans[1].resource == \"I have a bar\"\n assert spans[1].name == \"given\"\n assert spans[2].resource == \"I eat it\"\n assert spans[2].name == \"when\"\n assert spans[3].resource == \"I don't have a bar\"\n assert spans[3].name == \"then\"", "def TestOneStep(self):\n pass", "def test_create_team(self):\n pass", "def test_virtualmachineconsole_scenario1(self):\n call_scenario1(self)", "def test_create_activity(self):\n pass", "def test_create_activity_occurrence(self):\n pass", "def test_pytest_bdd_scenario_with_failed_step(self):\n self.testdir.makefile(\n \".feature\",\n simple=_SIMPLE_SCENARIO,\n )\n py_file = self.testdir.makepyfile(\n \"\"\"\n from pytest_bdd import scenario, given, then, when\n\n @scenario(\"simple.feature\", \"Simple scenario\")\n def test_simple():\n pass\n\n BAR = None\n\n @given(\"I have a bar\")\n def bar():\n global BAR\n BAR = 1\n\n @when(\"I eat it\")\n def eat():\n global BAR\n BAR -= 1\n\n @then(\"I don't have a bar\")\n def check():\n assert BAR == -1\n \"\"\"\n )\n file_name = os.path.basename(py_file.strpath)\n self.inline_run(\"--ddtrace\", file_name)\n spans = self.pop_spans()\n\n assert len(spans) == 7\n assert spans[3].name == \"then\"\n assert spans[3].get_tag(ERROR_MSG)", "def test_pytest_bdd_scenario_with_parameters(self):\n self.testdir.makefile(\n \".feature\",\n parameters=\"\"\"\n Feature: Parameters\n Scenario: Passing scenario\n Given I have 0 bars\n When I eat it\n Then I have -1 bars\n\n Scenario: Failing scenario\n Given I have 2 bars\n When I eat it\n Then I have 0 bar\n\n Scenario: Failing converter\n Given I have no bar\n \"\"\",\n )\n py_file = self.testdir.makepyfile(\n \"\"\"\n from pytest_bdd import scenarios, given, then, when, parsers\n\n scenarios(\"parameters.feature\")\n\n BAR = None\n\n @given(parsers.re(\"^I have (?P<bars>[^ ]+) bar$\")) # loose regex\n def have_simple(bars):\n global BAR\n BAR = bars\n\n @given(parsers.re(\"^I have (?P<bars>\\\\d+) bars$\"), converters=dict(bars=int))\n def have(bars):\n global BAR\n BAR = bars\n\n @when(\"I eat it\")\n def eat():\n 
global BAR\n BAR -= 1\n\n @then(parsers.parse(\"I have {bars:d} bar\"))\n def check_parse(bars):\n assert BAR == bars\n\n @then(parsers.cfparse(\"I have {bars:d} bars\"))\n def check_cfparse(bars):\n assert BAR == bars\n \"\"\"\n )\n file_name = os.path.basename(py_file.strpath)\n self.inline_run(\"--ddtrace\", file_name)\n spans = self.pop_spans()\n\n assert len(spans) == 13 # 3 scenarios + 7 steps + 1 module\n assert json.loads(spans[1].get_tag(test.PARAMETERS)) == {\"bars\": 0}\n assert json.loads(spans[3].get_tag(test.PARAMETERS)) == {\"bars\": -1}\n assert json.loads(spans[5].get_tag(test.PARAMETERS)) == {\"bars\": 2}\n assert json.loads(spans[7].get_tag(test.PARAMETERS)) == {\"bars\": 0}\n assert json.loads(spans[9].get_tag(test.PARAMETERS)) == {\"bars\": \"no\"}", "def test_1():", "def test_teams_create(self):\n pass", "def test_create_record(self):\n pass", "def test_create_unexpected_problem(self):\n pass", "def test_new(self):", "def test_new(self):", "def test_client_risk_assessment_create(self):\n pass", "def test_create_occurrence(self):\n pass", "def test_pytest_bdd_with_missing_step_implementation(self):\n self.testdir.makefile(\n \".feature\",\n simple=_SIMPLE_SCENARIO,\n )\n py_file = self.testdir.makepyfile(\n \"\"\"\n from pytest_bdd import scenario, given, then, when\n\n @scenario(\"simple.feature\", \"Simple scenario\")\n def test_simple():\n pass\n \"\"\"\n )\n file_name = os.path.basename(py_file.strpath)\n self.inline_run(\"--ddtrace\", file_name)\n spans = self.pop_spans()\n\n assert len(spans) == 4\n assert spans[0].get_tag(ERROR_MSG)", "def test_create_part(self):\n pass" ]
[ "0.90819556", "0.75562876", "0.7398337", "0.7303192", "0.72252643", "0.7149693", "0.7026761", "0.70154", "0.68493575", "0.670496", "0.6677057", "0.6605391", "0.6593304", "0.65581036", "0.65439767", "0.65091515", "0.6480423", "0.64081895", "0.6379883", "0.6355066", "0.6348571", "0.629792", "0.6283295", "0.62435406", "0.6240703", "0.6240703", "0.6220805", "0.62160975", "0.62099165", "0.6194798" ]
0.93772525
0
Test case for create_software_asset_bundle_from_system_module
def test_create_software_asset_bundle_from_system_module(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_software_bundle_from_system_module(self):\n pass", "def test_get_software_bundle(self):\n pass", "def test_update_software_asset_bundle(self):\n pass", "def test_get_software_bundles(self):\n pass", "def test_create_system_asset(self):\n pass", "def test_get_software_asset_bundle_expanded(self):\n pass", "def test_delete_software_asset_bundle(self):\n pass", "def test_import_software_asset(self):\n pass", "def test_replace_software_asset_for_software_component(self):\n pass", "def test_import_system_asset(self):\n pass", "def test_retrieve_system_asset(self):\n pass", "def test_update_software_asset(self):\n pass", "def test_update_software_asset_install_script(self):\n pass", "def test_update_software_asset_content(self):\n pass", "def test_update_system_asset(self):\n pass", "def test_list_system_assets(self):\n pass", "def test_update_software_components_for_system_module(self):\n pass", "def test_update_software_component_for_system_module(self):\n pass", "def test_build(self):\r\n self.mkbundle('file1', 'file2', output=\"out\").build()\r\n assert self.get(\"media/out\") == \"foo\\nbar\"", "def test_create_deployment(self):\n pass", "def bundle(self, app):\r\n assert(isinstance(app, BundleCreate.App))\r\n\r\n bundledir = os.path.join(self.outdir, '%s-bundle' % app.basename)\r\n self.context.log.info('creating %s' % os.path.relpath(bundledir, get_buildroot()))\r\n\r\n safe_mkdir(bundledir, clean=True)\r\n\r\n classpath = OrderedSet()\r\n if not self.deployjar:\r\n libdir = os.path.join(bundledir, 'libs')\r\n os.mkdir(libdir)\r\n\r\n # Add internal dependencies to the bundle.\r\n def add_jars(target):\r\n target_jars = self.context.products.get('jars').get(target)\r\n if target_jars is not None:\r\n for basedir, jars in target_jars.items():\r\n for internaljar in jars:\r\n os.symlink(os.path.join(basedir, internaljar),\r\n os.path.join(libdir, internaljar))\r\n classpath.add(internaljar)\r\n app.binary.walk(add_jars, lambda t: t.is_internal)\r\n\r\n # Add external dependencies to the bundle.\r\n for basedir, externaljar in self.list_jar_dependencies(app.binary):\r\n path = os.path.join(basedir, externaljar)\r\n os.symlink(path, os.path.join(libdir, externaljar))\r\n classpath.add(externaljar)\r\n\r\n for basedir, jars in self.context.products.get('jars').get(app.binary).items():\r\n if len(jars) != 1:\r\n raise TaskError('Expected 1 mapped binary for %s but found: %s' % (app.binary, jars))\r\n\r\n binary = jars[0]\r\n binary_jar = os.path.join(basedir, binary)\r\n bundle_jar = os.path.join(bundledir, binary)\r\n if not classpath:\r\n os.symlink(binary_jar, bundle_jar)\r\n else:\r\n with open_zip(binary_jar, 'r') as src:\r\n with open_zip(bundle_jar, 'w', compression=ZIP_DEFLATED) as dest:\r\n for item in src.infolist():\r\n buf = src.read(item.filename)\r\n if Manifest.PATH == item.filename:\r\n manifest = Manifest(buf)\r\n manifest.addentry(Manifest.CLASS_PATH,\r\n ' '.join(os.path.join('libs', jar) for jar in classpath))\r\n buf = manifest.contents()\r\n dest.writestr(item, buf)\r\n\r\n for bundle in app.bundles:\r\n for path, relpath in bundle.filemap.items():\r\n bundlepath = os.path.join(bundledir, relpath)\r\n safe_mkdir(os.path.dirname(bundlepath))\r\n os.symlink(path, bundlepath)\r\n\r\n return bundledir", "def test_get_bundle(self):\n res = self.app.get('/bundle/DEFAULT/main')\n bundle = json.loads(res.body.decode('utf-8'))\n expected = self.stats1['chunks']['main'][0]\n self.assertEqual(len(bundle), 1)\n self.assertEqual(bundle[0]['name'], expected['name'])\n 
self.assertEqual(bundle[0]['path'], expected['path'])\n self.assertTrue('url' in bundle[0])", "def test_delete_system_asset(self):\n pass", "def firmware_pack_create(handle, org_name, name, rack_bundle_version,\n blade_bundle_version, descr=\"\", mode=\"staged\",\n org_parent=\"org-root\"):\n\n org_dn = org_parent + \"/org-\" + org_name\n p_mo = handle.query_dn(org_dn)\n if not p_mo:\n log.info(\"Sub-Org <%s> not found!\" % org_name)\n else:\n from ucsmsdk.mometa.firmware.FirmwareComputeHostPack import\\\n FirmwareComputeHostPack\n\n mo = FirmwareComputeHostPack(parent_mo_or_dn=org_dn,\n name=name,\n descr=descr,\n rack_bundle_version=rack_bundle_version,\n mode=mode,\n blade_bundle_version=blade_bundle_version)\n handle.add_mo(mo)\n handle.commit()", "def test_get_bundle(self):\n bundle = self.webpack.get_bundle('main')\n self.assertEqual(bundle, self.stats['chunks']['main'])", "def bundle(self, app):\n assert(isinstance(app, BundleCreate.App))\n\n bundledir = os.path.join(self.outdir, '%s-bundle' % app.basename)\n self.context.log.info('creating %s' % os.path.relpath(bundledir, get_buildroot()))\n\n safe_mkdir(bundledir, clean=True)\n\n classpath = OrderedSet()\n if not self.deployjar:\n libdir = os.path.join(bundledir, 'libs')\n os.mkdir(libdir)\n\n # Add external dependencies to the bundle.\n for basedir, externaljar in self.list_jar_dependencies(app.binary):\n path = os.path.join(basedir, externaljar)\n os.symlink(path, os.path.join(libdir, externaljar))\n classpath.add(externaljar)\n\n # TODO: There should probably be a separate 'binary_jars' product type,\n # so we can more easily distinguish binary jars (that contain all the classes of their\n # transitive deps) and per-target jars.\n for basedir, jars in self.context.products.get('jars').get(app.binary).items():\n if len(jars) != 1:\n raise TaskError('Expected 1 mapped binary for %s but found: %s' % (app.binary, jars))\n\n binary = jars[0]\n binary_jar = os.path.join(basedir, binary)\n bundle_jar = os.path.join(bundledir, binary)\n # Add the internal classes into the bundle_jar.\n if not classpath:\n os.symlink(binary_jar, bundle_jar)\n else:\n # TODO: Can we copy the existing jar and inject the manifest in, instead of\n # laboriously copying the contents one by one? 
Would that be more efficient?\n with open_zip(binary_jar, 'r') as src:\n with open_zip(bundle_jar, 'w', compression=ZIP_DEFLATED) as dest:\n for item in src.infolist():\n buf = src.read(item.filename)\n if Manifest.PATH == item.filename:\n manifest = Manifest(buf)\n manifest.addentry(Manifest.CLASS_PATH,\n ' '.join(os.path.join('libs', jar) for jar in classpath))\n buf = manifest.contents()\n dest.writestr(item, buf)\n\n for bundle in app.bundles:\n for path, relpath in bundle.filemap.items():\n bundlepath = os.path.join(bundledir, relpath)\n safe_mkdir(os.path.dirname(bundlepath))\n os.symlink(path, bundlepath)\n\n return bundledir", "def test_get_second_bundle(self):\n res = self.app.get('/bundle/other/libs')\n bundle = json.loads(res.body.decode('utf-8'))\n expected = self.stats2['chunks']['libs'][0]\n self.assertEqual(len(bundle), 1)\n self.assertEqual(bundle[0]['name'], expected['name'])\n self.assertEqual(bundle[0]['path'], expected['path'])\n self.assertTrue('url' in bundle[0])", "def get_bundle(conf, asset_type, bundle_name):\n \n content_type = 'application/javascript'\n content = []\n \n if asset_type == 'css':\n content_type = 'text/css'\n \n for asset in conf[asset_type][bundle_name]:\n content.append(open(os.path.join(conf['srcDir'], asset_type, asset)).read())\n \n content = ''.join(content)\n \n return '200 OK', content_type, content", "def test_get_test_asset(self):\n pass", "def create_system(sys_structure):\n pass" ]
[ "0.8933855", "0.76393044", "0.7609548", "0.7527626", "0.75150734", "0.7355522", "0.6996746", "0.69212085", "0.6799893", "0.67246693", "0.6567345", "0.63880044", "0.62481856", "0.6242452", "0.62012464", "0.5992383", "0.593134", "0.59185493", "0.584247", "0.5657797", "0.56223786", "0.5598833", "0.55923206", "0.55839235", "0.5566948", "0.55025256", "0.5489674", "0.5480018", "0.5464289", "0.5462373" ]
0.9603537
0
Test case for create_software_bundle_from_system_module
def test_create_software_bundle_from_system_module(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_software_asset_bundle_from_system_module(self):\n pass", "def test_get_software_bundle(self):\n pass", "def test_get_software_bundles(self):\n pass", "def test_update_software_components_for_system_module(self):\n pass", "def test_update_software_component_for_system_module(self):\n pass", "def test_update_software_asset_bundle(self):\n pass", "def test_get_software_asset_bundle_expanded(self):\n pass", "def create_system(sys_structure):\n pass", "def firmware_pack_create(handle, org_name, name, rack_bundle_version,\n blade_bundle_version, descr=\"\", mode=\"staged\",\n org_parent=\"org-root\"):\n\n org_dn = org_parent + \"/org-\" + org_name\n p_mo = handle.query_dn(org_dn)\n if not p_mo:\n log.info(\"Sub-Org <%s> not found!\" % org_name)\n else:\n from ucsmsdk.mometa.firmware.FirmwareComputeHostPack import\\\n FirmwareComputeHostPack\n\n mo = FirmwareComputeHostPack(parent_mo_or_dn=org_dn,\n name=name,\n descr=descr,\n rack_bundle_version=rack_bundle_version,\n mode=mode,\n blade_bundle_version=blade_bundle_version)\n handle.add_mo(mo)\n handle.commit()", "def test_update_software_configuration_for_system_module(self):\n pass", "def test_get_software(self):\n pass", "def test_delete_software_asset_bundle(self):\n pass", "def test_replace_software_asset_for_software_component(self):\n pass", "def test_create_system_entire(self):\n pass", "def test_create_system_asset(self):\n pass", "def test_System_creation(self):\n s1 = System()\n self.assertEqual(s1.get_library_name(), \"default\")", "def fusion_api_create_firmware_bundle(self, body, api=None, headers=None):\n return self.driver.post(body, api, headers)", "def test_literal_io_from_package_and_offering(self):\n cwl = {\n \"cwlVersion\": \"v1.0\",\n \"class\": \"CommandLineTool\",\n \"inputs\": [\n {\n \"id\": \"literal_input_only_cwl_minimal\",\n \"type\": \"string\"\n },\n {\n \"id\": \"literal_input_both_cwl_and_wps\",\n \"type\": \"string\"\n },\n ],\n \"outputs\": [\n {\n \"id\": \"literal_output_only_cwl_minimal\",\n \"type\": {\n \"type\": \"array\",\n \"items\": \"float\",\n }\n },\n {\n \"id\": \"literal_output_both_cwl_and_wps\",\n \"type\": \"float\"\n }\n ]\n }\n body = {\n \"processDescription\": {\n \"process\": {\n \"id\": self._testMethodName,\n \"title\": \"some title\",\n \"abstract\": \"this is a test\",\n \"inputs\": [\n {\n \"id\": \"literal_input_only_wps_removed\",\n },\n {\n \"id\": \"literal_input_both_cwl_and_wps\",\n \"title\": \"Extra detail for I/O both in CWL and WPS\"\n }\n ],\n \"outputs\": [\n {\n \"id\": \"literal_output_only_wps_removed\"\n },\n {\n \"id\": \"literal_output_both_cwl_and_wps\",\n \"title\": \"Additional detail only within WPS output\"\n }\n ]\n }\n },\n \"deploymentProfileName\": \"http://www.opengis.net/profiles/eoc/wpsApplication\",\n \"executionUnit\": [{\"unit\": cwl}],\n }\n desc, pkg = self.deploy_process(body, describe_schema=\"OLD\")\n proc = desc[\"process\"]\n\n assert proc[\"id\"] == self._testMethodName\n assert proc[\"title\"] == \"some title\"\n assert proc[\"description\"] == \"this is a test\"\n assert isinstance(proc[\"inputs\"], list)\n assert len(proc[\"inputs\"]) == 2\n assert proc[\"inputs\"][0][\"id\"] == \"literal_input_only_cwl_minimal\"\n assert proc[\"inputs\"][0][\"minOccurs\"] == 1\n assert proc[\"inputs\"][0][\"maxOccurs\"] == 1\n assert proc[\"inputs\"][1][\"id\"] == \"literal_input_both_cwl_and_wps\"\n assert proc[\"inputs\"][1][\"minOccurs\"] == 1\n assert proc[\"inputs\"][1][\"maxOccurs\"] == 1\n assert proc[\"inputs\"][1][\"title\"] == 
\"Extra detail for I/O both in CWL and WPS\", \\\n \"Additional details defined only in WPS matching CWL I/O by ID should be preserved\"\n assert isinstance(proc[\"outputs\"], list)\n assert len(proc[\"outputs\"]) == 2\n assert proc[\"outputs\"][0][\"id\"] == \"literal_output_only_cwl_minimal\"\n assert proc[\"outputs\"][1][\"id\"] == \"literal_output_both_cwl_and_wps\"\n assert proc[\"outputs\"][1][\"title\"] == \"Additional detail only within WPS output\", \\\n \"Additional details defined only in WPS matching CWL I/O by ID should be preserved\"\n\n assert len(pkg[\"inputs\"]) == 2\n assert pkg[\"inputs\"][0][\"id\"] == \"literal_input_only_cwl_minimal\"\n assert pkg[\"inputs\"][1][\"id\"] == \"literal_input_both_cwl_and_wps\"\n # FIXME:\n # https://github.com/crim-ca/weaver/issues/31\n # https://github.com/crim-ca/weaver/issues/50\n # assert pkg[\"inputs\"][1][\"label\"] == \"Extra detail for I/O both in CWL and WPS\", \\\n # \"WPS I/O title should be converted to CWL label of corresponding I/O from additional details\"\n assert len(pkg[\"outputs\"]) == 2\n assert pkg[\"outputs\"][0][\"id\"] == \"literal_output_only_cwl_minimal\"\n assert pkg[\"outputs\"][1][\"id\"] == \"literal_output_both_cwl_and_wps\"\n # FIXME:\n # https://github.com/crim-ca/weaver/issues/31\n # https://github.com/crim-ca/weaver/issues/50\n # assert pkg[\"outputs\"][1][\"label\"] == \"Additional detail only within WPS output\", \\\n # \"WPS I/O title should be converted to CWL label of corresponding I/O from additional details\"\n\n desc = self.describe_process(self._testMethodName, describe_schema=\"OGC\")\n assert desc[\"id\"] == self._testMethodName\n assert desc[\"title\"] == \"some title\"\n assert desc[\"description\"] == \"this is a test\"\n assert isinstance(desc[\"inputs\"], dict)\n assert len(desc[\"inputs\"]) == 2\n assert desc[\"inputs\"][\"literal_input_only_cwl_minimal\"][\"minOccurs\"] == 1\n assert desc[\"inputs\"][\"literal_input_only_cwl_minimal\"][\"maxOccurs\"] == 1\n assert desc[\"inputs\"][\"literal_input_both_cwl_and_wps\"][\"minOccurs\"] == 1\n assert desc[\"inputs\"][\"literal_input_both_cwl_and_wps\"][\"maxOccurs\"] == 1\n assert isinstance(desc[\"outputs\"], dict)\n assert len(desc[\"outputs\"]) == 2\n assert \"title\" not in desc[\"outputs\"][\"literal_output_only_cwl_minimal\"], \\\n \"No additional title provided should make the field to be omitted completely.\"\n assert desc[\"outputs\"][\"literal_output_both_cwl_and_wps\"][\"title\"] == \\\n \"Additional detail only within WPS output\", \\\n \"Additional details defined only in WPS matching CWL I/O by ID should be preserved.\"", "def test_import_software_asset(self):\n pass", "def test_create_hyperflex_software_version_policy(self):\n pass", "def create_usstock_bundle(code, sids=None, universes=None, free=False, data_frequency=None):\n params = {}\n params[\"ingest_type\"] = \"usstock\"\n if sids:\n params[\"sids\"] = sids\n if universes:\n params[\"universes\"] = universes\n if free:\n params[\"free\"] = free\n if data_frequency:\n params[\"data_frequency\"] = data_frequency\n\n response = houston.put(\"/zipline/bundles/{}\".format(code), params=params)\n\n houston.raise_for_status_with_json(response)\n return response.json()", "def test_create_hyperflex_app_catalog(self):\n pass", "def test_create_hyperflex_server_firmware_version(self):\n pass", "def test_create_deployment(self):\n pass", "def test_get_system(self):\n pass", "def _provision_package(self):", "def test_build(self):\r\n self.mkbundle('file1', 'file2', 
output=\"out\").build()\r\n assert self.get(\"media/out\") == \"foo\\nbar\"", "def install_bundle(client_bin, module, bundle_url):\n cmd = CLIENT_KARAF_COMMAND_WITH_ARGS.format(client_bin, PACKAGE_STATE_MAP[\"present\"], bundle_url)\n rc, out, err = module.run_command(cmd)\n\n bundle_id = None\n if rc != 0:\n reason = parse_error(out)\n module.fail_json(msg=reason)\n else:\n install_result = out.split(':')\n bundle_id = install_result[1].strip()\n\n # Parse out to get Bundle id.\n return True, cmd, bundle_id, out, err", "def test_bundle_is_product_pack(self):\n template = self.product_apple_bundle\n product_pack_ids = template.product_pack_ids\n self.assertTrue(template.is_pack, 'Product template is a bundle pack')\n self.assertTrue(len(product_pack_ids) != 0, 'Product: a product bundle should have product pack')\n self.assertEqual(len(product_pack_ids), 3, 'Product: a product bundle should have product pack')", "def test_update_software_asset_install_script(self):\n pass" ]
[ "0.89967304", "0.8258284", "0.7785115", "0.68196535", "0.6734564", "0.6677118", "0.6558964", "0.62449294", "0.6210243", "0.60930866", "0.60378927", "0.59984803", "0.59662664", "0.5965425", "0.584158", "0.5824617", "0.57435644", "0.56696844", "0.56671286", "0.56625974", "0.56306976", "0.56238157", "0.5622445", "0.5604874", "0.5595843", "0.5526382", "0.54925376", "0.54293525", "0.5421534", "0.5414397" ]
0.9654075
0
Test case for create_system_asset
def test_create_system_asset(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_retrieve_system_asset(self):\n pass", "def test_import_system_asset(self):\n pass", "def test_create_software_asset_bundle_from_system_module(self):\n pass", "def test_update_system_asset(self):\n pass", "def test_delete_system_asset(self):\n pass", "def test_get_test_asset(self):\n pass", "def test_import_software_asset(self):\n pass", "def test_import_test_asset(self):\n pass", "def test_update_software_asset(self):\n pass", "def test_list_system_assets(self):\n pass", "def test_update_software_asset_bundle(self):\n pass", "def test_update_asset(self):\n pass", "def test_update_test_asset(self):\n pass", "def test_update_software_asset_content(self):\n pass", "def test_ocean_assets_algorithm(publisher_ocean_instance):\n publisher = get_publisher_wallet()\n metadata = get_sample_algorithm_ddo()[\"service\"][0]\n metadata[\"attributes\"][\"main\"][\"files\"][0][\"checksum\"] = str(uuid.uuid4())\n ddo = publisher_ocean_instance.assets.create(metadata[\"attributes\"], publisher)\n assert ddo\n _ddo = wait_for_ddo(publisher_ocean_instance, ddo.did)\n assert _ddo, f\"assets.resolve failed for did {ddo.did}\"", "def create_asset(ocean, publisher):\n sample_ddo_path = get_resource_path(\"ddo\", \"ddo_sa_sample.json\")\n assert sample_ddo_path.exists(), \"{} does not exist!\".format(sample_ddo_path)\n\n asset = DDO(json_filename=sample_ddo_path)\n asset.metadata[\"main\"][\"files\"][0][\"checksum\"] = str(uuid.uuid4())\n my_secret_store = \"http://myownsecretstore.com\"\n auth_service = ServiceDescriptor.authorization_service_descriptor(my_secret_store)\n return ocean.assets.create(asset.metadata, publisher, [auth_service])", "def test_add_category_to_asset(self):\n pass", "def createAsset(assFolder, *args):\n createAssetUI(assFolder)", "def test_update_test_asset_content(self):\n pass", "def test_add_new_asset(self):\n self.assertEqual(self.all_assets.count(), 1)\n new_asset = Asset(asset_code=\"IC002\",\n serial_number=\"SN0045\",\n model_number=self.test_assetmodel,\n assigned_to=self.user)\n new_asset.save()\n self.assertEqual(self.all_assets.count(), 2)", "def test_update_asset_content(self):\n pass", "def test_create_image(self):\n pass", "def test_delete_asset(self):\n pass", "def test_submit_asset_to_submission_service(self):\n pass", "def test_ocean_assets_compute(publisher_ocean_instance):\n publisher = get_publisher_wallet()\n metadata = get_computing_metadata()\n metadata[\"main\"][\"files\"][0][\"checksum\"] = str(uuid.uuid4())\n ddo = publisher_ocean_instance.assets.create(metadata, publisher)\n assert ddo\n _ddo = wait_for_ddo(publisher_ocean_instance, ddo.did)\n assert _ddo, f\"assets.resolve failed for did {ddo.did}\"", "def test_get_test_assets(self):\n pass", "def test_delete_software_asset_bundle(self):\n pass", "def test_existing_content_asset(self):\n with self.app.test_client() as client:\n response = client.get('/contentassets/logo.png')\n\n self.assert200(response)", "def test_create_system_entire(self):\n pass", "def test_register_asset(publisher_ocean_instance):\n ocn = publisher_ocean_instance\n ddo_reg = ocn.assets.ddo_registry()\n block = ocn.web3.eth.blockNumber\n alice = get_publisher_wallet()\n bob = get_consumer_wallet()\n\n def _get_num_assets(_minter):\n dids = [add_0x_prefix(did_to_id(a)) for a in ocn.assets.owner_assets(_minter)]\n dids = [a for a in dids if len(a) == 42]\n return len([a for a in dids if DataToken(a).contract_concise.isMinter(_minter)])\n\n num_assets_owned = _get_num_assets(alice.address)\n\n original_ddo = create_asset(ocn, 
alice)\n assert original_ddo, \"create asset failed.\"\n\n # try to resolve new asset\n did = original_ddo.did\n asset_id = original_ddo.asset_id\n log = ddo_reg.get_event_log(ddo_reg.EVENT_METADATA_CREATED, block, asset_id, 30)\n assert log, \"no ddo created event.\"\n\n ddo = wait_for_ddo(ocn, did)\n assert ddo, \"ddo is not found in cache.\"\n ddo_dict = ddo.as_dictionary()\n original = original_ddo.as_dictionary()\n assert ddo_dict[\"publicKey\"] == original[\"publicKey\"]\n assert ddo_dict[\"authentication\"] == original[\"authentication\"]\n assert ddo_dict[\"service\"]\n assert original[\"service\"]\n metadata = ddo_dict[\"service\"][0][\"attributes\"]\n if \"datePublished\" in metadata[\"main\"]:\n metadata[\"main\"].pop(\"datePublished\")\n assert (\n ddo_dict[\"service\"][0][\"attributes\"][\"main\"][\"name\"]\n == original[\"service\"][0][\"attributes\"][\"main\"][\"name\"]\n )\n assert ddo_dict[\"service\"][1] == original[\"service\"][1]\n\n # Can't resolve unregistered asset\n unregistered_did = DID.did({\"0\": \"0x00112233445566\"})\n with pytest.raises(ValueError):\n ocn.assets.resolve(unregistered_did)\n\n # Raise error on bad did\n invalid_did = \"did:op:0123456789\"\n with pytest.raises(ValueError):\n ocn.assets.resolve(invalid_did)\n\n meta_data_assets = ocn.assets.search(\"\")\n if meta_data_assets:\n print(\"Currently registered assets:\")\n print(meta_data_assets)\n\n # Publish the metadata\n _ = ddo.metadata[\"main\"][\"name\"]\n _name = \"updated name\"\n ddo.metadata[\"main\"][\"name\"] = _name\n assert ddo.metadata[\"main\"][\"name\"] == _name\n with pytest.raises(ValueError):\n ocn.assets.update(ddo, bob)\n\n _ = ocn.assets.update(ddo, alice)\n log = ddo_reg.get_event_log(ddo_reg.EVENT_METADATA_UPDATED, block, asset_id, 30)\n assert log, \"no ddo updated event\"\n _asset = wait_for_update(ocn, ddo.did, \"name\", _name)\n assert _asset, \"Cannot read asset after update.\"\n assert (\n _asset.metadata[\"main\"][\"name\"] == _name\n ), \"updated asset does not have the new updated name !!!\"\n\n assert (\n ocn.assets.owner(ddo.did) == alice.address\n ), \"asset owner does not seem correct.\"\n\n assert _get_num_assets(alice.address) == num_assets_owned + 1" ]
[ "0.81536865", "0.7973233", "0.7644901", "0.75932163", "0.729875", "0.71742857", "0.71433455", "0.6814606", "0.6709667", "0.6696938", "0.6659669", "0.662001", "0.65640193", "0.6529609", "0.64879006", "0.64605457", "0.64437705", "0.6391738", "0.63574326", "0.63473016", "0.6344878", "0.63433695", "0.6296046", "0.6286772", "0.6282377", "0.62617385", "0.62587875", "0.6228701", "0.617263", "0.6121546" ]
0.94222957
0
Test case for create_system_entire
def test_create_system_entire(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_system(sys_structure):\n pass", "def test_get_system(self):\n pass", "def test_System_creation(self):\n s1 = System()\n self.assertEqual(s1.get_library_name(), \"default\")", "def test_get_systems(self):\n pass", "def test_create_device_template(self):\n pass", "def test_subsystems(self):\n pass", "def _initialize_system(self):\n # Make sure that the system is actually valid before trying anything\n self._validate_system()\n\n # Do any necessary template resolution\n self._system.template = resolve_template(self._system.template)\n\n existing_system = self._ez_client.find_unique_system(\n name=self._system.name,\n version=self._system.version,\n namespace=self._system.namespace,\n )\n\n if not existing_system:\n try:\n # If this succeeds can just finish here\n return self._ez_client.create_system(self._system)\n except ConflictError:\n # If multiple instances are starting up at once and this is a new system\n # the create can return a conflict. In that case just try the get again\n existing_system = self._ez_client.find_unique_system(\n name=self._system.name,\n version=self._system.version,\n namespace=self._system.namespace,\n )\n\n # If we STILL can't find a system something is really wrong\n if not existing_system:\n raise PluginValidationError(\n \"Unable to find or create system {0}\".format(self._system)\n )\n\n # We always update with these fields\n update_kwargs = {\n \"new_commands\": self._system.commands,\n \"metadata\": self._system.metadata,\n \"description\": self._system.description,\n \"display_name\": self._system.display_name,\n \"icon_name\": self._system.icon_name,\n \"template\": self._system.template,\n }\n\n # And if this particular instance doesn't exist we want to add it\n if not existing_system.has_instance(self._config.instance_name):\n update_kwargs[\"add_instance\"] = Instance(name=self._config.instance_name)\n\n return self._ez_client.update_system(existing_system.id, **update_kwargs)", "def test_create_device(self):\n pass", "def test_create_device(self):\n pass", "def test_create_device1(self):\n pass", "def test_create_software_bundle_from_system_module(self):\n pass", "def createSystemSims(self):\n # create systems\n import anwp.sims\n self.systemSims = []\n for systemID, systemDict in self.game.allSystems.iteritems():\n empireDict = self.game.allEmpires[systemDict['myEmpireID']]\n imageFileName = '%s%s.png' % (self.game.app.simImagePath, systemDict['imageFile']) \n \n # create sim\n sim = SystemEntity(self, anwp.sims.categories.ClickableCategory(imageFileName,'system'), systemDict, empireDict)\n \n # add sim to world\n self.systemSims.append(sim)\n x = systemDict['x']\n y = systemDict['y']\n facing = 0\n speed = 0\n sim.turnRate = 0\n self.world.addToWorld(sim, x, y, facing, speed)", "def test_create_tpm(self):\n command_line = self._MENU + [self._POOLNAME] + self._DEVICES + [\"--clevis=tpm2\"]\n TEST_RUNNER(command_line)", "def generate_testsystem(smiles = 'CCCC',\n forcefield_files = ['amber14/protein.ff14SB.xml', 'amber14/tip3p.xml'],\n forcefield_kwargs = {'removeCMMotion': False, 'ewaldErrorTolerance': 1e-4, 'constraints' : None, 'hydrogenMass' : 4 * unit.amus},\n nonperiodic_forcefield_kwargs = {'nonbondedMethod': app.NoCutoff},\n periodic_forcefield_kwargs = {'nonbondedMethod': app.PME},\n small_molecule_forcefield = 'gaff-2.11',\n padding=9*unit.angstroms,\n ionicStrength=0.0*unit.molar,\n water_model = 'tip3p',\n pressure = 1.0 * unit.atmosphere,\n temperature = 300 * unit.kelvin,\n barostat_period = 50,\n **kwargs\n ):\n from 
openforcefield.topology import Molecule\n from perses.utils.openeye import smiles_to_oemol\n from openmmforcefields.generators.system_generators import SystemGenerator\n from perses.utils.openeye import OEMol_to_omm_ff\n from simtk import openmm\n from qmlify.utils import pull_force_by_name\n\n oemol = smiles_to_oemol(smiles)\n off_molecules = [Molecule.from_openeye(oemol)]\n vac_system_generator = SystemGenerator(forcefields=forcefield_files,\n small_molecule_forcefield=small_molecule_forcefield,\n forcefield_kwargs=forcefield_kwargs,\n nonperiodic_forcefield_kwargs = nonperiodic_forcefield_kwargs, molecules = off_molecules)\n barostat = openmm.MonteCarloBarostat(pressure, temperature, barostat_period)\n sol_system_generator = SystemGenerator(forcefields=forcefield_files,\n small_molecule_forcefield=small_molecule_forcefield,\n forcefield_kwargs=forcefield_kwargs,\n periodic_forcefield_kwargs = periodic_forcefield_kwargs,\n molecules = off_molecules,\n barostat = barostat)\n\n\n vac_system, vac_positions, vac_topology = OEMol_to_omm_ff(oemol, vac_system_generator)\n\n #now i can attempt to solvate\n modeller = app.Modeller(vac_topology, vac_positions)\n modeller.addSolvent(sol_system_generator.forcefield, model=water_model, padding=padding, ionicStrength=ionicStrength)\n sol_positions, sol_topology = modeller.getPositions(), modeller.getTopology()\n sol_positions = unit.quantity.Quantity(value = np.array([list(atom_pos) for atom_pos in sol_positions.value_in_unit_system(unit.md_unit_system)]), unit = unit.nanometers)\n sol_system = sol_system_generator.create_system(sol_topology)\n\n vac_sys_pos_top = (vac_system, vac_positions, vac_topology)\n sol_sys_pos_top = (sol_system, sol_positions, sol_topology)\n\n #a quick assertion to make sure the nonbonded forces are being treated properly\n vac_nbf, sol_nbf = pull_force_by_name(vac_system, 'NonbondedForce'), pull_force_by_name(sol_system, 'NonbondedForce')\n assert not vac_nbf.usesPeriodicBoundaryConditions()\n assert sol_nbf.usesPeriodicBoundaryConditions()\n\n return vac_sys_pos_top, sol_sys_pos_top", "def test_create_tang_2(self):\n command_line = (\n self._MENU\n + [self._POOLNAME]\n + self._DEVICES\n + [\"--clevis=tang\", \"--thumbprint=print\", \"--tang-url=http\"]\n )\n TEST_RUNNER(command_line)", "def create_system_data():\n system_data = dict()\n system_data['system'] = dict()\n system_data['system']['primary'] = dict()\n system_data['system']['primary']['controllers'] = dict()\n system_data['system']['primary']['controllers']['re0'] = dict()\n system_data['system']['primary']['controllers']['re0']['hostname'] = 'abc'\n system_data['system']['primary']['controllers']['re0']['mgt-ip'] = '1.1.1.1'\n system_data['system']['primary']['controllers']['re0']['osname'] = 'Paragon'\n system_data['system']['primary']['name'] = 'abc'\n system_data['system']['primary']['model'] = 'Paragon'\n system_data['system']['primary']['make'] = 'Calnex'\n system_data['system']['primary']['server-ip'] = '1.1.1.2'\n system_data['system']['primary']['osname'] = 'Paragon'\n return system_data", "def test_create_run(self):\n pass", "def test_system_group_add_system(audreyvars, system_uuid, tunnel_requested, system_groups):\n server = audreyvars[\"KATELLO_HOST\"]\n login = audreyvars.get(\"KATELLO_USER\", \"admin\")\n org = audreyvars.get(\"KATELLO_ORG\", \"redhat\")\n password = audreyvars.get(\"KATELLO_PASS\", \"admin\")\n\n # If using a tunnel to access ec2, an alternative port is needed\n if tunnel_requested:\n port = 
audreyvars.get(\"SSH_TUNNEL_KATELLO_PORT\", 1443)\n else:\n port = audreyvars.get(\"KATELLO_PORT\", 443)\n\n # Locate existing system groups, and add system\n for group_name in system_groups:\n result = common.katello.system_group_query(server, port, org, login, password, group_name)\n assert len(result) > 0, \"System group '%s' not found\" % group_name\n group_id = result[0].get('id')\n common.katello.system_group_add_system(server, port, org,\n system_uuid, login, password,\n group_id)", "def test06_machine_create(self, image_name=\"Ubuntu 16.04 x64\"):\n self.lg('%s STARTED' % self._testID)\n self.lg(' create %s machine ' % self.machine_name)\n self.assertTrue(self.EUMachines.end_user_create_virtual_machine(image_name,self.machine_name))\n self.lg('delete %s machine ' % self.machine_name)\n self.assertTrue(self.EUMachines.end_user_delete_virtual_machine(self.machine_name))\n self.lg('%s ENDED' % self._testID)", "def test_create_tang_1(self):\n command_line = (\n self._MENU\n + [self._POOLNAME]\n + self._DEVICES\n + [\"--clevis=tang\", \"--trust-url\", \"--tang-url=http\"]\n )\n TEST_RUNNER(command_line)", "def test_create_device_data(self):\n pass", "def initialize(self, create_new=True, sysid=\"\"):", "def test_create_deployment_entire(self):\n pass", "def test_create_system_asset(self):\n pass", "def test_create_already_exists():\n current_folder = os.path.dirname(os.path.realpath(__file__))\n misc_folder = os.path.join(current_folder, \"misc\")\n model_file = os.path.join(misc_folder, \"model.tar\")\n create.main(\"mlp\", \"10:12:8\", model_file)\n # TODO: Check if error was logged", "def test_speciesCreation():\n \n sys = LVsystem.Ecosystem()\n sys.addSpecies('rabbit')\n sys.addSpecies('fox')\n sys.setInteraction('rabbit', 'fox', -1)\n sys.setInteraction('fox', 'rabbit', 1)\n sys.setInitialCond('rabbit', 10)\n sys.setInitialCond('fox', 5)\n sys.setGrowthRate('rabbit', 1)\n sys.setGrowthRate('fox', -1)\n sys.setCarrCap('rabbit', 10000)\n sys.setCarrCap('fox', 10000)\n sys.setChangeRate('rabbit', 10)\n sys.setChangeRate('fox', 20) \n \n assert len(sys.species_list) == 2\n assert sys.species_list == ['rabbit','fox']\n assert sys.intMatrix == {('rabbit','fox'):-1, ('fox','rabbit'):1}\n\n sys.removeSpecies('rabbit')\n sys.removeSpecies('fox')", "def create():", "def create():", "def test_create_global_system_configs(self):\n with self.override_role():\n self._create_global_system_config()", "def test_psystem3():\n psys = PSystem('z', [Membrane('a', [('a', 'b')], [\n Membrane('b', [('b', '_')])])], 1)\n return psys" ]
[ "0.7678952", "0.70999867", "0.6993825", "0.69247663", "0.6622011", "0.65675867", "0.6427302", "0.6399686", "0.6399686", "0.63301903", "0.63018954", "0.6299899", "0.62945867", "0.6257245", "0.6245801", "0.62265", "0.6214174", "0.61735445", "0.61556554", "0.6151772", "0.6128185", "0.61191815", "0.60976267", "0.6092165", "0.6081462", "0.60804516", "0.60293454", "0.60293454", "0.6028678", "0.60212505" ]
0.92615867
0
Test case for create_team
def test_create_team(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_teams_create(self):\n pass", "def test_createteam(self):\n p1, p2, p3 = self.create3persons()\n t = model.Team(name='Tigers', persons=[p1, p2, p3])\n id = t.store()\n t2 = model.Team(id=id)\n self.assertEqual(t.name, t2.name)\n self.assertEqual(t.persons, t2.persons)", "def test_create_new_team(self):\n default_user = AnotherUserFactory(email_confirmed=True)\n token = Token.objects.get(user=default_user)\n self.client.credentials(\n HTTP_AUTHORIZATION=f'Token {token.key}')\n\n data = {\n 'name': 'Griffons',\n 'description': 'Only strong souls can be joined us.'\n }\n response = self.client.post(reverse('api:teams-list'), data)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertTrue(Team.objects.filter(name=data['name']).exists())", "def test_add_team_member(self):\n pass", "def test_post_team(self):\n response = self.client.post(url_for('teams'),\n data={\n 'name': 'test team',\n 'capacity': 11,\n 'number_players': 6,\n 'pitch_postcode': 'E1 6LT',\n 'time': '2019-01-01 13:00'\n })\n self.assertEqual(response.status_code, 201)\n self.assertIn(b'Team created successfully', response.data)\n self.assertEqual(db.session.query(Team).count(), 1)", "def test_assign_managing_team(self):\n pass", "def test_teams_save_team_v1(self):\n pass", "def test_create_team_creates_survey(self):\n user = User.create(name='User Foo', email='[email protected]')\n user.put()\n\n code = 'trout viper'\n\n team_response = self.testapp.post_json(\n '/api/teams',\n {\n 'name': 'Team Foo',\n 'code': code,\n 'program_id': self.ep_program.uid,\n },\n headers=self.login_headers(user),\n )\n team_dict = json.loads(team_response.body)\n\n survey_result = Survey.get(team_id=team_dict['uid'])\n self.assertEqual(len(survey_result), 1)\n survey = survey_result[0]\n\n return user, team_dict", "def create_team_action(request):\n # Create the team.\n now = datetime.utcnow()\n user_id = request.context.user_id\n user = load_user(request.db, user_id)\n # Select a round based on the user's badges.\n round_ids = find_round_ids_with_badges(request.db, user['badges'], now)\n if len(round_ids) == 0:\n # The user does not have access to any open round.\n raise ApiError('not qualified for any open round')\n if len(round_ids) > 1:\n # XXX The case where a user has badges for multiple open rounds\n # is currently handled by picking the first one, which is the\n # one that has the greatest id. 
This is unsatisfactory.\n pass\n round_id = round_ids[0]\n round_ = load_round(request.db, round_id, now)\n if not round_['is_registration_open']:\n raise ApiError('registration is closed')\n # Create the team.\n team_id = create_user_team(request.db, user_id, now)\n # Create a participation.\n create_participation(request.db, team_id, round_id, now=now)\n # Ensure the user gets team credentials.\n reset_user_principals(request)\n return {'success': True}", "def test_retrieve_team(self):\n pass", "def create_team():\n # Get the user's id from access token\n uid = get_jwt_identity()\n\n # If no user id, return error\n if not uid:\n return make_response(\n jsonify({'error': 'Could not verify!'}),\n 401,\n {'WWW-Authentication': 'Basic realm=\"Login required!\"'})\n\n # Try to get user from database\n query = User.query.filter_by(public_id=uid)\n\n try:\n user = query.one()\n\n # If no result found, return error\n except NoResultFound:\n return jsonify({'error': 'No result found!'}), 401\n\n # If some other sqlalchemy error is thrown, return error\n except SQLAlchemyError:\n return jsonify({'error': 'Some problem occurred!'}), 400\n\n # Get team data from request\n data = request.get_json()\n\n # Verify that all required team data was sent\n if not data['name'] or not data['group']:\n return make_response(jsonify({'error': 'Missing data!'}), 400)\n\n # Create team object\n team = Team(\n name=data['name'],\n iso_2=data['iso_2'],\n group=data['group'])\n\n # Try to add team to database\n try:\n db.session.add(team)\n db.session.commit()\n\n # If team name already in database, return error\n except IntegrityError:\n return jsonify({\n 'error': 'Team with name already exists'\n }), 400\n\n # If some other sqlalchemy error is thrown, return error\n except SQLAlchemyError:\n return jsonify({'error': 'Some problem occurred!'}), 400\n\n # Serialze the team object and return json response\n team_schema = TeamSchema()\n output = team_schema.dump(team).data\n\n return jsonify({\n 'success': 'Successfully retrieved team.',\n 'team': output\n }), 200", "def test_teams_save_team_member_v1(self):\n pass", "def test_handle_create(self):\n test_user = User(\"userid\")\n test_user.permissions_level = Permissions.admin\n test_user.github_username = \"githubuser\"\n self.db.retrieve.return_value = test_user\n self.gh.org_create_team.return_value = \"team_id\"\n inputstring = \"team create b-s --name 'B S'\"\n outputstring = \"New team created: b-s, name: B S, \"\n self.assertTupleEqual(self.testcommand.handle(inputstring, user),\n (outputstring, 200))\n inputstring += \" --platform web\"\n outputstring += \"platform: web, \"\n self.assertTupleEqual(self.testcommand.handle(inputstring, user),\n (outputstring, 200))\n self.gh.org_create_team.assert_called()\n self.gh.add_team_member.assert_called_with('githubuser', 'team_id')\n inputstring += \" --channel 'channelID'\"\n outputstring += \"added channel, \"\n self.sc.get_channel_users.return_value = ['someID', 'otherID']\n self.assertTupleEqual(self.testcommand.handle(inputstring, user),\n (outputstring, 200))\n self.sc.get_channel_users.assert_called_once_with(\"channelID\")\n self.db.retrieve.assert_called_with(User, 'otherID')\n self.gh.add_team_member.assert_called()\n inputstring += \" --lead 'someID'\"\n outputstring += \"added lead\"\n self.gh.has_team_member.return_value = False\n print(self.testcommand.handle(inputstring, user))\n self.assertTupleEqual(self.testcommand.handle(inputstring, user),\n (outputstring, 200))\n self.db.store.assert_called()", 
"def test_register_team_already_team(self):\n result = self.client.post(\"/teams\", data={\"already_team\": \"Killers\"}, follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n # self.assertIn(b\"Sorry! That team name is already in use!\", result.data) #error:not in /teams, but should be in createTeam", "def test_get_individual_team(self):\n args = {\n 'name': 'test team',\n 'capacity': '11',\n 'number_players': '6',\n 'pitch_postcode': 'E1 6LT',\n 'time': '2019-01-01 13:00'\n }\n team = Team(args)\n db.session.add(team)\n db.session.commit()\n response = self.client.get('/teams/1')\n self.assertEqual(response.status_code, 200)\n self.assertIn(b'test team', response.data)", "def creat_team(self):\n te = Teams()\n per = Persons()\n teamlist = []\n for one in per.find({'role':'leader'},{'team_name'}):\n if one['team_name'] not in teamlist:\n teamlist.append(one['team_name'])\n # print len(teamlist)\n for team in teamlist:\n tmp = {'name': '', 'leader_email': '', 'person_emails': []}\n tmp['name'] = team\n tmp['leader_email'] = per.get_one({'team_name':team,'role':'leader'})['email']\n for one in per.find({'team_name':team},{'email'}):\n tmp['person_emails'].append(one['email'])\n print tmp\n search_t = te.get_one({'name':team})\n if search_t is None:\n te.insert_one(tmp)\n else:\n te.update_one({'name':team,'leader_email':'','person_emails':''},tmp,cover=True)", "def test_add_team_manager_to_team(self):\n pass", "def test_teams_add_user_to_team_v2(self):\n pass", "def post(self):\n req = team_req.parse_args(strict=True)\n curr_user = api.user.get_user()\n if curr_user[\"teacher\"]:\n raise PicoException(\"Teachers may not create teams\", 403)\n req[\"team_name\"] = req[\"team_name\"].strip()\n if not all(\n [\n c in string.digits + string.ascii_lowercase + \" ()+-,#'&!?\"\n for c in req[\"team_name\"].lower()\n ]\n ):\n raise PicoException(\n \"Team names cannot contain special characters other than \"\n + \"()+-,#'&!?\",\n status_code=400,\n )\n\n if req[\"team_name\"] == curr_user[\"username\"]:\n raise PicoException(\"Invalid team name\", status_code=409)\n\n new_tid = api.team.create_and_join_new_team(\n req[\"team_name\"], req[\"team_password\"], curr_user\n )\n res = jsonify({\"success\": True, \"tid\": new_tid})\n res.status_code = 201\n return res", "def test_update_team(self):\n pass", "async def create_team(new_team: BaseTeam, db_handler: DBHandler = Depends(database_dependency)):\n try:\n inserted_record = await db_handler.insert_team(new_team=new_team)\n inserted_record = init_BaseTeam(inserted_record)\n except DBHandlerException as e:\n return JSONResponse(status_code=400)\n\n return inserted_record", "def test_cannot_create_new_team(self):\n\n data = {\n 'name': 'Griffons',\n 'description': 'Only strong souls can be joined us.'\n }\n response = self.client.post(reverse('api:teams-list'), data)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_posting_a_teammate(self):\n response = self.client.post(\n '/team/all/', {'name': 'New Name',\n 'email': '[email protected]',\n 'slackhandle': '@NewTeam'},\n format='json')\n self.assertEqual(response.data, {'status': 201,\n \"data\": {'id': 1, 'name': 'New Name',\n 'email': '[email protected]',\n 'slackhandle': '@NewTeam'}})", "def test_teams_add_user_to_team_v1(self):\n pass", "def test_handle_create_no_team_lookup_error(self):\r\n self.mock_facade.query.return_value = []\r\n self.assertTupleEqual(\r\n self.testcommand.handle(\"project create repo-link team-name\",\r\n user),\r\n (\"0 teams 
found with GitHub team name team-name\", 200))", "def test_delete_team(self):\n pass", "def test_get_teams(self):\n pass", "def test_get_teams(self):\n pass", "def test_teams_invite_member(self):\n pass", "def create(self, request):\n serializer = data_serializers.CreateTeamSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n request_data = serializer.save()\n try:\n new_team_entity = self.controller.create_team(request_data=request_data)\n serializer = data_serializers.PresentTeamSerializer(new_team_entity)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except domain_exceptions.TeamHasALeader as e:\n return Response(e.message, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)" ]
[ "0.8881791", "0.83876824", "0.83771837", "0.79718596", "0.79485494", "0.78821754", "0.7869251", "0.7798998", "0.7745506", "0.7734325", "0.7662799", "0.7542226", "0.7529002", "0.75064474", "0.7461149", "0.7423147", "0.74031717", "0.73948455", "0.7387496", "0.7367145", "0.73669374", "0.7347898", "0.73438066", "0.72814316", "0.72810155", "0.72527766", "0.7249162", "0.7249162", "0.7194872", "0.7178492" ]
0.9365454
0
Test case for create_template_subsciption
def test_create_template_subsciption(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_namespaced_template(self):\n pass", "def test_create_subscription_template(self):\n pass", "def test_register_template(self):\n pass", "def test_create_activity_template(self):\n pass", "def test_create_namespaced_processed_template(self):\n pass", "def test_otoroshi_controllers_adminapi_templates_controller_create_from_template_tcp(self):\n pass", "def test_create_device_template(self):\n pass", "def test_create_template_for_all_namespaces(self):\n pass", "def test_template_feedback(self):\r\n pass", "def test_retrieve_template_registration(self):\n pass", "def test_get_tosca_template(self):\n pass", "def test_add_template(self):\n\n widget = part.Part(self.api, pk=10000)\n\n n = len(widget.getTestTemplates())\n\n part.PartTestTemplate.create(self.api, {\n 'part': widget.pk,\n 'test_name': f\"Test_Name_{n}\",\n 'description': 'A test or something',\n 'required': True,\n })\n\n self.assertEqual(len(widget.getTestTemplates()), n + 1)", "def test_get_template_subscription(self):\n pass", "def create_template(self):\n options = {\n 'dir': os.path.join(os.path.dirname(__file__)),\n 'template': self.template,\n 'project': self.project,\n 'answers_file': self.answers_file,\n }\n return self.env.run(\n '%(dir)s/bin/mrbob -O %(project)s --config '\n '%(dir)s/%(answers_file)s %(dir)s/bobtemplates/%(template)s'\n % options)", "def test_otoroshi_controllers_adminapi_templates_controller_template_spec(self):\n pass", "def test_customization_template_crud():\n template_crud = pxe.CustomizationTemplate(\n name=generate_random_string(size=8),\n description=generate_random_string(size=16),\n image_type='RHEL-6',\n script_type='Kickstart',\n script_data='Testing the script')\n\n template_crud.create()\n with update(template_crud):\n template_crud.name = template_crud.name + \"_update\"\n template_crud.delete(cancel=False)", "def test_customization_template_crud():\n template_crud = pxe.CustomizationTemplate(\n name=generate_random_string(size=8),\n description=generate_random_string(size=16),\n image_type='RHEL-6',\n script_type='Kickstart',\n script_data='Testing the script')\n\n template_crud.create()\n with update(template_crud):\n template_crud.name = template_crud.name + \"_update\"\n template_crud.delete(cancel=False)", "def test_share_template_registration(self):\n pass", "def post_service_template_create(self, resource_dict):\n pass", "def test_update_template_registration(self):\n pass", "def test_create_project_from_template(self):\n project_new = self.project_template.take_template()\n\n self.assertTrue(project_new)", "def create_template(self):\n return '{}/{}.html'.format(self.object_name, self.create_endpoint)", "def test_get_subscription_template(self):\n pass", "def test_create_template_with_experiment_id_success(self):\n template_name = \"template-3\"\n experiment_id = util.MOCK_UUID_1\n\n rv = TEST_CLIENT.post(\n \"/templates\",\n json={\n \"name\": template_name,\n \"experimentId\": experiment_id,\n },\n )\n result = rv.json()\n\n expected = {\n \"uuid\": mock.ANY,\n \"name\": template_name,\n \"tasks\": [\n {\n \"uuid\": util.MOCK_UUID_1,\n \"task_id\": util.MOCK_UUID_1,\n \"dependencies\": [],\n \"position_x\": 0.0,\n \"position_y\": 0.0,\n },\n {\n \"uuid\": util.MOCK_UUID_4,\n \"task_id\": util.MOCK_UUID_1,\n \"dependencies\": [util.MOCK_UUID_1],\n \"position_x\": 0.0,\n \"position_y\": 0.0,\n },\n ],\n \"experimentId\": experiment_id,\n \"deploymentId\": None,\n \"createdAt\": mock.ANY,\n \"updatedAt\": mock.ANY,\n }\n self.assertEqual(result, expected)\n 
self.assertEqual(rv.status_code, 200)", "def test_dynamic_template(hass: HomeAssistant) -> None:\n schema = vol.Schema(cv.dynamic_template)\n\n for value in (\n None,\n 1,\n \"{{ partial_print }\",\n \"{% if True %}Hello\",\n [\"test\"],\n \"just a string\",\n ):\n with pytest.raises(vol.Invalid):\n schema(value)\n\n options = (\n \"{{ beer }}\",\n \"{% if 1 == 1 %}Hello{% else %}World{% endif %}\",\n # Function added as an extension by Home Assistant\n \"{{ expand('group.foo')|map(attribute='entity_id')|list }}\",\n # Filter added as an extension by Home Assistant\n \"{{ ['group.foo']|expand|map(attribute='entity_id')|list }}\",\n )\n for value in options:\n schema(value)", "def test_starting_template(checker):\n contents = labeled.contents(label=\"template\")\n _ = tomllib.loads(contents)", "def test_create_template_experiment_or_deployment_needed_to_create(self):\n rv = TEST_CLIENT.post(\n \"/templates\",\n json={\n \"name\": \"foo\",\n },\n )\n result = rv.json()\n\n expected = {\n \"message\": \"experimentId or deploymentId needed to create template.\",\n \"code\": \"MissingRequiredExperimentIdOrDeploymentId\",\n }\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 400)", "def test_object_template_validation():\n length_template = PropertyTemplate(\"Length\", bounds=RealBounds(2.0, 3.5, 'cm'))\n dial_template = ConditionTemplate(\"dial\", bounds=IntegerBounds(0, 5))\n color_template = ParameterTemplate(\"Color\", bounds=CategoricalBounds([\"red\", \"green\", \"blue\"]))\n\n with pytest.raises(TypeError):\n MaterialTemplate()\n\n with pytest.raises(ValueError):\n MaterialTemplate(\"Block\", properties=[[length_template, RealBounds(3.0, 4.0, 'cm')]])\n\n with pytest.raises(ValueError):\n ProcessTemplate(\"a process\", conditions=[[color_template, CategoricalBounds([\"zz\"])]])\n \n with pytest.raises(ValueError):\n MeasurementTemplate(\"A measurement\", parameters=[[dial_template, IntegerBounds(-3, -1)]])", "def test_templates(self):\n path = str(Template())\n self.assertTrue(os.path.exists(path))", "def test_team_template_folders_id_templates_post(self):\n pass" ]
[ "0.7543965", "0.74555725", "0.73099774", "0.7259014", "0.7108045", "0.7072468", "0.69551826", "0.68299764", "0.681227", "0.67984265", "0.67033446", "0.6637706", "0.6623759", "0.6559831", "0.6538498", "0.6537776", "0.6537776", "0.6491336", "0.6463776", "0.64331627", "0.64062804", "0.64032453", "0.6400782", "0.63922554", "0.6376795", "0.6349786", "0.6339685", "0.6319431", "0.6281441", "0.62756336" ]
0.93009686
0
Test case for create_user
def test_create_user(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_createUser_single(self):\n #TODO: this and other tests", "def test_able_to_create_a_user():\n response = api_helper.create_user(pytest.test_user)\n assert response.status_code == 201\n check_user_data_in_response(response.json()[\"data\"])", "def test_create_user(self):\n \n new_user = {\"username\": \"beny1976\", \"vocab_count\": 0, \"name\": \"beny rood\", \"sex\": \"male\", \"dob\": \"18/10/1979\"}\n msg = app.create_user(predefined_user=new_user)\n self.assertTrue(msg != \"\")", "def create_user(self):\n User.objects.create_user('test', '[email protected]', 'testing')", "def test_create_user(self):\n self.assertIsInstance(\n User.objects.create_user(username=\"username\", email=\"[email protected]\", password=\"password\"), User)", "def users_create():", "def test_user_creation(self):\r\n \r\n self.assertIsInstance(self.user, User)", "def test_create_user(self):\n data = {\n \"firstname\": \"John\",\n \"lastname\": \"Doe\",\n \"password\": \"supersecret\",\n \"password_repeat\": \"supersecret\",\n }\n res = self.post(url=\"/users\", data=data)\n self.assertEqual(res.status_code, 200)\n self.assertIn(b\"Created user.\", res.data)\n\n user = User.query.filter_by(id=6).first()\n self.assertTrue(user)\n self.assertEqual(user.firstname, \"John\")\n self.assertEqual(user.lastname, \"Doe\")\n self.assertFalse(user.is_verified)", "def test_create_user(self):\n first_name = \"b\"\n last_name = \"b\"\n username = \"b\"\n email = \"b\"\n password = \"b\"\n\n manager = UserManager()\n result = manager.create(first_name, last_name, username, email, password)\n self.assertTrue(result)\n\n user = User.objects.get(username=username)\n self.assertEqual(first_name, user.first_name)\n self.assertEqual(last_name, user.last_name)\n self.assertEqual(username, user.username)\n self.assertEqual(email, user.email)\n self.assertEqual(password, user.testdata.password)\n self.assertEqual(username, user.testdata.username)\n self.assertEqual(email, user.testdata.email)\n self.assertNotEqual(user.authtests, None)", "def test_create_user(self):\n self.login()\n res = self.submit()\n\n assert res.status_code == 200", "def test_user_creation(self):\n self.assertTrue(User.objects.exists())", "def test_create_user_exists(self):\n payload = {\n \"email\": \"[email protected]\",\n \"name\": \"Test\",\n 'password': 'test123'\n }\n create_user(**payload)\n\n res = self.client.post(CREATE_USER_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_createuser():\n url = baseUrl + userurl\n payload = user_payload\n logging.info(\"Create a user: %s\" % payload)\n r = requests.post(url, data=json.dumps(payload), headers=header)\n assert r.status_code == 201\n resp = r.text\n assert resp == 'Success'", "def test_add_user(self):\n pass", "def test_create_user(self):\n user = User(\"Gideon Bamuleseyo\", \"[email protected]\", \"secret\")\n self.assertEqual(user.name, \"Gideon Bamuleseyo\")\n self.assertEqual(user.email, \"[email protected]\")\n self.assertEqual(user.password, \"secret\")", "def sample_user(email=\"[email protected]\",\n password=\"password123\",\n name=\"some name\"):\n return get_user_model().objects.create_user(email=email,\n password=password,\n name=name)", "def create_test_user():\n return User.objects.create(username='test_username', password='test_password')", "def test_create_user_exists(self):\n payload = {\n \"user\": {\n \"email\": \"[email protected]\",\n \"password\": \"useruser111\",\n \"is_doctor\": False,\n \"is_hospital_admin\": True\n },\n 
'first_name': 'Test',\n 'last_name': 'JustUser'\n }\n sample_user(\n payload['user']['email'], payload['user']['password'],\n payload['user']['is_doctor'], payload['user']['is_hospital_admin']\n ),\n response = self.client.post(HOSPITAL_ADMIN_REGISTER_URL, payload, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create_valid_user_success(self):\n payload = {\n 'email': '[email protected]',\n 'password': 'testpass',\n 'name': 'TestName'\n }\n res = self.client.post(CREATE_USER_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n \n user = get_user_model().objects.get(**res.data)\n \n self.assertTrue(user.check_password(payload['[email protected]', \n 'testpass']))\n self.assertNotIn('testpass', res.data)", "def test_user_exists(self):\n\n payload = {\n 'email': '[email protected]',\n 'password': 'test11',\n 'name': \"test name\"\n }\n\n create_user(**payload)\n\n res = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create_user(self):\n email = '[email protected]'\n password = 'testPass'\n user = get_user_model().objects.create_user(\n email=email,\n password=password\n )\n\n self.assertEqual(user.email, email)\n self.assertEqual(user.role, Role.PLAYER)\n self.assertTrue(user.check_password(password))\n self.assertTrue(user.is_active)\n self.assertFalse(user.is_staff)", "def test_create(self, client):\n count = get_user_model().objects.count()\n data = factory.build(dict, FACTORY_CLASS=UserFactory)\n url = reverse('users:create')\n response = client.post(url, data)\n assert response.status_code == 302\n\n user = get_user_model().objects.last()\n assert user.username == data['username']\n assert user.birthday == datetime.datetime.strptime(data['birthday'], '%Y-%m-%d').date()\n assert get_user_model().objects.count() == count + 1", "def test_create_user_user_exists(self):\n create_mock_user(**self.mock_user)\n\n res = self.client.post(CREATE_USER_URL, self.mock_user)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_user_create(self):\n\n # Creates event\n event = {\n \"clientId\": 2,\n \"username\": \"user\" + randstr(),\n \"pwd\": \"password\",\n \"nameLast\": \"User\",\n \"nameFirst\": \"Joe\",\n \"email\": \"[email protected]\" + randstr(),\n \"phone\": \"123-4567\",\n \"profilePicturePath\": \"/\",\n \"timezoneDefault\": \"EST\",\n \"languageDefault\": \"English\"\n }\n\n # Generates expected value\n expected = {\n 'statusCode': 200,\n 'body': '{\"success\": true, \"apicode\": \"OK\", \"apimessage\": \"User successfully created.\", \"apidataset\": {\"message\": \"User successfully created!\"}}'\n }\n\n # Invokes\n actual = handler.user_create(event=event, context=None)\n\n # Validates response\n self.assertEqual(expected, actual)", "def sample_user_third(email=\"[email protected]\",\n password=\"password123\",\n name=\"some name3\"):\n return get_user_model().objects.create_user(email=email,\n password=password,\n name=name)", "def create_user(email, password, f_name, l_name):\n pass", "def test_user_creation(self):\n user = UserModel.objects.create_user(\n username=\"saimer\"\n )\n self.assertEqual(user.email, \"\")\n self.assertEqual(user.username, \"saimer\")\n self.assertFalse(user.has_usable_password())", "def test_user_exists(self):\r\n payload = {\r\n 'email': '[email protected]',\r\n 'password': 'testpass',\r\n 'name': 'Maks'\r\n }\r\n create_user(**payload)\r\n\r\n res = self.client.post(CREATE_USER_URL, 
payload)\r\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create(self):\n userValue = {'name': 'User Test 1',\n 'login': 'usertest1',\n 'user_profile_id': self.user_profile2.id,\n }\n Users = self.env['res.users']\n user_test = Users.create(userValue)\n newUser = self.env['res.users'].browse(user_test.id)\n self.assertEqual(userValue['name'], newUser['name'])", "def test_create_simple_user(self):\n payload = {\n 'email': '[email protected]',\n 'password': 'password231',\n 'name': 'vasia'\n }\n\n res = self.client.post(CREATE_USER_URL, payload)\n\n u = get_user_model().objects.get(**res.data)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n self.assertTrue(u.check_password(self.payload['password']))\n self.assertEqual(u.email, self.payload['email'])" ]
[ "0.87298", "0.8356501", "0.8329616", "0.81969166", "0.8180267", "0.8153807", "0.8152923", "0.8108296", "0.8101715", "0.8046674", "0.8028509", "0.7997988", "0.79795915", "0.7977299", "0.7972973", "0.7970259", "0.7955699", "0.79508954", "0.7929527", "0.79290193", "0.7914198", "0.790889", "0.789708", "0.78939533", "0.78934205", "0.78899866", "0.78854495", "0.78714293", "0.78684294", "0.78666234" ]
0.9081128
1
Test case for deallocate_virt_realm
def test_deallocate_virt_realm(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_remove_virt_realm(self):\n pass", "def test_allocate_virtualization_realm(self):\n pass", "def test_update_virt_realm(self):\n pass", "def test_get_virtualization_realm(self):\n pass", "def test_update_virtualization_realm(self):\n pass", "def test_register_virtualization_realm(self):\n pass", "def test_get_virtualization_realm_resources(self):\n pass", "def test_disable_virt_realm_remote_access(self):\n pass", "def deallocate(self):\n raise NotImplementedError", "def test_invalidate_template_cache_in_virtualization_realm(self):\n pass", "def test_delete_hyperflex_cluster_storage_policy(self):\n pass", "def test__removeRelObject(t):\n t.adm._removeRelObject(\"device\", \"objmap\", \"relname\")", "def test_deleter(self):\n self.rebuild_all()\n l = len(self.real_heap)\n #print \"Before :\",l\n for member_remove in xrange(0,l):\n self.rebuild_all()\n \n l = len(self.real_heap)\n #print \"Aftre :\",l\n #print \"To delete :\",self.real_heap\n #print \"member to delete :\",member_remove\n self.real_heap.new_delete(member_remove)\n assert self.is_heap_valid(self.real_heap) == True\n\n #check if it is there ?\n tmp_max = max(self.real_heap)\n real_max = self.real_heap.extract_max()\n assert tmp_max == real_max\n assert self.is_heap_valid(self.real_heap) == True\n\n if member_remove%10==0:\n print \"Deletion of %d/%d is completed\"%(member_remove,l)", "def test_delete_hyperflex_sys_config_policy(self):\n pass", "def test_delete_hyperflex_capability_info(self):\n pass", "def test_set_virtualization_realm_active(self):\n pass", "def test_delete_hyperflex_vcenter_config_policy(self):\n pass", "def test_enable_virt_realm_remote_access(self):\n pass", "def test_delete_hyperflex_app_catalog(self):\n pass", "def test_delete_hyperflex_cluster_profile(self):\n pass", "def test_delete_hyperflex_ext_fc_storage_policy(self):\n pass", "def test_destroy_container_privilege(self):\n pass", "def test_delete_hyperflex_proxy_setting_policy(self):\n pass", "def test_delete_hyperflex_local_credential_policy(self):\n pass", "def test_data_object_del(self):\n pass", "def test_delete_hyperflex_auto_support_policy(self):\n pass", "def test_update_virt_realm_remote_access_config(self):\n pass", "def test_delete_hyperflex_ucsm_config_policy(self):\n pass", "def test_orphan(self, mock_rm_vopts):\n def validate_rm_vopts(vgwrap, vopts, **kwargs):\n # Two of the VSCSI mappings have storage; both are vopts\n self.assertEqual(2, len(vopts))\n mock_rm_vopts.side_effect = validate_rm_vopts\n vwrap = self.vio_feed[0]\n # Save the \"before\" sizes of the mapping lists\n vscsi_len = len(vwrap.scsi_mappings)\n vfc_len = len(vwrap.vfc_mappings)\n ts.add_orphan_storage_scrub_tasks(self.ftsk)\n ret = self.ftsk.execute()\n # One for vscsi maps, one for vfc maps, one for vopt storage\n self.assertEqual(3, self.logfx.patchers['warn'].mock.call_count)\n # Pull out the WrapperTask returns from the (one) VIOS\n wtr = ret['wrapper_task_rets'].popitem()[1]\n vscsi_removals = wtr['vscsi_removals_orphans']\n self.assertEqual(18, len(vscsi_removals))\n # Removals are really orphans\n for srm in vscsi_removals:\n self.assertIsNone(srm.client_adapter)\n # The right number of maps remain.\n self.assertEqual(vscsi_len - 18, len(vwrap.scsi_mappings))\n # Remaining maps are not orphans.\n for smp in vwrap.scsi_mappings:\n self.assertIsNotNone(smp.client_adapter)\n # _RemoveOrphanVfcMaps doesn't \"provide\", so the following are limited.\n # The right number of maps remain.\n self.assertEqual(vfc_len - 19, len(vwrap.vfc_mappings))\n # Remaining 
maps are not orphans.\n for fmp in vwrap.vfc_mappings:\n self.assertIsNotNone(fmp.client_adapter)\n # POST was warranted.\n self.assertEqual(1, self.txfx.patchers['update'].mock.call_count)\n # _RemoveStorage invoked _rm_vopts\n self.assertEqual(1, mock_rm_vopts.call_count)", "def tearDown(self):\n self._invoker = None\n self.implementation.destantiate(self._memo)\n self._digest_pool.shutdown(wait=True)" ]
[ "0.81644636", "0.70024914", "0.66599214", "0.6289817", "0.6224093", "0.60699666", "0.6067102", "0.59737736", "0.5801199", "0.5729157", "0.57257605", "0.568894", "0.5666749", "0.5623497", "0.56026965", "0.55525106", "0.5513239", "0.54915744", "0.5483333", "0.5478135", "0.5474653", "0.54282534", "0.5419794", "0.536963", "0.53563225", "0.5349775", "0.53494585", "0.5347258", "0.52856535", "0.5275906" ]
0.9518774
0
Test case for delete_asset
def test_delete_asset(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_system_asset(self):\n pass", "def test_delete_asset_type(self):\n self.assertEqual(self.all_assets.count(), 1)\n get_asset = Asset.objects.get(asset_code=\"IC001\")\n statuses = AssetStatus.objects.filter(asset=get_asset)\n for status in statuses:\n status.delete()\n get_asset.delete()\n self.assertEqual(self.all_assets.count(), 0)", "def test_delete_software_asset_bundle(self):\n pass", "def test_remove_asset(self):\n # Confirm that the story has no assets\n self.assertEqual(self.story.assets.count(), 0)\n # Assign the asset to the section\n container = Container.objects.get(name='left')\n section_asset = SectionAsset(section=self.section, asset=self.asset, container=container)\n section_asset.save()\n # Confirm the asset is in the section's list\n self.assertTrue(self.asset in self.section.assets.select_subclasses())\n # Confirm that the asset is in the story's list\n self.assertTrue(self.asset in self.story.assets.select_subclasses())\n # Delete the asset from the section.\n section_asset.delete()\n # Confirm that the asset is NOT in the section's list\n self.assertFalse(self.asset in self.section.assets.select_subclasses())\n # Confirm that the asset is in the story's list\n self.assertTrue(self.asset in self.story.assets.select_subclasses())", "def test_remove_asset(self):\n # Create a story\n title = ('Transportation Challenges Limit Education Choices for '\n 'Denver Parents')\n summary = \"\"\"\n Many families in the Denver metro area use public\n transportation instead of a school bus because for them, a\n quality education is worth hours of daily commuting. Colorado's\n school choice program is meant to foster educational equity,\n but the families who benefit most are those who have time and\n money to travel. Low-income families are often left in a lurch.\n \"\"\"\n byline = \"Mile High Connects\"\n story = create_story(title=title, summary=summary, byline=byline)\n # Confirm that the story has no assets\n self.assertEqual(story.assets.count(), 0)\n # create a Section\n section = create_section(title=\"Test Section 1\", story=story)\n # create a HtmlAsset\n asset = HtmlAsset()\n asset.save()\n translation = HtmlAssetTranslation(title='Test Asset', asset=asset)\n translation.save()\n # Assign the asset to the section\n section_asset = SectionAsset(section=section, asset=asset, weight=0)\n section_asset.save()\n # Confirm the asset is in the section's list\n self.assertTrue(asset in section.assets.select_subclasses())\n # Confirm that the asset is in the story's list\n self.assertTrue(asset in story.assets.select_subclasses())\n # Delete the asset from the section.\n section_asset.delete()\n # Confirm that the asset is NOT in the section's list\n self.assertFalse(asset in section.assets.select_subclasses())\n # Confirm that the asset is in the story's list\n self.assertTrue(asset in story.assets.select_subclasses())", "def test_delete(self):\n obj = self.provision_single_asset()\n obj_id = obj.id\n self.delete('widget', 200, params={'id': obj_id})\n obj = self.session.query(self.widget_model).filter_by(id=obj_id).first()\n assert obj is None", "def test_delete_image(self):\n pass", "def test_remove_category_from_asset(self):\n pass", "def delete_asset(self, asset_id, asset_type):\n return self.asset(asset_id, asset_type=asset_type, action='DELETE')", "def test_aws_service_api_volume_attachment_delete(self):\n pass", "def test_delete_assetmodel_cascades(self):\n self.assertEqual(self.all_assets.count(), 1)\n self.assertEqual(self.all_assetmodels.count(), 1)\n 
get_assetmodel = AssetModelNumber.objects.get(model_number=\"IMN50987\")\n get_asset = Asset.objects.get(asset_code=\"IC001\")\n get_asset.model_number = get_assetmodel\n get_asset.save()\n with self.assertRaises(ProtectedError):\n get_assetmodel.delete()\n self.assertEqual(self.all_assets.count(), 1)\n self.assertEqual(self.all_assetmodels.count(), 1)", "def test_before_delete(self, create_with_upload):\n name = \"test.txt\"\n resource = create_with_upload(\n \"hello world\",\n name,\n name=name,\n package_id=factories.Dataset()[\"id\"],\n )\n plugin = p.get_plugin(\"cloudstorage\")\n uploader = plugin.get_resource_uploader(resource)\n assert uploader.get_url_from_filename(resource[\"id\"], name)\n\n helpers.call_action(\"resource_delete\", id=resource[\"id\"])\n assert uploader.get_url_from_filename(resource[\"id\"], name) is None", "def test_delete(self):\n self.basic_login()\n cassette_name = self.cassette_name(\"delete\")\n with self.recorder.use_cassette(cassette_name):\n auth = self.gh.authorize(\n username=self.user,\n password=self.password,\n scopes=[\"gist\"],\n note=\"testing github3.py\",\n )\n assert isinstance(auth, github3.auths.Authorization)\n assert auth.delete() is True", "def test_delete_run(self):\n pass", "def test_delete_file(self):\n # put file\n self.prepare_uploads()\n backend = BackendS3(**self.config)\n uploads = self.upload_path\n src = os.path.join(uploads, 'test.tar.gz')\n\n id1 = utils.generate_id('test.tar.gz')\n backend.put(src, id1)\n\n # regression testing (only delete what requested)\n id2 = id1.split('-')\n id2[4] += 'ZZZ'\n id2 = '-'.join(id2)\n\n backend.put(src, id1, True)\n backend.put_variant(src, id1, 'demo.txt')\n backend.put(src, id2, True)\n backend.delete(id1)\n\n path1 = '/'.join(backend.id_to_path(id1)) + '/test.tar.gz'\n path2 = '/'.join(backend.id_to_path(id1)) + '/demo.txt'\n self.assertFalse(backend.exists(path1))\n self.assertFalse(backend.exists(path2))\n\n # assume only proper file deleted\n path3 = '/'.join(backend.id_to_path(id2)) + '/test.tar.gz'\n self.assertTrue(backend.exists(path3))", "def test_delete_detail(self):\n story = create_story(title=\"Test Story\", summary=\"Test Summary\",\n byline=\"Test Byline\", status=\"published\",\n language=\"en\", author=self.user)\n layout = SectionLayout.objects.get(sectionlayouttranslation__name=\"Side by Side\")\n container1 = Container.objects.get(name='left')\n container2 = Container.objects.get(name='right')\n section = create_section(title=\"Test Section 1\", story=story,\n layout=layout)\n asset1 = create_html_asset(type='text', title='Test Asset',\n body='Test content', owner=self.user)\n asset2 = create_html_asset(type='text', title='Test Asset 2',\n body='Test content 2', owner=self.user)\n SectionAsset.objects.create(section=section, asset=asset1, container=container1)\n SectionAsset.objects.create(section=section, asset=asset2, container=container2)\n self.assertEqual(SectionAsset.objects.count(), 2)\n self.api_client.client.login(username=self.username, password=self.password)\n uri = '/api/0.1/stories/%s/sections/%s/assets/' % (story.story_id,\n section.section_id)\n resp = self.api_client.get(uri)\n self.assertValidJSONResponse(resp)\n self.assertEqual(len(self.deserialize(resp)['objects']), 2)\n uri = '/api/0.1/stories/%s/sections/%s/assets/%s/' % (\n story.story_id, section.section_id, asset1.asset_id)\n resp = self.api_client.delete(uri)\n self.assertHttpAccepted(resp)\n self.assertEqual(SectionAsset.objects.count(), 1)", "def test_delete(self):\n pass", "def 
test_delete_image(self):\n # Upload the image first\n self.test_upload_image()\n im = ImageAttachment.objects.all()[0]\n r = post(self.client, 'upload.del_image_async', args=[im.id])\n\n eq_(200, r.status_code)\n json_r = json.loads(r.content)\n eq_('success', json_r['status'])\n eq_(0, ImageAttachment.objects.count())", "def test_delete_item_using_delete(self):\n pass", "def test_aws_service_api_snapshot_delete(self):\n pass", "def test_delete_collection_image(self):\n pass", "def test_delete_activity_template(self):\n pass", "def test_aws_service_api_volume_delete(self):\n pass", "def test_delete_image_signature(self):\n pass", "def test_delete_activity(self):\n pass", "def test_update_asset(self):\n pass", "def test_update_test_asset(self):\n pass", "def test_delete_boat(self):\n pass", "def test_delete_vehicle(self):\n vehicle = sample_vehicle(user=self.user)\n\n url = detail_url(vehicle.id)\n res = self.client.delete(url)\n\n self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)", "def test_vault_delete_vault_item(self):\n pass" ]
[ "0.85412884", "0.8045294", "0.78518474", "0.7808656", "0.7775288", "0.7630795", "0.7349746", "0.7310101", "0.72265697", "0.71663094", "0.71586347", "0.69964325", "0.69690657", "0.6936937", "0.6901171", "0.68877393", "0.6855123", "0.68510675", "0.67798287", "0.67679745", "0.67624706", "0.6724539", "0.6716645", "0.6704701", "0.66917473", "0.6689844", "0.66181445", "0.6611595", "0.661012", "0.66034156" ]
0.94202167
0
Test case for delete_category
def test_delete_category(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n self.dashboard()\n self.category('JunkFood')\n self.dashboard()\n rv = self.del_category()\n self.assertIn(b'successfully deleted category', rv.data)", "def test_delete_category(self):\n self.add_success(self.test_data['pants'])\n self.delete_success('pants')", "def test_delete_a_category(self):\n self.test_add_category_success()\n response = self.client.delete('/categories/1',\n headers={\"Authorization\": self.token})\n self.assertEqual(response.status_code, 200)\n self.assertIn('category deleted permanently',\n response.data.decode())", "def test_delete_category_does_not_exist(self):\n self.delete_does_not_exist_fail('hats')", "def test_delete_category(self):\n rv = self.client().post(\n '/categories/',\n data={'category_name': 'Sauces'})\n self.assertEqual(rv.status_code, 201)\n res = self.client().delete('/categories/1')\n #self.assertEqual(res.status_code, 200)\n # Test to see if it exists, should return a 404\n result = self.client().get('/categories/1')\n #self.assertEqual(result.status_code, 404)", "def test_delete_recipe_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n self.dashboard()\n self.category('JunkFood')\n self.dashboard()\n self.recipe_dashboard()\n self.create_recipe('cakes', 'blah, blah, blah....mix ingredient, heat')\n self.edit_recipe('edited cakes', 'edited blah blah blah spoon , heat')\n rv = self.del_recipe()\n self.assertIn(b'deleted successfully', rv.data)", "def test_delete(self, init_db, category):\n category.delete()\n assert Category.get(category.id) == None", "def test_category_delete(category):\n category.delete()\n\n category = Category.query.filter_by(id=category.id).first()\n\n assert category is None", "def test_category_delete(self):\n login = self.autheniticate()\n token = json.loads(login.data.decode()).get('token')\n self.app.post(category_url,\n data=json.dumps(self.data),\n headers=dict(Authorization=\"Bearer \" + token),\n content_type='application/json')\n res = self.app.delete('/api/v2/categories/1',\n headers=dict(Authorization=\"Bearer \" + token),\n content_type='application/json')\n res1 = json.loads(res.data.decode())\n self.assertEqual(res1['status'], 'Deleted!')\n self.assertEqual(res.status_code, 200)", "def test_delete_single_recipe_category(self):\n with self.client:\n response = self.register_user(\n \"Patrick\", \"Walukagga\", \n \"[email protected]\", \"telnetcmd123\"\n )\n # registered user login\n rep_login = self.login_user(\"[email protected]\", \"telnetcmd123\")\n # valid token\n headers=dict(\n Authorization='Bearer ' + json.loads(\n rep_login.data.decode()\n )['auth_token']\n )\n cresponse = self.create_category(\"Breakfast\", \n \"How to make breakfast\", \n headers)\n \n response = self.create_category(\"Lunchfast\", \n \"How to make lunchfast\", \n headers)\n response = self.client.delete('/recipe_category/1', \n headers=headers)\n self.assertEqual(response.status_code, 200)\n self.assertIn('Recipe category deleted', \n str(response.data))\n # delete recipe category not in database\n response = self.client.delete('/recipe_category/3', \n headers=headers, )\n self.assertEqual(response.status_code, 404)\n self.assertIn('No category found', \n str(response.data))", "def test_CategoriesDelete(self):\n trans1 = DebitsCredits.objects.create(account=self.account,\n currency=self.euro,\n 
name=\"Shopping\",\n amount=1,\n category=self.cat1)\n self.cat1.delete()\n self.assertEqual(self.cat1.active, False)\n\n trans1.delete()\n self.assertEqual(self.cat1.transactions.all().count(), 0)\n\n self.cat1.delete()\n self.assertEqual(Category.objects.all().count(), 1)", "def test_delete_case(self):\n pass", "def test_delete_single_recipe_category_id_not_number(self):\n with self.client:\n response = self.register_user(\n \"Patrick\", \"Walukagga\", \n \"[email protected]\", \"telnetcmd123\"\n )\n # registered user login\n rep_login = self.login_user(\"[email protected]\", \"telnetcmd123\")\n # valid token\n headers=dict(\n Authorization='Bearer ' + json.loads(\n rep_login.data.decode()\n )['auth_token']\n )\n cresponse = self.create_category(\"Breakfast\", \n \"How to make breakfast\", \n headers)\n \n response = self.create_category(\"Lunchfast\", \n \"How to make lunchfast\", \n headers)\n response = self.client.delete('/recipe_category/a', \n headers=headers)\n self.assertEqual(response.status_code, 400)\n self.assertIn('Category ID must be an integer', \n str(response.data))", "def test_create_category(self):\n pass", "def test_remove_category_from_asset(self):\n pass", "def test_25_admin_delete_category(self):\r\n self.create()\r\n obj = db.session.query(Category).get(2)\r\n category = obj.dictize()\r\n\r\n # Anonymous user GET\r\n url = '/admin/categories/del/%s' % obj.id\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Anonymous users should be redirected to sign in\"\r\n assert dom.find(id='signin') is not None, err_msg\r\n # Anonymous user POST\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Anonymous users should be redirected to sign in\"\r\n assert dom.find(id='signin') is not None, err_msg\r\n\r\n # Authenticated user but not admin GET\r\n self.signin(email=self.email_addr2, password=self.password)\r\n res = self.app.post(url, follow_redirects=True)\r\n err_msg = \"Non-Admin users should get 403\"\r\n assert res.status_code == 403, err_msg\r\n # Authenticated user but not admin POST\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n err_msg = \"Non-Admin users should get 403\"\r\n assert res.status_code == 403, err_msg\r\n self.signout()\r\n\r\n # Admin GET\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n err_msg = \"Category should be listed for admin user\"\r\n assert category['name'] in res.data, err_msg\r\n # Admin POST\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n err_msg = \"Category should be deleted\"\r\n assert \"Category deleted\" in res.data, err_msg\r\n assert category['name'] not in res.data, err_msg\r\n output = db.session.query(Category).get(obj.id)\r\n assert output is None, err_msg\r\n # Non existant category\r\n category['id'] = 5000\r\n url = '/admin/categories/del/5000'\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n assert res.status_code == 404, res.status_code\r\n\r\n # Now try to delete the only available Category\r\n obj = db.session.query(Category).first()\r\n url = '/admin/categories/del/%s' % obj.id\r\n category = obj.dictize()\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n print res.data\r\n err_msg = \"Category should not be deleted\"\r\n assert \"Category deleted\" not in res.data, err_msg\r\n assert category['name'] in res.data, err_msg\r\n output = 
db.session.query(Category).get(obj.id)\r\n assert output.id == category['id'], err_msg", "def test_delete(self):\n pass", "def test_category_delete_with_user(topic):\n user = topic.user\n forum = topic.forum\n category = topic.forum.category\n\n assert user.post_count == 1\n assert forum.post_count == 1\n assert forum.topic_count == 1\n\n category.delete([user])\n\n assert user.post_count == 0\n\n category = Category.query.filter_by(id=category.id).first()\n topic = Topic.query.filter_by(id=topic.id).first()\n\n assert category is None\n # The topic should also be deleted\n assert topic is None", "def test_update_category(self):\n pass", "def test_delete_a_category_not_existing(self):\n response = self.client.delete('/categories/1',\n headers={\"Authorization\": self.token})\n self.assertEqual(response.status_code, 404)\n self.assertIn('category with id: 1 is not found',\n response.data.decode())", "def category_delete(request):\n if request.POST:\n cat = get_object_or_404(Category, pk=request.POST.get('id'))\n cat.delete()\n return HttpResponse(status=200)", "def test_category_delete_with_forum(forum):\n forum.category.delete()\n\n assert forum is not None\n assert forum.category is not None\n\n category = Category.query.filter_by(id=forum.category.id).first()\n forum = Forum.query.filter_by(id=forum.id).first()\n\n assert forum is None\n assert category is None", "def test_delete_a_category_invalid_token(self):\n self.test_add_category_success()\n token = \"\"\n response = self.client.delete('/categories/1',\n headers={\"Authorization\": token})\n self.assertEqual(response.status_code, 401)\n self.assertIn('Token not found', response.data.decode())", "def delete_category(key):\n try:\n category = Categories.objects.get(pk=key)\n except ObjectDoesNotExist:\n return Response({'status': CATEGORY_NOT_FOUND}, status=status.HTTP_404_NOT_FOUND)\n\n setattr(category, 'is_delete', True)\n category.save()\n return Response({'status': CATEGORY_DELETED}, status=status.HTTP_200_OK)", "def test_delete_run(self):\n pass", "def test_delete1(self):\n pass", "def test_delete(self):\r\n course = CourseFactory.create(org='edX', course='999')\r\n with self.assertRaises(ValueError):\r\n tabs.primitive_delete(course, 0)\r\n with self.assertRaises(ValueError):\r\n tabs.primitive_delete(course, 1)\r\n with self.assertRaises(IndexError):\r\n tabs.primitive_delete(course, 6)\r\n tabs.primitive_delete(course, 2)\r\n self.assertFalse({u'type': u'textbooks'} in course.tabs)\r\n # Check that discussion has shifted up\r\n self.assertEquals(course.tabs[2], {'type': 'discussion', 'name': 'Discussion'})", "def test_crud_category_when_not_logged_in(self):\n with self.client:\n response = self.register_user(\n \"Patrick\", \"Walukagga\", \n \"[email protected]\", \"telnetcmd123\"\n )\n # invalid token\n headers=dict(Authorization='Bearer ')\n response = self.create_category(\"Breakfast\", \n \"How to make breakfast\", \n headers)\n self.assertEqual(response.status_code, 401)\n self.assertIn('Token is missing', str(response.data))\n category_data = json.dumps({\"name\": \"Lunchfast\", \n \"description\": \n \"How to make lunchfast\"})\n response = self.client.put('/recipe_category/1', \n headers=headers,\n data=category_data)\n self.assertEqual(response.status_code, 401)\n self.assertIn('Token is missing', str(response.data))\n response = self.client.delete('/recipe_category/1', \n headers=headers, \n data=category_data)\n self.assertEqual(response.status_code, 401)\n self.assertIn('Token is missing', str(response.data))\n # 
delete recipe category not in database\n response = self.client.delete('/recipe_category/3', \n headers=headers, \n data=category_data)\n self.assertEqual(response.status_code, 401)\n self.assertIn('Token is missing', str(response.data))", "def delete_category(category_id):\n if request.args.get('state') != login_session['state']:\n response = make_response(\n json.dumps({'error': 'Invalid state parameter.'}), 401\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n if 'username' not in login_session:\n response = make_response(\n json.dumps({'error': 'User is logged out. This should not happen'}), 401\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n try:\n if request.method == 'DELETE':\n category = session.query(Category).filter_by(id=category_id).one()\n session.delete(category)\n session.commit()\n state = get_new_state()\n login_session['state'] = state\n response = make_response(\n json.dumps({'success': '', 'nonce': login_session['state']}), 200\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n except Exception as inst:\n print(type(inst))\n print(inst.args)\n print(inst)", "async def delete_recipe_category(category: str, session: Session = Depends(generate_session)):\n\n try:\n db.categories.delete(session, category)\n except Exception:\n raise HTTPException(status.HTTP_400_BAD_REQUEST)" ]
[ "0.8890961", "0.8825679", "0.84570104", "0.8391637", "0.8391261", "0.8381615", "0.83248633", "0.82285976", "0.8167587", "0.80330753", "0.7792687", "0.75420815", "0.74702066", "0.74675155", "0.7414793", "0.7371265", "0.73069346", "0.7272684", "0.7258884", "0.7239488", "0.71770185", "0.71731794", "0.7139101", "0.7077789", "0.7068039", "0.699697", "0.69527364", "0.6919144", "0.6888057", "0.6882292" ]
0.9361505
0
Test case for delete_cloud
def test_delete_cloud(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_run(self):\n pass", "def test_delete(self):\n pass", "def test_before_delete(self, create_with_upload):\n name = \"test.txt\"\n resource = create_with_upload(\n \"hello world\",\n name,\n name=name,\n package_id=factories.Dataset()[\"id\"],\n )\n plugin = p.get_plugin(\"cloudstorage\")\n uploader = plugin.get_resource_uploader(resource)\n assert uploader.get_url_from_filename(resource[\"id\"], name)\n\n helpers.call_action(\"resource_delete\", id=resource[\"id\"])\n assert uploader.get_url_from_filename(resource[\"id\"], name) is None", "def test_cloud_service(self):\n cur = self.factory.create(access_token=ACCESS_TOKEN, expires_at=self.current_dt)\n with HTTMock(spark_cloud_mock):\n cloud = CloudCredentials.objects.cloud_service()\n self.assertEqual(cloud.access_token, ACCESS_TOKEN)\n cur.delete()", "def test_delete_bucket(self):\n pass", "def test_delete(self):\n\n value = self.instance.delete()\n self.client.delete_instance.assert_called_once_with('nginx')\n self.assertEqual(value, self.client.delete_instance.return_value)", "def test_delete_case(self):\n pass", "def test_aws_service_api_vm_delete(self):\n pass", "def test_delete_entity(self):\n\n storage = StringIO.StringIO()\n c = pycurl.Curl()\n c.setopt(c.URL,\"http://127.0.0.1:8090/compute/this_is_bilel\")\n c.setopt(c.HTTPHEADER, ['Content-Type: application/occi+json', 'Accept: application/occi+json'])\n c.setopt(c.CUSTOMREQUEST, 'DELETE')\n c.setopt(c.WRITEFUNCTION, storage.write)\n c.perform()\n content = storage.getvalue()\n print \" ===== Body content =====\\n \" + content + \" ==========\\n\"", "def test_delete_project(self):\n pass", "def test_delete_project(self):\n pass", "def delete():", "def test_delete__compute(self):\n arglist = [\n '--compute',\n self.projects[0].id,\n ]\n verifylist = [\n ('service', 'compute'),\n ('project', self.projects[0].id),\n ]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n\n result = self.cmd.take_action(parsed_args)\n\n self.assertIsNone(result)\n self.projects_mock.get.assert_called_once_with(self.projects[0].id)\n self.compute_quotas_mock.delete.assert_called_once_with(\n self.projects[0].id,\n )\n self.volume_quotas_mock.delete.assert_not_called()\n self.network_mock.delete_quota.assert_not_called()", "def test_delete_device(self):\n pass", "def test_delete_device(self):\n pass", "def test_delete_client(self):\n pass", "def test_delete1(self):\n pass", "def test_delete(self):\n responses.add(\n responses.Response(\n method='DELETE',\n url='https://connection.keboola.com/v2/storage/buckets/1?force=False&async=False',\n json={}\n )\n )\n bucket_id = '1'\n deleted_detail = self.buckets.delete(bucket_id, asynchronous=False)\n assert deleted_detail is None", "def test_004_delete(self):\n ret = svcmgr.main(argv=[\"delete\", \"-s\", SVCNAME, \"--local\"])\n assert ret == 0", "def test_delete_record(self):\n pass", "def test_delete_findings(upload, test_id):\n check_delete()\n upload.test_delete_findings(test_id)", "def test_delete_virtual_service(self):\n pass", "def test_ipam_vrfs_delete(self):\n pass", "def test_update_cloud(self):\n pass", "def test_aws_service_api_volume_delete(self):\n pass", "def test_client_verification_document_delete(self):\n pass", "def test_delete(client):\n rv = delete(client, 'Michael')\n assert json.loads(rv.data.decode())['code'] == 0\n assert json.loads(rv.data.decode())['owner'] == 'Michael'", "def test_client_document_delete(self):\n pass", "def test_get_cloud(self):\n pass", "def test_delete_records(self):\n pass" ]
[ "0.7414735", "0.7348272", "0.7266415", "0.7241517", "0.7134624", "0.7127713", "0.71162254", "0.71058226", "0.71026415", "0.7093834", "0.7093834", "0.7081794", "0.7049275", "0.7030104", "0.7030104", "0.701471", "0.69890684", "0.6981374", "0.6918537", "0.6848626", "0.6826079", "0.68252945", "0.6822957", "0.6818754", "0.68150276", "0.6786623", "0.6784604", "0.6781362", "0.6777259", "0.67768466" ]
0.946878
0
Test case for delete_composition
def test_delete_composition(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_collection_group(self):\n pass", "def test_delete_collection(self):\n pass", "def test_delete_collections(self):\n pass", "def test_delete_run(self):\n pass", "def test_delete_collection_identity(self):\n pass", "def test_delete_case(self):\n pass", "def test_delete_collection_namespaced_build(self):\n pass", "def test_delete(self):\n pass", "def test_variablepresentations_id_delete(self):\n pass", "def test_delete_group(self):\n pass", "def test_delete_group(self):\n pass", "def test_delete_occurrence(self):\n pass", "def test_delete_company_props_using_delete(self):\n pass", "def test_groups_group_ref_delete(self):\n pass", "def test_delete_collection_namespaced_template(self):\n pass", "def test_delete1(self):\n pass", "def test_delete(self):\r\n course = CourseFactory.create(org='edX', course='999')\r\n with self.assertRaises(ValueError):\r\n tabs.primitive_delete(course, 0)\r\n with self.assertRaises(ValueError):\r\n tabs.primitive_delete(course, 1)\r\n with self.assertRaises(IndexError):\r\n tabs.primitive_delete(course, 6)\r\n tabs.primitive_delete(course, 2)\r\n self.assertFalse({u'type': u'textbooks'} in course.tabs)\r\n # Check that discussion has shifted up\r\n self.assertEquals(course.tabs[2], {'type': 'discussion', 'name': 'Discussion'})", "def test_delete_namespaced_build(self):\n pass", "def test_products_ref_groups_delete(self):\n pass", "def test_update_composition(self):\n pass", "def test_remove(self):\n pass", "def test_model_flow_node_model_flow_id_node_id_component_delete(self):\n pass", "def test_delete_activity_template(self):\n pass", "def test_composition(self):", "def test_delete_collection_image(self):\n pass", "def test_delete_groups(self):\n pass", "def test_component_remove_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('component remove component1')\n rv, output = self._execute('component list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def test_delete(self):\n self.request.access.allow_delete = [\"everyone\"]\n pkg = make_package(factory=SQLPackage)\n self.sql.add(pkg)\n transaction.commit()\n self.sql.add(pkg)\n self.db.delete(pkg)\n count = self.sql.query(SQLPackage).count()\n self.assertEqual(count, 0)\n self.storage.delete.assert_called_with(pkg)", "def test_coupledmodels_id_delete(self):\n pass", "def test_delete_goal(self):\n pass" ]
[ "0.7100599", "0.7073361", "0.6893561", "0.6867474", "0.6807937", "0.67828196", "0.67734206", "0.67533463", "0.67522377", "0.6740552", "0.6740552", "0.6692136", "0.6651535", "0.66335267", "0.66273606", "0.6579304", "0.6550235", "0.6546103", "0.6507538", "0.6503365", "0.6492035", "0.6491467", "0.6490243", "0.6486874", "0.64797544", "0.64542437", "0.645374", "0.6451551", "0.64422107", "0.6432131" ]
0.9490086
0
Test case for delete_deployment_run
def test_delete_deployment_run(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_deployment(self):\n pass", "def test_remove_deployment(self):\n del_deployment, mod_del_dep_bp1 = \\\n self._deploy_and_get_modified_bp_path('remove_deployment',\n deployment_id='del_dep')\n\n undel_deployment, mod_undel_dep_bp1 = \\\n self._deploy_and_get_modified_bp_path('remove_deployment',\n deployment_id='undel_dep')\n\n blu_id = BLUEPRINT_ID + '-del-1'\n self.client.blueprints.upload(mod_del_dep_bp1, blu_id)\n wait_for_blueprint_upload(blu_id, self.client)\n self._do_update(del_deployment.id, blu_id)\n\n blu_id = BLUEPRINT_ID + '-undel-1'\n self.client.blueprints.upload(mod_undel_dep_bp1, blu_id)\n wait_for_blueprint_upload(blu_id, self.client)\n self._do_update(undel_deployment.id, blu_id)\n\n mod_del_dep_bp2 = self._get_blueprint_path(\n os.path.join('remove_deployment', 'modification2'),\n 'remove_deployment_modification2.yaml')\n blu_id = BLUEPRINT_ID + '-del-2'\n self.client.blueprints.upload(mod_del_dep_bp2, blu_id)\n wait_for_blueprint_upload(blu_id, self.client)\n self._do_update(del_deployment.id, blu_id)\n\n deployment_update_list = self.client.deployment_updates.list(\n deployment_id=del_deployment.id,\n _include=['id']\n )\n\n self.assertEqual(len(deployment_update_list.items), 2)\n\n # Delete deployment and assert deployment updates were removed\n uninstall = self.client.executions.start(\n del_deployment.id, 'uninstall')\n self.wait_for_execution_to_end(uninstall)\n\n self.client.deployments.delete(del_deployment.id)\n wait_for_deployment_deletion_to_complete(\n del_deployment.id, self.client\n )\n deployment_update_list = self.client.deployment_updates.list(\n deployment_id=del_deployment.id,\n _include=['id']\n )\n self.assertEqual(len(deployment_update_list.items), 0)\n\n # Assert no other deployment updates were deleted\n deployment_update_list = self.client.deployment_updates.list(\n deployment_id=undel_deployment.id,\n _include=['id']\n )\n self.assertEqual(len(deployment_update_list), 1)", "def test_delete_run(self):\n pass", "def test_delete_namespaced_deployment_config(self):\n pass", "def test_retest_deployment_run(self):\n pass", "def test_get_deployment_run(self):\n pass", "def delete_deployment(request, deployment, **_kwargs):\n pass", "def test_relaunch_deployment_run(self):\n pass", "def delete_run(arn=None):\n pass", "def test_execute_deployment(self):\n pass", "def test_release_deployment_run(self):\n pass", "def test_delete_collection_namespaced_deployment_config(self):\n pass", "def test_get_deployment_runs(self):\n pass", "def test_get_deployment_runs1(self):\n pass", "def test_delete_on_background_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def test_taskrun_delete(self):\r\n admin = UserFactory.create()\r\n owner = UserFactory.create()\r\n non_owner = UserFactory.create()\r\n app = AppFactory.create(owner=owner)\r\n task = TaskFactory.create(app=app)\r\n anonymous_taskrun = AnonymousTaskRunFactory.create(task=task, info='my task result')\r\n user_taskrun = TaskRunFactory.create(task=task, user=owner, info='my task result')\r\n\r\n ## anonymous\r\n res = self.app.delete('/api/taskrun/%s' % user_taskrun.id)\r\n error_msg = 'Anonymous should not be allowed to delete'\r\n assert_equal(res.status, '401 UNAUTHORIZED', error_msg)\r\n\r\n ### real user but not allowed to delete anonymous TaskRuns\r\n url = '/api/taskrun/%s?api_key=%s' % (anonymous_taskrun.id, owner.api_key)\r\n res = self.app.delete(url)\r\n error_msg = 'Authenticated user should not be allowed ' \\\r\n 'to delete anonymous TaskRuns'\r\n 
assert_equal(res.status, '403 FORBIDDEN', error_msg)\r\n\r\n ### real user but not allowed as not owner!\r\n url = '/api/taskrun/%s?api_key=%s' % (user_taskrun.id, non_owner.api_key)\r\n res = self.app.delete(url)\r\n error_msg = 'Should not be able to delete TaskRuns of others'\r\n assert_equal(res.status, '403 FORBIDDEN', error_msg)\r\n\r\n #### real user\r\n # DELETE with not allowed args\r\n url = '/api/taskrun/%s?api_key=%s' % (user_taskrun.id, owner.api_key)\r\n res = self.app.delete(url + \"&foo=bar\")\r\n err = json.loads(res.data)\r\n assert res.status_code == 415, err\r\n assert err['status'] == 'failed', err\r\n assert err['target'] == 'taskrun', err\r\n assert err['action'] == 'DELETE', err\r\n assert err['exception_cls'] == 'AttributeError', err\r\n\r\n # Owner with valid args can delete\r\n res = self.app.delete(url)\r\n assert_equal(res.status, '204 NO CONTENT', res.data)\r\n\r\n ### root\r\n url = '/api/taskrun/%s?api_key=%s' % (anonymous_taskrun.id, admin.api_key)\r\n res = self.app.delete(url)\r\n error_msg = 'Admin should be able to delete TaskRuns of others'\r\n assert_equal(res.status, '204 NO CONTENT', error_msg)", "def test_delete_on_background_response_descriptor_projects_release_release_resource(self):\n pass", "def test_01_delete_run(self):\n client = self.client\n\n j = check_json(client, 'api/db_default/v4/nts/runs/1')\n sample_ids = [s['id'] for s in j['tests']]\n self.assertNotEqual(len(sample_ids), 0)\n for sid in sample_ids:\n resp = client.get('api/db_default/v4/nts/samples/{}'.format(sid))\n self.assertEqual(resp.status_code, 200)\n\n resp = client.delete('api/db_default/v4/nts/runs/1')\n self.assertEqual(resp.status_code, 401)\n\n resp = client.delete('api/db_default/v4/nts/runs/1',\n headers={'AuthToken': 'wrong token'})\n self.assertEqual(resp.status_code, 401)\n\n resp = client.delete('api/db_default/v4/nts/runs/1',\n headers={'AuthToken': 'test_token'})\n self.assertEqual(resp.status_code, 200)\n\n resp = client.get('api/db_default/v4/nts/runs/1')\n self.assertEqual(resp.status_code, 404)\n\n for sid in sample_ids:\n resp = client.get('api/db_default/v4/nts/samples/{}'.format(sid))\n self.assertEqual(resp.status_code, 404)", "def test_publish_deployment_run(self):\n pass", "def Deletetest(self):\n # Test delete()\n result = self.runner.invoke(\n yoda.cli,\n [\"setup\", \"delete\"],\n input=\"n\\n\"\n )\n self.assertEqual(result.exit_code, 0)\n self.assertIn(\"Operation cancelled\", result.output)\n\n result = self.runner.invoke(\n yoda.cli,\n [\"setup\", \"delete\"],\n input=\"y\\n\"\n )\n self.assertEqual(result.exit_code, 0)\n self.assertIn(\"Configuration file deleted\", result.output)", "def test_duo_application_delete(self):\n pass", "def test_update_deployment(self):\n pass", "def handle_delete_deployment(project_id, deployment_id):\n deployment = delete_deployment(uuid=deployment_id, project_id=project_id)\n return jsonify(deployment)", "def test_create_deployment(self):\n pass", "def test_clean_exit(self):\n ch = connection_helper()\n qr = list_test_artifacts(None, ch.tables)\n self.assertFalse(bool(qr), \"\"\"Run 'removefacts --conf <config> --removetestlist' or \nexecute 'tests/scripts/removetestfacts.py' to fix\"\"\")", "def test_aws_service_api_vm_workshift_delete(self):\n pass", "def test_launch_deployment(self):\n pass", "def test_delete(self):\n self.request.access.allow_delete = [\"everyone\"]\n pkg = make_package(factory=DynamoPackage)\n self._save_pkgs(pkg)\n self.db.delete(pkg)\n count = self.engine.scan(DynamoPackage).count()\n 
self.assertEqual(count, 0)\n count = self.engine.scan(PackageSummary).count()\n self.assertEqual(count, 0)\n self.storage.delete.assert_called_with(pkg)", "def test_delete_project(self):\n pass", "def test_delete_project(self):\n pass" ]
[ "0.867448", "0.7608501", "0.74323153", "0.73011905", "0.72511435", "0.7250415", "0.7247018", "0.7219005", "0.70478123", "0.7030893", "0.70227975", "0.6893348", "0.6891038", "0.68697864", "0.6785895", "0.6785029", "0.67260367", "0.67216665", "0.6642672", "0.66380054", "0.66042185", "0.65227807", "0.64348334", "0.6432259", "0.63779193", "0.6337233", "0.6324319", "0.6318272", "0.6314017", "0.6314017" ]
0.93325716
0
Test case for delete_network
def test_delete_network(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_networking_project_network_delete(self):\n pass", "def test_delete_cluster_network(self):\n pass", "def test_delete__network(self):\n arglist = [\n '--network',\n self.projects[0].id,\n ]\n verifylist = [\n ('service', 'network'),\n ('project', self.projects[0].id),\n ]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n\n result = self.cmd.take_action(parsed_args)\n\n self.assertIsNone(result)\n self.projects_mock.get.assert_called_once_with(self.projects[0].id)\n self.compute_quotas_mock.delete.assert_not_called()\n self.volume_quotas_mock.delete.assert_not_called()\n self.network_mock.delete_quota.assert_called_once_with(\n self.projects[0].id,\n )", "def delete_network(self, network):\r\n return self.delete(self.network_path % (network))", "def test_delete_collection_cluster_network(self):\n pass", "def test_networking_project_network_tag_delete(self):\n pass", "def testDeleteNetworkAuth(self):\n response = self._delete('inventory/networks/1/')\n self.assertEquals(response.status_code, 401)\n\n response = self._delete('inventory/networks/1/',\n username=\"testuser\", password=\"password\")\n self.assertEquals(response.status_code, 403)", "def test_delete_hyperflex_cluster_network_policy(self):\n pass", "def test_delete_net_namespace(self):\n pass", "def test_delete_host_subnet(self):\n pass", "def network_delete(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.delete_network(**kwargs)", "def _delete_network_vm(args):\n libvirtConn = libvirt.openReadOnly(None)\n if libvirtConn is None:\n print('Cannot contact hypervisor', file=sys.stderr)\n return 1\n net = None\n try:\n net = libvirtConn.networkLookupByName(args.network_name)\n except libvirt.libvirtError:\n print('Cannot find network named [%s]' % args.network_name, file=sys.stderr)\n return 1\n print('Network found:\\n')\n print(xml.dom.minidom.parseString(net.XMLDesc()).toprettyxml(indent=\" \", newl=''))\n print('')\n\n if not args.yes:\n if not input('Really destroy this network ?').strip().lower() in ('y', 'yes'):\n return 1\n return oci_utils.kvm.virt.delete_virtual_network(network_name=args.network_name)", "def delete(self): \n params = {'command':'deleteNetwork',\n 'id':self.id}\n \n self.logger.debug('Remove network %s' % self.name)\n \n try:\n response = self.send_request(params)\n res = json.loads(response)\n clsk_job_id = res['deletenetworkresponse']['jobid']\n self.logger.debug('Start job over %s.%s - %s: %s' % (\n self._obj_type, self.name, \n 'deleteNetwork', res))\n return clsk_job_id\n except KeyError as ex :\n self.logger.error('Error parsing json data: %s' % ex)\n raise ClskError('Error parsing json data: %s' % ex)\n except ApiError as ex:\n self.logger.error(ex)\n raise ClskError(ex)", "def delete_network(name, host, network_type):\n logging.info(\"Deleting %s '%s' from host '%s'\", network_type, name, host.name)\n\n try:\n if network_type.lower() == \"vswitch\":\n host.configManager.networkSystem.RemoveVirtualSwitch(name)\n elif network_type.lower() == \"portgroup\":\n host.configManager.networkSystem.RemovePortGroup(name)\n except vim.fault.NotFound:\n logging.error(\"Tried to remove %s '%s' that does not exist from host '%s'\",\n network_type, name, host.name)\n except vim.fault.ResourceInUse:\n logging.error(\"%s '%s' can't be removed because there are vNICs associated with it\",\n network_type, name)", "def network_delete_event(self, network_info):\n\n net_id = network_info['network_id']\n if net_id not in self.network:\n 
LOG.error(_LE('network_delete_event: net_id %s does not exist.'),\n net_id)\n return\n\n segid = self.network[net_id].get('segmentation_id')\n tenant_id = self.network[net_id].get('tenant_id')\n tenant_name = self.get_project_name(tenant_id)\n net = utils.Dict2Obj(self.network[net_id])\n if not tenant_name:\n LOG.error(_LE('Project %(tenant_id)s does not exist.'),\n {'tenant_id': tenant_id})\n self.update_network_db(net.id, constants.DELETE_FAIL)\n return\n\n try:\n self.dcnm_client.delete_network(tenant_name, net)\n # Put back the segmentation id into the pool.\n self.seg_drvr.release_segmentation_id(segid)\n\n # Remove entry from database and cache.\n self.delete_network_db(net_id)\n del self.network[net_id]\n snets = [k for k in self.subnet if (\n self.subnet[k].get('network_id') == net_id)]\n [self.subnet.pop(s) for s in snets]\n except dexc.DfaClientRequestFailed:\n LOG.error(_LE('Failed to create network %(net)s.'),\n {'net': net.name})\n self.update_network_db(net_id, constants.DELETE_FAIL)\n # deleting all related VMs\n instances = self.get_vms()\n instances_related = [k for k in instances if k.network_id == net_id]\n for vm in instances_related:\n LOG.debug(\"deleting vm %s because network is deleted\", vm.name)\n self.delete_vm_function(vm.port_id, vm)\n self.network_del_notif(tenant_id, tenant_name, net_id)", "def delete_network(session, name):\n # type: (Session, Text) -> None\n url_tail = f\"/{CoordConstsV2.RSC_NETWORKS}/{name}\"\n return _delete(session, url_tail)", "def delete(self):\n \n logging.info(\"Deleting network %s\" % self.cloudnet)\n # res = cn.delete(self.cloudnet)\n res = self.cloudnet.delete()\n return res", "def delete_network(options, vsm_obj):\n print(\"Disconnecting edge interface attached to this network\")\n edge_id = get_edge(vsm_obj)\n edge = Edge(vsm_obj, '4.0')\n edge.id = edge_id\n vnics = Vnics(edge)\n vnics_schema = vnics.query()\n network = get_network_id(options, get_network_name_on_vc(options))\n for vnic in vnics_schema.vnics:\n if network and vnic.portgroupId == network:\n print(\"Found a matching vnic %s %s\" % (options.name, vnic.index))\n vnic.isConnected = \"False\"\n vnic.portgroupId = None\n vnic.name = \"vnic%s\" % vnic.index\n vnics_schema = VnicsSchema()\n vnics_schema.vnics = [vnic]\n result = vnics.create(vnics_schema)\n if (result[0].response.status != 204):\n print \"update vnic error: %s %s\" \\\n % (result[0].response.status, result[0].response.reason)\n return False\n else:\n break\n else:\n print (\"No matching vnic found\")\n\n vdn_scope = get_transport_zone(options)\n virtual_wire = VirtualWire(vdn_scope)\n vwire = virtual_wire.read_by_name(get_network_name(options))\n name = get_network_name(options)\n if vwire != \"FAILURE\":\n print(\"Found a matching network %s\" % (options.name))\n virtual_wire.id = vwire.objectId\n result = virtual_wire.delete()\n if (result.response.status != 200):\n print (\"Delete vwire error: %s\" % result.response.reason)\n return False\n else:\n print (\"No matching network found\")\n print(\"Network %s deleted\" % (options.name))\n\n return True", "def test_delete_collection_host_subnet(self):\n pass", "def delete_network_postcommit(self, context):\n if self.rpc_handler is None:\n return\n network = self._get_network_info(context._network)\n for _, _network in network.items():\n network_type = _network.get('network_type', '')\n if network_type not in CentecConstant.SUPPORTED_NETWORK_TYPES and len(CentecConstant.SUPPORTED_NETWORK_TYPES) > 0:\n return\n if network is not None:\n try:\n 
self.rpc_handler.delete_network(network)\n except:\n pass", "def deleteNetwork(self, session: Session, id_: str):\n try:\n return NetworkManager().deleteNetwork(session, id_)\n except TortugaException as ex:\n raise\n except Exception as ex:\n self._logger.exception(str(ex))\n raise TortugaException(exception=ex)", "def network_delete_end(self, payload):\n self.disable_dhcp_helper(payload['network_id'])", "def test_delete_namespaced_egress_network_policy(self):\n pass", "def delete_net(self, net_id):\n LOG_OBJ.debug(\"Deleting network %s\" % net_id)\n _url = \"http://\" + self.host_ip + \":9696/v2.0/networks/\" + \\\n net_id + \".json\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"DELETE\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while deleting net:%s\" %\n net_id)\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Deletion of Network Failed with status %s \" %\n response.status)\n return response.status\n\n LOG_OBJ.info(\"Deleted the network : %s \" % net_id)\n return True", "def test_add_network(self):\n pass", "def delete(self, oid):\n path = '%s/networks/%s' % (self.ver, oid)\n res = self.client.call(path, 'DELETE', data='', \n token=self.manager.identity.token)\n self.logger.debug('Delete openstack network: %s' % truncate(res))\n return res[0]", "def test_delete_device(self):\n pass", "def test_delete_device(self):\n pass", "def run(self, network_create_args=None):\n network = self.neutron.create_network(**(network_create_args or {}))\n self.neutron.delete_network(network[\"id\"])", "def test_delete_run(self):\n pass" ]
[ "0.882342", "0.8506827", "0.8320668", "0.79648894", "0.793082", "0.7892634", "0.74320185", "0.73942524", "0.7378358", "0.72483325", "0.7240482", "0.7189392", "0.7184786", "0.71149975", "0.70919955", "0.70222574", "0.7014078", "0.6969431", "0.6955914", "0.6919233", "0.689802", "0.6868898", "0.68233025", "0.6811651", "0.6809803", "0.6803659", "0.67872906", "0.67872906", "0.67576045", "0.6751275" ]
0.94385636
0
Test case for delete_project
def test_delete_project(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_remove_project(self):\n pass", "def test_delete_project(self):\n self.assertEqual(Project.objects.count(), 1)\n self.assertEqual(Group.objects.count(), 2)\n\n delete_project(Project.objects.get(name=\"project A\"))\n\n self.assertEqual(Project.objects.count(), 0)\n self.assertEqual(Group.objects.count(), 0)", "def test_projects_delete(self):\n project = Project()\n response = self.client.open('/project-tracker/projects',\n method='DELETE',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def delete_project(project):\n with BMI(_username, _password, constants.BMI_ADMIN_PROJECT) as bmi:\n ret = bmi.delete_project(project)\n if ret[constants.STATUS_CODE_KEY] == 200:\n click.echo(\"Success\")\n else:\n click.echo(ret[constants.MESSAGE_KEY])", "def test_projects_id_delete(self):\n response = self.client.open('/project-tracker/projects/{id}'.format(id=3.4),\n method='DELETE')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def delete_project(arn=None):\n pass", "def do_project_delete(cs, args):\n key = args.project\n if cs.projects.is_id(key):\n id = key\n else:\n id = cs.projects.get_id_by_name(key)\n cs.projects.delete(id)\n print(\"Delete Project '%s' successfully.\" % key)", "def project_delete(cursor, project):\n haystack = (project['_id'], )\n\n query = \"DELETE FROM projects WHERE _id=?\"\n try:\n cursor.execute(query, haystack)\n except Exception as e:\n on_error(e)\n\n query = \"DELETE FROM namespaces WHERE project_id=?\"\n try:\n cursor.execute(query, haystack)\n except Exception as e:\n on_error(e)\n else:\n cursor.connection.commit()\n raise Return((True, None))", "def pre_project_delete(self, resource_id):\n pass", "def post_project_delete(self, resource_id, resource_dict):\n pass", "def delete_project(\n name\n):\n\n cmd = dict()\n cmd[\"type_\"] = \"delete_project\"\n cmd[\"name_\"] = name\n\n comm.send(cmd)", "def delete_project(proj_id):\n project_obj = Project.objects.get(id=proj_id)\n print('Deleting project the fastq files within the project: ', project_obj.description)\n\n description = project_obj.description.replace(' ', '') # remove any space in the project name\n project_dir = 'documents/%s/%s' % (str(project_obj.date.date()), description)\n shutil.rmtree(project_dir, ignore_errors=True)\n print(\"Files deleted.\")", "def delete_project(request, project_id):\n\n profile = get_object_or_404(Profile, user=request.user)\n project = get_object_or_404(GameProject, pk=project_id)\n\n if not profile.is_creator:\n messages.error(request, 'Sorry, only creators can do that.')\n return redirect(reverse('home'))\n if project.owner != profile:\n messages.error(request, 'Sorry, only the project owner can do that.')\n return redirect(reverse('home'))\n\n project = get_object_or_404(GameProject, pk=project_id)\n project.delete()\n messages.success(request, 'Project deleted!')\n return redirect(reverse('all_projects'))", "def delete(conn, project):\n with conn:\n c = conn.cursor()\n c.execute(\"DELETE FROM projects WHERE project =?\", (project,))", "def delete_project(request, project_id):\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n project = get_object_or_404(Project, pk=project_id)\n project.delete()\n messages.success(request, 'Project deleted!')\n return redirect(reverse('portfolio'))", "def delete_stored_project():\n client = 
RequestManager()\n client.set_method(\"DELETE\")\n client.set_endpoint(\"/projects/{0}\".format(STORED_ID['project_id']))\n client.execute_request()", "def test_delete_run(self):\n pass", "def delete_project(project_id):\n project = Project.query.filter_by(id=project_id).first()\n if not project:\n return {\n 'success': False,\n 'message': f\"No project with the specified id {project_id} found.\",\n }\n\n else:\n if is_project_manager(project, g.user):\n # delete related tasks\n Task.query.filter_by(project=project).delete()\n #delete related invites\n Invitation.query.filter_by(project=project).delete()\n db_session.delete(project)\n db_session.commit()\n return {\n 'success': True,\n 'result': {},\n 'message': \"Project Deleted Successfully.\",\n }", "def test_remove_project_member(self):\n pass", "def delete_project(projectname):\n response = jsonify(admin.delete_project(current_app.scoped_session(), projectname))\n return response", "def delete(self):\n _url = f\"{self.connector.base_url}/projects/{self.project_id}\"\n\n self.connector.http_call(\"delete\", _url)\n\n self.project_id = None\n self.name = None", "def test_delete_team(self):\n pass", "def delete(self, guid):\n if helpers.authorized(self.request.params['UUID'], self.request.params['ATO'], self.request.params['action']):\n # search for the Project and delete if found\n key = db.Key.from_path('Project', int(guid))\n project = db.get(key)\n if not project == None:\n project.delete()\n self.response.set_status(204, \"Deleted\")\n else:\n self.response.set_status(404, \"Not Found\")\n else:\n self.response.set_status(401, \"Not Authorized\")", "def test_project_get_deleted_upon_user_delete(self):\n\n project = django_dynamic_fixture.get(Project)\n user1 = django_dynamic_fixture.get(User)\n project.users.add(user1)\n\n project.refresh_from_db()\n assert project.users.all().count() == 1\n\n # Delete the user\n user1.delete()\n # The object should not exist\n project = Project.objects.all().filter(id=project.id)\n assert not project.exists()", "def tearDown(self):\n Project.objects.all().delete()", "def test_delete(self):\n pass", "def delete_project(project_id):\n client = RequestManager()\n client.set_method(\"DELETE\")\n client.set_endpoint(\"/projects/{0}\".format(project_id))\n client.execute_request()", "def test_delete_goal(self):\n pass", "def test_handle_delete_assign_error(self):\r\n self.mock_facade.retrieve.return_value = Project(\"GTID\", [])\r\n self.assertTupleEqual(\r\n self.testcommand.handle(\"project delete ID\",\r\n user),\r\n (self.testcommand.assigned_error, 200))", "def delete(self, oid):\n path = '/projects/%s' % oid\n res = self.client.call(path, 'DELETE', data='', token=self.manager.identity.token)\n self.logger.debug('Delete openstack project: %s' % truncate(res))\n return True" ]
[ "0.8795011", "0.86479425", "0.84122276", "0.80755264", "0.80197614", "0.7882559", "0.78238297", "0.76832116", "0.75349617", "0.747658", "0.7430732", "0.7419057", "0.7347951", "0.73402214", "0.73338026", "0.73239595", "0.73235404", "0.73109347", "0.7307974", "0.72852445", "0.7279667", "0.72415876", "0.7222287", "0.72188336", "0.7195768", "0.71741766", "0.71551454", "0.71418434", "0.71331364", "0.7131443" ]
0.952583
0
Test case for delete_software_asset_bundle
def test_delete_software_asset_bundle(self):
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_system_asset(self):\n pass", "def test_delete_asset(self):\n pass", "def test_update_software_asset_bundle(self):\n pass", "async def test_deleter_delete_bundle(config, mocker):\n logger_mock = mocker.MagicMock()\n lta_rc_mock = mocker.patch(\"rest_tools.client.RestClient\", new_callable=AsyncMock)\n remove_mock = mocker.patch(\"os.remove\", new_callable=MagicMock)\n p = Deleter(config, logger_mock)\n await p._delete_bundle(lta_rc_mock, {\n \"uuid\": \"c4b345e4-2395-4f9e-b0eb-9cc1c9cdf003\",\n \"bundle_path\": \"/icecube/datawarehouse/path/to/c4b345e4-2395-4f9e-b0eb-9cc1c9cdf003.zip\",\n })\n remove_mock.assert_called()\n lta_rc_mock.request.assert_called_with(\"PATCH\", \"/Bundles/c4b345e4-2395-4f9e-b0eb-9cc1c9cdf003\", mocker.ANY)", "def test_delete_deployment(self):\n pass", "def test_delete_deployment_run(self):\n pass", "def test_remove_asset(self):\n # Create a story\n title = ('Transportation Challenges Limit Education Choices for '\n 'Denver Parents')\n summary = \"\"\"\n Many families in the Denver metro area use public\n transportation instead of a school bus because for them, a\n quality education is worth hours of daily commuting. Colorado's\n school choice program is meant to foster educational equity,\n but the families who benefit most are those who have time and\n money to travel. Low-income families are often left in a lurch.\n \"\"\"\n byline = \"Mile High Connects\"\n story = create_story(title=title, summary=summary, byline=byline)\n # Confirm that the story has no assets\n self.assertEqual(story.assets.count(), 0)\n # create a Section\n section = create_section(title=\"Test Section 1\", story=story)\n # create a HtmlAsset\n asset = HtmlAsset()\n asset.save()\n translation = HtmlAssetTranslation(title='Test Asset', asset=asset)\n translation.save()\n # Assign the asset to the section\n section_asset = SectionAsset(section=section, asset=asset, weight=0)\n section_asset.save()\n # Confirm the asset is in the section's list\n self.assertTrue(asset in section.assets.select_subclasses())\n # Confirm that the asset is in the story's list\n self.assertTrue(asset in story.assets.select_subclasses())\n # Delete the asset from the section.\n section_asset.delete()\n # Confirm that the asset is NOT in the section's list\n self.assertFalse(asset in section.assets.select_subclasses())\n # Confirm that the asset is in the story's list\n self.assertTrue(asset in story.assets.select_subclasses())", "def test_update_software_asset(self):\n pass", "def test_remove_asset(self):\n # Confirm that the story has no assets\n self.assertEqual(self.story.assets.count(), 0)\n # Assign the asset to the section\n container = Container.objects.get(name='left')\n section_asset = SectionAsset(section=self.section, asset=self.asset, container=container)\n section_asset.save()\n # Confirm the asset is in the section's list\n self.assertTrue(self.asset in self.section.assets.select_subclasses())\n # Confirm that the asset is in the story's list\n self.assertTrue(self.asset in self.story.assets.select_subclasses())\n # Delete the asset from the section.\n section_asset.delete()\n # Confirm that the asset is NOT in the section's list\n self.assertFalse(self.asset in self.section.assets.select_subclasses())\n # Confirm that the asset is in the story's list\n self.assertTrue(self.asset in self.story.assets.select_subclasses())", "def test_delete_asset_type(self):\n self.assertEqual(self.all_assets.count(), 1)\n get_asset = Asset.objects.get(asset_code=\"IC001\")\n statuses = 
AssetStatus.objects.filter(asset=get_asset)\n for status in statuses:\n status.delete()\n get_asset.delete()\n self.assertEqual(self.all_assets.count(), 0)", "def test_remove_category_from_asset(self):\n pass", "def test_create_software_asset_bundle_from_system_module(self):\n pass", "def test_update_software_asset_content(self):\n pass", "def test_delete(self):\n obj = self.provision_single_asset()\n obj_id = obj.id\n self.delete('widget', 200, params={'id': obj_id})\n obj = self.session.query(self.widget_model).filter_by(id=obj_id).first()\n assert obj is None", "def asset_cleanup():\n app.bank = dict()\n return \"Cleaned\", 200", "def test_delete_hyperflex_app_catalog(self):\n pass", "def test_replace_software_asset_for_software_component(self):\n pass", "def test_delete_on_background_response_descriptor_projects_release_release_resource(self):\n pass", "def test_vault_delete_vault_item(self):\n pass", "def test_aws_service_api_volume_attachment_delete(self):\n pass", "def test_delete_hyperflex_software_version_policy(self):\n pass", "def test_delete_bucket(self):\n pass", "def test_delete(self):\n self.request.access.allow_delete = [\"everyone\"]\n pkg = make_package(factory=DynamoPackage)\n self._save_pkgs(pkg)\n self.db.delete(pkg)\n count = self.engine.scan(DynamoPackage).count()\n self.assertEqual(count, 0)\n count = self.engine.scan(PackageSummary).count()\n self.assertEqual(count, 0)\n self.storage.delete.assert_called_with(pkg)", "async def test_deleter_quarantine_bundle_with_reason(config, mocker):\n logger_mock = mocker.MagicMock()\n lta_rc_mock = mocker.patch(\"rest_tools.client.RestClient\", new_callable=AsyncMock)\n p = Deleter(config, logger_mock)\n await p._quarantine_bundle(lta_rc_mock, {\"uuid\": \"c4b345e4-2395-4f9e-b0eb-9cc1c9cdf003\"}, \"Rucio caught fire, then we roasted marshmellows.\")\n lta_rc_mock.request.assert_called_with(\"PATCH\", \"/Bundles/c4b345e4-2395-4f9e-b0eb-9cc1c9cdf003\", mocker.ANY)", "def test_delete(self):\n self.request.access.allow_delete = [\"everyone\"]\n pkg = make_package(factory=SQLPackage)\n self.sql.add(pkg)\n transaction.commit()\n self.sql.add(pkg)\n self.db.delete(pkg)\n count = self.sql.query(SQLPackage).count()\n self.assertEqual(count, 0)\n self.storage.delete.assert_called_with(pkg)", "def test_import_software_asset(self):\n pass", "def test_delete(self):\n package = make_package()\n path = self.storage.get_path(package)\n os.makedirs(os.path.dirname(path))\n with open(path, 'w') as ofile:\n ofile.write('foobar')\n self.storage.delete(package)\n self.assertFalse(os.path.exists(path))", "def test_delete_on_background_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def test_get_software_bundles(self):\n pass", "def test_delete_hyperflex_server_firmware_version(self):\n pass" ]
[ "0.80919915", "0.79550517", "0.7599956", "0.7187153", "0.69839835", "0.68842155", "0.68526673", "0.6682587", "0.6676647", "0.6661918", "0.6593671", "0.65800726", "0.65216184", "0.6501501", "0.6496982", "0.64812243", "0.64527225", "0.640331", "0.63805336", "0.63252723", "0.63010174", "0.62978375", "0.6297209", "0.62874144", "0.6274094", "0.6272763", "0.6255761", "0.62290674", "0.6223093", "0.6219145" ]
0.9550257
0
Test case for delete_system_asset
def test_delete_system_asset(self):
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_asset(self):\n pass", "def test_delete_software_asset_bundle(self):\n pass", "def test_delete_asset_type(self):\n self.assertEqual(self.all_assets.count(), 1)\n get_asset = Asset.objects.get(asset_code=\"IC001\")\n statuses = AssetStatus.objects.filter(asset=get_asset)\n for status in statuses:\n status.delete()\n get_asset.delete()\n self.assertEqual(self.all_assets.count(), 0)", "def test_create_system_asset(self):\n pass", "def test_update_system_asset(self):\n pass", "def test_remove_asset(self):\n # Confirm that the story has no assets\n self.assertEqual(self.story.assets.count(), 0)\n # Assign the asset to the section\n container = Container.objects.get(name='left')\n section_asset = SectionAsset(section=self.section, asset=self.asset, container=container)\n section_asset.save()\n # Confirm the asset is in the section's list\n self.assertTrue(self.asset in self.section.assets.select_subclasses())\n # Confirm that the asset is in the story's list\n self.assertTrue(self.asset in self.story.assets.select_subclasses())\n # Delete the asset from the section.\n section_asset.delete()\n # Confirm that the asset is NOT in the section's list\n self.assertFalse(self.asset in self.section.assets.select_subclasses())\n # Confirm that the asset is in the story's list\n self.assertTrue(self.asset in self.story.assets.select_subclasses())", "def test_retrieve_system_asset(self):\n pass", "def test_remove_asset(self):\n # Create a story\n title = ('Transportation Challenges Limit Education Choices for '\n 'Denver Parents')\n summary = \"\"\"\n Many families in the Denver metro area use public\n transportation instead of a school bus because for them, a\n quality education is worth hours of daily commuting. Colorado's\n school choice program is meant to foster educational equity,\n but the families who benefit most are those who have time and\n money to travel. 
Low-income families are often left in a lurch.\n \"\"\"\n byline = \"Mile High Connects\"\n story = create_story(title=title, summary=summary, byline=byline)\n # Confirm that the story has no assets\n self.assertEqual(story.assets.count(), 0)\n # create a Section\n section = create_section(title=\"Test Section 1\", story=story)\n # create a HtmlAsset\n asset = HtmlAsset()\n asset.save()\n translation = HtmlAssetTranslation(title='Test Asset', asset=asset)\n translation.save()\n # Assign the asset to the section\n section_asset = SectionAsset(section=section, asset=asset, weight=0)\n section_asset.save()\n # Confirm the asset is in the section's list\n self.assertTrue(asset in section.assets.select_subclasses())\n # Confirm that the asset is in the story's list\n self.assertTrue(asset in story.assets.select_subclasses())\n # Delete the asset from the section.\n section_asset.delete()\n # Confirm that the asset is NOT in the section's list\n self.assertFalse(asset in section.assets.select_subclasses())\n # Confirm that the asset is in the story's list\n self.assertTrue(asset in story.assets.select_subclasses())", "def test_remove_category_from_asset(self):\n pass", "def test_delete(self):\n obj = self.provision_single_asset()\n obj_id = obj.id\n self.delete('widget', 200, params={'id': obj_id})\n obj = self.session.query(self.widget_model).filter_by(id=obj_id).first()\n assert obj is None", "def test_aws_service_api_volume_attachment_delete(self):\n pass", "def test_delete_image(self):\n pass", "def test_aws_service_api_volume_delete(self):\n pass", "def test_import_system_asset(self):\n pass", "def test_delete_assetmodel_cascades(self):\n self.assertEqual(self.all_assets.count(), 1)\n self.assertEqual(self.all_assetmodels.count(), 1)\n get_assetmodel = AssetModelNumber.objects.get(model_number=\"IMN50987\")\n get_asset = Asset.objects.get(asset_code=\"IC001\")\n get_asset.model_number = get_assetmodel\n get_asset.save()\n with self.assertRaises(ProtectedError):\n get_assetmodel.delete()\n self.assertEqual(self.all_assets.count(), 1)\n self.assertEqual(self.all_assetmodels.count(), 1)", "def asset_cleanup():\n app.bank = dict()\n return \"Cleaned\", 200", "def test_delete_file(self):\n # put file\n self.prepare_uploads()\n backend = BackendS3(**self.config)\n uploads = self.upload_path\n src = os.path.join(uploads, 'test.tar.gz')\n\n id1 = utils.generate_id('test.tar.gz')\n backend.put(src, id1)\n\n # regression testing (only delete what requested)\n id2 = id1.split('-')\n id2[4] += 'ZZZ'\n id2 = '-'.join(id2)\n\n backend.put(src, id1, True)\n backend.put_variant(src, id1, 'demo.txt')\n backend.put(src, id2, True)\n backend.delete(id1)\n\n path1 = '/'.join(backend.id_to_path(id1)) + '/test.tar.gz'\n path2 = '/'.join(backend.id_to_path(id1)) + '/demo.txt'\n self.assertFalse(backend.exists(path1))\n self.assertFalse(backend.exists(path2))\n\n # assume only proper file deleted\n path3 = '/'.join(backend.id_to_path(id2)) + '/test.tar.gz'\n self.assertTrue(backend.exists(path3))", "def test_vault_delete_vault_item(self):\n pass", "def test_delete__volume(self):\n arglist = [\n '--volume',\n self.projects[0].id,\n ]\n verifylist = [\n ('service', 'volume'),\n ('project', self.projects[0].id),\n ]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n\n result = self.cmd.take_action(parsed_args)\n\n self.assertIsNone(result)\n self.projects_mock.get.assert_called_once_with(self.projects[0].id)\n self.compute_quotas_mock.delete.assert_not_called()\n 
self.volume_quotas_mock.delete.assert_called_once_with(\n self.projects[0].id,\n )\n self.network_mock.delete_quota.assert_not_called()", "def test_delete_image_signature(self):\n pass", "def test_aws_service_api_snapshot_delete(self):\n pass", "def test_delete_deployment_run(self):\n pass", "def test_delete_deployment(self):\n pass", "def test_before_delete(self, create_with_upload):\n name = \"test.txt\"\n resource = create_with_upload(\n \"hello world\",\n name,\n name=name,\n package_id=factories.Dataset()[\"id\"],\n )\n plugin = p.get_plugin(\"cloudstorage\")\n uploader = plugin.get_resource_uploader(resource)\n assert uploader.get_url_from_filename(resource[\"id\"], name)\n\n helpers.call_action(\"resource_delete\", id=resource[\"id\"])\n assert uploader.get_url_from_filename(resource[\"id\"], name) is None", "def test_delete(self):\n self.basic_login()\n cassette_name = self.cassette_name(\"delete\")\n with self.recorder.use_cassette(cassette_name):\n auth = self.gh.authorize(\n username=self.user,\n password=self.password,\n scopes=[\"gist\"],\n note=\"testing github3.py\",\n )\n assert isinstance(auth, github3.auths.Authorization)\n assert auth.delete() is True", "def test_update_software_asset(self):\n pass", "def test_dashboards_v2_delete_share(self):\n pass", "def test_delete_run(self):\n pass", "def test_delete_cloud(self):\n pass", "def test_aws_service_api_vm_delete(self):\n pass" ]
[ "0.8655649", "0.8234435", "0.752207", "0.73021317", "0.7064291", "0.70400894", "0.70157003", "0.6983601", "0.6964017", "0.6923627", "0.68705094", "0.681115", "0.67882484", "0.663221", "0.66320956", "0.6508906", "0.6490903", "0.64670813", "0.6428559", "0.6422137", "0.64050126", "0.6403821", "0.63981116", "0.63962644", "0.6356783", "0.6319642", "0.63103485", "0.63098794", "0.6306019", "0.62905115" ]
0.9475287
0
Test case for delete_team
def test_delete_team(self):
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_teams_delete_team_v1(self):\n pass", "def test_delete_team_member(self):\n pass", "def test_handle_delete(self):\n team = Team(\"BRS\", \"brs\", \"web\")\n team.github_team_id = \"12345\"\n test_user = User(\"userid\")\n test_user.github_id = \"1234\"\n team.add_team_lead(\"1234\")\n self.db.retrieve.return_value = test_user\n self.db.query.return_value = [team]\n self.assertTupleEqual(self.testcommand.handle(\"team delete brs\", user),\n (f\"Team brs deleted\", 200))\n self.db.delete.assert_called_once_with(Team, \"12345\")\n self.gh.org_delete_team.assert_called_once_with(int(\"12345\"))", "def test_remove_team_manager_from_team(self):\n pass", "def test_handle_delete_as_team_lead(self):\r\n def facade_retrieve_side_effect(*args, **kwargs):\r\n \"\"\"Return a side effect for the mock facade.\"\"\"\r\n if args[0] == Project:\r\n return Project(\"\", [])\r\n elif args[0] == Team:\r\n team = Team(\"GTID\", \"team-name\", \"display-name\")\r\n team.team_leads.add(user)\r\n return team\r\n else:\r\n calling_user = User(user)\r\n return calling_user\r\n self.mock_facade.retrieve.side_effect = facade_retrieve_side_effect\r\n self.assertTupleEqual(\r\n self.testcommand.handle(\"project delete ID\",\r\n user),\r\n (\"Project successfully deleted!\", 200))", "async def delete_team(team_id: str = Path(..., description=\"ID value of the desired team\"),\n db_handler: DBHandler = Depends(database_dependency)):\n deleted_record = await db_handler.delete_team(team_id=team_id)\n deleted_record = init_BaseTeam(deleted_record)\n\n return deleted_record", "async def delete(self):\n return await self._state.delete_team(self.id)", "def delete_workteam(WorkteamName=None):\n pass", "def team_delete(token_user, team_id):\n team = Team.query.get(team_id)\n if team is None:\n abort(404, 'team not found')\n\n if team.team_type.name == 'single':\n abort(403, 'unable to delete team of type \"single\"')\n\n # check for permissions to delete the team\n if not (token_user.has_permission('team.delete.elevated') or\n (token_user.has_permission('team.delete') and\n team.has_member(token_user))):\n abort(403, 'insufficient permissions to delete team')\n\n # deschedule reservations for the team then delete the team\n Reservation.query.filter_by(team_id=team.id).delete()\n get_db().delete(team)\n get_db().commit()\n\n return '', 204", "def test_teams_remove_user_from_team_v2(self):\n pass", "def test_handle_delete_not_admin(self):\n team = Team(\"BRS\", \"brs\", \"web\")\n test_user = User(\"userid\")\n self.db.retrieve.return_value = test_user\n self.db.query.return_value = [team]\n self.assertTupleEqual(self.testcommand.handle(\"team delete brs\", user),\n (self.testcommand.permission_error, 200))\n self.db.delete.assert_not_called()\n self.gh.org_delete_team.assert_not_called()", "def test_handle_remove_not_in_team(self):\n test_user = User(\"userid\")\n test_user.permissions_level = Permissions.admin\n team = Team(\"BRS\", \"brs\", \"web\")\n team.github_team_id = \"githubid\"\n other_user = User(\"anotheruser\")\n other_user.github_id = \"githubID\"\n other_user.github_username = \"myuser\"\n self.db.retrieve.side_effect = [test_user, other_user]\n self.db.query.return_value = [team]\n self.gh.has_team_member.return_value = False\n with self.app.app_context():\n self.assertTupleEqual(self.testcommand.handle(\"team remove\"\n \" brs ID\", user),\n (\"User not in team!\", 200))\n self.gh.has_team_member.assert_called_once_with(\"myuser\", \"githubid\")\n self.db.store.assert_not_called()\n 
self.gh.remove_team_member.assert_not_called()", "def test_teams_remove_user_from_team_v1(self):\n pass", "def delete_team(team_id):\n if request.method == 'GET':\n Team.query.filter_by(team_id=team_id).delete()\n db.session.commit()\n teams = get_team()\n if teams:\n return render_template('team-players.html', teams=teams)\n else:\n return render_template('team-players.html')", "def test_delete_goal(self):\n pass", "def test_delete_project(self):\n pass", "def test_delete_project(self):\n pass", "def test_unassign_managing_team(self):\n pass", "def test_team_template_folders_id_delete(self):\n pass", "def test_remove_self_from_team_success(self):\n team = Team.create(name='foo', captain_id='User_cap',\n program_id=self.program.uid)\n user = User.create(name='foo', email='[email protected]', user_type='user',\n owned_teams=[team.uid])\n user.put()\n team.put()\n\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_teams': []},\n headers=self.login_headers(user),\n )\n\n # User is removed from team.\n self.assertEqual(json.loads(response.body)['owned_teams'], [])", "def test_delete_case(self):\n pass", "def test_delete_team_user_group(client):\n resp = client.delete_team_user_group(TEAM_ID, NEW_GROUP_ID)\n assert resp['team_id'] == TEAM_ID\n assert resp['group_deleted']", "def test_delete_run(self):\n pass", "def test_create_team(self):\n pass", "def destroy(self, request, pk=None):\n try:\n deleted_team = self.controller.delete_team(pk)\n return Response(status=status.HTTP_204_NO_CONTENT)\n except ObjectDoesNotExist:\n return Response(ObjectDoesNotExist, status=status.HTTP_400_BAD_REQUEST)", "def test_handle_remove(self):\n test_user = User(\"userid\")\n test_user.permissions_level = Permissions.admin\n test_user.github_username = \"githubuser\"\n team = Team(\"BRS\", \"brs\", \"web\")\n team.github_team_id = \"githubid\"\n other_user = User(\"anotheruser\")\n other_user.github_id = \"githubID\"\n other_user.github_username = \"myuser\"\n self.db.retrieve.side_effect = [test_user, other_user,\n test_user, other_user]\n self.db.query.return_value = [team]\n team_attach = [team.get_attachment()]\n with self.app.app_context():\n self.testcommand.handle(\"team add brs ID\", user)\n resp, code = self.testcommand.handle(\"team remove brs ID\", user)\n expect = {'attachments': team_attach,\n 'text': 'Removed ' 'User from brs'}\n self.assertDictEqual(resp, expect)\n self.assertEqual(code, 200)\n self.db.store.assert_called_with(team)\n self.gh.remove_team_member.assert_called_once_with(\"myuser\",\n \"githubid\")", "async def delete_team_member(team_id: str = Path(..., description=\"ID value of the desired team\"),\n user_id: str = Path(..., description=\"ID value of the desired user\"),\n db_handler: DBHandler = Depends(database_dependency)):\n deleted_record = await db_handler.delete_team_member(team_id=team_id, user_id=user_id)\n deleted_record = {\"id_team\": deleted_record[0], \"id_user\": deleted_record[1]}\n\n return deleted_record", "def test_meeting_delete(self):\n pass", "def test_removeperson(self):\n p1, p2, p3 = self.create3persons()\n t = model.Team(name='Tigers', persons=[p1, p2, p3])\n id = t.store()\n t.remove_person(p2)\n t.store()\n\n t2 = model.Team(id=id)\n self.assertEqual(t2.persons, [p1.id, p3.id])\n\n with self.assertRaises(ValueError): # cannot be removed again\n t2.remove_person(p2)", "def test_delete(self):\n pass" ]
[ "0.8947988", "0.8845459", "0.8455357", "0.7699966", "0.7632404", "0.7621499", "0.7612774", "0.7545243", "0.7499598", "0.7487986", "0.7477289", "0.7415911", "0.74132884", "0.73915285", "0.7368085", "0.72588295", "0.72588295", "0.7225099", "0.72123814", "0.72032857", "0.7188579", "0.71843404", "0.71803653", "0.7169649", "0.7137038", "0.7131324", "0.7125713", "0.7106972", "0.7019003", "0.70113355" ]
0.945034
0
Test case for delete_template_subscription
def test_delete_template_subscription(self):
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_subscription_template(self):\n pass", "def test_delete_subscription(self):\n pass", "def test_issue_delete_subscription(self):\n pass", "def test_delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_create_subscription_template(self):\n pass", "def test_update_template_subscription(self):\n pass", "def test_get_template_subscription(self):\n pass", "def test_delete_namespaced_template(self):\n pass", "def test_delete_template_success(self):\n template_id = util.MOCK_UUID_1\n\n rv = TEST_CLIENT.delete(f\"/templates/{template_id}\")\n result = rv.json()\n\n expected = {\"message\": \"Template deleted\"}\n self.assertDictEqual(expected, result)", "def test_update_subscription_template(self):\n pass", "def test_delete_device_template(self):\n pass", "def test_delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_delete_activity_template(self):\n pass", "def test_get_subscription_template(self):\n pass", "def test_delete_confirmation_template(self):\n self.login()\n\n # BlogIndex needs translated pages before child pages can be translated\n self.fr_blog_index = self.en_blog_index.copy_for_translation(self.fr_locale)\n # Create a copy of the en_blog_post object as a translated page\n self.fr_blog_post = self.en_blog_post.copy_for_translation(self.fr_locale)\n\n # Create an alias page to test the `translations_to_move_count`\n # in the template context\n new_page = CreatePageAliasAction(\n self.en_blog_post,\n recursive=False,\n parent=self.en_blog_index,\n update_slug=\"alias-page-slug\",\n user=None,\n )\n new_page.execute(skip_permission_checks=True)\n\n response = self.client.get(\n reverse(\n \"wagtailadmin_pages:delete\",\n args=(self.en_blog_post.id,),\n ),\n follow=True,\n )\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.context[\"translation_count\"], 1)\n self.assertEqual(response.context[\"translation_descendant_count\"], 0)\n self.assertIn(\n \"Deleting this page will also delete 1 translation of this page.\",\n response.content.decode(\"utf-8\"),\n )", "def test_delete_collection_namespaced_template(self):\n pass", "def test_delete_multiple_templates_success(self):\n template_id_1 = util.MOCK_UUID_1\n template_id_2 = util.MOCK_UUID_2\n\n rv = TEST_CLIENT.post(\n \"/templates/deletetemplates\", json=[template_id_1, template_id_2]\n )\n result = rv.json()\n\n expected = {\"message\": \"Successfully removed templates\"}\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 200)", "def test_list_template_subscriptions(self):\n pass", "def pre_service_template_delete(self, resource_id):\n pass", "def test_get_subscription_templates(self):\n pass", "def test_xml_template_delete(self):\n XmlTemplate.delete_by_id(1)\n self.assertEqual(XmlTemplate.objects.count(), 1)\n self.assertFalse(XmlTemplate.objects.filter(pk=1).exists())", "def delete_template():\n posted_json = request.get_json(force=True)\n try:\n name = posted_json['template_name']\n except KeyError:\n print(\"Not all required keys are present!\")\n r = jsonify(message=\"Not all required keys for add template are present\", success=False, status_code=400)\n r.status_code = 400\n return r\n\n if bootstrapper_utils.delete_template(name):\n return jsonify(success=True, message='Deleted Template Successfully', status_code=200)\n else:\n r = jsonify(success=False, message='Could not delete template', status_code=500)\n r.status_code = 500\n 
return r", "def test_unshare_template_registration(self):\n pass", "def post_service_template_delete(self, resource_id, resource_dict):\n pass", "def test_create_subscription(self):\n pass", "async def delete_sub(self, sub: TSub) -> None:", "def test_publication_view_delete(self):\n \n test_response = self.client.get('/papers/14-3-3-proteins-a-number-of-functions-for-a-numbered-protein/delete/')\n self.assertEqual(test_response.status_code, 200)\n self.assertTrue('publication' in test_response.context) \n self.assertTemplateUsed(test_response, 'confirm_delete.html')\n self.assertEqual(test_response.context['publication'].pk, 1)\n self.assertEqual(test_response.context['publication'].title, u'14-3-3 proteins: a number of functions for a numbered protein.')\n\n #verifies that a non-existent object returns a 404 error.\n null_response = self.client.get('/papers/not-a-real-paper/delete/')\n self.assertEqual(null_response.status_code, 404)", "def test_unregister_template(self):\n pass", "def test_delete(admin_client):\n book = BookFactory()\n url = reverse(\"admin:books_book_delete\", args=(book.pk,))\n\n response = admin_client.get(url)\n templates_used = [t.name for t in response.templates]\n\n assert response.status_code == 200\n render_counts = {x: templates_used.count(x) for x in set(templates_used)}\n\n # The number of times each template was rendered\n assert render_counts == {\n \"admin/delete_confirmation.html\": 1,\n \"admin/base_site.html\": 1,\n \"admin/base.html\": 1,\n \"admin/includes/object_delete_summary.html\": 1,\n \"jazzmin/includes/ui_builder_panel.html\": 1,\n }\n\n # The templates that were used\n assert set(templates_used) == {\n \"admin/delete_confirmation.html\",\n \"admin/base_site.html\",\n \"admin/base.html\",\n \"admin/includes/object_delete_summary.html\",\n \"jazzmin/includes/ui_builder_panel.html\",\n }\n\n response = admin_client.post(url, data={\"post\": \"yes\"}, follow=True)\n\n # We deleted our object, and are now back on the changelist\n assert not Book.objects.all().exists()\n assert response.resolver_match.url_name == \"books_book_changelist\"", "def test_cancel_subscription(self):\n try:\n self.arb.cancel_subscription()\n except KeyError:\n self.arb.cancel_subscription(subscription_id=u\"1234\")" ]
[ "0.93200946", "0.8312369", "0.79072946", "0.76360947", "0.74938536", "0.742648", "0.7405564", "0.7405501", "0.7369085", "0.7240035", "0.7142435", "0.7085907", "0.6982647", "0.6981773", "0.690621", "0.68341494", "0.6748115", "0.6743684", "0.6704954", "0.66484153", "0.6624082", "0.6586667", "0.6545917", "0.6533862", "0.64925456", "0.6463877", "0.643815", "0.6423308", "0.640594", "0.63928586" ]
0.95145535
0
Test case for determine_valid_virtualization_realms
def test_determine_valid_virtualization_realms(self):
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_virtualization_realms(self):\n pass", "def test_get_virtualization_realm(self):\n pass", "def test_register_virtualization_realm(self):\n pass", "def test_allocate_virtualization_realm(self):\n pass", "def test_get_team_owned_or_managed_virtualization_realms(self):\n pass", "def test_set_virtualization_realm_active(self):\n pass", "def test_update_virtualization_realm(self):\n pass", "def test_get_virtualization_realm_resources(self):\n pass", "def test_list_virt_realms_in_cloud(self):\n pass", "def test_list_virtualization_realm_templates(self):\n pass", "def test_get_valid_networks_for_virtualization_realm(self):\n pass", "def test_get_project_virt_realms(self):\n pass", "def test_get_templates_in_virtualization_realm(self):\n pass", "def test_svm_vs_vm_count():\n assert templates.svms() >= templates.vm_count()", "def test_svm_vs_vm_count():\n assert environments.svms() >= environments.vm_count()", "def is_virtualized (self):\n return len([i for i in self.infras if\n i.infra_type not in (self.TYPE_INFRA_SDN_SW, self.TYPE_INFRA_EE,\n self.TYPE_INFRA_STATIC_EE)]) > 0", "def test_get_deployment_runs_in_virtualization_realm(self):\n pass", "def _check_vm_record(self):\n instances = self.conn.list_instances()\n self.assertEquals(len(instances), 1)\n\n # Get Nova record for VM\n vm_info = self.conn.get_info(1)\n\n # Get record for VM\n vms = vmwareapi_fake._get_objects(\"VirtualMachine\")\n vm = vms[0]\n\n # Check that m1.large above turned into the right thing.\n mem_kib = long(self.type_data['memory_mb']) << 10\n vcpus = self.type_data['vcpus']\n self.assertEquals(vm_info['max_mem'], mem_kib)\n self.assertEquals(vm_info['mem'], mem_kib)\n self.assertEquals(vm.get(\"summary.config.numCpu\"), vcpus)\n self.assertEquals(vm.get(\"summary.config.memorySizeMB\"),\n self.type_data['memory_mb'])\n\n # Check that the VM is running according to Nova\n self.assertEquals(vm_info['state'], power_state.RUNNING)\n\n # Check that the VM is running according to vSphere API.\n self.assertEquals(vm.get(\"runtime.powerState\"), 'poweredOn')", "def test_vm_migration(self):\n self.check_vm_host_after_migration(positive=False)", "def test_vm_migration(self):\n self.check_vm_host_after_migration(positive=False)", "def valid_vm(func): \n return lambda * args, **kwargs: \\\n _check_vm(None,\n 'VM', func, *args, **kwargs)", "def _check_virtualbox():\n # Adapted from https://github.com/giampaolo/psutil/issues/132#issuecomment-44017679\n # to avoid race conditions\n for proc in psutil.process_iter():\n try:\n if proc.name() == 'VBoxHeadless':\n raise CommandError('S2E uses KVM to build images. VirtualBox '\n 'is currently running, which is not '\n 'compatible with KVM. Please close all '\n 'VirtualBox VMs and try again.')\n except NoSuchProcess:\n pass", "def test_set_project_default_virtualization_realm(self):\n pass", "def test_vm_migration(self):\n self.check_vm_host_after_migration(positive=True)", "def test_vm_migration(self):\n self.check_vm_host_after_migration(positive=True)", "def check(cls):\n vms = list(cls._vm_agents_for_host())\n\n large_overhead_vms = []\n swapping_vms = []\n total_guest_and_overhead = 0\n expected_guest_and_overhead = 0\n\n # individual VMs ok?\n for vm in vms:\n with vm:\n try:\n vm_mem = vm.qemu.proc().memory_full_info()\n except Exception:\n # It's likely that the process went away while we analyzed\n # it. 
Ignore.\n continue\n if vm_mem.swap > 1 * GiB:\n swapping_vms.append(vm)\n expected_size = (\n vm.cfg[\"memory\"] * MiB\n + 2 * vm.qemu.vm_expected_overhead * MiB\n )\n expected_guest_and_overhead += (\n vm.cfg[\"memory\"] * MiB + vm.qemu.vm_expected_overhead * MiB\n )\n total_guest_and_overhead += vm_mem.pss\n if vm_mem.pss > expected_size:\n large_overhead_vms.append(vm)\n\n output = []\n result = OK\n if large_overhead_vms:\n result = WARNING\n output.append(\n \"VMs with large overhead: \"\n + \",\".join(x.name for x in large_overhead_vms)\n )\n if swapping_vms:\n result = WARNING\n output.append(\n \"VMs swapping:\" + \",\".join(x.name for x in swapping_vms)\n )\n if total_guest_and_overhead > expected_guest_and_overhead:\n result = CRITICAL\n output.append(\"High total overhead\")\n\n if result is OK:\n output.insert(0, \"OK\")\n elif result is WARNING:\n output.insert(0, \"WARNING\")\n elif result is CRITICAL:\n output.insert(0, \"CRITICAL\")\n else:\n output.insert(0, \"UNKNOWN\")\n\n output.insert(1, \"{} VMs\".format(len(vms)))\n output.insert(\n 2, \"{:,.0f} MiB used\".format(total_guest_and_overhead / MiB)\n )\n output.insert(\n 3, \"{:,.0f} MiB expected\".format(expected_guest_and_overhead / MiB)\n )\n\n print(\" - \".join(output))\n\n return result", "def test_vsg_for_multiple_vcpes_in_vsg_vm_with_one_vcpe_going_down(self):", "def test_ipam_vrfs_list(self):\n pass", "def test_vms_host(self):\n testflow.step(\"Check if VM's started on different hosts\")\n assert (\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[0]) !=\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[1])\n )", "def test_vms_host(self):\n testflow.step(\"Check if VM's started on different hosts\")\n assert (\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[0]) !=\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[1])\n )" ]
[ "0.83034915", "0.7327131", "0.7214104", "0.7093031", "0.69469243", "0.69159096", "0.6753926", "0.67384803", "0.64560604", "0.6443416", "0.64283746", "0.6202861", "0.6066466", "0.60032904", "0.5895371", "0.58919233", "0.58813286", "0.5852768", "0.58234596", "0.58234596", "0.58123296", "0.57705516", "0.57684374", "0.5740042", "0.5740042", "0.56718284", "0.55811566", "0.54991645", "0.54860723", "0.54860723" ]
0.9549797
0
Test case for disable_virt_realm_remote_access
def test_disable_virt_realm_remote_access(self):
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_enable_virt_realm_remote_access(self):\n pass", "def test_update_virt_realm_remote_access_config(self):\n pass", "def test_remove_virt_realm(self):\n pass", "def test_update_virt_realm(self):\n pass", "def test_deallocate_virt_realm(self):\n pass", "def test_set_virtualization_realm_active(self):\n pass", "def test_get_virtualization_realm(self):\n pass", "def testDenyAllowAccess(self):\n self.host.ContinueAuth()\n self.host.SignIn(self.account['username'], self.account['password'])\n self.host.DenyAccess()\n self.host.ContinueAuth()\n self.host.AllowAccess()", "def test_update_virtualization_realm(self):\n pass", "def test_get_virtualization_realm_resources(self):\n pass", "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def test_register_virtualization_realm(self):\n pass", "def test_you_must_be_realm_admin(self) -> None:\n user_profile = self.example_user(\"hamlet\")\n self.login_user(user_profile)\n\n other_realm = do_create_realm(string_id=\"other\", name=\"other\")\n stream = self.make_stream(\"other_realm_stream\", realm=other_realm)\n\n result = self.client_delete(\"/json/streams/\" + str(stream.id))\n self.assert_json_error(result, \"Invalid stream ID\")\n\n # Even becoming a realm admin doesn't help us for an out-of-realm\n # stream.\n do_change_user_role(user_profile, UserProfile.ROLE_REALM_ADMINISTRATOR, acting_user=None)\n result = self.client_delete(\"/json/streams/\" + str(stream.id))\n self.assert_json_error(result, \"Invalid stream ID\")", "def stop_remote_access_session(arn=None):\n pass", "def test_allocate_virtualization_realm(self):\n pass", "def test_disable_svn_access(self):\n org = 'o=%s' % (self.org_name)\n people = '%s=%s' % (self.container_attr, self.user_container)\n uid = '%s=%s' % (self.user_key, self.user_id)\n dn = '%s,%s,%s,%s' % (uid, people, org, self.base_dn)\n dn_info = {self.svn_enable_attr: ['FALSE']}\n expected_result = [(dn, dn_info)]\n svn = SpokeSVN(self.org_name, self.user_id)\n result = svn.modify(enable=False)['data']\n self.assertEqual(result, expected_result)", "def firewallOff():\n pass", "def test_dontStartPrivilegedService(self):\n ports = self._privilegedStartService(self.highPortNumber)\n self.assertEqual(ports, [])", "def allow_remote_invocation(func, method='auto'):\r\n setattr(func, 'allow_rmi', method)\r\n return func", "def test_001_unauthorized_access(self):\n false_token = \"12345\"\n self.info(\"Will use token %s\", false_token)\n client = ComputeClient(self.clients.compute_url, false_token)\n client.CONNECTION_RETRY_LIMIT = self.clients.retry\n\n with self.assertRaises(ClientError) as cl_error:\n client.list_servers()\n self.assertEqual(cl_error.exception.status, 401)", "def disable(ctx):\n config_db = ConfigDBConnector()\n config_db.connect()\n config_db.mod_entry(\"NAT_GLOBAL\", \"Values\", {\"admin_mode\": \"disabled\"})", "def test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "def test_server_administrator():\n if is_server_administrator():\n return True\n raise False", "def test_get_deployment_runs_in_virtualization_realm(self):\n pass", "def syslog_remote_disable(handle, name):\n\n mo = handle.query_dn(\"sys/svc-ext/syslog/client-\" + name)\n if mo:\n mo.admin_state = \"disabled\"\n handle.add_mo(mo, modify_present=True)\n handle.commit()\n else:\n raise ValueError(\"Syslog Mo is not available.\")", "def test_disable_missing_svn_access(self):\n svn = SpokeSVN(self.org_name, self.user_id)\n 
svn.delete(self.svn_repo_name)\n self.assertRaises(error.NotFound, svn.modify, enable=False)", "def require_server_administrator():\n if not test_server_administrator():\n raise cherrypy.HTTPError(403)", "def test_live_migrate_server_fails_as_user(self):\n with self.assertRaises(Forbidden):\n self.servers_client.live_migrate_server(self.server.id)", "def disable_authentication():\n cherrypy.request.security = { \"user\" : \"\", \"name\" : \"\", \"roles\": [] }", "def remote(self):\n logging.info(__name__ + ' : Set control to remote & locked')\n self.set_remote_status(1)" ]
[ "0.8754143", "0.7818323", "0.7398297", "0.6757615", "0.667105", "0.6400662", "0.63291025", "0.62900096", "0.62387425", "0.60416585", "0.6013636", "0.600299", "0.5893716", "0.5850884", "0.57936156", "0.5786105", "0.57541054", "0.5742724", "0.57228196", "0.57190657", "0.5684966", "0.5675352", "0.5634905", "0.5580286", "0.5543333", "0.5523513", "0.5498401", "0.54955274", "0.5466199", "0.544886" ]
0.96557635
0
Test case for download
def test_download(self):
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_download1(self):\n pass", "def test_download2(self):\n pass", "def test_download(self):\n\n # Test for all correct data\n self.assertEqual(\n download(TestSteelEye.url, self.xmlfilepath, \"sourcefile.xml\"),\n self.xmlfilepath + os.sep + \"sourcefile.xml\",\n )\n\n # Test for incorrect url\n self.assertEqual(\n download(\"http://example.com\", self.xmlfilepath, \"sourcefile.xml\"), \"\"\n )\n\n # Test for different download path\n self.assertEqual(\n download(\n TestSteelEye.url,\n os.path.join(os.getcwd(), \"anotherpath\"),\n \"sourcefile.xml\",\n ),\n os.path.join(os.getcwd(), \"anotherpath\") + os.sep + \"sourcefile.xml\",\n )\n\n # Test for incorrect download path\n self.assertEqual(download(TestSteelEye.url, \"E:\", \"sourcefile.xml\"), \"\")", "def download():\n raise NotImplementedError", "def test_download(self):\n test_file = os.path.join(self._system.get_temporary_path(), \"nusoft.test\")\n self._system.download(\"http://www.github.com\", name=test_file)\n self.assertTrue(os.path.exists(test_file))\n os.remove(test_file)", "def test_download_host(self):\n pass", "def download(self):\n pass", "def download(self):\n pass", "def test_download(self, file_list, _):\n url = reverse(\"report_download\")\n response = self.client.get(url)\n body = response.json()\n\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"Download Request Task ID\", body)\n\n url_w_params = url + \"?provider_uuid=1&bill_date=2021-04-01\"\n response = self.client.get(url_w_params)\n body = response.json()\n\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"Download Request Task ID\", body)", "def download_files(self):", "def test_downloading(self):\n month = '2013-01' # smallest of available datasets\n path = download_data(month)\n self.assertTrue(os.path.isfile(path), msg='File on returned location does not exist')\n os.remove(path)", "def test_download_and_unlink(self):\n scrape_category.get_simfile_from_ziv(self.simfile, self.link, self.dest)\n assert os.path.exists(os.path.join(self.dest, \"sim100.zip\"))\n\n scrape_category.unlink_zip(self.simfile, self.dest)\n assert not os.path.exists(os.path.join(self.dest, \"sim100.zip\"))", "def test_file_download_fail(self):\n with mock.patch(\"JumpScale.j\") as j_mock:\n from JumpScale import j\n import JumpScale.tools.cuisine.CuisineCore\n JumpScale.tools.cuisine.CuisineCore.j = j\n from JumpScale.tools.cuisine.CuisineCore import CuisineCore\n from JumpScale.core.errorhandling import JSExceptions\n executor_mock = mock.MagicMock()\n j.tools.executor.getLocal.return_value = executor_mock\n executor = j.tools.executor.getLocal()\n cuisine = j.tools.cuisine.local\n cuisine_core = CuisineCore(executor, cuisine)\n url = 'http://hallo.com/downloadme.txt'\n to = '/tmp/path'\n cuisine_core.file_exists = mock.MagicMock()\n cuisine_core.file_exists.return_value = False\n cuisine_core.createDir = mock.MagicMock()\n cuisine_core.file_unlink = mock.MagicMock()\n cuisine_core.run = mock.MagicMock()\n cuisine_core.run.side_effect = [(32, '', 'err'), (0, 'Ok', '')]\n cuisine_core.touch = mock.MagicMock()\n j.exceptions.RuntimeError = JSExceptions.RuntimeError\n self.assertRaises(JSExceptions.RuntimeError, cuisine_core.file_download, url, to)", "def test_download_to_file(req, tmpdir):\n req.get(ENTREZ_URL, text='This works.')\n outdir = tmpdir.mkdir('outdir')\n filename = outdir.join('foo')\n expected = outdir.join('foo.gbk')\n config = core.Config(molecule='nucleotide', verbose=False)\n\n core.download_to_file('FOO', config, 
filename=filename)\n\n assert expected.check()", "def test_download_file(self):\n expected_full_path = \"{}/{}/azure/{}/{}\".format(\n Config.TMP_DIR, self.customer_name.replace(\" \", \"_\"), self.mock_data.container, self.mock_data.export_file\n )\n full_file_path, etag, _, __ = self.downloader.download_file(self.mock_data.export_key)\n self.assertEqual(full_file_path, expected_full_path)\n self.assertEqual(etag, self.mock_data.export_etag)", "def test_file_download(self):\n\n # Downloading without auth = unauthorized error (401)\n with self.assertRaises(requests.exceptions.HTTPError):\n self.assertFalse(self.api.downloadFile('/media/part/files/1/test.pdf', 'test.pdf'))", "def test_download_deployment_run_test_report(self):\n pass", "def test_download_view_valid(self):\n link = DownloadLink(path='/home/blah/file')\n link.save()\n factory = RequestFactory()\n request = factory.get('/downloads');\n error = False\n response = views.download(request, link.id);\n self.assertEqual(response['X-SendFile'], link.path)", "def test_download_simfile(self):\n scrape_category.download_simfile(self.simfile, self.dest,\n tidy=False,\n use_logfile=True,\n extract=True,\n link=self.link)\n\n # There should now be three files - a download log, a zip, and\n # an unzipped simfile.\n self.check_saved_files(log=True, unzipped=True, zipped=True)\n\n records = {\"100\": self.simfile}\n updated_records = scrape_category.update_records_from_log(records, self.dest)\n assert len(updated_records) == 1\n assert \"100\" in updated_records\n # The records should be updated to reflect where the simfile\n # was actually saved\n assert updated_records[\"100\"].name == \"foo\"", "def download(self, url_match):\n pass", "def download(self,fn):\n\t\treturn False #TODO: implement meme download", "def test_download(client: FlaskClient):\n # Note: this wouldn't work for image files, which may be processed during upload\n file = get_example_file(ExampleFileType.Txt)\n response_upload = util.upload_file(client, DEFAULT_USER, file)\n response_download = util.download_file(\n client, DEFAULT_USER, response_upload.json[\"id\"]\n )\n assert response_download.status == \"200 OK\"\n assert response_download.data == file.contents", "def test_download_view_invalid_url(self):\n factory = RequestFactory()\n request = factory.get('/downloads');\n error = False\n try:\n response = views.download(request, 'fake_id');\n except: \n error = True\n\n self.assertTrue(error)", "def download(self, download_request):\n raise NotImplementedError", "def test_download(self):\n imgurl = \"{}spei03.nc\".format(self.processor.base_url)\n httpretty.register_uri(httpretty.GET, imgurl,\n body=get_mock_image())\n imgfile = self.processor.download(imgurl, 'spei03.tif')\n self.assertTrue(os.path.exists(os.path.join(\n self.processor.tmp_dir, imgfile)))", "def test_download_to_file_retry(req, tmpdir):\n req.get(ENTREZ_URL, response_list=[\n {\"text\": u'Whoa, slow down', \"status_code\": 429, \"headers\": {\"Retry-After\": \"0\"}},\n {\"text\": 'This works.'},\n ])\n outdir = tmpdir.mkdir('outdir')\n filename = outdir.join('foo')\n expected = outdir.join('foo.gbk')\n config = core.Config(molecule='nucleotide', verbose=False)\n\n core.download_to_file('FOO', config, filename=filename)\n\n assert expected.check()", "def test_download(api):\n # upload file prior to download\n # with pytest.raises(APIConnectionError):\n uploaded_file = api.upload(\n tag='test_upload',\n expiry='1w',\n path='tests/test_file.txt'\n )\n\n # check that instance of FileIO has these 
fields\n assert uploaded_file.link\n assert uploaded_file.key\n assert uploaded_file.tag\n assert uploaded_file.path\n\n # remove the uploaded file from the os\n remove('tests/test_file.txt')\n\n # download and save the file\n api.download(tag='test_upload')\n\n # check that file was saved in a filesystem\n assert path.isfile('tests/test_file.txt')", "def download(all):\n print(\"Downloading\")", "def run(self):\n download(self.attempt)", "def test_package_check_download(self, isfile):\n isfile.return_value = False\n r = self.client.get(self.check_download_url)\n eq_(r.status_code, 200)\n eq_(r.content, '{\"ready\": false}')\n isfile.return_value = True\n r = self.client.get(self.check_download_url)\n eq_(r.status_code, 200)\n eq_(r.content, '{\"ready\": true}')" ]
[ "0.8944001", "0.8849267", "0.80503774", "0.7840032", "0.77702606", "0.7720349", "0.75097364", "0.75097364", "0.7370474", "0.73632914", "0.73593014", "0.73494947", "0.7332144", "0.7329043", "0.7306071", "0.72306436", "0.7220985", "0.7213135", "0.71477216", "0.71468073", "0.7141396", "0.710677", "0.71034336", "0.70876807", "0.7056602", "0.705085", "0.70318115", "0.70266396", "0.7011951", "0.69952387" ]
0.91974604
0
Test case for download1
def test_download1(self):
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_download2(self):\n pass", "def test_download(self):\n pass", "def test_download(self):\n\n # Test for all correct data\n self.assertEqual(\n download(TestSteelEye.url, self.xmlfilepath, \"sourcefile.xml\"),\n self.xmlfilepath + os.sep + \"sourcefile.xml\",\n )\n\n # Test for incorrect url\n self.assertEqual(\n download(\"http://example.com\", self.xmlfilepath, \"sourcefile.xml\"), \"\"\n )\n\n # Test for different download path\n self.assertEqual(\n download(\n TestSteelEye.url,\n os.path.join(os.getcwd(), \"anotherpath\"),\n \"sourcefile.xml\",\n ),\n os.path.join(os.getcwd(), \"anotherpath\") + os.sep + \"sourcefile.xml\",\n )\n\n # Test for incorrect download path\n self.assertEqual(download(TestSteelEye.url, \"E:\", \"sourcefile.xml\"), \"\")", "def download_files(self):", "def download():\n raise NotImplementedError", "def test_download_and_unlink(self):\n scrape_category.get_simfile_from_ziv(self.simfile, self.link, self.dest)\n assert os.path.exists(os.path.join(self.dest, \"sim100.zip\"))\n\n scrape_category.unlink_zip(self.simfile, self.dest)\n assert not os.path.exists(os.path.join(self.dest, \"sim100.zip\"))", "def test_download(self):\n test_file = os.path.join(self._system.get_temporary_path(), \"nusoft.test\")\n self._system.download(\"http://www.github.com\", name=test_file)\n self.assertTrue(os.path.exists(test_file))\n os.remove(test_file)", "def download(all):\n print(\"Downloading\")", "def test_download_host(self):\n pass", "def test_downloading(self):\n month = '2013-01' # smallest of available datasets\n path = download_data(month)\n self.assertTrue(os.path.isfile(path), msg='File on returned location does not exist')\n os.remove(path)", "def download1():\n #t=request.vars.arg(0)\n response.flash=request\n #print request.wsgi.environ['HTTP_REFERER']\n #print 'yghklo=',request.args[0]\n a=db(db.Project.Project_File==request.args[0]).select(db.Project.ALL)\n #a=db(db.Project.id==38).select(db.Project.ALL)\n #if a == None:\n#\t print 'silent'\n # print 'a= aabhas download',a[0].no_of_download, a[0].Project_File\n # if a[0].no_of_download==None:\n#\t a[0].no_download=0\n db(db.Project.Project_File==a[0].Project_File).update(no_of_download=(a[0].no_of_download or 0)+1)\n print 'a.id=',a[0].id\n # print len(a),'\\n'\n #print \"\\n\\n\\n\\n\"\n return response.download(request, db)", "def download(self,fn):\n\t\treturn False #TODO: implement meme download", "def test_file_download_fail(self):\n with mock.patch(\"JumpScale.j\") as j_mock:\n from JumpScale import j\n import JumpScale.tools.cuisine.CuisineCore\n JumpScale.tools.cuisine.CuisineCore.j = j\n from JumpScale.tools.cuisine.CuisineCore import CuisineCore\n from JumpScale.core.errorhandling import JSExceptions\n executor_mock = mock.MagicMock()\n j.tools.executor.getLocal.return_value = executor_mock\n executor = j.tools.executor.getLocal()\n cuisine = j.tools.cuisine.local\n cuisine_core = CuisineCore(executor, cuisine)\n url = 'http://hallo.com/downloadme.txt'\n to = '/tmp/path'\n cuisine_core.file_exists = mock.MagicMock()\n cuisine_core.file_exists.return_value = False\n cuisine_core.createDir = mock.MagicMock()\n cuisine_core.file_unlink = mock.MagicMock()\n cuisine_core.run = mock.MagicMock()\n cuisine_core.run.side_effect = [(32, '', 'err'), (0, 'Ok', '')]\n cuisine_core.touch = mock.MagicMock()\n j.exceptions.RuntimeError = JSExceptions.RuntimeError\n self.assertRaises(JSExceptions.RuntimeError, cuisine_core.file_download, url, to)", "def download(self):\n pass", "def download(self):\n pass", "def 
test_download_simfile(self):\n scrape_category.download_simfile(self.simfile, self.dest,\n tidy=False,\n use_logfile=True,\n extract=True,\n link=self.link)\n\n # There should now be three files - a download log, a zip, and\n # an unzipped simfile.\n self.check_saved_files(log=True, unzipped=True, zipped=True)\n\n records = {\"100\": self.simfile}\n updated_records = scrape_category.update_records_from_log(records, self.dest)\n assert len(updated_records) == 1\n assert \"100\" in updated_records\n # The records should be updated to reflect where the simfile\n # was actually saved\n assert updated_records[\"100\"].name == \"foo\"", "def test_download(self, file_list, _):\n url = reverse(\"report_download\")\n response = self.client.get(url)\n body = response.json()\n\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"Download Request Task ID\", body)\n\n url_w_params = url + \"?provider_uuid=1&bill_date=2021-04-01\"\n response = self.client.get(url_w_params)\n body = response.json()\n\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"Download Request Task ID\", body)", "def test_download_to_file(req, tmpdir):\n req.get(ENTREZ_URL, text='This works.')\n outdir = tmpdir.mkdir('outdir')\n filename = outdir.join('foo')\n expected = outdir.join('foo.gbk')\n config = core.Config(molecule='nucleotide', verbose=False)\n\n core.download_to_file('FOO', config, filename=filename)\n\n assert expected.check()", "def test_download_redirect(self):\n\n fetcher = Fetcher('/unused/root/dir')\n with self.setup_server() as base_url:\n self._URL = base_url\n self.assertFalse(self._URL2_ACCESSED)\n self.assertFalse(self._URL1_ACCESSED)\n\n path = fetcher.download(base_url + '/url2')\n self.assertTrue(self._URL2_ACCESSED)\n self.assertTrue(self._URL1_ACCESSED)\n\n with open(path) as fp:\n self.assertEqual('returned from redirect\\r\\n', fp.read())", "def test_package_check_download(self, isfile):\n isfile.return_value = False\n r = self.client.get(self.check_download_url)\n eq_(r.status_code, 200)\n eq_(r.content, '{\"ready\": false}')\n isfile.return_value = True\n r = self.client.get(self.check_download_url)\n eq_(r.status_code, 200)\n eq_(r.content, '{\"ready\": true}')", "def test_download_deployment_run_test_report(self):\n pass", "def download(self, url_match):\n pass", "def test_download_to_file_retry(req, tmpdir):\n req.get(ENTREZ_URL, response_list=[\n {\"text\": u'Whoa, slow down', \"status_code\": 429, \"headers\": {\"Retry-After\": \"0\"}},\n {\"text\": 'This works.'},\n ])\n outdir = tmpdir.mkdir('outdir')\n filename = outdir.join('foo')\n expected = outdir.join('foo.gbk')\n config = core.Config(molecule='nucleotide', verbose=False)\n\n core.download_to_file('FOO', config, filename=filename)\n\n assert expected.check()", "def test_download_file(self):\n expected_full_path = \"{}/{}/azure/{}/{}\".format(\n Config.TMP_DIR, self.customer_name.replace(\" \", \"_\"), self.mock_data.container, self.mock_data.export_file\n )\n full_file_path, etag, _, __ = self.downloader.download_file(self.mock_data.export_key)\n self.assertEqual(full_file_path, expected_full_path)\n self.assertEqual(etag, self.mock_data.export_etag)", "def run(self):\n download(self.attempt)", "def download_and_prepare(self):\n self._download_and_prepare()", "def test_download(client: FlaskClient):\n # Note: this wouldn't work for image files, which may be processed during upload\n file = get_example_file(ExampleFileType.Txt)\n response_upload = util.upload_file(client, DEFAULT_USER, file)\n response_download = util.download_file(\n client, DEFAULT_USER, response_upload.json[\"id\"]\n )\n assert response_download.status == \"200 OK\"\n assert response_download.data == file.contents", "def test_file_download(self):\n\n # Downloading without auth = unauthorized error (401)\n with self.assertRaises(requests.exceptions.HTTPError):\n self.assertFalse(self.api.downloadFile('/media/part/files/1/test.pdf', 'test.pdf'))", "def download_test_files(request):\n\n # Log the start of the function\n logger.info(\"=========== returns ms1 test files from code directory input/ms1\")\n\n # create an absolute path to the 'example_data_dir' containing the test data files, then create\n # absolute paths to each test data file. Note the test data files are located in this code base.\n example_data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','input/ms1')\n pos_input = os.path.join(example_data_dir, example_pos_filename)\n neg_input = os.path.join(example_data_dir, example_neg_filename)\n tracer_file = os.path.join(example_data_dir, example_tracer_filename)\n run_sequence_pos_file = os.path.join(example_data_dir, example_run_sequence_pos_filename)\n run_sequence_neg_file = os.path.join(example_data_dir, example_run_sequence_neg_filename)\n\n # create filenames\n filename1 = 'ms1_pos_input_test_data.csv'\n filename2 = 'ms1_neg_input_test_data.csv'\n filename3 = 'ms1_tracer_test_data.csv'\n filename4 = 'ms1_run_sequence_pos_test_data.csv'\n filename5 = 'ms1_run_sequence_neg_test_data.csv'\n\n # List of files to be zipped\n files_to_zip = {filename1: pos_input, filename2: neg_input, filename3: tracer_file, filename4: run_sequence_pos_file, filename5: run_sequence_neg_file}\n\n # Create an in-memory zip file\n in_memory_zip = BytesIO()\n with ZipFile(in_memory_zip, 'w', ZIP_DEFLATED) as zipf:\n # Add each file to the zipfile\n for filename in files_to_zip:\n logger.info('filename: {}'.format(filename))\n file_path = files_to_zip[filename]\n with open(file_path, 'rb') as file:\n file_content = file.read()\n zipf.writestr(filename, file_content)\n # The ZipFile object is automatically closed when exiting the 'with' block\n\n zip_filename = "ms1_test_data_files.zip"\n # Create an HTTP response with the zip file attached for download\n response = HttpResponse(in_memory_zip.getvalue(),content_type='application/zip')\n response['Content-Disposition'] = 'attachment; filename=' + zip_filename\n response['Content-length'] = in_memory_zip.tell()\n\n # Return the HTTP response\n return response", "def test_download_view_invalid_url(self):\n factory = RequestFactory()\n request = factory.get('/downloads');\n error = False\n try:\n response = views.download(request, 'fake_id');\n except: \n error = True\n\n self.assertTrue(error)" ]
[ "0.8843646", "0.8475365", "0.732788", "0.7254818", "0.7198353", "0.7128626", "0.7094492", "0.7063594", "0.703302", "0.7007184", "0.69974625", "0.6974339", "0.68953013", "0.6882323", "0.6882323", "0.68799114", "0.68451935", "0.6800624", "0.6794042", "0.6739918", "0.6677447", "0.66727114", "0.66109276", "0.656102", "0.6557939", "0.6531024", "0.6470864", "0.6442661", "0.6440364", "0.64362186" ]
0.9083264
0
Test case for download2
def test_download2(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_download1(self):\n pass", "def test_download(self):\n pass", "def test_download(self):\n\n # Test for all correct data\n self.assertEqual(\n download(TestSteelEye.url, self.xmlfilepath, \"sourcefile.xml\"),\n self.xmlfilepath + os.sep + \"sourcefile.xml\",\n )\n\n # Test for incorrect url\n self.assertEqual(\n download(\"http://example.com\", self.xmlfilepath, \"sourcefile.xml\"), \"\"\n )\n\n # Test for different download path\n self.assertEqual(\n download(\n TestSteelEye.url,\n os.path.join(os.getcwd(), \"anotherpath\"),\n \"sourcefile.xml\",\n ),\n os.path.join(os.getcwd(), \"anotherpath\") + os.sep + \"sourcefile.xml\",\n )\n\n # Test for incorrect download path\n self.assertEqual(download(TestSteelEye.url, \"E:\", \"sourcefile.xml\"), \"\")", "def download():\n raise NotImplementedError", "def download_files(self):", "def test_download_and_unlink(self):\n scrape_category.get_simfile_from_ziv(self.simfile, self.link, self.dest)\n assert os.path.exists(os.path.join(self.dest, \"sim100.zip\"))\n\n scrape_category.unlink_zip(self.simfile, self.dest)\n assert not os.path.exists(os.path.join(self.dest, \"sim100.zip\"))", "def test_download_to_file(req, tmpdir):\n req.get(ENTREZ_URL, text='This works.')\n outdir = tmpdir.mkdir('outdir')\n filename = outdir.join('foo')\n expected = outdir.join('foo.gbk')\n config = core.Config(molecule='nucleotide', verbose=False)\n\n core.download_to_file('FOO', config, filename=filename)\n\n assert expected.check()", "def test_file_download_fail(self):\n with mock.patch(\"JumpScale.j\") as j_mock:\n from JumpScale import j\n import JumpScale.tools.cuisine.CuisineCore\n JumpScale.tools.cuisine.CuisineCore.j = j\n from JumpScale.tools.cuisine.CuisineCore import CuisineCore\n from JumpScale.core.errorhandling import JSExceptions\n executor_mock = mock.MagicMock()\n j.tools.executor.getLocal.return_value = executor_mock\n executor = j.tools.executor.getLocal()\n cuisine = j.tools.cuisine.local\n cuisine_core = CuisineCore(executor, cuisine)\n url = 'http://hallo.com/downloadme.txt'\n to = '/tmp/path'\n cuisine_core.file_exists = mock.MagicMock()\n cuisine_core.file_exists.return_value = False\n cuisine_core.createDir = mock.MagicMock()\n cuisine_core.file_unlink = mock.MagicMock()\n cuisine_core.run = mock.MagicMock()\n cuisine_core.run.side_effect = [(32, '', 'err'), (0, 'Ok', '')]\n cuisine_core.touch = mock.MagicMock()\n j.exceptions.RuntimeError = JSExceptions.RuntimeError\n self.assertRaises(JSExceptions.RuntimeError, cuisine_core.file_download, url, to)", "def test_download(self):\n test_file = os.path.join(self._system.get_temporary_path(), \"nusoft.test\")\n self._system.download(\"http://www.github.com\", name=test_file)\n self.assertTrue(os.path.exists(test_file))\n os.remove(test_file)", "def test_download_redirect(self):\n\n fetcher = Fetcher('/unused/root/dir')\n with self.setup_server() as base_url:\n self._URL = base_url\n self.assertFalse(self._URL2_ACCESSED)\n self.assertFalse(self._URL1_ACCESSED)\n\n path = fetcher.download(base_url + '/url2')\n self.assertTrue(self._URL2_ACCESSED)\n self.assertTrue(self._URL1_ACCESSED)\n\n with open(path) as fp:\n self.assertEqual('returned from redirect\\r\\n', fp.read())", "def test_download_host(self):\n pass", "def download(self):\n pass", "def download(self):\n pass", "def test_download_simfile(self):\n scrape_category.download_simfile(self.simfile, self.dest,\n tidy=False,\n use_logfile=True,\n extract=True,\n link=self.link)\n\n # There should now be three files - a download log, a zip, 
and\n # an unzipped simfile.\n self.check_saved_files(log=True, unzipped=True, zipped=True)\n\n records = {\"100\": self.simfile}\n updated_records = scrape_category.update_records_from_log(records, self.dest)\n assert len(updated_records) == 1\n assert \"100\" in updated_records\n # The records should be updated to reflect where the simfile\n # was actually saved\n assert updated_records[\"100\"].name == \"foo\"", "def download(self,fn):\n\t\treturn False #TODO: implement meme download", "def test_download_to_file_retry(req, tmpdir):\n req.get(ENTREZ_URL, response_list=[\n {\"text\": u'Whoa, slow down', \"status_code\": 429, \"headers\": {\"Retry-After\": \"0\"}},\n {\"text\": 'This works.'},\n ])\n outdir = tmpdir.mkdir('outdir')\n filename = outdir.join('foo')\n expected = outdir.join('foo.gbk')\n config = core.Config(molecule='nucleotide', verbose=False)\n\n core.download_to_file('FOO', config, filename=filename)\n\n assert expected.check()", "def test_downloading(self):\n month = '2013-01' # smallest of available datasets\n path = download_data(month)\n self.assertTrue(os.path.isfile(path), msg='File on returned location does not exist')\n os.remove(path)", "def test_download_file(self):\n expected_full_path = \"{}/{}/azure/{}/{}\".format(\n Config.TMP_DIR, self.customer_name.replace(\" \", \"_\"), self.mock_data.container, self.mock_data.export_file\n )\n full_file_path, etag, _, __ = self.downloader.download_file(self.mock_data.export_key)\n self.assertEqual(full_file_path, expected_full_path)\n self.assertEqual(etag, self.mock_data.export_etag)", "def download(self, url_match):\n pass", "def download1():\n #t=request.vars.arg(0)\n response.flash=request\n #print request.wsgi.environ['HTTP_REFERER']\n #print 'yghklo=',request.args[0]\n a=db(db.Project.Project_File==request.args[0]).select(db.Project.ALL)\n #a=db(db.Project.id==38).select(db.Project.ALL)\n #if a == None:\n#\t print 'silent'\n # print 'a= aabhas download',a[0].no_of_download, a[0].Project_File\n # if a[0].no_of_download==None:\n#\t a[0].no_download=0\n db(db.Project.Project_File==a[0].Project_File).update(no_of_download=(a[0].no_of_download or 0)+1)\n print 'a.id=',a[0].id\n # print len(a),'\\n'\n #print \"\\n\\n\\n\\n\"\n return response.download(request, db)", "def test_package_check_download(self, isfile):\n isfile.return_value = False\n r = self.client.get(self.check_download_url)\n eq_(r.status_code, 200)\n eq_(r.content, '{\"ready\": false}')\n isfile.return_value = True\n r = self.client.get(self.check_download_url)\n eq_(r.status_code, 200)\n eq_(r.content, '{\"ready\": true}')", "def test_file_download(self):\n\n # Downloading without auth = unauthorized error (401)\n with self.assertRaises(requests.exceptions.HTTPError):\n self.assertFalse(self.api.downloadFile('/media/part/files/1/test.pdf', 'test.pdf'))", "def test_download(self, file_list, _):\n url = reverse(\"report_download\")\n response = self.client.get(url)\n body = response.json()\n\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"Download Request Task ID\", body)\n\n url_w_params = url + \"?provider_uuid=1&bill_date=2021-04-01\"\n response = self.client.get(url_w_params)\n body = response.json()\n\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"Download Request Task ID\", body)", "def download(all):\n print(\"Downloading\")", "def download_and_prepare(self):\n self._download_and_prepare()", "def test_download(self):\n imgurl = \"{}spei03.nc\".format(self.processor.base_url)\n httpretty.register_uri(httpretty.GET, 
imgurl,\n body=get_mock_image())\n imgfile = self.processor.download(imgurl, 'spei03.tif')\n self.assertTrue(os.path.exists(os.path.join(\n self.processor.tmp_dir, imgfile)))", "def test_download(api):\n # upload file prior to download\n # with pytest.raises(APIConnectionError):\n uploaded_file = api.upload(\n tag='test_upload',\n expiry='1w',\n path='tests/test_file.txt'\n )\n\n # check that instance of FileIO has these fields\n assert uploaded_file.link\n assert uploaded_file.key\n assert uploaded_file.tag\n assert uploaded_file.path\n\n # remove the uploaded file from the os\n remove('tests/test_file.txt')\n\n # download and save the file\n api.download(tag='test_upload')\n\n # check that file was saved in a filesystem\n assert path.isfile('tests/test_file.txt')", "def test_download(client: FlaskClient):\n # Note: this wouldn't work for image files, which may be processed during upload\n file = get_example_file(ExampleFileType.Txt)\n response_upload = util.upload_file(client, DEFAULT_USER, file)\n response_download = util.download_file(\n client, DEFAULT_USER, response_upload.json[\"id\"]\n )\n assert response_download.status == \"200 OK\"\n assert response_download.data == file.contents", "def _check_url_file (url, path_download, outfile) :\n if \"http://\" in url.lower () :\n dest = outfile if outfile != None else _get_file_url (url, path_download)\n down = False\n nyet = dest + \".notyet\"\n \n if os.path.exists (dest) and not os.path.exists (nyet) :\n try :\n fLOG(\"trying to connect\", url)\n f1 = urllib.urlopen (url)\n down = _first_more_recent (f1, dest)\n newdate = down\n f1.close ()\n except IOError :\n fLOG(\"unable to connect Internet, working offline for url\", url)\n down = False\n else : \n down = True\n newdate = False\n \n if down :\n if newdate : fLOG (\" downloading (updated) \", url)\n else : fLOG (\" downloading \", url)\n \n if len (url) > 4 and url [-4].lower () in [\".txt\", \".csv\", \".tsv\", \".log\"] :\n fLOG (\"creating text file \", dest)\n format = \"w\"\n else : \n fLOG (\"creating binary file \", dest)\n format = \"wb\"\n \n if os.path.exists (nyet) :\n size = os.stat (dest).st_size\n fLOG (\"resume downloading (stop at\", size, \") from \", url)\n request = urllib.request.Request(url) \n request.add_header(\"Range\", \"bytes=%d-\" % size)\n fu = urllib.request.urlopen (request) \n f = open (dest, format.replace (\"w\", \"a\"))\n else :\n fLOG (\"downloading \", url)\n request = urllib.request.Request(url) \n fu = urllib.request.urlopen (url)\n f = open (dest, format)\n \n open (nyet, \"w\").close ()\n c = fu.read (2**21)\n size = 0\n while len (c) > 0 :\n size += len (c)\n fLOG(\" size\", size)\n f.write (c)\n f.flush ()\n c = fu.read (2**21)\n fLOG (\"end downloading\")\n f.close ()\n fu.close ()\n os.remove (nyet)\n \n url = dest\n return url", "def download(self, download_path):\n return" ]
[ "0.88635963", "0.84648824", "0.7365474", "0.7287708", "0.7232981", "0.72066337", "0.70977277", "0.70610595", "0.70601726", "0.7044479", "0.7025788", "0.6959563", "0.6959563", "0.6951651", "0.694343", "0.68706083", "0.6795131", "0.6781452", "0.6766455", "0.67443883", "0.66672873", "0.66587025", "0.66511863", "0.66137457", "0.6550238", "0.6535698", "0.65027755", "0.6475476", "0.6473419", "0.64694256" ]
0.903136
0
Test case for download_deployment_run_test_report
def test_download_deployment_run_test_report(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_deployment_run_reports(self):\n pass", "def test_get_deployment_run(self):\n pass", "def test_get_deployment_runs(self):\n pass", "def test_get_deployment_runs1(self):\n pass", "def test_download(self):\n pass", "def test_execute_deployment(self):\n pass", "def test_download(self, file_list, _):\n url = reverse(\"report_download\")\n response = self.client.get(url)\n body = response.json()\n\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"Download Request Task ID\", body)\n\n url_w_params = url + \"?provider_uuid=1&bill_date=2021-04-01\"\n response = self.client.get(url_w_params)\n body = response.json()\n\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"Download Request Task ID\", body)", "def test_download1(self):\n pass", "def test_release_deployment_run(self):\n pass", "def test_retest_deployment_run(self):\n pass", "def test_publish_deployment_run(self):\n pass", "def test_archive_run(self):\n pass", "def test_download2(self):\n pass", "def test_get_deployment(self):\n pass", "def test_get_deployment(self):\n pass", "def test_launch_deployment(self):\n pass", "def _DownloadResultResources(self):\n\n target_zip = \"%s/layout-test-results-%s.zip\" % (self.output_dir,\n self.build)\n if self.zip_file:\n filename = self.zip_file\n self.delete_zip_file = False\n else:\n revision, build_name = self._GetRevisionAndBuildFromArchiveStep()\n zip_url = GetZipFileURL(revision, build_name)\n if self.verbose:\n print \"Downloading zip file from %s to %s\" % (zip_url, target_zip)\n filename = self._DownloadFile(zip_url, target_zip, \"b\")\n if not filename:\n if self.verbose:\n print \"Could not download zip file from %s. Does it exist?\" % zip_url\n return False\n\n if zipfile.is_zipfile(filename):\n zip = zipfile.ZipFile(filename)\n if self.verbose:\n print 'Extracting files...'\n directory = \"%s/layout-test-results-%s\" % (self.output_dir, self.build)\n CreateDirectory(directory)\n self._UnzipZipfile(zip, TEMP_ZIP_DIR)\n\n for failure in self.failures:\n failure.test_expectations_line = (\n self._GetTestExpectationsLine(failure.test_path))\n if self.exclude_wontfix and failure.IsWontFix():\n self.failures.remove(failure)\n continue\n if failure.text_diff_mismatch:\n self._PopulateTextFailure(failure, directory, zip)\n if failure.image_mismatch:\n self._PopulateImageFailure(failure, directory, zip)\n if not self.use_local_baselines:\n failure.test_age = self._GetFileAge(failure.GetTestHome())\n failure.flakiness = self._GetFlakiness(failure.test_path, self.platform)\n zip.close()\n if self.verbose:\n print \"Files extracted.\"\n if self.delete_zip_file:\n if self.verbose:\n print \"Cleaning up zip file...\"\n path_utils.RemoveDirectory(TEMP_ZIP_DIR)\n os.remove(filename)\n return True\n else:\n if self.verbose:\n print \"Downloaded file '%s' doesn't look like a zip file.\" % filename\n return False", "def test_get_run(self):\n pass", "def test_get_deployment_resource_data(self):\n pass", "def test_get_deployment_resources(self):\n pass", "def test_download_host(self):\n pass", "def test_delete_deployment_run(self):\n pass", "def test_download_file(self):\n expected_full_path = \"{}/{}/azure/{}/{}\".format(\n Config.TMP_DIR, self.customer_name.replace(\" \", \"_\"), self.mock_data.container, self.mock_data.export_file\n )\n full_file_path, etag, _, __ = self.downloader.download_file(self.mock_data.export_key)\n self.assertEqual(full_file_path, expected_full_path)\n self.assertEqual(etag, self.mock_data.export_etag)", "def process(self):\n 
super(Test200SmartSanityDownload004, self).process()\n\n self.logger.info('Step actions:')\n self.logger.info('1. Download All to PCL;')\n self.PROJECT.project_download()\n result = self.MicroWIN.compare_with_plc()\n\n self.logger.info('Expected results:')\n self.logger.info('4. Download All successful;')\n if not (result['sdb'] and result['ob'] and result['db']):\n self.logger.info('Download result: %s' % result)\n raise CheckException('1. Download All failed;')", "def test_get_deployment_resource(self):\n pass", "def test_get_deployments(self):\n pass", "def test_get_deployments(self):\n pass", "def test_download_artifact(self, clean_mongo, test_case):\n self.logger.info(\"RUN: %s\", test_case[\"name\"])\n\n uuidv4 = str(uuid.uuid4())\n tenant, username, password = (\n \"test.mender.io-\" + uuidv4,\n \"some.user+\" + uuidv4 + \"@example.com\",\n \"secretsecret\",\n )\n tenant = create_org(tenant, username, password, \"enterprise\")\n create_roles(tenant.users[0].token, test_case[\"roles\"])\n test_case[\"user\"][\"name\"] = test_case[\"user\"][\"name\"].replace(\"UUID\", uuidv4)\n test_user = create_user(tid=tenant.id, **test_case[\"user\"])\n login(test_user, test_case[\"use_personal_access_token\"])\n\n # Create admin user so artifact can be uploaded\n admin_user_data = {\n \"name\": \"[email protected]\",\n \"pwd\": \"password\",\n \"roles\": [\"RBAC_ROLE_PERMIT_ALL\"],\n }\n admin_user = create_user(tid=tenant.id, **admin_user_data)\n login(admin_user, test_case[\"use_personal_access_token\"])\n\n # Upload a bogus artifact\n artifact = Artifact(\"tester\", [\"qemux86-64\"], payload=\"bogus\")\n\n dplmnt_MGMT = ApiClient(deployments.URL_MGMT)\n rsp = dplmnt_MGMT.with_auth(admin_user.token).call(\n \"POST\",\n deployments.URL_DEPLOYMENTS_ARTIFACTS,\n files=(\n (\n \"artifact\",\n (\"artifact.mender\", artifact.make(), \"application/octet-stream\"),\n ),\n ),\n )\n assert rsp.status_code == 201, rsp.text\n\n # Attempt to download artifact with test user\n artifact_id = rsp.headers[\"Location\"].split(\"/\")[-1]\n rsp = dplmnt_MGMT.with_auth(test_user.token).call(\n \"GET\", deployments.URL_DEPLOYMENTS_ARTIFACTS_DOWNLOAD.format(id=artifact_id)\n )\n assert rsp.status_code == test_case[\"status_code\"], rsp.text\n self.logger.info(\"PASS: %s\" % test_case[\"name\"])", "def test_download_and_unlink(self):\n scrape_category.get_simfile_from_ziv(self.simfile, self.link, self.dest)\n assert os.path.exists(os.path.join(self.dest, \"sim100.zip\"))\n\n scrape_category.unlink_zip(self.simfile, self.dest)\n assert not os.path.exists(os.path.join(self.dest, \"sim100.zip\"))", "def pytest_runtest_logreport(report):\n\n report_test_status(logger, report)" ]
[ "0.8337569", "0.72825813", "0.7025892", "0.6914659", "0.6796901", "0.67696214", "0.67557836", "0.6701369", "0.66973853", "0.65379226", "0.64914423", "0.6486823", "0.6453156", "0.6282467", "0.6282467", "0.6264636", "0.6146567", "0.6134244", "0.61095744", "0.6097391", "0.60544324", "0.6037893", "0.5959051", "0.5958937", "0.5923086", "0.5907969", "0.5907969", "0.5890579", "0.5885543", "0.5862182" ]
0.9353624
0
Test case for download_host
def test_download_host(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_download(self):\n pass", "def test_download1(self):\n pass", "def test_download2(self):\n pass", "def test_download(self):\n\n # Test for all correct data\n self.assertEqual(\n download(TestSteelEye.url, self.xmlfilepath, \"sourcefile.xml\"),\n self.xmlfilepath + os.sep + \"sourcefile.xml\",\n )\n\n # Test for incorrect url\n self.assertEqual(\n download(\"http://example.com\", self.xmlfilepath, \"sourcefile.xml\"), \"\"\n )\n\n # Test for different download path\n self.assertEqual(\n download(\n TestSteelEye.url,\n os.path.join(os.getcwd(), \"anotherpath\"),\n \"sourcefile.xml\",\n ),\n os.path.join(os.getcwd(), \"anotherpath\") + os.sep + \"sourcefile.xml\",\n )\n\n # Test for incorrect download path\n self.assertEqual(download(TestSteelEye.url, \"E:\", \"sourcefile.xml\"), \"\")", "def download():\n raise NotImplementedError", "def test_download(self):\n test_file = os.path.join(self._system.get_temporary_path(), \"nusoft.test\")\n self._system.download(\"http://www.github.com\", name=test_file)\n self.assertTrue(os.path.exists(test_file))\n os.remove(test_file)", "def test_download_file(self):\n expected_full_path = \"{}/{}/azure/{}/{}\".format(\n Config.TMP_DIR, self.customer_name.replace(\" \", \"_\"), self.mock_data.container, self.mock_data.export_file\n )\n full_file_path, etag, _, __ = self.downloader.download_file(self.mock_data.export_key)\n self.assertEqual(full_file_path, expected_full_path)\n self.assertEqual(etag, self.mock_data.export_etag)", "def test_get_host(self):\n pass", "def test_get_url(self):\n package = make_package(version=\"1.1+g12345\")\n response = self.storage.download_response(package)\n\n parts = urlparse(response.location)\n self.assertEqual(parts.scheme, 'https')\n self.assertEqual(parts.netloc, 'abcdef.cloudfront.net')\n self.assertEqual(parts.path, '/bcc4/mypkg/mypkg-1.1%2Bg12345.tar.gz')\n query = parse_qs(parts.query)\n self.assertItemsEqual(query.keys(), ['Key-Pair-Id', 'Expires',\n 'Signature'])\n self.assertTrue(int(query['Expires'][0]) > time.time())\n self.assertEqual(query['Key-Pair-Id'][0],\n self.settings['storage.cloud_front_key_id'])", "def test_download(self):\n imgurl = \"{}spei03.nc\".format(self.processor.base_url)\n httpretty.register_uri(httpretty.GET, imgurl,\n body=get_mock_image())\n imgfile = self.processor.download(imgurl, 'spei03.tif')\n self.assertTrue(os.path.exists(os.path.join(\n self.processor.tmp_dir, imgfile)))", "def test_download_and_unlink(self):\n scrape_category.get_simfile_from_ziv(self.simfile, self.link, self.dest)\n assert os.path.exists(os.path.join(self.dest, \"sim100.zip\"))\n\n scrape_category.unlink_zip(self.simfile, self.dest)\n assert not os.path.exists(os.path.join(self.dest, \"sim100.zip\"))", "def testRemote(self):\n try:\n remoteLocator = self.__httpsFileUrl\n ok = self.__fileU.isLocal(remoteLocator)\n self.assertFalse(ok)\n #\n ok = self.__fileU.exists(remoteLocator)\n self.assertTrue(ok)\n size = self.__fileU.size(remoteLocator)\n self.assertGreaterEqual(size, 1000)\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def test_download_redirect(self):\n\n fetcher = Fetcher('/unused/root/dir')\n with self.setup_server() as base_url:\n self._URL = base_url\n self.assertFalse(self._URL2_ACCESSED)\n self.assertFalse(self._URL1_ACCESSED)\n\n path = fetcher.download(base_url + '/url2')\n self.assertTrue(self._URL2_ACCESSED)\n self.assertTrue(self._URL1_ACCESSED)\n\n with open(path) as fp:\n self.assertEqual('returned from redirect\\r\\n', fp.read())", "def 
test_download_view_invalid_url(self):\n factory = RequestFactory()\n request = factory.get('/downloads');\n error = False\n try:\n response = views.download(request, 'fake_id');\n except: \n error = True\n\n self.assertTrue(error)", "def test_download_deployment_run_test_report(self):\n pass", "def test_file_download(self):\n\n # Downloading without auth = unauthorized error (401)\n with self.assertRaises(requests.exceptions.HTTPError):\n self.assertFalse(self.api.downloadFile('/media/part/files/1/test.pdf', 'test.pdf'))", "def test_download_nonexistent(client: FlaskClient):\n response = util.download_file(client, DEFAULT_USER, \"test-nonexistent\")\n assert response.status == \"404 NOT FOUND\"", "def test_get_host_access(self):\n pass", "def test_package_check_download(self, isfile):\n isfile.return_value = False\n r = self.client.get(self.check_download_url)\n eq_(r.status_code, 200)\n eq_(r.content, '{\"ready\": false}')\n isfile.return_value = True\n r = self.client.get(self.check_download_url)\n eq_(r.status_code, 200)\n eq_(r.content, '{\"ready\": true}')", "def test_file_download_fail(self):\n with mock.patch(\"JumpScale.j\") as j_mock:\n from JumpScale import j\n import JumpScale.tools.cuisine.CuisineCore\n JumpScale.tools.cuisine.CuisineCore.j = j\n from JumpScale.tools.cuisine.CuisineCore import CuisineCore\n from JumpScale.core.errorhandling import JSExceptions\n executor_mock = mock.MagicMock()\n j.tools.executor.getLocal.return_value = executor_mock\n executor = j.tools.executor.getLocal()\n cuisine = j.tools.cuisine.local\n cuisine_core = CuisineCore(executor, cuisine)\n url = 'http://hallo.com/downloadme.txt'\n to = '/tmp/path'\n cuisine_core.file_exists = mock.MagicMock()\n cuisine_core.file_exists.return_value = False\n cuisine_core.createDir = mock.MagicMock()\n cuisine_core.file_unlink = mock.MagicMock()\n cuisine_core.run = mock.MagicMock()\n cuisine_core.run.side_effect = [(32, '', 'err'), (0, 'Ok', '')]\n cuisine_core.touch = mock.MagicMock()\n j.exceptions.RuntimeError = JSExceptions.RuntimeError\n self.assertRaises(JSExceptions.RuntimeError, cuisine_core.file_download, url, to)", "def get_download_url():\n return _DOWNLOAD", "def test_downloading(self):\n month = '2013-01' # smallest of available datasets\n path = download_data(month)\n self.assertTrue(os.path.isfile(path), msg='File on returned location does not exist')\n os.remove(path)", "def test_get_url(self):\n package = make_package()\n response = self.storage.download_response(package)\n\n parts = urlparse(response.location)\n self.assertEqual(parts.scheme, 'https')\n self.assertEqual(parts.netloc, 'mybucket.s3.amazonaws.com')\n self.assertEqual(parts.path, '/' + self.storage.get_path(package))\n query = parse_qs(parts.query)\n self.assertItemsEqual(query.keys(), ['Expires', 'Signature',\n 'AWSAccessKeyId'])\n self.assertTrue(int(query['Expires'][0]) > time.time())\n self.assertEqual(query['AWSAccessKeyId'][0],\n self.settings['storage.access_key'])", "def download(self, url_match):\n pass", "def test_url():\r\n global provided_url\r\n global verbose_flag\r\n # extracting url\r\n provided_url = urlparse(provided_url).scheme+\"://\"+urlparse(provided_url).netloc\r\n print provided_url \r\n if verbose_flag: print \"\\t[.] Checking if connection can be established...\",# + provided_url\r\n try:\r\n response = urllib2.urlopen(provided_url)\r\n \r\n except HTTPError, e:\r\n if verbose_flag: print \"[!] Failed\"\r\n return 0\r\n except URLError, e:\r\n if verbose_flag: print \"[!] 
Failed\"\r\n return 0\r\n else:\r\n valid_target = 1\r\n if verbose_flag: print \"Success\"\r\n return 1", "def test_download(self, file_list, _):\n url = reverse(\"report_download\")\n response = self.client.get(url)\n body = response.json()\n\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"Download Request Task ID\", body)\n\n url_w_params = url + \"?provider_uuid=1&bill_date=2021-04-01\"\n response = self.client.get(url_w_params)\n body = response.json()\n\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"Download Request Task ID\", body)", "def download(self, download_path):\n return", "def download(self, download_request):\n raise NotImplementedError", "def test_download_to_file(req, tmpdir):\n req.get(ENTREZ_URL, text='This works.')\n outdir = tmpdir.mkdir('outdir')\n filename = outdir.join('foo')\n expected = outdir.join('foo.gbk')\n config = core.Config(molecule='nucleotide', verbose=False)\n\n core.download_to_file('FOO', config, filename=filename)\n\n assert expected.check()", "def test_download_links():\n\n # dir to download data to\n out_dir = 'test/download_data'\n\n # remove out_dir if it already exists and make a new one\n if os.path.exists(out_dir):\n shutil.rmtree(out_dir)\n os.system('mkdir -p %s'%out_dir)\n\n # List of all available fits\n fit_names = surfinBH.fits_collection.keys()\n for name in fit_names:\n surfinBH.DownloadData(name=name, data_dir=out_dir)\n\n # allow for both naming formats surfinBH7dq2 and NRSur7dq4Remnant\n if 'surfinBH' in name:\n name_tag = name.split('surfinBH')[-1]\n else:\n name_tag = name.split('NRSur')[-1].split('Remnant')[0]\n\n # check that it has the right name\n assert(os.path.isfile('%s/fit_%s.h5'%(out_dir, name_tag)))\n # check that the fit_name matches with the name in the attributes\n # of h5 file.\n h5file = h5py.File('%s/fit_%s.h5'%(out_dir, name_tag), 'r')\n assert(name_tag == h5file.attrs['name'].decode('utf-8'))\n h5file.close()" ]
[ "0.7872551", "0.7484949", "0.7398708", "0.70807505", "0.67864126", "0.6761874", "0.6674917", "0.6636758", "0.66050196", "0.6560064", "0.65451294", "0.65326107", "0.6492889", "0.6481335", "0.6405368", "0.6405045", "0.63973653", "0.63928217", "0.63637215", "0.63626426", "0.6299517", "0.626801", "0.6257777", "0.62515116", "0.6251495", "0.6219792", "0.62184745", "0.6216579", "0.618298", "0.6181034" ]
0.9202
0
Test case for enable_maintence_mode
def test_enable_maintence_mode(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_enable_maintence_mode1(self):\n pass", "def maintenance_mode():\n pass", "def check_enable_mode(self, *args, **kwargs):\n pass", "def host_maintenance_mode(self, host, mode):\n if not mode:\n return 'off_maintenance'\n return 'on_maintenance'", "def host_maintenance_mode(self, host, mode):\n if not mode:\n return 'off_maintenance'\n return 'on_maintenance'", "def maintainance(self, on_off, instance_type):\n print((\"enabling\" if on_off else \"disabling\") + \" Maintainer mode\")\n tries = 60\n while True:\n reply = self.send_request(\n instance_type,\n requests.put,\n \"/_admin/cluster/maintenance\",\n '\"on\"' if on_off else '\"off\"',\n )\n if len(reply) > 0:\n print(\"Reply: \" + str(reply[0].text))\n if reply[0].status_code == 200:\n return\n print(f\"Reply status code is {reply[0].status_code}. Sleeping for 3 s.\")\n time.sleep(3)\n tries -= 1\n else:\n print(\"Reply is empty. Sleeping for 3 s.\")\n time.sleep(3)\n tries -= 1\n if tries <= 0:\n action = \"enable\" if on_off else \"disable\"\n raise Exception(f\"Couldn't {action} maintainance mode!\")", "def is_maintenance_active(self):\n pass", "def test_update_hyperflex_auto_support_policy(self):\n pass", "def check_manual_mode_change(self, event):\n if self.vehicle.get_manual_mode_change(reset=True):\n data = lambda: None\n data.mode_to_set = \"Inactive\"\n self.set_companion_mode(data)", "def test_enabled(self):\n # OSA script should have been installed in setUp function, which sets\n # enabled to True by default.\n self.assertTrue(self.run_function(\"assistive.enabled\", [OSA_SCRIPT]))\n # Disable OSA Script\n self.run_function(\"assistive.enable\", [OSA_SCRIPT, False])\n # Assert against new disabled status\n self.assertFalse(self.run_function(\"assistive.enabled\", [OSA_SCRIPT]))", "def activate(wait, timeout, logger, client):\n\n if timeout and not wait:\n msg = \"'--timeout' was used without '--wait'.\"\n error = exceptions.CloudifyCliError(msg)\n error.possible_solutions = [\n \"Add the '--wait' flag to the command in order to wait.\"\n ]\n raise error\n\n logger.info('Entering maintenance mode...')\n client.maintenance_mode.activate()\n\n if wait:\n logger.info(\"Cloudify manager will enter Maintenance mode once \"\n \"there are no running or pending executions...\\n\")\n deadline = time.time() + timeout\n\n while True:\n if _is_timeout(timeout, deadline):\n raise exceptions.CloudifyCliError(\n \"Timed out while entering maintenance mode. \"\n \"Note that the manager is still entering maintenance mode\"\n \" in the background. 
You can run \"\n \"'cfy maintenance-mode status' to check the status.\")\n\n status_response = client.maintenance_mode.status()\n if status_response.status == MAINTENANCE_MODE_ACTIVE:\n logger.info('Manager is in maintenance mode.')\n logger.info('While in maintenance mode most requests will '\n 'be blocked.')\n return\n time.sleep(DEFAULT_TIMEOUT_INTERVAL)\n logger.info(\"Run 'cfy maintenance-mode status' to check the \"\n \"maintenance mode's status.\\n\")", "def in_maintenance_mode():\n return os.path.exists(\"maintenance.txt\")", "def check_enable_mode(self, check_string='#'):\n return True", "def __check_mode(self):\n self.mode[\"auto_mode\"] = self.communications.get_mode()", "def test_change_provisioned_throughput_usual_case():", "def test_enable(self):\n # OSA script should have been installed and enabled in setUp function\n # Now let's disable it, which should return True.\n self.assertTrue(self.run_function(\"assistive.enable\", [OSA_SCRIPT, False]))\n # Double check the script was disabled, as intended.\n self.assertFalse(self.run_function(\"assistive.enabled\", [OSA_SCRIPT]))\n # Now re-enable\n self.assertTrue(self.run_function(\"assistive.enable\", [OSA_SCRIPT]))\n # Double check the script was enabled, as intended.\n self.assertTrue(self.run_function(\"assistive.enabled\", [OSA_SCRIPT]))", "def enter_maintenance_mode(self):\n cmd = self._cmd('enterMaintenanceMode')\n if cmd.success:\n self._update(_get_role(self._get_resource_root(), self._path()))\n return cmd", "def test_operate_cyclic_storage(self, on):\n if on is True:\n override = {} # cyclic storage is True by default\n m = build_model(\n override, \"simple_supply_and_supply_plus,operate,investment_costs\"\n )\n assert m.run_config[\"cyclic_storage\"] is True\n elif on is False:\n override = {\"run.cyclic_storage\": False}\n m = build_model(\n override, \"simple_supply_and_supply_plus,operate,investment_costs\"\n )\n assert m.run_config[\"cyclic_storage\"] is False\n with pytest.warns(exceptions.ModelWarning) as warning:\n m.run(build_only=True)\n check_warn = check_error_or_warning(\n warning, \"Storage cannot be cyclic in operate run mode\"\n )\n if on is True:\n assert check_warn\n elif on is True:\n assert not check_warn\n assert (\n AttrDict.from_yaml_string(m._model_data.attrs[\"run_config\"]).cyclic_storage\n is False\n )", "def test_patch_hyperflex_auto_support_policy(self):\n pass", "def ceph_enabled(self):", "def check_config_mode(self):\n return False", "def test_create_hyperflex_auto_support_policy(self):\n pass", "def in_test_mode(mode: str) -> bool:\n return mode == TEST", "def affection_status_switch_on(self):\n self._affection_status_switch = False", "def affection_status_switch_on(self):\n self._affection_status_switch = True", "def test_mixedModes(self):\n self._sendModeChange(\"+osv\", \"a_user another_user\")\n self._checkModeChange([(True, \"osv\", (\"a_user\", None, \"another_user\"))])\n self._sendModeChange(\"+v-os\", \"a_user another_user\")\n self._checkModeChange(\n [(True, \"v\", (\"a_user\",)), (False, \"os\", (\"another_user\", None))]\n )", "def DualMode(self) -> bool:", "def test_support_MODES(self):\n self._testIntOrDefaultFeature(\"MODES\")", "def test_update_software_asset_impact_level(self):\n pass", "def test_update_enforcement_mode_command_success(\n mock_client, enforcement_mode_success, monkeypatch\n):\n monkeypatch.setattr(\n illumio.pce.PolicyComputeEngine._PCEObjectAPI,\n \"bulk_update\",\n lambda *a: [\n Workload.from_json(workload) for workload in enforcement_mode_success\n 
],\n )\n args = {\n \"enforcement_mode\": \"idle\",\n \"workloads\": [\"/orgs/1/workloads/dummy\", \"/orgs/1/workloads/dummy1\"],\n }\n resp = update_enforcement_mode_command(mock_client, args)\n\n assert resp.raw_response == enforcement_mode_success" ]
[ "0.92848986", "0.7033659", "0.64146847", "0.62658453", "0.62658453", "0.6238736", "0.61857754", "0.606983", "0.6026264", "0.6022495", "0.59624964", "0.585408", "0.58480006", "0.5841078", "0.5806692", "0.5770104", "0.576556", "0.5672393", "0.5662674", "0.56612885", "0.5647217", "0.555681", "0.55287904", "0.5511815", "0.5443744", "0.5442782", "0.5429483", "0.5427556", "0.542171", "0.54099196" ]
0.9325534
0
Test case for enable_maintence_mode1
def test_enable_maintence_mode1(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_enable_maintence_mode(self):\n pass", "def maintenance_mode():\n pass", "def check_enable_mode(self, *args, **kwargs):\n pass", "def test_update_hyperflex_auto_support_policy(self):\n pass", "def check_manual_mode_change(self, event):\n if self.vehicle.get_manual_mode_change(reset=True):\n data = lambda: None\n data.mode_to_set = \"Inactive\"\n self.set_companion_mode(data)", "def maintainance(self, on_off, instance_type):\n print((\"enabling\" if on_off else \"disabling\") + \" Maintainer mode\")\n tries = 60\n while True:\n reply = self.send_request(\n instance_type,\n requests.put,\n \"/_admin/cluster/maintenance\",\n '\"on\"' if on_off else '\"off\"',\n )\n if len(reply) > 0:\n print(\"Reply: \" + str(reply[0].text))\n if reply[0].status_code == 200:\n return\n print(f\"Reply status code is {reply[0].status_code}. Sleeping for 3 s.\")\n time.sleep(3)\n tries -= 1\n else:\n print(\"Reply is empty. Sleeping for 3 s.\")\n time.sleep(3)\n tries -= 1\n if tries <= 0:\n action = \"enable\" if on_off else \"disable\"\n raise Exception(f\"Couldn't {action} maintainance mode!\")", "def host_maintenance_mode(self, host, mode):\n if not mode:\n return 'off_maintenance'\n return 'on_maintenance'", "def host_maintenance_mode(self, host, mode):\n if not mode:\n return 'off_maintenance'\n return 'on_maintenance'", "def test_change_provisioned_throughput_usual_case():", "def test_enabled(self):\n # OSA script should have been installed in setUp function, which sets\n # enabled to True by default.\n self.assertTrue(self.run_function(\"assistive.enabled\", [OSA_SCRIPT]))\n # Disable OSA Script\n self.run_function(\"assistive.enable\", [OSA_SCRIPT, False])\n # Assert against new disabled status\n self.assertFalse(self.run_function(\"assistive.enabled\", [OSA_SCRIPT]))", "def __check_mode(self):\n self.mode[\"auto_mode\"] = self.communications.get_mode()", "def test_patch_hyperflex_auto_support_policy(self):\n pass", "def check_enable_mode(self, check_string='#'):\n return True", "def test_enable(self):\n # OSA script should have been installed and enabled in setUp function\n # Now let's disable it, which should return True.\n self.assertTrue(self.run_function(\"assistive.enable\", [OSA_SCRIPT, False]))\n # Double check the script was disabled, as intended.\n self.assertFalse(self.run_function(\"assistive.enabled\", [OSA_SCRIPT]))\n # Now re-enable\n self.assertTrue(self.run_function(\"assistive.enable\", [OSA_SCRIPT]))\n # Double check the script was enabled, as intended.\n self.assertTrue(self.run_function(\"assistive.enabled\", [OSA_SCRIPT]))", "def is_maintenance_active(self):\n pass", "def activate(wait, timeout, logger, client):\n\n if timeout and not wait:\n msg = \"'--timeout' was used without '--wait'.\"\n error = exceptions.CloudifyCliError(msg)\n error.possible_solutions = [\n \"Add the '--wait' flag to the command in order to wait.\"\n ]\n raise error\n\n logger.info('Entering maintenance mode...')\n client.maintenance_mode.activate()\n\n if wait:\n logger.info(\"Cloudify manager will enter Maintenance mode once \"\n \"there are no running or pending executions...\\n\")\n deadline = time.time() + timeout\n\n while True:\n if _is_timeout(timeout, deadline):\n raise exceptions.CloudifyCliError(\n \"Timed out while entering maintenance mode. \"\n \"Note that the manager is still entering maintenance mode\"\n \" in the background. 
You can run \"\n \"'cfy maintenance-mode status' to check the status.\")\n\n status_response = client.maintenance_mode.status()\n if status_response.status == MAINTENANCE_MODE_ACTIVE:\n logger.info('Manager is in maintenance mode.')\n logger.info('While in maintenance mode most requests will '\n 'be blocked.')\n return\n time.sleep(DEFAULT_TIMEOUT_INTERVAL)\n logger.info(\"Run 'cfy maintenance-mode status' to check the \"\n \"maintenance mode's status.\\n\")", "def test_create_hyperflex_auto_support_policy(self):\n pass", "def test_mixedModes(self):\n self._sendModeChange(\"+osv\", \"a_user another_user\")\n self._checkModeChange([(True, \"osv\", (\"a_user\", None, \"another_user\"))])\n self._sendModeChange(\"+v-os\", \"a_user another_user\")\n self._checkModeChange(\n [(True, \"v\", (\"a_user\",)), (False, \"os\", (\"another_user\", None))]\n )", "def DualMode(self) -> bool:", "def affection_status_switch_on(self):\n self._affection_status_switch = False", "def enter_maintenance_mode(self):\n cmd = self._cmd('enterMaintenanceMode')\n if cmd.success:\n self._update(_get_role(self._get_resource_root(), self._path()))\n return cmd", "def test_operate_cyclic_storage(self, on):\n if on is True:\n override = {} # cyclic storage is True by default\n m = build_model(\n override, \"simple_supply_and_supply_plus,operate,investment_costs\"\n )\n assert m.run_config[\"cyclic_storage\"] is True\n elif on is False:\n override = {\"run.cyclic_storage\": False}\n m = build_model(\n override, \"simple_supply_and_supply_plus,operate,investment_costs\"\n )\n assert m.run_config[\"cyclic_storage\"] is False\n with pytest.warns(exceptions.ModelWarning) as warning:\n m.run(build_only=True)\n check_warn = check_error_or_warning(\n warning, \"Storage cannot be cyclic in operate run mode\"\n )\n if on is True:\n assert check_warn\n elif on is True:\n assert not check_warn\n assert (\n AttrDict.from_yaml_string(m._model_data.attrs[\"run_config\"]).cyclic_storage\n is False\n )", "def affection_status_switch_on(self):\n self._affection_status_switch = True", "def testConsistency(self):", "def check_config_mode(self):\n return False", "def test_upgrade_with_auto_upgrade_latest_engine_enabled():", "def test_apply_device_rules(self):\n pass", "def ceph_enabled(self):", "def in_maintenance_mode():\n return os.path.exists(\"maintenance.txt\")", "def test_update_hyperflex_ucsm_config_policy(self):\n pass" ]
[ "0.9044327", "0.66818094", "0.62052804", "0.61182517", "0.6020975", "0.5991954", "0.5882213", "0.5882213", "0.5868845", "0.58044535", "0.57522273", "0.56983507", "0.5659279", "0.56252015", "0.5620639", "0.5617534", "0.5600708", "0.55745924", "0.5512063", "0.54568386", "0.54548454", "0.5444984", "0.5432596", "0.53748703", "0.5364596", "0.53565943", "0.5348493", "0.53383833", "0.53367954", "0.53342205" ]
0.94883007
0
Test case for enable_virt_realm_remote_access
def test_enable_virt_realm_remote_access(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_disable_virt_realm_remote_access(self):\n pass", "def test_update_virt_realm_remote_access_config(self):\n pass", "def test_update_virt_realm(self):\n pass", "def test_set_virtualization_realm_active(self):\n pass", "def test_register_virtualization_realm(self):\n pass", "def test_get_virtualization_realm(self):\n pass", "def test_update_virtualization_realm(self):\n pass", "def test_allocate_virtualization_realm(self):\n pass", "def test_get_virtualization_realm_resources(self):\n pass", "def test_remove_virt_realm(self):\n pass", "def test_get_deployment_runs_in_virtualization_realm(self):\n pass", "def test_list_virt_realms_in_cloud(self):\n pass", "def test_deallocate_virt_realm(self):\n pass", "def test_direct_access_telnet_mode_manual(self):\n self.assert_enter_command_mode()\n\n # go direct access\n cmd = AgentCommand(command=ResourceAgentEvent.GO_DIRECT_ACCESS,\n kwargs={'session_type': DirectAccessTypes.vsp,\n 'session_timeout':600,\n 'inactivity_timeout':600})\n retval = self.instrument_agent_client.execute_agent(cmd, timeout=600)\n log.warn(\"go_direct_access retval=\" + str(retval.result))\n\n state = self.instrument_agent_client.get_agent_state()\n self.assertEqual(state, ResourceAgentState.DIRECT_ACCESS)\n \n print(\"test_direct_access_telnet_mode: waiting 120 seconds for manual testing\")\n gevent.sleep(120)\n\n cmd = AgentCommand(command=ResourceAgentEvent.GO_COMMAND)\n retval = self.instrument_agent_client.execute_agent(cmd) \n\n state = self.instrument_agent_client.get_agent_state()\n self.assertEqual(state, ResourceAgentState.COMMAND)", "def test__enable_tunnel_request_valid(self):\n netapp_elem = zapi_fakes.FAKE_NA_ELEMENT\n server = zapi_fakes.FAKE_NA_SERVER_API_1_20\n mock_invoke = self.mock_object(netapp_elem, 'add_attr')\n expected_call_args = [mock.call('vfiler', 'filer'),\n mock.call('vfiler', 'server')]\n\n server._enable_tunnel_request(netapp_elem)\n\n self.assertEqual(expected_call_args, mock_invoke.call_args_list)", "def test_server_administrator():\n if is_server_administrator():\n return True\n raise False", "def test_you_must_be_realm_admin(self) -> None:\n user_profile = self.example_user(\"hamlet\")\n self.login_user(user_profile)\n\n other_realm = do_create_realm(string_id=\"other\", name=\"other\")\n stream = self.make_stream(\"other_realm_stream\", realm=other_realm)\n\n result = self.client_delete(\"/json/streams/\" + str(stream.id))\n self.assert_json_error(result, \"Invalid stream ID\")\n\n # Even becoming a realm admin doesn't help us for an out-of-realm\n # stream.\n do_change_user_role(user_profile, UserProfile.ROLE_REALM_ADMINISTRATOR, acting_user=None)\n result = self.client_delete(\"/json/streams/\" + str(stream.id))\n self.assert_json_error(result, \"Invalid stream ID\")", "def test_list_virtualization_realm_templates(self):\n pass", "def test_get_host_access(self):\n pass", "def allow_remote_invocation(func, method='auto'):\r\n setattr(func, 'allow_rmi', method)\r\n return func", "def test_get_valid_networks_for_virtualization_realm(self):\n pass", "def testGetManagementNodeForZoneAuth(self):\n management_node = self._saveManagementNode()\n response = self._get('inventory/zones/%d/management_nodes/%s/' % (\n management_node.zone.zone_id, management_node.system_ptr_id))\n self.assertEquals(response.status_code, 401)\n\n response = self._get('inventory/zones/%d/management_nodes/%s/' % (\n management_node.zone.zone_id, management_node.system_ptr_id),\n username=\"testuser\", password=\"password\")\n 
self.assertEquals(response.status_code, 200)", "def test_update_hyperflex_local_credential_policy(self):\n pass", "def test_set_project_default_virtualization_realm(self):\n pass", "def testDenyAllowAccess(self):\n self.host.ContinueAuth()\n self.host.SignIn(self.account['username'], self.account['password'])\n self.host.DenyAccess()\n self.host.ContinueAuth()\n self.host.AllowAccess()", "def test_create_hyperflex_local_credential_policy(self):\n pass", "def enableRemoteAccess():\n myClusterProps = redshift.describe_clusters(\n ClusterIdentifier=DWH_CLUSTER_IDENTIFIER\n )['Clusters'][0]\n\n # attempt to allow incoming TCP connections to the cluster\n print(\"Enabling incoming TCP connections...\")\n try:\n vpc = ec2.Vpc(id=myClusterProps[\"VpcId\"])\n for sg in vpc.security_groups.all():\n if sg.group_name == \"default\":\n defaultSg = sg\n break\n\n defaultSg.authorize_ingress(\n GroupName=defaultSg.group_name,\n CidrIp=CIDR_IP_ALLOWED,\n IpProtocol='TCP',\n FromPort=int(DWH_PORT),\n ToPort=int(DWH_PORT)\n )\n except Exception as e:\n print(e)\n return\n\n print(\"\\nCluster is ready for access!\")\n endpoint = myClusterProps[\"Endpoint\"][\"Address\"]\n print(f\"Endpoint: {endpoint}\")", "def test_unix_client_account_verification(core_session, agent_enrolled_unix_system_with_users, proxy_start_stop):\n\n \"\"\"\n Testrail Link:\n https://testrail.centrify.com/index.php?/cases/view/1293456\n https://testrail.centrify.com/index.php?/cases/view/1293457\n https://testrail.centrify.com/index.php?/cases/view/1293458\n \"\"\"\n\n # verfiy the test is run with single thread.\n assert 'PYTEST_XDIST_WORKER_COUNT' not in os.environ, \\\n f'This test cannot be run with multiple threads due to starting and stopping connectors'\n\n enrolledsystems = agent_enrolled_unix_system_with_users\n accounts = enrolledsystems[0][\"Accounts\"]\n proxyid = enrolledsystems[0][\"ProxyId\"]\n session = enrolledsystems[0][\"Session\"]\n proxycontrol = proxy_start_stop\n\n logger.info(\"stop the agent\")\n ssh_manager.ssh_stop_agent(session)\n logger.info(\"start the connector\")\n proxycontrol(proxyid, True)\n\n logger.info(\"Verifying accounts, Connector is available\")\n for i, val in enumerate(accounts):\n logger.info(str(i) + \", Name: \" + val[\"Name\"])\n verify_pass_result, verify_pass_success = ResourceManager.check_account_health(core_session, val[\"Id\"])\n assert verify_pass_result == 'OK', \"Verify Failed on Account: \" + val['Name'] + \", \" + verify_pass_result\n\n # stop Conector , Should fail\n logger.info(\"Stopping the connector\")\n proxycontrol(proxyid, False)\n logger.info(\"Verifying accounts, no agent or connector\")\n for i, val in enumerate(accounts):\n verify_pass_result, verify_pass_success = ResourceManager.check_account_health(core_session, val[\"Id\"])\n assert verify_pass_result != 'OK', \"Verify success on Account: \" + val['Name'] + \", \" + verify_pass_result\n\n # Start agent\n logger.info(\"Starting the agent\")\n ssh_manager.ssh_start_agent(session, True)\n\n logger.info(\"Verifying accounts, agent is available.\")\n for i, val in enumerate(accounts):\n verify_pass_result, verify_pass_success = ResourceManager.check_account_health(core_session, val[\"Id\"])\n assert verify_pass_result == 'OK', \"Verify failed on Account: \" + val['Name'] + \", \" + verify_pass_result\n\n # verify account again, both connector and agent are running\n proxycontrol(proxyid, True)\n logger.info(\"Verifying accounts, both agent and connector are available\")\n for i, val in enumerate(accounts):\n 
verify_pass_result, verify_pass_success = ResourceManager.check_account_health(core_session, val[\"Id\"])\n assert verify_pass_result == 'OK', \"Verify Failed on Account: \" + val['Name'] + \", \" + verify_pass_result", "def test_get_virtualization_realms(self):\n pass", "def test_auth_allow(self):\n # Setting authentication on volume for client1 using ip\n auth_dict = {'all': [self.mounts[0].client_system]}\n ret = set_auth_allow(self.volname, self.mnode, auth_dict)\n self.assertTrue(ret, \"Failed to set authentication\")\n g.log.info(\"Successfully set authentication on volume\")\n\n # Mounting volume on client1\n self.authenticated_mount(self.mounts[0])\n\n # Trying to mount volume on client2\n self.unauthenticated_mount(self.mounts[1])\n\n # Verify whether mount failure on client2 is due to auth error\n log_msg = self.is_auth_failure(self.mounts[1].client_system)\n prev_log_statement = log_msg\n\n g.log.info(\"Verification of auth.allow on volume using client IP is \"\n \"successful\")\n\n # Unmount volume from client1\n ret = self.mounts[0].unmount()\n self.assertTrue(ret, (\"Failed to unmount volume %s from client %s\"\n % (self.volname, self.mounts[0].client_system)))\n\n # Obtain hostname of client1\n ret, hostname_client1, _ = g.run(self.mounts[0].client_system,\n \"hostname\")\n self.assertEqual(ret, 0, (\"Failed to obtain hostname of client %s\"\n % self.mounts[0].client_system))\n g.log.info(\"Obtained hostname of client. IP- %s, hostname- %s\",\n self.mounts[0].client_system, hostname_client1.strip())\n\n # Setting authentication on volume for client1 using hostname\n auth_dict = {'all': [hostname_client1.strip()]}\n ret = set_auth_allow(self.volname, self.mnode, auth_dict)\n self.assertTrue(ret, \"Failed to set authentication\")\n g.log.info(\"Successfully set authentication on volume\")\n\n # Mounting volume on client1\n self.authenticated_mount(self.mounts[0])\n\n # Trying to mount volume on client2\n self.unauthenticated_mount(self.mounts[1])\n\n # Verify whether mount failure on client2 is due to auth error\n log_msg = self.is_auth_failure(self.mounts[1].client_system,\n prev_log_statement)\n prev_log_statement = log_msg\n\n g.log.info(\"Verification of auth.allow on volume using client \"\n \"hostname is successful\")\n\n # Unmount volume from client1\n ret = self.mounts[0].unmount()\n self.assertTrue(ret, (\"Failed to unmount volume %s from client %s\"\n % (self.volname, self.mounts[0].client_system)))" ]
[ "0.8507453", "0.8491414", "0.7384301", "0.71532255", "0.71479493", "0.7014119", "0.68402636", "0.67820174", "0.67787737", "0.66900384", "0.63854367", "0.60134757", "0.5917729", "0.5741763", "0.5698821", "0.56894636", "0.56874615", "0.568284", "0.5636769", "0.56319046", "0.5589011", "0.5568394", "0.5549797", "0.5527568", "0.55086625", "0.5474408", "0.5450845", "0.5449037", "0.5408343", "0.5396533" ]
0.9555191
0
Test case for execute_deployment
def test_execute_deployment(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_deployment_run(self):\n pass", "def test_publish_deployment_run(self):\n pass", "def test_release_deployment_run(self):\n pass", "def test_create_deployment(self):\n pass", "def test_launch_deployment(self):\n pass", "def test_get_deployment_runs1(self):\n pass", "def test_get_deployment(self):\n pass", "def test_get_deployment(self):\n pass", "def test_create_deployment_entire(self):\n pass", "def test_get_deployment_runs(self):\n pass", "def test_retest_deployment_run(self):\n pass", "def test_delete_deployment_run(self):\n pass", "def test_update_deployment(self):\n pass", "def test_delete_deployment(self):\n pass", "def test_relaunch_deployment_run(self):\n pass", "def test_get_deployments(self):\n pass", "def test_get_deployments(self):\n pass", "def test_download_deployment_run_test_report(self):\n pass", "def deploy(parameters):\n\n print(\"In deploy module\")", "def test_get_deployment_resource(self):\n pass", "def test_get_deployment_resources(self):\n pass", "def deploy():", "def test_deploy_exit_code(deploy_result: Result) -> None:\n assert deploy_result.exit_code == 0", "def test_get_deployment_runs_in_virtualization_realm(self):\n pass", "def test_get_deployment_run_reports(self):\n pass", "def test_deploy_exit_code(deploy_result: Result) -> None:\n assert deploy_result.exit_code != 0", "def test_clone_deployment(self):\n pass", "def test_deployment(self):\n config = {'example.com': {\n 'applications': {\n 'site-hybridcluster': {\n 'image': 'unknown',\n },\n 'mysql-hybridcluster': {\n 'image': 'unknown',\n }\n },\n 'version': 1\n }}\n expected = Deployment(nodes=frozenset([\n Node(hostname='example.com', applications=frozenset([\n Application(\n name='mysql-hybridcluster',\n image=DockerImage.from_string('unknown'),\n ports=frozenset(),\n ),\n Application(\n name='site-hybridcluster',\n image=DockerImage.from_string('unknown'),\n )]))]))\n self.assertEqual(expected,\n current_from_configuration(config))", "def test_update_deployment_state(self):\n pass", "def test_redeploy(self):\n pass" ]
[ "0.83965224", "0.83112377", "0.79395133", "0.7937938", "0.79108244", "0.7898238", "0.7697238", "0.7697238", "0.7658062", "0.76351124", "0.7622511", "0.7610689", "0.75644535", "0.71708757", "0.7055158", "0.69198626", "0.69198626", "0.6904399", "0.68929946", "0.6832125", "0.68229073", "0.67971206", "0.67919576", "0.6786531", "0.6781579", "0.67052615", "0.6691742", "0.6661516", "0.6658427", "0.6560355" ]
0.94170886
0
Test case for get_activity
def test_get_activity(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_activities(self):\n pass", "def get_activities():\n pass", "def test_get_activity(self):\n activity = self.client.get_activity(96089609)\n self.assertEquals('El Dorado County, CA, USA', activity.location_city)\n\n self.assertIsInstance(activity.start_latlng, attributes.LatLon)\n self.assertAlmostEquals(-120.4357631, activity.start_latlng.lon, places=2)\n self.assertAlmostEquals(38.74263759999999, activity.start_latlng.lat, places=2)\n\n self.assertIsInstance(activity.map, model.Map)\n\n self.assertIsInstance(activity.athlete, model.Athlete)\n self.assertEquals(1513, activity.athlete.id)\n\n #self.assertAlmostEqual(first, second, places, msg, delta)\n # Ensure that iw as read in with correct units\n self.assertEquals(22.5308, float(uh.kilometers(activity.distance)))", "def test_get_activity_occurrence_details(self):\n pass", "def test_get_detailed_activity(self, Activity1, StravaTokens1):\n self.mock_get.return_value = Mock(ok=True)\n self.mock_get.return_value.json.return_value = Activity1\n strava_tokens = StravaTokens1\n response = get_detailed_activity(12345678987654321, strava_tokens)\n assert response.ok is True\n assert response.json() == Activity1", "def test_create_activity(self):\n pass", "def test_get_detailed_activity():\n tokens = get_tokens()\n activity = get_detailed_activity(4563031911, tokens)\n # this activity does not have a description\n assert activity.status_code == 200\n activity = activity.json()\n assert type(activity[\"id\"]) == int\n assert type(activity[\"distance\"]) == float\n assert type(activity[\"moving_time\"]) == int\n assert type(activity[\"elapsed_time\"]) == int\n assert type(activity[\"total_elevation_gain\"]) == float\n assert type(activity[\"elev_high\"]) == float\n assert type(activity[\"elev_low\"]) == float\n assert type(activity[\"type\"]) == str\n assert type(activity[\"start_date\"]) == str\n assert type(activity[\"average_speed\"]) == float\n assert type(activity[\"gear_id\"]) == str\n assert type(activity[\"description\"]) is type(None)\n activity = get_detailed_activity(4576599261, tokens)\n assert activity.status_code == 200\n activity = activity.json()\n # this activity has a description but I added it manually so there's no elev high or low\n assert type(activity[\"description\"]) == str\n\n assert type(activity[\"id\"]) == int\n assert type(activity[\"distance\"]) == float\n assert type(activity[\"moving_time\"]) == int\n assert type(activity[\"elapsed_time\"]) == int\n assert type(activity[\"total_elevation_gain\"]) == float\n assert type(activity[\"type\"]) == str\n assert type(activity[\"start_date\"]) == str\n assert type(activity[\"average_speed\"]) == float\n assert type(activity[\"gear_id\"]) == str", "def test_get_activity_template(self):\n pass", "def test_api_get_activity_by_id(self):\n # create a bucket\n res = self.register_login_get_token()\n self.assertEqual(res.status_code, 201)\n\n # create a activity\n res = self.client().post('/bucketlist/1/activities',\n headers=dict(\n Authorization=\"Bearer \" + self.access_token),\n data=self.activity)\n self.assertEqual(res.status_code, 201)\n # get activity created\n activity_created = json.loads(res.data.decode())\n # get activity by its ID\n res = self.client().get('/bucketlist/1/activities/{}'.format(activity_created['id']),\n headers=dict(\n Authorization=\"Bearer \" + self.access_token))\n self.assertEqual(res.status_code, 200)\n self.assertIn('Shop in', str(res.data))", "def testExerciseModActivity(self):\n attr = self.session.create_visit_attr()\n\n # 
mod_activity_days\n self.util.intTypeTest(self, attr, \"mod_activity_days\")\n\n self.util.intPropertyTest(self, attr, \"mod_activity_days\")\n\n # mod_activity_hours\n self.util.intTypeTest(self, attr, \"mod_activity_hours\")\n\n self.util.intPropertyTest(self, attr, \"mod_activity_hours\")\n\n # mod_activity_minutes\n self.util.intTypeTest(self, attr, \"mod_activity_minutes\")\n\n self.util.intPropertyTest(self, attr, \"mod_activity_minutes\")", "def test_list_activity_occurrences(self):\n pass", "def test_create_activity(self):\n contact = Contact.objects.first()\n sales_cycle = contact.sales_cycles.first()\n\n data = {\n \"owner\": self.user.id,\n \"sales_cycle_id\": sales_cycle.id,\n \"description\": \"test text\",\n }\n\n url, parsed = self.prepare_urls('v1:activity-list', subdomain=self.company.subdomain)\n \n response = self.client.post(url, data, HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n self.authenticate_user()\n response = self.client.post(url, data, HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n content = json.loads(response.content)\n self.assertTrue(content.has_key('owner'))\n self.assertNotEqual(content['owner'], None)\n self.assertTrue(content.has_key('company_id'))\n self.assertNotEqual(content['company_id'], None)\n\n url, parsed = self.prepare_urls('v1:activity-list', subdomain=self.company.subdomain)\n response = self.client.get(url, HTTP_HOST=parsed.netloc)\n content = json.loads(response.content)\n self.assertEqual(self.activities_count+1, content['count']) # added 1 activity", "def test_create_activity_occurrence(self):\n pass", "def test_activity_id(self):\n new_activity = self.app\n self.assertTrue(Activity.activity_id, 0)\n new_activity.create_activity(1)\n self.assertTrue(new_activity.activity_id, 1)\n for key in new_activity.activities:\n self.assertEqual(new_activity.activity_id, key)", "def fetch(self, activity):\n return None, None", "def test_activity_dictionary(self):\n new_activity = self.app\n self.assertEqual(len(new_activity.activities), 0)\n new_activity.create_activity(1)\n self.assertIsInstance(new_activity, Activity)\n self.assertEqual(len(new_activity.activities), 1)", "def test_get_activities_does_not_show_private_fields(self):\n from .mockers import context_query\n from .mockers import user_status_context\n from .mockers import subscribe_context, create_context\n username = 'messi'\n self.create_user(username)\n self.create_context(create_context)\n self.admin_subscribe_user_to_context(username, subscribe_context)\n self.create_activity(username, user_status_context)\n res = self.testapp.get('/contexts/%s/activities' % (context_query['context']), '', oauth2Header(username), status=200)\n self.assertEqual(len(res.json), 1)\n self.assertNotIn('_keywords', res.json[0]['object'])", "def test_update_activity(self):\n pass", "def test_get_activity_templates(self):\n pass", "def getactivity(self) -> Optional[ba.Activity]:\n if self._activity is None:\n return None\n return self._activity()", "def test_activity_creation(self):\n\n res = self.register_login_get_token()\n self.assertEqual(res.status_code, 201)\n # create a activity\n res = self.client().post('/bucketlist/1/activities',\n headers=dict(\n Authorization=\"Bearer \" + self.access_token),\n data=self.activity)\n self.assertEqual(res.status_code, 201)\n self.assertIn('Shop in', str(res.data))", "def get_activity():\n try:\n activity = 
Activity.objects.filter(active=1).latest('id')\n except Activity.DoesNotExist:\n activity = None\n return activity", "def manipulate_activity():\n pass", "def _verify_activity(**args):\n\n selenium2lib = ui_lib.get_s2l()\n error = 0\n\n # Validate Input\n # Set default values\n if \"multiple\" not in args:\n args[\"multiple\"] = 0\n if \"status\" not in args:\n args[\"status\"] = \"ok\"\n if args[\"multiple\"]:\n if \"error\" not in args:\n args[\"error\"] = 0\n if \"warning\" not in args:\n args[\"warning\"] = 0\n else:\n if \"message\" not in args:\n args[\"message\"] = \"No message\"\n\n # Verify required parameters\n required_attributes = [\"activity\", \"entity\"]\n if args[\"multiple\"]:\n required_attributes.append(\"completed\")\n for attribute in required_attributes:\n if attribute not in args:\n logger._warn(\"'%s' attribute not specified.\" % attribute)\n selenium2lib.capture_page_screenshot()\n return False\n\n # Open Activity Nav Bar\n if not ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ACTIVITY_BAR):\n logger._warn(\"Failed to open Activity Bar\")\n selenium2lib.capture_page_screenshot()\n error += 1\n\n # Open Activity Flyout\n if not ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_LAST_ACTIVITY, timeout=7):\n logger._warn(\"Failed to open Activity Flyout\")\n selenium2lib.capture_page_screenshot()\n error += 1\n # TODO: Wait for activity to complete\n # TODO: Verify Activity Status\n # Currently unable to get status from page\n # for x in range(1, 6):\n # value = selenium2lib.get_text(FusionServerProfilesPage.ID_ACTIVITY_STATUS)\n # if value!=\"changing\": break\n # BuiltIn().sleep(\"10 seconds\", \"Waiting for Activity to complete\")\n # if not ui_lib.wait_for_element_text(FusionServerProfilesPage.ID_ACTIVITY_STATUS, status):\n # logger._warn(\"Failed to verify '%s' activity status. Got '%s'.\" % (status, value))\n selenium2lib.capture_page_screenshot()\n # error += 1\n\n # Verify Activity Name\n value = ui_lib.ignore_staleElementRefException(\"get_text\", FusionServerProfilesPage.ID_ACTIVITY_NAME)\n if not ui_lib.wait_for_element_text(FusionServerProfilesPage.ID_ACTIVITY_NAME, args[\"activity\"]):\n logger._warn(\"Failed to verify '%s' activity. \\nGot '%s'.\" % (args[\"activity\"], value))\n selenium2lib.capture_page_screenshot()\n error += 1\n\n # Verify Activity Message\n value = ui_lib.ignore_staleElementRefException(\"get_text\", FusionServerProfilesPage.ID_ACTIVITY_MESSAGE)\n if \"message\" in args:\n if not ui_lib.wait_for_element_text(FusionServerProfilesPage.ID_ACTIVITY_MESSAGE, args[\"message\"]):\n logger._warn(\"Failed to verify '%s' activity message. \\nGot '%s'.\" % (args[\"message\"], value))\n selenium2lib.capture_page_screenshot()\n # error += 1\n else:\n msg_lines = str(value).splitlines()\n index = 0\n\n # Verify Multiple Selected Activity Message\n for attribute in [\"completed\", \"excluded\", \"error\", \"warning\"]:\n # Ignore optional attributes\n if attribute not in args:\n continue\n value = str(msg_lines[index][len(attribute) + 2:])\n if attribute in [\"completed\", \"excluded\"]:\n rc = _compare_lists(value.split(\",\"), args[attribute])\n else:\n rc = (int(value) == int(args[attribute]))\n if not rc:\n logger._warn(\"Failed to verify '%s' %s message. 
\\nGot '%s'.\" % (args[attribute], attribute, value))\n selenium2lib.capture_page_screenshot()\n error += 1\n index += 1\n\n # Verify Activity Entity\n if not args[\"multiple\"]:\n value = ui_lib.ignore_staleElementRefException(\"get_text\", FusionServerProfilesPage.ID_SINGLE_ACTIVITY_ENTITY)\n if not ui_lib.wait_for_element_text(FusionServerProfilesPage.ID_SINGLE_ACTIVITY_ENTITY, args[\"entity\"]):\n logger._warn(\"Failed to verify '%s' activity entity. \\nGot '%s'.\" % (args[\"entity\"], value))\n selenium2lib.capture_page_screenshot()\n error += 1\n else:\n value = ui_lib.ignore_staleElementRefException(\"get_text\", FusionServerProfilesPage.ID_MULTIPLE_ACTIVITY_ENTITY)\n if not ui_lib.wait_for_element_text(FusionServerProfilesPage.ID_MULTIPLE_ACTIVITY_ENTITY, args[\"entity\"]):\n logger._warn(\"Failed to verify '%s' activity entity. \\nGot '%s'.\" % (args[\"entity\"], value))\n selenium2lib.capture_page_screenshot()\n error += 1\n\n # Close Activity Flyout\n value = ui_lib.ignore_staleElementRefException(\"get_text\", FusionServerProfilesPage.ID_ACTIVITY_STATUS)\n if not ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_LAST_ACTIVITY):\n logger._warn(\"Failed to close Activity Flyout\")\n selenium2lib.capture_page_screenshot()\n error += 1\n\n # Close Activity Nav Bar\n if not ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ACTIVITY_BAR):\n logger._warn(\"Failed to close Activity Bar\")\n selenium2lib.capture_page_screenshot()\n error += 1\n\n if error > 0:\n return False\n return True", "def getactivity(self) -> Optional[ba.Activity]:\n stats = self._stats()\n if stats is not None:\n return stats.getactivity()\n return None", "def test_user_activities(self):\n url, parsed = self.prepare_urls(\n 'v1:activity-user-activities', subdomain=self.company.subdomain)\n \n response = self.client.get(url, HTTP_HOST=parsed.netloc)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n self.authenticate_user()\n response = self.client.get(url, HTTP_HOST=parsed.netloc)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n \n content = json.loads(response.content)\n self.assertTrue(content.has_key('count'))\n self.assertTrue(content.has_key('next'))\n self.assertTrue(content.has_key('previous'))\n self.assertTrue(content.has_key('results'))", "def test_create_planned_activity(self):\n contact = Contact.objects.first()\n sales_cycle = contact.sales_cycles.first()\n\n data = {\n \"owner\": self.user.id,\n \"sales_cycle_id\": sales_cycle.id,\n \"description\": \"test text\",\n \"deadline\": timezone.now(),\n }\n\n url, parsed = self.prepare_urls('v1:activity-list', subdomain=self.company.subdomain)\n \n response = self.client.post(url, data, HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n self.authenticate_user()\n response = self.client.post(url, data, HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n content = json.loads(response.content)\n self.assertTrue(content.has_key('owner'))\n self.assertNotEqual(content['owner'], None)\n self.assertTrue(content.has_key('company_id'))\n self.assertNotEqual(content['company_id'], None)\n\n url, parsed = self.prepare_urls('v1:activity-list', subdomain=self.company.subdomain)\n response = self.client.get(url, HTTP_HOST=parsed.netloc)\n content = json.loads(response.content)\n self.assertEqual(self.activities_count+1, content['count']) # added 1 activity", "def 
test_successful_activity_creation(self):\n result = self.app.create_activity(1)\n expected = {4: {'shoplist_id': 1, 'title': 'apples', 'description': 'Fresh Green Apples', 'status': True}}\n self.assertEqual(expected, result)", "def test_crm_activity_next_action(self):\n # Add the next activity (like we set it from a form view)\n lead_model_id = self.env['ir.model']._get('crm.lead').id\n activity = self.env['mail.activity'].with_user(self.user_sales_manager).create({\n 'activity_type_id': self.activity_type_1.id,\n 'summary': 'My Own Summary',\n 'res_id': self.lead_1.id,\n 'res_model_id': lead_model_id,\n })\n activity._onchange_activity_type_id()\n\n # Check the next activity is correct\n self.assertEqual(self.lead_1.activity_summary, activity.summary)\n self.assertEqual(self.lead_1.activity_type_id, activity.activity_type_id)\n # self.assertEqual(fields.Datetime.from_string(self.lead.activity_date_deadline), datetime.now() + timedelta(days=activity.activity_type_id.days))\n\n activity.write({\n 'activity_type_id': self.activity_type_2.id,\n 'summary': '',\n 'note': 'Content of the activity to log',\n })\n activity._onchange_activity_type_id()\n\n self.assertEqual(self.lead_1.activity_summary, activity.activity_type_id.summary)\n self.assertEqual(self.lead_1.activity_type_id, activity.activity_type_id)\n # self.assertEqual(fields.Datetime.from_string(self.lead.activity_date_deadline), datetime.now() + timedelta(days=activity.activity_type_id.days))\n\n activity.action_done()\n\n # Check the next activity on the lead has been removed\n self.assertFalse(self.lead_1.activity_type_id)", "def test_api_get_all_activities(self):\n # create a bucket\n res = self.register_login_get_token()\n self.assertEqual(res.status_code, 201)\n\n # create a activity\n res = self.client().post('/bucketlist/1/activities',\n headers=dict(\n Authorization=\"Bearer \" + self.access_token),\n data=self.activity)\n self.assertEqual(res.status_code, 201)\n\n # get activities\n res = self.client().get('/bucketlist/1/activities',\n headers=dict(\n Authorization=\"Bearer \" + self.access_token))\n self.assertEqual(res.status_code, 200)" ]
[ "0.83331674", "0.7658656", "0.7655726", "0.7414704", "0.72807354", "0.72648036", "0.719823", "0.7015424", "0.6768269", "0.66985774", "0.666823", "0.66591644", "0.6645971", "0.6642361", "0.66319597", "0.66228336", "0.6568418", "0.655863", "0.65396506", "0.6469291", "0.64495015", "0.63786685", "0.63342446", "0.6323036", "0.6291832", "0.62749845", "0.6260608", "0.62190145", "0.6166678", "0.6142516" ]
0.89848083
1
Test case for get_bindings_for_deployment
def test_get_bindings_for_deployment(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_deployment(self):\n pass", "def test_get_deployment(self):\n pass", "def test_get_deployment_resources(self):\n pass", "def test_get_deployment_run(self):\n pass", "def test_get_deployment_resource(self):\n pass", "def test_get_deployment_runs(self):\n pass", "def test_get_deployments(self):\n pass", "def test_get_deployments(self):\n pass", "def test_get_deployment_runs_in_virtualization_realm(self):\n pass", "def test_get_deployment_runs1(self):\n pass", "def bindings(self):\n return self.__bindings", "def test_execute_deployment(self):\n pass", "def test_get_deployment_resource_data(self):\n pass", "def test_create_deployment(self):\n pass", "def test_list_namespaced_deployment_config(self):\n pass", "def test_read_namespaced_deployment_config(self):\n pass", "def generate_python_bindings(test_api_path=None):\n spec = _get_spec(test_api_path)\n scheme = spec['schemes'][0] if 'schemes' in spec else \"https\"\n base_url = \"{}://{}{}\".format(scheme, spec['host'], spec['basePath'])\n\n param_holders = {}\n\n for path in spec['paths']:\n path_split = path.split(\"/\")\n\n for http_method in spec['paths'][path]:\n endpoint_info = spec['paths'][path][http_method]\n endpoint_name = _make_name(http_method, path_split)\n indexed_parameters = index_parameters(spec, endpoint_info)\n # Currently assumes only using one possible oauth scope (Google email) needed.\n requires_auth = 'security' in endpoint_info\n\n # Start tracking params if there's no current store.\n if endpoint_name not in param_holders:\n param_holders[endpoint_name] = {\n 'seen': False,\n 'body_params': indexed_parameters.get('top_level_body_params', {}),\n 'positional': [],\n 'options': {},\n 'requires_auth': requires_auth,\n 'description': endpoint_info.get('description', \"placeholder\")\n }\n else:\n param_holders[endpoint_name]['seen'] = True\n previously_requires_auth = param_holders[endpoint_name]['requires_auth']\n param_holders[endpoint_name]['requires_auth'] = previously_requires_auth or requires_auth\n\n # Add args to their base_route-indexed param_data object with requirement indications.\n _label_path_args_required(path, param_holders[endpoint_name], indexed_parameters)\n _label_optional_args_required(path, param_holders[endpoint_name], indexed_parameters)\n\n # Blow away previous files in the autogenerated folder and remake that directory\n dirname = os.path.dirname(__file__)\n api_function_path = os.path.join(dirname, \"__init__.py\")\n autogen_for_composite_commands_path = os.path.join(dirname, \"composite_commands\", \"__init__.py\")\n for path in api_function_path, autogen_for_composite_commands_path:\n if os.path.exists(path):\n os.remove(path)\n open(path, 'a').close()\n\n autogenerated_path = os.path.join(dirname, \"autogenerated\")\n if os.path.exists(autogenerated_path):\n shutil.rmtree(autogenerated_path)\n os.mkdir(autogenerated_path)\n open(os.path.join(autogenerated_path, \"__init__.py\"), 'a').close()\n\n function_payload = {'classes': []}\n\n for endpoint_name in sorted(param_holders.keys()):\n endpoint_info = param_holders[endpoint_name]\n file_name = format(endpoint_name.replace(\"-\", \"_\")) + \".py\"\n file_path = os.path.join(dirname, \"autogenerated\", file_name)\n\n template_file = \"/api.jinja\"\n template_vars = {'class_name': separator_to_camel_case(endpoint_name, \"-\"),\n 'base_url': base_url,\n 'snake_command_name': endpoint_name.replace(\"-\", \"_\"),\n 'sorted_options': sorted(endpoint_info['options']),\n 'function_def_arglist': 
_make_function_def_arglist(endpoint_info),\n 'endpoint_info': endpoint_info}\n\n _write_jinja_file(template_file, template_vars, file_path)\n function_payload['classes'].append(template_vars)\n\n # Write the initial api functions to dss init file because composite_commands rely on these.\n template_file = \"/functions.jinja\"\n file_path = os.path.join(dirname, \"__init__.py\")\n _write_jinja_file(template_file, function_payload, file_path)\n\n # Loop through all composite_commands files and add their command to python bindings\n function_payload = {'classes': []}\n # Don't want to add imports in the middle of the file, which is what would happen if we appended\n # the full functions template to this. Instead, add these imports to a list and we'll\n # append them at the top of the file and rewrite the full file.\n # Also add the 'do not modify' statement so that's a file docstring.\n added_imports = ['\"\"\"This file is autogenerated according to HCA api spec. Don\\'t modify.\"\"\"']\n\n composite_commands = importlib.import_module(\"hca.dss.composite_commands\")\n prefix = composite_commands.__name__ + \".\"\n for importer, modname, _ in pkgutil.iter_modules(composite_commands.__path__, prefix):\n module = importer.find_module(modname).load_module(modname)\n clsmembers = inspect.getmembers(module, inspect.isclass)\n\n for class_name, class_obj in clsmembers:\n if class_name not in Constants.composite_commands_class_names:\n continue\n\n endpoint_info = class_obj._get_endpoint_info()\n snake_command_name = class_obj.get_command_name().replace(\"-\", \"_\")\n template_vars = {'class_name': class_name,\n 'base_url': None, # unnecessary b/c only used for interacting directly w/ api.\n 'snake_command_name': snake_command_name,\n 'sorted_options': sorted(endpoint_info['options']),\n 'function_def_arglist': _make_function_def_arglist(endpoint_info),\n 'endpoint_info': endpoint_info}\n function_payload['classes'].append(template_vars)\n import_ = \"from .composite_commands.{} import {}\".format(snake_command_name, class_name)\n added_imports.append(import_)\n _write_jinja_file(template_file, function_payload, file_path, False)\n\n added_imports = \"\\n\".join(added_imports)\n with open(file_path, 'r+') as f:\n content = f.read()\n f.seek(0, 0)\n f.write(added_imports.rstrip('\\r\\n') + '\\n' + content)", "def list_policy_binding(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_policy_binding\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1/policybindings'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = 
self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1PolicyBindingList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def test_update_deployment(self):\n pass", "def test_launch_deployment(self):\n pass", "def get_all_catalystport_bindings():\n LOG.debug(\"get_all_catalystport_bindings() called\")\n session = db.get_session()\n try:\n bindings = session.query\n (catalyst_models.CatalystPortBinding).all()\n return bindings\n except exc.NoResultFound:\n return []", "def getBinding(o, name):\n raise RuntimeError()", "def test_generate_namespaced_deployment_config(self):\n pass", "def test_list_namespaced_policy_binding(self):\n pass", "def test_get_deployments_expanded(self):\n pass", "def load_bindings(self):\n for binding in self.bindings:\n self.accept(*binding)", "def test_list_deployment_config_for_all_namespaces(self):\n pass", "def add_bindings(self, configuration, bind_to, typ, bindings):\n wanted = list(bindings.wanted(configuration[typ].values()))\n if not self.get_current(bind_to)[0]:\n log.info(\"Would bind <%s>(%s) to %s\", typ, ', '.join(wanted), bind_to.long_name)\n return\n\n for thing in wanted:\n bound = self.is_bound(typ, thing, bind_to.typ, bind_to.name)\n\n if not bound:\n log.info(\"Binding <%s>(%s) to %s\", typ, thing, bind_to.long_name)\n combined_typ, binding_name_str, name_str = self.combined_typ(bind_to.typ, typ)\n payload = {binding_name_str: bind_to.name, name_str: thing}\n payload.update(configuration[typ][thing].binding_options)\n self.post(combined_typ, {combined_typ: payload, \"params\": {\"action\": \"bind\"}}, content_type=self.content_type(combined_typ))\n else:\n log.debug(\"<%s(%s) already bound to %s\", typ, thing, bind_to.long_name)", "def test_create_policy_binding_for_all_namespaces(self):\n pass", "def getBindings(self):\n return self.getBindingManager().getBindings()" ]
[ "0.6716382", "0.6716382", "0.64101577", "0.63051265", "0.62150395", "0.60583484", "0.6024149", "0.6024149", "0.6017725", "0.58653325", "0.58123964", "0.58099604", "0.57245517", "0.5633864", "0.5597744", "0.5557204", "0.55162925", "0.543872", "0.5434328", "0.54183847", "0.53876793", "0.5384412", "0.53827906", "0.5382684", "0.5376184", "0.53690195", "0.5367645", "0.5326174", "0.5306868", "0.52865964" ]
0.960366
0
Test case for get_build_number
def test_get_build_number(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getBuild(number):", "def getBuild(number):", "def build_number(self):\n return self.get_data(\"build_number\")", "def get_build_number():\n try:\n return int(os.getenv(*legion.config.BUILD_NUMBER))\n except ValueError:\n raise Exception('Cannot parse build number as integer')", "def build_number(self):\n return self._build_number", "def test_build_number(converted_tests):\n submission = SubmissionBuilder(\"t\", \"b\", converted_tests).build()\n assert submission.get(\"number\") == \"b\", submission", "def test_get_build_timestamp(self):\n pass", "def get_build(self, build_id):\n pass", "def build():\n return get_cached(\"build.json\", False).get(\"build_id\")", "def verify_ios_buildNumber():\r\n msg = \"\"\r\n try:\r\n 'Getting Build number for IOS '\r\n if g.platform == 'ios':\r\n text_view = ui_controls.text_view(get_obj_identifier('about_buildNumber_lbl'), label=True)\r\n\r\n 'Verifying whether Build number is matching with expected value IOS'\r\n if g.platform == 'ios' and text_view.strip() == g.build_number :\r\n print \"Build number for IOS is verified successfully. Expected : %s. Actual : %s\" % (g.build_number, text_view.strip())\r\n else:\r\n if g.platform == 'ios':\r\n print \"Build number is not verified successfully. Expected : %s. Actual : %s\" % (g.build_number, text_view.strip())\r\n return False, msg\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n return True, msg", "def get_new_build(old_version, new_version, build):\n\n # Version did not change, increment the current build number\n if old_version == new_version:\n return str(int(build) + 1)\n\n # Version changed, start over at 1\n else:\n return str(1)", "def getBuild():", "def test_build_property(self):\n v1 = versions.Version(version='1.2.3.4', name='foo')\n expected = 4\n\n self.assertEqual(v1.build, expected)", "def get_build_id(build_line):\n match = re.search(r'\\[(\\d+)\\]', build_line)\n if match:\n return match.group(1)", "def test01getNumber(self):\n self.assertEqual( calc.getNumber(), 1234 )", "def build_id(self):\n if self.method == 'tagBuild':\n return self.params[1]", "def GetBuildID(build_bot, date):\n day = '{day:02d}'.format(day=date%100)\n mon = MONTHS[date/100%100]\n date_string = mon + ' ' + day\n if build_bot in WATERFALL_BUILDERS:\n url = 'https://uberchromegw.corp.google.com/i/chromeos/' + \\\n 'builders/%s?numbuilds=200' % build_bot\n if build_bot in ROTATING_BUILDERS:\n url = 'https://uberchromegw.corp.google.com/i/chromiumos.tryserver/' + \\\n 'builders/%s?numbuilds=200' % build_bot\n command = 'sso_client %s' %url\n retval = 1\n retry_time = 3\n while retval and retry_time:\n retval, output, _ = \\\n command_executer.GetCommandExecuter().RunCommandWOutput(command, \\\n print_to_console=False)\n retry_time -= 1\n\n if retval:\n return []\n\n out = output.split('\\n')\n line_num = 0\n build_id = []\n # Parse the output like this\n # <td>Dec 14 10:55</td>\n # <td class=\"revision\">??</td>\n # <td failure</td><td><a href=\"../builders/gcc_toolchain/builds/109\">#109</a>\n while line_num < len(out):\n if date_string in out[line_num]:\n if line_num + 2 < len(out):\n build_num_line = out[line_num + 2]\n raw_num = re.findall(r'builds/\\d+', build_num_line)\n # raw_num is ['builds/109'] in the example.\n if raw_num:\n build_id.append(int(raw_num[0].split('/')[1]))\n line_num += 1\n return build_id", "def verify_Version_buildNumber():\r\n msg, flag = \"\", False\r\n try:\r\n 'Getting Build number for IOS '\r\n if g.platform == 'ios':\r\n flag1, msg1 = 
verify_ios_versionNumber()\r\n msg += msg1\r\n flag2, msg2 = verify_ios_buildNumber()\r\n msg += msg2\r\n 'go back'\r\n flag3=ui_controls.image(get_obj_identifier('about_back_btn'))\r\n print 'cliked on back button'\r\n flag = False if not (flag1 and flag2 and flag3) else True\r\n else:\r\n text_view = ui_controls.text_view(get_obj_identifier('about_buildVersion_lbl'))\r\n \r\n if text_view.strip() == g.android_version_no.strip():\r\n \r\n print \"Version and Build number matched. Expected : %s. Actual : %s\" % (g.android_version_no, text_view.strip())\r\n flag = True \r\n else:\r\n \r\n print \"Version and Build number does not match. Expected : %s. Actual : %s\" % (g.android_version_no, text_view.strip())\r\n flag1=ui_controls.back_button()\r\n \r\n flag = False if not (flag1) else True\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n return flag, msg", "def build(self):\n return int(self.build_string)", "def GetBuildInfo(builder_name, build_num):\n url = '%s/json/builders/%s/builds/%s' % (BUILD_MASTER_URL,\n builder_name,\n build_num)\n return json.load(urllib2.urlopen(url))", "def build_number(self, build_number):\n\n self._build_number = build_number", "def get_latest_build(self):\n # Retrieve last sanity-checked build number (could be 0)\n self.get_last_sanity()\n\n # * List all build numbers for this version. Note this may include\n # builds for other versions, since all versions for a given\n # release share a build directory.\n # * Ignore builds above 50000, which are toy builds\n\n builds = [int(x) for x in os.listdir(self.ver_dir)\n if x.isdigit() and int(x) > self.last_bld and int(x) < 50000]\n builds.sort()\n\n # Check each build after last sanity-checked build\n bld_num = self.last_bld\n for build in builds:\n print (\"Checking build \" + str(build))\n if self.check_build(build):\n bld_num = build\n print(\"bld_num is now \" + str(bld_num))\n return bld_num", "def get_build(self):\n return self.bot_data_file[\"build\"]", "def get_build_line(latest_build):\n proc = Popen([\"osg-koji\", \"buildinfo\", latest_build],\n stdout=PIPE)\n build_line = proc.stdout.readline().decode(\"latin-1\").strip()\n ret = proc.wait()\n if ret != 0 or not build_line:\n return\n return build_line", "def _get_next_build_sequence_id(self):\n self._build_sequence += 1\n return \"{:0>4}\".format(self._build_sequence)", "def readback2BuildNumber(resp):\n a = np.fromstring(resp, dtype='<u1')\n return a[51]", "def test_osx_version_number_value(self):\n \n running_version_number = get_osx_version()[0]\n \n # Check to make sure the returned valued is 10.11.1\n self.assertEqual(running_version_number, '10.11.1')", "def GetBuildNumFromBuilder(build_reason, bot_name, builder_host, builder_port):\n # Gets the buildbot url for the given host and port.\n server_url = _GetBuildBotUrl(builder_host, builder_port)\n buildbot_url = BUILDER_JSON_URL % {'server_url': server_url,\n 'bot_name': bot_name,\n 'build_num': '_all'\n }\n builds_json = _FetchBuilderData(buildbot_url)\n if builds_json:\n builds_data = json.loads(builds_json)\n for current_build in builds_data:\n if builds_data[current_build].get('reason') == build_reason:\n return builds_data[current_build].get('number')\n return None", "def build_id():\n return \"test123\"", "def check_highest_build(self, sys_info, api_results):\n if not api_results.get(\"latest_build_number\"):\n self.results[self.current_endpoint][\"latest_build_number\"] = self.__make_api_get(\n '/apple/latest_build_number/%s' % 
(\".\".join(sys_info.get(\"os_ver\").split(\".\")[:2])))\n\n self.message(\"\\n\\tHighest build number check:\")\n\n # Validate response from API\n if self._validate_response(api_results[\"latest_build_number\"]):\n\n # Valid response from API - now interpret it\n if api_results[\"latest_build_number\"][\n \"msg\"] == sys_info.get(\"build_num\"):\n self.message(\n \"\\t\\t[+] SUCCESS - You are running the latest build number (%s) of the OS version you have installed (%s)\" %\n (sys_info.get(\"build_num\"), sys_info.get(\"os_ver\")))\n\n elif sys_info.get(\"build_num\")[-1].isalpha():\n self.message(\n \"\\t\\t[!] ATTENTION - It looks like you might be running a development OS build '%s' (%s). The EFIgy API currently only has reliable data for production OS releases.\" %\n (sys_info.get(\"build_num\"), sys_info.get(\"os_ver\")))\n else:\n self.message(\n \"\\t\\t[-] ATTENTION - You are NOT running the latest release build number of your OS version (%s). Your build number is %s, the latest release build number is %s\" %\n (sys_info.get(\"os_ver\"), sys_info.get(\"build_num\"), api_results[\"latest_build_number\"][\"msg\"]))" ]
[ "0.7950772", "0.7950772", "0.7815931", "0.7420235", "0.72372687", "0.7152935", "0.6899265", "0.6845484", "0.65984696", "0.65687853", "0.6497847", "0.64489245", "0.64375013", "0.6411389", "0.64076024", "0.6334381", "0.6297275", "0.6255791", "0.6232759", "0.6139806", "0.6135452", "0.6108826", "0.6093129", "0.6086186", "0.5977133", "0.5974226", "0.5932304", "0.5928306", "0.59132814", "0.58954185" ]
0.94596505
0
Test case for get_build_timestamp
def test_get_build_timestamp(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_build_timestamp(jenkins_url, job_name, build_nr):\n timestamp = execute_command(\n f\"wget -qO- {jenkins_url}/{job_name}/{build_nr}\"\n )\n return datetime.fromtimestamp(timestamp/1000)", "def test_get_build_number(self):\n pass", "def getApplicationBuildDate(self) -> unicode:\n ...", "def build_time(self):\n return self.nodes[0].get('infos').get('system_info').get('build_time')", "def GetBuildDate(build_filename):\n try:\n with open(build_filename) as f:\n return float(f.readline())\n except (IOError, ValueError):\n return 0.0", "def build_date(self) -> str:\n data = \"none yet\"\n if self.STARTED:\n data = self.about.get(\"Build Date\", \"UNKNOWN\")\n return data", "def last_build_processed_timestamp(log_file):\n # Get last build processed timestamp\n last_timestamp = 0\n with open(log_file, \"r\") as process_file:\n if os.path.getsize(process_file.name) > 0:\n last_timestamp = process_file.readline().strip()\n return last_timestamp", "def _make_timestamp(self):\r\n\t\tlogger.debug(\"Get a timestamp\")\r\n\t\treturn time.mktime(datetime.today().timetuple())", "def GetBuildDate(build_type, utc_now):\n day = utc_now.day\n month = utc_now.month\n year = utc_now.year\n if build_type != 'official':\n first_sunday = GetFirstSundayOfMonth(year, month)\n # If our build is after the first Sunday, we've already refreshed our build\n # cache on a quiet day, so just use that day.\n # Otherwise, take the first Sunday of the previous month.\n if day >= first_sunday:\n day = first_sunday\n else:\n month -= 1\n if month == 0:\n month = 12\n year -= 1\n day = GetFirstSundayOfMonth(year, month)\n now = datetime.datetime(\n year, month, day, utc_now.hour, utc_now.minute, utc_now.second)\n return '{:%b %d %Y %H:%M:%S}'.format(now)", "def AssertOlderBuild(self, timestamp, timestamp_text):\n self.script.append(\n ('(!less_than_int(%s, getprop(\"ro.build.date.utc\"))) || '\n 'abort(\"E%d: Can\\'t install this package (%s) over newer '\n 'build (\" + getprop(\"ro.build.date\") + \").\");') % (\n timestamp, common.ErrorCode.OLDER_BUILD, timestamp_text))", "def get_git_timestamp(path):\n return int(_run_command(path, 'git log -1 --format=%ct'))", "def getSourceStamp():\n # TODO: it should be possible to expire the patch but still remember\n # that the build was r123+something.", "def read_buildstamp(subdir):\n if os.path.exists(subdir):\n try:\n with open(subdir + '/scripts/build-stamp.txt', 'r') as f:\n freesurfer_version = f.readlines()[0]\n # except a FileNotFound error\n except OSError as e:\n freesurfer_version = input(\n \"\"\"\n Could not find a build timestamp in the supplied subject directory.\n The used freesurfer version can not be extracted. 
Please enter the\n version of freesurfer you are using, if available: \"\"\"\n or \"\")\n return freesurfer_version", "def fixture_old_timestamp() -> datetime.datetime:\n return datetime.datetime(2018, 1, 1)", "def test_logging_timestamps(self):\n import datetime\n\n fmt = \"%Y-%m-%d-%H-%M-%S\"\n time = self.chatbot.timestamp(fmt)\n\n self.assertEqual(time, datetime.datetime.now().strftime(fmt))", "def getAbsoluteSourceStamp(self, got_revision):", "def getLastFinishedBuild():", "def test_version_time_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('version time 2.0 \"%s\"' % self._test_date)\n rv, output = self._execute('version list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def getBuild(number):", "def getBuild(number):", "def GetGsutilVersionModifiedTime():\n if not VERSION_FILE:\n return 0\n return int(os.path.getmtime(VERSION_FILE))", "def get_changefile_timestamp(changefile_type, file_sequence_number):\n url = get_url(changefile_type) + \"/\"\n url = url + (\"%03i/%03i/%03i\" % (file_sequence_number / 1000000,\n file_sequence_number / 1000 % 1000,\n file_sequence_number % 1000))\n url = url + \".state.txt\"\n changefile_timestamp = None\n for result in urllib.urlopen(url):\n # get timestamp\n timestamp_p = result.find(\"timestamp=\")\n if timestamp_p != -1:\n # found timestamp line\n timestamp_p += 10 # jump over text\n result = result[timestamp_p:].replace(\"\\\\\", \"\").strip()\n changefile_timestamp = strtodatetime(result)\n\n if not changefile_timestamp:\n logging.info(\"(no timestamp)\")\n if file_sequence_number == 0:\n changefile_timestamp = datetime(1900, 1, 1)\n else:\n AssertionError(\"no timestamp for %s changefile %i.\" %\n (changefile_type, file_sequence_number))\n else:\n logging.info(\"%s, id: %i, timestamp: %s\" %\n (changefile_type, file_sequence_number,\n changefile_timestamp.isoformat()))\n return changefile_timestamp", "def test_timestamp():\n natural = timestamp(\"December 15, 2015\")\n assert natural == {\n \"unix\": 1450137600,\n \"natural\": \"December 15, 2015\"\n }\n unix = timestamp(\"1450137600\")\n assert unix == {\n \"unix\": 1450137600,\n \"natural\": \"December 15, 2015\"\n }", "def getBuild():", "def _generate_timestamp():\n\t\treturn strftime(\"%Y%m%dT%H%M%S\")", "def test_start_time_tag():\n return os.environ.get(\"RUINER_TEST_START_TIME\", \"latest\")", "def get_creation_time(ts):\n path_to_embed_file = os.path.join(DATA_DIR, STUDY, \"experiment_files\", \"experiment_\"+ ts, \"triplet_training_validation_embeddings.h5\")\n\n if os.path.exists(path_to_embed_file):\n stat = os.stat(path_to_embed_file)\n try:\n return stat.st_birthtime\n except AttributeError:\n # We're probably on Linux. No easy way to get creation dates here,\n # so we'll settle for when its content was last modified.\n return stat.st_mtime\n else:\n print (\"here, path is: \", path_to_embed_file)\n return None", "def test_get_time(self):\n self.assertEqual(\n get_time(), time.strftime(const.TIMESTAMP_FORMAT, time.gmtime(time.time()))\n )", "def get_timestamp(self):\n raise NotImplementedError", "def get_source_stamp(self):" ]
[ "0.7308918", "0.7111528", "0.6824657", "0.67653924", "0.6573493", "0.6523011", "0.6219573", "0.6202032", "0.61634916", "0.6136783", "0.6127701", "0.60959667", "0.6066608", "0.60237443", "0.6013192", "0.6008872", "0.6004678", "0.59854", "0.59559083", "0.59559083", "0.59535646", "0.5924281", "0.59016234", "0.589786", "0.589141", "0.58885187", "0.5847322", "0.58327055", "0.5798297", "0.5774919" ]
0.9438829
0
Test case for get_categories
def test_get_categories(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_categories(self):\n obs = self.tester._get_categories(self.conn_handler)\n self.assertEqual(obs, self.exp_categories)", "def test_get_categories(self):\n obs = self.tester._get_categories(self.conn_handler)\n self.assertEqual(obs, self.exp_categories)", "def test_extract_categories():\n pass", "def test_get_categories(self):\n res = self.client().get('/api/categories')\n res_body = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertTrue(res_body['success'])\n self.assertTrue(res_body['categories'])", "def test_get_categories(self):\n res = self.client().get('/categories')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(len(data['categories']))", "def test_get_categories(self):\n\n res = self.client().get('/categories')\n\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(len(data['categories']), 6)", "def test_get_categories(self):\n res = self.client().get('/categories')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['categories'])\n self.assertTrue(data['total_categories'])", "def test_get_categories(self, mocker):\n mock = mocker.patch(\"requests_html.HTMLSession\")\n mock.return_value.get.return_value.html.find.return_value = iter(\n [\n mocker.Mock(text=\"Ammo\", attrs={\"href\": \"catalogue?cat=1\"}),\n mocker.Mock(text=\"Food\", attrs={\"href\": \"catalogue?cat=2\"}),\n mocker.Mock(text=\"Armour\", attrs={\"href\": \"catalogue?cat=3\"}),\n mocker.Mock(text=\"Weapons\", attrs={\"href\": \"catalogue?cat=4\"}),\n ]\n )\n\n result = resources.get_categories()\n assert list(result) == [\n (1, \"Ammo\"),\n (2, \"Food\"),\n (3, \"Armour\"),\n (4, \"Weapons\"),\n ]", "def test_get_all_categories(self):\n response = self.client.get('/api/v1/categories',\n headers=self.attendant_headers)\n self.assertEqual(response.status_code, 200)", "def test_get_categories_success(self):\n self.test_add_category_success()\n response = self.client.get('/categories',\n headers={\"Authorization\": self.token})\n self.assertEqual(response.status_code, 200)\n self.assertIn('asian', response.data.decode())", "def test_get_categories(self):\n res = self.client().get('/api/categories')\n self.assertEqual(res.status_code, 200)\n data = json.loads(res.data)\n self.assertTrue(data)", "def _get_categories(self, *args):\n raise NotImplementedError(self, \"_get_categories\")", "def test_get_a_category(self):\n self.test_add_category_success()\n response = self.client.get('/categories/1',\n headers={\"Authorization\": self.token})\n self.assertEqual(response.status_code, 200)\n self.assertIn('asian', response.data.decode())", "def test_Categories_getter(self):\r\n expected = ['Treatment', 'DOB']\r\n observed = self.cs_overview.Categories\r\n self.assertEqual(observed, expected)", "def test_index_view_with_categories(self):\n add_cat('test',1,1)\n add_cat('temp',1,1)\n add_cat('tmp',1,1)\n add_cat('tmp test temp',1,1)\n\n response = self.client.get(reverse('index'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"tmp test temp\")\n\n num_cats =len(response.context['categories'])\n self.assertEqual(num_cats , 4)", "def test_get_category_search(self):\n self.test_add_category_success()\n response = self.client.get('/categories?q=a',\n headers={\"Authorization\": self.token})\n 
self.assertEqual(response.status_code, 200)\n self.assertIn('asian', response.data.decode())", "def getCategory():", "def test_category(self):\n\n # Test empty categories\n self.assertFalse(self.colorspace.hasCategory('ocio'))\n self.assertEqual(len(self.colorspace.getCategories()), 0)\n with self.assertRaises(IndexError):\n self.colorspace.getCategories()[0]\n\n # Test with defined TEST_CATEGORIES.\n for i, y in enumerate(TEST_CATEGORIES):\n self.assertEqual(len(self.colorspace.getCategories()), i)\n self.colorspace.addCategory(y)\n self.assertTrue(self.colorspace.hasCategory(y))\n\n # Test the output list is equal to TEST_CATEGORIES.\n self.assertListEqual(\n list(self.colorspace.getCategories()), TEST_CATEGORIES)\n\n # Test the length of list is equal to the length of TEST_CATEGORIES.\n self.assertEqual(len(self.colorspace.getCategories()),\n len(TEST_CATEGORIES))\n\n iterator = self.colorspace.getCategories()\n for a in TEST_CATEGORIES:\n self.assertEqual(a, next(iterator))\n\n # Test the length of categories is zero after clearCategories()\n self.colorspace.clearCategories()\n self.assertEqual(len(self.colorspace.getCategories()), 0)\n\n # Testing individually adding and removing a category.\n self.colorspace.addCategory(TEST_CATEGORIES[0])\n self.assertEqual(len(self.colorspace.getCategories()), 1)\n self.colorspace.removeCategory(TEST_CATEGORIES[0])\n self.assertEqual(len(self.colorspace.getCategories()), 0)", "def test_category(self):\n # XXX identifiers would be groovy\n self.check_search(\n dict(category=u'36:self'), # trap\n [u'Ingrain'],\n 'simple category search, vs self',\n exact=True,\n )\n self.check_search(\n dict(category=u'14:target'), # protect\n [u'Conversion 2', u'False Swipe'],\n 'simple category search, vs target',\n exact=True,\n )\n\n # Multiple categories\n # sleep OR attack up\n self.check_search(\n dict(category=[u'29:self', u'15:target'], category_operator=u'any'),\n [u'Rest', u'Swagger'],\n 'multiple category search (OR)',\n exact=True,\n )\n\n # sleep AND heal self\n self.check_search(\n dict(category=[u'29:self', u'13:self'], category_operator=u'all'),\n [u'Rest'],\n 'multiple category search (AND)',\n exact=True,\n )", "def test_view_categories(self):\n res = self.client().post('/categories/', data=self.category)\n self.assertEqual(res.status_code, 201)\n res = self.client().get('/categories/')\n self.assertEqual(res.status_code, 200)\n self.assertIn('Stews', str(res.data))", "def test_create_category(self):\n pass", "def test_retrieve_categories(self):\n sample_category()\n sample_category(name=\"people\")\n res = self.client.get(CATEGORY_URL)\n categories = Category.objects.all()\n serializer = CategorySerializer(categories, many=True)\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 2)\n self.assertEqual(len(res.data), len(serializer.data))\n self.assertEqual(res.data, serializer.data)", "def test_get_all_categories(self):\n login = self.autheniticate()\n token = json.loads(login.data.decode()).get('token')\n self.app.post(category_url,\n data=json.dumps(self.data),\n headers=dict(Authorization=\"Bearer \" + token),\n content_type='application/json')\n res = self.app.get(category_url,\n headers=dict(Authorization=\"Bearer \" + token))\n res1 = json.loads(res.data.decode())\n self.assertEqual(res1['status'], 'Success!')\n self.assertEqual(res.status_code, 200)", "def test_get_category(self):\n url = reverse(\n 'projectroles:api_project_retrieve',\n kwargs={'project': self.category.sodar_uuid},\n )\n response = 
self.request_knox(url)\n\n self.assertEqual(response.status_code, 200)\n response_data = json.loads(response.content)\n expected = {\n 'title': self.category.title,\n 'type': self.category.type,\n 'parent': None,\n 'description': self.category.description,\n 'readme': '',\n 'public_guest_access': False,\n 'archive': False,\n 'roles': {\n str(self.owner_as_cat.sodar_uuid): {\n 'user': self.get_serialized_user(self.user_owner_cat),\n 'role': PROJECT_ROLE_OWNER,\n 'inherited': False,\n 'sodar_uuid': str(self.owner_as_cat.sodar_uuid),\n }\n },\n 'sodar_uuid': str(self.category.sodar_uuid),\n }\n self.assertEqual(response_data, expected)", "def categories(self):\n pass", "def test_categories_add(self):\n categories = [category.category for category in self.note.categories.all()]\n self.assertIn('test', categories)\n self.assertNotIn('note', categories)", "def test_get_categories_not_found(self):\n response = self.client.get('/categories',\n headers={\"Authorization\": self.token})\n self.assertEqual(response.status_code, 400)\n self.assertIn('No categories found', response.data.decode())", "def test_get_data(self, mock_api):\n mock_client = mock_api.muranoclient(mock.Mock())\n mock_client.categories.list.return_value = [\n 'foo_cat', 'bar_cat'\n ]\n self.categories_view.request.GET.get.return_value = 'foo_marker'\n\n result = self.categories_view.get_data()\n\n expected_categories = ['bar_cat', 'foo_cat']\n expected_kwargs = {\n 'filters': {},\n 'marker': 'foo_marker',\n 'sort_dir': 'asc',\n 'limit': 3\n }\n\n self.assertEqual(expected_categories, result)\n self.assertTrue(self.categories_view.has_more_data(None))\n self.assertFalse(self.categories_view.has_prev_data(None))\n self.categories_view.request.GET.get.assert_called_once_with(\n tables.CategoriesTable._meta.prev_pagination_param, None)\n mock_client.categories.list.assert_called_once_with(\n **expected_kwargs)", "def test_0005_create_categories(self):\n self.create_category(name='Test 0060 Workflow Features', description='Test 0060 - Workflow Features')", "def test_category_manip_pipeline(self):\n raise NotImplementedError(\"\")" ]
[ "0.82036674", "0.82036674", "0.8047375", "0.7999085", "0.7909183", "0.78592485", "0.7836887", "0.78091455", "0.7775471", "0.7743397", "0.7734319", "0.77296174", "0.762621", "0.75798565", "0.75593615", "0.7543341", "0.75156116", "0.7433685", "0.7256726", "0.7244411", "0.71488905", "0.7098355", "0.7090626", "0.7056062", "0.7035163", "0.7008647", "0.6963793", "0.6937108", "0.6892537", "0.6885462" ]
0.91772044
0
Test case for get_cloud
def test_get_cloud(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_clouds(self):\n pass", "def test_get_cloud_resources(self):\n pass", "def test_get_team_owned_clouds(self):\n pass", "def test_update_cloud(self):\n pass", "def test_delete_cloud(self):\n pass", "def got_info(self, cloud_obj):", "def test_register_cloud(self):\n pass", "def test_filter_cloud(self):\n\n self.assertIsPublishing('/camera/depth_registered/points', PointCloud2)\n self.assertGreater(len(self.lastMsg().data), 1000)\n\n service_available = True\n try:\n rospy.wait_for_service('/cloud_services/filter_cloud', 10.0)\n except rospy.ROSException:\n service_available = False\n\n self.assertTrue(service_available, \"Server not found\")\n\n filter_cloud = rospy.ServiceProxy('/cloud_services/filter_cloud',\n PclFilter)\n resp = None\n downsampling_grid_size = 0.01\n\n try:\n resp = filter_cloud(self.lastMsg(), downsampling_grid_size)\n except rospy.ServiceException, e:\n print \"Service call failed: %s\" % e\n\n # Check that the frame_id is still the same\n self.assertEqual(resp.point_cloud.header.frame_id,\n self.lastMsg().header.frame_id)\n # Check that the number of points has been reduced\n self.assertGreater(len(self.lastMsg().data),\n len(resp.point_cloud.data))", "def cloud_information(self):\n url = \"%s/state/teams/%s/cloud\" % (self.url, self.identifier, )\n return perform_request(url)", "def test_cloud_api(tmp_path, is_cloud):\n config = mk_tmp_file(\n tmp_path, key_to_update=\"auth.is_cloud\", value_to_update=is_cloud\n )\n result = runner.invoke(app, [\"--config\", str(config), \"validate\"])\n assert result.exit_code == 0\n if is_cloud:\n assert state.confluence_instance.api_version == \"cloud\"\n else:\n assert state.confluence_instance.api_version == \"latest\"", "def get_cloud_detail(sky):\n debug(\"Getting cloud details\")\n clouds = cloud_map(sky)\n debug(\"There are {} clouds listed in the Metar\".format(len(clouds)))\n thickest = thickest_clouds(clouds)\n debug(\"Found thickest clouds: thick: {} -- base {}\".format(thickest[0], thickest[1]))\n return {\n \"thickness\": thickest[0],\n \"base\": thickest[1]\n }", "def test_cloud_api():\n mock = provider.MockProvider()\n\n mock.setup_cloud('empty config....')\n\n assert mock.get_ext_ip_addr('some-node')", "def test_get_fabric_v_sphere_datastore(self):\n pass", "def test_aws_service_api_flavors_get(self):\n pass", "def test_get_entity(self):\n\n storage = StringIO.StringIO()\n c = pycurl.Curl()\n c.setopt(c.URL,\"http://127.0.0.1:8090/compute/9930\")\n c.setopt(c.HTTPHEADER, ['Accept:application/occi+json'])\n c.setopt(c.VERBOSE, True)\n c.setopt(c.CUSTOMREQUEST, 'GET')\n c.setopt(c.WRITEFUNCTION, storage.write)\n c.perform()\n content = storage.getvalue()\n print \" ===== Body content =====\\n \" + content + \" ==========\\n\"", "def test_aws_service_api_flavor_get(self):\n pass", "def init_cloud_api(self, args=None):\n pass", "def getCloud(self, mjd, config):\n w= 'cloud'\n cloud = self._checkWeather(mjd, w, config)\n return cloud", "def setup_clouds(auth=None):\n get_operator_cloud(auth)\n get_openstack_cloud(auth)", "def setup_clouds(auth=None):\n get_operator_cloud(auth)\n get_openstack_cloud(auth)", "def get_cloud(self):\n self.has_cloud = False\n while not self.has_cloud:\n rospy.sleep(0.01)\n\n # cloud_time = self.active_cloud_msg.header.stamp\n # cloud_frame = self.active_cloud_msg.header.frame_id\n cloud = np.array(list(point_cloud2.read_points(self.active_cloud_msg)))[:, 0:3]\n mask = np.logical_not(np.isnan(cloud).any(axis=1))\n cloud = cloud[mask]\n\n print 'received cloud with {} 
points.'.format(cloud.shape[0])\n return cloud", "def getting_info(self, cloud_path):\n\t\telog(\"getting info on {}\".format(cloud_path))", "def test_gettem_using_get(self):\n pass", "def test_get(self):\n pass", "def test_demo_service_call(self):\n project_name = 'romainducarrouge-31f2'\n resp = DemoAivenStorage(os.environ[\"AIVEN_API_URL\"],\n os.environ[\"AIVEN_TOKEN\"]).get_services(project_name)\n assert isinstance(resp, list)\n assert len(resp) == 1\n assert 'pg-216f7124' in resp", "def tag_cloud():\n\n return LOAD('plugin_wiki','cloud')", "def test_cloud_service(self):\n cur = self.factory.create(access_token=ACCESS_TOKEN, expires_at=self.current_dt)\n with HTTMock(spark_cloud_mock):\n cloud = CloudCredentials.objects.cloud_service()\n self.assertEqual(cloud.access_token, ACCESS_TOKEN)\n cur.delete()", "def test_transform_cloud(self):\n\n self.assertIsPublishing('/camera/depth_registered/points', PointCloud2)\n self.assertGreater(len(self.lastMsg().data), 1000)\n\n service_available = True\n try:\n rospy.wait_for_service('/cloud_services/transform_cloud', 10.0)\n except rospy.ROSException:\n service_available = False\n\n self.assertTrue(service_available, \"Server not found\")\n\n transform_cloud = rospy.ServiceProxy('/cloud_services/transform_cloud',\n PclTransform)\n resp = None\n # Set the timestamp to now for the transform lookup to work\n self.lastMsg().header.stamp = rospy.Time.now()\n try:\n resp = transform_cloud(self.lastMsg(), \"world\")\n except rospy.ServiceException, e:\n print \"Service call failed: %s\" % e\n\n self.assertEqual(resp.point_cloud.header.frame_id, 'world')\n self.assertEqual(len(resp.point_cloud.data), len(self.lastMsg().data))", "def test_aws_service_api_vms_get(self):\n pass", "def test_list_virt_realms_in_cloud(self):\n pass" ]
[ "0.82805216", "0.80275285", "0.72777873", "0.69897956", "0.6691594", "0.6567123", "0.65205866", "0.65130216", "0.65112543", "0.65056866", "0.6406681", "0.63656723", "0.63179857", "0.6277758", "0.6240213", "0.62317336", "0.62090814", "0.61762565", "0.6168871", "0.6168871", "0.6151261", "0.6077316", "0.6048816", "0.6041682", "0.6039732", "0.6013936", "0.6010428", "0.6009892", "0.59789324", "0.5976267" ]
0.93392926
0
Test case for get_cloud_resources
def test_get_cloud_resources(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_cloud(self):\n pass", "def test_get_api_resources(self):\n pass", "def test_get_deployment_resources(self):\n pass", "def test_get_resource(self):\n for i in range(11):\n self.app.post(f'/v1/resource/{ResourceTypeName.get()}', data=json.dumps({'actions': ['tr:action1']}),\n headers=admin_headers)\n self._test_paging('/v1/resources', admin_headers, 10, 'resources')", "def init_cloud_virtual_resources():\n test_cldvirt_resources = []\n\n # add info to list in memory, one by one, following signature values\n cldvirtres_ID = 1\n cldvirtres_name = \"nova-compute-1\"\n cldvirtres_info = \"nova VM in Arm pod\"\n cldvirtres_IPAddress = \"50.60.70.80\"\n cldvirtres_URL = \"http://50.60.70.80:8080\"\n cldvirtres_related_phys_rsrcIDs = [1,3]\n\n test_cldvirt_resources.append(CloudVirtualResource(cldvirtres_ID, cldvirtres_name,\n cldvirtres_info,\n cldvirtres_IPAddress,\n cldvirtres_URL,\n cldvirtres_related_phys_rsrcIDs))\n\n cldvirtres_ID = 2\n cldvirtres_name = \"nova-compute-2\"\n cldvirtres_info = \"nova VM in LaaS\"\n cldvirtres_IPAddress = \"50.60.70.80\"\n cldvirtres_URL = \"http://50.60.70.80:8080\"\n cldvirtres_related_phys_rsrcIDs = [2,3]\n\n test_cldvirt_resources.append(CloudVirtualResource(cldvirtres_ID, cldvirtres_name,\n cldvirtres_info,\n cldvirtres_IPAddress,\n cldvirtres_URL,\n cldvirtres_related_phys_rsrcIDs))\n\n cldvirtres_ID = 3\n cldvirtres_name = \"nova-compute-3\"\n cldvirtres_info = \"nova VM in x86 pod\"\n cldvirtres_IPAddress = \"50.60.70.80\"\n cldvirtres_URL = \"http://50.60.70.80:8080\"\n cldvirtres_related_phys_rsrcIDs = [1]\n\n test_cldvirt_resources.append(CloudVirtualResource(cldvirtres_ID, cldvirtres_name,\n cldvirtres_info,\n cldvirtres_IPAddress,\n cldvirtres_URL,\n cldvirtres_related_phys_rsrcIDs))\n\n\n # write list to binary file\n write_list_bin(test_cldvirt_resources, FILE_CLOUD_RESOURCES)\n\n return test_cldvirt_resources", "def test_get_clouds(self):\n pass", "def test_successful_resources(self):\n\n url = '/%s/jobs/%i/' % (self.api, self.job.id)\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n\n self.assertEqual(result['resources']['resources']['cpus'], 1.0)\n self.assertEqual(result['resources']['resources']['mem'], 128.0)\n self.assertEqual(result['resources']['resources']['disk'], 10.0)\n\n url = '/%s/jobs/%i/' % (self.api, self.job2.id)\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n\n self.assertEqual(result['resources']['resources']['cpus'], 1.0)\n self.assertEqual(result['resources']['resources']['mem'], 1024.0)\n self.assertEqual(result['resources']['resources']['disk'], 1040.0)", "def GetResourceSample():\n client = CreateClient()\n for e1 in client.GetResources(limit=5).entry:\n e2 = client.GetResource(e1)\n print 'Refetched: ', e2.title.text, e2.resource_id.text", "def test_get_deployment_resource_data(self):\n pass", "def get_resources(resource_client) -> list:\n resource_list = []\n paginator = resource_client.get_paginator(BOTO3_LIST_FUNCTION)\n pages = paginator.paginate()\n for page in pages:\n # Your going to have to look through the response and append the correct value to the list\n resource = page[\"something\"]\n resource_list = resource_list + resource\n return resource_list", "def test_list_cluster_resource_quota(self):\n pass", "def test_get_resource_group_list(self):\n pass", "def 
GetAllResourcesSample():\n client = CreateClient()\n # Unlike client.GetResources, this returns a list of resources\n for resource in client.GetAllResources():\n PrintResource(resource)", "def test_get_deployment_resource(self):\n pass", "def test_get_virtualization_realm_resources(self):\n pass", "def test_get_resource_license_resource_count_list(self):\n pass", "def test_load_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def test_resource_service(self, integrationtest, k8sconfig):\n # Fixtures.\n k8sconfig = self.k8sconfig(integrationtest, k8sconfig)\n err_resp = (K8sResource(\"\", \"\", \"\", False, \"\"), True)\n\n # Tuples of API version that we ask for (if any), and what the final\n # K8sResource element will contain.\n api_versions = [\n # We expect to get the version we asked for.\n (\"v1\", \"v1\"),\n\n # Function must automatically determine the latest version of the resource.\n (\"\", \"v1\"),\n ]\n\n for src, expected in api_versions:\n # A particular Service in a particular namespace.\n res, err = k8s.resource(k8sconfig, MetaManifest(src, \"Service\", \"ns\", \"name\"))\n assert not err\n assert res == K8sResource(\n apiVersion=expected, kind=\"Service\", name=\"services\", namespaced=True,\n url=f\"{k8sconfig.url}/api/v1/namespaces/ns/services/name\",\n )\n\n # All Services in all namespaces.\n res, err = k8s.resource(k8sconfig, MetaManifest(src, \"Service\", None, None))\n assert not err\n assert res == K8sResource(\n apiVersion=expected, kind=\"Service\", name=\"services\", namespaced=True,\n url=f\"{k8sconfig.url}/api/v1/services\",\n )\n\n # All Services in a particular namespace.\n res, err = k8s.resource(k8sconfig, MetaManifest(src, \"Service\", \"ns\", \"\"))\n assert not err\n assert res == K8sResource(\n apiVersion=expected, kind=\"Service\", name=\"services\", namespaced=True,\n url=f\"{k8sconfig.url}/api/v1/namespaces/ns/services\",\n )\n\n # A particular Service in all namespaces -> Invalid.\n MM = MetaManifest\n assert k8s.resource(k8sconfig, MM(src, \"Service\", None, \"name\")) == err_resp", "def get_TestServiceDirectResources(test_case, # type: AnyMagpieTestCaseType\n ignore_missing_service=False, # type: bool\n override_service_name=null, # type: Optional[Str]\n override_headers=null, # type: Optional[HeadersType]\n override_cookies=null, # type: Optional[CookiesType]\n ): # type: (...) 
-> List[JSON]\n app_or_url = get_app_or_url(test_case)\n svc_name = override_service_name if override_service_name is not null else test_case.test_service_name\n path = \"/services/{svc}/resources\".format(svc=svc_name)\n resp = test_request(app_or_url, \"GET\", path,\n headers=override_headers if override_headers is not null else test_case.json_headers,\n cookies=override_cookies if override_cookies is not null else test_case.cookies,\n expect_errors=ignore_missing_service)\n if ignore_missing_service and resp.status_code == 404:\n return []\n json_body = get_json_body(resp)\n resources = json_body[svc_name][\"resources\"]\n return [resources[res] for res in resources]", "def test_read_cluster_resource_quota(self):\n pass", "def get_resources(name: Optional[str] = None,\n required_tags: Optional[Mapping[str, str]] = None,\n resource_group_name: Optional[str] = None,\n type: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetResourcesResult:\n __args__ = dict()\n __args__['name'] = name\n __args__['requiredTags'] = required_tags\n __args__['resourceGroupName'] = resource_group_name\n __args__['type'] = type\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('azure:core/getResources:getResources', __args__, opts=opts, typ=GetResourcesResult).value\n\n return AwaitableGetResourcesResult(\n id=pulumi.get(__ret__, 'id'),\n name=pulumi.get(__ret__, 'name'),\n required_tags=pulumi.get(__ret__, 'required_tags'),\n resource_group_name=pulumi.get(__ret__, 'resource_group_name'),\n resources=pulumi.get(__ret__, 'resources'),\n type=pulumi.get(__ret__, 'type'))", "def test_get_translation_resources(self):\n pass", "def get_overcloud_resource(resource_type=None,\n resource_state=None):\n pcs_df = get_pcs_resources_table()\n\n if resource_type and resource_state:\n pcs_df_query_resource_type_state = pcs_df.query(\n 'resource_type==\"{}\" and resource_state==\"{}\"'.format(\n resource_type, resource_state))\n return pcs_df_query_resource_type_state[\n 'resource'].unique().tolist()\n\n if resource_type and not resource_state:\n # pylint: disable=no-member\n pcs_df_query_resource_type = pcs_df.query(\n 'resource_type==\"{}\"'.format(resource_type))\n return pcs_df_query_resource_type['resource'].unique().tolist()", "def mock_resource_collection_containing(\n resources: Iterable[message.Message],\n) -> mock.MagicMock:\n mock_collection = mock.MagicMock(spec=fhir_package.ResourceCollection)\n resources = {\n cast(Any, resource).url.value: resource for resource in resources\n }\n\n def mock_get(uri: str) -> message.Message:\n return resources.get(uri)\n\n mock_collection.get.side_effect = mock_get\n\n return mock_collection", "def test_get_resourcetypes_extract(self):\n self.assertEqual(\"nodes\", util.get_resourcetypes(\"/resource/nodes\"))\n self.assertEqual(\"dests\", util.get_resourcetypes(\"http://localhost:8080/resource/dests/\"))\n self.assertEqual(\"flows\", util.get_resourcetypes(\"/resource/flows/ABCDEF\"))\n self.assertEqual(\"nodes\", util.get_resourcetypes(\"http://localhost:8080/resource/nodes/ABCDEF/\"))", "def _get_resource(self, *args, **kwargs):\r\n r = []\r\n if kwargs['resource'] in self.resources:\r\n r = self.get_list(\r\n api_endpoint=settings.GITHUB_SETTINGS['GITHUB_USER_REPO_API'].format(**kwargs), **kwargs\r\n )\r\n else:\r\n ValueError(\"{resource} - Resource Not Supported\".format(**kwargs)) \r\n return r", "def GetResourcesSample():\n client = CreateClient()\n # Get a feed and print 
it\n feed = client.GetResources()\n PrintFeed(feed)", "def test_aws_service_api_flavors_get(self):\n pass", "def resources():\n check_resources()", "def test_read_cluster_resource_quota_status(self):\n pass" ]
[ "0.70932215", "0.70018494", "0.68724173", "0.68520796", "0.66928756", "0.6503559", "0.64927614", "0.6459083", "0.63399476", "0.6281268", "0.62456095", "0.6223968", "0.62105274", "0.6195349", "0.61847264", "0.6171011", "0.6135712", "0.60823786", "0.6054148", "0.60434777", "0.6001516", "0.5990652", "0.5975575", "0.59503126", "0.59299785", "0.5920539", "0.58813244", "0.58769715", "0.5851952", "0.5845177" ]
0.9290637
0
Test case for get_clouds
def test_get_clouds(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_cloud(self):\n pass", "def test_get_team_owned_clouds(self):\n pass", "def test_get_cloud_resources(self):\n pass", "def setup_clouds(auth=None):\n get_operator_cloud(auth)\n get_openstack_cloud(auth)", "def setup_clouds(auth=None):\n get_operator_cloud(auth)\n get_openstack_cloud(auth)", "def get_cloud_detail(sky):\n debug(\"Getting cloud details\")\n clouds = cloud_map(sky)\n debug(\"There are {} clouds listed in the Metar\".format(len(clouds)))\n thickest = thickest_clouds(clouds)\n debug(\"Found thickest clouds: thick: {} -- base {}\".format(thickest[0], thickest[1]))\n return {\n \"thickness\": thickest[0],\n \"base\": thickest[1]\n }", "def recognizing_clouds(cat):\n print(\"length in helpers\", len(cat))\n # finding clouds in catalogs\n base_table = pd.read_excel(r'/Users/shlomo/Desktop/Thesis/pythonProject/Combined plots/Other '\n r'catalogs/Table_Zucker.xlsx')\n\n names_col = []\n for cloud_number in range(len(base_table)):\n cloud_name = base_table[\"cloud\"][cloud_number]\n # getting locations from catalog\n locations_xyz = [base_table[\"x_pc\"][cloud_number], base_table[\"y_pc\"][cloud_number],\n base_table[\"z_pc\"][cloud_number]]\n # locations_lb = [base_table['l'][cloud_number], base_table['b'][cloud_number]]\n result = find_apt_line(locations_xyz, cat)[0]\n names_col.append([result, cloud_name])\n print([result, cloud_name])\n\n add_names(cat, names_col)\n return cat", "def test_update_cloud(self):\n pass", "def test_aws_service_api_flavors_get(self):\n pass", "def test_aws_service_api_flavor_get(self):\n pass", "def test_filter_cloud(self):\n\n self.assertIsPublishing('/camera/depth_registered/points', PointCloud2)\n self.assertGreater(len(self.lastMsg().data), 1000)\n\n service_available = True\n try:\n rospy.wait_for_service('/cloud_services/filter_cloud', 10.0)\n except rospy.ROSException:\n service_available = False\n\n self.assertTrue(service_available, \"Server not found\")\n\n filter_cloud = rospy.ServiceProxy('/cloud_services/filter_cloud',\n PclFilter)\n resp = None\n downsampling_grid_size = 0.01\n\n try:\n resp = filter_cloud(self.lastMsg(), downsampling_grid_size)\n except rospy.ServiceException, e:\n print \"Service call failed: %s\" % e\n\n # Check that the frame_id is still the same\n self.assertEqual(resp.point_cloud.header.frame_id,\n self.lastMsg().header.frame_id)\n # Check that the number of points has been reduced\n self.assertGreater(len(self.lastMsg().data),\n len(resp.point_cloud.data))", "def test_delete_cloud(self):\n pass", "def get_clouds():\n clouds = [ x.get('cloud') for x in Schedconfig.objects.values('cloud').distinct() ]\n locale.setlocale(locale.LC_ALL, '')\n clouds = sorted(clouds, key=locale.strxfrm)\n return clouds", "def getCloud(self, mjd, config):\n w= 'cloud'\n cloud = self._checkWeather(mjd, w, config)\n return cloud", "def cloud_information(self):\n url = \"%s/state/teams/%s/cloud\" % (self.url, self.identifier, )\n return perform_request(url)", "def test_get_fabric_v_sphere_datastore(self):\n pass", "def test_aws_service_api_regions_get(self):\n pass", "def test_register_cloud(self):\n pass", "def test_volumes_get(self):\n pass", "def test_aws_service_api_volumes_get(self):\n pass", "def get_cloud(self):\n self.has_cloud = False\n while not self.has_cloud:\n rospy.sleep(0.01)\n\n # cloud_time = self.active_cloud_msg.header.stamp\n # cloud_frame = self.active_cloud_msg.header.frame_id\n cloud = np.array(list(point_cloud2.read_points(self.active_cloud_msg)))[:, 0:3]\n mask = 
np.logical_not(np.isnan(cloud).any(axis=1))\n cloud = cloud[mask]\n\n print 'received cloud with {} points.'.format(cloud.shape[0])\n return cloud", "def got_info(self, cloud_obj):", "def geocube():", "def test_get_fabric_v_sphere_datastores(self):\n pass", "def init_cloud_api(self, args=None):\n pass", "def find_cloud_layers(example_dict, min_path_kg_m02, for_ice=False):\n\n error_checking.assert_is_greater(min_path_kg_m02, 0.)\n error_checking.assert_is_boolean(for_ice)\n\n if for_ice:\n path_matrix_kg_m02 = get_field_from_dict(\n example_dict=example_dict, field_name=UPWARD_ICE_WATER_PATH_NAME\n )\n else:\n path_matrix_kg_m02 = get_field_from_dict(\n example_dict=example_dict, field_name=UPWARD_LIQUID_WATER_PATH_NAME\n )\n\n path_diff_matrix_kg_m02 = numpy.diff(path_matrix_kg_m02, axis=1, prepend=0.)\n\n num_examples = path_matrix_kg_m02.shape[0]\n cloud_mask_matrix = numpy.full(path_matrix_kg_m02.shape, False, dtype=bool)\n cloud_layer_counts = numpy.full(num_examples, 0, dtype=int)\n\n for i in range(num_examples):\n these_diffs = path_diff_matrix_kg_m02[i, :] + 0.\n\n if for_ice:\n these_diffs[these_diffs <= 1e9] = 0\n else:\n these_diffs[these_diffs <= 1e6] = 0\n\n these_start_indices, these_end_indices = _find_nonzero_runs(\n path_diff_matrix_kg_m02[i, :]\n )\n\n this_num_layers = len(these_start_indices)\n\n for j in range(this_num_layers):\n this_path_kg_m02 = numpy.sum(\n path_diff_matrix_kg_m02[\n i, these_start_indices[j]:(these_end_indices[j] + 1)\n ]\n )\n\n if this_path_kg_m02 < min_path_kg_m02:\n continue\n\n cloud_layer_counts[i] += 1\n cloud_mask_matrix[\n i, these_start_indices[j]:(these_end_indices[j] + 1)\n ] = True\n\n return cloud_mask_matrix, cloud_layer_counts", "def test_cloud_voxelation():\n\n # Setup\n cloud = tg.cloud_from_csv(os.path.join(sample_data_path, \"rdam_cloud.csv\"))\n\n expected_lattice = tg.lattice_from_csv(\n os.path.join(sample_data_path, \"rdam_lattice.csv\"))\n\n # Exercise\n computed_lattice = cloud.voxelate(1, closed=True)\n\n # Verify\n np.testing.assert_allclose(\n expected_lattice, computed_lattice, rtol=1e-6, atol=0)\n\n # Cleanup", "def test_aws_service_api_image_get(self):\n pass", "def test_aws_service_api_vms_get(self):\n pass", "def draw_clouds(a):\n small_cloud(0 + a, 0, 0)\n small_cloud(200 + a, -150, -10)\n big_cloud(350 + a, 0, -3)\n small_cloud(600 + a, -90, 3)\n small_cloud(800 + a, 0, 0)\n small_cloud(1000 + a, -150, -10)\n big_cloud(1150 + a, 0, -3)\n small_cloud(1400 + a, -90, 3)\n small_cloud(-800 + a, 0, 0)\n small_cloud(-600 + a, -150, -10)\n big_cloud(-450 + a, 0, -3)\n small_cloud(-200 + a, -90, 3)" ]
[ "0.82829165", "0.8106245", "0.73407876", "0.67277664", "0.67277664", "0.66463965", "0.63641065", "0.63612753", "0.6280552", "0.61628413", "0.60973257", "0.6055564", "0.6041021", "0.60140383", "0.5999089", "0.59575593", "0.5887805", "0.58812153", "0.5866492", "0.5793975", "0.5736535", "0.56989276", "0.56680727", "0.56594515", "0.5656892", "0.557796", "0.5577794", "0.55622816", "0.55553913", "0.5528678" ]
0.9352186
0
Test case for get_composition
def test_get_composition(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_composition(self):", "def test_list_compositions(self):\n pass", "def test_update_composition(self):\n pass", "def test_launch_composition(self):\n pass", "def test_list_composition_status(self):\n pass", "def test_publish_scenario_to_composition(self):\n pass", "def test_delete_composition(self):\n pass", "def test_composition_adds_to_100_percent(self):", "def parse_composition(self, filename, data):\n # Prepare the composition bean\n composition = beans.RawComposition(data['name'], filename)\n\n # Parse the root composite\n root = self.parse_composite(None, data['root'])\n if root is None:\n # Not a valid root composite\n return None\n\n composition.root = root\n return composition", "def get_decomposition(self):\n raise NotImplementedError('this should be implemented by a subclass')", "def composition(self):\n return self._composition.copy()", "def _can_for_composition(self, func_name, composition_id):\n return self._can_for_object(func_name, composition_id, 'get_repository_ids_for_composition')", "def fxt_default_composition(base_composition: conf_types.Composition):\n return base_composition", "def test_composing_workflow(self):\n\n test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata')\n sys.path.append(test_data_dir)\n import compose\n tmpdir = tempfile.mkdtemp()\n try:\n # First make sure the simple pipeline can be compiled.\n simple_package_path = os.path.join(tmpdir, 'simple.tar.gz')\n compiler.Compiler().compile(compose.save_most_frequent_word, simple_package_path)\n\n # Then make sure the composed pipeline can be compiled and also compare with golden.\n compose_package_path = os.path.join(tmpdir, 'compose.tar.gz')\n compiler.Compiler().compile(compose.download_save_most_frequent_word, compose_package_path)\n with open(os.path.join(test_data_dir, 'compose.yaml'), 'r') as f:\n golden = yaml.load(f)\n compiled = self._get_yaml_from_tar(compose_package_path)\n\n self.maxDiff = None\n # Comment next line for generating golden yaml.\n self.assertEqual(golden, compiled)\n finally:\n # Replace next line with commented line for gathering golden yaml.\n shutil.rmtree(tmpdir)\n # print(tmpdir)", "def test_call(self):\r\n self.assertEqual(self.cs(), {})", "def test_components(self):\n\n good_cpts = self.good.components.components.copy()\n\n for cid, cpt in self.actual.components.components.items():\n goodcpt = good_cpts.pop(cid)\n\n self.assertEqual(cpt.name, goodcpt.name)\n self.assertEqual(cpt.attributes, goodcpt.attributes)\n self.assertEqual(len(cpt.symbols), 1)\n self.assertEqual(len(cpt.symbols[0].bodies), 1)\n\n body = cpt.symbols[0].bodies[0]\n goodbody = goodcpt.symbols[0].bodies[0]\n\n self.assertEqual(len(body.shapes), len(goodbody.shapes))\n for shape, goodshape in zip(body.shapes, goodbody.shapes):\n self.assertEqual(shape.__class__, goodshape.__class__)\n self.assertEqual(shape.json(), goodshape.json())\n\n self.assertEqual(len(body.pins), len(goodbody.pins))\n for pin, goodpin in zip(body.pins, goodbody.pins):\n self.assertEqual(pin.__class__, goodpin.__class__)\n self.assertEqual(pin.json(), goodpin.json())\n\n self.assertEqual(good_cpts, {})", "def test_compartments(self):\n element_factory = self.element_factory\n diagram = element_factory.create(UML.Diagram)\n klass = diagram.create(ClassItem, subject=element_factory.create(UML.Class))\n\n assert 2 == len(klass._compartments)\n assert 0 == len(klass._compartments[0])\n assert 0 == len(klass._compartments[1])\n assert (10, 10) == klass._compartments[0].get_size()\n\n diagram.canvas.update()\n\n 
assert (10, 10) == klass._compartments[0].get_size()\n assert 50 == float(klass.min_height) # min_height\n assert 100 == float(klass.min_width)\n\n attr = element_factory.create(UML.Property)\n attr.name = 4 * \"x\" # about 44 pixels\n klass.subject.ownedAttribute = attr\n\n diagram.canvas.update()\n assert 1 == len(klass._compartments[0])\n assert klass._compartments[0].get_size() > (44.0, 20.0)\n\n oper = element_factory.create(UML.Operation)\n oper.name = 4 * \"x\" # about 44 pixels\n klass.subject.ownedOperation = oper\n\n oper = element_factory.create(UML.Operation)\n oper.name = 6 * \"x\" # about 66 pixels\n klass.subject.ownedOperation = oper\n\n diagram.canvas.update()\n assert 2 == len(klass._compartments[1])\n assert klass._compartments[1].get_size() > (63.0, 34.0)", "def load_composition(args: argparse.Namespace) -> mzcompose.Composition:\n repo = mzbuild.Repository.from_arguments(ROOT, args)\n try:\n return mzcompose.Composition(\n repo,\n name=args.find or Path.cwd().name,\n preserve_ports=args.preserve_ports,\n project_name=args.project_name,\n )\n except mzcompose.UnknownCompositionError as e:\n if args.find:\n hint = \"available compositions:\\n\"\n for name in repo.compositions:\n hint += f\" {name}\\n\"\n e.set_hint(hint)\n raise e\n else:\n hint = \"enter one of the following directories and run ./mzcompose:\\n\"\n for path in repo.compositions.values():\n hint += f\" {path.relative_to(Path.cwd())}\\n\"\n raise UIError(\n \"directory does not contain mzcompose.py\",\n hint,\n )", "def test_composition(self):\n\n i = Code()\n j = Code()\n k = Code()\n l = Code()\n\n c = j + i ** i // 5 / l < j - k\n self.assertEqual(str(c), 'j + i ** i // 5 / l < j - k')", "def test_get_component_descriptors_by_type_using_get(self):\n pass", "def test_comp_surface(self, test_dict):\n test_obj = test_dict[\"test_obj\"]\n result = test_obj.slot.comp_surface()\n\n a = result\n b = test_dict[\"S_exp\"]\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)\n\n b = comp_surface(test_obj.slot)\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)", "def test_get_container(self):\n pass", "def test_first_level_composition(self):\n oe = expression.OperationalExpression\n v1, v2 = map(expression.Variable, [\"v1\", \"v2\"])\n data = [\n [(v1 + v2), oe('+', v1, v2)],\n [(v1 - v2), oe('-', v1, v2)],\n [(v1 / v2), oe('/', v1, v2)],\n [(v1 * v2), oe('*', v1, v2)],\n ]\n yield from self.generate_from_data(data)", "def test_elemental_composition_constraint():\n name = \"Property Band gap\"\n minimum = 1\n maximum = 2\n elements = [\"Ga\", \"N\"]\n\n c = ElementalCompositionConstraint(name=name,\n elements=elements,\n minimum=minimum,\n maximum=maximum)\n\n mapped_c = c.to_dict()\n\n assert mapped_c[\"type\"] is \"elementalCompositionConstraint\"\n assert mapped_c[\"name\"] is name\n assert mapped_c[\"options\"][\"min\"] is minimum\n assert mapped_c[\"options\"][\"max\"] is maximum\n assert mapped_c[\"options\"][\"elements\"] is elements", "def test_get_collection(self):\n pass", "def test_get_component_descriptors_by_types_using_get(self):\n pass", "def test_get_parts(self):\n pass", "def test_get_collections(self):\n pass", "def get_composition_search(self):\n # Implemented from azosid template for -\n # osid.resource.ResourceSearchSession.get_resource_search_template\n if not self._can('search'):\n raise PermissionDenied()\n return 
self._provider_session.get_composition_search()", "def GetCompositionMode(*args, **kwargs):\n return _gdi_.GraphicsContext_GetCompositionMode(*args, **kwargs)" ]
[ "0.8119706", "0.7530906", "0.74301285", "0.73741776", "0.67462796", "0.6667547", "0.63649756", "0.61619407", "0.59243745", "0.5873996", "0.5798331", "0.5757041", "0.5642884", "0.5611704", "0.5524565", "0.55125636", "0.5489186", "0.547148", "0.54595554", "0.53848314", "0.53768426", "0.53539443", "0.5347165", "0.5280678", "0.52781665", "0.5277861", "0.5238585", "0.52169967", "0.5183853", "0.51831186" ]
0.93647003
0
Test case for get_cons3rt_version
def test_get_cons3rt_version(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_version(self):\n pass", "def test_get_ucs_version(self):\n ver = self.u.get_ucs_version()\n self.assertTrue(isinstance(ver, basestring))\n self.assertEqual('3.0-1', ver)", "def _get_version(self):", "def test_version(self):\n version_instance = get_version('kolibri', __file__)\n self.assertIn(version_instance.major_version, kolibri.__version__)", "def test_get_version():\n result = uflash.get_version()\n assert result == '.'.join([str(i) for i in uflash._VERSION])", "def getversion(): # 3\n res,resargs = _msk.Env.getversion()\n if res != 0:\n raise Error(rescode(res),\"\")\n _major_return_value,_minor_return_value,_build_return_value,_revision_return_value = resargs\n return _major_return_value,_minor_return_value,_build_return_value,_revision_return_value", "def test__get_program_version():\n version = util._get_program_version(\"midgard\")\n assert isinstance(version, str) and re.search(\"[0-9]\", version)", "def test_get_short_version(self):\n pass", "def get_version():\n return 1", "def getversion():\r\n\r\n global VERSION\r\n\r\n if len(VERSION) == 3:\r\n return '{}.{}.{}'.format(VERSION[0], VERSION[1], VERSION[2])\r\n else:\r\n return '{}.{}.{}-{}'.format(VERSION[0], VERSION[1], VERSION[2], VERSION[3])", "def test_get_next_version(self):\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual(None, ver)", "def test_version():\n versions = ((2, 7, 16), (3, 5, 7), (3, 6, 8), (3, 7, 3))\n assert sys.version_info[:3] in versions", "def test_get_oapi_version(self):\n pass", "def py_versiontest(c):\n pass", "def test_low_client(self):\n version, file = self.get('', '3000000001100',\n self.app, self.platform)\n assert version == self.version_1_0_2", "def test_version(self):\n pass", "def test_request_estable_version(self):\n current_stable_version = get_stable_version()\n self.assertIsNotNone(current_stable_version)", "def test_smc_version_value(self):\n \n smc_version = get_smc_version()\n \n # Check to make sure the smc_version is '2.15f7'\n self.assertEqual(smc_version, '2.15f7')", "def testPython3(self):\n resource = Resource.get()\n resource.load(self.__taskPath)\n crawler = FsCrawler.createFromPath(self.__sourcePath)\n dummyTask = Task.create('pythonMajorVerTestTask')\n dummyTask.add(crawler)\n\n wrapper = TaskWrapper.create(\"python3\")\n result = wrapper.run(dummyTask)\n self.assertTrue(len(result), 1)\n self.assertEqual(result[0].var(\"majorVer\"), 3)", "def test1_version(self):\n lVersion = rdbhdb.__version__.split('.')\n nVersion = need_version.split('.')\n self.assert_(lVersion >= nVersion, rdbhdb.__version__)", "def test_version():\n with open(\"pyproject.toml\") as f:\n tomllines = f.read().splitlines()\n tomlversion = set([l for l in tomllines if \"version =\" in l])\n initversion = set([f'version = \"{mei2volpiano.__version__}\"'])\n # set is there to catch any duplicate/additional entries\n assert initversion == tomlversion", "def test_version_exists():\n assert ztm.__version__", "def _get_version(self):\n if _cbc_version is None:\n return _extract_version('')\n return _cbc_version", "def testStratisVersion(self):\n version = Manager.Properties.Version.Get(get_object(TOP_OBJECT))\n (major, _, _) = version.split(\".\")\n self.assertEqual(major, \"0\")", "def is_version_3_or_newer() -> bool:\n if is_apptainer_1_or_newer():\n return True # this is equivalent to singularity-ce > 3.9.5\n v = get_version()\n return v[0][0] >= 3", "def test_version_initialize(self):\n instance = ClassVersionInitialize()\n 
self.assertEqual(instance.version_straight(), \"1.1.1\")\n self.assertEqual(instance.version_default(), \"1.1.2\")\n self.assertEqual(instance.version_both(), \"1.1.3\")", "def get_version():\n return \"0.0.1 (prerelease prototype)\"", "def Version(self) -> _n_0_t_12:", "def Version(self) -> _n_0_t_12:", "def is_version_3_1_or_newer() -> bool:\n if is_apptainer_1_or_newer():\n return True # this is equivalent to singularity-ce > 3.9.5\n v = get_version()\n return v[0][0] >= 4 or (v[0][0] == 3 and v[0][1] >= 1)" ]
[ "0.68376285", "0.64761525", "0.6471795", "0.6415447", "0.6295368", "0.62716836", "0.62359685", "0.6196519", "0.61739135", "0.61196023", "0.6113143", "0.6069466", "0.6060671", "0.6046113", "0.60228825", "0.6013373", "0.5983554", "0.5961622", "0.59597844", "0.5947673", "0.59393126", "0.5913588", "0.5899278", "0.58648896", "0.58570653", "0.5846302", "0.5830922", "0.5825208", "0.5825208", "0.58249044" ]
0.95568246
0
Test case for get_container
def test_get_container(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_show_container(self):\n pass", "def test_create_container(self):\n pass", "def get_container(self, account, container):\n \n pass", "def test_containers(self):\n\n message = {\"method\": \"containers\"}\n response = yield self._get_response(message)\n\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"containers\")\n self.assertIsInstance(response[\"result\"], list)\n self.assertNotEqual(len(response[\"result\"]), 0)\n\n container_name = \"/\" + self.container_to_run\n\n containers = {i[0]: i[1] for i in response[\"result\"]}\n self.assertIn(container_name, containers.keys(),\n \"Container not found\")", "def get_container(self) -> CNT:\n raise NotImplementedError()", "def test_get_container_assets(self):\n pass", "def _get_container(self) -> Container:\n obj = self.get_container()\n return to_container(obj)", "def test_update_container(self):\n pass", "def test_rackspace_uploader_get_container(self, mock1):\r\n with patch('pybossa.uploader.rackspace.pyrax.cloudfiles') as mycf:\r\n cdn_enabled_mock = PropertyMock(return_value=False)\r\n type(fake_container).cdn_enabled = cdn_enabled_mock\r\n mycf.get_container.side_effect = NoSuchContainer\r\n\r\n calls = [call.get_container('user_3'),\r\n call.create_container('user_3'),\r\n call.make_container_public('user_3')\r\n ]\r\n u = RackspaceUploader()\r\n u.init_app(self.flask_app)\r\n assert u.get_container('user_3')\r\n mycf.assert_has_calls(calls, any_order=True)", "def test04_get_container_type(self):\n r = LDPRS()\n self.assertEqual(r.get_container_type(context=\"http://ex.org/aa\"), None)\n self.assertEqual(r.get_container_type(context=\"http://ex.org/aa\", default=LDP.BasicContainer), LDP.BasicContainer)\n r.parse(b'<http://ex.org/aa> <http://ex.org/b> \"1\".')\n self.assertEqual(r.get_container_type(context=\"http://ex.org/aa\"), None)\n r.parse(b'<http://ex.org/aa> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://ex.org/some_type>.')\n self.assertEqual(r.get_container_type(context=\"http://ex.org/aa\"), None)\n r.parse(b'<http://ex.org/aa> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.w3.org/ns/ldp#DirectContainer>.')\n self.assertEqual(r.get_container_type(context=\"http://ex.org/aa\"), LDP.DirectContainer)\n r.parse(b'<http://ex.org/aa> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.w3.org/ns/ldp#IndirectContainer>.')\n self.assertRaises(Exception, r.get_container_type, context=\"http://ex.org/aa\")\n self.assertEqual(r.get_container_type(context=\"http://ex.org/NOT_aa\"), None)", "def test_is_container(self):\n # verify ----------------------\n try:\n 1 in self.collection\n except TypeError:\n msg = \"'Collection' object is not container\"\n self.fail(msg)", "def run(self, container_config: ContainerConfig) -> Container:", "def test_sample_container_init(self):\n self.assertEqual({}, self.container._data)", "def getContainer(self, nwbfile):\n raise NotImplementedError('Cannot run test unless getContainer is implemented')", "def test_container_no_assets(self):\n context = {}\n container_name = \"left\"\n html = container(context, container_name)\n self.assertIn(\"storybase-container-placeholder\", html)\n self.assertIn(container_name, html)", "def test_docker_api(proc):\n assert len(proc.docker_container_id)\n assert proc.docker_inspect()['Id'].startswith(proc.docker_container_id)\n assert proc.docker_stats()['Container'] == proc.docker_container_id", "def test001_create_containers(self):\n self.log('%s STARTED' % self._testID)\n\n self.log('Create a 
two container on that node, should succeed.')\n self.cont1_name = self.random_string()\n self.containers = {self.cont1_name: {'hostname': self.cont1_name,\n 'flist': self.cont_flist,\n 'storage': self.cont_storage}}\n\n self.cont2_name = self.random_string()\n self.containers.update({self.cont2_name: {'hostname': self.cont2_name,\n 'flist': self.cont_flist,\n 'storage': self.cont_storage}})\n\n res = self.create_container(containers=self.containers, temp_actions=self.temp_actions)\n self.assertEqual(type(res), type(dict()))\n self.wait_for_service_action_status(self.cont1_name, res[self.cont1_name]['install'])\n self.wait_for_service_action_status(self.cont2_name, res[self.cont2_name]['install'])\n\n self.log('Check that the container have been created.')\n conts = self.zos_client.container.list()\n self.assertTrue([c for c in conts.values() if c['container']['arguments']['name'] == self.cont1_name])\n self.assertTrue([c for c in conts.values() if c['container']['arguments']['name'] == self.cont2_name])\n cont1 = [c for c in conts.values() if c['container']['arguments']['name'] == self.cont1_name][0]\n self.assertTrue(cont1['container']['arguments']['storage'], self.cont_storage)\n self.assertTrue(cont1['container']['arguments']['root'], self.cont_flist)\n self.assertTrue(cont1['container']['arguments']['hostname'], self.cont_flist)\n\n self.log('%s ENDED' % self._testID)", "def _create_container(self, container_name):\n try:\n container = self.swift.head_container(container_name)\n except client.ClientException:\n self.swift.put_container(container_name)\n else:\n return container", "def __init__(self, container):\r\n self.container = container", "def __init__(self, container):\r\n self.container = container", "def get_container(self, container_uuid):\n if container_uuid not in self.containers:\n if container_uuid == 'RAW' or not container_uuid:\n self.containers[container_uuid] = 'RAW'\n else:\n try:\n container = self.funcx_client.get_container(container_uuid, self.config.container_type)\n except Exception:\n logger.exception(\"[FETCH_CONTAINER] Unable to resolve container location\")\n self.containers[container_uuid] = 'RAW'\n else:\n logger.info(\"[FETCH_CONTAINER] Got container info: {}\".format(container))\n self.containers[container_uuid] = container.get('location', 'RAW')\n return self.containers[container_uuid]", "def test_get_nonexistant_container(self):\n ref = self.container_client._get_base_url() + '/invalid_uuid'\n get_resp = self.container_client.get_container(ref)\n self.assertEqual(get_resp.status_code, 404)", "def getInstacefromContainer(cls, container, valuedict=None):\n # print(container)\n for i in range(len(clslist)):\n if container == clslist[i]().getcontainer():\n tmp = clslist[i](valuedict)\n # print(\"cls :\",clslist[i]().getcontainer(), tmp)\n return tmp\n return None", "def test_create(set_env, container: Container):\n # pylint: disable=unused-argument\n assert container\n assert isinstance(container, Container)", "def __call__(container, name, object):", "def test_getContents(self):\n container = Thing(store=self.store)\n Containment(container)\n \n c = IContainer(container)\n self.assertEqual(list(c.getContents()), [])\n \n a = Thing(store=self.store)\n a.location = container\n b = Thing(store=self.store)\n b.location = container\n \n self.assertEqual(set(c.getContents()), set([a,b]))", "def get_container(self, container_name):\n response = self.client.get_container(container_name)\n return response", "def testGetAllContainers(self):\n containers_list = 
self.explorer_object.GetAllContainers()\n containers_list = sorted(containers_list, key=lambda ci: ci.name)\n self.assertEqual(7, len(containers_list))\n\n container_obj = containers_list[1]\n\n self.assertEqual('/dreamy_snyder', container_obj.name)\n self.assertEqual(\n '2017-02-13T16:45:05.629904159Z', container_obj.creation_timestamp)\n self.assertEqual('busybox', container_obj.config_image_name)\n self.assertTrue(container_obj.running)\n\n self.assertEqual(\n '7b02fb3e8a665a63e32b909af5babb7d6ba0b64e10003b2d9534c7d5f2af8966',\n container_obj.container_id)", "def ensure_container():\n return exec_fn(_init_container)", "def get_container(self, profile, exec_cmd):\n container = None\n if self._value.has_option(profile, 'container'):\n container = self._value.get(profile, 'container')\n elif exec_cmd is not None:\n self.logger.error(\n \"No container parameter found\"\n )\n exit(1)\n\n self.logger.info(\"%s is selected as container\" % container)\n return container" ]
[ "0.78883415", "0.7841005", "0.7772943", "0.73743165", "0.72988445", "0.7253073", "0.7055443", "0.6843021", "0.68394685", "0.68224424", "0.67283326", "0.6673606", "0.6657628", "0.6637826", "0.6592652", "0.6577402", "0.65670544", "0.6516326", "0.65076596", "0.65076596", "0.6498384", "0.6493502", "0.6462806", "0.64623713", "0.6445341", "0.6381719", "0.6353292", "0.6318171", "0.6303309", "0.6301636" ]
0.9426843
0
Test case for get_container_assets
def test_get_container_assets(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_container_assets_expanded(self):\n pass", "def test_get_test_assets(self):\n pass", "def test_get_container(self):\n pass", "def test_get_test_asset(self):\n pass", "def test_container_empty_assets(self):\n context = {\n 'assets': self.section.sectionasset_set.order_by('weight')\n }\n container_name = \"left\"\n html = container(context, container_name)\n self.assertIn(\"storybase-container-placeholder\", html)\n self.assertIn(container_name, html)", "def test_list_system_assets(self):\n pass", "def test_retrieve_system_asset(self):\n pass", "def test_container_no_assets(self):\n context = {}\n container_name = \"left\"\n html = container(context, container_name)\n self.assertIn(\"storybase-container-placeholder\", html)\n self.assertIn(container_name, html)", "def test_container_with_assets(self):\n assets = Asset.objects.select_subclasses()\n left = Container.objects.get(name='left')\n right = Container.objects.get(name='right')\n SectionAsset.objects.create(section=self.section, asset=assets[0],\n container=left)\n SectionAsset.objects.create(section=self.section, asset=assets[1],\n container=right)\n # Refresh the section object to get new relations\n self.section = Section.objects.get(pk=self.section.pk)\n context = {\n 'assets': self.section.sectionasset_set.order_by('weight')\n }\n html = container(context, \"left\")\n self.assertEqual(html, assets[0].render_html())\n html = container(context, \"right\")\n self.assertEqual(html, assets[1].render_html())", "def test_get_test_assets_expanded(self):\n pass", "def get_container(self, account, container):\n \n pass", "def test_redeploy_container_asset(self):\n pass", "def test_create_system_asset(self):\n pass", "def test_list_dependent_assets(self):\n pass", "def assets():", "def test_get_deployment_resources(self):\n pass", "def assets():\n pass", "def test_list_supported_assets(self):\n pass", "def test_create_container(self):\n pass", "def test_ocean_assets_compute(publisher_ocean_instance):\n publisher = get_publisher_wallet()\n metadata = get_computing_metadata()\n metadata[\"main\"][\"files\"][0][\"checksum\"] = str(uuid.uuid4())\n ddo = publisher_ocean_instance.assets.create(metadata, publisher)\n assert ddo\n _ddo = wait_for_ddo(publisher_ocean_instance, ddo.did)\n assert _ddo, f\"assets.resolve failed for did {ddo.did}\"", "def test_show_container(self):\n pass", "def test_import_system_asset(self):\n pass", "def test_rackspace_uploader_get_container(self, mock1):\r\n with patch('pybossa.uploader.rackspace.pyrax.cloudfiles') as mycf:\r\n cdn_enabled_mock = PropertyMock(return_value=False)\r\n type(fake_container).cdn_enabled = cdn_enabled_mock\r\n mycf.get_container.side_effect = NoSuchContainer\r\n\r\n calls = [call.get_container('user_3'),\r\n call.create_container('user_3'),\r\n call.make_container_public('user_3')\r\n ]\r\n u = RackspaceUploader()\r\n u.init_app(self.flask_app)\r\n assert u.get_container('user_3')\r\n mycf.assert_has_calls(calls, any_order=True)", "def test_existing_content_asset(self):\n with self.app.test_client() as client:\n response = client.get('/contentassets/logo.png')\n\n self.assert200(response)", "def test_container_no_asset_for_container(self):\n assets = Asset.objects.select_subclasses()\n right = Container.objects.get(name='right')\n SectionAsset.objects.create(section=self.section, asset=assets[0],\n container=right)\n # Refresh the section object to get new relations\n self.section = Section.objects.get(pk=self.section.pk)\n context = {\n 'assets': 
self.section.sectionasset_set.order_by('weight')\n }\n container_name = \"left\"\n html = container(context, container_name)\n self.assertIn(\"storybase-container-placeholder\", html)\n self.assertIn(container_name, html)", "def test_list_dependent_assets2(self):\n pass", "def test_ocean_assets_algorithm(publisher_ocean_instance):\n publisher = get_publisher_wallet()\n metadata = get_sample_algorithm_ddo()[\"service\"][0]\n metadata[\"attributes\"][\"main\"][\"files\"][0][\"checksum\"] = str(uuid.uuid4())\n ddo = publisher_ocean_instance.assets.create(metadata[\"attributes\"], publisher)\n assert ddo\n _ddo = wait_for_ddo(publisher_ocean_instance, ddo.did)\n assert _ddo, f\"assets.resolve failed for did {ddo.did}\"", "def test_containers(self):\n\n message = {\"method\": \"containers\"}\n response = yield self._get_response(message)\n\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"containers\")\n self.assertIsInstance(response[\"result\"], list)\n self.assertNotEqual(len(response[\"result\"]), 0)\n\n container_name = \"/\" + self.container_to_run\n\n containers = {i[0]: i[1] for i in response[\"result\"]}\n self.assertIn(container_name, containers.keys(),\n \"Container not found\")", "def test_resource_container(self):\n sdram = SDRAMResource(128 * (2**20))\n dtcm = DTCMResource(128 * (2**20) + 1)\n cpu = CPUCyclesResource(128 * (2**20) + 2)\n\n container = ResourceContainer(dtcm, sdram, cpu)\n self.assertEqual(container.sdram.get_value(), 128 * (2**20))\n self.assertEqual(container.dtcm.get_value(), 128 * (2**20) + 1)\n self.assertEqual(container.cpu.get_value(), 128 * (2**20) + 2)\n\n sdram = SDRAMResource(128 * (2**19))\n dtcm = DTCMResource(128 * (2**19) + 1)\n cpu = CPUCyclesResource(128 * (2**19) + 2)\n\n container = ResourceContainer(dtcm, sdram, cpu)\n self.assertEqual(container.sdram.get_value(), 128 * (2**19))\n self.assertEqual(container.dtcm.get_value(), 128 * (2**19) + 1)\n self.assertEqual(container.cpu.get_value(), 128 * (2**19) + 2)\n\n sdram = SDRAMResource(128 * (2**21))\n dtcm = DTCMResource(128 * (2**21) + 1)\n cpu = CPUCyclesResource(128 * (2**21) + 2)\n\n container = ResourceContainer(dtcm, sdram, cpu)\n self.assertEqual(container.sdram.get_value(), 128 * (2**21))\n self.assertEqual(container.dtcm.get_value(), 128 * (2**21) + 1)\n self.assertEqual(container.cpu.get_value(), 128 * (2**21) + 2)", "def test_list_dependent_assets1(self):\n pass" ]
[ "0.8120444", "0.7526939", "0.7163539", "0.70364815", "0.6906849", "0.69020694", "0.6875423", "0.6536799", "0.6524495", "0.6523459", "0.64177734", "0.6365273", "0.63496923", "0.6323022", "0.6294593", "0.62307656", "0.62048495", "0.6198302", "0.61757773", "0.6091485", "0.60836357", "0.60413873", "0.6006145", "0.5955135", "0.595421", "0.59477174", "0.5943797", "0.5892415", "0.58775514", "0.58685726" ]
0.94387263
0
Test case for get_container_assets_expanded
def test_get_container_assets_expanded(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_test_assets_expanded(self):\n pass", "def test_get_container_assets(self):\n pass", "def test_get_software_asset_bundle_expanded(self):\n pass", "def test_container_empty_assets(self):\n context = {\n 'assets': self.section.sectionasset_set.order_by('weight')\n }\n container_name = \"left\"\n html = container(context, container_name)\n self.assertIn(\"storybase-container-placeholder\", html)\n self.assertIn(container_name, html)", "def test_get_deployments_expanded(self):\n pass", "def test_container_with_assets(self):\n assets = Asset.objects.select_subclasses()\n left = Container.objects.get(name='left')\n right = Container.objects.get(name='right')\n SectionAsset.objects.create(section=self.section, asset=assets[0],\n container=left)\n SectionAsset.objects.create(section=self.section, asset=assets[1],\n container=right)\n # Refresh the section object to get new relations\n self.section = Section.objects.get(pk=self.section.pk)\n context = {\n 'assets': self.section.sectionasset_set.order_by('weight')\n }\n html = container(context, \"left\")\n self.assertEqual(html, assets[0].render_html())\n html = container(context, \"right\")\n self.assertEqual(html, assets[1].render_html())", "def test_get_test_assets(self):\n pass", "def test_get_projects_expanded(self):\n pass", "def test_list_dependent_assets(self):\n pass", "def test_list_system_assets(self):\n pass", "def test_list_dependent_assets2(self):\n pass", "def test_container_no_assets(self):\n context = {}\n container_name = \"left\"\n html = container(context, container_name)\n self.assertIn(\"storybase-container-placeholder\", html)\n self.assertIn(container_name, html)", "def test_get_container(self):\n pass", "def test_redeploy_container_asset(self):\n pass", "def test_list_dependent_assets1(self):\n pass", "def test_get_test_asset(self):\n pass", "def test_show_container(self):\n pass", "def test_container_no_asset_for_container(self):\n assets = Asset.objects.select_subclasses()\n right = Container.objects.get(name='right')\n SectionAsset.objects.create(section=self.section, asset=assets[0],\n container=right)\n # Refresh the section object to get new relations\n self.section = Section.objects.get(pk=self.section.pk)\n context = {\n 'assets': self.section.sectionasset_set.order_by('weight')\n }\n container_name = \"left\"\n html = container(context, container_name)\n self.assertIn(\"storybase-container-placeholder\", html)\n self.assertIn(container_name, html)", "def test_list_dependent_assets3(self):\n pass", "def test_get_scenarios_expanded(self):\n pass", "def test_retrieve_system_asset(self):\n pass", "def get_all_assets(self):\n\n with preserve_expanded_rows(self.view):\n with preserve_selection(self.view):\n self.clear()\n containers = lib.get_containers()\n items = lib.create_node(containers)\n self.add_items(items)\n\n return len(items) > 0", "def assets():", "def test_list_supported_assets(self):\n pass", "def test_flatten_inventory(self):\n pass", "def test_get_deployment_resources(self):\n pass", "def populateMasteredAssets(*args):\n #clear the lists first\n cmds.textScrollList(widgets[\"shotAssRigCharListTSL\"], e=True, ra=True)\n cmds.textScrollList(widgets[\"shotAssRigPropListTSL\"], e=True, ra=True)\n cmds.textScrollList(widgets[\"shotAssRigSetListTSL\"], e=True, ra=True)\n cmds.textScrollList(widgets[\"shotAnmMstListTSL\"], e=True, ra=True)\n\n chars, props, sets = cFuncs.getProjectAssetList(pi.assetFolder)\n\n #check for rig masters\n for char in chars:\n cMstr = cFuncs.getAssetMaster(char, 
cFuncs.fixPath(os.path.join(pi.assetFolder, \"characters\", char)), \"rig\")\n if cMstr:\n cmds.textScrollList(widgets[\"shotAssRigCharListTSL\"], e=True, a=char, dcc=showAssetImage)\n for prop in props:\n pMstr = cFuncs.getAssetMaster(prop, cFuncs.fixPath(os.path.join(pi.assetFolder, \"props\", prop)), \"rig\") \n if pMstr:\n cmds.textScrollList(widgets[\"shotAssRigPropListTSL\"], e=True, a=prop, dcc=showAssetImage)\n for sett in sets:\n sMstr = cFuncs.getAssetMaster(sett, cFuncs.fixPath(os.path.join(pi.assetFolder, \"sets\", sett)), \"rig\") \n if sMstr:\n cmds.textScrollList(widgets[\"shotAssRigSetListTSL\"], e=True, a=sett, dcc=showAssetImage)\n\n #check for anim variants and masters\n varAnm = []\n shots = cFuncs.getProjectShotList(pi.currentProject)\n # print \"shotWin.populateMasteredAssets (line 937): shots =\", shots\n if shots:\n for shot in shots:\n shotVars = cFuncs.getShotVariantDict(os.path.join(pi.currentProject, \"shots\", shot))\n if shotVars[\"anm\"]:\n for anm in shotVars[\"anm\"]:\n aMstr = cFuncs.getVarMaster(cFuncs.fixPath(os.path.join(pi.currentProject, \"shots\", shot, \"anm\", anm)))\n #print cFuncs.fixPath(os.path.join(pi.currentProject, \"shots\", shot, \"anm\", anm))\n if aMstr: \n varAnm.append(\"{0}.{1}\".format(anm, shot))\n\n for av in varAnm:\n cmds.textScrollList(widgets[\"shotAnmMstListTSL\"], e=True, a=av)\n\n populateSceneRefs()", "def assets():\n pass", "def _get_container_preview(self, usage_key):\r\n preview_url = reverse_usage_url(\"xblock_view_handler\", usage_key, {'view_name': 'container_preview'})\r\n resp = self.client.get(preview_url, HTTP_ACCEPT='application/json')\r\n self.assertEqual(resp.status_code, 200)\r\n resp_content = json.loads(resp.content)\r\n html = resp_content['html']\r\n self.assertTrue(html)\r\n resources = resp_content['resources']\r\n self.assertIsNotNone(resources)\r\n return html, resources", "def test_read_artifact(self):\n pass" ]
[ "0.8418383", "0.75024664", "0.72617674", "0.62497663", "0.6212591", "0.60598075", "0.5904019", "0.5860325", "0.57519877", "0.56163764", "0.55987006", "0.55839", "0.55167836", "0.5488663", "0.5478161", "0.5465407", "0.5379946", "0.5366149", "0.5349425", "0.5327451", "0.5299458", "0.52425605", "0.5203561", "0.5189948", "0.5168216", "0.51586527", "0.5155695", "0.5135428", "0.50642544", "0.4991717" ]
0.94806
0
Test case for get_default_network
def test_get_default_network(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_network(self):\n pass", "def get_default(self, create=True):\n if self._default_network is None and create:\n log.debug(\"Creating default network...\")\n self._default_network = self.create('default', driver='bridge')\n\n return self._default_network", "def test_networking_project_network_get(self):\n pass", "def GetDefaultWiredNetwork(self):\n config = ConfigParser.ConfigParser()\n config.read(self.wired_conf)\n profileList = config.sections()\n for profile in profileList:\n if config.has_option(profile, \"default\"):\n if misc.to_bool(config.get(profile, \"default\")):\n return profile\n return None", "def test_get_networks(self):\n pass", "def get_network_default_gateway(self):\n return self.mycam.devicemgmt.GetNetworkDefaultGateway()", "def init_default_network(\n agent_type: DefaultSupportedAgent,\n agent_spec: specs.EnvironmentSpec) -> ma_types.Networks:\n if agent_type == DefaultSupportedAgent.TD3:\n return td3.make_networks(agent_spec)\n elif agent_type == DefaultSupportedAgent.SAC:\n return sac.make_networks(agent_spec)\n elif agent_type == DefaultSupportedAgent.PPO:\n return ppo.make_networks(agent_spec)\n else:\n raise ValueError(f'Unsupported agent type: {agent_type}.')", "def get_default_network_policy(con):\n try:\n return con.network_policy_read(fq_name=conf.get('default_network_policy', 'UNEXPECTED_VALUE'))\n except NoIdError:\n log.debug('Unable to find default_network_policy')\n return None", "def test_create_network():\n _network = Network()", "def test_add_network(self):\n pass", "def create_default_network(context):\n return [{\n 'type': 'templates/network.py',\n 'name': 'fc-network',\n 'properties': {\n 'resourceName': 'network',\n 'name': 'network',\n 'projectId': '$(ref.fc-project.projectId)',\n 'autoCreateSubnetworks': True,\n # We pass the dependsOn list into the network template as a\n # parameter. 
Deployment Manager doesn't support dependsOn for\n # template-call nodes, so we can't have this resource itself depend on\n # the project-wide resources.\n 'dependsOn': '$(ref.fc-project.resourceNames)',\n },\n }]", "def guess_network(self):\n # decide what sort of network we are going to use\n # return the actual type\n # right now we just use the first host only network and that's it\n host_only = list(HostOnlyNetwork.find_networks())\n if host_only:\n return host_only[0]\n else:\n return NewHostOnlyNetwork()", "def _get_network_type(self, host):\n network_type = host.get(\"network\")\n default_network = self.config.get(\"default_network\")\n if network_type is None:\n network_type = self._metadata.get(\"network\", default_network)\n if not network_type:\n raise ProvisioningConfigError(\n \"No network type specified and project doesn't have default \"\n \"network type (property 'default_network') specified in \"\n \"provisioning config.\"\n )\n return network_type", "def default (no_flow = False,\n network = \"192.168.0.0/24\", # Address range\n first = 100, last = 199, count = None, # Address range\n ip = \"192.168.0.254\",\n router = (), # Auto\n dns = ()): # Auto\n launch(no_flow, network, first, last, count, ip, router, dns)", "def test_networking_project_network_service_get(self):\n pass", "def test_networking_project_network_create(self):\n pass", "def test_register_network(self):\n pass", "def get_net(con):\n try:\n return con.virtual_network_read(fq_name=conf.get('default_net', 'UNEXPECTED_VALUE'))\n except NoIdError:\n log.debug('Unable to find net.')\n return None", "def get_default_device():\n return MXNET_DEFAULT_DEVICE", "def test_default_connection_updated_correctly(self):\n result = self.run_cli_command(\n \"--skip-consistency-check\",\n \"config\",\n \"get\",\n \"agent.default_connection\",\n cwd=self._get_cwd(),\n )\n assert result.stdout == \"fetchai/stub:0.21.3\\n\"", "def getDefault():", "def dvs_connect_nodefault_net(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n _sg_groups = os_conn.neutron.list_security_groups()['security_groups']\n _srv_tenant = os_conn.get_tenant(SERVTEST_TENANT).id\n default_sg = [sg for sg in _sg_groups\n if sg['tenant_id'] == _srv_tenant and\n sg['name'] == 'default'][0]\n\n self.show_step(2)\n net_1 = os_conn.create_network(\n network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id\n )['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network are created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n access_point, access_point_ip = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n security_groups=[security_group.name, default_sg['name']])\n\n self.show_step(3)\n 
self.show_step(4)\n openstack.create_instances(os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=1,\n security_groups=[default_sg['name']])\n openstack.verify_instance_state(os_conn)\n\n # Get private ips of instances\n instances = [instance for instance in os_conn.get_servers()\n if instance.id != access_point.id]\n ips = [os_conn.get_nova_instance_ip(i, net_name=net_1['name'])\n for i in instances]\n\n self.show_step(5)\n ip_pair = dict.fromkeys(ips)\n for key in ip_pair:\n ip_pair[key] = [value for value in ips if key != value]\n openstack.check_connection_through_host(access_point_ip, ip_pair)", "def test_default_route(self):\n self.assertEqual(\n DogStatsd(use_default_route=True).host,\n \"172.17.0.1\"\n )", "def test_aws_service_api_networks_get(self):\n pass", "def test_default_router(self):\n assert self.rc_conf.has_key('defaultrouter')\n assert self.rc_conf['defaultrouter'] == '\"10.137.1.7\"'", "def get_default_route():\n # Discover the active/preferred network interface \n # by connecting to Google's public DNS server\n try:\n with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:\n s.settimeout(2)\n s.connect((\"8.8.8.8\", 80))\n iface_ip = s.getsockname()[0]\n except socket.error:\n sys.stderr.write('IoT Inspector cannot run without network connectivity.\\n')\n sys.exit(1)\n\n while True:\n routes = _get_routes()\n default_route = None\n for route in routes:\n if route[4] == iface_ip:\n # Reassign scapy's default interface to the one we selected\n sc.conf.iface = route[3]\n default_route = route[2:5]\n break\n if default_route:\n break\n\n log('get_default_route: retrying')\n time.sleep(1)\n \n\n # If we are using windows, conf.route.routes table doesn't update.\n # We have to update routing table manually for packets\n # to pick the correct route. \n if sys.platform.startswith('win'):\n for i, route in enumerate(routes):\n # if we see our selected iface, update the metrics to 0\n if route[3] == default_route[1]:\n routes[i] = (*route[:-1], 0)\n\n return default_route", "def _check_and_set_network(self) -> None:\n from hathor.transaction.storage.exceptions import WrongNetworkError\n\n network = settings.NETWORK_NAME\n stored_network = self.get_network()\n\n if stored_network is None:\n # no network is set, let's try to infer it\n self._checked_set_network(network)\n elif stored_network != network:\n # the stored network does not match, something is wrong\n raise WrongNetworkError(f'Databases created on {stored_network}, expected {network}')\n else:\n # the network is what is expected, nothing to do here\n pass", "def test_networking_project_network_list(self):\n pass", "def get_network(self):\n return self.get_ip_network()[-1]", "def get_network(networkToCheck):\r\n\tnet = networkToCheck.get()\r\n\tif netaddr.valid_ipv4(net[:-3]):\r\n\t\tnetworkInfo = f'''NETWORK: {IPNetwork(net).network}\r\nFIRST HOST: {get_first(net)}\r\nLAST HOST: {get_last(net)}\r\nBROADCAST: {IPNetwork(net).broadcast}\r\nNETMASK: {IPNetwork(net).netmask}\r\nNEXT NETWORK: {IPNetwork(net).next()}\\n'''\r\n\t\tnetworkVar.set(networkInfo)\r\n\telse:\r\n\t\tnetworkVar.set(f'**Error**: \"{net}\" is not a valid ip\\nExample: \"192.168.1.0/24\"')" ]
[ "0.77924114", "0.74992687", "0.70113444", "0.68592983", "0.685635", "0.6743069", "0.67254055", "0.65950114", "0.6542788", "0.6528922", "0.648575", "0.644165", "0.6399284", "0.6368224", "0.6341696", "0.63401234", "0.62886524", "0.6271149", "0.6237702", "0.62118465", "0.6202367", "0.61635625", "0.6163237", "0.6149638", "0.61461675", "0.6135768", "0.61243635", "0.60853785", "0.6080841", "0.60618466" ]
0.9539576
0
Test case for get_deployment
def test_get_deployment(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_deployment_run(self):\n pass", "def test_get_deployment_resource(self):\n pass", "def test_get_deployment_runs1(self):\n pass", "def test_get_deployment_resources(self):\n pass", "def test_get_deployment_resource_data(self):\n pass", "def test_get_deployment_runs(self):\n pass", "def test_create_deployment(self):\n pass", "def test_get_deployments(self):\n pass", "def test_get_deployments(self):\n pass", "def test_execute_deployment(self):\n pass", "def test_create_deployment_entire(self):\n pass", "def test_get_deployment_runs_in_virtualization_realm(self):\n pass", "def test_update_deployment(self):\n pass", "def test_launch_deployment(self):\n pass", "def test_release_deployment_run(self):\n pass", "def test_publish_deployment_run(self):\n pass", "def handle_get_deployment(project_id, deployment_id):\n return jsonify(get_deployment(uuid=deployment_id, project_id=project_id))", "def test_get_deployment_metric(self):\n pass", "def test_get_deployment_run_reports(self):\n pass", "def test_deployment(self):\n config = {'example.com': {\n 'applications': {\n 'site-hybridcluster': {\n 'image': 'unknown',\n },\n 'mysql-hybridcluster': {\n 'image': 'unknown',\n }\n },\n 'version': 1\n }}\n expected = Deployment(nodes=frozenset([\n Node(hostname='example.com', applications=frozenset([\n Application(\n name='mysql-hybridcluster',\n image=DockerImage.from_string('unknown'),\n ports=frozenset(),\n ),\n Application(\n name='site-hybridcluster',\n image=DockerImage.from_string('unknown'),\n )]))]))\n self.assertEqual(expected,\n current_from_configuration(config))", "def test_delete_deployment(self):\n pass", "def test_read_namespaced_deployment_config(self):\n pass", "def test_get_bindings_for_deployment(self):\n pass", "def test_delete_deployment_run(self):\n pass", "def test_get_deployments_expanded(self):\n pass", "def test_update_deployment_state(self):\n pass", "def test_read_namespaced_deployment_config_status(self):\n pass", "def test_retest_deployment_run(self):\n pass", "def get_deployment_output(account_name: Optional[pulumi.Input[str]] = None,\n deployment_name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetDeploymentResult]:\n ...", "def test_clone_deployment(self):\n pass" ]
[ "0.8806934", "0.8803407", "0.8363606", "0.8245148", "0.80494815", "0.800243", "0.785613", "0.78456664", "0.78456664", "0.7714773", "0.74758905", "0.74579734", "0.7267071", "0.7226769", "0.71851367", "0.71055573", "0.7091958", "0.7005344", "0.69216394", "0.6911787", "0.6785751", "0.67837894", "0.6660957", "0.6621807", "0.6583082", "0.65440655", "0.6530932", "0.6513165", "0.64747864", "0.64224386" ]
0.9451193
1
Test case for get_deployment_metric
def test_get_deployment_metric(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_deployment_resource(self):\n        pass", "def test_get_deployment_resource_data(self):\n        pass", "def test_get_deployment(self):\n        pass", "def test_get_deployment(self):\n        pass", "def test_get_derived_metric(self):\n        pass", "def test_get_deployment_run(self):\n        pass", "def test_get_derived_metric_by_version(self):\n        pass", "def test_get_deployment_resources(self):\n        pass", "def test_get_deployment_runs1(self):\n        pass", "def test_get_host_configuration_metrics(self):\n        pass", "def extract_metric(self, json_field: str, metric_namespace: str, metric_name: str) -> aws_cdk.aws_cloudwatch.Metric:\n        ...", "def test_api_build_metrics_get(self):\n        default_api = DefaultApi(api_client=self.api_client)\n        params = dlrnapi_client.MetricsRequest()\n        path, method = default_api.api_build_metrics_get(params)\n        self.assertEqual(path, '/api/metrics/builds')\n        self.assertEqual(method, 'GET')", "def get_metric_policy(ContainerName=None):\n    pass", "def test_get_host_configuration_metrics1(self):\n        pass", "def test_get_derived_metric_tags(self):\n        pass", "def test_get_deployment_runs(self):\n        pass", "def test_create_derived_metric(self):\n        pass", "def _get_eval_metric(self):\n    raise NotImplementedError", "def handler(event, context):\n    try:\n        # Retrieve environment variables\n        dimension_name = getenv(\"CODEDEPLOY_DIMENSION_NAME\")\n        metric_name = getenv(\"CODEDEPLOY_METRIC_NAME\")\n        if not dimension_name or not metric_name:\n            return \"CODEDEPLOY_DIMENSION_NAME or CODEDEPLOY_METRIC_NAME not set\"\n\n        # Get deployment state from CodeDeploy event\n        deployment_state = event[\"detail\"][\"state\"]\n        print(f\"Deployment state: {deployment_state}\")\n\n        # Pushing custom metric to CW\n        response = boto3.client(\"cloudwatch\").put_metric_data(\n            MetricData=[\n                {\n                    \"MetricName\": metric_name,\n                    \"Dimensions\": [{\"Name\": dimension_name, \"Value\": deployment_state}],\n                    \"Unit\": \"None\",\n                    \"Value\": 1,\n                    \"Timestamp\": datetime.datetime.now(),\n                },\n            ],\n            Namespace=\"CodeDeployDeploymentStates\",\n        )\n        print(f\"Response from CW service: {response}\")\n        return response\n    # pylint: disable=broad-except\n    except Exception as excpt:\n        print(f\"Execution failed... {excpt}\")\n        return None", "def test_get_deployment_run_reports(self):\n        pass", "def _get_static_metric(self):\n        return self.__static_metric", "def test_create_goal_metric(self):\n        pass", "def test_get_cluster_capacity_daily_resolution(self):\n        url = \"?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=daily\"\n        query_params = self.mocked_query_params(url, OCPCpuView)\n        handler = OCPReportQueryHandler(query_params)\n        query_data = handler.execute_query()\n\n        daily_capacity = defaultdict(Decimal)\n        total_capacity = Decimal(0)\n        query_filter = handler.query_filter\n        query_group_by = [\"usage_start\", \"cluster_id\"]\n        annotations = {\"capacity\": Max(\"cluster_capacity_cpu_core_hours\")}\n        cap_key = list(annotations.keys())[0]\n\n        q_table = handler._mapper.provider_map.get(\"tables\").get(\"query\")\n        query = q_table.objects.filter(query_filter)\n\n        with tenant_context(self.tenant):\n            cap_data = query.values(*query_group_by).annotate(**annotations)\n            for entry in cap_data:\n                date = handler.date_to_string(entry.get(\"usage_start\"))\n                daily_capacity[date] += entry.get(cap_key, 0)\n            cap_data = query.values(*query_group_by).annotate(**annotations)\n            for entry in cap_data:\n                total_capacity += entry.get(cap_key, 0)\n\n        self.assertEqual(query_data.get(\"total\", {}).get(\"capacity\", {}).get(\"value\"), total_capacity)\n        for entry in query_data.get(\"data\", []):\n            date = entry.get(\"date\")\n            values = entry.get(\"values\")\n            if values:\n                capacity = values[0].get(\"capacity\", {}).get(\"value\")\n                self.assertEqual(capacity, daily_capacity[date])", "def get(metric_name: Text, dataset_info=None):\n  # Register your metric by adding an entry in the dictionary below.\n  return base.registry.get_instance(metric_name, dataset_info=dataset_info)", "def _get_metric(name):\n    try:\n        return metrics.metric(name)\n    except InvalidMetricError:\n        return None", "def test_read_namespaced_deployment_config_status(self):\n        pass", "def __get_aws_metric(table_name, gsi_name, time_frame, metric_name):\n        return cloudwatch_connection.get_metric_statistics(\n            period=time_frame,\n            start_time=datetime.utcnow()-timedelta(minutes=10, seconds=time_frame),\n            end_time=datetime.utcnow()-timedelta(minutes=10),\n            metric_name=metric_name,\n            namespace='AWS/DynamoDB',\n            statistics=['Sum'],\n            dimensions={\n                'TableName': table_name,\n                'GlobalSecondaryIndexName': gsi_name\n            },\n            unit='Count')", "def test_metrics(client):\n    response = client.get(\"/metrics\")\n    assert response.status_code == 200", "def get_metric_info(self):\n        metric_data_object = self.client.get_metric_data(\n            MetricDataQueries=[\n                {\n                    \"Id\": \"cdbdata_invocations\",\n                    \"MetricStat\": {\n                        \"Metric\": {\n                            \"Namespace\": \"AWS/Lambda\",\n                            \"MetricName\": \"Invocations\",\n                            \"Dimensions\": [\n                                {\n                                    \"Name\": \"FunctionName\",\n                                    \"Value\": self.function_name\n                                }\n                            ]\n                        },\n                        \"Period\": 60,\n                        \"Stat\": \"Sum\"\n                    },\n                    \"ReturnData\": True\n                },\n                {\n                    \"Id\": \"cdbdata_errors\",\n                    \"MetricStat\": {\n                        \"Metric\": {\n                            \"Namespace\": \"AWS/Lambda\",\n                            \"MetricName\": \"Errors\",\n                            \"Dimensions\": [\n                                {\n                                    \"Name\": \"FunctionName\",\n                                    \"Value\": self.function_name\n                                }\n                            ]\n                        },\n                        \"Period\": 60,\n                        \"Stat\": \"Sum\"\n                    },\n                    \"ReturnData\": True\n                },\n                {\n                    \"Id\": \"cdbdata_throttles\",\n                    \"MetricStat\": {\n                        \"Metric\": {\n                            \"Namespace\": \"AWS/Lambda\",\n                            \"MetricName\": \"Throttles\",\n                            \"Dimensions\": [\n                                {\n                                    \"Name\": \"FunctionName\",\n                                    \"Value\": self.function_name\n                                }\n                            ]\n                        },\n                        \"Period\": 60,\n                        \"Stat\": \"Sum\"\n                    },\n                    \"ReturnData\": True\n                },\n                {\n                    \"Id\": \"cdbdata_concurrentexec\",\n                    \"MetricStat\": {\n                        \"Metric\": {\n                            \"Namespace\": \"AWS/Lambda\",\n                            \"MetricName\": \"ConcurrentExecutions\",\n                            \"Dimensions\": [\n                                {\n                                    \"Name\": \"FunctionName\",\n                                    \"Value\": self.function_name\n                                }\n                            ]\n                        },\n                        \"Period\": 60,\n                        \"Stat\": \"Sum\"\n                    },\n                    \"ReturnData\": True\n                }\n            ],\n            StartTime=self.start_timestamp,\n            EndTime=self.end_timestamp,\n            ScanBy='TimestampDescending'\n        )\n\n        metric_data_points = metric_data_object[DataPointsCollector.RESPONSE_KEY]\n\n        return metric_data_points", "def get_deployment_data(cluster_id, namespace_id=None, deployment_id=None):\n    # deployment detail\n    if deployment_id and namespace_id is not None:\n        # creating cell-pod mapping for getting cell details\n        cell_pod_map = get_cell_pod_map(cluster_id)\n        # getting pod count\n        pods_data = [pod for pod in get_pod_data(cluster_id, namespace_id, deployment_id)]\n        core_api = client.CoreV1Api()\n        apps_api = client.AppsV1Api()\n        deployment_cell_list = list()\n        deployment_pod_list = list()\n        for pod_name in pods_data:\n            if pod_name['name'] in cell_pod_map:\n                namespaced_pod_info = core_api.read_namespaced_pod(pod_name['name'], namespace_id).metadata.owner_references[0]\n                if namespaced_pod_info.kind == 'ReplicaSet':\n                    replica_set = apps_api.read_namespaced_replica_set(core_api.read_namespaced_pod(\n                        pod_name['name'], namespace_id).metadata.owner_references[0].name, namespace_id)\n                    if replica_set.metadata.owner_references[0].name == deployment_id and pod_name['name'] in cell_pod_map \\\n                            and pod_name['status']=='Running':\n                        # fetching pods based on deployment\n                        deployment_pod_list.append(pod_name['name'])\n                        # fetching cells based on pods and deployment\n                        deployment_cell_list.append(cell_pod_map[pod_name['name']]['cell_name'])\n                    else:\n                        continue\n            else:\n                continue\n        # if there are no pods for the passed deployment\n        if len(deployment_pod_list) == 0:\n            pods_for_resource_calculation = 'no_pod_resource'\n        else:\n            pods_for_resource_calculation = deployment_pod_list\n\n        deployments_info = {\n            'resource_count': {\n                'cells': len(deployment_cell_list),\n                'pods': len(deployment_pod_list)\n            },\n            'resource_info': get_resource_info(cluster_id, 'pods', namespace_id, pods_for_resource_calculation)\n        }\n\n    # deployment listing\n    else:\n        if namespace_id:\n            deployments_info = [{'value': deployment_item.metadata.name, 'label': deployment_item.metadata.name}\n                                for deployment_item in\n                                client.AppsV1beta2Api().list_namespaced_deployment(namespace_id).items]\n        else:\n            deployments_info = [{'value': deployment_item.metadata.name, 'label': deployment_item.metadata.name}\n                                for deployment_item in\n                                client.AppsV1beta2Api().list_deployment_for_all_namespaces().items]\n    return deployments_info" ]
[ "0.69096184", "0.6758528", "0.6713598", "0.6713598", "0.6592443", "0.64808583", "0.6403167", "0.625861", "0.6237708", "0.6157586", "0.6136067", "0.6127703", "0.6085391", "0.60746324", "0.60693747", "0.6025807", "0.593881", "0.5857284", "0.5829559", "0.58050215", "0.5769839", "0.572145", "0.56748575", "0.5674416", "0.567406", "0.56721735", "0.56667954", "0.56506497", "0.56294894", "0.56238174" ]
0.9364589
0
Test case for get_deployment_run
def test_get_deployment_run(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_deployment_runs(self):\n        pass", "def test_get_deployment_runs1(self):\n        pass", "def test_get_deployment(self):\n        pass", "def test_get_deployment(self):\n        pass", "def test_execute_deployment(self):\n        pass", "def test_get_deployment_run_reports(self):\n        pass", "def test_release_deployment_run(self):\n        pass", "def test_get_deployment_resource(self):\n        pass", "def test_get_deployment_runs_in_virtualization_realm(self):\n        pass", "def test_publish_deployment_run(self):\n        pass", "def test_launch_deployment(self):\n        pass", "def test_get_deployment_resource_data(self):\n        pass", "def test_get_deployment_resources(self):\n        pass", "def test_retest_deployment_run(self):\n        pass", "def test_create_deployment(self):\n        pass", "def test_delete_deployment_run(self):\n        pass", "def test_get_deployments(self):\n        pass", "def test_get_deployments(self):\n        pass", "def test_relaunch_deployment_run(self):\n        pass", "def test_get_run(self):\n        pass", "def test_update_deployment(self):\n        pass", "def test_create_deployment_entire(self):\n        pass", "def test_download_deployment_run_test_report(self):\n        pass", "def test_get_deployment_metric(self):\n        pass", "def test_deployment(self):\n        config = {'example.com': {\n            'applications': {\n                'site-hybridcluster': {\n                    'image': 'unknown',\n                },\n                'mysql-hybridcluster': {\n                    'image': 'unknown',\n                }\n            },\n            'version': 1\n        }}\n        expected = Deployment(nodes=frozenset([\n            Node(hostname='example.com', applications=frozenset([\n                Application(\n                    name='mysql-hybridcluster',\n                    image=DockerImage.from_string('unknown'),\n                    ports=frozenset(),\n                ),\n                Application(\n                    name='site-hybridcluster',\n                    image=DockerImage.from_string('unknown'),\n                )]))]))\n        self.assertEqual(expected,\n                         current_from_configuration(config))", "def create_deployment_run(project_id, deployment_id, is_experiment_deployment):\n    raise_if_project_does_not_exist(project_id)\n\n    if is_experiment_deployment:\n        deployment = Experiment.query.get(deployment_id)\n    else:\n        deployment = Deployment.query.get(deployment_id)\n\n    if deployment is None:\n        raise NOT_FOUND\n\n    deploy_operators = []\n    operators = deployment.operators\n    if operators and len(operators) > 0:\n        for operator in operators:\n            task = Task.query.get(operator.task_id)\n            deploy_operator = {\n                \"arguments\": task.arguments,\n                \"commands\": task.commands,\n                \"dependencies\": operator.dependencies,\n                \"image\": task.image,\n                \"notebookPath\": task.deployment_notebook_path,\n                \"operatorId\": operator.uuid,\n            }\n            deploy_operators.append(deploy_operator)\n    else:\n        raise BadRequest('Necessary at least one operator')\n\n    deploy_operators = remove_non_deployable_operators(deploy_operators)\n    pipeline = Pipeline(deployment_id, deployment.name, deploy_operators)\n    pipeline.compile_deployment_pipeline()\n    return pipeline.run_pipeline()", "def test_delete_deployment(self):\n        pass", "def test_update_deployment_state(self):\n        pass", "def test_set_power_schedule_for_deployment_run(self):\n        pass", "def get_deployment_data(cluster_id, namespace_id=None, deployment_id=None):\n    # deployment detail\n    if deployment_id and namespace_id is not None:\n        # creating cell-pod mapping for getting cell details\n        cell_pod_map = get_cell_pod_map(cluster_id)\n        # getting pod count\n        pods_data = [pod for pod in get_pod_data(cluster_id, namespace_id, deployment_id)]\n        core_api = client.CoreV1Api()\n        apps_api = client.AppsV1Api()\n        deployment_cell_list = list()\n        deployment_pod_list = list()\n        for pod_name in pods_data:\n            if pod_name['name'] in cell_pod_map:\n                namespaced_pod_info = core_api.read_namespaced_pod(pod_name['name'], namespace_id).metadata.owner_references[0]\n                if namespaced_pod_info.kind == 'ReplicaSet':\n                    replica_set = apps_api.read_namespaced_replica_set(core_api.read_namespaced_pod(\n                        pod_name['name'], namespace_id).metadata.owner_references[0].name, namespace_id)\n                    if replica_set.metadata.owner_references[0].name == deployment_id and pod_name['name'] in cell_pod_map \\\n                            and pod_name['status']=='Running':\n                        # fetching pods based on deployment\n                        deployment_pod_list.append(pod_name['name'])\n                        # fetching cells based on pods and deployment\n                        deployment_cell_list.append(cell_pod_map[pod_name['name']]['cell_name'])\n                    else:\n                        continue\n            else:\n                continue\n        # if there are no pods for the passed deployment\n        if len(deployment_pod_list) == 0:\n            pods_for_resource_calculation = 'no_pod_resource'\n        else:\n            pods_for_resource_calculation = deployment_pod_list\n\n        deployments_info = {\n            'resource_count': {\n                'cells': len(deployment_cell_list),\n                'pods': len(deployment_pod_list)\n            },\n            'resource_info': get_resource_info(cluster_id, 'pods', namespace_id, pods_for_resource_calculation)\n        }\n\n    # deployment listing\n    else:\n        if namespace_id:\n            deployments_info = [{'value': deployment_item.metadata.name, 'label': deployment_item.metadata.name}\n                                for deployment_item in\n                                client.AppsV1beta2Api().list_namespaced_deployment(namespace_id).items]\n        else:\n            deployments_info = [{'value': deployment_item.metadata.name, 'label': deployment_item.metadata.name}\n                                for deployment_item in\n                                client.AppsV1beta2Api().list_deployment_for_all_namespaces().items]\n    return deployments_info" ]
[ "0.88087106", "0.86459535", "0.81122637", "0.81122637", "0.7793123", "0.7685812", "0.76720864", "0.7581286", "0.7370088", "0.7301753", "0.72268534", "0.7187506", "0.7119948", "0.708371", "0.6920872", "0.69154114", "0.6898356", "0.6898356", "0.6852856", "0.67866534", "0.666569", "0.6665486", "0.65006185", "0.64056337", "0.63301545", "0.6029711", "0.60067546", "0.59942454", "0.59844935", "0.59473044" ]
0.93214124
0
Test case for get_deployment_run_reports
def test_get_deployment_run_reports(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_deployment_runs(self):\n        pass", "def test_download_deployment_run_test_report(self):\n        pass", "def test_get_deployment_run(self):\n        pass", "def test_get_deployment_runs1(self):\n        pass", "def test_get_deployment_resources(self):\n        pass", "def test_get_deployments(self):\n        pass", "def test_get_deployments(self):\n        pass", "def test_get_deployment(self):\n        pass", "def test_get_deployment(self):\n        pass", "def test_get_deployment_resource_data(self):\n        pass", "def test_release_deployment_run(self):\n        pass", "def test_execute_deployment(self):\n        pass", "def test_basic_report(self):\n        report = self.analytics.suites[testReportSuite].report\n        queue = []\n        queue.append(report)\n        response = omniture.sync(queue)\n        self.assertIsInstance(response, list)", "def test_publish_deployment_run(self):\n        pass", "def main():\n    r = ReportHelper()\n    today = dt.today()\n\n    start_date = (today - timedelta(days=1)).strftime('%Y-%m-%d')\n    end_date = today.strftime('%Y-%m-%d')\n    response, ingestion_results = r.get_report(start_date, end_date, 'daily')\n    logger.debug('Daily report data from {s} to {e}'.format(s=start_date, e=end_date))\n    logger.debug(json.dumps(response, indent=2))\n    logger.debug(json.dumps(ingestion_results, indent=2))\n\n    if time_to_generate_monthly_report(today):\n        last_day_of_prev_month = date(today.year, today.month, 1) - timedelta(days=1)\n        last_month_first_date = last_day_of_prev_month.strftime('%Y-%m-01')\n        last_month_end_date = last_day_of_prev_month.strftime('%Y-%m-%d')\n        response, ingestion_results = r.get_report(last_month_first_date,\n                                                   last_month_end_date,\n                                                   'monthly')\n        logger.debug('Monthly report data from {s} to {e}'.format(s=start_date, e=end_date))\n        logger.debug(json.dumps(response, indent=2))\n\n    return response", "def test_get_deployment_resource(self):\n        pass", "def test_get_deployments_count(self):\n        pass", "def test_retest_deployment_run(self):\n        pass", "def __generate_reports__(self,configs,mockdb):\n        sample_keys = self.__completed_samples_list__(mockdb)\n        n = len(sample_keys)\n        numbers = configs['pipeline'].get('Flowcell_reports','numbers').split(',')\n        numbers.sort(key=int,reverse=True)\n        flowcell = mockdb['Flowcell'].__get__(configs['system'],key=self.flowcell_key)\n        for number in numbers:\n            if n >= int(number):\n                if getattr(self,'flowcell_report_' + str(number) + '_key') is None:\n                    report = mockdb['FlowcellStatisticReport'].__new__(configs['system'],sample_keys=sample_keys,flowcell=flowcell,number=number,base_output_dir=self.base_output_dir)\n                    report.__fill_qsub_file__(configs)\n                    report.__launch__(configs['system'])\n                    setattr(self,'flowcell_report_' + str(number) + '_key',report.key)\n                    return True\n                return False\n        return False", "def test_get_run(self):\n        pass", "def test_get_deployment_runs_in_virtualization_realm(self):\n        pass", "def run_reports(args):\n    # Define the types of reports we can accept.\n    # TODO: Roll this data structure into the Reports class.\n    reports = {}\n    reports[\"computers\"] = {\"heading\": \"Computer Report\",\n                            \"func\": build_computers_report,\n                            \"report\": None}\n    reports[\"mobile_devices\"] = {\"heading\": \"Mobile Device Report\",\n                                 \"func\": build_mobile_devices_report,\n                                 \"report\": None}\n    reports[\"computer_groups\"] = {\"heading\": \"Computer Groups Report\",\n                                  \"func\": build_computer_groups_report,\n                                  \"report\": None}\n    reports[\"computer_extension_attributes\"] = {\n        \"heading\": \"Computer Extension Attributes Report\",\n        \"func\": build_computer_ea_report,\n        \"report\": None}\n    reports[\"packages\"] = {\"heading\": \"Package Report\",\n                           \"func\": build_packages_report,\n                           \"report\": None}\n    reports[\"printers\"] = {\"heading\": \"Printers Report\",\n                           \"func\": build_printers_report,\n                           \"report\": None}\n    reports[\"scripts\"] = {\"heading\": \"Scripts Report\",\n                          \"func\": build_scripts_report,\n                          \"report\": None}\n    reports[\"policies\"] = {\"heading\": \"Policy Report\",\n                           \"func\": build_policies_report,\n                           \"report\": None}\n    reports[\"computer_configuration_profiles\"] = {\n        \"heading\": \"Computer Configuration Profile Report\",\n        \"func\": build_config_profiles_report,\n        \"report\": None}\n\n    reports[\"mobile_device_configuration_profiles\"] = {\n        \"heading\": \"Mobile Device Configuration Profile Report\",\n        \"func\": build_md_config_profiles_report,\n        \"report\": None}\n    reports[\"mobile_device_groups\"] = {\n        \"heading\": \"Mobile Device Group Report\",\n        \"func\": build_device_groups_report,\n        \"report\": None}\n    reports[\"apps\"] = {\n        \"heading\": \"Mobile Apps\",\n        \"func\": build_apps_report,\n        \"report\": None}\n\n    args_dict = vars(args)\n    # Build a list of report key names, requested by user, which are\n    # tightly coupled, despite the smell, to arg names.\n    requested_reports = [report for report in reports if\n                         args_dict[report]]\n\n    # If either the --all option has been provided, OR none of the\n    # other reports options have been specified, assume user wants all\n    # reports (filtering out --remove is handled elsewhere).\n    if args.all or not requested_reports:\n        # Replace report list with all known report names.\n        # TODO: THis is dumb... Just puts the name in so I can later\n        # pull it again with dict.\n        requested_reports = [report for report in reports]\n\n    # Build the reports\n    results = []\n    for report_name in requested_reports:\n        report_dict = reports[report_name]\n        print \"%s Building: %s... %s\" % (SPRUCE, report_dict[\"heading\"],\n                                         SPRUCE)\n        func = reports[report_name][\"func\"]\n        results.append(func(**args_dict))\n\n    # Output the reports\n    output_xml = ET.Element(\"SpruceReport\")\n    add_output_metadata(output_xml)\n\n    for report in results:\n        # Print output to stdout.\n        if not args.ofile:\n            print\n            print_output(report, args.verbose)\n        else:\n            add_report_output(output_xml, report)\n\n    if args.ofile:\n        indent(output_xml)\n        tree = ET.ElementTree(output_xml)\n        #print ET.tostring(output_xml, encoding=\"UTF-8\")\n        try:\n            tree.write(os.path.expanduser(args.ofile), encoding=\"UTF-8\",\n                       xml_declaration=True)\n            print \"%s Wrote output to %s\" % (SPRUCE, args.ofile)\n        except IOError:\n            print \"Error writing output to %s\" % args.ofile\n            sys.exit(1)", "def runtime_analysis(config, overall_report):\n    test_case_report_list = []\n    \n    for test_suite in config.get_test_suite():\n        report = dict()\n        report['stdout_stream'] = ''\n        report['stderr_stream'] = ''\n        report['outfile'] = ''\n\n        input_for_stdin = config.get_test_suite_input_for_stdin(test_suite)\n        # using Popen instead of run because I need access to the pid\n        # See comment under \"except subprocess.TimeoutExpired:\"\n        infile = \"xinfile_\" + uuid.uuid4().hex[0:16] + \".txt\"\n        outfile = \"xoutfile_\" + uuid.uuid4().hex[0:16] + \".txt\"\n        p = subprocess.Popen(['./run_jail.sh',\n                              config.output_filename,\n                              str(len(test_suite)), infile, outfile], # command\n                             stdout=subprocess.PIPE, # capture stdout\n                             stderr=subprocess.PIPE, # capture stderr\n                             stdin=subprocess.PIPE, # capture stdin\n                             universal_newlines=True, # use text mode for std* file objects\n                             start_new_session=True, # otherwise killing the process group will also kill the Python interpreter\n                             )\n\n        try:\n            # send test suite input\n            with open(infile, \"w\") as f:\n                f.write(input_for_stdin)\n            (stdout_stream, stderr_stream) = p.communicate(timeout=config.timeout)\n            \n            report['return_code'] = p.returncode\n            report['stderr_stream'] += stderr_stream\n            report['stdout_stream'] += stdout_stream\n            with open(outfile, \"r\") as f:\n                current_outfile = f.read()\n            report['outfile'] += current_outfile\n            \n            # check if test cases passed\n            ret_output_match = config.check_for_output_match(current_outfile, test_suite)\n            report['test_suite'] = test_suite\n            report['output_match'] = ret_output_match\n            \n        except subprocess.TimeoutExpired:\n            # kill the process group so that all child processes spawned by the process are also killed\n            # The child need to be killed because, in addition to wasting CPU cycles,\n            # it can hold stdout and then Python will wait indefinitely even if the timeout is expired\n            os.killpg(os.getpgid(p.pid), signal.SIGKILL) \n            report['timeout'] = True\n        finally:\n            test_case_report_list.append(report)\n    \n    overall_report['runtime_analysis_done'] = True\n\n    return overall_report, test_case_report_list", "def run(request_handler, name, generic_result_set=True, **kwargs) -> Union[List, Dict]:\n    params = {\n        'genericResultSet': generic_result_set,\n        'pretty': False\n    }\n    for param in kwargs.keys():\n        params['R_{}'.format(param)] = kwargs[param]\n\n    return request_handler.make_request('GET', '/runreports/{}'.format(name), params=params)", "def test_get_deployment_metric(self):\n        pass", "def test_split_reports_with_execution(self):\n        self._test_reports_helper({\"--split-reports\": \"\",\n                                   \"--profile-execution\": \"\"},\n                                  [\"compile.txt\", \"execution.txt\"])", "def test_reports_list(client, app):\n    with app.app_context():\n        r = client.get(\"/reports\")\n\n    assert r.status_code == 308\n\n    r = client.get(\"/repo\")\n    assert r.status_code == 404\n    assert \"Sorry\" in r.get_data(as_text=True)", "def test_launch_deployment(self):\n        pass", "def testFindAvailableRunPages(loggingMixin, yamlConfigForParsingPlugins):\n    parameters = yamlConfigForParsingPlugins\n    # Use pkg_resources to make sure we don't end up with the wrong resources (and to ensure that the tests\n    # are cwd independent).\n    expected = [name for name in pkg_resources.resource_listdir(\"overwatch.webApp\", \"templates\") if \"runPage\" in name]\n    # Apparently the order of these lists can vary between different systems. We don't care about the order\n    # - just the values themselves - so we compare them as sets, which don't depend on order.\n    assert set(parameters[\"runPageTemplates\"]) == set(expected)", "def project_report(request, **kwargs):\n\n    #Creating the command for the logs \n    print(\"in the project_report ...........................................\")\n    outputStr = \"Updating the logs...\"\n    #Making the output\n    context = {\n        \"page_title\": _(\"Test Details\"),\n        \"test_lists\": 'report_list', #tests_list\n        \"log_data\": outputStr\n    }\n    return render(request, 'rally_dashboard/events/test_logs.html', context)" ]
[ "0.8036366", "0.78003824", "0.7705035", "0.74334186", "0.6723308", "0.6699503", "0.6699503", "0.6502517", "0.6502517", "0.6477796", "0.62390786", "0.62258255", "0.61799127", "0.6140444", "0.61401784", "0.6127425", "0.61230326", "0.61071736", "0.60303926", "0.6011866", "0.60030127", "0.5993448", "0.5928831", "0.5851631", "0.5842106", "0.58370656", "0.5835995", "0.5801816", "0.5730253", "0.5728125" ]
0.9344881
0
Test case for get_deployment_runs
def test_get_deployment_runs(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_deployment_run(self):\n pass", "def test_get_deployment_runs1(self):\n pass", "def test_get_deployment_run_reports(self):\n pass", "def test_get_deployments(self):\n pass", "def test_get_deployments(self):\n pass", "def test_get_deployment(self):\n pass", "def test_get_deployment(self):\n pass", "def test_get_deployment_resources(self):\n pass", "def test_execute_deployment(self):\n pass", "def test_list_runs(self):\n pass", "def test_get_deployment_runs_in_virtualization_realm(self):\n pass", "def test_release_deployment_run(self):\n pass", "def test_retest_deployment_run(self):\n pass", "def test_get_deployments_count(self):\n pass", "def test_get_deployment_resource_data(self):\n pass", "def test_publish_deployment_run(self):\n pass", "def test_get_deployment_resource(self):\n pass", "def test_launch_deployment(self):\n pass", "def test_get_run(self):\n pass", "def test_relaunch_deployment_run(self):\n pass", "def test_delete_deployment_run(self):\n pass", "def test_create_deployment_entire(self):\n pass", "def test_download_deployment_run_test_report(self):\n pass", "def test_create_deployment(self):\n pass", "def test_get_deployments_expanded(self):\n pass", "def test_update_deployment(self):\n pass", "def test_set_power_schedule_for_deployment_run(self):\n pass", "def test_get_deployment_metric(self):\n pass", "def _GetAllTestRuns(self, ispy):\n template = JINJA.get_template('list_view.html')\n data = {}\n max_keys = 1000\n marker = 'failures/%s' % self.request.get('marker')\n test_runs = list([path.split('/')[1] for path in\n ispy.GetAllPaths('failures/', max_keys=max_keys,\n marker=marker, delimiter='/')])\n base_url = '/?test_run=%s'\n next_url = '/?marker=%s' % test_runs[-1]\n data['next_url'] = next_url\n data['links'] = [(test_run, base_url % test_run) for test_run in test_runs]\n self.response.write(template.render(data))", "def test_cron_workflow_service_list_cron_workflows2(self):\n pass" ]
[ "0.8514573", "0.8482369", "0.7901847", "0.72559136", "0.72559136", "0.72362834", "0.72362834", "0.70702165", "0.6901347", "0.68948126", "0.68279433", "0.6815669", "0.67309415", "0.66859025", "0.6622968", "0.6598748", "0.65230644", "0.6467676", "0.6446612", "0.6444857", "0.6293132", "0.61546254", "0.61428", "0.61380434", "0.6088437", "0.6061429", "0.60375893", "0.59873104", "0.5849179", "0.58268076" ]
0.9374177
0
Test case for get_deployment_runs1
def test_get_deployment_runs1(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_deployment_runs(self):\n        pass", "def test_get_deployment_run(self):\n        pass", "def test_get_deployment_run_reports(self):\n        pass", "def test_get_deployment(self):\n        pass", "def test_get_deployment(self):\n        pass", "def test_execute_deployment(self):\n        pass", "def test_get_deployments(self):\n        pass", "def test_get_deployments(self):\n        pass", "def test_retest_deployment_run(self):\n        pass", "def test_get_deployment_resources(self):\n        pass", "def test_release_deployment_run(self):\n        pass", "def test_publish_deployment_run(self):\n        pass", "def test_get_deployment_runs_in_virtualization_realm(self):\n        pass", "def test_relaunch_deployment_run(self):\n        pass", "def test_launch_deployment(self):\n        pass", "def test_get_deployments_count(self):\n        pass", "def test_create_deployment_entire(self):\n        pass", "def test_get_deployment_resource_data(self):\n        pass", "def test_get_deployment_resource(self):\n        pass", "def test_create_deployment(self):\n        pass", "def test_delete_deployment_run(self):\n        pass", "def test_update_deployment(self):\n        pass", "def test_get_run(self):\n        pass", "def test_list_runs(self):\n        pass", "def test_get_deployment_metric(self):\n        pass", "def test_download_deployment_run_test_report(self):\n        pass", "def test_get_deployments_expanded(self):\n        pass", "def test_deployment(self):\n        config = {'example.com': {\n            'applications': {\n                'site-hybridcluster': {\n                    'image': 'unknown',\n                },\n                'mysql-hybridcluster': {\n                    'image': 'unknown',\n                }\n            },\n            'version': 1\n        }}\n        expected = Deployment(nodes=frozenset([\n            Node(hostname='example.com', applications=frozenset([\n                Application(\n                    name='mysql-hybridcluster',\n                    image=DockerImage.from_string('unknown'),\n                    ports=frozenset(),\n                ),\n                Application(\n                    name='site-hybridcluster',\n                    image=DockerImage.from_string('unknown'),\n                )]))]))\n        self.assertEqual(expected,\n                         current_from_configuration(config))", "def test_set_power_schedule_for_deployment_run(self):\n        pass", "def get_deployment_data(cluster_id, namespace_id=None, deployment_id=None):\n    # deployment detail\n    if deployment_id and namespace_id is not None:\n        # creating cell-pod mapping for getting cell details\n        cell_pod_map = get_cell_pod_map(cluster_id)\n        # getting pod count\n        pods_data = [pod for pod in get_pod_data(cluster_id, namespace_id, deployment_id)]\n        core_api = client.CoreV1Api()\n        apps_api = client.AppsV1Api()\n        deployment_cell_list = list()\n        deployment_pod_list = list()\n        for pod_name in pods_data:\n            if pod_name['name'] in cell_pod_map:\n                namespaced_pod_info = core_api.read_namespaced_pod(pod_name['name'], namespace_id).metadata.owner_references[0]\n                if namespaced_pod_info.kind == 'ReplicaSet':\n                    replica_set = apps_api.read_namespaced_replica_set(core_api.read_namespaced_pod(\n                        pod_name['name'], namespace_id).metadata.owner_references[0].name, namespace_id)\n                    if replica_set.metadata.owner_references[0].name == deployment_id and pod_name['name'] in cell_pod_map \\\n                            and pod_name['status']=='Running':\n                        # fetching pods based on deployment\n                        deployment_pod_list.append(pod_name['name'])\n                        # fetching cells based on pods and deployment\n                        deployment_cell_list.append(cell_pod_map[pod_name['name']]['cell_name'])\n                    else:\n                        continue\n            else:\n                continue\n        # if there are no pods for the passed deployment\n        if len(deployment_pod_list) == 0:\n            pods_for_resource_calculation = 'no_pod_resource'\n        else:\n            pods_for_resource_calculation = deployment_pod_list\n\n        deployments_info = {\n            'resource_count': {\n                'cells': len(deployment_cell_list),\n                'pods': len(deployment_pod_list)\n            },\n            'resource_info': get_resource_info(cluster_id, 'pods', namespace_id, pods_for_resource_calculation)\n        }\n\n    # deployment listing\n    else:\n        if namespace_id:\n            deployments_info = [{'value': deployment_item.metadata.name, 'label': deployment_item.metadata.name}\n                                for deployment_item in\n                                client.AppsV1beta2Api().list_namespaced_deployment(namespace_id).items]\n        else:\n            deployments_info = [{'value': deployment_item.metadata.name, 'label': deployment_item.metadata.name}\n                                for deployment_item in\n                                client.AppsV1beta2Api().list_deployment_for_all_namespaces().items]\n    return deployments_info" ]
[ "0.9130747", "0.8592565", "0.7720188", "0.7396605", "0.7396605", "0.71723825", "0.71451426", "0.71451426", "0.7041238", "0.70089823", "0.6964683", "0.67657304", "0.6760633", "0.6752423", "0.6713195", "0.6648042", "0.65539503", "0.6547771", "0.6542156", "0.6493821", "0.6452613", "0.6447794", "0.63444996", "0.6335062", "0.6153225", "0.6141731", "0.6094132", "0.60845894", "0.60293525", "0.60033417" ]
0.90859807
1
Test case for get_deployment_runs_in_virtualization_realm
def test_get_deployment_runs_in_virtualization_realm(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_deployment_runs(self):\n pass", "def test_get_deployment_run(self):\n pass", "def test_get_deployment_runs1(self):\n pass", "def test_get_virtualization_realm_resources(self):\n pass", "def test_get_deployment_run_reports(self):\n pass", "def test_get_virtualization_realm(self):\n pass", "def test_get_deployment(self):\n pass", "def test_get_deployment(self):\n pass", "def test_get_deployment_resources(self):\n pass", "def test_get_deployments(self):\n pass", "def test_get_deployments(self):\n pass", "def test_execute_deployment(self):\n pass", "def test_get_virtualization_realms(self):\n pass", "def test_get_deployment_resource(self):\n pass", "def test_release_deployment_run(self):\n pass", "def test_get_deployment_resource_data(self):\n pass", "def test_allocate_virtualization_realm(self):\n pass", "def test_get_deployments_count(self):\n pass", "def test_register_virtualization_realm(self):\n pass", "def test_launch_deployment(self):\n pass", "def test_publish_deployment_run(self):\n pass", "def test_list_virtualization_realm_templates(self):\n pass", "def test_retest_deployment_run(self):\n pass", "def test_set_power_schedule_for_deployment_run(self):\n pass", "def test_determine_valid_virtualization_realms(self):\n pass", "def test_set_virtualization_realm_active(self):\n pass", "def test_get_templates_in_virtualization_realm(self):\n pass", "def test_get_project_virt_realms(self):\n pass", "def get_deployment_roles(plan_name):\n pass", "def test_update_virtualization_realm(self):\n pass" ]
[ "0.80955195", "0.7542476", "0.72114205", "0.6733126", "0.66010475", "0.6568793", "0.6403381", "0.6403381", "0.63537264", "0.6303567", "0.6303567", "0.6257609", "0.60801667", "0.595264", "0.5920311", "0.5895868", "0.58922637", "0.5852176", "0.584603", "0.57940525", "0.57104343", "0.56916416", "0.5690329", "0.56732976", "0.5670896", "0.56537586", "0.56487757", "0.56483006", "0.564451", "0.5613173" ]
0.893716
0
Test case for get_deployments
def test_get_deployments(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_deployments_expanded(self):\n pass", "def test_get_deployment(self):\n pass", "def test_get_deployment(self):\n pass", "def test_get_deployments_count(self):\n pass", "def test_get_deployment_runs(self):\n pass", "def test_get_deployment_resources(self):\n pass", "def test_get_deployment_run(self):\n pass", "def test_get_deployment_runs1(self):\n pass", "def test_get_deployment_run_reports(self):\n pass", "def test_get_deployment_resource(self):\n pass", "def test_execute_deployment(self):\n pass", "def test_installments_get(self):\n pass", "def handle_list_deployments(project_id):\n return jsonify(list_deployments(project_id=project_id))", "def bamboo_deployments(ctx, from_date, to_date, use_cache):\r\n\r\n if from_date is None:\r\n from_date, to_date = previous_month_range()\r\n\r\n log.info('Getting Bamboo deployments between {} and {}'.format(from_date, to_date))\r\n report = BambooDeploymentsReport(\r\n ctx.obj,\r\n from_date=from_date,\r\n to_date=to_date\r\n )\r\n report.run_report(use_cache=use_cache)", "def list_deployments() -> JSONResponse:\n\n deploy_manager = DeployManager()\n deployments = deploy_manager.list()\n return JSONResponse(deployments)", "def test_get_deployment_resource_data(self):\n pass", "def test_create_deployment(self):\n pass", "def test_get_deployment_runs_in_virtualization_realm(self):\n pass", "def describe_deployments(StackId=None, AppId=None, DeploymentIds=None):\n pass", "def test_create_deployment_entire(self):\n pass", "def test_release_deployment_run(self):\n pass", "def test_list_namespaced_deployment_config(self):\n pass", "def get_deployments(self, ApiId: str, MaxResults: str = None, NextToken: str = None) -> Dict:\n pass", "def test_publish_deployment_run(self):\n pass", "def test_update_deployment(self):\n pass", "def get(self):\n deploys = get_heroku_deployments('SERVER GOES HERE!')\n write_msg(deploys)\n return deploys", "def test_get_bindings_for_deployment(self):\n pass", "def deployments(self) -> List[Dict]:\n return [\n {\n 'name': self.name,\n 'head_host': self.head_host,\n 'head_port': self.head_port,\n }\n ]", "def deploy_environments(self):\n for key in self.deploy:\n yield key", "def test_list_deployment_config_for_all_namespaces(self):\n pass" ]
[ "0.796072", "0.79568464", "0.79568464", "0.78713965", "0.7855781", "0.7646339", "0.7618631", "0.7610572", "0.6963447", "0.68846107", "0.68364036", "0.67508566", "0.66934144", "0.6690729", "0.66338706", "0.66229856", "0.65987533", "0.65723974", "0.6571443", "0.6527052", "0.64954597", "0.64285606", "0.63903564", "0.63684094", "0.63467586", "0.6334812", "0.62910485", "0.622075", "0.62205124", "0.6173846" ]
0.94160146
1
Test case for get_deployments_expanded
def test_get_deployments_expanded(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_deployments(self):\n pass", "def test_get_deployments(self):\n pass", "def test_get_deployment_resources(self):\n pass", "def test_get_deployment(self):\n pass", "def test_get_deployment(self):\n pass", "def test_get_deployment_runs(self):\n pass", "def test_get_deployment_runs1(self):\n pass", "def test_get_container_assets_expanded(self):\n pass", "def test_get_deployment_run(self):\n pass", "def test_get_projects_expanded(self):\n pass", "def test_get_software_asset_bundle_expanded(self):\n pass", "def test_get_deployments_count(self):\n pass", "def test_create_deployment_entire(self):\n pass", "def test_get_test_assets_expanded(self):\n pass", "def test_get_deployment_resource(self):\n pass", "def test_execute_deployment(self):\n pass", "def describe_deployments(StackId=None, AppId=None, DeploymentIds=None):\n pass", "def test_get_software_set_expanded(self):\n pass", "def test_get_deployment_resource_data(self):\n pass", "def test_list_namespaced_deployment_config(self):\n pass", "def test_environments_deployment(\n self,\n cd_tmp_path: Path,\n empty_opts_from_file: None,\n fx_deployments: YamlLoaderDeployment,\n runway_context: MockRunwayContext,\n ) -> None:\n runway_context.env.root_dir = cd_tmp_path\n deployment = fx_deployments.load(\"environments_map_str\")\n mod_def = deployment.modules[0]\n mod_def.environments = {\"dev\": [\"us-east-1\"], \"prod\": [\"us-east-1\"]}\n mod = Module(context=runway_context, definition=mod_def, deployment=deployment)\n assert mod.environments == {\n \"dev\": [\"us-east-1\"],\n \"prod\": [\"us-east-1\"],\n \"test\": \"123456789012/us-east-1\",\n }", "def test_get_scenarios_expanded(self):\n pass", "def test_list_deployment_config_for_all_namespaces(self):\n pass", "def deploy_environments(self):\n for key in self.deploy:\n yield key", "def handle_list_deployments(project_id):\n return jsonify(list_deployments(project_id=project_id))", "def test_update_deployment(self):\n pass", "def test_get_bindings_for_deployment(self):\n pass", "def test_get_deployment_run_reports(self):\n pass", "def test_get_deployment_runs_in_virtualization_realm(self):\n pass", "def test_child_index_response_descriptor_projects_release_projects_deployment_release_resource_deployment_resource(self):\n pass" ]
[ "0.7691938", "0.7691938", "0.6684406", "0.6653419", "0.6653419", "0.659954", "0.64300287", "0.6367373", "0.63580096", "0.6344198", "0.63250256", "0.6322087", "0.6269188", "0.6085834", "0.60119164", "0.59499806", "0.58928794", "0.5855816", "0.576965", "0.57578206", "0.5757446", "0.5721807", "0.5714377", "0.56949204", "0.5615937", "0.5611326", "0.5605396", "0.558979", "0.55879915", "0.558338" ]
0.94793695
0
Test case for get_edge_gateway_i_ps
def test_get_edge_gateway_i_ps(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_find_ngpf(ixnetwork):\n    for i in range(0, 5):\n        srte_v4 = (\n            ixnetwork.Topology.add()\n            .DeviceGroup.add()\n            .Ethernet.add()\n            .Ipv4.add()\n            .BgpIpv4Peer.add(NumberSRTEPolicies=1)\n            .BgpSRTEPoliciesListV4\n        )\n        if i % 2 == 0:\n            srte_v4.NumberOfTunnelsV4 = 2\n    assert (\n        len(\n            ixnetwork.Topology.find()\n            .DeviceGroup.find()\n            .Ethernet.find()\n            .Ipv4.find()\n            .BgpIpv4Peer.find()\n            .BgpSRTEPoliciesListV4.find(NumberOfTunnelsV4=2)\n        )\n    ) == 3", "def sx_router_ecmp_nexthops_get(handle, ecmp_id): \n    try:\n        next_hops = []\n        \n        next_hop_count_p = new_uint32_t_p()\n        uint32_t_p_assign(next_hop_count_p, 0)\n        \n        rc = sx_api_router_operational_ecmp_get(handle, ecmp_id, None, next_hop_count_p)\n        sx_check_rc(rc)\n        \n        next_hop_count = uint32_t_p_value(next_hop_count_p)\n        next_hop_list_p = new_sx_next_hop_t_arr(next_hop_count)\n        \n        rc = sx_api_router_operational_ecmp_get(handle, ecmp_id, next_hop_list_p, next_hop_count_p)\n        sx_check_rc(rc)\n        \n        next_hop_count = uint32_t_p_value(next_hop_count_p)\n        for i in range(next_hop_count):\n            next_hop = sx_next_hop_t_arr_getitem(next_hop_list_p, i)\n            if next_hop.next_hop_key.type == SX_NEXT_HOP_TYPE_IP:\n                next_hops.append(next_hop)\n        \n        return next_hops\n\n    finally:\n        delete_sx_next_hop_t_arr(next_hop_list_p)\n        delete_uint32_t_p(next_hop_count_p)", "def test_get_network(self):\n        pass", "def _get_igp_ospf(self):\n        return self.__igp_ospf", "def _get_hop(self):\n        return self.__hop", "def _get_next_hop_metric(self):\n        return self.__next_hop_metric", "def get_internet_gateway_output(internet_gateway_name: Optional[pulumi.Input[str]] = None,\n                                resource_group_name: Optional[pulumi.Input[str]] = None,\n                                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetInternetGatewayResult]:\n    ...", "def get(profile):\n    client = boto3client.get(\"ec2\", profile)\n    return client.describe_internet_gateways()", "def test_mgre(self):\n\n        for itf in self.pg_interfaces[3:]:\n            #\n            # one underlay nh for each overlay/tunnel peer\n            #\n            itf.generate_remote_hosts(4)\n            itf.configure_ipv4_neighbors()\n\n            #\n            # Create an L3 GRE tunnel.\n            #  - set it admin up\n            #  - assign an IP Addres\n            #  - Add a route via the tunnel\n            #\n            gre_if = VppGreInterface(\n                self,\n                itf.local_ip4,\n                \"0.0.0.0\",\n                mode=(VppEnum.vl_api_tunnel_mode_t.TUNNEL_API_MODE_MP),\n            )\n            gre_if.add_vpp_config()\n            gre_if.admin_up()\n            gre_if.config_ip4()\n            gre_if.generate_remote_hosts(4)\n\n            self.logger.info(self.vapi.cli(\"sh adj\"))\n            self.logger.info(self.vapi.cli(\"sh ip fib\"))\n\n            #\n            # ensure we don't match to the tunnel if the source address\n            # is all zeros\n            #\n            tx = self.create_tunnel_stream_4o4(\n                self.pg0,\n                \"0.0.0.0\",\n                itf.local_ip4,\n                self.pg0.local_ip4,\n                self.pg0.remote_ip4,\n            )\n            self.send_and_assert_no_replies(self.pg0, tx)\n\n            #\n            # for-each peer\n            #\n            for ii in range(1, 4):\n                route_addr = \"4.4.4.%d\" % ii\n                tx_e = self.create_stream_ip4(self.pg0, \"5.5.5.5\", route_addr)\n\n                #\n                # route traffic via the peer\n                #\n                route_via_tun = VppIpRoute(\n                    self,\n                    route_addr,\n                    32,\n                    [VppRoutePath(gre_if._remote_hosts[ii].ip4, gre_if.sw_if_index)],\n                )\n                route_via_tun.add_vpp_config()\n\n                # all packets dropped at this point\n                rx = self.send_and_assert_no_replies(self.pg0, tx_e)\n\n                gre_if.admin_down()\n                gre_if.admin_up()\n                rx = self.send_and_assert_no_replies(self.pg0, tx_e)\n\n                #\n                # Add a TEIB entry resolves the peer\n                #\n                teib = VppTeib(\n                    self,\n                    gre_if,\n                    gre_if._remote_hosts[ii].ip4,\n                    itf._remote_hosts[ii].ip4,\n                )\n                teib.add_vpp_config()\n\n                #\n                # Send a packet stream that is routed into the tunnel\n                #  - packets are GRE encapped\n                #\n                rx = self.send_and_expect(self.pg0, tx_e, itf)\n                self.verify_tunneled_4o4(\n                    self.pg0, rx, tx_e, itf.local_ip4, itf._remote_hosts[ii].ip4\n                )\n\n                tx_i = self.create_tunnel_stream_4o4(\n                    self.pg0,\n                    itf._remote_hosts[ii].ip4,\n                    itf.local_ip4,\n                    self.pg0.local_ip4,\n                    self.pg0.remote_ip4,\n                )\n                rx = self.send_and_expect(self.pg0, tx_i, self.pg0)\n                self.verify_decapped_4o4(self.pg0, rx, tx_i)\n\n                #\n                # delete and re-add the TEIB\n                #\n                teib.remove_vpp_config()\n                self.send_and_assert_no_replies(self.pg0, tx_e)\n                self.send_and_assert_no_replies(self.pg0, tx_i)\n\n                teib.add_vpp_config()\n                rx = self.send_and_expect(self.pg0, tx_e, itf)\n                self.verify_tunneled_4o4(\n                    self.pg0, rx, tx_e, itf.local_ip4, itf._remote_hosts[ii].ip4\n                )\n                rx = self.send_and_expect(self.pg0, tx_i, self.pg0)\n                self.verify_decapped_4o4(self.pg0, rx, tx_i)\n\n                #\n                # bounce the interface state and try packets again\n                #\n                gre_if.admin_down()\n                gre_if.admin_up()\n                rx = self.send_and_expect(self.pg0, tx_e, itf)\n                self.verify_tunneled_4o4(\n                    self.pg0, rx, tx_e, itf.local_ip4, itf._remote_hosts[ii].ip4\n                )\n                rx = self.send_and_expect(self.pg0, tx_i, self.pg0)\n                self.verify_decapped_4o4(self.pg0, rx, tx_i)\n\n            gre_if.admin_down()\n            gre_if.unconfig_ip4()", "def get_switch_interface(pop_url, pop_id, uuid):\n    odl_info = _get_odl_info(pop_url, pop_id)\n    topology_json = get_topology(odl_info[0], odl_info[1], odl_info[2])\n    results = {}\n    for topology in topology_json['network-topology']['topology']:\n        nodes = topology['node']\n        for node in nodes:\n            node_features_json = get_node_features(odl_info[0], odl_info[1], odl_info[2], node['node-id'])\n            if 'node' in node_features_json:\n                for node_json in node_features_json['node']:\n                    if 'flow-node-inventory:serial-number' in node_json \\\n                            and node_json['flow-node-inventory:serial-number'].strip() != 'None':\n                        if 'node-connector' in node_json:\n                            for connector in node_json['node-connector']:\n                                if connector['id'] == uuid:\n                                    results['name'] = connector.get('flow-node-inventory:name', '')\n                                    results['attributes'] = {}\n                                    results['attributes']['port-number'] = \\\n                                        connector.get('flow-node-inventory:port-number', '')\n                                    results['attributes']['current-speed'] = \\\n                                        connector.get('flow-node-inventory:current-speed', '')\n                                    results['attributes']['flow-capable-node-connector-statistics'] = \\\n                                        connector.get(\n                                            'opendaylight-port-statistics:flow-capable-node-connector-statistics', '')\n                                    results['attributes']['advertised-features'] = \\\n                                        connector.get('flow-node-inventory:advertised-features', '')\n                                    results['attributes']['configuration'] = \\\n                                        connector.get('flow-node-inventory:configuration', '')\n                                    results['attributes']['hardware-address'] = \\\n                                        connector.get('flow-node-inventory:hardware-address', '')\n                                    results['attributes']['maximum-speed'] = \\\n                                        connector.get('flow-node-inventory:maximum-speed', '')\n                                    results['attributes']['state'] = \\\n                                        connector.get('flow-node-inventory:state', '')\n                                    results['attributes']['supported'] = \\\n                                        connector.get('flow-node-inventory:supported', '')\n                                    results['attributes']['current-feature'] = \\\n                                        connector.get('flow-node-inventory:current-feature', '')\n                                    results['attributes']['peer-features'] = \\\n                                        connector.get('flow-node-inventory:peer-features', '')\n    return results", "def get_all_sghop_info (nffg, return_paths=False):\n    sg_map = {}\n    for i in nffg.infras:\n      for p in i.ports:\n        for fr in p.flowrules:\n          # if fr.external:\n          #   continue\n          if fr.id not in sg_map:\n            # The path is unordered!!\n            path_of_shop = []\n            flowclass = NFFGToolBox._extract_flowclass(fr.match.split(\";\"))\n            sg_map[fr.id] = [None, None, flowclass, fr.bandwidth, fr.delay]\n            # We have to find the BEGINNING of this flowrule sequence.\n            inbound_link = NFFGToolBox._find_infra_link(nffg, p, outbound=False,\n                                                        accept_dyn=True)\n            while inbound_link.type != 'DYNAMIC':\n              path_of_shop.append(inbound_link)\n              if inbound_link.src.node.type == 'SAP':\n                break\n              # The link is STATIC, and its src is not SAP so it is an Infra.\n              prev_fr, prev_p = \\\n                NFFGToolBox._get_flowrule_and_its_starting_port(\n                  inbound_link.src.node, fr.id)\n              NFFGToolBox._check_flow_consistencity(sg_map, prev_fr)\n              inbound_link = NFFGToolBox._find_infra_link(nffg, prev_p,\n                                                          outbound=False,\n                                                          accept_dyn=True)\n            # 'inbound_link' is DYNAMIC here or it is STATIC and starts from\n            # a SAP,\n            # so the sequence starts here\n            sg_map[fr.id][0] = inbound_link.src\n\n            # We have to find the ENDING of this flowrule sequence.\n            output_port = NFFGToolBox._get_output_port_of_flowrule(i, fr)\n            if output_port is None:\n              continue\n            outbound_link = NFFGToolBox._find_infra_link(nffg, output_port,\n                                                         outbound=True,\n                                                         accept_dyn=True)\n            while outbound_link.type != 'DYNAMIC':\n              path_of_shop.append(outbound_link)\n              if outbound_link.dst.node.type == 'SAP':\n                break\n              # The link is STATIC and its dst is not a SAP so it is an Infra.\n              next_fr, _ = NFFGToolBox._get_flowrule_and_its_starting_port(\n                outbound_link.dst.node, fr.id)\n              # '_' is 'outbound_link.dst'\n              next_output_port = NFFGToolBox._get_output_port_of_flowrule(\n                outbound_link.dst.node, next_fr)\n              NFFGToolBox._check_flow_consistencity(sg_map, next_fr)\n              outbound_link = NFFGToolBox._find_infra_link(nffg,\n                                                           next_output_port,\n                                                           outbound=True,\n                                                           accept_dyn=True)\n            # the 'outbound_link' is DYNAMIC here or finishes in a SAP, so the\n            # flowrule sequence finished here.\n            sg_map[fr.id][1] = outbound_link.dst\n\n            if return_paths:\n              sg_map[fr.id].append(path_of_shop)\n\n    return sg_map", "def test_get_pci_switch_by_moid(self):\n        pass", "def __edgeRouter(self):\r\n        def getEndpoint(nodeTuple, pointList, direction, isReversedEdge):\r\n            \"\"\" Gets the nearest arrow endpoint. Handles edge reversal \"\"\"\r\n            if((direction == 'start' and not isReversedEdge)\r\n                or (direction == 'end' and isReversedEdge)): \r\n                endNode = nodeTuple[0]\r\n                if(isReversedEdge):\r\n                    ix = -2\r\n                    iy = -1\r\n                else:\r\n                    ix = 0\r\n                    iy = 1\r\n            else: \r\n                endNode = nodeTuple[1]\r\n                if(isReversedEdge):\r\n                    ix = 0\r\n                    iy = 1\r\n                else:\r\n                    ix = -2 \r\n                    iy = -1 \r\n                \r\n            # Is it connected to a named port!?!\r\n            if(endNode.isConnectedByNamedPort(edgeObject)):\r\n                handler = endNode.getConnectedByNamedPortHandler(nodeTuple[2]) \r\n                return dc.coords(handler)[:2]\r\n                \r\n            # Not a named port...\r\n            return list(endNode.getClosestConnector2Point( endNode, pointList[ix], \r\n                                                           pointList[iy]))  \r\n        \r\n        \r\n        \r\n        #todo: improve method for spline arrows + add comments + optimize?\r\n        print '----------------Dummy Edge Routing-----------------'\r\n        for dummyEdge in NodeWrapper.ID2LayerEdgeDict.keys():\r\n            \r\n            dummyList = NodeWrapper.ID2LayerEdgeDict[dummyEdge]\r\n            dummyNode = dummyList[0]\r\n            dummyChild = dummyNode.children.keys()[0]\r\n            linkFlagList = dummyNode.children[dummyChild]\r\n            \r\n            # Real nodes at start/end of the edge\r\n            edgeSourceNode = dummyNode.parents.keys()[0]\r\n            edgeSourceNode = edgeSourceNode.getASGNode().graphObject_\r\n            dummyNode = dummyList[-1]\r\n            edgeTargetNode = dummyNode.children.keys()[0]\r\n            #print 'Dummy edge number', dummyEdge,\r\n            #print dummyList[0].parents.keys()[0].getName(), edgeTargetNode.getName()\r\n            edgeTargetNode = edgeTargetNode.getASGNode().graphObject_\r\n            nodeTuple = [edgeSourceNode, edgeTargetNode, None]\r\n            \r\n            # Some edges are internally reversed to break cycles, when drawing\r\n            # this must be taken into account\r\n            isReversedEdge = False\r\n            edgesToRoute = []\r\n            for linkNode, isReversed in linkFlagList:\r\n                edgesToRoute.append(linkNode)\r\n                if(isReversed):\r\n                    isReversedEdge = True\r\n                \r\n            # Get all the points the edge must pass through (sorted by layer order)\r\n            dummyList.sort(lambda a, b: cmp(a.getLayer(), b.getLayer()))\r\n            if(isReversedEdge):\r\n                dummyList.reverse()\r\n            sortedDummyRouteList = []\r\n            for node in dummyList:\r\n                sortedDummyRouteList += node.getEdgePosition()\r\n            \r\n            # Set the coordinates of the edge directly \r\n            # This is complicated by the fact that AToM3 treats edges as two\r\n            # segments that join poorly (for spline arrows)\r\n            for edgeObject in edgesToRoute: \r\n                dc = edgeObject.graphObject_.dc\r\n                linkObj = edgeObject.graphObject_ \r\n                tag = linkObj.tag\r\n                \r\n                if(isReversedEdge):\r\n                    inPoint = dc.coords( tag + \"2ndSeg0\" )[:2]\r\n                    outPoint = dc.coords( tag + \"1stSeg0\" )[:2]\r\n                else:\r\n                    inPoint = dc.coords( tag + \"1stSeg0\" )[:2]\r\n                    outPoint = dc.coords( tag + \"2ndSeg0\" )[:2]\r\n                \r\n                #print 'Dummy route', sortedDummyRouteList\r\n                numPoints = len(sortedDummyRouteList) / 2\r\n                # Add 2 extra control points for odd case (to make splines nice)\r\n                if(numPoints % 2 == 1):\r\n                    if(numPoints == 1):\r\n                        center = sortedDummyRouteList\r\n                    else:\r\n                        start = sortedDummyRouteList[:numPoints - 1]\r\n                        end = sortedDummyRouteList[numPoints + 1:]\r\n                        center = sortedDummyRouteList[numPoints - 1:numPoints + 1]\r\n                        \r\n                    if(not isReversedEdge):\r\n                        newMid1 = [center[0], center[1] - 20]\r\n                        newMid2 = [center[0], center[1] + 20]\r\n                    else:\r\n                        newMid2 = [center[0], center[1] - 20]\r\n                        newMid1 = [center[0], center[1] + 20]\r\n                        \r\n                   \r\n                    if(numPoints == 1):\r\n                        sortedDummyRouteList = newMid1 + center + newMid2 \r\n                    else:\r\n                        sortedDummyRouteList = start + newMid1 + center + newMid2 + end\r\n                    centerIndex = numPoints - 1 + 2\r\n                    \r\n                # Add 1 extra control point for even case (to make splines nice)\r\n                else:\r\n                    start = sortedDummyRouteList[:numPoints]\r\n                    end = sortedDummyRouteList[numPoints:]\r\n                    center = [start[-2] + (end[0] - start[-2]) / 2, \r\n                              start[-1] + (end[1] - start[-1]) / 2]\r\n                    sortedDummyRouteList = start + center + end \r\n                    centerIndex = numPoints\r\n                \r\n                # Now I know where the center is... so lets move the center object\r\n                # Is the edge object a hyperlink?\r\n                if(len(edgeObject.in_connections_ + edgeObject.out_connections_) > 2):\r\n                    fromObjs = []\r\n                    for semObj in edgeObject.in_connections_:\r\n                        fromObjs.append(semObj.graphObject_)\r\n                    toObjs = []\r\n                    for semObj in edgeObject.out_connections_:\r\n                        toObjs.append(semObj.graphObject_)\r\n                    optimizerHyperLink(dc, linkObj, fromObjs, toObjs, 0, 0, 0, center )\r\n                    continue\r\n                    \r\n                else:\r\n                    linkObj.moveTo(* center)\r\n                \r\n                # Go through the 2 segments in the link\r\n                nodeTuple[2] = edgeObject\r\n                for connTuple in linkObj.connections:\r\n                    itemHandler = connTuple[0]\r\n                    direction = connTuple[1]\r\n                    \r\n                    if( direction ):  \r\n                        inPoint = getEndpoint(nodeTuple, sortedDummyRouteList,\r\n                                              'start', isReversedEdge)\r\n\r\n                        segCoords = inPoint + sortedDummyRouteList[:centerIndex+2]\r\n                    else:                                         \r\n                        outPoint = getEndpoint(nodeTuple, sortedDummyRouteList,\r\n                                               'end', isReversedEdge) \r\n                        segCoords = sortedDummyRouteList[centerIndex:] + outPoint\r\n                        segCoords = self.__reverseCoordList(segCoords)\r\n                    \r\n                    # Applies the changed coords to the canvas\r\n                    dc.coords( * [itemHandler] + segCoords ) \r\n                    \r\n                    # This may change the associated link drawings: \r\n                    # move them to the new point \r\n                    if( direction ):\r\n                        linkObj.updateDrawingsTo(inPoint[0], inPoint[1], itemHandler, \r\n                                                 segmentNumber=1)\r\n                    else:\r\n                        linkObj.updateDrawingsTo(outPoint[0], outPoint[1], itemHandler, \r\n                                                 segmentNumber=2)", "def _get_tunnel_interface_index(self):\n        return self.__tunnel_interface_index", "def create_ig(ec2):\n    ## create internet gateway\n    print(\"\\n===Creating Internet Gateway...\")\n    ig = ec2.create_internet_gateway(TagSpecifications=[{\n        \"ResourceType\":\"internet-gateway\",\n        \"Tags\":[{\"Key\": \"Name\", \"Value\": IG_NAME},\n        ]}])\n    print(\"===Internet gateway is reay!!\")\n    return ig", "def sx_get_router_interface(handle, vrid, rif): \n    try:\n        vrid_p = new_sx_router_id_t_p()\n        sx_router_id_t_p_assign(vrid_p, vrid)\n        \n        ifc_p = new_sx_router_interface_param_t_p()\n        ifc_attr_p = new_sx_interface_attributes_t_p()\n        rif_params = {}\n        \n        rc = sx_api_router_interface_get(handle, rif, vrid_p, ifc_p, ifc_attr_p)\n        sx_check_rc(rc)\n        \n        if ifc_p.type == SX_L2_INTERFACE_TYPE_PORT_VLAN:\n            rif_params[PORT] = ifc_p.ifc.port_vlan.port\n\n        if ifc_p.type == SX_L2_INTERFACE_TYPE_VPORT:\n            rif_params[VPORT] = ifc_p.ifc.vport.vport  \n\n        if ifc_p.type == SX_L2_INTERFACE_TYPE_VLAN:\n            rif_params[VLAN] = ifc_p.ifc.vlan.vlan\n\n        return rif_params\n        \n    finally:\n        delete_sx_interface_attributes_t_p(ifc_attr_p)\n        delete_sx_router_interface_param_t_p(ifc_p)\n        delete_sx_router_id_t_p(vrid_p)", "def getNodeInterface(self,node,interface):\n        data = self.connect('get','nodes/%s/network/%s' % (node,interface),None)\n        return data", "def test_proximity_endpoint(self):\n        endpoint = settings.PROXIMITY_ENDPOINT\n        access_token = config.ACCESS_TOKEN\n        self.assertValidGetOicJsonEndpoint(endpoint, access_token)", "def test_get_pci_switch_list(self):\n        pass", "def test_get_node_internal_ip_address(self):\n        pass", "def test_eigrp(sw):\n\tcmd = sw.show('show ip eigrp')\n\tresp = xmltodict.parse(cmd[1])['ins_api']['outputs']['output']\n\n\ttry:\n\t\tif resp[\"code\"] == \"400\":\n\t\t\t#most likely feature eigrp is not in the configuration.\n\t\t\treturn False\n\t\telif resp[\"code\"] == \"501\" and resp[\"clierror\"] == \"Note: process currently not running\\n\":\n\t\t\t#feature eigrp is enabled but not configured.\n\t\t\treturn False\n\t\telif resp[\"code\"] == \"200\":\n\t\t\t#eigrp appears to be configured\n\t\t\tcontexts = resp[\"body\"][\"TABLE_asn\"][\"ROW_asn\"]\n\t\t\tif len(contexts) > 0:\n\t\t\t\treturn True\n\texcept Exception as oops:\n\t\tprint type(oops)\n\t\tprint oops.args\n\t\tprint oops\n\treturn False", "def testGetEgressPort(self):\n        self.oxc.get_egress(file_name = 'get_egress_port.xml')", "def describe_virtual_gateways():\n    pass", "def get_device_traffic(context,target):\n\n    result = context.get_operation('get_interfaces_traffic')\n    return result", "def test_get_pci_link_by_moid(self):\n        pass", "def _get_tunnel_vif(self):\n        return self.__tunnel_vif", "def test_should_return_correct_gremlin_for_edge(self):\r\n        expected = 'subscribed_to = g.makeType().name(\"subscribed_to\").primaryKey(updated_at).makeEdgeLabel()'\r\n        edge = self.spec_parser.parse_edge(self.edge_spec)\r\n        assert edge.gremlin == expected\r\n\r\n        expected = 'subscribed_to = g.makeType().name(\"subscribed_to\").makeEdgeLabel()'\r\n        self.spec_parser._names = []\r\n        del self.edge_spec['primary_key']\r\n        edge = self.spec_parser.parse_edge(self.edge_spec)\r\n        assert edge.gremlin == expected", "def get_gateway(self):\n        return self.gateway", "def get_graph_interface(self):\n        if self._graph_interface is None:\n            self._compute_graph_interface()\n        return self._graph_interface", "def get_interface_traffic_input_pps(device: object, interface: str) -> str:\r\n\r\n    try:\r\n        out = device.parse(\r\n            'show interfaces {interface} extensive'.format(\r\n                interface=interface\r\n            )\r\n        )\r\n    except SchemaEmptyParserError as e:\r\n        return None\r\n    \r\n    # Example dict\r\n    # \"interface-information\": {\r\n    #     \"physical-interface\": [\r\n    #         {\r\n    #             \"traffic-statistics\": {\r\n    #                 \"input-pps\": str\r\n\r\n    phy_ = out.q.get_values('physical-interface', 0)\r\n    return phy_.get('traffic-statistics').get('input-pps')" ]
[ "0.55887896", "0.5570448", "0.556951", "0.553323", "0.55329764", "0.5496236", "0.5490319", "0.5411637", "0.5357546", "0.5329887", "0.5327904", "0.5285603", "0.5271882", "0.5263755", "0.52596277", "0.5245366", "0.5232057", "0.52103406", "0.5190832", "0.5159648", "0.5142109", "0.5110258", "0.50865173", "0.50746477", "0.5045067", "0.50424314", "0.50174224", "0.50168926", "0.50112927", "0.4998201" ]
0.9325536
0
Test case for get_environment_string
def test_get_environment_string(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_environment():\n namespace = current_app.config.get('POD_NAMESPACE').lower()\n if namespace.endswith('dev'):\n return 'DEV'\n if namespace.endswith('test'):\n return 'TEST'\n if namespace.endswith('tools'):\n return 'SANDBOX'\n return ''", "def _get_env(key: str) -> str:\n value = os.getenv(key)\n assert isinstance(value, str), (\n f\"the {key} environment variable must be set and a string, \" f\"{value=}\"\n )\n return value", "def getenv_string(setting, default=''):\n return os.environ.get(setting, default)", "def test_environment_str_conversion():\n for l in list(environments.data):\n e = environments[l]\n assert str(e) == e.name", "def getenv(space, var):\n e = os.environ.get(var)\n if e is None:\n return space.w_False\n return space.newstr(e)", "def Environ(envstring):\n try:\n envint = int(envstring)\n except ValueError:\n return os.environ.get(envstring, \"\")\n # Is an integer - need to get the envint'th value\n try:\n return \"%s=%s\" % (list(os.environ.keys())[envint], list(os.environ.values())[envint])\n except IndexError:\n return \"\"", "def getEnvironment(self):\n pass", "def env(var):\n return os.environ[var]", "def get_os_env():\n env = os.environ\n# print(\"env \\n\" , env)\n return env", "def env(key: str) -> Optional[Any]:\n return os.getenv(key)", "def environ() -> Environ:\n try:\n return Environ(os.environ['LABELBOX_TEST_ENVIRON'])\n except KeyError:\n raise Exception(f'Missing LABELBOX_TEST_ENVIRON in: {os.environ}')", "def getenv_check(e):\n res = os.getenv(e)\n if res == None:\n print(e, 'environment variable not set - stopping.')\n exit(1)\n else:\n return res", "def test_environ(run_nait) -> None: # type: ignore\n result = run_nait(['--environ-shell', '--environ', 'foo=bar']).stdout.decode('utf-8')\n assert result.find('export NANAIMO_UNITTEST=\"This is a nanaimo unittest environment.\"') != -1\n assert result.find('export foo=\"bar\"') != -1", "def get_from_environ(key: str, default: Any = None) -> str:\n return os.environ.get(key, default)", "def env_str(name: str, default: str) -> str:\n value = stringify(env.get(name))\n return default if value is None else value", "def _get_environment(cls):\n return cls.__name__.lower()", "def maybe_environ(key):\n try:\n return os.environ[key]\n except KeyError:\n return \"\"", "def getenv_or_raise_exception(varname) -> str:\n\n env = os.getenv(varname)\n if env is None:\n raise EnvironmentError(f\"Environment variable {varname} is not set!\")\n return env", "def testFromEnviron(self):\n prop = make_prop(default='def', from_environ='ENV_VAR')\n\n # Nothing is given => falls back to hardcoded default.\n self.assertEqual('def', prop.interpret(recipe_api.PROPERTY_SENTINEL, {}))\n # Only env var is given => uses it.\n self.assertEqual(\n 'var', prop.interpret(recipe_api.PROPERTY_SENTINEL, {'ENV_VAR': 'var'}))\n # Explicit values override the environment.\n self.assertEqual('value', prop.interpret('value', {'ENV_VAR': 'var'}))", "def env_var_line(key: str) -> str:\n return str(os.environ.get(key) or \"\").strip()", "def get_environment(basedir):\n for env in ('devel', 'staging', 'prod'):\n if os.path.exists(os.path.join(basedir, env)):\n return env\n return 'devel'", "def test_get_environment_attribute(self):\n\n config = EnvironmentConfig()\n config.environment = {\"a_field\": \"a_value\"}\n\n self.assertEqual(\"a_value\", config.a_field)", "def env(key, default=None, required=False):\n try:\n value = os.environ[key]\n return ast.literal_eval(value)\n except (SyntaxError, ValueError):\n return value\n except 
KeyError:\n if default or not required:\n return default\n raise ImproperlyConfigured(\n \"Missing required environment variable '%s'\" % key)", "def get_env(environ_name):\n temp = os.getenv(environ_name)\n if temp is None:\n if ('ProgramFiles' in environ_name) or ('ProgramW6432' in environ_name):\n temp = os.getenv('ProgramFiles')\n return temp", "def get_value(key:str):\n value = environ.get(key)\n if value == None or len(str(value)) == 0:\n raise ValueError('Missing env: '+key)\n return value", "def test_environ() -> None:\n os.environ[\"TEST\"] = \"tester\"\n assert os.getenv(\"TEST\") == \"tester\"", "def get_environment(self):\r\n return self.mcas[0].get_environment()", "def environment_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"environment_name\")", "def Get():\n try:\n return os.environ['VIRTUAL_ENV']\n except KeyError as e:\n return None", "def get_env(self) -> str:\n return self.env or ENV" ]
[ "0.7463902", "0.72996217", "0.6940578", "0.6840579", "0.6776166", "0.66800684", "0.66537464", "0.6652348", "0.6626986", "0.65878564", "0.65742695", "0.6563765", "0.6465944", "0.64491796", "0.64137214", "0.6406609", "0.6382997", "0.636827", "0.63412637", "0.63345975", "0.6322643", "0.63182974", "0.6302538", "0.62858725", "0.62794787", "0.62289274", "0.62190026", "0.6203742", "0.6202296", "0.6183809" ]
0.95803916
0
Test case for get_file_content
def test_get_file_content(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_file_binary_content(self):\n content = image_helper.get_file_binary_content(self.subject)\n\n self.assertGreater(len(content), 0)\n\n with open(self.subject, \"rb\") as f:\n original_content = f.read()\n\n self.assertEqual(content, original_content)", "def get_file_content(*args, **kwargs):\n return get_file_content_async(*args, **kwargs).get_result()", "def test_get_file_object(self):\n pass", "def _get_file_content(self):\n with open(self.file_name, mode='rb') as file:\n self.file_content = file.read()", "def test_kyc_get_file(self):\n pass", "def get_content(self, file_path:str):\n raise NotImplementedError", "def test_content(self):\n\n obj = FBO(\n path=TEST_FILES_ROOT,\n ).all().get(\n name='test1.md',\n )\n\n self.assertEqual(\n 'A short work by me, for you to read.\\n',\n obj.content,\n )", "def test_get_infile(self):\r\n pass # not practically testable, but obvious file I/O\r", "def test_open_file_entity(self):\n virtpath = self.path_translator.split_virtual_path(\n \"/test/search1/rien_12345\")\n self.assertTrue(self.path_translator.is_file_entity(virtpath))\n ftp_file = self.path_translator.open_cw_file(virtpath)\n expected_file_content = \"nothing in 12345\"\n self.assertEqual(expected_file_content,\n ftp_file.readChunk(0, -1))\n self.assertEqual({\n \"size\": len(expected_file_content),\n \"uid\": 0,\n \"gid\": 0,\n \"mtime\": 0,\n \"atime\": 0,\n \"permissions\": self.path_translator.file_perm},\n ftp_file.getAttrs())\n self.assertTrue(hasattr(ftp_file, \"close\"))\n ftp_file.close()", "def getFileContent(fn):\n content = None\n if not os.path.exists(fn):\n print(\"Can not open file \" + fn)\n else:\n with open(fn, \"rb\") as f:\n content = f.readlines()\n return content", "def file_contents(filename=None, content=None):\n logging.debug('file_contents()')\n if content:\n f = open(filename, 'w')\n f.write(content)\n f.close()\n \n try:\n f = open(filename, 'r')\n text = f.read()\n f.close()\n except IOError:\n text = None\n\n return text", "def getFileContent(self, filePath, mode):\n with open(filePath, mode) as my_file:\n return my_file.read()", "def tests_ti_document_file_content(self):\n helper_ti = cast(Document, self.ti_helper.create_group())\n\n # update file content\n file_content = b'pytest content'\n r = helper_ti.file_content(file_content)\n assert r.status_code == 200", "def test_result_file_path_get(self):\n headers = { \n 'Accept': 'application/zip',\n }\n response = self.client.open(\n '/v1/result/{file_path}'.format(file_path='file_path_example'),\n method='GET',\n headers=headers)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_get_file_attachment(self, incident_id, artifact_id, task_id, attachment_id, expected_results_1, expected_results_2):\n\n results = get_file_attachment(mocked_res_client(), incident_id, artifact_id, task_id, attachment_id)\n\n data_content = results[\"content\"]\n file_name = results[\"filename\"]\n assert expected_results_1 == file_name\n assert expected_results_2 == data_content", "def GetFileContents(self, filename):\n with tempfile.NamedTemporaryFile() as t:\n self.GetFile(filename, t.name)\n with open(t.name, 'r') as f2:\n res = f2.read()\n logging.debug(\"GetFileContents(%s)->%s\" % (filename, res))\n return res", "def test_read_from_file():\n from scraper import read_from_file\n assert read_from_file(TEST_FILE) == (TEST_CONTENT, 'utf-8')", "def get_file_contents(filename):\n with open(filename, 'r') as f:\n content = f.read()\n return content", "def 
GetFileContents(self, filename):\n logging.debug(\"GetFileContents(%s)\" % (filename))\n with tempfile.NamedTemporaryFile(mode='w') as t:\n self.GetFile(filename, t.name)\n with open(t.name, 'r', encoding='UTF-8') as f2:\n res = f2.read()\n logging.debug(\"GetFileContents(%s)->%s\" % (filename, res))\n return res", "def get_file_data(filename):", "def get_contents(base_dir, filename):\n full_path = os.path.join(base_dir, filename)\n if not is_subdir(full_path, base_dir):\n # don't allow breaking away from base_dir\n return None\n\n if os.path.exists(full_path):\n with open(full_path, 'r') as f:\n data = f.read()\n return data\n return None", "def get_content(self):\r\n try:\r\n with open(self.path, \"rb\") as f:\r\n raw_data = f.read()\r\n result = chardet.detect(raw_data)\r\n encoding = result['encoding']\r\n if encoding:\r\n with open(self.path, 'r', encoding=encoding) as f:\r\n content = f.readlines()\r\n else:\r\n try:\r\n with open(self.path, 'r', encoding='ansi') as f:\r\n content = f.readlines()\r\n except UnicodeDecodeError:\r\n print('Jakis blad kodowania pliku')\r\n return -1\r\n return content\r\n except FileNotFoundError:\r\n print(f'Wskazany plik {self.path} nie istnieje')\r\n exit()", "def contents(file):\n with open(file) as f:\n return f.read()", "def get_file_content(path_str, encoding=\"utf8\"):\n try:\n with open(path_str, mode=\"r\", encoding=encoding) as file_handle:\n file_content = file_handle.read()\n return file_content\n if not file_content:\n logging.info(\"File %s was empty\", )\n return \"\"\n except IOError as io_err:\n logging.info(\"I/O error while reading %s code:%s, error: %s\",\n path_str, io_err.errno, io_err.strerror)", "def get_content(self, filename):\n f_id = self.face.FACES.files.find_one({ \"filename\" : filename }, { \"_id\" : 1 })\n return self.face_fs.get(f_id['_id']).read()", "def test_file_field():", "def get_content_from_file(path):\n\n\t\tPathUtil.ensure_path_exists(path)\n\t\twith open(path) as file:\n\t\t\tfile_content = file.read()\n\t\treturn file_content", "def testReadFile(self):\n content = archive_parser.Archive.GLOBAL_SIG\n file_name = 'test_file'\n content += file_name + ' ' * (archive_parser.Archive.FILE_ID_LENGTH -\n len(file_name))\n content += ' ' * archive_parser.Archive.FILE_TIMESTAMP_LENGTH\n content += ' ' * archive_parser.Archive.OWNER_ID_LENGTH\n content += ' ' * archive_parser.Archive.GROUP_ID_LENGTH\n content += ' ' * archive_parser.Archive.FILE_MODE_LENGTH\n\n message = 'test file contents'\n message_size = str(len(message))\n content += message_size + ' ' * (archive_parser.Archive.CONTENT_SIZE_LENGTH -\n len(message_size))\n content += archive_parser.Archive.END_TAG\n content += message\n archive = archive_parser.Archive(content)\n archive.Parse()\n self.assertIn(file_name, archive.files)\n self.assertEquals(archive.files[file_name], message)", "def file_open(self, path, method='r', content=''):\n\n f = open(path, method)\n if method == 'r' or method ==\"rb\": \n data = f.read()\n f.close()\n return data\n elif method == 'w' or method == 'a' or method == \"wb\":\n f.write(content)\n f.close()", "def test_file(self):\n browser = self.layer.get_web_browser(smi_settings)\n\n image = test_filename('test.txt')\n browser.login(self.username, self.username)\n self.assertEqual(browser.open('/root/edit'), 200)\n browser.macros.create(\n 'Silva File', id='file', title='Text File', file=image)\n self.assertEqual(\n browser.inspect.folder_listing, ['index', 'file'])\n\n # The user should by the last author on the content and 
container.\n self.assertEqual(\n self.root.sec_get_last_author_info().userid(),\n self.username)\n self.assertEqual(\n self.root.file.sec_get_last_author_info().userid(),\n self.username)\n\n # Visit the edit page\n self.assertEqual(\n browser.inspect.folder_listing['file'].click(),\n 200)\n self.assertEqual(browser.url, '/root/file/edit/tab_edit')\n self.assertEqual(browser.inspect.breadcrumbs, ['root', 'Text File'])\n browser.inspect.breadcrumbs['root'].click()\n browser.macros.delete('file')" ]
[ "0.75981253", "0.7473025", "0.7311799", "0.7211534", "0.7207336", "0.7022469", "0.6986867", "0.69583285", "0.6948856", "0.6902298", "0.6816126", "0.67869216", "0.67402613", "0.67038316", "0.6673472", "0.6654199", "0.661827", "0.65691304", "0.6560886", "0.6552946", "0.65331566", "0.6527675", "0.65266335", "0.65150213", "0.6506927", "0.6492766", "0.6489787", "0.6483566", "0.64690053", "0.64591604" ]
0.9107887
0